diff --git a/.github/workflows/goreleaser.yml b/.github/workflows/goreleaser.yml new file mode 100644 index 0000000..6650ab2 --- /dev/null +++ b/.github/workflows/goreleaser.yml @@ -0,0 +1,43 @@ +# THIS FILE HAS BEEN GENERATED BY THE COMMAND `goreleaser:ci`; DO NOT EDIT; +# +# Releaser workflow setup +# https://goreleaser.com/ci/actions/ +# +name: release + +# run only on tags +on: + push: + tags: + - 'v*' + +permissions: + contents: write # needed to write releases + id-token: write # needed for keyless signing + packages: write # needed for ghcr access + +jobs: + release: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 # this is important, otherwise it won't checkout the full tree (i.e. no previous tags) + - uses: actions/setup-go@v4 + with: + go-version: 1.21 + cache: true + - uses: sigstore/cosign-installer@v3.2.0 # installs cosign + - uses: anchore/sbom-action/download-syft@v0.14.3 # installs syft + - uses: docker/login-action@v3 # login to ghcr + with: + registry: ghcr.io + username: ${{ github.repository_owner }} + password: ${{ secrets.GITHUB_TOKEN }} + - uses: goreleaser/goreleaser-action@v5 # run goreleaser + with: + version: latest + args: release --clean + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + diff --git a/.github/workflows/trufflehog.yml b/.github/workflows/trufflehog.yml new file mode 100644 index 0000000..fb82aea --- /dev/null +++ b/.github/workflows/trufflehog.yml @@ -0,0 +1,21 @@ +# THIS FILE HAS BEEN GENERATED BY THE COMMAND `trufflehog:ci`; DO NOT EDIT; +name: trufflehog +on: + pull_request: + push: + workflow_dispatch: + schedule: + - cron: "0 4 * * *" # run once a day at 4 AM + +jobs: + test: + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + fetch-depth: 0 + - name: Secret Scanning + uses: trufflesecurity/trufflehog@main + with: + extra_args: --only-verified diff --git a/.goreleaser.yaml b/.goreleaser.yaml index c64452a..7461646 100644 --- a/.goreleaser.yaml +++ b/.goreleaser.yaml @@ -1,3 +1,4 @@ +# THIS FILE HAS BEEN GENERATED BY THE COMMAND `goreleaser:boilerplate`; DO NOT EDIT; # See also: .github/workflows/release.yml project_name: exporter-unifi-protect @@ -80,7 +81,7 @@ dockers: - "--label=org.opencontainers.image.name={{.ProjectName}}" - "--label=org.opencontainers.image.revision={{.FullCommit}}" - "--label=org.opencontainers.image.version={{.Version}}" - - "--label=org.opencontainers.image.description=Prometheus exporter designed to help monitor your UniFi Protect setup by exporting relevant metrics to Prometheus" + - "--label=org.opencontainers.image.description=Exporter for UniFi Protect is a Prometheus exporter designed to help monitor your UniFi Protect setup by exporting relevant metrics to Prometheus." 
- "--label=org.opencontainers.image.source=https://github.com/hoomy-official/exporter-unifi-protect" # signs our docker image @@ -94,4 +95,4 @@ docker_signs: args: - 'sign' - '${artifact}' - - "--yes" # needed on cosign 2.0.0+ \ No newline at end of file + - "--yes" # needed on cosign 2.0.0+ diff --git a/Dockerfile b/Dockerfile index d9c0b1f..d51d0c4 100644 --- a/Dockerfile +++ b/Dockerfile @@ -4,4 +4,5 @@ COPY exporter-unifi-protect /usr/bin/exporter-unifi-protect ENTRYPOINT [ "/usr/bin/exporter-unifi-protect" ] -CMD ["serve"] \ No newline at end of file +CMD ["serve"] + diff --git a/PROJECT b/PROJECT new file mode 100644 index 0000000..d3e3976 --- /dev/null +++ b/PROJECT @@ -0,0 +1,9 @@ +NAME=exporter-unifi-protect +DISPLAY_NAME=unofficial Unifi Protect Exporter +DESCRIPTION=Exporter for UniFi Protect is a Prometheus exporter designed to help monitor your UniFi Protect setup by exporting relevant metrics to Prometheus. +SOURCE=https://github.com/hoomy-official/exporter-unifi-protect + +GITHUB_OWNER=hoomy-official +GITHUB_REPOSITORY=exporter-unifi-protect + +DOCKER_REGISTRY=ghcr.io/hoomy-official/exporter-unifi-protect \ No newline at end of file diff --git a/Taskfile.yaml b/Taskfile.yaml index afcc2c0..86b9aa5 100644 --- a/Taskfile.yaml +++ b/Taskfile.yaml @@ -7,11 +7,15 @@ vars: MARKDOWNLINT_ARGS: "--ignore=./dist README.md" includes: - default: https://raw.githubusercontent.com/hoomy-official/taskfile-default/main/tasks.yaml - git: https://raw.githubusercontent.com/hoomy-official/taskfile-git/main/tasks.yaml - markdownlint: https://raw.githubusercontent.com/hoomy-official/taskfile-markdownlint/main/tasks.yaml - golangci: https://raw.githubusercontent.com/hoomy-official/taskfile-golangci/main/tasks.yaml + default: https://raw.githubusercontent.com/vanyda-official/taskfile-default/main/tasks.yaml + git: https://raw.githubusercontent.com/vanyda-official/taskfile-git/main/tasks.yaml + markdownlint: https://raw.githubusercontent.com/vanyda-official/taskfile-markdownlint/main/tasks.yaml + golangci: https://raw.githubusercontent.com/vanyda-official/taskfile-golangci/main/tasks.yaml + goreleaser: https://raw.githubusercontent.com/vanyda-official/taskfile-goreleaser/main/tasks.yaml + trufflehog: https://raw.githubusercontent.com/vanyda-official/taskfile-trufflehog/main/tasks.yaml + dotenv: - .env + - PROJECT - .env.default \ No newline at end of file diff --git a/go.mod b/go.mod index e43e7af..08e8515 100644 --- a/go.mod +++ b/go.mod @@ -4,6 +4,7 @@ go 1.21.7 require ( github.com/alecthomas/kong v0.8.1 + github.com/alecthomas/kong-yaml v0.2.0 github.com/hoomy-official/go-shared v0.0.0-20240302121620-6c18a355f8e4 github.com/hoomy-official/go-unifi-protect v0.0.0-20240228010302-67430585a319 github.com/prometheus/client_golang v1.19.0 diff --git a/go.sum b/go.sum index 8d8f3e9..fad6699 100644 --- a/go.sum +++ b/go.sum @@ -2,6 +2,8 @@ github.com/alecthomas/assert/v2 v2.1.0 h1:tbredtNcQnoSd3QBhQWI7QZ3XHOVkw1Moklp2o github.com/alecthomas/assert/v2 v2.1.0/go.mod h1:b/+1DI2Q6NckYi+3mXyH3wFb8qG37K/DuK80n7WefXA= github.com/alecthomas/kong v0.8.1 h1:acZdn3m4lLRobeh3Zi2S2EpnXTd1mOL6U7xVml+vfkY= github.com/alecthomas/kong v0.8.1/go.mod h1:n1iCIO2xS46oE8ZfYCNDqdR0b0wZNrXAIAqro/2132U= +github.com/alecthomas/kong-yaml v0.2.0 h1:iiVVqVttmOsHKawlaW/TljPsjaEv1O4ODx6dloSA58Y= +github.com/alecthomas/kong-yaml v0.2.0/go.mod h1:vMvOIy+wpB49MCZ0TA3KMts38Mu9YfRP03Q1StN69/g= github.com/alecthomas/repr v0.1.0 h1:ENn2e1+J3k09gyj2shc0dHr/yjaWSHRlrJ4DPMevDqE= github.com/alecthomas/repr v0.1.0/go.mod 
h1:2kn6fqh/zIyPLmm3ugklbEi5hg5wS435eygvNfaDQL8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= diff --git a/vendor/github.com/alecthomas/kong-yaml/.golangci.yml b/vendor/github.com/alecthomas/kong-yaml/.golangci.yml new file mode 100644 index 0000000..120ea71 --- /dev/null +++ b/vendor/github.com/alecthomas/kong-yaml/.golangci.yml @@ -0,0 +1,90 @@ +run: + tests: true + skip-dirs: + - _examples + +output: + print-issued-lines: false + +linters: + enable-all: true + disable: + - maligned + - megacheck + - lll + - gocyclo + - dupl + - gochecknoglobals + - funlen + - godox + - wsl + - gomnd + - gocognit + - goerr113 + - nolintlint + - testpackage + - godot + - nestif + - paralleltest + - nlreturn + - cyclop + - exhaustivestruct + - gci + - gofumpt + - errorlint + - exhaustive + - ifshort + - wrapcheck + - stylecheck + - thelper + - nonamedreturns + - revive + - dupword + - exhaustruct + - varnamelen + - forcetypeassert + - ireturn + - maintidx + - govet + - nosnakecase + - testableexamples + - musttag + +linters-settings: + govet: + check-shadowing: true + gocyclo: + min-complexity: 10 + dupl: + threshold: 100 + goconst: + min-len: 8 + min-occurrences: 3 + forbidigo: + #forbid: + # - (Must)?NewLexer$ + exclude_godoc_examples: false + + +issues: + max-per-linter: 0 + max-same: 0 + exclude-use-default: false + exclude: + # Captured by errcheck. + - '^(G104|G204):' + # Very commonly not checked. + - 'Error return value of .(.*\.Help|.*\.MarkFlagRequired|(os\.)?std(out|err)\..*|.*Close|.*Flush|os\.Remove(All)?|.*printf?|os\.(Un)?Setenv). is not checked' + - 'exported method (.*\.MarshalJSON|.*\.UnmarshalJSON|.*\.EntityURN|.*\.GoString|.*\.Pos) should have comment or be unexported' + - 'composite literal uses unkeyed fields' + - 'declaration of "err" shadows declaration' + - 'should not use dot imports' + - 'Potential file inclusion via variable' + - 'should have comment or be unexported' + - 'comment on exported var .* should be of the form' + - 'at least one file in a package should have a package comment' + - 'string literal contains the Unicode' + - 'methods on the same type should have the same receiver name' + - '_TokenType_name should be _TokenTypeName' + - '`_TokenType_map` should be `_TokenTypeMap`' + - 'rewrite if-else to switch statement' diff --git a/vendor/github.com/alecthomas/kong-yaml/README.md b/vendor/github.com/alecthomas/kong-yaml/README.md new file mode 100644 index 0000000..5055587 --- /dev/null +++ b/vendor/github.com/alecthomas/kong-yaml/README.md @@ -0,0 +1,26 @@ +# Kong YAML utilities [![](https://godoc.org/github.com/alecthomas/kong-yaml?status.svg)](http://godoc.org/github.com/alecthomas/kong-yaml) [![CircleCI](https://img.shields.io/circleci/project/github/alecthomas/kong-yaml.svg)](https://circleci.com/gh/alecthomas/kong-yaml) + +## Configuration loader + +Use it like so: + +```go +parser, err := kong.New(&cli, kong.Configuration(kongyaml.Loader, "/etc/myapp/config.yaml", "~/.myapp.yaml")) +``` + +## YAMLFileMapper + +YAMLFileMapper implements kong.MapperValue to decode a YAML file into +a struct field. 
+ +Use it like so: + +```go +var cli struct { + Profile Profile `type:"yamlfile"` +} + +func main() { + kong.Parse(&cli, kong.NamedMapper("yamlfile", kongyaml.YAMLFileMapper)) +} +``` diff --git a/vendor/github.com/alecthomas/kong-yaml/mapper.go b/vendor/github.com/alecthomas/kong-yaml/mapper.go new file mode 100644 index 0000000..39eff18 --- /dev/null +++ b/vendor/github.com/alecthomas/kong-yaml/mapper.go @@ -0,0 +1,35 @@ +package kongyaml + +import ( + "os" + "reflect" + + "github.com/alecthomas/kong" + "gopkg.in/yaml.v3" +) + +// YAMLFileMapper implements kong.MapperValue to decode a YAML file into +// a struct field. +// +// var cli struct { +// Profile Profile `type:"yamlfile"` +// } +// +// func main() { +// kong.Parse(&cli, kong.NamedMapper("yamlfile", YAMLFileMapper)) +// } +var YAMLFileMapper = kong.MapperFunc(decodeYAMLFile) //nolint: gochecknoglobals + +func decodeYAMLFile(ctx *kong.DecodeContext, target reflect.Value) error { + var fname string + if err := ctx.Scan.PopValueInto("filename", &fname); err != nil { + return err + } + f, err := os.Open(fname) //nolint:gosec + if err != nil { + return err + } + defer f.Close() //nolint + + return yaml.NewDecoder(f).Decode(target.Addr().Interface()) +} diff --git a/vendor/github.com/alecthomas/kong-yaml/yaml.go b/vendor/github.com/alecthomas/kong-yaml/yaml.go new file mode 100644 index 0000000..08edfb2 --- /dev/null +++ b/vendor/github.com/alecthomas/kong-yaml/yaml.go @@ -0,0 +1,44 @@ +package kongyaml + +import ( + "errors" + "fmt" + "io" + "strings" + + "github.com/alecthomas/kong" + "gopkg.in/yaml.v3" +) + +// Loader is a Kong configuration loader for YAML. +func Loader(r io.Reader) (kong.Resolver, error) { + decoder := yaml.NewDecoder(r) + config := map[string]interface{}{} + err := decoder.Decode(config) + if err != nil && !errors.Is(err, io.EOF) { + return nil, fmt.Errorf("YAML config decode error: %w", err) + } + return kong.ResolverFunc(func(context *kong.Context, parent *kong.Path, flag *kong.Flag) (interface{}, error) { + // Build a string path up to this flag. + path := []string{} + for n := parent.Node(); n != nil && n.Type != kong.ApplicationNode; n = n.Parent { + path = append([]string{n.Name}, path...) 
+ } + path = append(path, flag.Name) + path = strings.Split(strings.Join(path, "-"), "-") + return find(config, path), nil + }), nil +} + +func find(config map[string]interface{}, path []string) interface{} { + if len(path) == 0 { + return config + } + for i := 0; i < len(path); i++ { + prefix := strings.Join(path[:i+1], "-") + if child, ok := config[prefix].(map[string]interface{}); ok { + return find(child, path[i+1:]) + } + } + return config[strings.Join(path, "-")] +} diff --git a/vendor/github.com/alecthomas/kong/.gitignore b/vendor/github.com/alecthomas/kong/.gitignore new file mode 100644 index 0000000..ba077a4 --- /dev/null +++ b/vendor/github.com/alecthomas/kong/.gitignore @@ -0,0 +1 @@ +bin diff --git a/vendor/github.com/alecthomas/kong/.golangci.yml b/vendor/github.com/alecthomas/kong/.golangci.yml new file mode 100644 index 0000000..1ee8fa6 --- /dev/null +++ b/vendor/github.com/alecthomas/kong/.golangci.yml @@ -0,0 +1,68 @@ +run: + tests: true + +output: + print-issued-lines: false + +linters: + enable-all: true + disable: + - maligned + - lll + - gochecknoglobals + - wsl + - funlen + - gocognit + - gomnd + - goprintffuncname + - paralleltest + - nlreturn + - goerr113 + - ifshort + - testpackage + - wrapcheck + - exhaustivestruct + - forbidigo + - gci + - godot + - gofumpt + - cyclop + - errorlint + - nestif + - golint + - scopelint + - interfacer + - tagliatelle + - thelper + - godox + - goconst + - varnamelen + - ireturn + - exhaustruct + - nonamedreturns + - nilnil + +linters-settings: + govet: + check-shadowing: true + dupl: + threshold: 100 + gocyclo: + min-complexity: 20 + exhaustive: + default-signifies-exhaustive: true + +issues: + max-per-linter: 0 + max-same: 0 + exclude-use-default: false + exclude: + - '^(G104|G204):' + # Very commonly not checked. + - 'Error return value of .(.*\.Help|.*\.MarkFlagRequired|(os\.)?std(out|err)\..*|.*Close|.*Flush|os\.Remove(All)?|.*printf?|os\.(Un)?Setenv). is not checked' + - 'exported method (.*\.MarshalJSON|.*\.UnmarshalJSON) should have comment or be unexported' + - 'composite literal uses unkeyed fields' + - 'bad syntax for struct tag key' + - 'bad syntax for struct tag pair' + - 'result .* \(error\) is always nil' + - 'package io/ioutil is deprecated' diff --git a/vendor/github.com/alecthomas/kong/COPYING b/vendor/github.com/alecthomas/kong/COPYING new file mode 100644 index 0000000..22707ac --- /dev/null +++ b/vendor/github.com/alecthomas/kong/COPYING @@ -0,0 +1,19 @@ +Copyright (C) 2018 Alec Thomas + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
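A note on why `kong-yaml` appears in `go.mod` above: the vendored `Loader` resolves flag values from YAML by joining the node/flag path with `-`, and `find` then matches either flat hyphenated keys or nested maps keyed by each prefix. Below is a minimal sketch of how this could be wired into the exporter; the CLI struct, flag names and config path are illustrative assumptions, not code from this repository.

```go
package main

import (
	"fmt"
	"os"

	"github.com/alecthomas/kong"
	kongyaml "github.com/alecthomas/kong-yaml"
)

// Hypothetical CLI grammar for illustration only.
var cli struct {
	Logging struct {
		Level string `enum:"debug,info,warn,error" default:"info" help:"Log level."`
	} `embed:"" prefix:"logging-"`
}

func main() {
	// Load defaults from a YAML file (the path is an assumption). With the vendored
	// Loader, --logging-level can be satisfied either by a flat key
	// ("logging-level: debug") or by a nested section ("logging: {level: debug}"),
	// because find() walks hyphen-joined prefixes into nested maps.
	parser, err := kong.New(&cli,
		kong.Configuration(kongyaml.Loader, "/etc/exporter-unifi-protect/config.yaml"))
	if err != nil {
		panic(err)
	}
	_, err = parser.Parse(os.Args[1:])
	parser.FatalIfErrorf(err)
	fmt.Println("log level:", cli.Logging.Level)
}
```

Values resolved this way only act as defaults; flags given on the command line still take precedence.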
diff --git a/vendor/github.com/alecthomas/kong/README.md b/vendor/github.com/alecthomas/kong/README.md new file mode 100644 index 0000000..4110465 --- /dev/null +++ b/vendor/github.com/alecthomas/kong/README.md @@ -0,0 +1,746 @@ + +
+ +# Kong is a command-line parser for Go + +[![](https://godoc.org/github.com/alecthomas/kong?status.svg)](http://godoc.org/github.com/alecthomas/kong) [![CircleCI](https://img.shields.io/circleci/project/github/alecthomas/kong.svg)](https://circleci.com/gh/alecthomas/kong) [![Go Report Card](https://goreportcard.com/badge/github.com/alecthomas/kong)](https://goreportcard.com/report/github.com/alecthomas/kong) [![Slack chat](https://img.shields.io/static/v1?logo=slack&style=flat&label=slack&color=green&message=gophers)](https://gophers.slack.com/messages/CN9DS8YF3) + + + +- [Introduction](#introduction) +- [Help](#help) + - [Help as a user of a Kong application](#help-as-a-user-of-a-kong-application) + - [Defining help in Kong](#defining-help-in-kong) +- [Command handling](#command-handling) + - [Switch on the command string](#switch-on-the-command-string) + - [Attach a Run... error method to each command](#attach-a-run-error-method-to-each-command) +- [Hooks: BeforeReset, BeforeResolve, BeforeApply, AfterApply and the Bind option](#hooks-beforereset-beforeresolve-beforeapply-afterapply-and-the-bind-option) +- [Flags](#flags) +- [Commands and sub-commands](#commands-and-sub-commands) +- [Branching positional arguments](#branching-positional-arguments) +- [Positional arguments](#positional-arguments) +- [Slices](#slices) +- [Maps](#maps) +- [Pointers](#pointers) +- [Nested data structure](#nested-data-structure) +- [Custom named decoders](#custom-named-decoders) +- [Supported field types](#supported-field-types) +- [Custom decoders mappers](#custom-decoders-mappers) +- [Supported tags](#supported-tags) +- [Plugins](#plugins) +- [Dynamic Commands](#dynamic-commands) +- [Variable interpolation](#variable-interpolation) +- [Validation](#validation) +- [Modifying Kong's behaviour](#modifying-kongs-behaviour) + - [Namehelp and Descriptionhelp - set the application name description](#namehelp-and-descriptionhelp---set-the-application-name-description) + - [Configurationloader, paths... - load defaults from configuration files](#configurationloader-paths---load-defaults-from-configuration-files) + - [Resolver... - support for default values from external sources](#resolver---support-for-default-values-from-external-sources) + - [\*Mapper... - customising how the command-line is mapped to Go values](#mapper---customising-how-the-command-line-is-mapped-to-go-values) + - [ConfigureHelpHelpOptions and HelpHelpFunc - customising help](#configurehelphelpoptions-and-helphelpfunc---customising-help) + - [Bind... - bind values for callback hooks and Run methods](#bind---bind-values-for-callback-hooks-and-run-methods) + - [Other options](#other-options) + + + +## Introduction + +Kong aims to support arbitrarily complex command-line structures with as little developer effort as possible. + +To achieve that, command-lines are expressed as Go types, with the structure and tags directing how the command line is mapped onto the struct. + +For example, the following command-line: + + shell rm [-f] [-r] ... + shell ls [ ...] + +Can be represented by the following command-line structure: + +```go +package main + +import "github.com/alecthomas/kong" + +var CLI struct { + Rm struct { + Force bool `help:"Force removal."` + Recursive bool `help:"Recursively remove files."` + + Paths []string `arg:"" name:"path" help:"Paths to remove." type:"path"` + } `cmd:"" help:"Remove files."` + + Ls struct { + Paths []string `arg:"" optional:"" name:"path" help:"Paths to list." 
type:"path"` + } `cmd:"" help:"List paths."` +} + +func main() { + ctx := kong.Parse(&CLI) + switch ctx.Command() { + case "rm ": + case "ls": + default: + panic(ctx.Command()) + } +} +``` + +## Help + +### Help as a user of a Kong application + +Every Kong application includes a `--help` flag that will display auto-generated help. + +eg. + + $ shell --help + usage: shell + + A shell-like example app. + + Flags: + --help Show context-sensitive help. + --debug Debug mode. + + Commands: + rm ... + Remove files. + + ls [ ...] + List paths. + +If a command is provided, the help will show full detail on the command including all available flags. + +eg. + + $ shell --help rm + usage: shell rm ... + + Remove files. + + Arguments: + ... Paths to remove. + + Flags: + --debug Debug mode. + + -f, --force Force removal. + -r, --recursive Recursively remove files. + +### Defining help in Kong + +Help is automatically generated from the command-line structure itself, +including `help:""` and other tags. [Variables](#variable-interpolation) will +also be interpolated into the help string. + +Finally, any command, or argument type implementing the interface +`Help() string` will have this function called to retrieve more detail to +augment the help tag. This allows for much more descriptive text than can +fit in Go tags. [See \_examples/shell/help](./_examples/shell/help) + +#### Showing the _command_'s detailed help + +A command's additional help text is _not_ shown from top-level help, but can be displayed within contextual help: + +**Top level help** + +```bash + $ go run ./_examples/shell/help --help +Usage: help + +An app demonstrating HelpProviders + +Flags: + -h, --help Show context-sensitive help. + --flag Regular flag help + +Commands: + echo Regular command help +``` + +**Contextual** + +```bash + $ go run ./_examples/shell/help echo --help +Usage: help echo + +Regular command help + +🚀 additional command help + +Arguments: + Regular argument help + +Flags: + -h, --help Show context-sensitive help. + --flag Regular flag help +``` + +#### Showing an _argument_'s detailed help + +Custom help will only be shown for _positional arguments with named fields_ ([see the README section on positional arguments for more details on what that means](../../../README.md#branching-positional-arguments)) + +**Contextual argument help** + +```bash + $ go run ./_examples/shell/help msg --help +Usage: help echo + +Regular argument help + +📣 additional argument help + +Flags: + -h, --help Show context-sensitive help. + --flag Regular flag help +``` + +## Command handling + +There are two ways to handle commands in Kong. + +### Switch on the command string + +When you call `kong.Parse()` it will return a unique string representation of the command. Each command branch in the hierarchy will be a bare word and each branching argument or required positional argument will be the name surrounded by angle brackets. Here's an example: + +There's an example of this pattern [here](https://github.com/alecthomas/kong/blob/master/_examples/shell/commandstring/main.go). + +eg. + +```go +package main + +import "github.com/alecthomas/kong" + +var CLI struct { + Rm struct { + Force bool `help:"Force removal."` + Recursive bool `help:"Recursively remove files."` + + Paths []string `arg:"" name:"path" help:"Paths to remove." type:"path"` + } `cmd:"" help:"Remove files."` + + Ls struct { + Paths []string `arg:"" optional:"" name:"path" help:"Paths to list." 
type:"path"` + } `cmd:"" help:"List paths."` +} + +func main() { + ctx := kong.Parse(&CLI) + switch ctx.Command() { + case "rm ": + case "ls": + default: + panic(ctx.Command()) + } +} +``` + +This has the advantage that it is convenient, but the downside that if you modify your CLI structure, the strings may change. This can be fragile. + +### Attach a `Run(...) error` method to each command + +A more robust approach is to break each command out into their own structs: + +1. Break leaf commands out into separate structs. +2. Attach a `Run(...) error` method to all leaf commands. +3. Call `kong.Kong.Parse()` to obtain a `kong.Context`. +4. Call `kong.Context.Run(bindings...)` to call the selected parsed command. + +Once a command node is selected by Kong it will search from that node back to the root. Each +encountered command node with a `Run(...) error` will be called in reverse order. This allows +sub-trees to be re-used fairly conveniently. + +In addition to values bound with the `kong.Bind(...)` option, any values +passed through to `kong.Context.Run(...)` are also bindable to the target's +`Run()` arguments. + +Finally, hooks can also contribute bindings via `kong.Context.Bind()` and `kong.Context.BindTo()`. + +There's a full example emulating part of the Docker CLI [here](https://github.com/alecthomas/kong/tree/master/_examples/docker). + +eg. + +```go +type Context struct { + Debug bool +} + +type RmCmd struct { + Force bool `help:"Force removal."` + Recursive bool `help:"Recursively remove files."` + + Paths []string `arg:"" name:"path" help:"Paths to remove." type:"path"` +} + +func (r *RmCmd) Run(ctx *Context) error { + fmt.Println("rm", r.Paths) + return nil +} + +type LsCmd struct { + Paths []string `arg:"" optional:"" name:"path" help:"Paths to list." type:"path"` +} + +func (l *LsCmd) Run(ctx *Context) error { + fmt.Println("ls", l.Paths) + return nil +} + +var cli struct { + Debug bool `help:"Enable debug mode."` + + Rm RmCmd `cmd:"" help:"Remove files."` + Ls LsCmd `cmd:"" help:"List paths."` +} + +func main() { + ctx := kong.Parse(&cli) + // Call the Run() method of the selected parsed command. + err := ctx.Run(&Context{Debug: cli.Debug}) + ctx.FatalIfErrorf(err) +} + +``` + +## Hooks: BeforeReset(), BeforeResolve(), BeforeApply(), AfterApply() and the Bind() option + +If a node in the grammar has a `BeforeReset(...)`, `BeforeResolve +(...)`, `BeforeApply(...) error` and/or `AfterApply(...) error` method, those +methods will be called before values are reset, before validation/assignment, +and after validation/assignment, respectively. + +The `--help` flag is implemented with a `BeforeReset` hook. + +Arguments to hooks are provided via the `Run(...)` method or `Bind(...)` option. `*Kong`, `*Context` and `*Path` are also bound and finally, hooks can also contribute bindings via `kong.Context.Bind()` and `kong.Context.BindTo()`. + +eg. + +```go +// A flag with a hook that, if triggered, will set the debug loggers output to stdout. +type debugFlag bool + +func (d debugFlag) BeforeApply(logger *log.Logger) error { + logger.SetOutput(os.Stdout) + return nil +} + +var cli struct { + Debug debugFlag `help:"Enable debug logging."` +} + +func main() { + // Debug logger going to discard. + logger := log.New(ioutil.Discard, "", log.LstdFlags) + + ctx := kong.Parse(&cli, kong.Bind(logger)) + + // ... 
+} +``` + +Another example of using hooks is load the env-file: + +```go +package main + +import ( + "fmt" + "github.com/alecthomas/kong" + "github.com/joho/godotenv" +) + +type EnvFlag string + +// BeforeResolve loads env file. +func (c EnvFlag) BeforeReset(ctx *kong.Context, trace *kong.Path) error { + path := string(ctx.FlagValue(trace.Flag).(EnvFlag)) // nolint + path = kong.ExpandPath(path) + if err := godotenv.Load(path); err != nil { + return err + } + return nil +} + +var CLI struct { + EnvFile EnvFlag + Flag `env:"FLAG"` +} + +func main() { + _ = kong.Parse(&CLI) + fmt.Println(CLI.Flag) +} +``` + +## Flags + +Any [mapped](#mapper---customising-how-the-command-line-is-mapped-to-go-values) field in the command structure _not_ tagged with `cmd` or `arg` will be a flag. Flags are optional by default. + +eg. The command-line `app [--flag="foo"]` can be represented by the following. + +```go +type CLI struct { + Flag string +} +``` + +## Commands and sub-commands + +Sub-commands are specified by tagging a struct field with `cmd`. Kong supports arbitrarily nested commands. + +eg. The following struct represents the CLI structure `command [--flag="str"] sub-command`. + +```go +type CLI struct { + Command struct { + Flag string + + SubCommand struct { + } `cmd` + } `cmd` +} +``` + +If a sub-command is tagged with `default:"1"` it will be selected if there are no further arguments. If a sub-command is tagged with `default:"withargs"` it will be selected even if there are further arguments or flags and those arguments or flags are valid for the sub-command. This allows the user to omit the sub-command name on the CLI if its arguments/flags are not ambiguous with the sibling commands or flags. + +## Branching positional arguments + +In addition to sub-commands, structs can also be configured as branching positional arguments. + +This is achieved by tagging an [unmapped](#mapper---customising-how-the-command-line-is-mapped-to-go-values) nested struct field with `arg`, then including a positional argument field inside that struct _with the same name_. For example, the following command structure: + + app rename to + +Can be represented with the following: + +```go +var CLI struct { + Rename struct { + Name struct { + Name string `arg` // <-- NOTE: identical name to enclosing struct field. + To struct { + Name struct { + Name string `arg` + } `arg` + } `cmd` + } `arg` + } `cmd` +} +``` + +This looks a little verbose in this contrived example, but typically this will not be the case. + +## Positional arguments + +If a field is tagged with `arg:""` it will be treated as the final positional +value to be parsed on the command line. By default positional arguments are +required, but specifying `optional:""` will alter this. + +If a positional argument is a slice, all remaining arguments will be appended +to that slice. + +## Slices + +Slice values are treated specially. First the input is split on the `sep:""` tag (defaults to `,`), then each element is parsed by the slice element type and appended to the slice. If the same value is encountered multiple times, elements continue to be appended. + +To represent the following command-line: + + cmd ls ... + +You would use the following: + +```go +var CLI struct { + Ls struct { + Files []string `arg:"" type:"existingfile"` + } `cmd` +} +``` + +## Maps + +Maps are similar to slices except that only one key/value pair can be assigned per value, and the `sep` tag denotes the assignment character and defaults to `=`. 
+ +To represent the following command-line: + + cmd config set = = ... + +You would use the following: + +```go +var CLI struct { + Config struct { + Set struct { + Config map[string]float64 `arg:"" type:"file:"` + } `cmd` + } `cmd` +} +``` + +For flags, multiple key+value pairs should be separated by `mapsep:"rune"` tag (defaults to `;`) eg. `--set="key1=value1;key2=value2"`. + +## Pointers + +Pointers work like the underlying type, except that you can differentiate between the presence of the zero value and no value being supplied. + +For example: + +```go +var CLI struct { + Foo *int +} +``` + +Would produce a nil value for `Foo` if no `--foo` argument is supplied, but would have a pointer to the value 0 if the argument `--foo=0` was supplied. + +## Nested data structure + +Kong support a nested data structure as well with `embed:""`. You can combine `embed:""` with `prefix:""`: + +```go +var CLI struct { + Logging struct { + Level string `enum:"debug,info,warn,error" default:"info"` + Type string `enum:"json,console" default:"console"` + } `embed:"" prefix:"logging."` +} +``` + +This configures Kong to accept flags `--logging.level` and `--logging.type`. + +## Custom named decoders + +Kong includes a number of builtin custom type mappers. These can be used by +specifying the tag `type:""`. They are registered with the option +function `NamedMapper(name, mapper)`. + +| Name | Description | +| -------------- | ---------------------------------------------------------------------------------------------------------------------- | +| `path` | A path. ~ expansion is applied. `-` is accepted for stdout, and will be passed unaltered. | +| `existingfile` | An existing file. ~ expansion is applied. `-` is accepted for stdin, and will be passed unaltered. | +| `existingdir` | An existing directory. ~ expansion is applied. | +| `counter` | Increment a numeric field. Useful for `-vvv`. Can accept `-s`, `--long` or `--long=N`. | +| `filecontent` | Read the file at path into the field. ~ expansion is applied. `-` is accepted for stdin, and will be passed unaltered. | + +Slices and maps treat type tags specially. For slices, the `type:""` tag +specifies the element type. For maps, the tag has the format +`tag:"[]:[]"` where either may be omitted. + +## Supported field types + +## Custom decoders (mappers) + +Any field implementing `encoding.TextUnmarshaler` or `json.Unmarshaler` will use those interfaces +for decoding values. Kong also includes builtin support for many common Go types: + +| Type | Description | +| --------------- | ----------------------------------------------------------------------------------------------------------- | +| `time.Duration` | Populated using `time.ParseDuration()`. | +| `time.Time` | Populated using `time.Parse()`. Format defaults to RFC3339 but can be overridden with the `format:"X"` tag. | +| `*os.File` | Path to a file that will be opened, or `-` for `os.Stdin`. File must be closed by the user. | +| `*url.URL` | Populated with `url.Parse()`. | + +For more fine-grained control, if a field implements the +[MapperValue](https://godoc.org/github.com/alecthomas/kong#MapperValue) +interface it will be used to decode arguments into the field. + +## Supported tags + +Tags can be in two forms: + +1. Standard Go syntax, eg. `kong:"required,name='foo'"`. +2. Bare tags, eg. `required:"" name:"foo"` + +Both can coexist with standard Tag parsing. 
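To make the two forms concrete, here is a small hypothetical struct (not taken from the Kong documentation) mixing both styles alongside an ordinary `json` tag:

```go
type Greet struct {
	// Standard Go syntax: every Kong setting packed into one `kong:"..."` tag.
	Name string `kong:"required,name='name',help='Name to greet.'" json:"name"`

	// Bare tags: each Kong setting is its own tag key, coexisting with other tags.
	Times int `help:"Number of greetings." default:"1" json:"times"`
}
```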
+
+| Tag | Description |
+| --- | ----------- |
+| `cmd:""` | If present, struct is a command. |
+| `arg:""` | If present, field is an argument. Required by default. |
+| `env:"X,Y,..."` | Specify envars to use for default value. The envs are resolved in the declared order. The first value found is used. |
+| `name:"X"` | Long name, for overriding field name. |
+| `help:"X"` | Help text. |
+| `type:"X"` | Specify [named types](#custom-named-decoders) to use. |
+| `placeholder:"X"` | Placeholder text. |
+| `default:"X"` | Default value. |
+| `default:"1"` | On a command, make it the default. |
+| `default:"withargs"` | On a command, make it the default and allow args/flags from that command |
+| `short:"X"` | Short name, if flag. |
+| `aliases:"X,Y"` | One or more aliases (for cmd). |
+| `required:""` | If present, flag/arg is required. |
+| `optional:""` | If present, flag/arg is optional. |
+| `hidden:""` | If present, command or flag is hidden. |
+| `negatable:""` | If present on a `bool` field, supports prefixing a flag with `--no-` to invert the default value |
+| `format:"X"` | Format for parsing input, if supported. |
+| `sep:"X"` | Separator for sequences (defaults to ","). May be `none` to disable splitting. |
+| `mapsep:"X"` | Separator for maps (defaults to ";"). May be `none` to disable splitting. |
+| `enum:"X,Y,..."` | Set of valid values allowed for this flag. An enum field must be `required` or have a valid `default`. |
+| `group:"X"` | Logical group for a flag or command. |
+| `xor:"X,Y,..."` | Exclusive OR groups for flags. Only one flag in the group can be used which is restricted within the same command. When combined with `required`, at least one of the `xor` group will be required. |
+| `prefix:"X"` | Prefix for all sub-flags. |
+| `envprefix:"X"` | Envar prefix for all sub-flags. |
+| `set:"K=V"` | Set a variable for expansion by child elements. Multiples can occur. |
+| `embed:""` | If present, this field's children will be embedded in the parent. Useful for composition. |
+| `passthrough:""` | If present on a positional argument, it stops flag parsing when encountered, as if `--` was processed before. Useful for external command wrappers, like `exec`. On a command it requires that the command contains only one argument of type `[]string` which is then filled with everything following the command, unparsed. |
+| `-` | Ignore the field. Useful for adding non-CLI fields to a configuration struct. e.g `` `kong:"-"` `` |
+
+## Plugins
+
+Kong CLI's can be extended by embedding the `kong.Plugin` type and populating it with pointers to Kong annotated structs. For example:
+
+```go
+var pluginOne struct {
+  PluginOneFlag string
+}
+var pluginTwo struct {
+  PluginTwoFlag string
+}
+var cli struct {
+  BaseFlag string
+  kong.Plugins
+}
+cli.Plugins = kong.Plugins{&pluginOne, &pluginTwo}
+```
+
+Additionally if an interface type is embedded, it can also be populated with a Kong annotated struct.
+
+## Dynamic Commands
+
+While plugins give complete control over extending command-line interfaces, Kong
+also supports dynamically adding commands via `kong.DynamicCommand()`.
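Pulling a few of the tags from the table above together, here is a hedged sketch of a flag struct; the field names, the `format` xor group and the `APP_TOKEN` environment variable are invented for illustration, and `counter` is one of the named decoders listed earlier:

```go
var cli struct {
	JSON    bool   `xor:"format" help:"Emit JSON output."`
	Table   bool   `xor:"format" help:"Emit tabular output."`
	Color   bool   `negatable:"" default:"true" help:"Colourise output."`
	Token   string `env:"APP_TOKEN" help:"API token."`
	Verbose int    `short:"v" type:"counter" help:"Increase verbosity (-v, -vv, -vvv)."`
}
```

Here `--json` and `--table` are mutually exclusive, `--no-color` flips the boolean default, and repeated `-v` flags increment `Verbose`.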
+ +## Variable interpolation + +Kong supports limited variable interpolation into help strings, enum lists and +default values. + +Variables are in the form: + + ${} + ${=} + +Variables are set with the `Vars{"key": "value", ...}` option. Undefined +variable references in the grammar without a default will result in an error at +construction time. + +Variables can also be set via the `set:"K=V"` tag. In this case, those variables will be available for that +node and all children. This is useful for composition by allowing the same struct to be reused. + +When interpolating into flag or argument help strings, some extra variables +are defined from the value itself: + + ${default} + ${enum} + +For flags with associated environment variables, the variable `${env}` can be +interpolated into the help string. In the absence of this variable in the +help string, Kong will append `($$${env})` to the help string. + +eg. + +```go +type cli struct { + Config string `type:"path" default:"${config_file}"` +} + +func main() { + kong.Parse(&cli, + kong.Vars{ + "config_file": "~/.app.conf", + }) +} +``` + +## Validation + +Kong does validation on the structure of a command-line, but also supports +extensible validation. Any node in the tree may implement the following +interface: + +```go +type Validatable interface { + Validate() error + } +``` + +If one of these nodes is in the active command-line it will be called during +normal validation. + +## Modifying Kong's behaviour + +Each Kong parser can be configured via functional options passed to `New(cli interface{}, options...Option)`. + +The full set of options can be found [here](https://godoc.org/github.com/alecthomas/kong#Option). + +### `Name(help)` and `Description(help)` - set the application name description + +Set the application name and/or description. + +The name of the application will default to the binary name, but can be overridden with `Name(name)`. + +As with all help in Kong, text will be wrapped to the terminal. + +### `Configuration(loader, paths...)` - load defaults from configuration files + +This option provides Kong with support for loading defaults from a set of configuration files. Each file is opened, if possible, and the loader called to create a resolver for that file. + +eg. + +```go +kong.Parse(&cli, kong.Configuration(kong.JSON, "/etc/myapp.json", "~/.myapp.json")) +``` + +[See the tests](https://github.com/alecthomas/kong/blob/master/resolver_test.go#L206) for an example of how the JSON file is structured. + +#### List of Configuration Loaders + +- [YAML](https://github.com/alecthomas/kong-yaml) +- [HCL](https://github.com/alecthomas/kong-hcl) +- [TOML](https://github.com/alecthomas/kong-toml) +- [JSON](https://github.com/alecthomas/kong) + +### `Resolver(...)` - support for default values from external sources + +Resolvers are Kong's extension point for providing default values from external sources. As an example, support for environment variables via the `env` tag is provided by a resolver. There's also a builtin resolver for JSON configuration files. + +Example resolvers can be found in [resolver.go](https://github.com/alecthomas/kong/blob/master/resolver.go). + +### `*Mapper(...)` - customising how the command-line is mapped to Go values + +Command-line arguments are mapped to Go values via the Mapper interface: + +```go +// A Mapper represents how a field is mapped from command-line values to Go. +// +// Mappers can be associated with concrete fields via pointer, reflect.Type, reflect.Kind, or via a "type" tag. 
+// +// Additionally, if a type implements the MapperValue interface, it will be used. +type Mapper interface { + // Decode ctx.Value with ctx.Scanner into target. + Decode(ctx *DecodeContext, target reflect.Value) error +} +``` + +All builtin Go types (as well as a bunch of useful stdlib types like `time.Time`) have mappers registered by default. Mappers for custom types can be added using `kong.??Mapper(...)` options. Mappers are applied to fields in four ways: + +1. `NamedMapper(string, Mapper)` and using the tag key `type:""`. +2. `KindMapper(reflect.Kind, Mapper)`. +3. `TypeMapper(reflect.Type, Mapper)`. +4. `ValueMapper(interface{}, Mapper)`, passing in a pointer to a field of the grammar. + +### `ConfigureHelp(HelpOptions)` and `Help(HelpFunc)` - customising help + +The default help output is usually sufficient, but if not there are two solutions. + +1. Use `ConfigureHelp(HelpOptions)` to configure how help is formatted (see [HelpOptions](https://godoc.org/github.com/alecthomas/kong#HelpOptions) for details). +2. Custom help can be wired into Kong via the `Help(HelpFunc)` option. The `HelpFunc` is passed a `Context`, which contains the parsed context for the current command-line. See the implementation of `PrintHelp` for an example. +3. Use `ValueFormatter(HelpValueFormatter)` if you want to just customize the help text that is accompanied by flags and arguments. +4. Use `Groups([]Group)` if you want to customize group titles or add a header. + +### `Bind(...)` - bind values for callback hooks and Run() methods + +See the [section on hooks](#hooks-beforeresolve-beforeapply-afterapply-and-the-bind-option) for details. + +### Other options + +The full set of options can be found [here](https://godoc.org/github.com/alecthomas/kong#Option). diff --git a/vendor/github.com/alecthomas/kong/build.go b/vendor/github.com/alecthomas/kong/build.go new file mode 100644 index 0000000..e23c115 --- /dev/null +++ b/vendor/github.com/alecthomas/kong/build.go @@ -0,0 +1,352 @@ +package kong + +import ( + "fmt" + "reflect" + "strings" +) + +// Plugins are dynamically embedded command-line structures. +// +// Each element in the Plugins list *must* be a pointer to a structure. +type Plugins []interface{} + +func build(k *Kong, ast interface{}) (app *Application, err error) { + v := reflect.ValueOf(ast) + iv := reflect.Indirect(v) + if v.Kind() != reflect.Ptr || iv.Kind() != reflect.Struct { + return nil, fmt.Errorf("expected a pointer to a struct but got %T", ast) + } + + app = &Application{} + extraFlags := k.extraFlags() + seenFlags := map[string]bool{} + for _, flag := range extraFlags { + seenFlags[flag.Name] = true + } + + node, err := buildNode(k, iv, ApplicationNode, newEmptyTag(), seenFlags) + if err != nil { + return nil, err + } + if len(node.Positional) > 0 && len(node.Children) > 0 { + return nil, fmt.Errorf("can't mix positional arguments and branching arguments on %T", ast) + } + app.Node = node + app.Node.Flags = append(extraFlags, app.Node.Flags...) 
+ app.Tag = newEmptyTag() + app.Tag.Vars = k.vars + return app, nil +} + +func dashedString(s string) string { + return strings.Join(camelCase(s), "-") +} + +type flattenedField struct { + field reflect.StructField + value reflect.Value + tag *Tag +} + +func flattenedFields(v reflect.Value, ptag *Tag) (out []flattenedField, err error) { + v = reflect.Indirect(v) + for i := 0; i < v.NumField(); i++ { + ft := v.Type().Field(i) + fv := v.Field(i) + tag, err := parseTag(v, ft) + if err != nil { + return nil, err + } + if tag.Ignored { + continue + } + // Assign group if it's not already set. + if tag.Group == "" { + tag.Group = ptag.Group + } + // Accumulate prefixes. + tag.Prefix = ptag.Prefix + tag.Prefix + tag.EnvPrefix = ptag.EnvPrefix + tag.EnvPrefix + // Combine parent vars. + tag.Vars = ptag.Vars.CloneWith(tag.Vars) + // Command and embedded structs can be pointers, so we hydrate them now. + if (tag.Cmd || tag.Embed) && ft.Type.Kind() == reflect.Ptr { + fv = reflect.New(ft.Type.Elem()).Elem() + v.FieldByIndex(ft.Index).Set(fv.Addr()) + } + if !ft.Anonymous && !tag.Embed { + if fv.CanSet() { + field := flattenedField{field: ft, value: fv, tag: tag} + out = append(out, field) + } + continue + } + + // Embedded type. + if fv.Kind() == reflect.Interface { + fv = fv.Elem() + } else if fv.Type() == reflect.TypeOf(Plugins{}) { + for i := 0; i < fv.Len(); i++ { + fields, ferr := flattenedFields(fv.Index(i).Elem(), tag) + if ferr != nil { + return nil, ferr + } + out = append(out, fields...) + } + continue + } + sub, err := flattenedFields(fv, tag) + if err != nil { + return nil, err + } + out = append(out, sub...) + } + return out, nil +} + +// Build a Node in the Kong data model. +// +// "v" is the value to create the node from, "typ" is the output Node type. +func buildNode(k *Kong, v reflect.Value, typ NodeType, tag *Tag, seenFlags map[string]bool) (*Node, error) { + node := &Node{ + Type: typ, + Target: v, + Tag: tag, + } + fields, err := flattenedFields(v, tag) + if err != nil { + return nil, err + } + +MAIN: + for _, field := range fields { + for _, r := range k.ignoreFields { + if r.MatchString(v.Type().Name() + "." + field.field.Name) { + continue MAIN + } + } + + ft := field.field + fv := field.value + + tag := field.tag + name := tag.Name + if name == "" { + name = tag.Prefix + k.flagNamer(ft.Name) + } else { + name = tag.Prefix + name + } + + if len(tag.Envs) != 0 { + for i := range tag.Envs { + tag.Envs[i] = tag.EnvPrefix + tag.Envs[i] + } + } + + // Nested structs are either commands or args, unless they implement the Mapper interface. + if field.value.Kind() == reflect.Struct && (tag.Cmd || tag.Arg) && k.registry.ForValue(fv) == nil { + typ := CommandNode + if tag.Arg { + typ = ArgumentNode + } + err = buildChild(k, node, typ, v, ft, fv, tag, name, seenFlags) + } else { + err = buildField(k, node, v, ft, fv, tag, name, seenFlags) + } + if err != nil { + return nil, err + } + } + + // Validate if there are no duplicate names + if err := checkDuplicateNames(node, v); err != nil { + return nil, err + } + + // "Unsee" flags. 
+ for _, flag := range node.Flags { + delete(seenFlags, "--"+flag.Name) + if flag.Short != 0 { + delete(seenFlags, "-"+string(flag.Short)) + } + } + + if err := validatePositionalArguments(node); err != nil { + return nil, err + } + + return node, nil +} + +func validatePositionalArguments(node *Node) error { + var last *Value + for i, curr := range node.Positional { + if last != nil { + // Scan through argument positionals to ensure optional is never before a required. + if !last.Required && curr.Required { + return fmt.Errorf("%s: required %q cannot come after optional %q", node.FullPath(), curr.Name, last.Name) + } + + // Cumulative argument needs to be last. + if last.IsCumulative() { + return fmt.Errorf("%s: argument %q cannot come after cumulative %q", node.FullPath(), curr.Name, last.Name) + } + } + + last = curr + curr.Position = i + } + + return nil +} + +func buildChild(k *Kong, node *Node, typ NodeType, v reflect.Value, ft reflect.StructField, fv reflect.Value, tag *Tag, name string, seenFlags map[string]bool) error { + child, err := buildNode(k, fv, typ, newEmptyTag(), seenFlags) + if err != nil { + return err + } + child.Name = name + child.Tag = tag + child.Parent = node + child.Help = tag.Help + child.Hidden = tag.Hidden + child.Group = buildGroupForKey(k, tag.Group) + child.Aliases = tag.Aliases + + if provider, ok := fv.Addr().Interface().(HelpProvider); ok { + child.Detail = provider.Help() + } + + // A branching argument. This is a bit hairy, as we let buildNode() do the parsing, then check that + // a positional argument is provided to the child, and move it to the branching argument field. + if tag.Arg { + if len(child.Positional) == 0 { + return failField(v, ft, "positional branch must have at least one child positional argument named %q", name) + } + if child.Positional[0].Name != name { + return failField(v, ft, "first field in positional branch must have the same name as the parent field (%s).", child.Name) + } + + child.Argument = child.Positional[0] + child.Positional = child.Positional[1:] + if child.Help == "" { + child.Help = child.Argument.Help + } + } else { + if tag.HasDefault { + if node.DefaultCmd != nil { + return failField(v, ft, "can't have more than one default command under %s", node.Summary()) + } + if tag.Default != "withargs" && (len(child.Children) > 0 || len(child.Positional) > 0) { + return failField(v, ft, "default command %s must not have subcommands or arguments", child.Summary()) + } + node.DefaultCmd = child + } + if tag.Passthrough { + if len(child.Children) > 0 || len(child.Flags) > 0 { + return failField(v, ft, "passthrough command %s must not have subcommands or flags", child.Summary()) + } + if len(child.Positional) != 1 { + return failField(v, ft, "passthrough command %s must contain exactly one positional argument", child.Summary()) + } + if !checkPassthroughArg(child.Positional[0].Target) { + return failField(v, ft, "passthrough command %s must contain exactly one positional argument of []string type", child.Summary()) + } + child.Passthrough = true + } + } + node.Children = append(node.Children, child) + + if len(child.Positional) > 0 && len(child.Children) > 0 { + return failField(v, ft, "can't mix positional arguments and branching arguments") + } + + return nil +} + +func buildField(k *Kong, node *Node, v reflect.Value, ft reflect.StructField, fv reflect.Value, tag *Tag, name string, seenFlags map[string]bool) error { + mapper := k.registry.ForNamedValue(tag.Type, fv) + if mapper == nil { + return failField(v, ft, "unsupported 
field type %s, perhaps missing a cmd:\"\" tag?", ft.Type) + } + + value := &Value{ + Name: name, + Help: tag.Help, + OrigHelp: tag.Help, + HasDefault: tag.HasDefault, + Default: tag.Default, + DefaultValue: reflect.New(fv.Type()).Elem(), + Mapper: mapper, + Tag: tag, + Target: fv, + Enum: tag.Enum, + Passthrough: tag.Passthrough, + + // Flags are optional by default, and args are required by default. + Required: (!tag.Arg && tag.Required) || (tag.Arg && !tag.Optional), + Format: tag.Format, + } + + if tag.Arg { + node.Positional = append(node.Positional, value) + } else { + if seenFlags["--"+value.Name] { + return failField(v, ft, "duplicate flag --%s", value.Name) + } + seenFlags["--"+value.Name] = true + if tag.Short != 0 { + if seenFlags["-"+string(tag.Short)] { + return failField(v, ft, "duplicate short flag -%c", tag.Short) + } + seenFlags["-"+string(tag.Short)] = true + } + flag := &Flag{ + Value: value, + Short: tag.Short, + PlaceHolder: tag.PlaceHolder, + Envs: tag.Envs, + Group: buildGroupForKey(k, tag.Group), + Xor: tag.Xor, + Hidden: tag.Hidden, + } + value.Flag = flag + node.Flags = append(node.Flags, flag) + } + return nil +} + +func buildGroupForKey(k *Kong, key string) *Group { + if key == "" { + return nil + } + for _, group := range k.groups { + if group.Key == key { + return &group + } + } + + // No group provided with kong.ExplicitGroups. We create one ad-hoc for this key. + return &Group{ + Key: key, + Title: key, + } +} + +func checkDuplicateNames(node *Node, v reflect.Value) error { + seenNames := make(map[string]struct{}) + for _, node := range node.Children { + if _, ok := seenNames[node.Name]; ok { + name := v.Type().Name() + if name == "" { + name = "" + } + return fmt.Errorf("duplicate command name %q in command %q", node.Name, name) + } + + seenNames[node.Name] = struct{}{} + } + + return nil +} diff --git a/vendor/github.com/alecthomas/kong/callbacks.go b/vendor/github.com/alecthomas/kong/callbacks.go new file mode 100644 index 0000000..8771a3e --- /dev/null +++ b/vendor/github.com/alecthomas/kong/callbacks.go @@ -0,0 +1,129 @@ +package kong + +import ( + "fmt" + "reflect" + "strings" +) + +type bindings map[reflect.Type]func() (reflect.Value, error) + +func (b bindings) String() string { + out := []string{} + for k := range b { + out = append(out, k.String()) + } + return "bindings{" + strings.Join(out, ", ") + "}" +} + +func (b bindings) add(values ...interface{}) bindings { + for _, v := range values { + v := v + b[reflect.TypeOf(v)] = func() (reflect.Value, error) { return reflect.ValueOf(v), nil } + } + return b +} + +func (b bindings) addTo(impl, iface interface{}) { + valueOf := reflect.ValueOf(impl) + b[reflect.TypeOf(iface).Elem()] = func() (reflect.Value, error) { return valueOf, nil } +} + +func (b bindings) addProvider(provider interface{}) error { + pv := reflect.ValueOf(provider) + t := pv.Type() + if t.Kind() != reflect.Func || t.NumIn() != 0 || t.NumOut() != 2 || t.Out(1) != reflect.TypeOf((*error)(nil)).Elem() { + return fmt.Errorf("%T must be a function with the signature func()(T, error)", provider) + } + rt := pv.Type().Out(0) + b[rt] = func() (reflect.Value, error) { + out := pv.Call(nil) + errv := out[1] + var err error + if !errv.IsNil() { + err = errv.Interface().(error) // nolint + } + return out[0], err + } + return nil +} + +// Clone and add values. 
+func (b bindings) clone() bindings { + out := make(bindings, len(b)) + for k, v := range b { + out[k] = v + } + return out +} + +func (b bindings) merge(other bindings) bindings { + for k, v := range other { + b[k] = v + } + return b +} + +func getMethod(value reflect.Value, name string) reflect.Value { + method := value.MethodByName(name) + if !method.IsValid() { + if value.CanAddr() { + method = value.Addr().MethodByName(name) + } + } + return method +} + +func callFunction(f reflect.Value, bindings bindings) error { + if f.Kind() != reflect.Func { + return fmt.Errorf("expected function, got %s", f.Type()) + } + in := []reflect.Value{} + t := f.Type() + if t.NumOut() != 1 || !t.Out(0).Implements(callbackReturnSignature) { + return fmt.Errorf("return value of %s must implement \"error\"", t) + } + for i := 0; i < t.NumIn(); i++ { + pt := t.In(i) + if argf, ok := bindings[pt]; ok { + argv, err := argf() + if err != nil { + return err + } + in = append(in, argv) + } else { + return fmt.Errorf("couldn't find binding of type %s for parameter %d of %s(), use kong.Bind(%s)", pt, i, t, pt) + } + } + out := f.Call(in) + if out[0].IsNil() { + return nil + } + return out[0].Interface().(error) // nolint +} + +func callAnyFunction(f reflect.Value, bindings bindings) (out []any, err error) { + if f.Kind() != reflect.Func { + return nil, fmt.Errorf("expected function, got %s", f.Type()) + } + in := []reflect.Value{} + t := f.Type() + for i := 0; i < t.NumIn(); i++ { + pt := t.In(i) + if argf, ok := bindings[pt]; ok { + argv, err := argf() + if err != nil { + return nil, err + } + in = append(in, argv) + } else { + return nil, fmt.Errorf("couldn't find binding of type %s for parameter %d of %s(), use kong.Bind(%s)", pt, i, t, pt) + } + } + outv := f.Call(in) + out = make([]any, len(outv)) + for i, v := range outv { + out[i] = v.Interface() + } + return out, nil +} diff --git a/vendor/github.com/alecthomas/kong/camelcase.go b/vendor/github.com/alecthomas/kong/camelcase.go new file mode 100644 index 0000000..acf29f7 --- /dev/null +++ b/vendor/github.com/alecthomas/kong/camelcase.go @@ -0,0 +1,90 @@ +package kong + +// NOTE: This code is from https://github.com/fatih/camelcase. MIT license. + +import ( + "unicode" + "unicode/utf8" +) + +// Split splits the camelcase word and returns a list of words. It also +// supports digits. Both lower camel case and upper camel case are supported. +// For more info please check: http://en.wikipedia.org/wiki/CamelCase +// +// Examples +// +// "" => [""] +// "lowercase" => ["lowercase"] +// "Class" => ["Class"] +// "MyClass" => ["My", "Class"] +// "MyC" => ["My", "C"] +// "HTML" => ["HTML"] +// "PDFLoader" => ["PDF", "Loader"] +// "AString" => ["A", "String"] +// "SimpleXMLParser" => ["Simple", "XML", "Parser"] +// "vimRPCPlugin" => ["vim", "RPC", "Plugin"] +// "GL11Version" => ["GL", "11", "Version"] +// "99Bottles" => ["99", "Bottles"] +// "May5" => ["May", "5"] +// "BFG9000" => ["BFG", "9000"] +// "BöseÜberraschung" => ["Böse", "Überraschung"] +// "Two spaces" => ["Two", " ", "spaces"] +// "BadUTF8\xe2\xe2\xa1" => ["BadUTF8\xe2\xe2\xa1"] +// +// Splitting rules +// +// 1) If string is not valid UTF-8, return it without splitting as +// single item array. +// 2) Assign all unicode characters into one of 4 sets: lower case +// letters, upper case letters, numbers, and all other characters. +// 3) Iterate through characters of string, introducing splits +// between adjacent characters that belong to different sets. 
+// 4) Iterate through array of split strings, and if a given string +// is upper case: +// if subsequent string is lower case: +// move last character of upper case string to beginning of +// lower case string +func camelCase(src string) (entries []string) { + // don't split invalid utf8 + if !utf8.ValidString(src) { + return []string{src} + } + entries = []string{} + var runes [][]rune + lastClass := 0 + // split into fields based on class of unicode character + for _, r := range src { + var class int + switch { + case unicode.IsLower(r): + class = 1 + case unicode.IsUpper(r): + class = 2 + case unicode.IsDigit(r): + class = 3 + default: + class = 4 + } + if class == lastClass { + runes[len(runes)-1] = append(runes[len(runes)-1], r) + } else { + runes = append(runes, []rune{r}) + } + lastClass = class + } + // handle upper case -> lower case sequences, e.g. + // "PDFL", "oader" -> "PDF", "Loader" + for i := 0; i < len(runes)-1; i++ { + if unicode.IsUpper(runes[i][0]) && unicode.IsLower(runes[i+1][0]) { + runes[i+1] = append([]rune{runes[i][len(runes[i])-1]}, runes[i+1]...) + runes[i] = runes[i][:len(runes[i])-1] + } + } + // construct []string from results + for _, s := range runes { + if len(s) > 0 { + entries = append(entries, string(s)) + } + } + return entries +} diff --git a/vendor/github.com/alecthomas/kong/context.go b/vendor/github.com/alecthomas/kong/context.go new file mode 100644 index 0000000..0b41079 --- /dev/null +++ b/vendor/github.com/alecthomas/kong/context.go @@ -0,0 +1,1010 @@ +package kong + +import ( + "errors" + "fmt" + "os" + "reflect" + "sort" + "strconv" + "strings" +) + +// Path records the nodes and parsed values from the current command-line. +type Path struct { + Parent *Node + + // One of these will be non-nil. + App *Application + Positional *Positional + Flag *Flag + Argument *Argument + Command *Command + + // Flags added by this node. + Flags []*Flag + + // True if this Path element was created as the result of a resolver. + Resolved bool +} + +// Node returns the Node associated with this Path, or nil if Path is a non-Node. +func (p *Path) Node() *Node { + switch { + case p.App != nil: + return p.App.Node + + case p.Argument != nil: + return p.Argument + + case p.Command != nil: + return p.Command + } + return nil +} + +// Visitable returns the Visitable for this path element. +func (p *Path) Visitable() Visitable { + switch { + case p.App != nil: + return p.App + + case p.Argument != nil: + return p.Argument + + case p.Command != nil: + return p.Command + + case p.Flag != nil: + return p.Flag + + case p.Positional != nil: + return p.Positional + } + return nil +} + +// Context contains the current parse context. +type Context struct { + *Kong + // A trace through parsed nodes. + Path []*Path + // Original command-line arguments. + Args []string + // Error that occurred during trace, if any. + Error error + + values map[*Value]reflect.Value // Temporary values during tracing. + bindings bindings + resolvers []Resolver // Extra context-specific resolvers. + scan *Scanner +} + +// Trace path of "args" through the grammar tree. +// +// The returned Context will include a Path of all commands, arguments, positionals and flags. +// +// This just constructs a new trace. To fully apply the trace you must call Reset(), Resolve(), +// Validate() and Apply(). 
+func Trace(k *Kong, args []string) (*Context, error) { + c := &Context{ + Kong: k, + Args: args, + Path: []*Path{ + {App: k.Model, Flags: k.Model.Flags}, + }, + values: map[*Value]reflect.Value{}, + scan: Scan(args...), + bindings: bindings{}, + } + c.Error = c.trace(c.Model.Node) + return c, nil +} + +// Bind adds bindings to the Context. +func (c *Context) Bind(args ...interface{}) { + c.bindings.add(args...) +} + +// BindTo adds a binding to the Context. +// +// This will typically have to be called like so: +// +// BindTo(impl, (*MyInterface)(nil)) +func (c *Context) BindTo(impl, iface interface{}) { + c.bindings.addTo(impl, iface) +} + +// BindToProvider allows binding of provider functions. +// +// This is useful when the Run() function of different commands require different values that may +// not all be initialisable from the main() function. +func (c *Context) BindToProvider(provider interface{}) error { + return c.bindings.addProvider(provider) +} + +// Value returns the value for a particular path element. +func (c *Context) Value(path *Path) reflect.Value { + switch { + case path.Positional != nil: + return c.values[path.Positional] + case path.Flag != nil: + return c.values[path.Flag.Value] + case path.Argument != nil: + return c.values[path.Argument.Argument] + } + panic("can only retrieve value for flag, argument or positional") +} + +// Selected command or argument. +func (c *Context) Selected() *Node { + var selected *Node + for _, path := range c.Path { + switch { + case path.Command != nil: + selected = path.Command + case path.Argument != nil: + selected = path.Argument + } + } + return selected +} + +// Empty returns true if there were no arguments provided. +func (c *Context) Empty() bool { + for _, path := range c.Path { + if !path.Resolved && path.App == nil { + return false + } + } + return true +} + +// Validate the current context. 
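To illustrate the binding API above: a minimal sketch in which BindTo supplies an interface value to a command's Run() method. The Greeter interface, HelloCmd command and CLI layout are hypothetical examples, not code from this repository or from kong itself.

package main

import (
	"fmt"

	"github.com/alecthomas/kong"
)

// Greeter is a hypothetical dependency made available to Run() via BindTo.
type Greeter interface{ Greet(name string) string }

type englishGreeter struct{}

func (englishGreeter) Greet(name string) string { return "hello " + name }

// HelloCmd is a hypothetical command; kong injects Greeter because the
// concrete value was bound to the interface type below.
type HelloCmd struct {
	Name string `arg:"" help:"Name to greet."`
}

func (h *HelloCmd) Run(g Greeter) error {
	fmt.Println(g.Greet(h.Name))
	return nil
}

var cli struct {
	Hello HelloCmd `cmd:"" help:"Say hello."`
}

func main() {
	ctx := kong.Parse(&cli)
	// BindTo(impl, (*Iface)(nil)), as described in the doc comment above.
	ctx.BindTo(englishGreeter{}, (*Greeter)(nil))
	ctx.FatalIfErrorf(ctx.Run())
}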
+func (c *Context) Validate() error { // nolint: gocyclo + err := Visit(c.Model, func(node Visitable, next Next) error { + switch node := node.(type) { + case *Value: + ok := atLeastOneEnvSet(node.Tag.Envs) + if node.Enum != "" && (!node.Required || node.HasDefault || (len(node.Tag.Envs) != 0 && ok)) { + if err := checkEnum(node, node.Target); err != nil { + return err + } + } + + case *Flag: + ok := atLeastOneEnvSet(node.Tag.Envs) + if node.Enum != "" && (!node.Required || node.HasDefault || (len(node.Tag.Envs) != 0 && ok)) { + if err := checkEnum(node.Value, node.Target); err != nil { + return err + } + } + } + return next(nil) + }) + if err != nil { + return err + } + for _, el := range c.Path { + var ( + value reflect.Value + desc string + ) + switch node := el.Visitable().(type) { + case *Value: + value = node.Target + desc = node.ShortSummary() + + case *Flag: + value = node.Target + desc = node.ShortSummary() + + case *Application: + value = node.Target + desc = "" + + case *Node: + value = node.Target + desc = node.Path() + } + if validate := isValidatable(value); validate != nil { + if err := validate.Validate(); err != nil { + if desc != "" { + return fmt.Errorf("%s: %w", desc, err) + } + return err + } + } + } + for _, resolver := range c.combineResolvers() { + if err := resolver.Validate(c.Model); err != nil { + return err + } + } + for _, path := range c.Path { + var value *Value + switch { + case path.Flag != nil: + value = path.Flag.Value + + case path.Positional != nil: + value = path.Positional + } + if value != nil && value.Tag.Enum != "" { + if err := checkEnum(value, value.Target); err != nil { + return err + } + } + if err := checkMissingFlags(path.Flags); err != nil { + return err + } + } + // Check the terminal node. + node := c.Selected() + if node == nil { + node = c.Model.Node + } + + // Find deepest positional argument so we can check if all required positionals have been provided. + positionals := 0 + for _, path := range c.Path { + if path.Positional != nil { + positionals = path.Positional.Position + 1 + } + } + + if err := checkMissingChildren(node); err != nil { + return err + } + if err := checkMissingPositionals(positionals, node.Positional); err != nil { + return err + } + if err := checkXorDuplicates(c.Path); err != nil { + return err + } + + if node.Type == ArgumentNode { + value := node.Argument + if value.Required && !value.Set { + return fmt.Errorf("%s is required", node.Summary()) + } + } + return nil +} + +// Flags returns the accumulated available flags. +func (c *Context) Flags() (flags []*Flag) { + for _, trace := range c.Path { + flags = append(flags, trace.Flags...) + } + return +} + +// Command returns the full command path. +func (c *Context) Command() string { + command := []string{} + for _, trace := range c.Path { + switch { + case trace.Positional != nil: + command = append(command, "<"+trace.Positional.Name+">") + + case trace.Argument != nil: + command = append(command, "<"+trace.Argument.Name+">") + + case trace.Command != nil: + command = append(command, trace.Command.Name) + } + } + return strings.Join(command, " ") +} + +// AddResolver adds a context-specific resolver. +// +// This is most useful in the BeforeResolve() hook. +func (c *Context) AddResolver(resolver Resolver) { + c.resolvers = append(c.resolvers, resolver) +} + +// FlagValue returns the set value of a flag if it was encountered and exists, or its default value. 
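AddResolver above is intended to be called from a BeforeResolve hook; a sketch of that pattern, assuming kong's ResolverFunc adapter (defined elsewhere in the package). The CLI struct, flag name and EXAMPLE_TOKEN variable are hypothetical.

package main

import (
	"fmt"
	"os"

	"github.com/alecthomas/kong"
)

// CLI is hypothetical; its BeforeResolve hook installs a context-specific
// resolver with AddResolver, as suggested above.
type CLI struct {
	Token string `help:"API token."`
}

func (c *CLI) BeforeResolve(ctx *kong.Context) error {
	// ResolverFunc is kong's function adapter for the Resolver interface.
	ctx.AddResolver(kong.ResolverFunc(func(kctx *kong.Context, parent *kong.Path, flag *kong.Flag) (interface{}, error) {
		if flag.Name != "token" {
			return nil, nil // leave other flags alone
		}
		if v, ok := os.LookupEnv("EXAMPLE_TOKEN"); ok {
			return v, nil // fills --token when it was not given on the command line
		}
		return nil, nil
	}))
	return nil
}

func main() {
	var cli CLI
	kong.Parse(&cli)
	fmt.Println(cli.Token)
}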
+func (c *Context) FlagValue(flag *Flag) interface{} { + for _, trace := range c.Path { + if trace.Flag == flag { + v, ok := c.values[trace.Flag.Value] + if !ok { + break + } + return v.Interface() + } + } + if flag.Target.IsValid() { + return flag.Target.Interface() + } + return flag.DefaultValue.Interface() +} + +// Reset recursively resets values to defaults (as specified in the grammar) or the zero value. +func (c *Context) Reset() error { + return Visit(c.Model.Node, func(node Visitable, next Next) error { + if value, ok := node.(*Value); ok { + return next(value.Reset()) + } + return next(nil) + }) +} + +func (c *Context) endParsing() { + args := []string{} + for { + token := c.scan.Pop() + if token.Type == EOLToken { + break + } + args = append(args, token.String()) + } + // Note: tokens must be pushed in reverse order. + for i := range args { + c.scan.PushTyped(args[len(args)-1-i], PositionalArgumentToken) + } +} + +func (c *Context) trace(node *Node) (err error) { // nolint: gocyclo + positional := 0 + node.Active = true + + flags := []*Flag{} + flagNode := node + if node.DefaultCmd != nil && node.DefaultCmd.Tag.Default == "withargs" { + // Add flags of the default command if the current node has one + // and that default command allows args / flags without explicitly + // naming the command on the CLI. + flagNode = node.DefaultCmd + } + for _, group := range flagNode.AllFlags(false) { + flags = append(flags, group...) + } + + if node.Passthrough { + c.endParsing() + } + + for !c.scan.Peek().IsEOL() { + token := c.scan.Peek() + switch token.Type { + case UntypedToken: + switch v := token.Value.(type) { + case string: + + switch { + case v == "-": + fallthrough + default: // nolint + c.scan.Pop() + c.scan.PushTyped(token.Value, PositionalArgumentToken) + + // Indicates end of parsing. All remaining arguments are treated as positional arguments only. + case v == "--": + c.scan.Pop() + c.endParsing() + + // Long flag. + case strings.HasPrefix(v, "--"): + c.scan.Pop() + // Parse it and push the tokens. + parts := strings.SplitN(v[2:], "=", 2) + if len(parts) > 1 { + c.scan.PushTyped(parts[1], FlagValueToken) + } + c.scan.PushTyped(parts[0], FlagToken) + + // Short flag. + case strings.HasPrefix(v, "-"): + c.scan.Pop() + // Note: tokens must be pushed in reverse order. + if tail := v[2:]; tail != "" { + c.scan.PushTyped(tail, ShortFlagTailToken) + } + c.scan.PushTyped(v[1:2], ShortFlagToken) + } + default: + c.scan.Pop() + c.scan.PushTyped(token.Value, PositionalArgumentToken) + } + + case ShortFlagTailToken: + c.scan.Pop() + // Note: tokens must be pushed in reverse order. + if tail := token.String()[1:]; tail != "" { + c.scan.PushTyped(tail, ShortFlagTailToken) + } + c.scan.PushTyped(token.String()[0:1], ShortFlagToken) + + case FlagToken: + if err := c.parseFlag(flags, token.String()); err != nil { + return err + } + + case ShortFlagToken: + if err := c.parseFlag(flags, token.String()); err != nil { + return err + } + + case FlagValueToken: + return fmt.Errorf("unexpected flag argument %q", token.Value) + + case PositionalArgumentToken: + candidates := []string{} + + // Ensure we've consumed all positional arguments. 
+ if positional < len(node.Positional) { + arg := node.Positional[positional] + + if arg.Passthrough { + c.endParsing() + } + + arg.Active = true + err := arg.Parse(c.scan, c.getValue(arg)) + if err != nil { + return err + } + c.Path = append(c.Path, &Path{ + Parent: node, + Positional: arg, + }) + positional++ + break + } + + // Assign token value to a branch name if tagged as an alias + // An alias will be ignored in the case of an existing command + cmds := make(map[string]bool) + for _, branch := range node.Children { + if branch.Type == CommandNode { + cmds[branch.Name] = true + } + } + for _, branch := range node.Children { + for _, a := range branch.Aliases { + _, ok := cmds[a] + if token.Value == a && !ok { + token.Value = branch.Name + break + } + } + } + + // After positional arguments have been consumed, check commands next... + for _, branch := range node.Children { + if branch.Type == CommandNode && !branch.Hidden { + candidates = append(candidates, branch.Name) + } + if branch.Type == CommandNode && branch.Name == token.Value { + c.scan.Pop() + c.Path = append(c.Path, &Path{ + Parent: node, + Command: branch, + Flags: branch.Flags, + }) + return c.trace(branch) + } + } + + // Finally, check arguments. + for _, branch := range node.Children { + if branch.Type == ArgumentNode { + arg := branch.Argument + if err := arg.Parse(c.scan, c.getValue(arg)); err == nil { + c.Path = append(c.Path, &Path{ + Parent: node, + Argument: branch, + Flags: branch.Flags, + }) + return c.trace(branch) + } + } + } + + // If there is a default command that allows args and nothing else + // matches, take the branch of the default command + if node.DefaultCmd != nil && node.DefaultCmd.Tag.Default == "withargs" { + c.Path = append(c.Path, &Path{ + Parent: node, + Command: node.DefaultCmd, + Flags: node.DefaultCmd.Flags, + }) + return c.trace(node.DefaultCmd) + } + + return findPotentialCandidates(token.String(), candidates, "unexpected argument %s", token) + default: + return fmt.Errorf("unexpected token %s", token) + } + } + return c.maybeSelectDefault(flags, node) +} + +// End of the line, check for a default command, but only if we're not displaying help, +// otherwise we'd only ever display the help for the default command. +func (c *Context) maybeSelectDefault(flags []*Flag, node *Node) error { + for _, flag := range flags { + if flag.Name == "help" && flag.Set { + return nil + } + } + if node.DefaultCmd != nil { + c.Path = append(c.Path, &Path{ + Parent: node.DefaultCmd, + Command: node.DefaultCmd, + Flags: node.DefaultCmd.Flags, + }) + } + return nil +} + +// Resolve walks through the traced path, applying resolvers to any unset flags. +func (c *Context) Resolve() error { + resolvers := c.combineResolvers() + if len(resolvers) == 0 { + return nil + } + + inserted := []*Path{} + for _, path := range c.Path { + for _, flag := range path.Flags { + // Flag has already been set on the command-line. + if _, ok := c.values[flag.Value]; ok { + continue + } + + // Pick the last resolved value. 
+ var selected interface{} + for _, resolver := range resolvers { + s, err := resolver.Resolve(c, path, flag) + if err != nil { + return fmt.Errorf("%s: %w", flag.ShortSummary(), err) + } + if s == nil { + continue + } + selected = s + } + + if selected == nil { + continue + } + + scan := Scan().PushTyped(selected, FlagValueToken) + delete(c.values, flag.Value) + err := flag.Parse(scan, c.getValue(flag.Value)) + if err != nil { + return err + } + inserted = append(inserted, &Path{ + Flag: flag, + Resolved: true, + }) + } + } + c.Path = append(c.Path, inserted...) + return nil +} + +// Combine application-level resolvers and context resolvers. +func (c *Context) combineResolvers() []Resolver { + resolvers := []Resolver{} + resolvers = append(resolvers, c.Kong.resolvers...) + resolvers = append(resolvers, c.resolvers...) + return resolvers +} + +func (c *Context) getValue(value *Value) reflect.Value { + v, ok := c.values[value] + if !ok { + v = reflect.New(value.Target.Type()).Elem() + switch v.Kind() { + case reflect.Ptr: + v.Set(reflect.New(v.Type().Elem())) + case reflect.Slice: + v.Set(reflect.MakeSlice(v.Type(), 0, 0)) + case reflect.Map: + v.Set(reflect.MakeMap(v.Type())) + default: + } + c.values[value] = v + } + return v +} + +// ApplyDefaults if they are not already set. +func (c *Context) ApplyDefaults() error { + return Visit(c.Model.Node, func(node Visitable, next Next) error { + var value *Value + switch node := node.(type) { + case *Flag: + value = node.Value + case *Node: + value = node.Argument + case *Value: + value = node + default: + } + if value != nil { + if err := value.ApplyDefault(); err != nil { + return err + } + } + return next(nil) + }) +} + +// Apply traced context to the target grammar. +func (c *Context) Apply() (string, error) { + path := []string{} + + for _, trace := range c.Path { + var value *Value + switch { + case trace.App != nil: + case trace.Argument != nil: + path = append(path, "<"+trace.Argument.Name+">") + value = trace.Argument.Argument + case trace.Command != nil: + path = append(path, trace.Command.Name) + case trace.Flag != nil: + value = trace.Flag.Value + case trace.Positional != nil: + path = append(path, "<"+trace.Positional.Name+">") + value = trace.Positional + default: + panic("unsupported path ?!") + } + if value != nil { + value.Apply(c.getValue(value)) + } + } + + return strings.Join(path, " "), nil +} + +func flipBoolValue(value reflect.Value) error { + if value.Kind() == reflect.Bool { + value.SetBool(!value.Bool()) + return nil + } + + if value.Kind() == reflect.Ptr { + if !value.IsNil() { + return flipBoolValue(value.Elem()) + } + return nil + } + + return fmt.Errorf("cannot negate a value of %s", value.Type().String()) +} + +func (c *Context) parseFlag(flags []*Flag, match string) (err error) { + candidates := []string{} + for _, flag := range flags { + long := "--" + flag.Name + short := "-" + string(flag.Short) + neg := "--no-" + flag.Name + candidates = append(candidates, long) + if flag.Short != 0 { + candidates = append(candidates, short) + } + if short != match && long != match && !(match == neg && flag.Tag.Negatable) { + continue + } + // Found a matching flag. 
+ c.scan.Pop() + if match == neg && flag.Tag.Negatable { + flag.Negated = true + } + err := flag.Parse(c.scan, c.getValue(flag.Value)) + if err != nil { + var expected *expectedError + if errors.As(err, &expected) && expected.token.InferredType().IsAny(FlagToken, ShortFlagToken) { + return fmt.Errorf("%s; perhaps try %s=%q?", err.Error(), flag.ShortSummary(), expected.token) + } + return err + } + if flag.Negated { + value := c.getValue(flag.Value) + err := flipBoolValue(value) + if err != nil { + return err + } + flag.Value.Apply(value) + } + c.Path = append(c.Path, &Path{Flag: flag}) + return nil + } + return findPotentialCandidates(match, candidates, "unknown flag %s", match) +} + +// Call an arbitrary function filling arguments with bound values. +func (c *Context) Call(fn any, binds ...interface{}) (out []interface{}, err error) { + fv := reflect.ValueOf(fn) + bindings := c.Kong.bindings.clone().add(binds...).add(c).merge(c.bindings) //nolint:govet + return callAnyFunction(fv, bindings) +} + +// RunNode calls the Run() method on an arbitrary node. +// +// This is useful in conjunction with Visit(), for dynamically running commands. +// +// Any passed values will be bindable to arguments of the target Run() method. Additionally, +// all parent nodes in the command structure will be bound. +func (c *Context) RunNode(node *Node, binds ...interface{}) (err error) { + type targetMethod struct { + node *Node + method reflect.Value + binds bindings + } + methodBinds := c.Kong.bindings.clone().add(binds...).add(c).merge(c.bindings) + methods := []targetMethod{} + for i := 0; node != nil; i, node = i+1, node.Parent { + method := getMethod(node.Target, "Run") + methodBinds = methodBinds.clone() + for p := node; p != nil; p = p.Parent { + methodBinds = methodBinds.add(p.Target.Addr().Interface()) + } + if method.IsValid() { + methods = append(methods, targetMethod{node, method, methodBinds}) + } + } + if len(methods) == 0 { + return fmt.Errorf("no Run() method found in hierarchy of %s", c.Selected().Summary()) + } + _, err = c.Apply() + if err != nil { + return err + } + + for _, method := range methods { + if err = callFunction(method.method, method.binds); err != nil { + return err + } + } + return nil +} + +// Run executes the Run() method on the selected command, which must exist. +// +// Any passed values will be bindable to arguments of the target Run() method. Additionally, +// all parent nodes in the command structure will be bound. +func (c *Context) Run(binds ...interface{}) (err error) { + node := c.Selected() + if node == nil { + if len(c.Path) > 0 { + selected := c.Path[0].Node() + if selected.Type == ApplicationNode { + method := getMethod(selected.Target, "Run") + if method.IsValid() { + return c.RunNode(selected, binds...) + } + } + } + return fmt.Errorf("no command selected") + } + return c.RunNode(node, binds...) +} + +// PrintUsage to Kong's stdout. +// +// If summary is true, a summarised version of the help will be output. 
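The --no-* handling in parseFlag above is what the `negatable` tag enables; a small hypothetical sketch:

package main

import (
	"fmt"

	"github.com/alecthomas/kong"
)

var cli struct {
	// With `negatable`, kong also accepts --no-colour and flips the parsed value.
	Colour bool `negatable:"" default:"true" help:"Colourise output."`
}

func main() {
	// app               -> Colour == true (default)
	// app --colour      -> Colour == true
	// app --no-colour   -> Colour == false
	kong.Parse(&cli)
	fmt.Println(cli.Colour)
}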
+func (c *Context) PrintUsage(summary bool) error { + options := c.helpOptions + options.Summary = summary + return c.help(options, c) +} + +func checkMissingFlags(flags []*Flag) error { + xorGroupSet := map[string]bool{} + xorGroup := map[string][]string{} + missing := []string{} + for _, flag := range flags { + if flag.Set { + for _, xor := range flag.Xor { + xorGroupSet[xor] = true + } + } + if !flag.Required || flag.Set { + continue + } + if len(flag.Xor) > 0 { + for _, xor := range flag.Xor { + if xorGroupSet[xor] { + continue + } + xorGroup[xor] = append(xorGroup[xor], flag.Summary()) + } + } else { + missing = append(missing, flag.Summary()) + } + } + for xor, flags := range xorGroup { + if !xorGroupSet[xor] && len(flags) > 1 { + missing = append(missing, strings.Join(flags, " or ")) + } + } + + if len(missing) == 0 { + return nil + } + + sort.Strings(missing) + + return fmt.Errorf("missing flags: %s", strings.Join(missing, ", ")) +} + +func checkMissingChildren(node *Node) error { + missing := []string{} + + missingArgs := []string{} + for _, arg := range node.Positional { + if arg.Required && !arg.Set { + missingArgs = append(missingArgs, arg.Summary()) + } + } + if len(missingArgs) > 0 { + missing = append(missing, strconv.Quote(strings.Join(missingArgs, " "))) + } + + for _, child := range node.Children { + if child.Hidden { + continue + } + if child.Argument != nil { + if !child.Argument.Required { + continue + } + missing = append(missing, strconv.Quote(child.Summary())) + } else { + missing = append(missing, strconv.Quote(child.Name)) + } + } + if len(missing) == 0 { + return nil + } + + if len(missing) > 5 { + missing = append(missing[:5], "...") + } + if len(missing) == 1 { + return fmt.Errorf("expected %s", missing[0]) + } + return fmt.Errorf("expected one of %s", strings.Join(missing, ", ")) +} + +// If we're missing any positionals and they're required, return an error. +func checkMissingPositionals(positional int, values []*Value) error { + // All the positionals are in. + if positional >= len(values) { + return nil + } + + // We're low on supplied positionals, but the missing one is optional. 
+ if !values[positional].Required { + return nil + } + + missing := []string{} + for ; positional < len(values); positional++ { + arg := values[positional] + // TODO(aat): Fix hardcoding of these env checks all over the place :\ + if len(arg.Tag.Envs) != 0 { + if atLeastOneEnvSet(arg.Tag.Envs) { + continue + } + } + missing = append(missing, "<"+arg.Name+">") + } + if len(missing) == 0 { + return nil + } + return fmt.Errorf("missing positional arguments %s", strings.Join(missing, " ")) +} + +func checkEnum(value *Value, target reflect.Value) error { + switch target.Kind() { + case reflect.Slice, reflect.Array: + for i := 0; i < target.Len(); i++ { + if err := checkEnum(value, target.Index(i)); err != nil { + return err + } + } + return nil + + case reflect.Map, reflect.Struct: + return errors.New("enum can only be applied to a slice or value") + + case reflect.Ptr: + if target.IsNil() { + return nil + } + return checkEnum(value, target.Elem()) + default: + enumSlice := value.EnumSlice() + v := fmt.Sprintf("%v", target) + enums := []string{} + for _, enum := range enumSlice { + if enum == v { + return nil + } + enums = append(enums, fmt.Sprintf("%q", enum)) + } + return fmt.Errorf("%s must be one of %s but got %q", value.ShortSummary(), strings.Join(enums, ","), target.Interface()) + } +} + +func checkPassthroughArg(target reflect.Value) bool { + typ := target.Type() + switch typ.Kind() { + case reflect.Slice: + return typ.Elem().Kind() == reflect.String + default: + return false + } +} + +func checkXorDuplicates(paths []*Path) error { + for _, path := range paths { + seen := map[string]*Flag{} + for _, flag := range path.Flags { + if !flag.Set { + continue + } + for _, xor := range flag.Xor { + if seen[xor] != nil { + return fmt.Errorf("--%s and --%s can't be used together", seen[xor].Name, flag.Name) + } + seen[xor] = flag + } + } + } + return nil +} + +func findPotentialCandidates(needle string, haystack []string, format string, args ...interface{}) error { + if len(haystack) == 0 { + return fmt.Errorf(format, args...) + } + closestCandidates := []string{} + for _, candidate := range haystack { + if strings.HasPrefix(candidate, needle) || levenshtein(candidate, needle) <= 2 { + closestCandidates = append(closestCandidates, fmt.Sprintf("%q", candidate)) + } + } + prefix := fmt.Sprintf(format, args...) + if len(closestCandidates) == 1 { + return fmt.Errorf("%s, did you mean %s?", prefix, closestCandidates[0]) + } else if len(closestCandidates) > 1 { + return fmt.Errorf("%s, did you mean one of %s?", prefix, strings.Join(closestCandidates, ", ")) + } + return fmt.Errorf("%s", prefix) +} + +type validatable interface{ Validate() error } + +func isValidatable(v reflect.Value) validatable { + if !v.IsValid() || (v.Kind() == reflect.Ptr || v.Kind() == reflect.Slice || v.Kind() == reflect.Map) && v.IsNil() { + return nil + } + if validate, ok := v.Interface().(validatable); ok { + return validate + } + if v.CanAddr() { + return isValidatable(v.Addr()) + } + return nil +} + +func atLeastOneEnvSet(envs []string) bool { + for _, env := range envs { + if _, ok := os.LookupEnv(env); ok { + return true + } + } + return false +} diff --git a/vendor/github.com/alecthomas/kong/defaults.go b/vendor/github.com/alecthomas/kong/defaults.go new file mode 100644 index 0000000..f6728d7 --- /dev/null +++ b/vendor/github.com/alecthomas/kong/defaults.go @@ -0,0 +1,21 @@ +package kong + +// ApplyDefaults if they are not already set. 
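The enum and missing-flag checks above are driven entirely by struct tags; a hypothetical flag that exercises checkEnum:

package main

import (
	"fmt"

	"github.com/alecthomas/kong"
)

var cli struct {
	// Values outside the list are rejected by checkEnum with an error like:
	//   --level must be one of "debug","info","warn","error" but got "verbose"
	Level string `enum:"debug,info,warn,error" default:"info" help:"Log level."`
}

func main() {
	kong.Parse(&cli)
	fmt.Println(cli.Level)
}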
+func ApplyDefaults(target interface{}, options ...Option) error { + app, err := New(target, options...) + if err != nil { + return err + } + ctx, err := Trace(app, nil) + if err != nil { + return err + } + err = ctx.Resolve() + if err != nil { + return err + } + if err = ctx.ApplyDefaults(); err != nil { + return err + } + return ctx.Validate() +} diff --git a/vendor/github.com/alecthomas/kong/doc.go b/vendor/github.com/alecthomas/kong/doc.go new file mode 100644 index 0000000..78c4d11 --- /dev/null +++ b/vendor/github.com/alecthomas/kong/doc.go @@ -0,0 +1,32 @@ +// Package kong aims to support arbitrarily complex command-line structures with as little developer effort as possible. +// +// Here's an example: +// +// shell rm [-f] [-r] ... +// shell ls [ ...] +// +// This can be represented by the following command-line structure: +// +// package main +// +// import "github.com/alecthomas/kong" +// +// var CLI struct { +// Rm struct { +// Force bool `short:"f" help:"Force removal."` +// Recursive bool `short:"r" help:"Recursively remove files."` +// +// Paths []string `arg help:"Paths to remove." type:"path"` +// } `cmd help:"Remove files."` +// +// Ls struct { +// Paths []string `arg optional help:"Paths to list." type:"path"` +// } `cmd help:"List paths."` +// } +// +// func main() { +// kong.Parse(&CLI) +// } +// +// See https://github.com/alecthomas/kong for details. +package kong diff --git a/vendor/github.com/alecthomas/kong/error.go b/vendor/github.com/alecthomas/kong/error.go new file mode 100644 index 0000000..18225ef --- /dev/null +++ b/vendor/github.com/alecthomas/kong/error.go @@ -0,0 +1,12 @@ +package kong + +// ParseError is the error type returned by Kong.Parse(). +// +// It contains the parse Context that triggered the error. +type ParseError struct { + error + Context *Context +} + +// Unwrap returns the original cause of the error. +func (p *ParseError) Unwrap() error { return p.error } diff --git a/vendor/github.com/alecthomas/kong/global.go b/vendor/github.com/alecthomas/kong/global.go new file mode 100644 index 0000000..d4b3cb5 --- /dev/null +++ b/vendor/github.com/alecthomas/kong/global.go @@ -0,0 +1,16 @@ +package kong + +import ( + "os" +) + +// Parse constructs a new parser and parses the default command-line. +func Parse(cli interface{}, options ...Option) *Context { + parser, err := New(cli, options...) 
+ if err != nil { + panic(err) + } + ctx, err := parser.Parse(os.Args[1:]) + parser.FatalIfErrorf(err) + return ctx +} diff --git a/vendor/github.com/alecthomas/kong/guesswidth.go b/vendor/github.com/alecthomas/kong/guesswidth.go new file mode 100644 index 0000000..46768e6 --- /dev/null +++ b/vendor/github.com/alecthomas/kong/guesswidth.go @@ -0,0 +1,9 @@ +// +build appengine !linux,!freebsd,!darwin,!dragonfly,!netbsd,!openbsd + +package kong + +import "io" + +func guessWidth(w io.Writer) int { + return 80 +} diff --git a/vendor/github.com/alecthomas/kong/guesswidth_unix.go b/vendor/github.com/alecthomas/kong/guesswidth_unix.go new file mode 100644 index 0000000..db52595 --- /dev/null +++ b/vendor/github.com/alecthomas/kong/guesswidth_unix.go @@ -0,0 +1,42 @@ +//go:build (!appengine && linux) || freebsd || darwin || dragonfly || netbsd || openbsd +// +build !appengine,linux freebsd darwin dragonfly netbsd openbsd + +package kong + +import ( + "io" + "os" + "strconv" + "syscall" + "unsafe" +) + +func guessWidth(w io.Writer) int { + // check if COLUMNS env is set to comply with + // http://pubs.opengroup.org/onlinepubs/009604499/basedefs/xbd_chap08.html + colsStr := os.Getenv("COLUMNS") + if colsStr != "" { + if cols, err := strconv.Atoi(colsStr); err == nil { + return cols + } + } + + if t, ok := w.(*os.File); ok { + fd := t.Fd() + var dimensions [4]uint16 + + if _, _, err := syscall.Syscall6( + syscall.SYS_IOCTL, + uintptr(fd), // nolint: unconvert + uintptr(syscall.TIOCGWINSZ), + uintptr(unsafe.Pointer(&dimensions)), // nolint: gas + 0, 0, 0, + ); err == 0 { + if dimensions[1] == 0 { + return 80 + } + return int(dimensions[1]) + } + } + return 80 +} diff --git a/vendor/github.com/alecthomas/kong/help.go b/vendor/github.com/alecthomas/kong/help.go new file mode 100644 index 0000000..4fb7700 --- /dev/null +++ b/vendor/github.com/alecthomas/kong/help.go @@ -0,0 +1,578 @@ +package kong + +import ( + "bytes" + "fmt" + "go/doc" + "io" + "strings" +) + +const ( + defaultIndent = 2 + defaultColumnPadding = 4 +) + +// Help flag. +type helpValue bool + +func (h helpValue) BeforeReset(ctx *Context) error { + options := ctx.Kong.helpOptions + options.Summary = false + err := ctx.Kong.help(options, ctx) + if err != nil { + return err + } + ctx.Kong.Exit(0) + return nil +} + +// HelpOptions for HelpPrinters. +type HelpOptions struct { + // Don't print top-level usage summary. + NoAppSummary bool + + // Write a one-line summary of the context. + Summary bool + + // Write help in a more compact, but still fully-specified, form. + Compact bool + + // Tree writes command chains in a tree structure instead of listing them separately. + Tree bool + + // Place the flags after the commands listing. + FlagsLast bool + + // Indenter modulates the given prefix for the next layer in the tree view. + // The following exported templates can be used: kong.SpaceIndenter, kong.LineIndenter, kong.TreeIndenter + // The kong.SpaceIndenter will be used by default. + Indenter HelpIndenter + + // Don't show the help associated with subcommands + NoExpandSubcommands bool + + // Clamp the help wrap width to a value smaller than the terminal width. + // If this is set to a non-positive number, the terminal width is used; otherwise, + // the min of this value or the terminal width is used. + WrapUpperBound int +} + +// Apply options to Kong as a configuration option. +func (h HelpOptions) Apply(k *Kong) error { + k.helpOptions = h + return nil +} + +// HelpProvider can be implemented by commands/args to provide detailed help. 
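Because HelpOptions implements Apply, it can be passed directly as a parser option; a brief sketch with a hypothetical CLI:

package main

import "github.com/alecthomas/kong"

var cli struct {
	Verbose bool `help:"Enable verbose output."`
}

func main() {
	// Compact help with flags listed after commands, wrapped at <= 100 columns.
	kong.Parse(&cli, kong.HelpOptions{
		Compact:        true,
		FlagsLast:      true,
		WrapUpperBound: 100,
	})
}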
+type HelpProvider interface { + // This string is formatted by go/doc and thus has the same formatting rules. + Help() string +} + +// PlaceHolderProvider can be implemented by mappers to provide custom placeholder text. +type PlaceHolderProvider interface { + PlaceHolder(flag *Flag) string +} + +// HelpIndenter is used to indent new layers in the help tree. +type HelpIndenter func(prefix string) string + +// HelpPrinter is used to print context-sensitive help. +type HelpPrinter func(options HelpOptions, ctx *Context) error + +// HelpValueFormatter is used to format the help text of flags and positional arguments. +type HelpValueFormatter func(value *Value) string + +// DefaultHelpValueFormatter is the default HelpValueFormatter. +func DefaultHelpValueFormatter(value *Value) string { + if len(value.Tag.Envs) == 0 || HasInterpolatedVar(value.OrigHelp, "env") { + return value.Help + } + suffix := "(" + formatEnvs(value.Tag.Envs) + ")" + switch { + case strings.HasSuffix(value.Help, "."): + return value.Help[:len(value.Help)-1] + " " + suffix + "." + case value.Help == "": + return suffix + default: + return value.Help + " " + suffix + } +} + +// DefaultShortHelpPrinter is the default HelpPrinter for short help on error. +func DefaultShortHelpPrinter(options HelpOptions, ctx *Context) error { + w := newHelpWriter(ctx, options) + cmd := ctx.Selected() + app := ctx.Model + if cmd == nil { + w.Printf("Usage: %s%s", app.Name, app.Summary()) + w.Printf(`Run "%s --help" for more information.`, app.Name) + } else { + w.Printf("Usage: %s %s", app.Name, cmd.Summary()) + w.Printf(`Run "%s --help" for more information.`, cmd.FullPath()) + } + return w.Write(ctx.Stdout) +} + +// DefaultHelpPrinter is the default HelpPrinter. +func DefaultHelpPrinter(options HelpOptions, ctx *Context) error { + if ctx.Empty() { + options.Summary = false + } + w := newHelpWriter(ctx, options) + selected := ctx.Selected() + if selected == nil { + printApp(w, ctx.Model) + } else { + printCommand(w, ctx.Model, selected) + } + return w.Write(ctx.Stdout) +} + +func printApp(w *helpWriter, app *Application) { + if !w.NoAppSummary { + w.Printf("Usage: %s%s", app.Name, app.Summary()) + } + printNodeDetail(w, app.Node, true) + cmds := app.Leaves(true) + if len(cmds) > 0 && app.HelpFlag != nil { + w.Print("") + if w.Summary { + w.Printf(`Run "%s --help" for more information.`, app.Name) + } else { + w.Printf(`Run "%s --help" for more information on a command.`, app.Name) + } + } +} + +func printCommand(w *helpWriter, app *Application, cmd *Command) { + if !w.NoAppSummary { + w.Printf("Usage: %s %s", app.Name, cmd.Summary()) + } + printNodeDetail(w, cmd, true) + if w.Summary && app.HelpFlag != nil { + w.Print("") + w.Printf(`Run "%s --help" for more information.`, cmd.FullPath()) + } +} + +func printNodeDetail(w *helpWriter, node *Node, hide bool) { + if node.Help != "" { + w.Print("") + w.Wrap(node.Help) + } + if w.Summary { + return + } + if node.Detail != "" { + w.Print("") + w.Wrap(node.Detail) + } + if len(node.Positional) > 0 { + w.Print("") + w.Print("Arguments:") + writePositionals(w.Indent(), node.Positional) + } + printFlags := func() { + if flags := node.AllFlags(true); len(flags) > 0 { + groupedFlags := collectFlagGroups(flags) + for _, group := range groupedFlags { + w.Print("") + if group.Metadata.Title != "" { + w.Wrap(group.Metadata.Title) + } + if group.Metadata.Description != "" { + w.Indent().Wrap(group.Metadata.Description) + w.Print("") + } + writeFlags(w.Indent(), group.Flags) + } + } + } + if !w.FlagsLast { + 
printFlags() + } + var cmds []*Node + if w.NoExpandSubcommands { + cmds = node.Children + } else { + cmds = node.Leaves(hide) + } + if len(cmds) > 0 { + iw := w.Indent() + if w.Tree { + w.Print("") + w.Print("Commands:") + writeCommandTree(iw, node) + } else { + groupedCmds := collectCommandGroups(cmds) + for _, group := range groupedCmds { + w.Print("") + if group.Metadata.Title != "" { + w.Wrap(group.Metadata.Title) + } + if group.Metadata.Description != "" { + w.Indent().Wrap(group.Metadata.Description) + w.Print("") + } + + if w.Compact { + writeCompactCommandList(group.Commands, iw) + } else { + writeCommandList(group.Commands, iw) + } + } + } + } + if w.FlagsLast { + printFlags() + } +} + +func writeCommandList(cmds []*Node, iw *helpWriter) { + for i, cmd := range cmds { + if cmd.Hidden { + continue + } + printCommandSummary(iw, cmd) + if i != len(cmds)-1 { + iw.Print("") + } + } +} + +func writeCompactCommandList(cmds []*Node, iw *helpWriter) { + rows := [][2]string{} + for _, cmd := range cmds { + if cmd.Hidden { + continue + } + rows = append(rows, [2]string{cmd.Path(), cmd.Help}) + } + writeTwoColumns(iw, rows) +} + +func writeCommandTree(w *helpWriter, node *Node) { + rows := make([][2]string, 0, len(node.Children)*2) + for i, cmd := range node.Children { + if cmd.Hidden { + continue + } + rows = append(rows, w.CommandTree(cmd, "")...) + if i != len(node.Children)-1 { + rows = append(rows, [2]string{"", ""}) + } + } + writeTwoColumns(w, rows) +} + +type helpFlagGroup struct { + Metadata *Group + Flags [][]*Flag +} + +func collectFlagGroups(flags [][]*Flag) []helpFlagGroup { + // Group keys in order of appearance. + groups := []*Group{} + // Flags grouped by their group key. + flagsByGroup := map[string][][]*Flag{} + + for _, levelFlags := range flags { + levelFlagsByGroup := map[string][]*Flag{} + + for _, flag := range levelFlags { + key := "" + if flag.Group != nil { + key = flag.Group.Key + groupAlreadySeen := false + for _, group := range groups { + if key == group.Key { + groupAlreadySeen = true + break + } + } + if !groupAlreadySeen { + groups = append(groups, flag.Group) + } + } + + levelFlagsByGroup[key] = append(levelFlagsByGroup[key], flag) + } + + for key, flags := range levelFlagsByGroup { + flagsByGroup[key] = append(flagsByGroup[key], flags) + } + } + + out := []helpFlagGroup{} + // Ungrouped flags are always displayed first. + if ungroupedFlags, ok := flagsByGroup[""]; ok { + out = append(out, helpFlagGroup{ + Metadata: &Group{Title: "Flags:"}, + Flags: ungroupedFlags, + }) + } + for _, group := range groups { + out = append(out, helpFlagGroup{Metadata: group, Flags: flagsByGroup[group.Key]}) + } + return out +} + +type helpCommandGroup struct { + Metadata *Group + Commands []*Node +} + +func collectCommandGroups(nodes []*Node) []helpCommandGroup { + // Groups in order of appearance. + groups := []*Group{} + // Nodes grouped by their group key. + nodesByGroup := map[string][]*Node{} + + for _, node := range nodes { + key := "" + if group := node.ClosestGroup(); group != nil { + key = group.Key + if _, ok := nodesByGroup[key]; !ok { + groups = append(groups, group) + } + } + nodesByGroup[key] = append(nodesByGroup[key], node) + } + + out := []helpCommandGroup{} + // Ungrouped nodes are always displayed first. 
+ if ungroupedNodes, ok := nodesByGroup[""]; ok { + out = append(out, helpCommandGroup{ + Metadata: &Group{Title: "Commands:"}, + Commands: ungroupedNodes, + }) + } + for _, group := range groups { + out = append(out, helpCommandGroup{Metadata: group, Commands: nodesByGroup[group.Key]}) + } + return out +} + +func printCommandSummary(w *helpWriter, cmd *Command) { + w.Print(cmd.Summary()) + if cmd.Help != "" { + w.Indent().Wrap(cmd.Help) + } +} + +type helpWriter struct { + indent string + width int + lines *[]string + helpFormatter HelpValueFormatter + HelpOptions +} + +func newHelpWriter(ctx *Context, options HelpOptions) *helpWriter { + lines := []string{} + wrapWidth := guessWidth(ctx.Stdout) + if options.WrapUpperBound > 0 && wrapWidth > options.WrapUpperBound { + wrapWidth = options.WrapUpperBound + } + w := &helpWriter{ + indent: "", + width: wrapWidth, + lines: &lines, + helpFormatter: ctx.Kong.helpFormatter, + HelpOptions: options, + } + return w +} + +func (h *helpWriter) Printf(format string, args ...interface{}) { + h.Print(fmt.Sprintf(format, args...)) +} + +func (h *helpWriter) Print(text string) { + *h.lines = append(*h.lines, strings.TrimRight(h.indent+text, " ")) +} + +// Indent returns a new helpWriter indented by two characters. +func (h *helpWriter) Indent() *helpWriter { + return &helpWriter{indent: h.indent + " ", lines: h.lines, width: h.width - 2, HelpOptions: h.HelpOptions, helpFormatter: h.helpFormatter} +} + +func (h *helpWriter) String() string { + return strings.Join(*h.lines, "\n") +} + +func (h *helpWriter) Write(w io.Writer) error { + for _, line := range *h.lines { + _, err := io.WriteString(w, line+"\n") + if err != nil { + return err + } + } + return nil +} + +func (h *helpWriter) Wrap(text string) { + w := bytes.NewBuffer(nil) + doc.ToText(w, strings.TrimSpace(text), "", " ", h.width) + for _, line := range strings.Split(strings.TrimSpace(w.String()), "\n") { + h.Print(line) + } +} + +func writePositionals(w *helpWriter, args []*Positional) { + rows := [][2]string{} + for _, arg := range args { + rows = append(rows, [2]string{arg.Summary(), w.helpFormatter(arg)}) + } + writeTwoColumns(w, rows) +} + +func writeFlags(w *helpWriter, groups [][]*Flag) { + rows := [][2]string{} + haveShort := false + for _, group := range groups { + for _, flag := range group { + if flag.Short != 0 { + haveShort = true + break + } + } + } + for i, group := range groups { + if i > 0 { + rows = append(rows, [2]string{"", ""}) + } + for _, flag := range group { + if !flag.Hidden { + rows = append(rows, [2]string{formatFlag(haveShort, flag), w.helpFormatter(flag.Value)}) + } + } + } + writeTwoColumns(w, rows) +} + +func writeTwoColumns(w *helpWriter, rows [][2]string) { + maxLeft := 375 * w.width / 1000 + if maxLeft < 30 { + maxLeft = 30 + } + // Find size of first column. 
+ leftSize := 0 + for _, row := range rows { + if c := len(row[0]); c > leftSize && c < maxLeft { + leftSize = c + } + } + + offsetStr := strings.Repeat(" ", leftSize+defaultColumnPadding) + + for _, row := range rows { + buf := bytes.NewBuffer(nil) + doc.ToText(buf, row[1], "", strings.Repeat(" ", defaultIndent), w.width-leftSize-defaultColumnPadding) + lines := strings.Split(strings.TrimRight(buf.String(), "\n"), "\n") + + line := fmt.Sprintf("%-*s", leftSize, row[0]) + if len(row[0]) < maxLeft { + line += fmt.Sprintf("%*s%s", defaultColumnPadding, "", lines[0]) + lines = lines[1:] + } + w.Print(line) + for _, line := range lines { + w.Printf("%s%s", offsetStr, line) + } + } +} + +// haveShort will be true if there are short flags present at all in the help. Useful for column alignment. +func formatFlag(haveShort bool, flag *Flag) string { + flagString := "" + name := flag.Name + isBool := flag.IsBool() + if flag.Short != 0 { + if isBool && flag.Tag.Negatable { + flagString += fmt.Sprintf("-%c, --[no-]%s", flag.Short, name) + } else { + flagString += fmt.Sprintf("-%c, --%s", flag.Short, name) + } + } else { + if isBool && flag.Tag.Negatable { + if haveShort { + flagString = fmt.Sprintf(" --[no-]%s", name) + } else { + flagString = fmt.Sprintf("--[no-]%s", name) + } + } else { + if haveShort { + flagString += fmt.Sprintf(" --%s", name) + } else { + flagString += fmt.Sprintf("--%s", name) + } + } + } + if !isBool { + flagString += fmt.Sprintf("=%s", flag.FormatPlaceHolder()) + } + return flagString +} + +// CommandTree creates a tree with the given node name as root and its children's arguments and sub commands as leaves. +func (h *HelpOptions) CommandTree(node *Node, prefix string) (rows [][2]string) { + var nodeName string + switch node.Type { + default: + nodeName += prefix + node.Name + if len(node.Aliases) != 0 { + nodeName += fmt.Sprintf(" (%s)", strings.Join(node.Aliases, ",")) + } + case ArgumentNode: + nodeName += prefix + "<" + node.Name + ">" + } + rows = append(rows, [2]string{nodeName, node.Help}) + if h.Indenter == nil { + prefix = SpaceIndenter(prefix) + } else { + prefix = h.Indenter(prefix) + } + for _, arg := range node.Positional { + rows = append(rows, [2]string{prefix + arg.Summary(), arg.Help}) + } + for _, subCmd := range node.Children { + if subCmd.Hidden { + continue + } + rows = append(rows, h.CommandTree(subCmd, prefix)...) + } + return +} + +// SpaceIndenter adds a space indent to the given prefix. +func SpaceIndenter(prefix string) string { + return prefix + strings.Repeat(" ", defaultIndent) +} + +// LineIndenter adds line points to every new indent. +func LineIndenter(prefix string) string { + if prefix == "" { + return "- " + } + return strings.Repeat(" ", defaultIndent) + prefix +} + +// TreeIndenter adds line points to every new indent and vertical lines to every layer. +func TreeIndenter(prefix string) string { + if prefix == "" { + return "|- " + } + return "|" + strings.Repeat(" ", defaultIndent) + prefix +} + +func formatEnvs(envs []string) string { + formatted := make([]string, len(envs)) + for i := range envs { + formatted[i] = "$" + envs[i] + } + + return strings.Join(formatted, ", ") +} diff --git a/vendor/github.com/alecthomas/kong/hooks.go b/vendor/github.com/alecthomas/kong/hooks.go new file mode 100644 index 0000000..d166b08 --- /dev/null +++ b/vendor/github.com/alecthomas/kong/hooks.go @@ -0,0 +1,19 @@ +package kong + +// BeforeResolve is a documentation-only interface describing hooks that run before resolvers are applied. 
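DefaultHelpValueFormatter and formatEnvs above fold the `env` tag into help text; a hypothetical flag showing the rendered form (the variable name is made up, and the env-to-value resolution itself lives outside this file):

package main

import (
	"fmt"

	"github.com/alecthomas/kong"
)

var cli struct {
	// Help is rendered roughly as: "API token ($EXAMPLE_TOKEN)." and the flag
	// is filled from that variable when it is not passed explicitly.
	Token string `env:"EXAMPLE_TOKEN" help:"API token."`
}

func main() {
	kong.Parse(&cli)
	fmt.Println(cli.Token)
}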
+type BeforeResolve interface { + // This is not the correct signature - see README for details. + BeforeResolve(args ...interface{}) error +} + +// BeforeApply is a documentation-only interface describing hooks that run before values are set. +type BeforeApply interface { + // This is not the correct signature - see README for details. + BeforeApply(args ...interface{}) error +} + +// AfterApply is a documentation-only interface describing hooks that run after values are set. +type AfterApply interface { + // This is not the correct signature - see README for details. + AfterApply(args ...interface{}) error +} diff --git a/vendor/github.com/alecthomas/kong/interpolate.go b/vendor/github.com/alecthomas/kong/interpolate.go new file mode 100644 index 0000000..e811632 --- /dev/null +++ b/vendor/github.com/alecthomas/kong/interpolate.go @@ -0,0 +1,52 @@ +package kong + +import ( + "fmt" + "regexp" +) + +var interpolationRegex = regexp.MustCompile(`(\$\$)|((?:\${([[:alpha:]_][[:word:]]*))(?:=([^}]+))?})|(\$)|([^$]+)`) + +// HasInterpolatedVar returns true if the variable "v" is interpolated in "s". +func HasInterpolatedVar(s string, v string) bool { + matches := interpolationRegex.FindAllStringSubmatch(s, -1) + for _, match := range matches { + if name := match[3]; name == v { + return true + } + } + return false +} + +// Interpolate variables from vars into s for substrings in the form ${var} or ${var=default}. +func interpolate(s string, vars Vars, updatedVars map[string]string) (string, error) { + out := "" + matches := interpolationRegex.FindAllStringSubmatch(s, -1) + if len(matches) == 0 { + return s, nil + } + for key, val := range updatedVars { + if vars[key] != val { + vars = vars.CloneWith(updatedVars) + break + } + } + for _, match := range matches { + if dollar := match[1]; dollar != "" { + out += "$" + } else if name := match[3]; name != "" { + value, ok := vars[name] + if !ok { + // No default value. + if match[4] == "" { + return "", fmt.Errorf("undefined variable ${%s}", name) + } + value = match[4] + } + out += value + } else { + out += match[0] + } + } + return out, nil +} diff --git a/vendor/github.com/alecthomas/kong/kong.go b/vendor/github.com/alecthomas/kong/kong.go new file mode 100644 index 0000000..c4eda71 --- /dev/null +++ b/vendor/github.com/alecthomas/kong/kong.go @@ -0,0 +1,449 @@ +package kong + +import ( + "errors" + "fmt" + "io" + "os" + "path/filepath" + "reflect" + "regexp" + "strings" +) + +var ( + callbackReturnSignature = reflect.TypeOf((*error)(nil)).Elem() +) + +func failField(parent reflect.Value, field reflect.StructField, format string, args ...interface{}) error { + name := parent.Type().Name() + if name == "" { + name = "" + } + return fmt.Errorf("%s.%s: %s", name, field.Name, fmt.Sprintf(format, args...)) +} + +// Must creates a new Parser or panics if there is an error. +func Must(ast interface{}, options ...Option) *Kong { + k, err := New(ast, options...) + if err != nil { + panic(err) + } + return k +} + +type usageOnError int + +const ( + shortUsage usageOnError = iota + 1 + fullUsage +) + +// Kong is the main parser type. +type Kong struct { + // Grammar model. 
+ Model *Application + + // Termination function (defaults to os.Exit) + Exit func(int) + + Stdout io.Writer + Stderr io.Writer + + bindings bindings + loader ConfigurationLoader + resolvers []Resolver + registry *Registry + ignoreFields []*regexp.Regexp + + noDefaultHelp bool + usageOnError usageOnError + help HelpPrinter + shortHelp HelpPrinter + helpFormatter HelpValueFormatter + helpOptions HelpOptions + helpFlag *Flag + groups []Group + vars Vars + flagNamer func(string) string + + // Set temporarily by Options. These are applied after build(). + postBuildOptions []Option + embedded []embedded + dynamicCommands []*dynamicCommand +} + +// New creates a new Kong parser on grammar. +// +// See the README (https://github.com/alecthomas/kong) for usage instructions. +func New(grammar interface{}, options ...Option) (*Kong, error) { + k := &Kong{ + Exit: os.Exit, + Stdout: os.Stdout, + Stderr: os.Stderr, + registry: NewRegistry().RegisterDefaults(), + vars: Vars{}, + bindings: bindings{}, + helpFormatter: DefaultHelpValueFormatter, + ignoreFields: make([]*regexp.Regexp, 0), + flagNamer: func(s string) string { + return strings.ToLower(dashedString(s)) + }, + } + + options = append(options, Bind(k)) + + for _, option := range options { + if err := option.Apply(k); err != nil { + return nil, err + } + } + + if k.help == nil { + k.help = DefaultHelpPrinter + } + + if k.shortHelp == nil { + k.shortHelp = DefaultShortHelpPrinter + } + + model, err := build(k, grammar) + if err != nil { + return k, err + } + model.Name = filepath.Base(os.Args[0]) + k.Model = model + k.Model.HelpFlag = k.helpFlag + + // Embed any embedded structs. + for _, embed := range k.embedded { + tag, err := parseTagString(strings.Join(embed.tags, " ")) //nolint:govet + if err != nil { + return nil, err + } + tag.Embed = true + v := reflect.Indirect(reflect.ValueOf(embed.strct)) + node, err := buildNode(k, v, CommandNode, tag, map[string]bool{}) + if err != nil { + return nil, err + } + for _, child := range node.Children { + child.Parent = k.Model.Node + k.Model.Children = append(k.Model.Children, child) + } + k.Model.Flags = append(k.Model.Flags, node.Flags...) + } + + // Synthesise command nodes. + for _, dcmd := range k.dynamicCommands { + tag, terr := parseTagString(strings.Join(dcmd.tags, " ")) + if terr != nil { + return nil, terr + } + tag.Name = dcmd.name + tag.Help = dcmd.help + tag.Group = dcmd.group + tag.Cmd = true + v := reflect.Indirect(reflect.ValueOf(dcmd.cmd)) + err = buildChild(k, k.Model.Node, CommandNode, reflect.Value{}, reflect.StructField{ + Name: dcmd.name, + Type: v.Type(), + }, v, tag, dcmd.name, map[string]bool{}) + if err != nil { + return nil, err + } + } + + for _, option := range k.postBuildOptions { + if err = option.Apply(k); err != nil { + return nil, err + } + } + k.postBuildOptions = nil + + if err = k.interpolate(k.Model.Node); err != nil { + return nil, err + } + + k.bindings.add(k.vars) + + return k, nil +} + +type varStack []Vars + +func (v *varStack) head() Vars { return (*v)[len(*v)-1] } +func (v *varStack) pop() { *v = (*v)[:len(*v)-1] } +func (v *varStack) push(vars Vars) Vars { + if len(*v) != 0 { + vars = (*v)[len(*v)-1].CloneWith(vars) + } + *v = append(*v, vars) + return vars +} + +// Interpolate variables into model. 
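Interpolation substitutes ${var} and ${var=default} in help, default and enum strings; a sketch passing values through kong.Vars (an option defined elsewhere in the package; the variable and CLI are hypothetical):

package main

import (
	"fmt"

	"github.com/alecthomas/kong"
)

var cli struct {
	// Both the default value and the help text are interpolated from the same variable.
	Region string `default:"${default_region}" help:"Deployment region (defaults to ${default_region})."`
}

func main() {
	kong.Parse(&cli, kong.Vars{"default_region": "us-east-1"})
	fmt.Println(cli.Region) // "us-east-1" unless --region is passed
}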
+func (k *Kong) interpolate(node *Node) (err error) { + stack := varStack{} + return Visit(node, func(node Visitable, next Next) error { + switch node := node.(type) { + case *Node: + vars := stack.push(node.Vars()) + node.Help, err = interpolate(node.Help, vars, nil) + if err != nil { + return fmt.Errorf("help for %s: %s", node.Path(), err) + } + err = next(nil) + stack.pop() + return err + + case *Value: + return next(k.interpolateValue(node, stack.head())) + } + return next(nil) + }) +} + +func (k *Kong) interpolateValue(value *Value, vars Vars) (err error) { + if len(value.Tag.Vars) > 0 { + vars = vars.CloneWith(value.Tag.Vars) + } + if varsContributor, ok := value.Mapper.(VarsContributor); ok { + vars = vars.CloneWith(varsContributor.Vars(value)) + } + + if value.Enum, err = interpolate(value.Enum, vars, nil); err != nil { + return fmt.Errorf("enum for %s: %s", value.Summary(), err) + } + + updatedVars := map[string]string{ + "default": value.Default, + "enum": value.Enum, + } + if value.Default, err = interpolate(value.Default, vars, nil); err != nil { + return fmt.Errorf("default value for %s: %s", value.Summary(), err) + } + if value.Enum, err = interpolate(value.Enum, vars, nil); err != nil { + return fmt.Errorf("enum value for %s: %s", value.Summary(), err) + } + if value.Flag != nil { + for i, env := range value.Flag.Envs { + if value.Flag.Envs[i], err = interpolate(env, vars, nil); err != nil { + return fmt.Errorf("env value for %s: %s", value.Summary(), err) + } + } + value.Tag.Envs = value.Flag.Envs + updatedVars["env"] = "" + if len(value.Flag.Envs) != 0 { + updatedVars["env"] = value.Flag.Envs[0] + } + } + value.Help, err = interpolate(value.Help, vars, updatedVars) + if err != nil { + return fmt.Errorf("help for %s: %s", value.Summary(), err) + } + return nil +} + +// Provide additional builtin flags, if any. +func (k *Kong) extraFlags() []*Flag { + if k.noDefaultHelp { + return nil + } + var helpTarget helpValue + value := reflect.ValueOf(&helpTarget).Elem() + helpFlag := &Flag{ + Short: 'h', + Value: &Value{ + Name: "help", + Help: "Show context-sensitive help.", + OrigHelp: "Show context-sensitive help.", + Target: value, + Tag: &Tag{}, + Mapper: k.registry.ForValue(value), + DefaultValue: reflect.ValueOf(false), + }, + } + helpFlag.Flag = helpFlag + k.helpFlag = helpFlag + return []*Flag{helpFlag} +} + +// Parse arguments into target. +// +// The return Context can be used to further inspect the parsed command-line, to format help, to find the +// selected command, to run command Run() methods, and so on. See Context and README for more information. +// +// Will return a ParseError if a *semantically* invalid command-line is encountered (as opposed to a syntactically +// invalid one, which will report a normal error). 
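Callers that build the parser with New can inspect the ParseError described above; a minimal sketch with a hypothetical CLI:

package main

import (
	"errors"
	"fmt"
	"os"

	"github.com/alecthomas/kong"
)

var cli struct {
	Name string `arg:"" help:"Name."`
}

func main() {
	parser, err := kong.New(&cli)
	if err != nil {
		panic(err)
	}
	ctx, err := parser.Parse(os.Args[1:])
	// A semantically invalid command line comes back as a *ParseError carrying
	// the trace Context, which can be used for context-sensitive reporting.
	var perr *kong.ParseError
	if errors.As(err, &perr) {
		fmt.Fprintln(os.Stderr, "failed while parsing:", perr.Context.Command())
	}
	parser.FatalIfErrorf(err)
	_ = ctx
}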
+func (k *Kong) Parse(args []string) (ctx *Context, err error) { + ctx, err = Trace(k, args) + if err != nil { + return nil, err + } + if ctx.Error != nil { + return nil, &ParseError{error: ctx.Error, Context: ctx} + } + if err = k.applyHook(ctx, "BeforeReset"); err != nil { + return nil, &ParseError{error: err, Context: ctx} + } + if err = ctx.Reset(); err != nil { + return nil, &ParseError{error: err, Context: ctx} + } + if err = k.applyHook(ctx, "BeforeResolve"); err != nil { + return nil, &ParseError{error: err, Context: ctx} + } + if err = ctx.Resolve(); err != nil { + return nil, &ParseError{error: err, Context: ctx} + } + if err = k.applyHook(ctx, "BeforeApply"); err != nil { + return nil, &ParseError{error: err, Context: ctx} + } + if _, err = ctx.Apply(); err != nil { + return nil, &ParseError{error: err, Context: ctx} + } + if err = ctx.Validate(); err != nil { + return nil, &ParseError{error: err, Context: ctx} + } + if err = k.applyHook(ctx, "AfterApply"); err != nil { + return nil, &ParseError{error: err, Context: ctx} + } + return ctx, nil +} + +func (k *Kong) applyHook(ctx *Context, name string) error { + for _, trace := range ctx.Path { + var value reflect.Value + switch { + case trace.App != nil: + value = trace.App.Target + case trace.Argument != nil: + value = trace.Argument.Target + case trace.Command != nil: + value = trace.Command.Target + case trace.Positional != nil: + value = trace.Positional.Target + case trace.Flag != nil: + value = trace.Flag.Value.Target + default: + panic("unsupported Path") + } + method := getMethod(value, name) + if !method.IsValid() { + continue + } + binds := k.bindings.clone() + binds.add(ctx, trace) + binds.add(trace.Node().Vars().CloneWith(k.vars)) + binds.merge(ctx.bindings) + if err := callFunction(method, binds); err != nil { + return err + } + } + // Path[0] will always be the app root. + return k.applyHookToDefaultFlags(ctx, ctx.Path[0].Node(), name) +} + +// Call hook on any unset flags with default values. +func (k *Kong) applyHookToDefaultFlags(ctx *Context, node *Node, name string) error { + if node == nil { + return nil + } + return Visit(node, func(n Visitable, next Next) error { + node, ok := n.(*Node) + if !ok { + return next(nil) + } + binds := k.bindings.clone().add(ctx).add(node.Vars().CloneWith(k.vars)) + for _, flag := range node.Flags { + if !flag.HasDefault || ctx.values[flag.Value].IsValid() || !flag.Target.IsValid() { + continue + } + method := getMethod(flag.Target, name) + if !method.IsValid() { + continue + } + path := &Path{Flag: flag} + if err := callFunction(method, binds.clone().add(path)); err != nil { + return next(err) + } + } + return next(nil) + }) +} + +func formatMultilineMessage(w io.Writer, leaders []string, format string, args ...interface{}) { + lines := strings.Split(fmt.Sprintf(format, args...), "\n") + leader := "" + for _, l := range leaders { + if l == "" { + continue + } + leader += l + ": " + } + fmt.Fprintf(w, "%s%s\n", leader, lines[0]) + for _, line := range lines[1:] { + fmt.Fprintf(w, "%*s%s\n", len(leader), " ", line) + } +} + +// Printf writes a message to Kong.Stdout with the application name prefixed. +func (k *Kong) Printf(format string, args ...interface{}) *Kong { + formatMultilineMessage(k.Stdout, []string{k.Model.Name}, format, args...) + return k +} + +// Errorf writes a message to Kong.Stderr with the application name prefixed. 
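applyHook above looks up methods named after each hook along the parsed path; the common version-flag idiom relies on BeforeApply and the *Kong binding added in New (the flag type and version string here are hypothetical):

package main

import (
	"fmt"

	"github.com/alecthomas/kong"
)

// VersionFlag prints a version string and exits before the rest of the command
// line is applied; *kong.Kong is injectable because New binds the parser itself.
type VersionFlag bool

func (v VersionFlag) BeforeApply(app *kong.Kong) error {
	fmt.Println("0.0.0-example") // hypothetical version string
	app.Exit(0)
	return nil
}

var cli struct {
	Version VersionFlag `help:"Print version and exit."`
}

func main() {
	kong.Parse(&cli)
}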
+func (k *Kong) Errorf(format string, args ...interface{}) *Kong { + formatMultilineMessage(k.Stderr, []string{k.Model.Name, "error"}, format, args...) + return k +} + +// Fatalf writes a message to Kong.Stderr with the application name prefixed then exits with a non-zero status. +func (k *Kong) Fatalf(format string, args ...interface{}) { + k.Errorf(format, args...) + k.Exit(1) +} + +// FatalIfErrorf terminates with an error message if err != nil. +func (k *Kong) FatalIfErrorf(err error, args ...interface{}) { + if err == nil { + return + } + msg := err.Error() + if len(args) > 0 { + msg = fmt.Sprintf(args[0].(string), args[1:]...) + ": " + err.Error() // nolint + } + // Maybe display usage information. + var parseErr *ParseError + if errors.As(err, &parseErr) { + switch k.usageOnError { + case fullUsage: + _ = k.help(k.helpOptions, parseErr.Context) + fmt.Fprintln(k.Stdout) + case shortUsage: + _ = k.shortHelp(k.helpOptions, parseErr.Context) + fmt.Fprintln(k.Stdout) + } + } + k.Fatalf("%s", msg) +} + +// LoadConfig from path using the loader configured via Configuration(loader). +// +// "path" will have ~ and any variables expanded. +func (k *Kong) LoadConfig(path string) (Resolver, error) { + var err error + path = ExpandPath(path) + path, err = interpolate(path, k.vars, nil) + if err != nil { + return nil, err + } + r, err := os.Open(path) // nolint: gas + if err != nil { + return nil, err + } + defer r.Close() // nolint: gosec + + return k.loader(r) +} diff --git a/vendor/github.com/alecthomas/kong/kong.png b/vendor/github.com/alecthomas/kong/kong.png new file mode 100644 index 0000000..151fb08 Binary files /dev/null and b/vendor/github.com/alecthomas/kong/kong.png differ diff --git a/vendor/github.com/alecthomas/kong/kong.sketch b/vendor/github.com/alecthomas/kong/kong.sketch new file mode 100644 index 0000000..38816d5 Binary files /dev/null and b/vendor/github.com/alecthomas/kong/kong.sketch differ diff --git a/vendor/github.com/alecthomas/kong/levenshtein.go b/vendor/github.com/alecthomas/kong/levenshtein.go new file mode 100644 index 0000000..1816f30 --- /dev/null +++ b/vendor/github.com/alecthomas/kong/levenshtein.go @@ -0,0 +1,39 @@ +package kong + +import "unicode/utf8" + +// https://en.wikibooks.org/wiki/Algorithm_Implementation/Strings/Levenshtein_distance#Go +// License: https://creativecommons.org/licenses/by-sa/3.0/ +func levenshtein(a, b string) int { + f := make([]int, utf8.RuneCountInString(b)+1) + + for j := range f { + f[j] = j + } + + for _, ca := range a { + j := 1 + fj1 := f[0] // fj1 is the value of f[j - 1] in last iteration + f[0]++ + for _, cb := range b { + mn := min(f[j]+1, f[j-1]+1) // delete & insert + if cb != ca { + mn = min(mn, fj1+1) // change + } else { + mn = min(mn, fj1) // matched + } + + fj1, f[j] = f[j], mn // save f[j] to fj1(j is about to increase), update f[j] to mn + j++ + } + } + + return f[len(f)-1] +} + +func min(a, b int) int { + if a <= b { + return a + } + return b +} diff --git a/vendor/github.com/alecthomas/kong/mapper.go b/vendor/github.com/alecthomas/kong/mapper.go new file mode 100644 index 0000000..c332cce --- /dev/null +++ b/vendor/github.com/alecthomas/kong/mapper.go @@ -0,0 +1,925 @@ +package kong + +import ( + "encoding" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "math/bits" + "net/url" + "os" + "reflect" + "strconv" + "strings" + "time" +) + +var ( + mapperValueType = reflect.TypeOf((*MapperValue)(nil)).Elem() + boolMapperValueType = reflect.TypeOf((*BoolMapperValue)(nil)).Elem() + jsonUnmarshalerType = 
reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() + textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() + binaryUnmarshalerType = reflect.TypeOf((*encoding.BinaryUnmarshaler)(nil)).Elem() +) + +// DecodeContext is passed to a Mapper's Decode(). +// +// It contains the Value being decoded into and the Scanner to parse from. +type DecodeContext struct { + // Value being decoded into. + Value *Value + // Scan contains the input to scan into Target. + Scan *Scanner +} + +// WithScanner creates a clone of this context with a new Scanner. +func (r *DecodeContext) WithScanner(scan *Scanner) *DecodeContext { + return &DecodeContext{ + Value: r.Value, + Scan: scan, + } +} + +// MapperValue may be implemented by fields in order to provide custom mapping. +// Mappers may additionally implement PlaceHolderProvider to provide custom placeholder text. +type MapperValue interface { + Decode(ctx *DecodeContext) error +} + +// BoolMapperValue may be implemented by fields in order to provide custom mappings for boolean values. +type BoolMapperValue interface { + MapperValue + IsBool() bool +} + +type mapperValueAdapter struct { + isBool bool +} + +func (m *mapperValueAdapter) Decode(ctx *DecodeContext, target reflect.Value) error { + if target.Type().Implements(mapperValueType) { + return target.Interface().(MapperValue).Decode(ctx) // nolint + } + return target.Addr().Interface().(MapperValue).Decode(ctx) // nolint +} + +func (m *mapperValueAdapter) IsBool() bool { + return m.isBool +} + +type textUnmarshalerAdapter struct{} + +func (m *textUnmarshalerAdapter) Decode(ctx *DecodeContext, target reflect.Value) error { + var value string + err := ctx.Scan.PopValueInto("value", &value) + if err != nil { + return err + } + if target.Type().Implements(textUnmarshalerType) { + return target.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(value)) // nolint + } + return target.Addr().Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(value)) // nolint +} + +type binaryUnmarshalerAdapter struct{} + +func (m *binaryUnmarshalerAdapter) Decode(ctx *DecodeContext, target reflect.Value) error { + var value string + err := ctx.Scan.PopValueInto("value", &value) + if err != nil { + return err + } + if target.Type().Implements(binaryUnmarshalerType) { + return target.Interface().(encoding.BinaryUnmarshaler).UnmarshalBinary([]byte(value)) // nolint + } + return target.Addr().Interface().(encoding.BinaryUnmarshaler).UnmarshalBinary([]byte(value)) // nolint +} + +type jsonUnmarshalerAdapter struct{} + +func (j *jsonUnmarshalerAdapter) Decode(ctx *DecodeContext, target reflect.Value) error { + var value string + err := ctx.Scan.PopValueInto("value", &value) + if err != nil { + return err + } + if target.Type().Implements(jsonUnmarshalerType) { + return target.Interface().(json.Unmarshaler).UnmarshalJSON([]byte(value)) // nolint + } + return target.Addr().Interface().(json.Unmarshaler).UnmarshalJSON([]byte(value)) // nolint +} + +// A Mapper represents how a field is mapped from command-line values to Go. +// +// Mappers can be associated with concrete fields via pointer, reflect.Type, reflect.Kind, or via a "type" tag. +// +// Additionally, if a type implements the MapperValue interface, it will be used. +type Mapper interface { + // Decode ctx.Value with ctx.Scanner into target. + Decode(ctx *DecodeContext, target reflect.Value) error +} + +// VarsContributor can be implemented by a Mapper to contribute Vars during interpolation. 
+type VarsContributor interface { + Vars(ctx *Value) Vars +} + +// A BoolMapper is a Mapper to a value that is a boolean. +// +// This is used solely for formatting help. +type BoolMapper interface { + Mapper + IsBool() bool +} + +// BoolMapperExt allows a Mapper to dynamically determine if a value is a boolean. +type BoolMapperExt interface { + Mapper + IsBoolFromValue(v reflect.Value) bool +} + +// A MapperFunc is a single function that complies with the Mapper interface. +type MapperFunc func(ctx *DecodeContext, target reflect.Value) error + +func (m MapperFunc) Decode(ctx *DecodeContext, target reflect.Value) error { // nolint: revive + return m(ctx, target) +} + +// A Registry contains a set of mappers and supporting lookup methods. +type Registry struct { + names map[string]Mapper + types map[reflect.Type]Mapper + kinds map[reflect.Kind]Mapper + values map[reflect.Value]Mapper +} + +// NewRegistry creates a new (empty) Registry. +func NewRegistry() *Registry { + return &Registry{ + names: map[string]Mapper{}, + types: map[reflect.Type]Mapper{}, + kinds: map[reflect.Kind]Mapper{}, + values: map[reflect.Value]Mapper{}, + } +} + +// ForNamedValue finds a mapper for a value with a user-specified name. +// +// Will return nil if a mapper can not be determined. +func (r *Registry) ForNamedValue(name string, value reflect.Value) Mapper { + if mapper, ok := r.names[name]; ok { + return mapper + } + return r.ForValue(value) +} + +// ForValue looks up the Mapper for a reflect.Value. +func (r *Registry) ForValue(value reflect.Value) Mapper { + if mapper, ok := r.values[value]; ok { + return mapper + } + return r.ForType(value.Type()) +} + +// ForNamedType finds a mapper for a type with a user-specified name. +// +// Will return nil if a mapper can not be determined. +func (r *Registry) ForNamedType(name string, typ reflect.Type) Mapper { + if mapper, ok := r.names[name]; ok { + return mapper + } + return r.ForType(typ) +} + +// ForType finds a mapper from a type, by type, then kind. +// +// Will return nil if a mapper can not be determined. +func (r *Registry) ForType(typ reflect.Type) Mapper { + // Check if the type implements MapperValue. + for _, impl := range []reflect.Type{typ, reflect.PtrTo(typ)} { + if impl.Implements(mapperValueType) { + // FIXME: This should pass in the bool mapper. + return &mapperValueAdapter{impl.Implements(boolMapperValueType)} + } + } + // Next, try explicitly registered types. + var mapper Mapper + var ok bool + if mapper, ok = r.types[typ]; ok { + return mapper + } + // Next try stdlib unmarshaler interfaces. + for _, impl := range []reflect.Type{typ, reflect.PtrTo(typ)} { + switch { + case impl.Implements(textUnmarshalerType): + return &textUnmarshalerAdapter{} + case impl.Implements(binaryUnmarshalerType): + return &binaryUnmarshalerAdapter{} + case impl.Implements(jsonUnmarshalerType): + return &jsonUnmarshalerAdapter{} + } + } + // Finally try registered kinds. + if mapper, ok = r.kinds[typ.Kind()]; ok { + return mapper + } + return nil +} + +// RegisterKind registers a Mapper for a reflect.Kind. +func (r *Registry) RegisterKind(kind reflect.Kind, mapper Mapper) *Registry { + r.kinds[kind] = mapper + return r +} + +// RegisterName registers a mapper to be used if the value mapper has a "type" tag matching name. +// +// eg. +// +// Mapper string `kong:"type='colour'` +// registry.RegisterName("colour", ...) 
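// Illustrative sketch, not part of the vendored kong source: registering a custom
// mapper under a name and selecting it with the `type` tag, as described above.
// The name "csvupper" and the CLI struct are hypothetical.
package main

import (
    "reflect"
    "strings"

    "github.com/alecthomas/kong"
)

type CLI struct {
    Tags []string `type:"csvupper" help:"Comma-separated tags, upper-cased."`
}

func main() {
    var cli CLI
    kong.Parse(&cli, kong.NamedMapper("csvupper", kong.MapperFunc(
        func(ctx *kong.DecodeContext, target reflect.Value) error {
            var raw string
            if err := ctx.Scan.PopValueInto("csv", &raw); err != nil {
                return err
            }
            target.Set(reflect.ValueOf(strings.Split(strings.ToUpper(raw), ",")))
            return nil
        })))
}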
+func (r *Registry) RegisterName(name string, mapper Mapper) *Registry { + r.names[name] = mapper + return r +} + +// RegisterType registers a Mapper for a reflect.Type. +func (r *Registry) RegisterType(typ reflect.Type, mapper Mapper) *Registry { + r.types[typ] = mapper + return r +} + +// RegisterValue registers a Mapper by pointer to the field value. +func (r *Registry) RegisterValue(ptr interface{}, mapper Mapper) *Registry { + key := reflect.ValueOf(ptr) + if key.Kind() != reflect.Ptr { + panic("expected a pointer") + } + key = key.Elem() + r.values[key] = mapper + return r +} + +// RegisterDefaults registers Mappers for all builtin supported Go types and some common stdlib types. +func (r *Registry) RegisterDefaults() *Registry { + return r.RegisterKind(reflect.Int, intDecoder(bits.UintSize)). + RegisterKind(reflect.Int8, intDecoder(8)). + RegisterKind(reflect.Int16, intDecoder(16)). + RegisterKind(reflect.Int32, intDecoder(32)). + RegisterKind(reflect.Int64, intDecoder(64)). + RegisterKind(reflect.Uint, uintDecoder(bits.UintSize)). + RegisterKind(reflect.Uint8, uintDecoder(8)). + RegisterKind(reflect.Uint16, uintDecoder(16)). + RegisterKind(reflect.Uint32, uintDecoder(32)). + RegisterKind(reflect.Uint64, uintDecoder(64)). + RegisterKind(reflect.Float32, floatDecoder(32)). + RegisterKind(reflect.Float64, floatDecoder(64)). + RegisterKind(reflect.String, MapperFunc(func(ctx *DecodeContext, target reflect.Value) error { + return ctx.Scan.PopValueInto("string", target.Addr().Interface()) + })). + RegisterKind(reflect.Bool, boolMapper{}). + RegisterKind(reflect.Slice, sliceDecoder(r)). + RegisterKind(reflect.Map, mapDecoder(r)). + RegisterType(reflect.TypeOf(time.Time{}), timeDecoder()). + RegisterType(reflect.TypeOf(time.Duration(0)), durationDecoder()). + RegisterType(reflect.TypeOf(&url.URL{}), urlMapper()). + RegisterType(reflect.TypeOf(&os.File{}), fileMapper(r)). + RegisterName("path", pathMapper(r)). + RegisterName("existingfile", existingFileMapper(r)). + RegisterName("existingdir", existingDirMapper(r)). + RegisterName("counter", counterMapper()). + RegisterName("filecontent", fileContentMapper(r)). 
+ RegisterKind(reflect.Ptr, ptrMapper{r}) +} + +type boolMapper struct{} + +func (boolMapper) Decode(ctx *DecodeContext, target reflect.Value) error { + if ctx.Scan.Peek().Type == FlagValueToken { + token := ctx.Scan.Pop() + switch v := token.Value.(type) { + case string: + v = strings.ToLower(v) + switch v { + case "true", "1", "yes": + target.SetBool(true) + + case "false", "0", "no": + target.SetBool(false) + + default: + return fmt.Errorf("bool value must be true, 1, yes, false, 0 or no but got %q", v) + } + + case bool: + target.SetBool(v) + + default: + return fmt.Errorf("expected bool but got %q (%T)", token.Value, token.Value) + } + } else { + target.SetBool(true) + } + return nil +} +func (boolMapper) IsBool() bool { return true } + +func durationDecoder() MapperFunc { + return func(ctx *DecodeContext, target reflect.Value) error { + t, err := ctx.Scan.PopValue("duration") + if err != nil { + return err + } + var d time.Duration + switch v := t.Value.(type) { + case string: + d, err = time.ParseDuration(v) + if err != nil { + return fmt.Errorf("expected duration but got %q: %v", v, err) + } + case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, float32, float64: + d = reflect.ValueOf(v).Convert(reflect.TypeOf(time.Duration(0))).Interface().(time.Duration) // nolint: forcetypeassert + default: + return fmt.Errorf("expected duration but got %q", v) + } + target.Set(reflect.ValueOf(d)) + return nil + } +} + +func timeDecoder() MapperFunc { + return func(ctx *DecodeContext, target reflect.Value) error { + format := time.RFC3339 + if ctx.Value.Format != "" { + format = ctx.Value.Format + } + var value string + if err := ctx.Scan.PopValueInto("time", &value); err != nil { + return err + } + t, err := time.Parse(format, value) + if err != nil { + return err + } + target.Set(reflect.ValueOf(t)) + return nil + } +} + +func intDecoder(bits int) MapperFunc { // nolint: dupl + return func(ctx *DecodeContext, target reflect.Value) error { + t, err := ctx.Scan.PopValue("int") + if err != nil { + return err + } + var sv string + switch v := t.Value.(type) { + case string: + sv = v + + case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64: + sv = fmt.Sprintf("%v", v) + + case float32, float64: + sv = fmt.Sprintf("%0.f", v) + + default: + return fmt.Errorf("expected an int but got %q (%T)", t, t.Value) + } + n, err := strconv.ParseInt(sv, 10, bits) + if err != nil { + return fmt.Errorf("expected a valid %d bit int but got %q", bits, sv) + } + target.SetInt(n) + return nil + } +} + +func uintDecoder(bits int) MapperFunc { // nolint: dupl + return func(ctx *DecodeContext, target reflect.Value) error { + t, err := ctx.Scan.PopValue("uint") + if err != nil { + return err + } + var sv string + switch v := t.Value.(type) { + case string: + sv = v + + case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64: + sv = fmt.Sprintf("%v", v) + + case float32, float64: + sv = fmt.Sprintf("%0.f", v) + + default: + return fmt.Errorf("expected an int but got %q (%T)", t, t.Value) + } + n, err := strconv.ParseUint(sv, 10, bits) + if err != nil { + return fmt.Errorf("expected a valid %d bit uint but got %q", bits, sv) + } + target.SetUint(n) + return nil + } +} + +func floatDecoder(bits int) MapperFunc { + return func(ctx *DecodeContext, target reflect.Value) error { + t, err := ctx.Scan.PopValue("float") + if err != nil { + return err + } + switch v := t.Value.(type) { + case string: + n, err := strconv.ParseFloat(v, bits) + if err != nil { + return 
fmt.Errorf("expected a float but got %q (%T)", t, t.Value) + } + target.SetFloat(n) + + case float32: + target.SetFloat(float64(v)) + + case float64: + target.SetFloat(v) + + case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64: + target.Set(reflect.ValueOf(v)) + + default: + return fmt.Errorf("expected an int but got %q (%T)", t, t.Value) + } + return nil + } +} + +func mapDecoder(r *Registry) MapperFunc { + return func(ctx *DecodeContext, target reflect.Value) error { + if target.IsNil() { + target.Set(reflect.MakeMap(target.Type())) + } + el := target.Type() + mapsep := ctx.Value.Tag.MapSep + var childScanner *Scanner + if ctx.Value.Flag != nil { + t := ctx.Scan.Pop() + // If decoding a flag, we need a value. + if t.IsEOL() { + return fmt.Errorf("missing value, expecting \"<key>=<value>%c...\"", mapsep) + } + switch v := t.Value.(type) { + case string: + childScanner = ScanAsType(t.Type, SplitEscaped(v, mapsep)...) + + case []map[string]interface{}: + for _, m := range v { + err := jsonTranscode(m, target.Addr().Interface()) + if err != nil { + return err + } + } + return nil + + case map[string]interface{}: + return jsonTranscode(v, target.Addr().Interface()) + + default: + return fmt.Errorf("invalid map value %q (of type %T)", t, t.Value) + } + } else { + tokens := ctx.Scan.PopWhile(func(t Token) bool { return t.IsValue() }) + childScanner = ScanFromTokens(tokens...) + } + for !childScanner.Peek().IsEOL() { + var token string + err := childScanner.PopValueInto("map", &token) + if err != nil { + return err + } + parts := strings.SplitN(token, "=", 2) + if len(parts) != 2 { + return fmt.Errorf("expected \"=\" but got %q", token) + } + key, value := parts[0], parts[1] + + keyTypeName, valueTypeName := "", "" + if typ := ctx.Value.Tag.Type; typ != "" { + parts := strings.Split(typ, ":") + if len(parts) != 2 { + return errors.New("type:\"\" on map field must be in the form \"[<key>]:[<value>]\"") + } + keyTypeName, valueTypeName = parts[0], parts[1] + } + + keyScanner := ScanAsType(FlagValueToken, key) + keyDecoder := r.ForNamedType(keyTypeName, el.Key()) + keyValue := reflect.New(el.Key()).Elem() + if err := keyDecoder.Decode(ctx.WithScanner(keyScanner), keyValue); err != nil { + return fmt.Errorf("invalid map key %q", key) + } + + valueScanner := ScanAsType(FlagValueToken, value) + valueDecoder := r.ForNamedType(valueTypeName, el.Elem()) + valueValue := reflect.New(el.Elem()).Elem() + if err := valueDecoder.Decode(ctx.WithScanner(valueScanner), valueValue); err != nil { + return fmt.Errorf("invalid map value %q", value) + } + + target.SetMapIndex(keyValue, valueValue) + } + return nil + } +} + +func sliceDecoder(r *Registry) MapperFunc { + return func(ctx *DecodeContext, target reflect.Value) error { + el := target.Type().Elem() + sep := ctx.Value.Tag.Sep + var childScanner *Scanner + if ctx.Value.Flag != nil { + t := ctx.Scan.Pop() + // If decoding a flag, we need a value. + if t.IsEOL() { + return fmt.Errorf("missing value, expecting \"<arg>%c...\"", sep) + } + switch v := t.Value.(type) { + case string: + childScanner = ScanAsType(t.Type, SplitEscaped(v, sep)...) + + case []interface{}: + return jsonTranscode(v, target.Addr().Interface()) + + default: + v = []interface{}{v} + return jsonTranscode(v, target.Addr().Interface()) + } + } else { + tokens := ctx.Scan.PopWhile(func(t Token) bool { return t.IsValue() }) + childScanner = ScanFromTokens(tokens...)
+ } + childDecoder := r.ForNamedType(ctx.Value.Tag.Type, el) + if childDecoder == nil { + return fmt.Errorf("no mapper for element type of %s", target.Type()) + } + for !childScanner.Peek().IsEOL() { + childValue := reflect.New(el).Elem() + err := childDecoder.Decode(ctx.WithScanner(childScanner), childValue) + if err != nil { + return err + } + target.Set(reflect.Append(target, childValue)) + } + return nil + } +} + +func pathMapper(r *Registry) MapperFunc { + return func(ctx *DecodeContext, target reflect.Value) error { + if target.Kind() == reflect.Slice { + return sliceDecoder(r)(ctx, target) + } + if target.Kind() != reflect.String { + return fmt.Errorf("\"path\" type must be applied to a string not %s", target.Type()) + } + var path string + err := ctx.Scan.PopValueInto("file", &path) + if err != nil { + return err + } + if path != "-" { + path = ExpandPath(path) + } + target.SetString(path) + return nil + } +} + +func fileMapper(r *Registry) MapperFunc { + return func(ctx *DecodeContext, target reflect.Value) error { + if target.Kind() == reflect.Slice { + return sliceDecoder(r)(ctx, target) + } + var path string + err := ctx.Scan.PopValueInto("file", &path) + if err != nil { + return err + } + var file *os.File + if path == "-" { + file = os.Stdin + } else { + path = ExpandPath(path) + file, err = os.Open(path) // nolint: gosec + if err != nil { + return err + } + } + target.Set(reflect.ValueOf(file)) + return nil + } +} + +func existingFileMapper(r *Registry) MapperFunc { + return func(ctx *DecodeContext, target reflect.Value) error { + if target.Kind() == reflect.Slice { + return sliceDecoder(r)(ctx, target) + } + if target.Kind() != reflect.String { + return fmt.Errorf("\"existingfile\" type must be applied to a string not %s", target.Type()) + } + var path string + err := ctx.Scan.PopValueInto("file", &path) + if err != nil { + return err + } + + if !ctx.Value.Active || ctx.Value.Set { + // early return to avoid checking extra files that may not exist; + // this hack only works because the value provided on the cli is + // checked before the default value is checked (if default is set). + return nil + } + + if path != "-" { + path = ExpandPath(path) + stat, err := os.Stat(path) + if err != nil { + return err + } + if stat.IsDir() { + return fmt.Errorf("%q exists but is a directory", path) + } + } + target.SetString(path) + return nil + } +} + +func existingDirMapper(r *Registry) MapperFunc { + return func(ctx *DecodeContext, target reflect.Value) error { + if target.Kind() == reflect.Slice { + return sliceDecoder(r)(ctx, target) + } + if target.Kind() != reflect.String { + return fmt.Errorf("\"existingdir\" must be applied to a string not %s", target.Type()) + } + var path string + err := ctx.Scan.PopValueInto("file", &path) + if err != nil { + return err + } + + if !ctx.Value.Active || ctx.Value.Set { + // early return to avoid checking extra dirs that may not exist; + // this hack only works because the value provided on the cli is + // checked before the default value is checked (if default is set). 
+ return nil + } + + path = ExpandPath(path) + stat, err := os.Stat(path) + if err != nil { + return err + } + if !stat.IsDir() { + return fmt.Errorf("%q exists but is not a directory", path) + } + target.SetString(path) + return nil + } +} + +func fileContentMapper(r *Registry) MapperFunc { + return func(ctx *DecodeContext, target reflect.Value) error { + if target.Kind() != reflect.Slice && target.Elem().Kind() != reflect.Uint8 { + return fmt.Errorf("\"filecontent\" must be applied to []byte not %s", target.Type()) + } + var path string + err := ctx.Scan.PopValueInto("file", &path) + if err != nil { + return err + } + + if !ctx.Value.Active || ctx.Value.Set { + // early return to avoid checking extra dirs that may not exist; + // this hack only works because the value provided on the cli is + // checked before the default value is checked (if default is set). + return nil + } + + var data []byte + if path != "-" { + path = ExpandPath(path) + data, err = ioutil.ReadFile(path) //nolint:gosec + } else { + data, err = ioutil.ReadAll(os.Stdin) + } + if err != nil { + return err + } + target.SetBytes(data) + return nil + } +} + +type ptrMapper struct { + r *Registry +} + +var _ BoolMapperExt = (*ptrMapper)(nil) + +// IsBoolFromValue implements BoolMapperExt +func (p ptrMapper) IsBoolFromValue(target reflect.Value) bool { + elem := reflect.New(target.Type().Elem()).Elem() + nestedMapper := p.r.ForValue(elem) + if nestedMapper == nil { + return false + } + if bm, ok := nestedMapper.(BoolMapper); ok && bm.IsBool() { + return true + } + if bm, ok := nestedMapper.(BoolMapperExt); ok && bm.IsBoolFromValue(target) { + return true + } + return target.Kind() == reflect.Ptr && target.Type().Elem().Kind() == reflect.Bool +} + +func (p ptrMapper) Decode(ctx *DecodeContext, target reflect.Value) error { + elem := reflect.New(target.Type().Elem()).Elem() + nestedMapper := p.r.ForValue(elem) + if nestedMapper == nil { + return fmt.Errorf("cannot find mapper for %v", target.Type().Elem().String()) + } + err := nestedMapper.Decode(ctx, elem) + if err != nil { + return err + } + target.Set(elem.Addr()) + return nil +} + +func counterMapper() MapperFunc { + return func(ctx *DecodeContext, target reflect.Value) error { + if ctx.Scan.Peek().Type == FlagValueToken { + t, err := ctx.Scan.PopValue("counter") + if err != nil { + return err + } + switch v := t.Value.(type) { + case string: + n, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("expected a counter but got %q (%T)", t, t.Value) + } + target.SetInt(n) + + case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64: + target.Set(reflect.ValueOf(v)) + + default: + return fmt.Errorf("expected a counter but got %q (%T)", t, t.Value) + } + return nil + } + + switch target.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + target.SetInt(target.Int() + 1) + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + target.SetUint(target.Uint() + 1) + + case reflect.Float32, reflect.Float64: + target.SetFloat(target.Float() + 1) + + default: + return fmt.Errorf("type:\"counter\" must be used with a numeric field") + } + return nil + } +} + +func urlMapper() MapperFunc { + return func(ctx *DecodeContext, target reflect.Value) error { + var urlStr string + err := ctx.Scan.PopValueInto("url", &urlStr) + if err != nil { + return err + } + url, err := url.Parse(urlStr) + if err != nil { + return err + } + target.Set(reflect.ValueOf(url)) + return nil + } +} + +// 
SplitEscaped splits a string on a separator. +// +// It differs from strings.Split() in that the separator can exist in a field by escaping it with a \. eg. +// +// SplitEscaped(`hello\,there,bob`, ',') == []string{"hello,there", "bob"} +func SplitEscaped(s string, sep rune) (out []string) { + if sep == -1 { + return []string{s} + } + escaped := false + token := "" + for i, ch := range s { + switch { + case escaped: + if ch != sep { + token += `\` + } + token += string(ch) + escaped = false + case ch == '\\' && i < len(s)-1: + escaped = true + case ch == sep && !escaped: + out = append(out, token) + token = "" + escaped = false + default: + token += string(ch) + } + } + if token != "" { + out = append(out, token) + } + return +} + +// JoinEscaped joins a slice of strings on sep, but also escapes any instances of sep in the fields with \. eg. +// +// JoinEscaped([]string{"hello,there", "bob"}, ',') == `hello\,there,bob` +func JoinEscaped(s []string, sep rune) string { + escaped := []string{} + for _, e := range s { + escaped = append(escaped, strings.ReplaceAll(e, string(sep), `\`+string(sep))) + } + return strings.Join(escaped, string(sep)) +} + +// NamedFileContentFlag is a flag value that loads a file's contents and filename into its value. +type NamedFileContentFlag struct { + Filename string + Contents []byte +} + +func (f *NamedFileContentFlag) Decode(ctx *DecodeContext) error { // nolint: revive + var filename string + err := ctx.Scan.PopValueInto("filename", &filename) + if err != nil { + return err + } + // This allows unsetting of file content flags. + if filename == "" { + *f = NamedFileContentFlag{} + return nil + } + filename = ExpandPath(filename) + data, err := ioutil.ReadFile(filename) // nolint: gosec + if err != nil { + return fmt.Errorf("failed to open %q: %v", filename, err) + } + f.Contents = data + f.Filename = filename + return nil +} + +// FileContentFlag is a flag value that loads a file's contents into its value. +type FileContentFlag []byte + +func (f *FileContentFlag) Decode(ctx *DecodeContext) error { // nolint: revive + var filename string + err := ctx.Scan.PopValueInto("filename", &filename) + if err != nil { + return err + } + // This allows unsetting of file content flags. + if filename == "" { + *f = nil + return nil + } + filename = ExpandPath(filename) + data, err := ioutil.ReadFile(filename) // nolint: gosec + if err != nil { + return fmt.Errorf("failed to open %q: %v", filename, err) + } + *f = data + return nil +} + +func jsonTranscode(in, out interface{}) error { + data, err := json.Marshal(in) + if err != nil { + return err + } + if err = json.Unmarshal(data, out); err != nil { + return fmt.Errorf("%#v -> %T: %v", in, out, err) + } + return nil +} diff --git a/vendor/github.com/alecthomas/kong/model.go b/vendor/github.com/alecthomas/kong/model.go new file mode 100644 index 0000000..793cf97 --- /dev/null +++ b/vendor/github.com/alecthomas/kong/model.go @@ -0,0 +1,495 @@ +package kong + +import ( + "fmt" + "math" + "os" + "reflect" + "strconv" + "strings" +) + +// A Visitable component in the model. +type Visitable interface { + node() +} + +// Application is the root of the Kong model. +type Application struct { + *Node + // Help flag, if the NoDefaultHelp() option is not specified. + HelpFlag *Flag +} + +// Argument represents a branching positional argument. +type Argument = Node + +// Command represents a command in the CLI. +type Command = Node + +// NodeType is an enum representing the type of a Node. 
+type NodeType int + +// Node type enumerations. +const ( + ApplicationNode NodeType = iota + CommandNode + ArgumentNode +) + +// Node is a branch in the CLI. ie. a command or positional argument. +type Node struct { + Type NodeType + Parent *Node + Name string + Help string // Short help displayed in summaries. + Detail string // Detailed help displayed when describing command/arg alone. + Group *Group + Hidden bool + Flags []*Flag + Positional []*Positional + Children []*Node + DefaultCmd *Node + Target reflect.Value // Pointer to the value in the grammar that this Node is associated with. + Tag *Tag + Aliases []string + Passthrough bool // Set to true to stop flag parsing when encountered. + Active bool // Denotes the node is part of an active branch in the CLI. + + Argument *Value // Populated when Type is ArgumentNode. +} + +func (*Node) node() {} + +// Leaf returns true if this Node is a leaf node. +func (n *Node) Leaf() bool { + return len(n.Children) == 0 +} + +// Find a command/argument/flag by pointer to its field. +// +// Returns nil if not found. Panics if ptr is not a pointer. +func (n *Node) Find(ptr interface{}) *Node { + key := reflect.ValueOf(ptr) + if key.Kind() != reflect.Ptr { + panic("expected a pointer") + } + return n.findNode(key) +} + +func (n *Node) findNode(key reflect.Value) *Node { + if n.Target == key { + return n + } + for _, child := range n.Children { + if found := child.findNode(key); found != nil { + return found + } + } + return nil +} + +// AllFlags returns flags from all ancestor branches encountered. +// +// If "hide" is true hidden flags will be omitted. +func (n *Node) AllFlags(hide bool) (out [][]*Flag) { + if n.Parent != nil { + out = append(out, n.Parent.AllFlags(hide)...) + } + group := []*Flag{} + for _, flag := range n.Flags { + if !hide || !flag.Hidden { + flag.Active = true + group = append(group, flag) + } + } + if len(group) > 0 { + out = append(out, group) + } + return +} + +// Leaves returns the leaf commands/arguments under Node. +// +// If "hidden" is true hidden leaves will be omitted. +func (n *Node) Leaves(hide bool) (out []*Node) { + _ = Visit(n, func(nd Visitable, next Next) error { + if nd == n { + return next(nil) + } + if node, ok := nd.(*Node); ok { + if hide && node.Hidden { + return nil + } + if len(node.Children) == 0 && node.Type != ApplicationNode { + out = append(out, node) + } + } + return next(nil) + }) + return +} + +// Depth of the command from the application root. +func (n *Node) Depth() int { + depth := 0 + p := n.Parent + for p != nil && p.Type != ApplicationNode { + depth++ + p = p.Parent + } + return depth +} + +// Summary help string for the node (not including application name). +func (n *Node) Summary() string { + summary := n.Path() + if flags := n.FlagSummary(true); flags != "" { + summary += " " + flags + } + args := []string{} + optional := 0 + for _, arg := range n.Positional { + argSummary := arg.Summary() + if arg.Tag.Optional { + optional++ + argSummary = strings.TrimRight(argSummary, "]") + } + args = append(args, argSummary) + } + if len(args) != 0 { + summary += " " + strings.Join(args, " ") + strings.Repeat("]", optional) + } else if len(n.Children) > 0 { + summary += " " + } + return summary +} + +// FlagSummary for the node. 
+func (n *Node) FlagSummary(hide bool) string { + required := []string{} + count := 0 + for _, group := range n.AllFlags(hide) { + for _, flag := range group { + count++ + if flag.Required { + required = append(required, flag.Summary()) + } + } + } + return strings.Join(required, " ") +} + +// FullPath is like Path() but includes the Application root node. +func (n *Node) FullPath() string { + root := n + for root.Parent != nil { + root = root.Parent + } + return strings.TrimSpace(root.Name + " " + n.Path()) +} + +// Vars returns the combined Vars defined by all ancestors of this Node. +func (n *Node) Vars() Vars { + if n == nil { + return Vars{} + } + return n.Parent.Vars().CloneWith(n.Tag.Vars) +} + +// Path through ancestors to this Node. +func (n *Node) Path() (out string) { + if n.Parent != nil { + out += " " + n.Parent.Path() + } + switch n.Type { + case CommandNode: + out += " " + n.Name + if len(n.Aliases) > 0 { + out += fmt.Sprintf(" (%s)", strings.Join(n.Aliases, ",")) + } + case ArgumentNode: + out += " " + "<" + n.Name + ">" + default: + } + return strings.TrimSpace(out) +} + +// ClosestGroup finds the first non-nil group in this node and its ancestors. +func (n *Node) ClosestGroup() *Group { + switch { + case n.Group != nil: + return n.Group + case n.Parent != nil: + return n.Parent.ClosestGroup() + default: + return nil + } +} + +// A Value is either a flag or a variable positional argument. +type Value struct { + Flag *Flag // Nil if positional argument. + Name string + Help string + OrigHelp string // Original help string, without interpolated variables. + HasDefault bool + Default string + DefaultValue reflect.Value + Enum string + Mapper Mapper + Tag *Tag + Target reflect.Value + Required bool + Set bool // Set to true when this value is set through some mechanism. + Format string // Formatting directive, if applicable. + Position int // Position (for positional arguments). + Passthrough bool // Set to true to stop flag parsing when encountered. + Active bool // Denotes the value is part of an active branch in the CLI. +} + +// EnumMap returns a map of the enums in this value. +func (v *Value) EnumMap() map[string]bool { + parts := strings.Split(v.Enum, ",") + out := make(map[string]bool, len(parts)) + for _, part := range parts { + out[strings.TrimSpace(part)] = true + } + return out +} + +// EnumSlice returns a slice of the enums in this value. +func (v *Value) EnumSlice() []string { + parts := strings.Split(v.Enum, ",") + out := make([]string, len(parts)) + for i, part := range parts { + out[i] = strings.TrimSpace(part) + } + return out +} + +// ShortSummary returns a human-readable summary of the value, not including any placeholders/defaults. +func (v *Value) ShortSummary() string { + if v.Flag != nil { + return fmt.Sprintf("--%s", v.Name) + } + argText := "<" + v.Name + ">" + if v.IsCumulative() { + argText += " ..." + } + if !v.Required { + argText = "[" + argText + "]" + } + return argText +} + +// Summary returns a human-readable summary of the value. +func (v *Value) Summary() string { + if v.Flag != nil { + if v.IsBool() { + return fmt.Sprintf("--%s", v.Name) + } + return fmt.Sprintf("--%s=%s", v.Name, v.Flag.FormatPlaceHolder()) + } + argText := "<" + v.Name + ">" + if v.IsCumulative() { + argText += " ..." + } + if !v.Required { + argText = "[" + argText + "]" + } + return argText +} + +// IsCumulative returns true if the type can be accumulated into. 
+func (v *Value) IsCumulative() bool { + return v.IsSlice() || v.IsMap() +} + +// IsSlice returns true if the value is a slice. +func (v *Value) IsSlice() bool { + return v.Target.Type().Name() == "" && v.Target.Kind() == reflect.Slice +} + +// IsMap returns true if the value is a map. +func (v *Value) IsMap() bool { + return v.Target.Kind() == reflect.Map +} + +// IsBool returns true if the underlying value is a boolean. +func (v *Value) IsBool() bool { + if m, ok := v.Mapper.(BoolMapperExt); ok && m.IsBoolFromValue(v.Target) { + return true + } + if m, ok := v.Mapper.(BoolMapper); ok && m.IsBool() { + return true + } + return v.Target.Kind() == reflect.Bool +} + +// IsCounter returns true if the value is a counter. +func (v *Value) IsCounter() bool { + return v.Tag.Type == "counter" +} + +// Parse tokens into value, parse, and validate, but do not write to the field. +func (v *Value) Parse(scan *Scanner, target reflect.Value) (err error) { + if target.Kind() == reflect.Ptr && target.IsNil() { + target.Set(reflect.New(target.Type().Elem())) + } + err = v.Mapper.Decode(&DecodeContext{Value: v, Scan: scan}, target) + if err != nil { + return fmt.Errorf("%s: %w", v.ShortSummary(), err) + } + v.Set = true + return nil +} + +// Apply value to field. +func (v *Value) Apply(value reflect.Value) { + v.Target.Set(value) + v.Set = true +} + +// ApplyDefault value to field if it is not already set. +func (v *Value) ApplyDefault() error { + if reflectValueIsZero(v.Target) { + return v.Reset() + } + v.Set = true + return nil +} + +// Reset this value to its default, either the zero value or the parsed result of its envar, +// or its "default" tag. +// +// Does not include resolvers. +func (v *Value) Reset() error { + v.Target.Set(reflect.Zero(v.Target.Type())) + if len(v.Tag.Envs) != 0 { + for _, env := range v.Tag.Envs { + envar := os.Getenv(env) + // Parse the first non-empty ENV in the list + if envar != "" { + err := v.Parse(ScanFromTokens(Token{Type: FlagValueToken, Value: envar}), v.Target) + if err != nil { + return fmt.Errorf("%s (from envar %s=%q)", err, env, envar) + } + return nil + } + } + } + if v.HasDefault { + return v.Parse(ScanFromTokens(Token{Type: FlagValueToken, Value: v.Default}), v.Target) + } + return nil +} + +func (*Value) node() {} + +// A Positional represents a non-branching command-line positional argument. +type Positional = Value + +// A Flag represents a command-line flag. +type Flag struct { + *Value + Group *Group // Logical grouping when displaying. May also be used by configuration loaders to group options logically. + Xor []string + PlaceHolder string + Envs []string + Short rune + Hidden bool + Negated bool +} + +func (f *Flag) String() string { + out := "--" + f.Name + if f.Short != 0 { + out = fmt.Sprintf("-%c, %s", f.Short, out) + } + if !f.IsBool() && !f.IsCounter() { + out += "=" + f.FormatPlaceHolder() + } + return out +} + +// FormatPlaceHolder formats the placeholder string for a Flag. +func (f *Flag) FormatPlaceHolder() string { + placeholderHelper, ok := f.Value.Mapper.(PlaceHolderProvider) + if ok { + return placeholderHelper.PlaceHolder(f) + } + tail := "" + if f.Value.IsSlice() && f.Value.Tag.Sep != -1 { + tail += string(f.Value.Tag.Sep) + "..." + } + if f.PlaceHolder != "" { + return f.PlaceHolder + tail + } + if f.HasDefault { + if f.Value.Target.Kind() == reflect.String { + return strconv.Quote(f.Default) + tail + } + return f.Default + tail + } + if f.Value.IsMap() { + if f.Value.Tag.MapSep != -1 { + tail = string(f.Value.Tag.MapSep) + "..." 
+ } + return "KEY=VALUE" + tail + } + if f.Tag != nil && f.Tag.TypeName != "" { + return strings.ToUpper(dashedString(f.Tag.TypeName)) + tail + } + return strings.ToUpper(f.Name) + tail +} + +// Group holds metadata about a command or flag group used when printing help. +type Group struct { + // Key is the `group` field tag value used to identify this group. + Key string + // Title is displayed above the grouped items. + Title string + // Description is optional and displayed under the Title when non empty. + // It can be used to introduce the group's purpose to the user. + Description string +} + +// This is directly from the Go 1.13 source code. +func reflectValueIsZero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return math.Float64bits(v.Float()) == 0 + case reflect.Complex64, reflect.Complex128: + c := v.Complex() + return math.Float64bits(real(c)) == 0 && math.Float64bits(imag(c)) == 0 + case reflect.Array: + for i := 0; i < v.Len(); i++ { + if !reflectValueIsZero(v.Index(i)) { + return false + } + } + return true + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice, reflect.UnsafePointer: + return v.IsNil() + case reflect.String: + return v.Len() == 0 + case reflect.Struct: + for i := 0; i < v.NumField(); i++ { + if !reflectValueIsZero(v.Field(i)) { + return false + } + } + return true + default: + // This should never happens, but will act as a safeguard for + // later, as a default value doesn't makes sense here. + panic(&reflect.ValueError{"reflect.Value.IsZero", v.Kind()}) + } +} diff --git a/vendor/github.com/alecthomas/kong/options.go b/vendor/github.com/alecthomas/kong/options.go new file mode 100644 index 0000000..8d2893c --- /dev/null +++ b/vendor/github.com/alecthomas/kong/options.go @@ -0,0 +1,493 @@ +package kong + +import ( + "errors" + "fmt" + "io" + "os" + "os/user" + "path/filepath" + "reflect" + "regexp" + "strings" +) + +// An Option applies optional changes to the Kong application. +type Option interface { + Apply(k *Kong) error +} + +// OptionFunc is function that adheres to the Option interface. +type OptionFunc func(k *Kong) error + +func (o OptionFunc) Apply(k *Kong) error { return o(k) } // nolint: revive + +// Vars sets the variables to use for interpolation into help strings and default values. +// +// See README for details. +type Vars map[string]string + +// Apply lets Vars act as an Option. +func (v Vars) Apply(k *Kong) error { + for key, value := range v { + k.vars[key] = value + } + return nil +} + +// CloneWith clones the current Vars and merges "vars" onto the clone. +func (v Vars) CloneWith(vars Vars) Vars { + out := make(Vars, len(v)+len(vars)) + for key, value := range v { + out[key] = value + } + for key, value := range vars { + out[key] = value + } + return out +} + +// Exit overrides the function used to terminate. This is useful for testing or interactive use. +func Exit(exit func(int)) Option { + return OptionFunc(func(k *Kong) error { + k.Exit = exit + return nil + }) +} + +type embedded struct { + strct any + tags []string +} + +// Embed a struct into the root of the CLI. +// +// "strct" must be a pointer to a structure. 
+func Embed(strct any, tags ...string) Option { + t := reflect.TypeOf(strct) + if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct { + panic("kong: Embed() must be called with a pointer to a struct") + } + return OptionFunc(func(k *Kong) error { + k.embedded = append(k.embedded, embedded{strct, tags}) + return nil + }) +} + +type dynamicCommand struct { + name string + help string + group string + tags []string + cmd interface{} +} + +// DynamicCommand registers a dynamically constructed command with the root of the CLI. +// +// This is useful for command-line structures that are extensible via user-provided plugins. +// +// "tags" is a list of extra tag strings to parse, in the form :"". +func DynamicCommand(name, help, group string, cmd interface{}, tags ...string) Option { + return OptionFunc(func(k *Kong) error { + k.dynamicCommands = append(k.dynamicCommands, &dynamicCommand{ + name: name, + help: help, + group: group, + cmd: cmd, + tags: tags, + }) + return nil + }) +} + +// NoDefaultHelp disables the default help flags. +func NoDefaultHelp() Option { + return OptionFunc(func(k *Kong) error { + k.noDefaultHelp = true + return nil + }) +} + +// PostBuild provides read/write access to kong.Kong after initial construction of the model is complete but before +// parsing occurs. +// +// This is useful for, e.g., adding short options to flags, updating help, etc. +func PostBuild(fn func(*Kong) error) Option { + return OptionFunc(func(k *Kong) error { + k.postBuildOptions = append(k.postBuildOptions, OptionFunc(fn)) + return nil + }) +} + +// Name overrides the application name. +func Name(name string) Option { + return PostBuild(func(k *Kong) error { + k.Model.Name = name + return nil + }) +} + +// Description sets the application description. +func Description(description string) Option { + return PostBuild(func(k *Kong) error { + k.Model.Help = description + return nil + }) +} + +// TypeMapper registers a mapper to a type. +func TypeMapper(typ reflect.Type, mapper Mapper) Option { + return OptionFunc(func(k *Kong) error { + k.registry.RegisterType(typ, mapper) + return nil + }) +} + +// KindMapper registers a mapper to a kind. +func KindMapper(kind reflect.Kind, mapper Mapper) Option { + return OptionFunc(func(k *Kong) error { + k.registry.RegisterKind(kind, mapper) + return nil + }) +} + +// ValueMapper registers a mapper to a field value. +func ValueMapper(ptr interface{}, mapper Mapper) Option { + return OptionFunc(func(k *Kong) error { + k.registry.RegisterValue(ptr, mapper) + return nil + }) +} + +// NamedMapper registers a mapper to a name. +func NamedMapper(name string, mapper Mapper) Option { + return OptionFunc(func(k *Kong) error { + k.registry.RegisterName(name, mapper) + return nil + }) +} + +// Writers overrides the default writers. Useful for testing or interactive use. +func Writers(stdout, stderr io.Writer) Option { + return OptionFunc(func(k *Kong) error { + k.Stdout = stdout + k.Stderr = stderr + return nil + }) +} + +// Bind binds values for hooks and Run() function arguments. +// +// Any arguments passed will be available to the receiving hook functions, but may be omitted. Additionally, *Kong and +// the current *Context will also be made available. +// +// There are two hook points: +// +// BeforeApply(...) error +// AfterApply(...) error +// +// Called before validation/assignment, and immediately after validation/assignment, respectively. +func Bind(args ...interface{}) Option { + return OptionFunc(func(k *Kong) error { + k.bindings.add(args...) 
+ return nil + }) +} + +// BindTo allows binding of implementations to interfaces. +// +// BindTo(impl, (*iface)(nil)) +func BindTo(impl, iface interface{}) Option { + return OptionFunc(func(k *Kong) error { + k.bindings.addTo(impl, iface) + return nil + }) +} + +// BindToProvider allows binding of provider functions. +// +// This is useful when the Run() function of different commands require different values that may +// not all be initialisable from the main() function. +func BindToProvider(provider interface{}) Option { + return OptionFunc(func(k *Kong) error { + return k.bindings.addProvider(provider) + }) +} + +// Help printer to use. +func Help(help HelpPrinter) Option { + return OptionFunc(func(k *Kong) error { + k.help = help + return nil + }) +} + +// ShortHelp configures the short usage message. +// +// It should be used together with kong.ShortUsageOnError() to display a +// custom short usage message on errors. +func ShortHelp(shortHelp HelpPrinter) Option { + return OptionFunc(func(k *Kong) error { + k.shortHelp = shortHelp + return nil + }) +} + +// HelpFormatter configures how the help text is formatted. +// +// Deprecated: Use ValueFormatter() instead. +func HelpFormatter(helpFormatter HelpValueFormatter) Option { + return OptionFunc(func(k *Kong) error { + k.helpFormatter = helpFormatter + return nil + }) +} + +// ValueFormatter configures how the help text is formatted. +func ValueFormatter(helpFormatter HelpValueFormatter) Option { + return OptionFunc(func(k *Kong) error { + k.helpFormatter = helpFormatter + return nil + }) +} + +// ConfigureHelp sets the HelpOptions to use for printing help. +func ConfigureHelp(options HelpOptions) Option { + return OptionFunc(func(k *Kong) error { + k.helpOptions = options + return nil + }) +} + +// AutoGroup automatically assigns groups to flags. +func AutoGroup(format func(parent Visitable, flag *Flag) *Group) Option { + return PostBuild(func(kong *Kong) error { + parents := []Visitable{kong.Model} + return Visit(kong.Model, func(node Visitable, next Next) error { + if flag, ok := node.(*Flag); ok && flag.Group == nil { + flag.Group = format(parents[len(parents)-1], flag) + } + parents = append(parents, node) + defer func() { parents = parents[:len(parents)-1] }() + return next(nil) + }) + }) +} + +// Groups associates `group` field tags with group metadata. +// +// This option is used to simplify Kong tags while providing +// rich group information such as title and optional description. +// +// Each key in the "groups" map corresponds to the value of a +// `group` Kong tag, while the first line of the value will be +// the title, and subsequent lines if any will be the description of +// the group. +// +// See also ExplicitGroups for a more structured alternative. +type Groups map[string]string + +func (g Groups) Apply(k *Kong) error { // nolint: revive + for key, info := range g { + lines := strings.Split(info, "\n") + title := strings.TrimSpace(lines[0]) + description := "" + if len(lines) > 1 { + description = strings.TrimSpace(strings.Join(lines[1:], "\n")) + } + k.groups = append(k.groups, Group{ + Key: key, + Title: title, + Description: description, + }) + } + return nil +} + +// ExplicitGroups associates `group` field tags with their metadata. +// +// It can be used to provide a title or header to a command or flag group. 
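// Illustrative sketch, not part of the vendored kong source: pairing `group` field
// tags with titles and descriptions via the Groups option defined above. The flag
// names and group keys are hypothetical.
package main

import "github.com/alecthomas/kong"

type CLI struct {
    Verbose bool   `group:"output" help:"Enable verbose logging."`
    Format  string `group:"output" help:"Output format." default:"text"`
    Token   string `group:"auth" help:"API token used for authentication."`
}

func main() {
    var cli CLI
    kong.Parse(&cli, kong.Groups{
        "output": "Output options:\nControl what is printed and how.",
        "auth":   "Authentication options:",
    })
}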
+func ExplicitGroups(groups []Group) Option { + return OptionFunc(func(k *Kong) error { + k.groups = groups + return nil + }) +} + +// UsageOnError configures Kong to display context-sensitive usage if FatalIfErrorf is called with an error. +func UsageOnError() Option { + return OptionFunc(func(k *Kong) error { + k.usageOnError = fullUsage + return nil + }) +} + +// ShortUsageOnError configures Kong to display context-sensitive short +// usage if FatalIfErrorf is called with an error. The default short +// usage message can be overridden with kong.ShortHelp(...). +func ShortUsageOnError() Option { + return OptionFunc(func(k *Kong) error { + k.usageOnError = shortUsage + return nil + }) +} + +// ClearResolvers clears all existing resolvers. +func ClearResolvers() Option { + return OptionFunc(func(k *Kong) error { + k.resolvers = nil + return nil + }) +} + +// Resolvers registers flag resolvers. +func Resolvers(resolvers ...Resolver) Option { + return OptionFunc(func(k *Kong) error { + k.resolvers = append(k.resolvers, resolvers...) + return nil + }) +} + +// IgnoreFields will cause kong.New() to skip field names that match any +// of the provided regex patterns. This is useful if you are not able to add a +// kong="-" struct tag to a struct/element before the call to New. +// +// Example: When referencing protoc generated structs, you will likely want to +// ignore/skip XXX_* fields. +func IgnoreFields(regexes ...string) Option { + return OptionFunc(func(k *Kong) error { + for _, r := range regexes { + if r == "" { + return errors.New("regex input cannot be empty") + } + + re, err := regexp.Compile(r) + if err != nil { + return fmt.Errorf("unable to compile regex: %v", err) + } + + k.ignoreFields = append(k.ignoreFields, re) + } + + return nil + }) +} + +// ConfigurationLoader is a function that builds a resolver from a file. +type ConfigurationLoader func(r io.Reader) (Resolver, error) + +// Configuration provides Kong with support for loading defaults from a set of configuration files. +// +// Paths will be opened in order, and "loader" will be used to provide a Resolver which is registered with Kong. +// +// Note: The JSON function is a ConfigurationLoader. +// +// ~ and variable expansion will occur on the provided paths. +func Configuration(loader ConfigurationLoader, paths ...string) Option { + return OptionFunc(func(k *Kong) error { + k.loader = loader + for _, path := range paths { + f, err := os.Open(ExpandPath(path)) + if err != nil { + if os.IsNotExist(err) || os.IsPermission(err) { + continue + } + + return err + } + f.Close() + + resolver, err := k.LoadConfig(path) + if err != nil { + return fmt.Errorf("%s: %v", path, err) + } + if resolver != nil { + k.resolvers = append(k.resolvers, resolver) + } + } + return nil + }) +} + +// ExpandPath is a helper function to expand a relative or home-relative path to an absolute path. +// +// eg. ~/.someconf -> /home/alec/.someconf +func ExpandPath(path string) string { + if filepath.IsAbs(path) { + return path + } + if strings.HasPrefix(path, "~/") { + user, err := user.Current() + if err != nil { + return path + } + return filepath.Join(user.HomeDir, path[2:]) + } + abspath, err := filepath.Abs(path) + if err != nil { + return path + } + return abspath +} + +func siftStrings(ss []string, filter func(s string) bool) []string { + i := 0 + ss = append([]string(nil), ss...) + for _, s := range ss { + if filter(s) { + ss[i] = s + i++ + } + } + return ss[0:i] +} + +// DefaultEnvars option inits environment names for flags. 
+// The name will not generate if tag "env" is "-". +// Predefined environment variables are skipped. +// +// For example: +// +// --some.value -> PREFIX_SOME_VALUE +func DefaultEnvars(prefix string) Option { + processFlag := func(flag *Flag) { + switch env := flag.Envs; { + case flag.Name == "help": + return + case len(env) == 1 && env[0] == "-": + flag.Envs = nil + return + case len(env) > 0: + return + } + replacer := strings.NewReplacer("-", "_", ".", "_") + names := append([]string{prefix}, camelCase(replacer.Replace(flag.Name))...) + names = siftStrings(names, func(s string) bool { return !(s == "_" || strings.TrimSpace(s) == "") }) + name := strings.ToUpper(strings.Join(names, "_")) + flag.Envs = append(flag.Envs, name) + flag.Value.Tag.Envs = append(flag.Value.Tag.Envs, name) + } + + var processNode func(node *Node) + processNode = func(node *Node) { + for _, flag := range node.Flags { + processFlag(flag) + } + for _, node := range node.Children { + processNode(node) + } + } + + return PostBuild(func(k *Kong) error { + processNode(k.Model.Node) + return nil + }) +} + +// FlagNamer allows you to override the default kebab-case automated flag name generation. +func FlagNamer(namer func(fieldName string) string) Option { + return OptionFunc(func(k *Kong) error { + k.flagNamer = namer + return nil + }) +} diff --git a/vendor/github.com/alecthomas/kong/resolver.go b/vendor/github.com/alecthomas/kong/resolver.go new file mode 100644 index 0000000..ac1de1f --- /dev/null +++ b/vendor/github.com/alecthomas/kong/resolver.go @@ -0,0 +1,68 @@ +package kong + +import ( + "encoding/json" + "io" + "strings" +) + +// A Resolver resolves a Flag value from an external source. +type Resolver interface { + // Validate configuration against Application. + // + // This can be used to validate that all provided configuration is valid within this application. + Validate(app *Application) error + + // Resolve the value for a Flag. + Resolve(context *Context, parent *Path, flag *Flag) (interface{}, error) +} + +// ResolverFunc is a convenience type for non-validating Resolvers. +type ResolverFunc func(context *Context, parent *Path, flag *Flag) (interface{}, error) + +var _ Resolver = ResolverFunc(nil) + +func (r ResolverFunc) Resolve(context *Context, parent *Path, flag *Flag) (interface{}, error) { // nolint: revive + return r(context, parent, flag) +} +func (r ResolverFunc) Validate(app *Application) error { return nil } // nolint: revive + +// JSON returns a Resolver that retrieves values from a JSON source. +// +// Flag names are used as JSON keys indirectly, by tring snake_case and camelCase variants. 
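// Illustrative sketch, not part of the vendored kong source: combining the JSON
// loader below with the Configuration option so config files supply flag defaults.
// The paths and the CLI struct are hypothetical; missing files are skipped.
package main

import "github.com/alecthomas/kong"

type CLI struct {
    ListenAddr string `help:"Address to listen on." default:":9090"`
}

func main() {
    var cli CLI
    kong.Parse(&cli, kong.Configuration(kong.JSON,
        "/etc/exporter-unifi-protect/config.json",
        "~/.config/exporter-unifi-protect/config.json"))
}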
+func JSON(r io.Reader) (Resolver, error) { + values := map[string]interface{}{} + err := json.NewDecoder(r).Decode(&values) + if err != nil { + return nil, err + } + var f ResolverFunc = func(context *Context, parent *Path, flag *Flag) (interface{}, error) { + name := strings.ReplaceAll(flag.Name, "-", "_") + snakeCaseName := snakeCase(flag.Name) + raw, ok := values[name] + if ok { + return raw, nil + } else if raw, ok = values[snakeCaseName]; ok { + return raw, nil + } + raw = values + for _, part := range strings.Split(name, ".") { + if values, ok := raw.(map[string]interface{}); ok { + raw, ok = values[part] + if !ok { + return nil, nil + } + } else { + return nil, nil + } + } + return raw, nil + } + + return f, nil +} + +func snakeCase(name string) string { + name = strings.Join(strings.Split(strings.Title(name), "-"), "") //nolint: staticcheck + return strings.ToLower(name[:1]) + name[1:] +} diff --git a/vendor/github.com/alecthomas/kong/scanner.go b/vendor/github.com/alecthomas/kong/scanner.go new file mode 100644 index 0000000..1766c4b --- /dev/null +++ b/vendor/github.com/alecthomas/kong/scanner.go @@ -0,0 +1,222 @@ +package kong + +import ( + "fmt" + "strings" +) + +// TokenType is the type of a token. +type TokenType int + +// Token types. +const ( + UntypedToken TokenType = iota + EOLToken + FlagToken // --<flag> + FlagValueToken // =<value> + ShortFlagToken // -<short>[<tail>] + ShortFlagTailToken // <tail> + PositionalArgumentToken // <arg> +) + +func (t TokenType) String() string { + switch t { + case UntypedToken: + return "untyped" + case EOLToken: + return "<EOL>" + case FlagToken: // --<flag> + return "long flag" + case FlagValueToken: // =<value> + return "flag value" + case ShortFlagToken: // -<short> + return "short flag" + case ShortFlagTailToken: // <tail> + return "short flag remainder" + case PositionalArgumentToken: // <arg> + return "positional argument" + } + panic("unsupported type") +} + +// Token created by Scanner. +type Token struct { + Value interface{} + Type TokenType +} + +func (t Token) String() string { + switch t.Type { + case FlagToken: + return fmt.Sprintf("--%v", t.Value) + + case ShortFlagToken: + return fmt.Sprintf("-%v", t.Value) + + case EOLToken: + return "EOL" + + default: + return fmt.Sprintf("%v", t.Value) + } +} + +// IsEOL returns true if this Token is past the end of the line. +func (t Token) IsEOL() bool { + return t.Type == EOLToken +} + +// IsAny returns true if the token's type is any of those provided. +func (t TokenType) IsAny(types ...TokenType) bool { + for _, typ := range types { + if t == typ { + return true + } + } + return false +} + +// InferredType tries to infer the type of a token. +func (t Token) InferredType() TokenType { + if t.Type != UntypedToken { + return t.Type + } + if v, ok := t.Value.(string); ok { + if strings.HasPrefix(v, "--") { // nolint: gocritic + return FlagToken + } else if v == "-" { + return PositionalArgumentToken + } else if strings.HasPrefix(v, "-") { + return ShortFlagToken + } + } + return t.Type +} + +// IsValue returns true if token is usable as a parseable value. +// +// A parseable value is either a value typed token, or an untyped token NOT starting with a hyphen. +func (t Token) IsValue() bool { + tt := t.InferredType() + return tt.IsAny(FlagValueToken, ShortFlagTailToken, PositionalArgumentToken) || + (tt == UntypedToken && !strings.HasPrefix(t.String(), "-")) +} + +// Scanner is a stack-based scanner over command-line tokens. +// +// Initially all tokens are untyped. As the parser consumes tokens it assigns types, splits tokens, and pushes them back +// onto the stream.
+// +// For example, the token "--foo=bar" will be split into the following by the parser: +// +// [{FlagToken, "foo"}, {FlagValueToken, "bar"}] +type Scanner struct { + args []Token +} + +// ScanAsType creates a new Scanner from args with the given type. +func ScanAsType(ttype TokenType, args ...string) *Scanner { + s := &Scanner{} + for _, arg := range args { + s.args = append(s.args, Token{Value: arg, Type: ttype}) + } + return s +} + +// Scan creates a new Scanner from args with untyped tokens. +func Scan(args ...string) *Scanner { + return ScanAsType(UntypedToken, args...) +} + +// ScanFromTokens creates a new Scanner from a slice of tokens. +func ScanFromTokens(tokens ...Token) *Scanner { + return &Scanner{args: tokens} +} + +// Len returns the number of input arguments. +func (s *Scanner) Len() int { + return len(s.args) +} + +// Pop the front token off the Scanner. +func (s *Scanner) Pop() Token { + if len(s.args) == 0 { + return Token{Type: EOLToken} + } + arg := s.args[0] + s.args = s.args[1:] + return arg +} + +type expectedError struct { + context string + token Token +} + +func (e *expectedError) Error() string { + return fmt.Sprintf("expected %s value but got %q (%s)", e.context, e.token, e.token.InferredType()) +} + +// PopValue pops a value token, or returns an error. +// +// "context" is used to assist the user if the value can not be popped, eg. "expected value but got " +func (s *Scanner) PopValue(context string) (Token, error) { + t := s.Pop() + if !t.IsValue() { + return t, &expectedError{context, t} + } + return t, nil +} + +// PopValueInto pops a value token into target or returns an error. +// +// "context" is used to assist the user if the value can not be popped, eg. "expected value but got " +func (s *Scanner) PopValueInto(context string, target interface{}) error { + t, err := s.PopValue(context) + if err != nil { + return err + } + return jsonTranscode(t.Value, target) +} + +// PopWhile predicate returns true. +func (s *Scanner) PopWhile(predicate func(Token) bool) (values []Token) { + for predicate(s.Peek()) { + values = append(values, s.Pop()) + } + return +} + +// PopUntil predicate returns true. +func (s *Scanner) PopUntil(predicate func(Token) bool) (values []Token) { + for !predicate(s.Peek()) { + values = append(values, s.Pop()) + } + return +} + +// Peek at the next Token or return an EOLToken. +func (s *Scanner) Peek() Token { + if len(s.args) == 0 { + return Token{Type: EOLToken} + } + return s.args[0] +} + +// Push an untyped Token onto the front of the Scanner. +func (s *Scanner) Push(arg interface{}) *Scanner { + s.PushToken(Token{Value: arg}) + return s +} + +// PushTyped pushes a typed token onto the front of the Scanner. +func (s *Scanner) PushTyped(arg interface{}, typ TokenType) *Scanner { + s.PushToken(Token{Value: arg, Type: typ}) + return s +} + +// PushToken pushes a preconstructed Token onto the front of the Scanner. +func (s *Scanner) PushToken(token Token) *Scanner { + s.args = append([]Token{token}, s.args...) + return s +} diff --git a/vendor/github.com/alecthomas/kong/tag.go b/vendor/github.com/alecthomas/kong/tag.go new file mode 100644 index 0000000..f99059b --- /dev/null +++ b/vendor/github.com/alecthomas/kong/tag.go @@ -0,0 +1,351 @@ +package kong + +import ( + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "unicode/utf8" +) + +// Tag represents the parsed state of Kong tags in a struct field tag. +type Tag struct { + Ignored bool // Field is ignored by Kong. ie. 
kong:"-" + Cmd bool + Arg bool + Required bool + Optional bool + Name string + Help string + Type string + TypeName string + HasDefault bool + Default string + Format string + PlaceHolder string + Envs []string + Short rune + Hidden bool + Sep rune + MapSep rune + Enum string + Group string + Xor []string + Vars Vars + Prefix string // Optional prefix on anonymous structs. All sub-flags will have this prefix. + EnvPrefix string + Embed bool + Aliases []string + Negatable bool + Passthrough bool + + // Storage for all tag keys for arbitrary lookups. + items map[string][]string +} + +func (t *Tag) String() string { + out := []string{} + for key, list := range t.items { + for _, value := range list { + out = append(out, fmt.Sprintf("%s:%q", key, value)) + } + } + return strings.Join(out, " ") +} + +type tagChars struct { + sep, quote, assign rune + needsUnquote bool +} + +var kongChars = tagChars{sep: ',', quote: '\'', assign: '=', needsUnquote: false} +var bareChars = tagChars{sep: ' ', quote: '"', assign: ':', needsUnquote: true} + +// nolint:gocyclo +func parseTagItems(tagString string, chr tagChars) (map[string][]string, error) { + d := map[string][]string{} + key := []rune{} + value := []rune{} + quotes := false + inKey := true + + add := func() error { + // Bare tags are quoted, therefore we need to unquote them in the same fashion reflect.Lookup() (implicitly) + // unquotes "kong tags". + s := string(value) + + if chr.needsUnquote && s != "" { + if unquoted, err := strconv.Unquote(fmt.Sprintf(`"%s"`, s)); err == nil { + s = unquoted + } else { + return fmt.Errorf("unquoting tag value `%s`: %w", s, err) + } + } + + d[string(key)] = append(d[string(key)], s) + key = []rune{} + value = []rune{} + inKey = true + + return nil + } + + runes := []rune(tagString) + for idx := 0; idx < len(runes); idx++ { + r := runes[idx] + next := rune(0) + eof := false + if idx < len(runes)-1 { + next = runes[idx+1] + } else { + eof = true + } + if !quotes && r == chr.sep { + if err := add(); err != nil { + return nil, err + } + + continue + } + if r == chr.assign && inKey { + inKey = false + continue + } + if r == '\\' { + if next == chr.quote { + idx++ + + // We need to keep the backslashes, otherwise subsequent unquoting cannot work + if chr.needsUnquote { + value = append(value, r) + } + + r = chr.quote + } + } else if r == chr.quote { + if quotes { + quotes = false + if next == chr.sep || eof { + continue + } + return nil, fmt.Errorf("%v has an unexpected char at pos %v", tagString, idx) + } + quotes = true + continue + } + if inKey { + key = append(key, r) + } else { + value = append(value, r) + } + } + if quotes { + return nil, fmt.Errorf("%v is not quoted properly", tagString) + } + + if err := add(); err != nil { + return nil, err + } + + return d, nil +} + +func getTagInfo(ft reflect.StructField) (string, tagChars) { + s, ok := ft.Tag.Lookup("kong") + if ok { + return s, kongChars + } + + return string(ft.Tag), bareChars +} + +func newEmptyTag() *Tag { + return &Tag{items: map[string][]string{}} +} + +func tagSplitFn(r rune) bool { + return r == ',' || r == ' ' +} + +func parseTagString(s string) (*Tag, error) { + items, err := parseTagItems(s, bareChars) + if err != nil { + return nil, err + } + t := &Tag{ + items: items, + } + err = hydrateTag(t, nil) + if err != nil { + return nil, fmt.Errorf("%s: %s", s, err) + } + return t, nil +} + +func parseTag(parent reflect.Value, ft reflect.StructField) (*Tag, error) { + if ft.Tag.Get("kong") == "-" { + t := newEmptyTag() + t.Ignored = true + return t, 
nil + } + items, err := parseTagItems(getTagInfo(ft)) + if err != nil { + return nil, err + } + t := &Tag{ + items: items, + } + err = hydrateTag(t, ft.Type) + if err != nil { + return nil, failField(parent, ft, "%s", err) + } + return t, nil +} + +func hydrateTag(t *Tag, typ reflect.Type) error { // nolint: gocyclo + var typeName string + var isBool bool + var isBoolPtr bool + if typ != nil { + typeName = typ.Name() + isBool = typ.Kind() == reflect.Bool + isBoolPtr = typ.Kind() == reflect.Ptr && typ.Elem().Kind() == reflect.Bool + } + var err error + t.Cmd = t.Has("cmd") + t.Arg = t.Has("arg") + required := t.Has("required") + optional := t.Has("optional") + if required && optional { + return fmt.Errorf("can't specify both required and optional") + } + t.Required = required + t.Optional = optional + t.HasDefault = t.Has("default") + t.Default = t.Get("default") + // Arguments with defaults are always optional. + if t.Arg && t.HasDefault { + t.Optional = true + } else if t.Arg && !optional { // Arguments are required unless explicitly made optional. + t.Required = true + } + t.Name = t.Get("name") + t.Help = t.Get("help") + t.Type = t.Get("type") + t.TypeName = typeName + for _, env := range t.GetAll("env") { + t.Envs = append(t.Envs, strings.FieldsFunc(env, tagSplitFn)...) + } + t.Short, err = t.GetRune("short") + if err != nil && t.Get("short") != "" { + return fmt.Errorf("invalid short flag name %q: %s", t.Get("short"), err) + } + t.Hidden = t.Has("hidden") + t.Format = t.Get("format") + t.Sep, _ = t.GetSep("sep", ',') + t.MapSep, _ = t.GetSep("mapsep", ';') + t.Group = t.Get("group") + for _, xor := range t.GetAll("xor") { + t.Xor = append(t.Xor, strings.FieldsFunc(xor, tagSplitFn)...) + } + t.Prefix = t.Get("prefix") + t.EnvPrefix = t.Get("envprefix") + t.Embed = t.Has("embed") + negatable := t.Has("negatable") + if negatable && !isBool && !isBoolPtr { + return fmt.Errorf("negatable can only be set on booleans") + } + t.Negatable = negatable + aliases := t.Get("aliases") + if len(aliases) > 0 { + t.Aliases = append(t.Aliases, strings.FieldsFunc(aliases, tagSplitFn)...) + } + t.Vars = Vars{} + for _, set := range t.GetAll("set") { + parts := strings.SplitN(set, "=", 2) + if len(parts) == 0 { + return fmt.Errorf("set should be in the form key=value but got %q", set) + } + t.Vars[parts[0]] = parts[1] + } + t.PlaceHolder = t.Get("placeholder") + t.Enum = t.Get("enum") + scalarType := typ == nil || !(typ.Kind() == reflect.Slice || typ.Kind() == reflect.Map || typ.Kind() == reflect.Ptr) + if t.Enum != "" && !(t.Required || t.HasDefault) && scalarType { + return fmt.Errorf("enum value is only valid if it is either required or has a valid default value") + } + passthrough := t.Has("passthrough") + if passthrough && !t.Arg && !t.Cmd { + return fmt.Errorf("passthrough only makes sense for positional arguments or commands") + } + t.Passthrough = passthrough + return nil +} + +// Has returns true if the tag contained the given key. +func (t *Tag) Has(k string) bool { + _, ok := t.items[k] + return ok +} + +// Get returns the value of the given tag. +// +// Note that this will return the empty string if the tag is missing. +func (t *Tag) Get(k string) string { + values := t.items[k] + if len(values) == 0 { + return "" + } + return values[0] +} + +// GetAll returns all encountered values for a tag, in the case of multiple occurrences. +func (t *Tag) GetAll(k string) []string { + return t.items[k] +} + +// GetBool returns true if the given tag looks like a boolean truth string. 
+func (t *Tag) GetBool(k string) (bool, error) { + return strconv.ParseBool(t.Get(k)) +} + +// GetFloat parses the given tag as a float64. +func (t *Tag) GetFloat(k string) (float64, error) { + return strconv.ParseFloat(t.Get(k), 64) +} + +// GetInt parses the given tag as an int64. +func (t *Tag) GetInt(k string) (int64, error) { + return strconv.ParseInt(t.Get(k), 10, 64) +} + +// GetRune parses the given tag as a rune. +func (t *Tag) GetRune(k string) (rune, error) { + value := t.Get(k) + r, size := utf8.DecodeRuneInString(value) + if r == utf8.RuneError || size < len(value) { + return 0, errors.New("invalid rune") + } + return r, nil +} + +// GetSep parses the given tag as a rune separator, allowing for a default or none. +// The separator is returned, or -1 if "none" is specified. If the tag value is an +// invalid utf8 sequence, the default rune is returned as well as an error. If the +// tag value is more than one rune, the first rune is returned as well as an error. +func (t *Tag) GetSep(k string, dflt rune) (rune, error) { + tv := t.Get(k) + if tv == "none" { + return -1, nil + } else if tv == "" { + return dflt, nil + } + r, size := utf8.DecodeRuneInString(tv) + if r == utf8.RuneError { + return dflt, fmt.Errorf(`%v:"%v" has a rune error`, k, tv) + } else if size != len(tv) { + return r, fmt.Errorf(`%v:"%v" is more than a single rune`, k, tv) + } + return r, nil +} diff --git a/vendor/github.com/alecthomas/kong/util.go b/vendor/github.com/alecthomas/kong/util.go new file mode 100644 index 0000000..50b1dfe --- /dev/null +++ b/vendor/github.com/alecthomas/kong/util.go @@ -0,0 +1,57 @@ +package kong + +import ( + "fmt" + "os" + "reflect" +) + +// ConfigFlag uses the configured (via kong.Configuration(loader)) configuration loader to load configuration +// from a file specified by a flag. +// +// Use this as a flag value to support loading of custom configuration via a flag. +type ConfigFlag string + +// BeforeResolve adds a resolver. +func (c ConfigFlag) BeforeResolve(kong *Kong, ctx *Context, trace *Path) error { + if kong.loader == nil { + return fmt.Errorf("kong must be configured with kong.Configuration(...)") + } + path := string(ctx.FlagValue(trace.Flag).(ConfigFlag)) // nolint + resolver, err := kong.LoadConfig(path) + if err != nil { + return err + } + ctx.AddResolver(resolver) + return nil +} + +// VersionFlag is a flag type that can be used to display a version number, stored in the "version" variable. +type VersionFlag bool + +// BeforeReset writes the version variable and terminates with a 0 exit status. +func (v VersionFlag) BeforeReset(app *Kong, vars Vars) error { + fmt.Fprintln(app.Stdout, vars["version"]) + app.Exit(0) + return nil +} + +// ChangeDirFlag changes the current working directory to a path specified by a flag +// early in the parsing process, changing how other flags resolve relative paths. +// +// Use this flag to provide a "git -C" like functionality. +// +// It is not compatible with custom named decoders, e.g., existingdir. +type ChangeDirFlag string + +// Decode is used to create a side effect of changing the current working directory. 
+func (c ChangeDirFlag) Decode(ctx *DecodeContext) error { + var path string + err := ctx.Scan.PopValueInto("string", &path) + if err != nil { + return err + } + path = ExpandPath(path) + ctx.Value.Target.Set(reflect.ValueOf(ChangeDirFlag(path))) + return os.Chdir(path) +} diff --git a/vendor/github.com/alecthomas/kong/visit.go b/vendor/github.com/alecthomas/kong/visit.go new file mode 100644 index 0000000..f7dab53 --- /dev/null +++ b/vendor/github.com/alecthomas/kong/visit.go @@ -0,0 +1,58 @@ +package kong + +import ( + "fmt" +) + +// Next should be called by Visitor to proceed with the walk. +// +// The walk will terminate if "err" is non-nil. +type Next func(err error) error + +// Visitor can be used to walk all nodes in the model. +type Visitor func(node Visitable, next Next) error + +// Visit all nodes. +func Visit(node Visitable, visitor Visitor) error { + return visitor(node, func(err error) error { + if err != nil { + return err + } + switch node := node.(type) { + case *Application: + return visitNodeChildren(node.Node, visitor) + case *Node: + return visitNodeChildren(node, visitor) + case *Value: + case *Flag: + return Visit(node.Value, visitor) + default: + panic(fmt.Sprintf("unsupported node type %T", node)) + } + return nil + }) +} + +func visitNodeChildren(node *Node, visitor Visitor) error { + if node.Argument != nil { + if err := Visit(node.Argument, visitor); err != nil { + return err + } + } + for _, flag := range node.Flags { + if err := Visit(flag, visitor); err != nil { + return err + } + } + for _, pos := range node.Positional { + if err := Visit(pos, visitor); err != nil { + return err + } + } + for _, child := range node.Children { + if err := Visit(child, visitor); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/beorn7/perks/LICENSE b/vendor/github.com/beorn7/perks/LICENSE new file mode 100644 index 0000000..339177b --- /dev/null +++ b/vendor/github.com/beorn7/perks/LICENSE @@ -0,0 +1,20 @@ +Copyright (C) 2013 Blake Mizerany + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
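The kong helpers vendored above (the JSON configuration resolver, `ConfigFlag`, `VersionFlag`, and `ChangeDirFlag`) are easiest to read with a small usage sketch in mind. The snippet below is not part of the vendored diff and does not describe this exporter's actual CLI; the flag names, config path, and version string are illustrative assumptions only.

```go
package main

import (
	"github.com/alecthomas/kong"
)

// Hypothetical CLI grammar; field names and defaults are made up for illustration.
var cli struct {
	// ConfigFlag's BeforeResolve hook loads the file through the configured
	// loader (here kong.JSON, the resolver vendored above) and registers it
	// as an additional resolver for the remaining flags.
	Config kong.ConfigFlag `help:"Path to a JSON config file."`

	// VersionFlag's BeforeReset hook prints the "version" variable and exits 0.
	Version kong.VersionFlag `help:"Print version and exit."`

	Listen string `help:"Listen address." default:":9090"`
}

func main() {
	ctx := kong.Parse(&cli,
		kong.Configuration(kong.JSON, "./config.json"), // illustrative default search path
		kong.Vars{"version": "v0.0.0-dev"},
	)
	_ = ctx // a real program would continue with ctx.Run(...) or similar
}
```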
diff --git a/vendor/github.com/beorn7/perks/quantile/exampledata.txt b/vendor/github.com/beorn7/perks/quantile/exampledata.txt new file mode 100644 index 0000000..1602287 --- /dev/null +++ b/vendor/github.com/beorn7/perks/quantile/exampledata.txt @@ -0,0 +1,2388 @@ +8 +5 +26 +12 +5 +235 +13 +6 +28 +30 +3 +3 +3 +3 +5 +2 +33 +7 +2 +4 +7 +12 +14 +5 +8 +3 +10 +4 +5 +3 +6 +6 +209 +20 +3 +10 +14 +3 +4 +6 +8 +5 +11 +7 +3 +2 +3 +3 +212 +5 +222 +4 +10 +10 +5 +6 +3 +8 +3 +10 +254 +220 +2 +3 +5 +24 +5 +4 +222 +7 +3 +3 +223 +8 +15 +12 +14 +14 +3 +2 +2 +3 +13 +3 +11 +4 +4 +6 +5 +7 +13 +5 +3 +5 +2 +5 +3 +5 +2 +7 +15 +17 +14 +3 +6 +6 +3 +17 +5 +4 +7 +6 +4 +4 +8 +6 +8 +3 +9 +3 +6 +3 +4 +5 +3 +3 +660 +4 +6 +10 +3 +6 +3 +2 +5 +13 +2 +4 +4 +10 +4 +8 +4 +3 +7 +9 +9 +3 +10 +37 +3 +13 +4 +12 +3 +6 +10 +8 +5 +21 +2 +3 +8 +3 +2 +3 +3 +4 +12 +2 +4 +8 +8 +4 +3 +2 +20 +1 +6 +32 +2 +11 +6 +18 +3 +8 +11 +3 +212 +3 +4 +2 +6 +7 +12 +11 +3 +2 +16 +10 +6 +4 +6 +3 +2 +7 +3 +2 +2 +2 +2 +5 +6 +4 +3 +10 +3 +4 +6 +5 +3 +4 +4 +5 +6 +4 +3 +4 +4 +5 +7 +5 +5 +3 +2 +7 +2 +4 +12 +4 +5 +6 +2 +4 +4 +8 +4 +15 +13 +7 +16 +5 +3 +23 +5 +5 +7 +3 +2 +9 +8 +7 +5 +8 +11 +4 +10 +76 +4 +47 +4 +3 +2 +7 +4 +2 +3 +37 +10 +4 +2 +20 +5 +4 +4 +10 +10 +4 +3 +7 +23 +240 +7 +13 +5 +5 +3 +3 +2 +5 +4 +2 +8 +7 +19 +2 +23 +8 +7 +2 +5 +3 +8 +3 +8 +13 +5 +5 +5 +2 +3 +23 +4 +9 +8 +4 +3 +3 +5 +220 +2 +3 +4 +6 +14 +3 +53 +6 +2 +5 +18 +6 +3 +219 +6 +5 +2 +5 +3 +6 +5 +15 +4 +3 +17 +3 +2 +4 +7 +2 +3 +3 +4 +4 +3 +2 +664 +6 +3 +23 +5 +5 +16 +5 +8 +2 +4 +2 +24 +12 +3 +2 +3 +5 +8 +3 +5 +4 +3 +14 +3 +5 +8 +2 +3 +7 +9 +4 +2 +3 +6 +8 +4 +3 +4 +6 +5 +3 +3 +6 +3 +19 +4 +4 +6 +3 +6 +3 +5 +22 +5 +4 +4 +3 +8 +11 +4 +9 +7 +6 +13 +4 +4 +4 +6 +17 +9 +3 +3 +3 +4 +3 +221 +5 +11 +3 +4 +2 +12 +6 +3 +5 +7 +5 +7 +4 +9 +7 +14 +37 +19 +217 +16 +3 +5 +2 +2 +7 +19 +7 +6 +7 +4 +24 +5 +11 +4 +7 +7 +9 +13 +3 +4 +3 +6 +28 +4 +4 +5 +5 +2 +5 +6 +4 +4 +6 +10 +5 +4 +3 +2 +3 +3 +6 +5 +5 +4 +3 +2 +3 +7 +4 +6 +18 +16 +8 +16 +4 +5 +8 +6 +9 +13 +1545 +6 +215 +6 +5 +6 +3 +45 +31 +5 +2 +2 +4 +3 +3 +2 +5 +4 +3 +5 +7 +7 +4 +5 +8 +5 +4 +749 +2 +31 +9 +11 +2 +11 +5 +4 +4 +7 +9 +11 +4 +5 +4 +7 +3 +4 +6 +2 +15 +3 +4 +3 +4 +3 +5 +2 +13 +5 +5 +3 +3 +23 +4 +4 +5 +7 +4 +13 +2 +4 +3 +4 +2 +6 +2 +7 +3 +5 +5 +3 +29 +5 +4 +4 +3 +10 +2 +3 +79 +16 +6 +6 +7 +7 +3 +5 +5 +7 +4 +3 +7 +9 +5 +6 +5 +9 +6 +3 +6 +4 +17 +2 +10 +9 +3 +6 +2 +3 +21 +22 +5 +11 +4 +2 +17 +2 +224 +2 +14 +3 +4 +4 +2 +4 +4 +4 +4 +5 +3 +4 +4 +10 +2 +6 +3 +3 +5 +7 +2 +7 +5 +6 +3 +218 +2 +2 +5 +2 +6 +3 +5 +222 +14 +6 +33 +3 +2 +5 +3 +3 +3 +9 +5 +3 +3 +2 +7 +4 +3 +4 +3 +5 +6 +5 +26 +4 +13 +9 +7 +3 +221 +3 +3 +4 +4 +4 +4 +2 +18 +5 +3 +7 +9 +6 +8 +3 +10 +3 +11 +9 +5 +4 +17 +5 +5 +6 +6 +3 +2 +4 +12 +17 +6 +7 +218 +4 +2 +4 +10 +3 +5 +15 +3 +9 +4 +3 +3 +6 +29 +3 +3 +4 +5 +5 +3 +8 +5 +6 +6 +7 +5 +3 +5 +3 +29 +2 +31 +5 +15 +24 +16 +5 +207 +4 +3 +3 +2 +15 +4 +4 +13 +5 +5 +4 +6 +10 +2 +7 +8 +4 +6 +20 +5 +3 +4 +3 +12 +12 +5 +17 +7 +3 +3 +3 +6 +10 +3 +5 +25 +80 +4 +9 +3 +2 +11 +3 +3 +2 +3 +8 +7 +5 +5 +19 +5 +3 +3 +12 +11 +2 +6 +5 +5 +5 +3 +3 +3 +4 +209 +14 +3 +2 +5 +19 +4 +4 +3 +4 +14 +5 +6 +4 +13 +9 +7 +4 +7 +10 +2 +9 +5 +7 +2 +8 +4 +6 +5 +5 +222 +8 +7 +12 +5 +216 +3 +4 +4 +6 +3 +14 +8 +7 +13 +4 +3 +3 +3 +3 +17 +5 +4 +3 +33 +6 +6 +33 +7 +5 +3 +8 +7 +5 +2 +9 +4 +2 +233 +24 +7 +4 +8 +10 +3 +4 +15 +2 +16 +3 +3 +13 +12 +7 +5 +4 +207 +4 +2 +4 +27 +15 +2 +5 +2 +25 +6 +5 +5 +6 +13 +6 +18 +6 +4 +12 +225 +10 +7 +5 +2 +2 +11 +4 +14 +21 +8 +10 +3 +5 +4 +232 +2 +5 +5 +3 +7 +17 +11 +6 +6 +23 +4 +6 +3 +5 +4 +2 +17 +3 +6 +5 +8 +3 +2 +2 +14 +9 +4 +4 +2 +5 +5 +3 +7 +6 +12 +6 +10 +3 +6 
+2 +2 +19 +5 +4 +4 +9 +2 +4 +13 +3 +5 +6 +3 +6 +5 +4 +9 +6 +3 +5 +7 +3 +6 +6 +4 +3 +10 +6 +3 +221 +3 +5 +3 +6 +4 +8 +5 +3 +6 +4 +4 +2 +54 +5 +6 +11 +3 +3 +4 +4 +4 +3 +7 +3 +11 +11 +7 +10 +6 +13 +223 +213 +15 +231 +7 +3 +7 +228 +2 +3 +4 +4 +5 +6 +7 +4 +13 +3 +4 +5 +3 +6 +4 +6 +7 +2 +4 +3 +4 +3 +3 +6 +3 +7 +3 +5 +18 +5 +6 +8 +10 +3 +3 +3 +2 +4 +2 +4 +4 +5 +6 +6 +4 +10 +13 +3 +12 +5 +12 +16 +8 +4 +19 +11 +2 +4 +5 +6 +8 +5 +6 +4 +18 +10 +4 +2 +216 +6 +6 +6 +2 +4 +12 +8 +3 +11 +5 +6 +14 +5 +3 +13 +4 +5 +4 +5 +3 +28 +6 +3 +7 +219 +3 +9 +7 +3 +10 +6 +3 +4 +19 +5 +7 +11 +6 +15 +19 +4 +13 +11 +3 +7 +5 +10 +2 +8 +11 +2 +6 +4 +6 +24 +6 +3 +3 +3 +3 +6 +18 +4 +11 +4 +2 +5 +10 +8 +3 +9 +5 +3 +4 +5 +6 +2 +5 +7 +4 +4 +14 +6 +4 +4 +5 +5 +7 +2 +4 +3 +7 +3 +3 +6 +4 +5 +4 +4 +4 +3 +3 +3 +3 +8 +14 +2 +3 +5 +3 +2 +4 +5 +3 +7 +3 +3 +18 +3 +4 +4 +5 +7 +3 +3 +3 +13 +5 +4 +8 +211 +5 +5 +3 +5 +2 +5 +4 +2 +655 +6 +3 +5 +11 +2 +5 +3 +12 +9 +15 +11 +5 +12 +217 +2 +6 +17 +3 +3 +207 +5 +5 +4 +5 +9 +3 +2 +8 +5 +4 +3 +2 +5 +12 +4 +14 +5 +4 +2 +13 +5 +8 +4 +225 +4 +3 +4 +5 +4 +3 +3 +6 +23 +9 +2 +6 +7 +233 +4 +4 +6 +18 +3 +4 +6 +3 +4 +4 +2 +3 +7 +4 +13 +227 +4 +3 +5 +4 +2 +12 +9 +17 +3 +7 +14 +6 +4 +5 +21 +4 +8 +9 +2 +9 +25 +16 +3 +6 +4 +7 +8 +5 +2 +3 +5 +4 +3 +3 +5 +3 +3 +3 +2 +3 +19 +2 +4 +3 +4 +2 +3 +4 +4 +2 +4 +3 +3 +3 +2 +6 +3 +17 +5 +6 +4 +3 +13 +5 +3 +3 +3 +4 +9 +4 +2 +14 +12 +4 +5 +24 +4 +3 +37 +12 +11 +21 +3 +4 +3 +13 +4 +2 +3 +15 +4 +11 +4 +4 +3 +8 +3 +4 +4 +12 +8 +5 +3 +3 +4 +2 +220 +3 +5 +223 +3 +3 +3 +10 +3 +15 +4 +241 +9 +7 +3 +6 +6 +23 +4 +13 +7 +3 +4 +7 +4 +9 +3 +3 +4 +10 +5 +5 +1 +5 +24 +2 +4 +5 +5 +6 +14 +3 +8 +2 +3 +5 +13 +13 +3 +5 +2 +3 +15 +3 +4 +2 +10 +4 +4 +4 +5 +5 +3 +5 +3 +4 +7 +4 +27 +3 +6 +4 +15 +3 +5 +6 +6 +5 +4 +8 +3 +9 +2 +6 +3 +4 +3 +7 +4 +18 +3 +11 +3 +3 +8 +9 +7 +24 +3 +219 +7 +10 +4 +5 +9 +12 +2 +5 +4 +4 +4 +3 +3 +19 +5 +8 +16 +8 +6 +22 +3 +23 +3 +242 +9 +4 +3 +3 +5 +7 +3 +3 +5 +8 +3 +7 +5 +14 +8 +10 +3 +4 +3 +7 +4 +6 +7 +4 +10 +4 +3 +11 +3 +7 +10 +3 +13 +6 +8 +12 +10 +5 +7 +9 +3 +4 +7 +7 +10 +8 +30 +9 +19 +4 +3 +19 +15 +4 +13 +3 +215 +223 +4 +7 +4 +8 +17 +16 +3 +7 +6 +5 +5 +4 +12 +3 +7 +4 +4 +13 +4 +5 +2 +5 +6 +5 +6 +6 +7 +10 +18 +23 +9 +3 +3 +6 +5 +2 +4 +2 +7 +3 +3 +2 +5 +5 +14 +10 +224 +6 +3 +4 +3 +7 +5 +9 +3 +6 +4 +2 +5 +11 +4 +3 +3 +2 +8 +4 +7 +4 +10 +7 +3 +3 +18 +18 +17 +3 +3 +3 +4 +5 +3 +3 +4 +12 +7 +3 +11 +13 +5 +4 +7 +13 +5 +4 +11 +3 +12 +3 +6 +4 +4 +21 +4 +6 +9 +5 +3 +10 +8 +4 +6 +4 +4 +6 +5 +4 +8 +6 +4 +6 +4 +4 +5 +9 +6 +3 +4 +2 +9 +3 +18 +2 +4 +3 +13 +3 +6 +6 +8 +7 +9 +3 +2 +16 +3 +4 +6 +3 +2 +33 +22 +14 +4 +9 +12 +4 +5 +6 +3 +23 +9 +4 +3 +5 +5 +3 +4 +5 +3 +5 +3 +10 +4 +5 +5 +8 +4 +4 +6 +8 +5 +4 +3 +4 +6 +3 +3 +3 +5 +9 +12 +6 +5 +9 +3 +5 +3 +2 +2 +2 +18 +3 +2 +21 +2 +5 +4 +6 +4 +5 +10 +3 +9 +3 +2 +10 +7 +3 +6 +6 +4 +4 +8 +12 +7 +3 +7 +3 +3 +9 +3 +4 +5 +4 +4 +5 +5 +10 +15 +4 +4 +14 +6 +227 +3 +14 +5 +216 +22 +5 +4 +2 +2 +6 +3 +4 +2 +9 +9 +4 +3 +28 +13 +11 +4 +5 +3 +3 +2 +3 +3 +5 +3 +4 +3 +5 +23 +26 +3 +4 +5 +6 +4 +6 +3 +5 +5 +3 +4 +3 +2 +2 +2 +7 +14 +3 +6 +7 +17 +2 +2 +15 +14 +16 +4 +6 +7 +13 +6 +4 +5 +6 +16 +3 +3 +28 +3 +6 +15 +3 +9 +2 +4 +6 +3 +3 +22 +4 +12 +6 +7 +2 +5 +4 +10 +3 +16 +6 +9 +2 +5 +12 +7 +5 +5 +5 +5 +2 +11 +9 +17 +4 +3 +11 +7 +3 +5 +15 +4 +3 +4 +211 +8 +7 +5 +4 +7 +6 +7 +6 +3 +6 +5 +6 +5 +3 +4 +4 +26 +4 +6 +10 +4 +4 +3 +2 +3 +3 +4 +5 +9 +3 +9 +4 +4 +5 +5 +8 +2 +4 +2 +3 +8 +4 +11 +19 +5 +8 +6 +3 +5 +6 +12 +3 +2 +4 +16 +12 +3 +4 +4 +8 +6 +5 +6 +6 +219 +8 +222 +6 +16 +3 +13 +19 +5 +4 +3 +11 +6 +10 +4 +7 +7 +12 +5 +3 +3 +5 +6 +10 +3 +8 +2 +5 +4 +7 +2 +4 +4 +2 +12 +9 +6 
+4 +2 +40 +2 +4 +10 +4 +223 +4 +2 +20 +6 +7 +24 +5 +4 +5 +2 +20 +16 +6 +5 +13 +2 +3 +3 +19 +3 +2 +4 +5 +6 +7 +11 +12 +5 +6 +7 +7 +3 +5 +3 +5 +3 +14 +3 +4 +4 +2 +11 +1 +7 +3 +9 +6 +11 +12 +5 +8 +6 +221 +4 +2 +12 +4 +3 +15 +4 +5 +226 +7 +218 +7 +5 +4 +5 +18 +4 +5 +9 +4 +4 +2 +9 +18 +18 +9 +5 +6 +6 +3 +3 +7 +3 +5 +4 +4 +4 +12 +3 +6 +31 +5 +4 +7 +3 +6 +5 +6 +5 +11 +2 +2 +11 +11 +6 +7 +5 +8 +7 +10 +5 +23 +7 +4 +3 +5 +34 +2 +5 +23 +7 +3 +6 +8 +4 +4 +4 +2 +5 +3 +8 +5 +4 +8 +25 +2 +3 +17 +8 +3 +4 +8 +7 +3 +15 +6 +5 +7 +21 +9 +5 +6 +6 +5 +3 +2 +3 +10 +3 +6 +3 +14 +7 +4 +4 +8 +7 +8 +2 +6 +12 +4 +213 +6 +5 +21 +8 +2 +5 +23 +3 +11 +2 +3 +6 +25 +2 +3 +6 +7 +6 +6 +4 +4 +6 +3 +17 +9 +7 +6 +4 +3 +10 +7 +2 +3 +3 +3 +11 +8 +3 +7 +6 +4 +14 +36 +3 +4 +3 +3 +22 +13 +21 +4 +2 +7 +4 +4 +17 +15 +3 +7 +11 +2 +4 +7 +6 +209 +6 +3 +2 +2 +24 +4 +9 +4 +3 +3 +3 +29 +2 +2 +4 +3 +3 +5 +4 +6 +3 +3 +2 +4 diff --git a/vendor/github.com/beorn7/perks/quantile/stream.go b/vendor/github.com/beorn7/perks/quantile/stream.go new file mode 100644 index 0000000..d7d14f8 --- /dev/null +++ b/vendor/github.com/beorn7/perks/quantile/stream.go @@ -0,0 +1,316 @@ +// Package quantile computes approximate quantiles over an unbounded data +// stream within low memory and CPU bounds. +// +// A small amount of accuracy is traded to achieve the above properties. +// +// Multiple streams can be merged before calling Query to generate a single set +// of results. This is meaningful when the streams represent the same type of +// data. See Merge and Samples. +// +// For more detailed information about the algorithm used, see: +// +// Effective Computation of Biased Quantiles over Data Streams +// +// http://www.cs.rutgers.edu/~muthu/bquant.pdf +package quantile + +import ( + "math" + "sort" +) + +// Sample holds an observed value and meta information for compression. JSON +// tags have been added for convenience. +type Sample struct { + Value float64 `json:",string"` + Width float64 `json:",string"` + Delta float64 `json:",string"` +} + +// Samples represents a slice of samples. It implements sort.Interface. +type Samples []Sample + +func (a Samples) Len() int { return len(a) } +func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value } +func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +type invariant func(s *stream, r float64) float64 + +// NewLowBiased returns an initialized Stream for low-biased quantiles +// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but +// error guarantees can still be given even for the lower ranks of the data +// distribution. +// +// The provided epsilon is a relative error, i.e. the true quantile of a value +// returned by a query is guaranteed to be within (1±Epsilon)*Quantile. +// +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error +// properties. +func NewLowBiased(epsilon float64) *Stream { + ƒ := func(s *stream, r float64) float64 { + return 2 * epsilon * r + } + return newStream(ƒ) +} + +// NewHighBiased returns an initialized Stream for high-biased quantiles +// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but +// error guarantees can still be given even for the higher ranks of the data +// distribution. +// +// The provided epsilon is a relative error, i.e. the true quantile of a value +// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile). +// +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error +// properties. 
+func NewHighBiased(epsilon float64) *Stream { + ƒ := func(s *stream, r float64) float64 { + return 2 * epsilon * (s.n - r) + } + return newStream(ƒ) +} + +// NewTargeted returns an initialized Stream concerned with a particular set of +// quantile values that are supplied a priori. Knowing these a priori reduces +// space and computation time. The targets map maps the desired quantiles to +// their absolute errors, i.e. the true quantile of a value returned by a query +// is guaranteed to be within (Quantile±Epsilon). +// +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties. +func NewTargeted(targetMap map[float64]float64) *Stream { + // Convert map to slice to avoid slow iterations on a map. + // ƒ is called on the hot path, so converting the map to a slice + // beforehand results in significant CPU savings. + targets := targetMapToSlice(targetMap) + + ƒ := func(s *stream, r float64) float64 { + var m = math.MaxFloat64 + var f float64 + for _, t := range targets { + if t.quantile*s.n <= r { + f = (2 * t.epsilon * r) / t.quantile + } else { + f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile) + } + if f < m { + m = f + } + } + return m + } + return newStream(ƒ) +} + +type target struct { + quantile float64 + epsilon float64 +} + +func targetMapToSlice(targetMap map[float64]float64) []target { + targets := make([]target, 0, len(targetMap)) + + for quantile, epsilon := range targetMap { + t := target{ + quantile: quantile, + epsilon: epsilon, + } + targets = append(targets, t) + } + + return targets +} + +// Stream computes quantiles for a stream of float64s. It is not thread-safe by +// design. Take care when using across multiple goroutines. +type Stream struct { + *stream + b Samples + sorted bool +} + +func newStream(ƒ invariant) *Stream { + x := &stream{ƒ: ƒ} + return &Stream{x, make(Samples, 0, 500), true} +} + +// Insert inserts v into the stream. +func (s *Stream) Insert(v float64) { + s.insert(Sample{Value: v, Width: 1}) +} + +func (s *Stream) insert(sample Sample) { + s.b = append(s.b, sample) + s.sorted = false + if len(s.b) == cap(s.b) { + s.flush() + } +} + +// Query returns the computed qth percentiles value. If s was created with +// NewTargeted, and q is not in the set of quantiles provided a priori, Query +// will return an unspecified result. +func (s *Stream) Query(q float64) float64 { + if !s.flushed() { + // Fast path when there hasn't been enough data for a flush; + // this also yields better accuracy for small sets of data. + l := len(s.b) + if l == 0 { + return 0 + } + i := int(math.Ceil(float64(l) * q)) + if i > 0 { + i -= 1 + } + s.maybeSort() + return s.b[i].Value + } + s.flush() + return s.stream.query(q) +} + +// Merge merges samples into the underlying streams samples. This is handy when +// merging multiple streams from separate threads, database shards, etc. +// +// ATTENTION: This method is broken and does not yield correct results. The +// underlying algorithm is not capable of merging streams correctly. +func (s *Stream) Merge(samples Samples) { + sort.Sort(samples) + s.stream.merge(samples) +} + +// Reset reinitializes and clears the list reusing the samples buffer memory. +func (s *Stream) Reset() { + s.stream.reset() + s.b = s.b[:0] +} + +// Samples returns stream samples held by s. +func (s *Stream) Samples() Samples { + if !s.flushed() { + return s.b + } + s.flush() + return s.stream.samples() +} + +// Count returns the total number of samples observed in the stream +// since initialization. 
+func (s *Stream) Count() int { + return len(s.b) + s.stream.count() +} + +func (s *Stream) flush() { + s.maybeSort() + s.stream.merge(s.b) + s.b = s.b[:0] +} + +func (s *Stream) maybeSort() { + if !s.sorted { + s.sorted = true + sort.Sort(s.b) + } +} + +func (s *Stream) flushed() bool { + return len(s.stream.l) > 0 +} + +type stream struct { + n float64 + l []Sample + ƒ invariant +} + +func (s *stream) reset() { + s.l = s.l[:0] + s.n = 0 +} + +func (s *stream) insert(v float64) { + s.merge(Samples{{v, 1, 0}}) +} + +func (s *stream) merge(samples Samples) { + // TODO(beorn7): This tries to merge not only individual samples, but + // whole summaries. The paper doesn't mention merging summaries at + // all. Unittests show that the merging is inaccurate. Find out how to + // do merges properly. + var r float64 + i := 0 + for _, sample := range samples { + for ; i < len(s.l); i++ { + c := s.l[i] + if c.Value > sample.Value { + // Insert at position i. + s.l = append(s.l, Sample{}) + copy(s.l[i+1:], s.l[i:]) + s.l[i] = Sample{ + sample.Value, + sample.Width, + math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1), + // TODO(beorn7): How to calculate delta correctly? + } + i++ + goto inserted + } + r += c.Width + } + s.l = append(s.l, Sample{sample.Value, sample.Width, 0}) + i++ + inserted: + s.n += sample.Width + r += sample.Width + } + s.compress() +} + +func (s *stream) count() int { + return int(s.n) +} + +func (s *stream) query(q float64) float64 { + t := math.Ceil(q * s.n) + t += math.Ceil(s.ƒ(s, t) / 2) + p := s.l[0] + var r float64 + for _, c := range s.l[1:] { + r += p.Width + if r+c.Width+c.Delta > t { + return p.Value + } + p = c + } + return p.Value +} + +func (s *stream) compress() { + if len(s.l) < 2 { + return + } + x := s.l[len(s.l)-1] + xi := len(s.l) - 1 + r := s.n - 1 - x.Width + + for i := len(s.l) - 2; i >= 0; i-- { + c := s.l[i] + if c.Width+x.Width+x.Delta <= s.ƒ(s, r) { + x.Width += c.Width + s.l[xi] = x + // Remove element at i. + copy(s.l[i:], s.l[i+1:]) + s.l = s.l[:len(s.l)-1] + xi -= 1 + } else { + x = c + xi = i + } + r -= c.Width + } +} + +func (s *stream) samples() Samples { + samples := make(Samples, len(s.l)) + copy(samples, s.l) + return samples +} diff --git a/vendor/github.com/cespare/xxhash/v2/LICENSE.txt b/vendor/github.com/cespare/xxhash/v2/LICENSE.txt new file mode 100644 index 0000000..24b5306 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/LICENSE.txt @@ -0,0 +1,22 @@ +Copyright (c) 2016 Caleb Spare + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
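The `quantile` package whose stream.go appears above is the biased-quantile estimator that Prometheus client summaries build on. As a reading aid, here is a minimal usage sketch; it is not part of the vendored diff, and the target quantiles, error bounds, and sample values are made up for illustration.

```go
package main

import (
	"fmt"
	"math/rand"

	"github.com/beorn7/perks/quantile"
)

func main() {
	// Map each target quantile to its allowed absolute error (see NewTargeted above).
	targets := map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}
	s := quantile.NewTargeted(targets)

	// Feed the stream some observations, e.g. request latencies in milliseconds.
	for i := 0; i < 10000; i++ {
		s.Insert(rand.Float64() * 100)
	}

	fmt.Printf("p50=%.1f p99=%.1f n=%d\n", s.Query(0.5), s.Query(0.99), s.Count())
}
```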
diff --git a/vendor/github.com/cespare/xxhash/v2/README.md b/vendor/github.com/cespare/xxhash/v2/README.md new file mode 100644 index 0000000..8bf0e5b --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/README.md @@ -0,0 +1,72 @@ +# xxhash + +[![Go Reference](https://pkg.go.dev/badge/github.com/cespare/xxhash/v2.svg)](https://pkg.go.dev/github.com/cespare/xxhash/v2) +[![Test](https://github.com/cespare/xxhash/actions/workflows/test.yml/badge.svg)](https://github.com/cespare/xxhash/actions/workflows/test.yml) + +xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a +high-quality hashing algorithm that is much faster than anything in the Go +standard library. + +This package provides a straightforward API: + +``` +func Sum64(b []byte) uint64 +func Sum64String(s string) uint64 +type Digest struct{ ... } + func New() *Digest +``` + +The `Digest` type implements hash.Hash64. Its key methods are: + +``` +func (*Digest) Write([]byte) (int, error) +func (*Digest) WriteString(string) (int, error) +func (*Digest) Sum64() uint64 +``` + +The package is written with optimized pure Go and also contains even faster +assembly implementations for amd64 and arm64. If desired, the `purego` build tag +opts into using the Go code even on those architectures. + +[xxHash]: http://cyan4973.github.io/xxHash/ + +## Compatibility + +This package is in a module and the latest code is in version 2 of the module. +You need a version of Go with at least "minimal module compatibility" to use +github.com/cespare/xxhash/v2: + +* 1.9.7+ for Go 1.9 +* 1.10.3+ for Go 1.10 +* Go 1.11 or later + +I recommend using the latest release of Go. + +## Benchmarks + +Here are some quick benchmarks comparing the pure-Go and assembly +implementations of Sum64. + +| input size | purego | asm | +| ---------- | --------- | --------- | +| 4 B | 1.3 GB/s | 1.2 GB/s | +| 16 B | 2.9 GB/s | 3.5 GB/s | +| 100 B | 6.9 GB/s | 8.1 GB/s | +| 4 KB | 11.7 GB/s | 16.7 GB/s | +| 10 MB | 12.0 GB/s | 17.3 GB/s | + +These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C +CPU using the following commands under Go 1.19.2: + +``` +benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$') +benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$') +``` + +## Projects using this package + +- [InfluxDB](https://github.com/influxdata/influxdb) +- [Prometheus](https://github.com/prometheus/prometheus) +- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics) +- [FreeCache](https://github.com/coocood/freecache) +- [FastCache](https://github.com/VictoriaMetrics/fastcache) diff --git a/vendor/github.com/cespare/xxhash/v2/testall.sh b/vendor/github.com/cespare/xxhash/v2/testall.sh new file mode 100644 index 0000000..94b9c44 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/testall.sh @@ -0,0 +1,10 @@ +#!/bin/bash +set -eu -o pipefail + +# Small convenience script for running the tests with various combinations of +# arch/tags. This assumes we're running on amd64 and have qemu available. + +go test ./... +go test -tags purego ./... +GOARCH=arm64 go test +GOARCH=arm64 go test -tags purego diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash.go b/vendor/github.com/cespare/xxhash/v2/xxhash.go new file mode 100644 index 0000000..a9e0d45 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash.go @@ -0,0 +1,228 @@ +// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described +// at http://cyan4973.github.io/xxHash/. 
+package xxhash + +import ( + "encoding/binary" + "errors" + "math/bits" +) + +const ( + prime1 uint64 = 11400714785074694791 + prime2 uint64 = 14029467366897019727 + prime3 uint64 = 1609587929392839161 + prime4 uint64 = 9650029242287828579 + prime5 uint64 = 2870177450012600261 +) + +// Store the primes in an array as well. +// +// The consts are used when possible in Go code to avoid MOVs but we need a +// contiguous array of the assembly code. +var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5} + +// Digest implements hash.Hash64. +type Digest struct { + v1 uint64 + v2 uint64 + v3 uint64 + v4 uint64 + total uint64 + mem [32]byte + n int // how much of mem is used +} + +// New creates a new Digest that computes the 64-bit xxHash algorithm. +func New() *Digest { + var d Digest + d.Reset() + return &d +} + +// Reset clears the Digest's state so that it can be reused. +func (d *Digest) Reset() { + d.v1 = primes[0] + prime2 + d.v2 = prime2 + d.v3 = 0 + d.v4 = -primes[0] + d.total = 0 + d.n = 0 +} + +// Size always returns 8 bytes. +func (d *Digest) Size() int { return 8 } + +// BlockSize always returns 32 bytes. +func (d *Digest) BlockSize() int { return 32 } + +// Write adds more data to d. It always returns len(b), nil. +func (d *Digest) Write(b []byte) (n int, err error) { + n = len(b) + d.total += uint64(n) + + memleft := d.mem[d.n&(len(d.mem)-1):] + + if d.n+n < 32 { + // This new data doesn't even fill the current block. + copy(memleft, b) + d.n += n + return + } + + if d.n > 0 { + // Finish off the partial block. + c := copy(memleft, b) + d.v1 = round(d.v1, u64(d.mem[0:8])) + d.v2 = round(d.v2, u64(d.mem[8:16])) + d.v3 = round(d.v3, u64(d.mem[16:24])) + d.v4 = round(d.v4, u64(d.mem[24:32])) + b = b[c:] + d.n = 0 + } + + if len(b) >= 32 { + // One or more full blocks left. + nw := writeBlocks(d, b) + b = b[nw:] + } + + // Store any remaining partial block. + copy(d.mem[:], b) + d.n = len(b) + + return +} + +// Sum appends the current hash to b and returns the resulting slice. +func (d *Digest) Sum(b []byte) []byte { + s := d.Sum64() + return append( + b, + byte(s>>56), + byte(s>>48), + byte(s>>40), + byte(s>>32), + byte(s>>24), + byte(s>>16), + byte(s>>8), + byte(s), + ) +} + +// Sum64 returns the current hash. +func (d *Digest) Sum64() uint64 { + var h uint64 + + if d.total >= 32 { + v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = d.v3 + prime5 + } + + h += d.total + + b := d.mem[:d.n&(len(d.mem)-1)] + for ; len(b) >= 8; b = b[8:] { + k1 := round(0, u64(b[:8])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if len(b) >= 4 { + h ^= uint64(u32(b[:4])) * prime1 + h = rol23(h)*prime2 + prime3 + b = b[4:] + } + for ; len(b) > 0; b = b[1:] { + h ^= uint64(b[0]) * prime5 + h = rol11(h) * prime1 + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +const ( + magic = "xxh\x06" + marshaledSize = len(magic) + 8*5 + 32 +) + +// MarshalBinary implements the encoding.BinaryMarshaler interface. +func (d *Digest) MarshalBinary() ([]byte, error) { + b := make([]byte, 0, marshaledSize) + b = append(b, magic...) + b = appendUint64(b, d.v1) + b = appendUint64(b, d.v2) + b = appendUint64(b, d.v3) + b = appendUint64(b, d.v4) + b = appendUint64(b, d.total) + b = append(b, d.mem[:d.n]...) 
+ b = b[:len(b)+len(d.mem)-d.n] + return b, nil +} + +// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. +func (d *Digest) UnmarshalBinary(b []byte) error { + if len(b) < len(magic) || string(b[:len(magic)]) != magic { + return errors.New("xxhash: invalid hash state identifier") + } + if len(b) != marshaledSize { + return errors.New("xxhash: invalid hash state size") + } + b = b[len(magic):] + b, d.v1 = consumeUint64(b) + b, d.v2 = consumeUint64(b) + b, d.v3 = consumeUint64(b) + b, d.v4 = consumeUint64(b) + b, d.total = consumeUint64(b) + copy(d.mem[:], b) + d.n = int(d.total % uint64(len(d.mem))) + return nil +} + +func appendUint64(b []byte, x uint64) []byte { + var a [8]byte + binary.LittleEndian.PutUint64(a[:], x) + return append(b, a[:]...) +} + +func consumeUint64(b []byte) ([]byte, uint64) { + x := u64(b) + return b[8:], x +} + +func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) } +func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) } + +func round(acc, input uint64) uint64 { + acc += input * prime2 + acc = rol31(acc) + acc *= prime1 + return acc +} + +func mergeRound(acc, val uint64) uint64 { + val = round(0, val) + acc ^= val + acc = acc*prime1 + prime4 + return acc +} + +func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) } +func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) } +func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) } +func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) } +func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) } +func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) } +func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) } +func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) } diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s new file mode 100644 index 0000000..3e8b132 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s @@ -0,0 +1,209 @@ +//go:build !appengine && gc && !purego +// +build !appengine +// +build gc +// +build !purego + +#include "textflag.h" + +// Registers: +#define h AX +#define d AX +#define p SI // pointer to advance through b +#define n DX +#define end BX // loop end +#define v1 R8 +#define v2 R9 +#define v3 R10 +#define v4 R11 +#define x R12 +#define prime1 R13 +#define prime2 R14 +#define prime4 DI + +#define round(acc, x) \ + IMULQ prime2, x \ + ADDQ x, acc \ + ROLQ $31, acc \ + IMULQ prime1, acc + +// round0 performs the operation x = round(0, x). +#define round0(x) \ + IMULQ prime2, x \ + ROLQ $31, x \ + IMULQ prime1, x + +// mergeRound applies a merge round on the two registers acc and x. +// It assumes that prime1, prime2, and prime4 have been loaded. +#define mergeRound(acc, x) \ + round0(x) \ + XORQ x, acc \ + IMULQ prime1, acc \ + ADDQ prime4, acc + +// blockLoop processes as many 32-byte blocks as possible, +// updating v1, v2, v3, and v4. It assumes that there is at least one block +// to process. +#define blockLoop() \ +loop: \ + MOVQ +0(p), x \ + round(v1, x) \ + MOVQ +8(p), x \ + round(v2, x) \ + MOVQ +16(p), x \ + round(v3, x) \ + MOVQ +24(p), x \ + round(v4, x) \ + ADDQ $32, p \ + CMPQ p, end \ + JLE loop + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 + // Load fixed primes. + MOVQ ·primes+0(SB), prime1 + MOVQ ·primes+8(SB), prime2 + MOVQ ·primes+24(SB), prime4 + + // Load slice. 
+ MOVQ b_base+0(FP), p + MOVQ b_len+8(FP), n + LEAQ (p)(n*1), end + + // The first loop limit will be len(b)-32. + SUBQ $32, end + + // Check whether we have at least one block. + CMPQ n, $32 + JLT noBlocks + + // Set up initial state (v1, v2, v3, v4). + MOVQ prime1, v1 + ADDQ prime2, v1 + MOVQ prime2, v2 + XORQ v3, v3 + XORQ v4, v4 + SUBQ prime1, v4 + + blockLoop() + + MOVQ v1, h + ROLQ $1, h + MOVQ v2, x + ROLQ $7, x + ADDQ x, h + MOVQ v3, x + ROLQ $12, x + ADDQ x, h + MOVQ v4, x + ROLQ $18, x + ADDQ x, h + + mergeRound(h, v1) + mergeRound(h, v2) + mergeRound(h, v3) + mergeRound(h, v4) + + JMP afterBlocks + +noBlocks: + MOVQ ·primes+32(SB), h + +afterBlocks: + ADDQ n, h + + ADDQ $24, end + CMPQ p, end + JG try4 + +loop8: + MOVQ (p), x + ADDQ $8, p + round0(x) + XORQ x, h + ROLQ $27, h + IMULQ prime1, h + ADDQ prime4, h + + CMPQ p, end + JLE loop8 + +try4: + ADDQ $4, end + CMPQ p, end + JG try1 + + MOVL (p), x + ADDQ $4, p + IMULQ prime1, x + XORQ x, h + + ROLQ $23, h + IMULQ prime2, h + ADDQ ·primes+16(SB), h + +try1: + ADDQ $4, end + CMPQ p, end + JGE finalize + +loop1: + MOVBQZX (p), x + ADDQ $1, p + IMULQ ·primes+32(SB), x + XORQ x, h + ROLQ $11, h + IMULQ prime1, h + + CMPQ p, end + JL loop1 + +finalize: + MOVQ h, x + SHRQ $33, x + XORQ x, h + IMULQ prime2, h + MOVQ h, x + SHRQ $29, x + XORQ x, h + IMULQ ·primes+16(SB), h + MOVQ h, x + SHRQ $32, x + XORQ x, h + + MOVQ h, ret+24(FP) + RET + +// func writeBlocks(d *Digest, b []byte) int +TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 + // Load fixed primes needed for round. + MOVQ ·primes+0(SB), prime1 + MOVQ ·primes+8(SB), prime2 + + // Load slice. + MOVQ b_base+8(FP), p + MOVQ b_len+16(FP), n + LEAQ (p)(n*1), end + SUBQ $32, end + + // Load vN from d. + MOVQ s+0(FP), d + MOVQ 0(d), v1 + MOVQ 8(d), v2 + MOVQ 16(d), v3 + MOVQ 24(d), v4 + + // We don't need to check the loop condition here; this function is + // always called with at least one block of data to process. + blockLoop() + + // Copy vN back to d. + MOVQ v1, 0(d) + MOVQ v2, 8(d) + MOVQ v3, 16(d) + MOVQ v4, 24(d) + + // The number of bytes written is p minus the old base pointer. + SUBQ b_base+8(FP), p + MOVQ p, ret+32(FP) + + RET diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s new file mode 100644 index 0000000..7e3145a --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s @@ -0,0 +1,183 @@ +//go:build !appengine && gc && !purego +// +build !appengine +// +build gc +// +build !purego + +#include "textflag.h" + +// Registers: +#define digest R1 +#define h R2 // return value +#define p R3 // input pointer +#define n R4 // input length +#define nblocks R5 // n / 32 +#define prime1 R7 +#define prime2 R8 +#define prime3 R9 +#define prime4 R10 +#define prime5 R11 +#define v1 R12 +#define v2 R13 +#define v3 R14 +#define v4 R15 +#define x1 R20 +#define x2 R21 +#define x3 R22 +#define x4 R23 + +#define round(acc, x) \ + MADD prime2, acc, x, acc \ + ROR $64-31, acc \ + MUL prime1, acc + +// round0 performs the operation x = round(0, x). +#define round0(x) \ + MUL prime2, x \ + ROR $64-31, x \ + MUL prime1, x + +#define mergeRound(acc, x) \ + round0(x) \ + EOR x, acc \ + MADD acc, prime4, prime1, acc + +// blockLoop processes as many 32-byte blocks as possible, +// updating v1, v2, v3, and v4. It assumes that n >= 32. 
+#define blockLoop() \ + LSR $5, n, nblocks \ + PCALIGN $16 \ + loop: \ + LDP.P 16(p), (x1, x2) \ + LDP.P 16(p), (x3, x4) \ + round(v1, x1) \ + round(v2, x2) \ + round(v3, x3) \ + round(v4, x4) \ + SUB $1, nblocks \ + CBNZ nblocks, loop + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 + LDP b_base+0(FP), (p, n) + + LDP ·primes+0(SB), (prime1, prime2) + LDP ·primes+16(SB), (prime3, prime4) + MOVD ·primes+32(SB), prime5 + + CMP $32, n + CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 } + BLT afterLoop + + ADD prime1, prime2, v1 + MOVD prime2, v2 + MOVD $0, v3 + NEG prime1, v4 + + blockLoop() + + ROR $64-1, v1, x1 + ROR $64-7, v2, x2 + ADD x1, x2 + ROR $64-12, v3, x3 + ROR $64-18, v4, x4 + ADD x3, x4 + ADD x2, x4, h + + mergeRound(h, v1) + mergeRound(h, v2) + mergeRound(h, v3) + mergeRound(h, v4) + +afterLoop: + ADD n, h + + TBZ $4, n, try8 + LDP.P 16(p), (x1, x2) + + round0(x1) + + // NOTE: here and below, sequencing the EOR after the ROR (using a + // rotated register) is worth a small but measurable speedup for small + // inputs. + ROR $64-27, h + EOR x1 @> 64-27, h, h + MADD h, prime4, prime1, h + + round0(x2) + ROR $64-27, h + EOR x2 @> 64-27, h, h + MADD h, prime4, prime1, h + +try8: + TBZ $3, n, try4 + MOVD.P 8(p), x1 + + round0(x1) + ROR $64-27, h + EOR x1 @> 64-27, h, h + MADD h, prime4, prime1, h + +try4: + TBZ $2, n, try2 + MOVWU.P 4(p), x2 + + MUL prime1, x2 + ROR $64-23, h + EOR x2 @> 64-23, h, h + MADD h, prime3, prime2, h + +try2: + TBZ $1, n, try1 + MOVHU.P 2(p), x3 + AND $255, x3, x1 + LSR $8, x3, x2 + + MUL prime5, x1 + ROR $64-11, h + EOR x1 @> 64-11, h, h + MUL prime1, h + + MUL prime5, x2 + ROR $64-11, h + EOR x2 @> 64-11, h, h + MUL prime1, h + +try1: + TBZ $0, n, finalize + MOVBU (p), x4 + + MUL prime5, x4 + ROR $64-11, h + EOR x4 @> 64-11, h, h + MUL prime1, h + +finalize: + EOR h >> 33, h + MUL prime2, h + EOR h >> 29, h + MUL prime3, h + EOR h >> 32, h + + MOVD h, ret+24(FP) + RET + +// func writeBlocks(d *Digest, b []byte) int +TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 + LDP ·primes+0(SB), (prime1, prime2) + + // Load state. Assume v[1-4] are stored contiguously. + MOVD d+0(FP), digest + LDP 0(digest), (v1, v2) + LDP 16(digest), (v3, v4) + + LDP b_base+8(FP), (p, n) + + blockLoop() + + // Store updated state. + STP (v1, v2), 0(digest) + STP (v3, v4), 16(digest) + + BIC $31, n + MOVD n, ret+32(FP) + RET diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go new file mode 100644 index 0000000..9216e0a --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go @@ -0,0 +1,15 @@ +//go:build (amd64 || arm64) && !appengine && gc && !purego +// +build amd64 arm64 +// +build !appengine +// +build gc +// +build !purego + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b. +// +//go:noescape +func Sum64(b []byte) uint64 + +//go:noescape +func writeBlocks(d *Digest, b []byte) int diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go new file mode 100644 index 0000000..26df13b --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go @@ -0,0 +1,76 @@ +//go:build (!amd64 && !arm64) || appengine || !gc || purego +// +build !amd64,!arm64 appengine !gc purego + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b. 
+func Sum64(b []byte) uint64 { + // A simpler version would be + // d := New() + // d.Write(b) + // return d.Sum64() + // but this is faster, particularly for small inputs. + + n := len(b) + var h uint64 + + if n >= 32 { + v1 := primes[0] + prime2 + v2 := prime2 + v3 := uint64(0) + v4 := -primes[0] + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = prime5 + } + + h += uint64(n) + + for ; len(b) >= 8; b = b[8:] { + k1 := round(0, u64(b[:8])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if len(b) >= 4 { + h ^= uint64(u32(b[:4])) * prime1 + h = rol23(h)*prime2 + prime3 + b = b[4:] + } + for ; len(b) > 0; b = b[1:] { + h ^= uint64(b[0]) * prime5 + h = rol11(h) * prime1 + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +func writeBlocks(d *Digest, b []byte) int { + v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 + n := len(b) + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4 + return n - len(b) +} diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go new file mode 100644 index 0000000..e86f1b5 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go @@ -0,0 +1,16 @@ +//go:build appengine +// +build appengine + +// This file contains the safe implementations of otherwise unsafe-using code. + +package xxhash + +// Sum64String computes the 64-bit xxHash digest of s. +func Sum64String(s string) uint64 { + return Sum64([]byte(s)) +} + +// WriteString adds more data to d. It always returns len(s), nil. +func (d *Digest) WriteString(s string) (n int, err error) { + return d.Write([]byte(s)) +} diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go new file mode 100644 index 0000000..1c1638f --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go @@ -0,0 +1,58 @@ +//go:build !appengine +// +build !appengine + +// This file encapsulates usage of unsafe. +// xxhash_safe.go contains the safe implementations. + +package xxhash + +import ( + "unsafe" +) + +// In the future it's possible that compiler optimizations will make these +// XxxString functions unnecessary by realizing that calls such as +// Sum64([]byte(s)) don't need to copy s. See https://go.dev/issue/2205. +// If that happens, even if we keep these functions they can be replaced with +// the trivial safe code. + +// NOTE: The usual way of doing an unsafe string-to-[]byte conversion is: +// +// var b []byte +// bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) +// bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data +// bh.Len = len(s) +// bh.Cap = len(s) +// +// Unfortunately, as of Go 1.15.3 the inliner's cost model assigns a high enough +// weight to this sequence of expressions that any function that uses it will +// not be inlined. Instead, the functions below use a different unsafe +// conversion designed to minimize the inliner weight and allow both to be +// inlined. 
There is also a test (TestInlining) which verifies that these are +// inlined. +// +// See https://github.com/golang/go/issues/42739 for discussion. + +// Sum64String computes the 64-bit xxHash digest of s. +// It may be faster than Sum64([]byte(s)) by avoiding a copy. +func Sum64String(s string) uint64 { + b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)})) + return Sum64(b) +} + +// WriteString adds more data to d. It always returns len(s), nil. +// It may be faster than Write([]byte(s)) by avoiding a copy. +func (d *Digest) WriteString(s string) (n int, err error) { + d.Write(*(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)}))) + // d.Write always returns len(s), nil. + // Ignoring the return output and returning these fixed values buys a + // savings of 6 in the inliner's cost model. + return len(s), nil +} + +// sliceHeader is similar to reflect.SliceHeader, but it assumes that the layout +// of the first two words is the same as the layout of a string. +type sliceHeader struct { + s string + cap int +} diff --git a/vendor/github.com/coreos/go-systemd/v22/LICENSE b/vendor/github.com/coreos/go-systemd/v22/LICENSE new file mode 100644 index 0000000..37ec93a --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/v22/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. 
+ +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. 
+ +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. 
+ +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/coreos/go-systemd/v22/NOTICE b/vendor/github.com/coreos/go-systemd/v22/NOTICE new file mode 100644 index 0000000..23a0ada --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/v22/NOTICE @@ -0,0 +1,5 @@ +CoreOS Project +Copyright 2018 CoreOS, Inc + +This product includes software developed at CoreOS, Inc. +(http://www.coreos.com/). diff --git a/vendor/github.com/coreos/go-systemd/v22/activation/files_unix.go b/vendor/github.com/coreos/go-systemd/v22/activation/files_unix.go new file mode 100644 index 0000000..bf7671d --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/v22/activation/files_unix.go @@ -0,0 +1,70 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !windows +// +build !windows + +// Package activation implements primitives for systemd socket activation. +package activation + +import ( + "os" + "strconv" + "strings" + "syscall" +) + +const ( + // listenFdsStart corresponds to `SD_LISTEN_FDS_START`. + listenFdsStart = 3 +) + +// Files returns a slice containing a `os.File` object for each +// file descriptor passed to this process via systemd fd-passing protocol. +// +// The order of the file descriptors is preserved in the returned slice. +// `unsetEnv` is typically set to `true` in order to avoid clashes in +// fd usage and to avoid leaking environment flags to child processes. +func Files(unsetEnv bool) []*os.File { + if unsetEnv { + defer os.Unsetenv("LISTEN_PID") + defer os.Unsetenv("LISTEN_FDS") + defer os.Unsetenv("LISTEN_FDNAMES") + } + + pid, err := strconv.Atoi(os.Getenv("LISTEN_PID")) + if err != nil || pid != os.Getpid() { + return nil + } + + nfds, err := strconv.Atoi(os.Getenv("LISTEN_FDS")) + if err != nil || nfds == 0 { + return nil + } + + names := strings.Split(os.Getenv("LISTEN_FDNAMES"), ":") + + files := make([]*os.File, 0, nfds) + for fd := listenFdsStart; fd < listenFdsStart+nfds; fd++ { + syscall.CloseOnExec(fd) + name := "LISTEN_FD_" + strconv.Itoa(fd) + offset := fd - listenFdsStart + if offset < len(names) && len(names[offset]) > 0 { + name = names[offset] + } + files = append(files, os.NewFile(uintptr(fd), name)) + } + + return files +} diff --git a/vendor/github.com/coreos/go-systemd/v22/activation/files_windows.go b/vendor/github.com/coreos/go-systemd/v22/activation/files_windows.go new file mode 100644 index 0000000..d391bf0 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/v22/activation/files_windows.go @@ -0,0 +1,21 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package activation + +import "os" + +func Files(unsetEnv bool) []*os.File { + return nil +} diff --git a/vendor/github.com/coreos/go-systemd/v22/activation/listeners.go b/vendor/github.com/coreos/go-systemd/v22/activation/listeners.go new file mode 100644 index 0000000..3dbe2b0 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/v22/activation/listeners.go @@ -0,0 +1,103 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package activation + +import ( + "crypto/tls" + "net" +) + +// Listeners returns a slice containing a net.Listener for each matching socket type +// passed to this process. +// +// The order of the file descriptors is preserved in the returned slice. +// Nil values are used to fill any gaps. For example if systemd were to return file descriptors +// corresponding with "udp, tcp, tcp", then the slice would contain {nil, net.Listener, net.Listener} +func Listeners() ([]net.Listener, error) { + files := Files(true) + listeners := make([]net.Listener, len(files)) + + for i, f := range files { + if pc, err := net.FileListener(f); err == nil { + listeners[i] = pc + f.Close() + } + } + return listeners, nil +} + +// ListenersWithNames maps a listener name to a set of net.Listener instances. +func ListenersWithNames() (map[string][]net.Listener, error) { + files := Files(true) + listeners := map[string][]net.Listener{} + + for _, f := range files { + if pc, err := net.FileListener(f); err == nil { + current, ok := listeners[f.Name()] + if !ok { + listeners[f.Name()] = []net.Listener{pc} + } else { + listeners[f.Name()] = append(current, pc) + } + f.Close() + } + } + return listeners, nil +} + +// TLSListeners returns a slice containing a net.listener for each matching TCP socket type +// passed to this process. +// It uses default Listeners func and forces TCP sockets handlers to use TLS based on tlsConfig. +func TLSListeners(tlsConfig *tls.Config) ([]net.Listener, error) { + listeners, err := Listeners() + + if listeners == nil || err != nil { + return nil, err + } + + if tlsConfig != nil { + for i, l := range listeners { + // Activate TLS only for TCP sockets + if l.Addr().Network() == "tcp" { + listeners[i] = tls.NewListener(l, tlsConfig) + } + } + } + + return listeners, err +} + +// TLSListenersWithNames maps a listener name to a net.Listener with +// the associated TLS configuration. +func TLSListenersWithNames(tlsConfig *tls.Config) (map[string][]net.Listener, error) { + listeners, err := ListenersWithNames() + + if listeners == nil || err != nil { + return nil, err + } + + if tlsConfig != nil { + for _, ll := range listeners { + // Activate TLS only for TCP sockets + for i, l := range ll { + if l.Addr().Network() == "tcp" { + ll[i] = tls.NewListener(l, tlsConfig) + } + } + } + } + + return listeners, err +} diff --git a/vendor/github.com/coreos/go-systemd/v22/activation/packetconns.go b/vendor/github.com/coreos/go-systemd/v22/activation/packetconns.go new file mode 100644 index 0000000..a972067 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/v22/activation/packetconns.go @@ -0,0 +1,38 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package activation + +import ( + "net" +) + +// PacketConns returns a slice containing a net.PacketConn for each matching socket type +// passed to this process. +// +// The order of the file descriptors is preserved in the returned slice. +// Nil values are used to fill any gaps. For example if systemd were to return file descriptors +// corresponding with "udp, tcp, udp", then the slice would contain {net.PacketConn, nil, net.PacketConn} +func PacketConns() ([]net.PacketConn, error) { + files := Files(true) + conns := make([]net.PacketConn, len(files)) + + for i, f := range files { + if pc, err := net.FilePacketConn(f); err == nil { + conns[i] = pc + f.Close() + } + } + return conns, nil +} diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE new file mode 100644 index 0000000..bc52e96 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/LICENSE @@ -0,0 +1,15 @@ +ISC License + +Copyright (c) 2012-2016 Dave Collins + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go new file mode 100644 index 0000000..7929947 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/bypass.go @@ -0,0 +1,145 @@ +// Copyright (c) 2015-2016 Dave Collins +// +// Permission to use, copy, modify, and distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +// NOTE: Due to the following build constraints, this file will only be compiled +// when the code is not running on Google App Engine, compiled by GopherJS, and +// "-tags safe" is not added to the go build command line. The "disableunsafe" +// tag is deprecated and thus should not be used. 
+// Go versions prior to 1.4 are disabled because they use a different layout +// for interfaces which make the implementation of unsafeReflectValue more complex. +// +build !js,!appengine,!safe,!disableunsafe,go1.4 + +package spew + +import ( + "reflect" + "unsafe" +) + +const ( + // UnsafeDisabled is a build-time constant which specifies whether or + // not access to the unsafe package is available. + UnsafeDisabled = false + + // ptrSize is the size of a pointer on the current arch. + ptrSize = unsafe.Sizeof((*byte)(nil)) +) + +type flag uintptr + +var ( + // flagRO indicates whether the value field of a reflect.Value + // is read-only. + flagRO flag + + // flagAddr indicates whether the address of the reflect.Value's + // value may be taken. + flagAddr flag +) + +// flagKindMask holds the bits that make up the kind +// part of the flags field. In all the supported versions, +// it is in the lower 5 bits. +const flagKindMask = flag(0x1f) + +// Different versions of Go have used different +// bit layouts for the flags type. This table +// records the known combinations. +var okFlags = []struct { + ro, addr flag +}{{ + // From Go 1.4 to 1.5 + ro: 1 << 5, + addr: 1 << 7, +}, { + // Up to Go tip. + ro: 1<<5 | 1<<6, + addr: 1 << 8, +}} + +var flagValOffset = func() uintptr { + field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag") + if !ok { + panic("reflect.Value has no flag field") + } + return field.Offset +}() + +// flagField returns a pointer to the flag field of a reflect.Value. +func flagField(v *reflect.Value) *flag { + return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset)) +} + +// unsafeReflectValue converts the passed reflect.Value into a one that bypasses +// the typical safety restrictions preventing access to unaddressable and +// unexported data. It works by digging the raw pointer to the underlying +// value out of the protected value and generating a new unprotected (unsafe) +// reflect.Value to it. +// +// This allows us to check for implementations of the Stringer and error +// interfaces to be used for pretty printing ordinarily unaddressable and +// inaccessible values such as unexported struct fields. +func unsafeReflectValue(v reflect.Value) reflect.Value { + if !v.IsValid() || (v.CanInterface() && v.CanAddr()) { + return v + } + flagFieldPtr := flagField(&v) + *flagFieldPtr &^= flagRO + *flagFieldPtr |= flagAddr + return v +} + +// Sanity checks against future reflect package changes +// to the type or semantics of the Value.flag field. +func init() { + field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag") + if !ok { + panic("reflect.Value has no flag field") + } + if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() { + panic("reflect.Value flag field has changed kind") + } + type t0 int + var t struct { + A t0 + // t0 will have flagEmbedRO set. + t0 + // a will have flagStickyRO set + a t0 + } + vA := reflect.ValueOf(t).FieldByName("A") + va := reflect.ValueOf(t).FieldByName("a") + vt0 := reflect.ValueOf(t).FieldByName("t0") + + // Infer flagRO from the difference between the flags + // for the (otherwise identical) fields in t. + flagPublic := *flagField(&vA) + flagWithRO := *flagField(&va) | *flagField(&vt0) + flagRO = flagPublic ^ flagWithRO + + // Infer flagAddr from the difference between a value + // taken from a pointer and not. 
+ vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A") + flagNoPtr := *flagField(&vA) + flagPtr := *flagField(&vPtrA) + flagAddr = flagNoPtr ^ flagPtr + + // Check that the inferred flags tally with one of the known versions. + for _, f := range okFlags { + if flagRO == f.ro && flagAddr == f.addr { + return + } + } + panic("reflect.Value read-only flag has changed semantics") +} diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go new file mode 100644 index 0000000..205c28d --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go @@ -0,0 +1,38 @@ +// Copyright (c) 2015-2016 Dave Collins +// +// Permission to use, copy, modify, and distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +// NOTE: Due to the following build constraints, this file will only be compiled +// when the code is running on Google App Engine, compiled by GopherJS, or +// "-tags safe" is added to the go build command line. The "disableunsafe" +// tag is deprecated and thus should not be used. +// +build js appengine safe disableunsafe !go1.4 + +package spew + +import "reflect" + +const ( + // UnsafeDisabled is a build-time constant which specifies whether or + // not access to the unsafe package is available. + UnsafeDisabled = true +) + +// unsafeReflectValue typically converts the passed reflect.Value into a one +// that bypasses the typical safety restrictions preventing access to +// unaddressable and unexported data. However, doing this relies on access to +// the unsafe package. This is a stub version which simply returns the passed +// reflect.Value when the unsafe package is not available. +func unsafeReflectValue(v reflect.Value) reflect.Value { + return v +} diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go new file mode 100644 index 0000000..1be8ce9 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/common.go @@ -0,0 +1,341 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */
+
+package spew
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"reflect"
+	"sort"
+	"strconv"
+)
+
+// Some constants in the form of bytes to avoid string overhead. This mirrors
+// the technique used in the fmt package.
+var (
+	panicBytes            = []byte("(PANIC=")
+	plusBytes             = []byte("+")
+	iBytes                = []byte("i")
+	trueBytes             = []byte("true")
+	falseBytes            = []byte("false")
+	interfaceBytes        = []byte("(interface {})")
+	commaNewlineBytes     = []byte(",\n")
+	newlineBytes          = []byte("\n")
+	openBraceBytes        = []byte("{")
+	openBraceNewlineBytes = []byte("{\n")
+	closeBraceBytes       = []byte("}")
+	asteriskBytes         = []byte("*")
+	colonBytes            = []byte(":")
+	colonSpaceBytes       = []byte(": ")
+	openParenBytes        = []byte("(")
+	closeParenBytes       = []byte(")")
+	spaceBytes            = []byte(" ")
+	pointerChainBytes     = []byte("->")
+	nilAngleBytes         = []byte("<nil>")
+	maxNewlineBytes       = []byte("<max depth reached>\n")
+	maxShortBytes         = []byte("<max>")
+	circularBytes         = []byte("<already shown>")
+	circularShortBytes    = []byte("<shown>")
+	invalidAngleBytes     = []byte("<invalid>")
+	openBracketBytes      = []byte("[")
+	closeBracketBytes     = []byte("]")
+	percentBytes          = []byte("%")
+	precisionBytes        = []byte(".")
+	openAngleBytes        = []byte("<")
+	closeAngleBytes       = []byte(">")
+	openMapBytes          = []byte("map[")
+	closeMapBytes         = []byte("]")
+	lenEqualsBytes        = []byte("len=")
+	capEqualsBytes        = []byte("cap=")
+)
+
+// hexDigits is used to map a decimal value to a hex digit.
+var hexDigits = "0123456789abcdef"
+
+// catchPanic handles any panics that might occur during the handleMethods
+// calls.
+func catchPanic(w io.Writer, v reflect.Value) {
+	if err := recover(); err != nil {
+		w.Write(panicBytes)
+		fmt.Fprintf(w, "%v", err)
+		w.Write(closeParenBytes)
+	}
+}
+
+// handleMethods attempts to call the Error and String methods on the underlying
+// type the passed reflect.Value represents and outputs the result to Writer w.
+//
+// It handles panics in any called methods by catching and displaying the error
+// as the formatted value.
+func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {
+	// We need an interface to check if the type implements the error or
+	// Stringer interface. However, the reflect package won't give us an
+	// interface on certain things like unexported struct fields in order
+	// to enforce visibility rules. We use unsafe, when it's available,
+	// to bypass these restrictions since this package does not mutate the
+	// values.
+	if !v.CanInterface() {
+		if UnsafeDisabled {
+			return false
+		}
+
+		v = unsafeReflectValue(v)
+	}
+
+	// Choose whether or not to do error and Stringer interface lookups against
+	// the base type or a pointer to the base type depending on settings.
+	// Technically calling one of these methods with a pointer receiver can
+	// mutate the value, however, types which choose to satisfy an error or
+	// Stringer interface with a pointer receiver should not be mutating their
+	// state inside these interface methods.
+	if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() {
+		v = unsafeReflectValue(v)
+	}
+	if v.CanAddr() {
+		v = v.Addr()
+	}
+
+	// Is it an error or Stringer?
+ switch iface := v.Interface().(type) { + case error: + defer catchPanic(w, v) + if cs.ContinueOnMethod { + w.Write(openParenBytes) + w.Write([]byte(iface.Error())) + w.Write(closeParenBytes) + w.Write(spaceBytes) + return false + } + + w.Write([]byte(iface.Error())) + return true + + case fmt.Stringer: + defer catchPanic(w, v) + if cs.ContinueOnMethod { + w.Write(openParenBytes) + w.Write([]byte(iface.String())) + w.Write(closeParenBytes) + w.Write(spaceBytes) + return false + } + w.Write([]byte(iface.String())) + return true + } + return false +} + +// printBool outputs a boolean value as true or false to Writer w. +func printBool(w io.Writer, val bool) { + if val { + w.Write(trueBytes) + } else { + w.Write(falseBytes) + } +} + +// printInt outputs a signed integer value to Writer w. +func printInt(w io.Writer, val int64, base int) { + w.Write([]byte(strconv.FormatInt(val, base))) +} + +// printUint outputs an unsigned integer value to Writer w. +func printUint(w io.Writer, val uint64, base int) { + w.Write([]byte(strconv.FormatUint(val, base))) +} + +// printFloat outputs a floating point value using the specified precision, +// which is expected to be 32 or 64bit, to Writer w. +func printFloat(w io.Writer, val float64, precision int) { + w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision))) +} + +// printComplex outputs a complex value using the specified float precision +// for the real and imaginary parts to Writer w. +func printComplex(w io.Writer, c complex128, floatPrecision int) { + r := real(c) + w.Write(openParenBytes) + w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision))) + i := imag(c) + if i >= 0 { + w.Write(plusBytes) + } + w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision))) + w.Write(iBytes) + w.Write(closeParenBytes) +} + +// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x' +// prefix to Writer w. +func printHexPtr(w io.Writer, p uintptr) { + // Null pointer. + num := uint64(p) + if num == 0 { + w.Write(nilAngleBytes) + return + } + + // Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix + buf := make([]byte, 18) + + // It's simpler to construct the hex string right to left. + base := uint64(16) + i := len(buf) - 1 + for num >= base { + buf[i] = hexDigits[num%base] + num /= base + i-- + } + buf[i] = hexDigits[num] + + // Add '0x' prefix. + i-- + buf[i] = 'x' + i-- + buf[i] = '0' + + // Strip unused leading bytes. + buf = buf[i:] + w.Write(buf) +} + +// valuesSorter implements sort.Interface to allow a slice of reflect.Value +// elements to be sorted. +type valuesSorter struct { + values []reflect.Value + strings []string // either nil or same len and values + cs *ConfigState +} + +// newValuesSorter initializes a valuesSorter instance, which holds a set of +// surrogate keys on which the data should be sorted. It uses flags in +// ConfigState to decide if and how to populate those surrogate keys. 
+func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface { + vs := &valuesSorter{values: values, cs: cs} + if canSortSimply(vs.values[0].Kind()) { + return vs + } + if !cs.DisableMethods { + vs.strings = make([]string, len(values)) + for i := range vs.values { + b := bytes.Buffer{} + if !handleMethods(cs, &b, vs.values[i]) { + vs.strings = nil + break + } + vs.strings[i] = b.String() + } + } + if vs.strings == nil && cs.SpewKeys { + vs.strings = make([]string, len(values)) + for i := range vs.values { + vs.strings[i] = Sprintf("%#v", vs.values[i].Interface()) + } + } + return vs +} + +// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted +// directly, or whether it should be considered for sorting by surrogate keys +// (if the ConfigState allows it). +func canSortSimply(kind reflect.Kind) bool { + // This switch parallels valueSortLess, except for the default case. + switch kind { + case reflect.Bool: + return true + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return true + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + return true + case reflect.Float32, reflect.Float64: + return true + case reflect.String: + return true + case reflect.Uintptr: + return true + case reflect.Array: + return true + } + return false +} + +// Len returns the number of values in the slice. It is part of the +// sort.Interface implementation. +func (s *valuesSorter) Len() int { + return len(s.values) +} + +// Swap swaps the values at the passed indices. It is part of the +// sort.Interface implementation. +func (s *valuesSorter) Swap(i, j int) { + s.values[i], s.values[j] = s.values[j], s.values[i] + if s.strings != nil { + s.strings[i], s.strings[j] = s.strings[j], s.strings[i] + } +} + +// valueSortLess returns whether the first value should sort before the second +// value. It is used by valueSorter.Less as part of the sort.Interface +// implementation. +func valueSortLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Bool: + return !a.Bool() && b.Bool() + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return a.Int() < b.Int() + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + return a.Uint() < b.Uint() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.String: + return a.String() < b.String() + case reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Array: + // Compare the contents of both arrays. + l := a.Len() + for i := 0; i < l; i++ { + av := a.Index(i) + bv := b.Index(i) + if av.Interface() == bv.Interface() { + continue + } + return valueSortLess(av, bv) + } + } + return a.String() < b.String() +} + +// Less returns whether the value at index i should sort before the +// value at index j. It is part of the sort.Interface implementation. +func (s *valuesSorter) Less(i, j int) bool { + if s.strings == nil { + return valueSortLess(s.values[i], s.values[j]) + } + return s.strings[i] < s.strings[j] +} + +// sortValues is a sort function that handles both native types and any type that +// can be converted to error or Stringer. Other inputs are sorted according to +// their Value.String() value to ensure display stability. 
+func sortValues(values []reflect.Value, cs *ConfigState) { + if len(values) == 0 { + return + } + sort.Sort(newValuesSorter(values, cs)) +} diff --git a/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/davecgh/go-spew/spew/config.go new file mode 100644 index 0000000..2e3d22f --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/config.go @@ -0,0 +1,306 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "fmt" + "io" + "os" +) + +// ConfigState houses the configuration options used by spew to format and +// display values. There is a global instance, Config, that is used to control +// all top-level Formatter and Dump functionality. Each ConfigState instance +// provides methods equivalent to the top-level functions. +// +// The zero value for ConfigState provides no indentation. You would typically +// want to set it to a space or a tab. +// +// Alternatively, you can use NewDefaultConfig to get a ConfigState instance +// with default settings. See the documentation of NewDefaultConfig for default +// values. +type ConfigState struct { + // Indent specifies the string to use for each indentation level. The + // global config instance that all top-level functions use set this to a + // single space by default. If you would like more indentation, you might + // set this to a tab with "\t" or perhaps two spaces with " ". + Indent string + + // MaxDepth controls the maximum number of levels to descend into nested + // data structures. The default, 0, means there is no limit. + // + // NOTE: Circular data structures are properly detected, so it is not + // necessary to set this value unless you specifically want to limit deeply + // nested data structures. + MaxDepth int + + // DisableMethods specifies whether or not error and Stringer interfaces are + // invoked for types that implement them. + DisableMethods bool + + // DisablePointerMethods specifies whether or not to check for and invoke + // error and Stringer interfaces on types which only accept a pointer + // receiver when the current type is not a pointer. + // + // NOTE: This might be an unsafe action since calling one of these methods + // with a pointer receiver could technically mutate the value, however, + // in practice, types which choose to satisify an error or Stringer + // interface with a pointer receiver should not be mutating their state + // inside these interface methods. As a result, this option relies on + // access to the unsafe package, so it will not have any effect when + // running in environments without access to the unsafe package such as + // Google App Engine or with the "safe" build tag specified. 
+ DisablePointerMethods bool + + // DisablePointerAddresses specifies whether to disable the printing of + // pointer addresses. This is useful when diffing data structures in tests. + DisablePointerAddresses bool + + // DisableCapacities specifies whether to disable the printing of capacities + // for arrays, slices, maps and channels. This is useful when diffing + // data structures in tests. + DisableCapacities bool + + // ContinueOnMethod specifies whether or not recursion should continue once + // a custom error or Stringer interface is invoked. The default, false, + // means it will print the results of invoking the custom error or Stringer + // interface and return immediately instead of continuing to recurse into + // the internals of the data type. + // + // NOTE: This flag does not have any effect if method invocation is disabled + // via the DisableMethods or DisablePointerMethods options. + ContinueOnMethod bool + + // SortKeys specifies map keys should be sorted before being printed. Use + // this to have a more deterministic, diffable output. Note that only + // native types (bool, int, uint, floats, uintptr and string) and types + // that support the error or Stringer interfaces (if methods are + // enabled) are supported, with other types sorted according to the + // reflect.Value.String() output which guarantees display stability. + SortKeys bool + + // SpewKeys specifies that, as a last resort attempt, map keys should + // be spewed to strings and sorted by those strings. This is only + // considered if SortKeys is true. + SpewKeys bool +} + +// Config is the active configuration of the top-level functions. +// The configuration can be changed by modifying the contents of spew.Config. +var Config = ConfigState{Indent: " "} + +// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the formatted string as a value that satisfies error. See NewFormatter +// for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) { + return fmt.Errorf(format, c.convertArgs(a)...) +} + +// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprint(w, c.convertArgs(a)...) +} + +// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { + return fmt.Fprintf(w, format, c.convertArgs(a)...) +} + +// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it +// passed with a Formatter interface returned by c.NewFormatter. See +// NewFormatter for formatting details. 
+// +// This function is shorthand for the following syntax: +// +// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprintln(w, c.convertArgs(a)...) +} + +// Print is a wrapper for fmt.Print that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Print(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Print(a ...interface{}) (n int, err error) { + return fmt.Print(c.convertArgs(a)...) +} + +// Printf is a wrapper for fmt.Printf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) { + return fmt.Printf(format, c.convertArgs(a)...) +} + +// Println is a wrapper for fmt.Println that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Println(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Println(a ...interface{}) (n int, err error) { + return fmt.Println(c.convertArgs(a)...) +} + +// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Sprint(a ...interface{}) string { + return fmt.Sprint(c.convertArgs(a)...) +} + +// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Sprintf(format string, a ...interface{}) string { + return fmt.Sprintf(format, c.convertArgs(a)...) +} + +// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it +// were passed with a Formatter interface returned by c.NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Sprintln(a ...interface{}) string { + return fmt.Sprintln(c.convertArgs(a)...) +} + +/* +NewFormatter returns a custom formatter that satisfies the fmt.Formatter +interface. As a result, it integrates cleanly with standard fmt package +printing functions. The formatter is useful for inline printing of smaller data +types similar to the standard %v format specifier. 
+ +The custom formatter only responds to the %v (most compact), %+v (adds pointer +addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb +combinations. Any other verbs such as %x and %q will be sent to the the +standard fmt package for formatting. In addition, the custom formatter ignores +the width and precision arguments (however they will still work on the format +specifiers not handled by the custom formatter). + +Typically this function shouldn't be called directly. It is much easier to make +use of the custom formatter by calling one of the convenience functions such as +c.Printf, c.Println, or c.Printf. +*/ +func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter { + return newFormatter(c, v) +} + +// Fdump formats and displays the passed arguments to io.Writer w. It formats +// exactly the same as Dump. +func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) { + fdump(c, w, a...) +} + +/* +Dump displays the passed parameters to standard out with newlines, customizable +indentation, and additional debug information such as complete types and all +pointer addresses used to indirect to the final value. It provides the +following features over the built-in printing facilities provided by the fmt +package: + + * Pointers are dereferenced and followed + * Circular data structures are detected and handled properly + * Custom Stringer/error interfaces are optionally invoked, including + on unexported types + * Custom types which only implement the Stringer/error interfaces via + a pointer receiver are optionally invoked when passing non-pointer + variables + * Byte arrays and slices are dumped like the hexdump -C command which + includes offsets, byte values in hex, and ASCII output + +The configuration options are controlled by modifying the public members +of c. See ConfigState for options documentation. + +See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to +get the formatted result as a string. +*/ +func (c *ConfigState) Dump(a ...interface{}) { + fdump(c, os.Stdout, a...) +} + +// Sdump returns a string with the passed arguments formatted exactly the same +// as Dump. +func (c *ConfigState) Sdump(a ...interface{}) string { + var buf bytes.Buffer + fdump(c, &buf, a...) + return buf.String() +} + +// convertArgs accepts a slice of arguments and returns a slice of the same +// length with each argument converted to a spew Formatter interface using +// the ConfigState associated with s. +func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) { + formatters = make([]interface{}, len(args)) + for index, arg := range args { + formatters[index] = newFormatter(c, arg) + } + return formatters +} + +// NewDefaultConfig returns a ConfigState with the following default settings. +// +// Indent: " " +// MaxDepth: 0 +// DisableMethods: false +// DisablePointerMethods: false +// ContinueOnMethod: false +// SortKeys: false +func NewDefaultConfig() *ConfigState { + return &ConfigState{Indent: " "} +} diff --git a/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/davecgh/go-spew/spew/doc.go new file mode 100644 index 0000000..aacaac6 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/doc.go @@ -0,0 +1,211 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* +Package spew implements a deep pretty printer for Go data structures to aid in +debugging. + +A quick overview of the additional features spew provides over the built-in +printing facilities for Go data types are as follows: + + * Pointers are dereferenced and followed + * Circular data structures are detected and handled properly + * Custom Stringer/error interfaces are optionally invoked, including + on unexported types + * Custom types which only implement the Stringer/error interfaces via + a pointer receiver are optionally invoked when passing non-pointer + variables + * Byte arrays and slices are dumped like the hexdump -C command which + includes offsets, byte values in hex, and ASCII output (only when using + Dump style) + +There are two different approaches spew allows for dumping Go data structures: + + * Dump style which prints with newlines, customizable indentation, + and additional debug information such as types and all pointer addresses + used to indirect to the final value + * A custom Formatter interface that integrates cleanly with the standard fmt + package and replaces %v, %+v, %#v, and %#+v to provide inline printing + similar to the default %v while providing the additional functionality + outlined above and passing unsupported format verbs such as %x and %q + along to fmt + +Quick Start + +This section demonstrates how to quickly get started with spew. See the +sections below for further details on formatting and configuration options. + +To dump a variable with full newlines, indentation, type, and pointer +information use Dump, Fdump, or Sdump: + spew.Dump(myVar1, myVar2, ...) + spew.Fdump(someWriter, myVar1, myVar2, ...) + str := spew.Sdump(myVar1, myVar2, ...) + +Alternatively, if you would prefer to use format strings with a compacted inline +printing style, use the convenience wrappers Printf, Fprintf, etc with +%v (most compact), %+v (adds pointer addresses), %#v (adds types), or +%#+v (adds types and pointer addresses): + spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + +Configuration Options + +Configuration of spew is handled by fields in the ConfigState type. For +convenience, all of the top-level functions use a global state available +via the spew.Config global. + +It is also possible to create a ConfigState instance that provides methods +equivalent to the top-level functions. This allows concurrent configuration +options. See the ConfigState documentation for more details. + +The following configuration options are available: + * Indent + String to use for each indentation level for Dump functions. + It is a single space by default. A popular alternative is "\t". + + * MaxDepth + Maximum number of levels to descend into nested data structures. + There is no limit by default. 
+ + * DisableMethods + Disables invocation of error and Stringer interface methods. + Method invocation is enabled by default. + + * DisablePointerMethods + Disables invocation of error and Stringer interface methods on types + which only accept pointer receivers from non-pointer variables. + Pointer method invocation is enabled by default. + + * DisablePointerAddresses + DisablePointerAddresses specifies whether to disable the printing of + pointer addresses. This is useful when diffing data structures in tests. + + * DisableCapacities + DisableCapacities specifies whether to disable the printing of + capacities for arrays, slices, maps and channels. This is useful when + diffing data structures in tests. + + * ContinueOnMethod + Enables recursion into types after invoking error and Stringer interface + methods. Recursion after method invocation is disabled by default. + + * SortKeys + Specifies map keys should be sorted before being printed. Use + this to have a more deterministic, diffable output. Note that + only native types (bool, int, uint, floats, uintptr and string) + and types which implement error or Stringer interfaces are + supported with other types sorted according to the + reflect.Value.String() output which guarantees display + stability. Natural map order is used by default. + + * SpewKeys + Specifies that, as a last resort attempt, map keys should be + spewed to strings and sorted by those strings. This is only + considered if SortKeys is true. + +Dump Usage + +Simply call spew.Dump with a list of variables you want to dump: + + spew.Dump(myVar1, myVar2, ...) + +You may also call spew.Fdump if you would prefer to output to an arbitrary +io.Writer. For example, to dump to standard error: + + spew.Fdump(os.Stderr, myVar1, myVar2, ...) + +A third option is to call spew.Sdump to get the formatted output as a string: + + str := spew.Sdump(myVar1, myVar2, ...) + +Sample Dump Output + +See the Dump example for details on the setup of the types and variables being +shown here. + + (main.Foo) { + unexportedField: (*main.Bar)(0xf84002e210)({ + flag: (main.Flag) flagTwo, + data: (uintptr) + }), + ExportedField: (map[interface {}]interface {}) (len=1) { + (string) (len=3) "one": (bool) true + } + } + +Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C +command as shown. + ([]uint8) (len=32 cap=32) { + 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... | + 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0| + 00000020 31 32 |12| + } + +Custom Formatter + +Spew provides a custom formatter that implements the fmt.Formatter interface +so that it integrates cleanly with standard fmt package printing functions. The +formatter is useful for inline printing of smaller data types similar to the +standard %v format specifier. + +The custom formatter only responds to the %v (most compact), %+v (adds pointer +addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb +combinations. Any other verbs such as %x and %q will be sent to the the +standard fmt package for formatting. In addition, the custom formatter ignores +the width and precision arguments (however they will still work on the format +specifiers not handled by the custom formatter). + +Custom Formatter Usage + +The simplest way to make use of the spew custom formatter is to call one of the +convenience functions such as spew.Printf, spew.Println, or spew.Printf. 
The +functions have syntax you are most likely already familiar with: + + spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + spew.Println(myVar, myVar2) + spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + +See the Index for the full list convenience functions. + +Sample Formatter Output + +Double pointer to a uint8: + %v: <**>5 + %+v: <**>(0xf8400420d0->0xf8400420c8)5 + %#v: (**uint8)5 + %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5 + +Pointer to circular struct with a uint8 field and a pointer to itself: + %v: <*>{1 <*>} + %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)} + %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)} + %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)} + +See the Printf example for details on the setup of variables being shown +here. + +Errors + +Since it is possible for custom Stringer/error interfaces to panic, spew +detects them and handles them internally by printing the panic information +inline with the output. Since spew is intended to provide deep pretty printing +capabilities on structures, it intentionally does not return any errors. +*/ +package spew diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go new file mode 100644 index 0000000..f78d89f --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/dump.go @@ -0,0 +1,509 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "encoding/hex" + "fmt" + "io" + "os" + "reflect" + "regexp" + "strconv" + "strings" +) + +var ( + // uint8Type is a reflect.Type representing a uint8. It is used to + // convert cgo types to uint8 slices for hexdumping. + uint8Type = reflect.TypeOf(uint8(0)) + + // cCharRE is a regular expression that matches a cgo char. + // It is used to detect character arrays to hexdump them. + cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`) + + // cUnsignedCharRE is a regular expression that matches a cgo unsigned + // char. It is used to detect unsigned character arrays to hexdump + // them. + cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`) + + // cUint8tCharRE is a regular expression that matches a cgo uint8_t. + // It is used to detect uint8_t arrays to hexdump them. + cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`) +) + +// dumpState contains information about the state of a dump operation. +type dumpState struct { + w io.Writer + depth int + pointers map[uintptr]int + ignoreNextType bool + ignoreNextIndent bool + cs *ConfigState +} + +// indent performs indentation according to the depth level and cs.Indent +// option. 
+func (d *dumpState) indent() { + if d.ignoreNextIndent { + d.ignoreNextIndent = false + return + } + d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth)) +} + +// unpackValue returns values inside of non-nil interfaces when possible. +// This is useful for data types like structs, arrays, slices, and maps which +// can contain varying types packed inside an interface. +func (d *dumpState) unpackValue(v reflect.Value) reflect.Value { + if v.Kind() == reflect.Interface && !v.IsNil() { + v = v.Elem() + } + return v +} + +// dumpPtr handles formatting of pointers by indirecting them as necessary. +func (d *dumpState) dumpPtr(v reflect.Value) { + // Remove pointers at or below the current depth from map used to detect + // circular refs. + for k, depth := range d.pointers { + if depth >= d.depth { + delete(d.pointers, k) + } + } + + // Keep list of all dereferenced pointers to show later. + pointerChain := make([]uintptr, 0) + + // Figure out how many levels of indirection there are by dereferencing + // pointers and unpacking interfaces down the chain while detecting circular + // references. + nilFound := false + cycleFound := false + indirects := 0 + ve := v + for ve.Kind() == reflect.Ptr { + if ve.IsNil() { + nilFound = true + break + } + indirects++ + addr := ve.Pointer() + pointerChain = append(pointerChain, addr) + if pd, ok := d.pointers[addr]; ok && pd < d.depth { + cycleFound = true + indirects-- + break + } + d.pointers[addr] = d.depth + + ve = ve.Elem() + if ve.Kind() == reflect.Interface { + if ve.IsNil() { + nilFound = true + break + } + ve = ve.Elem() + } + } + + // Display type information. + d.w.Write(openParenBytes) + d.w.Write(bytes.Repeat(asteriskBytes, indirects)) + d.w.Write([]byte(ve.Type().String())) + d.w.Write(closeParenBytes) + + // Display pointer information. + if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 { + d.w.Write(openParenBytes) + for i, addr := range pointerChain { + if i > 0 { + d.w.Write(pointerChainBytes) + } + printHexPtr(d.w, addr) + } + d.w.Write(closeParenBytes) + } + + // Display dereferenced value. + d.w.Write(openParenBytes) + switch { + case nilFound: + d.w.Write(nilAngleBytes) + + case cycleFound: + d.w.Write(circularBytes) + + default: + d.ignoreNextType = true + d.dump(ve) + } + d.w.Write(closeParenBytes) +} + +// dumpSlice handles formatting of arrays and slices. Byte (uint8 under +// reflection) arrays and slices are dumped in hexdump -C fashion. +func (d *dumpState) dumpSlice(v reflect.Value) { + // Determine whether this type should be hex dumped or not. Also, + // for types which should be hexdumped, try to use the underlying data + // first, then fall back to trying to convert them to a uint8 slice. + var buf []uint8 + doConvert := false + doHexDump := false + numEntries := v.Len() + if numEntries > 0 { + vt := v.Index(0).Type() + vts := vt.String() + switch { + // C types that need to be converted. + case cCharRE.MatchString(vts): + fallthrough + case cUnsignedCharRE.MatchString(vts): + fallthrough + case cUint8tCharRE.MatchString(vts): + doConvert = true + + // Try to use existing uint8 slices and fall back to converting + // and copying if that fails. + case vt.Kind() == reflect.Uint8: + // We need an addressable interface to convert the type + // to a byte slice. However, the reflect package won't + // give us an interface on certain things like + // unexported struct fields in order to enforce + // visibility rules. 
We use unsafe, when available, to + // bypass these restrictions since this package does not + // mutate the values. + vs := v + if !vs.CanInterface() || !vs.CanAddr() { + vs = unsafeReflectValue(vs) + } + if !UnsafeDisabled { + vs = vs.Slice(0, numEntries) + + // Use the existing uint8 slice if it can be + // type asserted. + iface := vs.Interface() + if slice, ok := iface.([]uint8); ok { + buf = slice + doHexDump = true + break + } + } + + // The underlying data needs to be converted if it can't + // be type asserted to a uint8 slice. + doConvert = true + } + + // Copy and convert the underlying type if needed. + if doConvert && vt.ConvertibleTo(uint8Type) { + // Convert and copy each element into a uint8 byte + // slice. + buf = make([]uint8, numEntries) + for i := 0; i < numEntries; i++ { + vv := v.Index(i) + buf[i] = uint8(vv.Convert(uint8Type).Uint()) + } + doHexDump = true + } + } + + // Hexdump the entire slice as needed. + if doHexDump { + indent := strings.Repeat(d.cs.Indent, d.depth) + str := indent + hex.Dump(buf) + str = strings.Replace(str, "\n", "\n"+indent, -1) + str = strings.TrimRight(str, d.cs.Indent) + d.w.Write([]byte(str)) + return + } + + // Recursively call dump for each item. + for i := 0; i < numEntries; i++ { + d.dump(d.unpackValue(v.Index(i))) + if i < (numEntries - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } +} + +// dump is the main workhorse for dumping a value. It uses the passed reflect +// value to figure out what kind of object we are dealing with and formats it +// appropriately. It is a recursive function, however circular data structures +// are detected and handled properly. +func (d *dumpState) dump(v reflect.Value) { + // Handle invalid reflect values immediately. + kind := v.Kind() + if kind == reflect.Invalid { + d.w.Write(invalidAngleBytes) + return + } + + // Handle pointers specially. + if kind == reflect.Ptr { + d.indent() + d.dumpPtr(v) + return + } + + // Print type information unless already handled elsewhere. + if !d.ignoreNextType { + d.indent() + d.w.Write(openParenBytes) + d.w.Write([]byte(v.Type().String())) + d.w.Write(closeParenBytes) + d.w.Write(spaceBytes) + } + d.ignoreNextType = false + + // Display length and capacity if the built-in len and cap functions + // work with the value's kind and the len/cap itself is non-zero. + valueLen, valueCap := 0, 0 + switch v.Kind() { + case reflect.Array, reflect.Slice, reflect.Chan: + valueLen, valueCap = v.Len(), v.Cap() + case reflect.Map, reflect.String: + valueLen = v.Len() + } + if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 { + d.w.Write(openParenBytes) + if valueLen != 0 { + d.w.Write(lenEqualsBytes) + printInt(d.w, int64(valueLen), 10) + } + if !d.cs.DisableCapacities && valueCap != 0 { + if valueLen != 0 { + d.w.Write(spaceBytes) + } + d.w.Write(capEqualsBytes) + printInt(d.w, int64(valueCap), 10) + } + d.w.Write(closeParenBytes) + d.w.Write(spaceBytes) + } + + // Call Stringer/error interfaces if they exist and the handle methods flag + // is enabled + if !d.cs.DisableMethods { + if (kind != reflect.Invalid) && (kind != reflect.Interface) { + if handled := handleMethods(d.cs, d.w, v); handled { + return + } + } + } + + switch kind { + case reflect.Invalid: + // Do nothing. We should never get here since invalid has already + // been handled above. 
+ + case reflect.Bool: + printBool(d.w, v.Bool()) + + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + printInt(d.w, v.Int(), 10) + + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + printUint(d.w, v.Uint(), 10) + + case reflect.Float32: + printFloat(d.w, v.Float(), 32) + + case reflect.Float64: + printFloat(d.w, v.Float(), 64) + + case reflect.Complex64: + printComplex(d.w, v.Complex(), 32) + + case reflect.Complex128: + printComplex(d.w, v.Complex(), 64) + + case reflect.Slice: + if v.IsNil() { + d.w.Write(nilAngleBytes) + break + } + fallthrough + + case reflect.Array: + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + d.dumpSlice(v) + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.String: + d.w.Write([]byte(strconv.Quote(v.String()))) + + case reflect.Interface: + // The only time we should get here is for nil interfaces due to + // unpackValue calls. + if v.IsNil() { + d.w.Write(nilAngleBytes) + } + + case reflect.Ptr: + // Do nothing. We should never get here since pointers have already + // been handled above. + + case reflect.Map: + // nil maps should be indicated as different than empty maps + if v.IsNil() { + d.w.Write(nilAngleBytes) + break + } + + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + numEntries := v.Len() + keys := v.MapKeys() + if d.cs.SortKeys { + sortValues(keys, d.cs) + } + for i, key := range keys { + d.dump(d.unpackValue(key)) + d.w.Write(colonSpaceBytes) + d.ignoreNextIndent = true + d.dump(d.unpackValue(v.MapIndex(key))) + if i < (numEntries - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.Struct: + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + vt := v.Type() + numFields := v.NumField() + for i := 0; i < numFields; i++ { + d.indent() + vtf := vt.Field(i) + d.w.Write([]byte(vtf.Name)) + d.w.Write(colonSpaceBytes) + d.ignoreNextIndent = true + d.dump(d.unpackValue(v.Field(i))) + if i < (numFields - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.Uintptr: + printHexPtr(d.w, uintptr(v.Uint())) + + case reflect.UnsafePointer, reflect.Chan, reflect.Func: + printHexPtr(d.w, v.Pointer()) + + // There were not any other types at the time this code was written, but + // fall back to letting the default fmt package handle it in case any new + // types are added. + default: + if v.CanInterface() { + fmt.Fprintf(d.w, "%v", v.Interface()) + } else { + fmt.Fprintf(d.w, "%v", v.String()) + } + } +} + +// fdump is a helper function to consolidate the logic from the various public +// methods which take varying writers and config states. +func fdump(cs *ConfigState, w io.Writer, a ...interface{}) { + for _, arg := range a { + if arg == nil { + w.Write(interfaceBytes) + w.Write(spaceBytes) + w.Write(nilAngleBytes) + w.Write(newlineBytes) + continue + } + + d := dumpState{w: w, cs: cs} + d.pointers = make(map[uintptr]int) + d.dump(reflect.ValueOf(arg)) + d.w.Write(newlineBytes) + } +} + +// Fdump formats and displays the passed arguments to io.Writer w. 
It formats +// exactly the same as Dump. +func Fdump(w io.Writer, a ...interface{}) { + fdump(&Config, w, a...) +} + +// Sdump returns a string with the passed arguments formatted exactly the same +// as Dump. +func Sdump(a ...interface{}) string { + var buf bytes.Buffer + fdump(&Config, &buf, a...) + return buf.String() +} + +/* +Dump displays the passed parameters to standard out with newlines, customizable +indentation, and additional debug information such as complete types and all +pointer addresses used to indirect to the final value. It provides the +following features over the built-in printing facilities provided by the fmt +package: + + * Pointers are dereferenced and followed + * Circular data structures are detected and handled properly + * Custom Stringer/error interfaces are optionally invoked, including + on unexported types + * Custom types which only implement the Stringer/error interfaces via + a pointer receiver are optionally invoked when passing non-pointer + variables + * Byte arrays and slices are dumped like the hexdump -C command which + includes offsets, byte values in hex, and ASCII output + +The configuration options are controlled by an exported package global, +spew.Config. See ConfigState for options documentation. + +See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to +get the formatted result as a string. +*/ +func Dump(a ...interface{}) { + fdump(&Config, os.Stdout, a...) +} diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go new file mode 100644 index 0000000..b04edb7 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/format.go @@ -0,0 +1,419 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "fmt" + "reflect" + "strconv" + "strings" +) + +// supportedFlags is a list of all the character flags supported by fmt package. +const supportedFlags = "0-+# " + +// formatState implements the fmt.Formatter interface and contains information +// about the state of a formatting operation. The NewFormatter function can +// be used to get a new Formatter which can be used directly as arguments +// in standard fmt package printing calls. +type formatState struct { + value interface{} + fs fmt.State + depth int + pointers map[uintptr]int + ignoreNextType bool + cs *ConfigState +} + +// buildDefaultFormat recreates the original format string without precision +// and width information to pass in to fmt.Sprintf in the case of an +// unrecognized type. Unless new types are added to the language, this +// function won't ever be called. 
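The Dump, Fdump, and Sdump functions above always read the package-level spew.Config. To apply the options listed in doc.go earlier in this diff (for example DisablePointerAddresses, DisableCapacities, and SortKeys for deterministic, diff-friendly test output), a ConfigState can be used instead. A minimal sketch of that typical usage, not code taken from this diff:

```go
package main

import "github.com/davecgh/go-spew/spew"

func main() {
	type record struct {
		Name string
		Tags map[string]int
	}
	r := &record{Name: "a", Tags: map[string]int{"x": 1, "y": 2}}

	// Package-level dump using the default spew.Config.
	spew.Dump(r)

	// A ConfigState tuned for diffable output in tests: no pointer
	// addresses or capacities, and map keys sorted for stable ordering.
	cs := spew.ConfigState{
		Indent:                  "  ",
		DisablePointerAddresses: true,
		DisableCapacities:       true,
		SortKeys:                true,
	}
	cs.Dump(r)
}
```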
+func (f *formatState) buildDefaultFormat() (format string) { + buf := bytes.NewBuffer(percentBytes) + + for _, flag := range supportedFlags { + if f.fs.Flag(int(flag)) { + buf.WriteRune(flag) + } + } + + buf.WriteRune('v') + + format = buf.String() + return format +} + +// constructOrigFormat recreates the original format string including precision +// and width information to pass along to the standard fmt package. This allows +// automatic deferral of all format strings this package doesn't support. +func (f *formatState) constructOrigFormat(verb rune) (format string) { + buf := bytes.NewBuffer(percentBytes) + + for _, flag := range supportedFlags { + if f.fs.Flag(int(flag)) { + buf.WriteRune(flag) + } + } + + if width, ok := f.fs.Width(); ok { + buf.WriteString(strconv.Itoa(width)) + } + + if precision, ok := f.fs.Precision(); ok { + buf.Write(precisionBytes) + buf.WriteString(strconv.Itoa(precision)) + } + + buf.WriteRune(verb) + + format = buf.String() + return format +} + +// unpackValue returns values inside of non-nil interfaces when possible and +// ensures that types for values which have been unpacked from an interface +// are displayed when the show types flag is also set. +// This is useful for data types like structs, arrays, slices, and maps which +// can contain varying types packed inside an interface. +func (f *formatState) unpackValue(v reflect.Value) reflect.Value { + if v.Kind() == reflect.Interface { + f.ignoreNextType = false + if !v.IsNil() { + v = v.Elem() + } + } + return v +} + +// formatPtr handles formatting of pointers by indirecting them as necessary. +func (f *formatState) formatPtr(v reflect.Value) { + // Display nil if top level pointer is nil. + showTypes := f.fs.Flag('#') + if v.IsNil() && (!showTypes || f.ignoreNextType) { + f.fs.Write(nilAngleBytes) + return + } + + // Remove pointers at or below the current depth from map used to detect + // circular refs. + for k, depth := range f.pointers { + if depth >= f.depth { + delete(f.pointers, k) + } + } + + // Keep list of all dereferenced pointers to possibly show later. + pointerChain := make([]uintptr, 0) + + // Figure out how many levels of indirection there are by derferencing + // pointers and unpacking interfaces down the chain while detecting circular + // references. + nilFound := false + cycleFound := false + indirects := 0 + ve := v + for ve.Kind() == reflect.Ptr { + if ve.IsNil() { + nilFound = true + break + } + indirects++ + addr := ve.Pointer() + pointerChain = append(pointerChain, addr) + if pd, ok := f.pointers[addr]; ok && pd < f.depth { + cycleFound = true + indirects-- + break + } + f.pointers[addr] = f.depth + + ve = ve.Elem() + if ve.Kind() == reflect.Interface { + if ve.IsNil() { + nilFound = true + break + } + ve = ve.Elem() + } + } + + // Display type or indirection level depending on flags. + if showTypes && !f.ignoreNextType { + f.fs.Write(openParenBytes) + f.fs.Write(bytes.Repeat(asteriskBytes, indirects)) + f.fs.Write([]byte(ve.Type().String())) + f.fs.Write(closeParenBytes) + } else { + if nilFound || cycleFound { + indirects += strings.Count(ve.Type().String(), "*") + } + f.fs.Write(openAngleBytes) + f.fs.Write([]byte(strings.Repeat("*", indirects))) + f.fs.Write(closeAngleBytes) + } + + // Display pointer information depending on flags. 
+ if f.fs.Flag('+') && (len(pointerChain) > 0) { + f.fs.Write(openParenBytes) + for i, addr := range pointerChain { + if i > 0 { + f.fs.Write(pointerChainBytes) + } + printHexPtr(f.fs, addr) + } + f.fs.Write(closeParenBytes) + } + + // Display dereferenced value. + switch { + case nilFound: + f.fs.Write(nilAngleBytes) + + case cycleFound: + f.fs.Write(circularShortBytes) + + default: + f.ignoreNextType = true + f.format(ve) + } +} + +// format is the main workhorse for providing the Formatter interface. It +// uses the passed reflect value to figure out what kind of object we are +// dealing with and formats it appropriately. It is a recursive function, +// however circular data structures are detected and handled properly. +func (f *formatState) format(v reflect.Value) { + // Handle invalid reflect values immediately. + kind := v.Kind() + if kind == reflect.Invalid { + f.fs.Write(invalidAngleBytes) + return + } + + // Handle pointers specially. + if kind == reflect.Ptr { + f.formatPtr(v) + return + } + + // Print type information unless already handled elsewhere. + if !f.ignoreNextType && f.fs.Flag('#') { + f.fs.Write(openParenBytes) + f.fs.Write([]byte(v.Type().String())) + f.fs.Write(closeParenBytes) + } + f.ignoreNextType = false + + // Call Stringer/error interfaces if they exist and the handle methods + // flag is enabled. + if !f.cs.DisableMethods { + if (kind != reflect.Invalid) && (kind != reflect.Interface) { + if handled := handleMethods(f.cs, f.fs, v); handled { + return + } + } + } + + switch kind { + case reflect.Invalid: + // Do nothing. We should never get here since invalid has already + // been handled above. + + case reflect.Bool: + printBool(f.fs, v.Bool()) + + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + printInt(f.fs, v.Int(), 10) + + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + printUint(f.fs, v.Uint(), 10) + + case reflect.Float32: + printFloat(f.fs, v.Float(), 32) + + case reflect.Float64: + printFloat(f.fs, v.Float(), 64) + + case reflect.Complex64: + printComplex(f.fs, v.Complex(), 32) + + case reflect.Complex128: + printComplex(f.fs, v.Complex(), 64) + + case reflect.Slice: + if v.IsNil() { + f.fs.Write(nilAngleBytes) + break + } + fallthrough + + case reflect.Array: + f.fs.Write(openBracketBytes) + f.depth++ + if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { + f.fs.Write(maxShortBytes) + } else { + numEntries := v.Len() + for i := 0; i < numEntries; i++ { + if i > 0 { + f.fs.Write(spaceBytes) + } + f.ignoreNextType = true + f.format(f.unpackValue(v.Index(i))) + } + } + f.depth-- + f.fs.Write(closeBracketBytes) + + case reflect.String: + f.fs.Write([]byte(v.String())) + + case reflect.Interface: + // The only time we should get here is for nil interfaces due to + // unpackValue calls. + if v.IsNil() { + f.fs.Write(nilAngleBytes) + } + + case reflect.Ptr: + // Do nothing. We should never get here since pointers have already + // been handled above. 
+ + case reflect.Map: + // nil maps should be indicated as different than empty maps + if v.IsNil() { + f.fs.Write(nilAngleBytes) + break + } + + f.fs.Write(openMapBytes) + f.depth++ + if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { + f.fs.Write(maxShortBytes) + } else { + keys := v.MapKeys() + if f.cs.SortKeys { + sortValues(keys, f.cs) + } + for i, key := range keys { + if i > 0 { + f.fs.Write(spaceBytes) + } + f.ignoreNextType = true + f.format(f.unpackValue(key)) + f.fs.Write(colonBytes) + f.ignoreNextType = true + f.format(f.unpackValue(v.MapIndex(key))) + } + } + f.depth-- + f.fs.Write(closeMapBytes) + + case reflect.Struct: + numFields := v.NumField() + f.fs.Write(openBraceBytes) + f.depth++ + if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { + f.fs.Write(maxShortBytes) + } else { + vt := v.Type() + for i := 0; i < numFields; i++ { + if i > 0 { + f.fs.Write(spaceBytes) + } + vtf := vt.Field(i) + if f.fs.Flag('+') || f.fs.Flag('#') { + f.fs.Write([]byte(vtf.Name)) + f.fs.Write(colonBytes) + } + f.format(f.unpackValue(v.Field(i))) + } + } + f.depth-- + f.fs.Write(closeBraceBytes) + + case reflect.Uintptr: + printHexPtr(f.fs, uintptr(v.Uint())) + + case reflect.UnsafePointer, reflect.Chan, reflect.Func: + printHexPtr(f.fs, v.Pointer()) + + // There were not any other types at the time this code was written, but + // fall back to letting the default fmt package handle it if any get added. + default: + format := f.buildDefaultFormat() + if v.CanInterface() { + fmt.Fprintf(f.fs, format, v.Interface()) + } else { + fmt.Fprintf(f.fs, format, v.String()) + } + } +} + +// Format satisfies the fmt.Formatter interface. See NewFormatter for usage +// details. +func (f *formatState) Format(fs fmt.State, verb rune) { + f.fs = fs + + // Use standard formatting for verbs that are not v. + if verb != 'v' { + format := f.constructOrigFormat(verb) + fmt.Fprintf(fs, format, f.value) + return + } + + if f.value == nil { + if fs.Flag('#') { + fs.Write(interfaceBytes) + } + fs.Write(nilAngleBytes) + return + } + + f.format(reflect.ValueOf(f.value)) +} + +// newFormatter is a helper function to consolidate the logic from the various +// public methods which take varying config states. +func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter { + fs := &formatState{value: v, cs: cs} + fs.pointers = make(map[uintptr]int) + return fs +} + +/* +NewFormatter returns a custom formatter that satisfies the fmt.Formatter +interface. As a result, it integrates cleanly with standard fmt package +printing functions. The formatter is useful for inline printing of smaller data +types similar to the standard %v format specifier. + +The custom formatter only responds to the %v (most compact), %+v (adds pointer +addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb +combinations. Any other verbs such as %x and %q will be sent to the the +standard fmt package for formatting. In addition, the custom formatter ignores +the width and precision arguments (however they will still work on the format +specifiers not handled by the custom formatter). + +Typically this function shouldn't be called directly. It is much easier to make +use of the custom formatter by calling one of the convenience functions such as +Printf, Println, or Fprintf. 
+*/ +func NewFormatter(v interface{}) fmt.Formatter { + return newFormatter(&Config, v) +} diff --git a/vendor/github.com/davecgh/go-spew/spew/spew.go b/vendor/github.com/davecgh/go-spew/spew/spew.go new file mode 100644 index 0000000..32c0e33 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/spew.go @@ -0,0 +1,148 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "fmt" + "io" +) + +// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the formatted string as a value that satisfies error. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Errorf(format string, a ...interface{}) (err error) { + return fmt.Errorf(format, convertArgs(a)...) +} + +// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b)) +func Fprint(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprint(w, convertArgs(a)...) +} + +// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { + return fmt.Fprintf(w, format, convertArgs(a)...) +} + +// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it +// passed with a default Formatter interface returned by NewFormatter. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b)) +func Fprintln(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprintln(w, convertArgs(a)...) +} + +// Print is a wrapper for fmt.Print that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. 
+// +// This function is shorthand for the following syntax: +// +// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b)) +func Print(a ...interface{}) (n int, err error) { + return fmt.Print(convertArgs(a)...) +} + +// Printf is a wrapper for fmt.Printf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Printf(format string, a ...interface{}) (n int, err error) { + return fmt.Printf(format, convertArgs(a)...) +} + +// Println is a wrapper for fmt.Println that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b)) +func Println(a ...interface{}) (n int, err error) { + return fmt.Println(convertArgs(a)...) +} + +// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprint(a ...interface{}) string { + return fmt.Sprint(convertArgs(a)...) +} + +// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprintf(format string, a ...interface{}) string { + return fmt.Sprintf(format, convertArgs(a)...) +} + +// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it +// were passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprintln(a ...interface{}) string { + return fmt.Sprintln(convertArgs(a)...) +} + +// convertArgs accepts a slice of arguments and returns a slice of the same +// length with each argument converted to a default spew Formatter interface. 
+func convertArgs(args []interface{}) (formatters []interface{}) { + formatters = make([]interface{}, len(args)) + for index, arg := range args { + formatters[index] = NewFormatter(arg) + } + return formatters +} diff --git a/vendor/github.com/go-kit/log/.gitignore b/vendor/github.com/go-kit/log/.gitignore new file mode 100644 index 0000000..66fd13c --- /dev/null +++ b/vendor/github.com/go-kit/log/.gitignore @@ -0,0 +1,15 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ diff --git a/vendor/github.com/go-kit/log/LICENSE b/vendor/github.com/go-kit/log/LICENSE new file mode 100644 index 0000000..bb5bdb9 --- /dev/null +++ b/vendor/github.com/go-kit/log/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2021 Go kit + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/go-kit/log/README.md b/vendor/github.com/go-kit/log/README.md new file mode 100644 index 0000000..8067794 --- /dev/null +++ b/vendor/github.com/go-kit/log/README.md @@ -0,0 +1,156 @@ +# package log + +[![Go Reference](https://pkg.go.dev/badge/github.com/go-kit/log.svg)](https://pkg.go.dev/github.com/go-kit/log) +[![Go Report Card](https://goreportcard.com/badge/go-kit/log)](https://goreportcard.com/report/go-kit/log) +[![GitHub Actions](https://github.com/go-kit/log/actions/workflows/test.yml/badge.svg)](https://github.com/go-kit/log/actions/workflows/test.yml) +[![Coverage Status](https://coveralls.io/repos/github/go-kit/log/badge.svg?branch=main)](https://coveralls.io/github/go-kit/log?branch=main) + +`package log` provides a minimal interface for structured logging in services. +It may be wrapped to encode conventions, enforce type-safety, provide leveled +logging, and so on. It can be used for both typical application log events, +and log-structured data streams. + +## Structured logging + +Structured logging is, basically, conceding to the reality that logs are +_data_, and warrant some level of schematic rigor. Using a stricter, +key/value-oriented message format for our logs, containing contextual and +semantic information, makes it much easier to get insight into the +operational activity of the systems we build. 
Consequently, `package log` is +of the strong belief that "[the benefits of structured logging outweigh the +minimal effort involved](https://www.thoughtworks.com/radar/techniques/structured-logging)". + +Migrating from unstructured to structured logging is probably a lot easier +than you'd expect. + +```go +// Unstructured +log.Printf("HTTP server listening on %s", addr) + +// Structured +logger.Log("transport", "HTTP", "addr", addr, "msg", "listening") +``` + +## Usage + +### Typical application logging + +```go +w := log.NewSyncWriter(os.Stderr) +logger := log.NewLogfmtLogger(w) +logger.Log("question", "what is the meaning of life?", "answer", 42) + +// Output: +// question="what is the meaning of life?" answer=42 +``` + +### Contextual Loggers + +```go +func main() { + var logger log.Logger + logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) + logger = log.With(logger, "instance_id", 123) + + logger.Log("msg", "starting") + NewWorker(log.With(logger, "component", "worker")).Run() + NewSlacker(log.With(logger, "component", "slacker")).Run() +} + +// Output: +// instance_id=123 msg=starting +// instance_id=123 component=worker msg=running +// instance_id=123 component=slacker msg=running +``` + +### Interact with stdlib logger + +Redirect stdlib logger to Go kit logger. + +```go +import ( + "os" + stdlog "log" + kitlog "github.com/go-kit/log" +) + +func main() { + logger := kitlog.NewJSONLogger(kitlog.NewSyncWriter(os.Stdout)) + stdlog.SetOutput(kitlog.NewStdlibAdapter(logger)) + stdlog.Print("I sure like pie") +} + +// Output: +// {"msg":"I sure like pie","ts":"2016/01/01 12:34:56"} +``` + +Or, if, for legacy reasons, you need to pipe all of your logging through the +stdlib log package, you can redirect Go kit logger to the stdlib logger. + +```go +logger := kitlog.NewLogfmtLogger(kitlog.StdlibWriter{}) +logger.Log("legacy", true, "msg", "at least it's something") + +// Output: +// 2016/01/01 12:34:56 legacy=true msg="at least it's something" +``` + +### Timestamps and callers + +```go +var logger log.Logger +logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) +logger = log.With(logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller) + +logger.Log("msg", "hello") + +// Output: +// ts=2016-01-01T12:34:56Z caller=main.go:15 msg=hello +``` + +## Levels + +Log levels are supported via the [level package](https://godoc.org/github.com/go-kit/log/level). + +## Supported output formats + +- [Logfmt](https://brandur.org/logfmt) ([see also](https://blog.codeship.com/logfmt-a-log-format-thats-easy-to-read-and-write)) +- JSON + +## Enhancements + +`package log` is centered on the one-method Logger interface. + +```go +type Logger interface { + Log(keyvals ...interface{}) error +} +``` + +This interface, and its supporting code like is the product of much iteration +and evaluation. For more details on the evolution of the Logger interface, +see [The Hunt for a Logger Interface](http://go-talks.appspot.com/github.com/ChrisHines/talks/structured-logging/structured-logging.slide#1), +a talk by [Chris Hines](https://github.com/ChrisHines). +Also, please see +[#63](https://github.com/go-kit/kit/issues/63), +[#76](https://github.com/go-kit/kit/pull/76), +[#131](https://github.com/go-kit/kit/issues/131), +[#157](https://github.com/go-kit/kit/pull/157), +[#164](https://github.com/go-kit/kit/issues/164), and +[#252](https://github.com/go-kit/kit/pull/252) +to review historical conversations about package log and the Logger interface. 
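Because Logger is a single-method interface, any decorator that forwards to a wrapped Logger composes cleanly with the contextual loggers shown above. A small sketch of such a wrapper; `countingLogger` is a hypothetical example and is not part of the vendored package:

```go
package main

import (
	"os"
	"sync/atomic"

	"github.com/go-kit/log"
)

// countingLogger counts log events before forwarding them to the wrapped
// Logger. Any decorator of this shape satisfies the one-method Logger
// interface and composes with log.With and friends.
type countingLogger struct {
	next  log.Logger
	count uint64
}

func (c *countingLogger) Log(keyvals ...interface{}) error {
	atomic.AddUint64(&c.count, 1)
	return c.next.Log(keyvals...)
}

func main() {
	counted := &countingLogger{next: log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))}
	logger := log.With(counted, "instance_id", 123)

	logger.Log("msg", "hello")
	logger.Log("msg", "world")
	// counted.count is now 2.
}
```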
+ +Value-add packages and suggestions, +like improvements to [the leveled logger](https://godoc.org/github.com/go-kit/log/level), +are of course welcome. Good proposals should + +- Be composable with [contextual loggers](https://godoc.org/github.com/go-kit/log#With), +- Not break the behavior of [log.Caller](https://godoc.org/github.com/go-kit/log#Caller) in any wrapped contextual loggers, and +- Be friendly to packages that accept only an unadorned log.Logger. + +## Benchmarks & comparisons + +There are a few Go logging benchmarks and comparisons that include Go kit's package log. + +- [imkira/go-loggers-bench](https://github.com/imkira/go-loggers-bench) includes kit/log +- [uber-common/zap](https://github.com/uber-common/zap), a zero-alloc logging library, includes a comparison with kit/log diff --git a/vendor/github.com/go-kit/log/doc.go b/vendor/github.com/go-kit/log/doc.go new file mode 100644 index 0000000..f744382 --- /dev/null +++ b/vendor/github.com/go-kit/log/doc.go @@ -0,0 +1,116 @@ +// Package log provides a structured logger. +// +// Structured logging produces logs easily consumed later by humans or +// machines. Humans might be interested in debugging errors, or tracing +// specific requests. Machines might be interested in counting interesting +// events, or aggregating information for off-line processing. In both cases, +// it is important that the log messages are structured and actionable. +// Package log is designed to encourage both of these best practices. +// +// Basic Usage +// +// The fundamental interface is Logger. Loggers create log events from +// key/value data. The Logger interface has a single method, Log, which +// accepts a sequence of alternating key/value pairs, which this package names +// keyvals. +// +// type Logger interface { +// Log(keyvals ...interface{}) error +// } +// +// Here is an example of a function using a Logger to create log events. +// +// func RunTask(task Task, logger log.Logger) string { +// logger.Log("taskID", task.ID, "event", "starting task") +// ... +// logger.Log("taskID", task.ID, "event", "task complete") +// } +// +// The keys in the above example are "taskID" and "event". The values are +// task.ID, "starting task", and "task complete". Every key is followed +// immediately by its value. +// +// Keys are usually plain strings. Values may be any type that has a sensible +// encoding in the chosen log format. With structured logging it is a good +// idea to log simple values without formatting them. This practice allows +// the chosen logger to encode values in the most appropriate way. +// +// Contextual Loggers +// +// A contextual logger stores keyvals that it includes in all log events. +// Building appropriate contextual loggers reduces repetition and aids +// consistency in the resulting log output. With, WithPrefix, and WithSuffix +// add context to a logger. We can use With to improve the RunTask example. +// +// func RunTask(task Task, logger log.Logger) string { +// logger = log.With(logger, "taskID", task.ID) +// logger.Log("event", "starting task") +// ... +// taskHelper(task.Cmd, logger) +// ... +// logger.Log("event", "task complete") +// } +// +// The improved version emits the same log events as the original for the +// first and last calls to Log. Passing the contextual logger to taskHelper +// enables each log event created by taskHelper to include the task.ID even +// though taskHelper does not have access to that value. 
Using contextual +// loggers this way simplifies producing log output that enables tracing the +// life cycle of individual tasks. (See the Contextual example for the full +// code of the above snippet.) +// +// Dynamic Contextual Values +// +// A Valuer function stored in a contextual logger generates a new value each +// time an event is logged. The Valuer example demonstrates how this feature +// works. +// +// Valuers provide the basis for consistently logging timestamps and source +// code location. The log package defines several valuers for that purpose. +// See Timestamp, DefaultTimestamp, DefaultTimestampUTC, Caller, and +// DefaultCaller. A common logger initialization sequence that ensures all log +// entries contain a timestamp and source location looks like this: +// +// logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout)) +// logger = log.With(logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller) +// +// Concurrent Safety +// +// Applications with multiple goroutines want each log event written to the +// same logger to remain separate from other log events. Package log provides +// two simple solutions for concurrent safe logging. +// +// NewSyncWriter wraps an io.Writer and serializes each call to its Write +// method. Using a SyncWriter has the benefit that the smallest practical +// portion of the logging logic is performed within a mutex, but it requires +// the formatting Logger to make only one call to Write per log event. +// +// NewSyncLogger wraps any Logger and serializes each call to its Log method. +// Using a SyncLogger has the benefit that it guarantees each log event is +// handled atomically within the wrapped logger, but it typically serializes +// both the formatting and output logic. Use a SyncLogger if the formatting +// logger may perform multiple writes per log event. +// +// Error Handling +// +// This package relies on the practice of wrapping or decorating loggers with +// other loggers to provide composable pieces of functionality. It also means +// that Logger.Log must return an error because some +// implementations—especially those that output log data to an io.Writer—may +// encounter errors that cannot be handled locally. This in turn means that +// Loggers that wrap other loggers should return errors from the wrapped +// logger up the stack. +// +// Fortunately, the decorator pattern also provides a way to avoid the +// necessity to check for errors every time an application calls Logger.Log. +// An application required to panic whenever its Logger encounters +// an error could initialize its logger as follows. +// +// fmtlogger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout)) +// logger := log.LoggerFunc(func(keyvals ...interface{}) error { +// if err := fmtlogger.Log(keyvals...); err != nil { +// panic(err) +// } +// return nil +// }) +package log diff --git a/vendor/github.com/go-kit/log/json_logger.go b/vendor/github.com/go-kit/log/json_logger.go new file mode 100644 index 0000000..d0faed4 --- /dev/null +++ b/vendor/github.com/go-kit/log/json_logger.go @@ -0,0 +1,91 @@ +package log + +import ( + "encoding" + "encoding/json" + "fmt" + "io" + "reflect" +) + +type jsonLogger struct { + io.Writer +} + +// NewJSONLogger returns a Logger that encodes keyvals to the Writer as a +// single JSON object. Each log event produces no more than one call to +// w.Write. The passed Writer must be safe for concurrent use by multiple +// goroutines if the returned Logger will be used concurrently. 
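Putting the pieces of this package together, the initialization sequence described in doc.go above combines the JSON logger defined next, a SyncWriter, the default timestamp and caller Valuers, and the level package vendored later in this diff. A sketch of that typical setup, not code from the diff itself:

```go
package main

import (
	"os"

	"github.com/go-kit/log"
	"github.com/go-kit/log/level"
)

func main() {
	// Concurrency-safe writer, JSON encoding, a level filter, then default
	// timestamp and caller valuers added as context.
	var logger log.Logger
	logger = log.NewJSONLogger(log.NewSyncWriter(os.Stderr))
	logger = level.NewFilter(logger, level.AllowInfo())
	logger = log.With(logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller)

	level.Info(logger).Log("msg", "service started", "addr", ":8080")
	level.Debug(logger).Log("msg", "squelched by the AllowInfo filter")
}
```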
+func NewJSONLogger(w io.Writer) Logger { + return &jsonLogger{w} +} + +func (l *jsonLogger) Log(keyvals ...interface{}) error { + n := (len(keyvals) + 1) / 2 // +1 to handle case when len is odd + m := make(map[string]interface{}, n) + for i := 0; i < len(keyvals); i += 2 { + k := keyvals[i] + var v interface{} = ErrMissingValue + if i+1 < len(keyvals) { + v = keyvals[i+1] + } + merge(m, k, v) + } + enc := json.NewEncoder(l.Writer) + enc.SetEscapeHTML(false) + return enc.Encode(m) +} + +func merge(dst map[string]interface{}, k, v interface{}) { + var key string + switch x := k.(type) { + case string: + key = x + case fmt.Stringer: + key = safeString(x) + default: + key = fmt.Sprint(x) + } + + // We want json.Marshaler and encoding.TextMarshaller to take priority over + // err.Error() and v.String(). But json.Marshall (called later) does that by + // default so we force a no-op if it's one of those 2 case. + switch x := v.(type) { + case json.Marshaler: + case encoding.TextMarshaler: + case error: + v = safeError(x) + case fmt.Stringer: + v = safeString(x) + } + + dst[key] = v +} + +func safeString(str fmt.Stringer) (s string) { + defer func() { + if panicVal := recover(); panicVal != nil { + if v := reflect.ValueOf(str); v.Kind() == reflect.Ptr && v.IsNil() { + s = "NULL" + } else { + s = fmt.Sprintf("PANIC in String method: %v", panicVal) + } + } + }() + s = str.String() + return +} + +func safeError(err error) (s interface{}) { + defer func() { + if panicVal := recover(); panicVal != nil { + if v := reflect.ValueOf(err); v.Kind() == reflect.Ptr && v.IsNil() { + s = nil + } else { + s = fmt.Sprintf("PANIC in Error method: %v", panicVal) + } + } + }() + s = err.Error() + return +} diff --git a/vendor/github.com/go-kit/log/level/doc.go b/vendor/github.com/go-kit/log/level/doc.go new file mode 100644 index 0000000..fd681dc --- /dev/null +++ b/vendor/github.com/go-kit/log/level/doc.go @@ -0,0 +1,33 @@ +// Package level implements leveled logging on top of Go kit's log package. To +// use the level package, create a logger as per normal in your func main, and +// wrap it with level.NewFilter. +// +// var logger log.Logger +// logger = log.NewLogfmtLogger(os.Stderr) +// logger = level.NewFilter(logger, level.AllowInfo()) // <-- +// logger = log.With(logger, "ts", log.DefaultTimestampUTC) +// +// It's also possible to configure log level from a string. For instance from +// a flag, environment variable or configuration file. +// +// fs := flag.NewFlagSet("myprogram") +// lvl := fs.String("log", "info", "debug, info, warn, error") +// +// var logger log.Logger +// logger = log.NewLogfmtLogger(os.Stderr) +// logger = level.NewFilter(logger, level.Allow(level.ParseDefault(*lvl, level.InfoValue()))) // <-- +// logger = log.With(logger, "ts", log.DefaultTimestampUTC) +// +// Then, at the callsites, use one of the level.Debug, Info, Warn, or Error +// helper methods to emit leveled log events. +// +// logger.Log("foo", "bar") // as normal, no level +// level.Debug(logger).Log("request_id", reqID, "trace_data", trace.Get()) +// if value > 100 { +// level.Error(logger).Log("value", value) +// } +// +// NewFilter allows precise control over what happens when a log event is +// emitted without a level key, or if a squelched level is used. Check the +// Option functions for details. 
+package level diff --git a/vendor/github.com/go-kit/log/level/level.go b/vendor/github.com/go-kit/log/level/level.go new file mode 100644 index 0000000..c641d98 --- /dev/null +++ b/vendor/github.com/go-kit/log/level/level.go @@ -0,0 +1,256 @@ +package level + +import ( + "errors" + "strings" + + "github.com/go-kit/log" +) + +// ErrInvalidLevelString is returned whenever an invalid string is passed to Parse. +var ErrInvalidLevelString = errors.New("invalid level string") + +// Error returns a logger that includes a Key/ErrorValue pair. +func Error(logger log.Logger) log.Logger { + return log.WithPrefix(logger, Key(), ErrorValue()) +} + +// Warn returns a logger that includes a Key/WarnValue pair. +func Warn(logger log.Logger) log.Logger { + return log.WithPrefix(logger, Key(), WarnValue()) +} + +// Info returns a logger that includes a Key/InfoValue pair. +func Info(logger log.Logger) log.Logger { + return log.WithPrefix(logger, Key(), InfoValue()) +} + +// Debug returns a logger that includes a Key/DebugValue pair. +func Debug(logger log.Logger) log.Logger { + return log.WithPrefix(logger, Key(), DebugValue()) +} + +// NewFilter wraps next and implements level filtering. See the commentary on +// the Option functions for a detailed description of how to configure levels. +// If no options are provided, all leveled log events created with Debug, +// Info, Warn or Error helper methods are squelched and non-leveled log +// events are passed to next unmodified. +func NewFilter(next log.Logger, options ...Option) log.Logger { + l := &logger{ + next: next, + } + for _, option := range options { + option(l) + } + return l +} + +type logger struct { + next log.Logger + allowed level + squelchNoLevel bool + errNotAllowed error + errNoLevel error +} + +func (l *logger) Log(keyvals ...interface{}) error { + var hasLevel, levelAllowed bool + for i := 1; i < len(keyvals); i += 2 { + if v, ok := keyvals[i].(*levelValue); ok { + hasLevel = true + levelAllowed = l.allowed&v.level != 0 + break + } + } + if !hasLevel && l.squelchNoLevel { + return l.errNoLevel + } + if hasLevel && !levelAllowed { + return l.errNotAllowed + } + return l.next.Log(keyvals...) +} + +// Option sets a parameter for the leveled logger. +type Option func(*logger) + +// Allow the provided log level to pass. +func Allow(v Value) Option { + switch v { + case debugValue: + return AllowDebug() + case infoValue: + return AllowInfo() + case warnValue: + return AllowWarn() + case errorValue: + return AllowError() + default: + return AllowNone() + } +} + +// AllowAll is an alias for AllowDebug. +func AllowAll() Option { + return AllowDebug() +} + +// AllowDebug allows error, warn, info and debug level log events to pass. +func AllowDebug() Option { + return allowed(levelError | levelWarn | levelInfo | levelDebug) +} + +// AllowInfo allows error, warn and info level log events to pass. +func AllowInfo() Option { + return allowed(levelError | levelWarn | levelInfo) +} + +// AllowWarn allows error and warn level log events to pass. +func AllowWarn() Option { + return allowed(levelError | levelWarn) +} + +// AllowError allows only error level log events to pass. +func AllowError() Option { + return allowed(levelError) +} + +// AllowNone allows no leveled log events to pass. +func AllowNone() Option { + return allowed(0) +} + +func allowed(allowed level) Option { + return func(l *logger) { l.allowed = allowed } +} + +// Parse a string to its corresponding level value. Valid strings are "debug", +// "info", "warn", and "error". 
Strings are normalized via strings.TrimSpace and +// strings.ToLower. +func Parse(level string) (Value, error) { + switch strings.TrimSpace(strings.ToLower(level)) { + case debugValue.name: + return debugValue, nil + case infoValue.name: + return infoValue, nil + case warnValue.name: + return warnValue, nil + case errorValue.name: + return errorValue, nil + default: + return nil, ErrInvalidLevelString + } +} + +// ParseDefault calls Parse and returns the default Value on error. +func ParseDefault(level string, def Value) Value { + v, err := Parse(level) + if err != nil { + return def + } + return v +} + +// ErrNotAllowed sets the error to return from Log when it squelches a log +// event disallowed by the configured Allow[Level] option. By default, +// ErrNotAllowed is nil; in this case the log event is squelched with no +// error. +func ErrNotAllowed(err error) Option { + return func(l *logger) { l.errNotAllowed = err } +} + +// SquelchNoLevel instructs Log to squelch log events with no level, so that +// they don't proceed through to the wrapped logger. If SquelchNoLevel is set +// to true and a log event is squelched in this way, the error value +// configured with ErrNoLevel is returned to the caller. +func SquelchNoLevel(squelch bool) Option { + return func(l *logger) { l.squelchNoLevel = squelch } +} + +// ErrNoLevel sets the error to return from Log when it squelches a log event +// with no level. By default, ErrNoLevel is nil; in this case the log event is +// squelched with no error. +func ErrNoLevel(err error) Option { + return func(l *logger) { l.errNoLevel = err } +} + +// NewInjector wraps next and returns a logger that adds a Key/level pair to +// the beginning of log events that don't already contain a level. In effect, +// this gives a default level to logs without a level. +func NewInjector(next log.Logger, level Value) log.Logger { + return &injector{ + next: next, + level: level, + } +} + +type injector struct { + next log.Logger + level interface{} +} + +func (l *injector) Log(keyvals ...interface{}) error { + for i := 1; i < len(keyvals); i += 2 { + if _, ok := keyvals[i].(*levelValue); ok { + return l.next.Log(keyvals...) + } + } + kvs := make([]interface{}, len(keyvals)+2) + kvs[0], kvs[1] = key, l.level + copy(kvs[2:], keyvals) + return l.next.Log(kvs...) +} + +// Value is the interface that each of the canonical level values implement. +// It contains unexported methods that prevent types from other packages from +// implementing it and guaranteeing that NewFilter can distinguish the levels +// defined in this package from all other values. +type Value interface { + String() string + levelVal() +} + +// Key returns the unique key added to log events by the loggers in this +// package. +func Key() interface{} { return key } + +// ErrorValue returns the unique value added to log events by Error. +func ErrorValue() Value { return errorValue } + +// WarnValue returns the unique value added to log events by Warn. +func WarnValue() Value { return warnValue } + +// InfoValue returns the unique value added to log events by Info. +func InfoValue() Value { return infoValue } + +// DebugValue returns the unique value added to log events by Debug. +func DebugValue() Value { return debugValue } + +var ( + // key is of type interface{} so that it allocates once during package + // initialization and avoids allocating every time the value is added to a + // []interface{} later. 
+ key interface{} = "level" + + errorValue = &levelValue{level: levelError, name: "error"} + warnValue = &levelValue{level: levelWarn, name: "warn"} + infoValue = &levelValue{level: levelInfo, name: "info"} + debugValue = &levelValue{level: levelDebug, name: "debug"} +) + +type level byte + +const ( + levelDebug level = 1 << iota + levelInfo + levelWarn + levelError +) + +type levelValue struct { + name string + level +} + +func (v *levelValue) String() string { return v.name } +func (v *levelValue) levelVal() {} diff --git a/vendor/github.com/go-kit/log/log.go b/vendor/github.com/go-kit/log/log.go new file mode 100644 index 0000000..62e11ad --- /dev/null +++ b/vendor/github.com/go-kit/log/log.go @@ -0,0 +1,179 @@ +package log + +import "errors" + +// Logger is the fundamental interface for all log operations. Log creates a +// log event from keyvals, a variadic sequence of alternating keys and values. +// Implementations must be safe for concurrent use by multiple goroutines. In +// particular, any implementation of Logger that appends to keyvals or +// modifies or retains any of its elements must make a copy first. +type Logger interface { + Log(keyvals ...interface{}) error +} + +// ErrMissingValue is appended to keyvals slices with odd length to substitute +// the missing value. +var ErrMissingValue = errors.New("(MISSING)") + +// With returns a new contextual logger with keyvals prepended to those passed +// to calls to Log. If logger is also a contextual logger created by With, +// WithPrefix, or WithSuffix, keyvals is appended to the existing context. +// +// The returned Logger replaces all value elements (odd indexes) containing a +// Valuer with their generated value for each call to its Log method. +func With(logger Logger, keyvals ...interface{}) Logger { + if len(keyvals) == 0 { + return logger + } + l := newContext(logger) + kvs := append(l.keyvals, keyvals...) + if len(kvs)%2 != 0 { + kvs = append(kvs, ErrMissingValue) + } + return &context{ + logger: l.logger, + // Limiting the capacity of the stored keyvals ensures that a new + // backing array is created if the slice must grow in Log or With. + // Using the extra capacity without copying risks a data race that + // would violate the Logger interface contract. + keyvals: kvs[:len(kvs):len(kvs)], + hasValuer: l.hasValuer || containsValuer(keyvals), + sKeyvals: l.sKeyvals, + sHasValuer: l.sHasValuer, + } +} + +// WithPrefix returns a new contextual logger with keyvals prepended to those +// passed to calls to Log. If logger is also a contextual logger created by +// With, WithPrefix, or WithSuffix, keyvals is prepended to the existing context. +// +// The returned Logger replaces all value elements (odd indexes) containing a +// Valuer with their generated value for each call to its Log method. +func WithPrefix(logger Logger, keyvals ...interface{}) Logger { + if len(keyvals) == 0 { + return logger + } + l := newContext(logger) + // Limiting the capacity of the stored keyvals ensures that a new + // backing array is created if the slice must grow in Log or With. + // Using the extra capacity without copying risks a data race that + // would violate the Logger interface contract. + n := len(l.keyvals) + len(keyvals) + if len(keyvals)%2 != 0 { + n++ + } + kvs := make([]interface{}, 0, n) + kvs = append(kvs, keyvals...) + if len(kvs)%2 != 0 { + kvs = append(kvs, ErrMissingValue) + } + kvs = append(kvs, l.keyvals...) 
+ return &context{ + logger: l.logger, + keyvals: kvs, + hasValuer: l.hasValuer || containsValuer(keyvals), + sKeyvals: l.sKeyvals, + sHasValuer: l.sHasValuer, + } +} + +// WithSuffix returns a new contextual logger with keyvals appended to those +// passed to calls to Log. If logger is also a contextual logger created by +// With, WithPrefix, or WithSuffix, keyvals is appended to the existing context. +// +// The returned Logger replaces all value elements (odd indexes) containing a +// Valuer with their generated value for each call to its Log method. +func WithSuffix(logger Logger, keyvals ...interface{}) Logger { + if len(keyvals) == 0 { + return logger + } + l := newContext(logger) + // Limiting the capacity of the stored keyvals ensures that a new + // backing array is created if the slice must grow in Log or With. + // Using the extra capacity without copying risks a data race that + // would violate the Logger interface contract. + n := len(l.sKeyvals) + len(keyvals) + if len(keyvals)%2 != 0 { + n++ + } + kvs := make([]interface{}, 0, n) + kvs = append(kvs, keyvals...) + if len(kvs)%2 != 0 { + kvs = append(kvs, ErrMissingValue) + } + kvs = append(l.sKeyvals, kvs...) + return &context{ + logger: l.logger, + keyvals: l.keyvals, + hasValuer: l.hasValuer, + sKeyvals: kvs, + sHasValuer: l.sHasValuer || containsValuer(keyvals), + } +} + +// context is the Logger implementation returned by With, WithPrefix, and +// WithSuffix. It wraps a Logger and holds keyvals that it includes in all +// log events. Its Log method calls bindValues to generate values for each +// Valuer in the context keyvals. +// +// A context must always have the same number of stack frames between calls to +// its Log method and the eventual binding of Valuers to their value. This +// requirement comes from the functional requirement to allow a context to +// resolve application call site information for a Caller stored in the +// context. To do this we must be able to predict the number of logging +// functions on the stack when bindValues is called. +// +// Two implementation details provide the needed stack depth consistency. +// +// 1. newContext avoids introducing an additional layer when asked to +// wrap another context. +// 2. With, WithPrefix, and WithSuffix avoid introducing an additional +// layer by returning a newly constructed context with a merged keyvals +// rather than simply wrapping the existing context. +type context struct { + logger Logger + keyvals []interface{} + sKeyvals []interface{} // suffixes + hasValuer bool + sHasValuer bool +} + +func newContext(logger Logger) *context { + if c, ok := logger.(*context); ok { + return c + } + return &context{logger: logger} +} + +// Log replaces all value elements (odd indexes) containing a Valuer in the +// stored context with their generated value, appends keyvals, and passes the +// result to the wrapped Logger. +func (l *context) Log(keyvals ...interface{}) error { + kvs := append(l.keyvals, keyvals...) + if len(kvs)%2 != 0 { + kvs = append(kvs, ErrMissingValue) + } + if l.hasValuer { + // If no keyvals were appended above then we must copy l.keyvals so + // that future log events will reevaluate the stored Valuers. + if len(keyvals) == 0 { + kvs = append([]interface{}{}, l.keyvals...) + } + bindValues(kvs[:(len(l.keyvals))]) + } + kvs = append(kvs, l.sKeyvals...) + if l.sHasValuer { + bindValues(kvs[len(kvs)-len(l.sKeyvals):]) + } + return l.logger.Log(kvs...) +} + +// LoggerFunc is an adapter to allow use of ordinary functions as Loggers. 
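Taken together, With, WithPrefix, and WithSuffix compose as in the following minimal sketch. DefaultTimestampUTC and DefaultCaller are Valuers from value.go in the same vendored package, not shown in this excerpt; the field names and values are illustrative.

package main

import (
	"os"

	"github.com/go-kit/log"
)

func main() {
	var logger log.Logger = log.NewLogfmtLogger(os.Stdout)

	// Valuers are rebound on every Log call, so ts and caller stay current
	// instead of being frozen at wrap time.
	logger = log.With(logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller)

	// WithPrefix places its pairs before the existing context; WithSuffix
	// places its pairs after the per-event keyvals.
	logger = log.WithPrefix(logger, "app", "exporter-unifi-protect")
	logger = log.WithSuffix(logger, "build", "dev")

	// Odd-length keyvals are padded with ErrMissingValue, so "addr" is paired
	// with "(MISSING)" in the output.
	logger.Log("msg", "listening", "addr")
}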
If +// f is a function with the appropriate signature, LoggerFunc(f) is a Logger +// object that calls f. +type LoggerFunc func(...interface{}) error + +// Log implements Logger by calling f(keyvals...). +func (f LoggerFunc) Log(keyvals ...interface{}) error { + return f(keyvals...) +} diff --git a/vendor/github.com/go-kit/log/logfmt_logger.go b/vendor/github.com/go-kit/log/logfmt_logger.go new file mode 100644 index 0000000..a003052 --- /dev/null +++ b/vendor/github.com/go-kit/log/logfmt_logger.go @@ -0,0 +1,62 @@ +package log + +import ( + "bytes" + "io" + "sync" + + "github.com/go-logfmt/logfmt" +) + +type logfmtEncoder struct { + *logfmt.Encoder + buf bytes.Buffer +} + +func (l *logfmtEncoder) Reset() { + l.Encoder.Reset() + l.buf.Reset() +} + +var logfmtEncoderPool = sync.Pool{ + New: func() interface{} { + var enc logfmtEncoder + enc.Encoder = logfmt.NewEncoder(&enc.buf) + return &enc + }, +} + +type logfmtLogger struct { + w io.Writer +} + +// NewLogfmtLogger returns a logger that encodes keyvals to the Writer in +// logfmt format. Each log event produces no more than one call to w.Write. +// The passed Writer must be safe for concurrent use by multiple goroutines if +// the returned Logger will be used concurrently. +func NewLogfmtLogger(w io.Writer) Logger { + return &logfmtLogger{w} +} + +func (l logfmtLogger) Log(keyvals ...interface{}) error { + enc := logfmtEncoderPool.Get().(*logfmtEncoder) + enc.Reset() + defer logfmtEncoderPool.Put(enc) + + if err := enc.EncodeKeyvals(keyvals...); err != nil { + return err + } + + // Add newline to the end of the buffer + if err := enc.EndRecord(); err != nil { + return err + } + + // The Logger interface requires implementations to be safe for concurrent + // use by multiple goroutines. For this implementation that means making + // only one call to l.w.Write() for each call to Log. + if _, err := l.w.Write(enc.buf.Bytes()); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/go-kit/log/nop_logger.go b/vendor/github.com/go-kit/log/nop_logger.go new file mode 100644 index 0000000..1047d62 --- /dev/null +++ b/vendor/github.com/go-kit/log/nop_logger.go @@ -0,0 +1,8 @@ +package log + +type nopLogger struct{} + +// NewNopLogger returns a logger that doesn't do anything. +func NewNopLogger() Logger { return nopLogger{} } + +func (nopLogger) Log(...interface{}) error { return nil } diff --git a/vendor/github.com/go-kit/log/staticcheck.conf b/vendor/github.com/go-kit/log/staticcheck.conf new file mode 100644 index 0000000..528438b --- /dev/null +++ b/vendor/github.com/go-kit/log/staticcheck.conf @@ -0,0 +1 @@ +checks = ["all"] diff --git a/vendor/github.com/go-kit/log/stdlib.go b/vendor/github.com/go-kit/log/stdlib.go new file mode 100644 index 0000000..0338edb --- /dev/null +++ b/vendor/github.com/go-kit/log/stdlib.go @@ -0,0 +1,151 @@ +package log + +import ( + "bytes" + "io" + "log" + "regexp" + "strings" +) + +// StdlibWriter implements io.Writer by invoking the stdlib log.Print. It's +// designed to be passed to a Go kit logger as the writer, for cases where +// it's necessary to redirect all Go kit log output to the stdlib logger. +// +// If you have any choice in the matter, you shouldn't use this. Prefer to +// redirect the stdlib log to the Go kit logger via NewStdlibAdapter. +type StdlibWriter struct{} + +// Write implements io.Writer. 
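A minimal sketch of the three small pieces added just above: LoggerFunc adapting an ordinary function, the pooled logfmt logger, and the nop logger. The keys and messages are illustrative.

package main

import (
	"fmt"
	"os"

	"github.com/go-kit/log"
)

func main() {
	// LoggerFunc adapts any func(...interface{}) error to the Logger interface.
	printer := log.LoggerFunc(func(keyvals ...interface{}) error {
		_, err := fmt.Fprintln(os.Stdout, keyvals...)
		return err
	})
	printer.Log("msg", "hello from LoggerFunc")

	// NewLogfmtLogger emits one logfmt record per Log call; the encoder is
	// pooled, so each event costs a single Write on the underlying writer.
	lf := log.NewLogfmtLogger(os.Stderr)
	lf.Log("component", "exporter", "msg", "started")

	// NewNopLogger discards everything; useful as a default in tests.
	var quiet log.Logger = log.NewNopLogger()
	quiet.Log("msg", "never seen")
}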
+func (w StdlibWriter) Write(p []byte) (int, error) {
+	log.Print(strings.TrimSpace(string(p)))
+	return len(p), nil
+}
+
+// StdlibAdapter wraps a Logger and allows it to be passed to the stdlib
+// logger's SetOutput. It will extract date/timestamps, filenames, and
+// messages, and place them under relevant keys.
+type StdlibAdapter struct {
+	Logger
+	timestampKey    string
+	fileKey         string
+	messageKey      string
+	prefix          string
+	joinPrefixToMsg bool
+}
+
+// StdlibAdapterOption sets a parameter for the StdlibAdapter.
+type StdlibAdapterOption func(*StdlibAdapter)
+
+// TimestampKey sets the key for the timestamp field. By default, it's "ts".
+func TimestampKey(key string) StdlibAdapterOption {
+	return func(a *StdlibAdapter) { a.timestampKey = key }
+}
+
+// FileKey sets the key for the file and line field. By default, it's "caller".
+func FileKey(key string) StdlibAdapterOption {
+	return func(a *StdlibAdapter) { a.fileKey = key }
+}
+
+// MessageKey sets the key for the actual log message. By default, it's "msg".
+func MessageKey(key string) StdlibAdapterOption {
+	return func(a *StdlibAdapter) { a.messageKey = key }
+}
+
+// Prefix configures the adapter to parse a prefix from stdlib log events. If
+// you provide a non-empty prefix to the stdlib logger, then you should provide
+// that same prefix to the adapter via this option.
+//
+// By default, the prefix isn't included in the msg key. Set joinPrefixToMsg to
+// true if you want to include the parsed prefix in the msg.
+func Prefix(prefix string, joinPrefixToMsg bool) StdlibAdapterOption {
+	return func(a *StdlibAdapter) { a.prefix = prefix; a.joinPrefixToMsg = joinPrefixToMsg }
+}
+
+// NewStdlibAdapter returns a new StdlibAdapter wrapper around the passed
+// logger. It's designed to be passed to log.SetOutput.
+func NewStdlibAdapter(logger Logger, options ...StdlibAdapterOption) io.Writer {
+	a := StdlibAdapter{
+		Logger:       logger,
+		timestampKey: "ts",
+		fileKey:      "caller",
+		messageKey:   "msg",
+	}
+	for _, option := range options {
+		option(&a)
+	}
+	return a
+}
+
+func (a StdlibAdapter) Write(p []byte) (int, error) {
+	p = a.handlePrefix(p)
+
+	result := subexps(p)
+	keyvals := []interface{}{}
+	var timestamp string
+	if date, ok := result["date"]; ok && date != "" {
+		timestamp = date
+	}
+	if time, ok := result["time"]; ok && time != "" {
+		if timestamp != "" {
+			timestamp += " "
+		}
+		timestamp += time
+	}
+	if timestamp != "" {
+		keyvals = append(keyvals, a.timestampKey, timestamp)
+	}
+	if file, ok := result["file"]; ok && file != "" {
+		keyvals = append(keyvals, a.fileKey, file)
+	}
+	if msg, ok := result["msg"]; ok {
+		msg = a.handleMessagePrefix(msg)
+		keyvals = append(keyvals, a.messageKey, msg)
+	}
+	if err := a.Logger.Log(keyvals...); err != nil {
+		return 0, err
+	}
+	return len(p), nil
+}
+
+func (a StdlibAdapter) handlePrefix(p []byte) []byte {
+	if a.prefix != "" {
+		p = bytes.TrimPrefix(p, []byte(a.prefix))
+	}
+	return p
+}
+
+func (a StdlibAdapter) handleMessagePrefix(msg string) string {
+	if a.prefix == "" {
+		return msg
+	}
+
+	msg = strings.TrimPrefix(msg, a.prefix)
+	if a.joinPrefixToMsg {
+		msg = a.prefix + msg
+	}
+	return msg
+}
+
+const (
+	logRegexpDate = `(?P<date>[0-9]{4}/[0-9]{2}/[0-9]{2})?[ ]?`
+	logRegexpTime = `(?P
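Finally, a minimal sketch of routing the standard library logger through the adapter defined above. The flag combination is an illustrative assumption; any date, time, or file fields the regexps recognize are re-keyed as ts and caller.

package main

import (
	stdlog "log"
	"os"

	kitlog "github.com/go-kit/log"
)

func main() {
	logger := kitlog.NewLogfmtLogger(os.Stdout)

	// Redirect stdlib log output into the go-kit logger; parsed prefixes such
	// as the date, time, and file:line become structured fields.
	stdlog.SetOutput(kitlog.NewStdlibAdapter(logger, kitlog.MessageKey("msg")))
	stdlog.SetFlags(stdlog.LstdFlags | stdlog.Lshortfile)

	stdlog.Println("request handled") // roughly: ts=... caller=main.go:NN msg="request handled"
}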