diff --git a/.golangci.yml b/.golangci.yml index abf0e5593..04f0f1954 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -71,7 +71,7 @@ linters-settings: # 2. you use go >= 1.10 # 3. you do repeated runs (false for CI) or cache $GOPATH/pkg or `go env GOCACHE` dir in CI. use-installed-packages: false - golint: + revive: # minimal confidence for issues, default is 0.8 min-confidence: 0.8 gofmt: @@ -139,7 +139,7 @@ linters-settings: linters: enable: - - golint + - revive - goimports - varcheck - unparam diff --git a/.travis.yml b/.travis.yml index 98d6eda1d..4f6bbc456 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,8 +1,8 @@ language: go go: # - "1.11.x" At minimum the code should run make check on the latest two go versions in the default linux environment provided by Travis. - - "1.14.x" - + - "1.16.x" + dist: xenial matrix: @@ -15,8 +15,21 @@ matrix: install: - go get -u github.com/FiloSottile/vendorcheck - - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $GOPATH/bin v1.31.0 + - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $GOPATH/bin v1.40.1 - go mod vendor script: - make check + +deploy: + - provider: script + script: bash ./docker/scripts/docker-push.sh -t "$TRAVIS_BRANCH" -p + on: + branch: master + condition: $TRAVIS_PULL_REQUEST = false + - provider: script + script: bash ./docker/scripts/docker-push.sh -t "$TRAVIS_BRANCH" -p + on: + branch: develop + condition: $TRAVIS_PULL_REQUEST = false + diff --git a/Makefile b/Makefile index 0f5f7f6fd..21d6c3f4d 100644 --- a/Makefile +++ b/Makefile @@ -1,3 +1,5 @@ +SHELL := /bin/bash + .DEFAULT_GOAL := help .PHONY : check lint install-linters dep test build @@ -29,8 +31,8 @@ BUILDINFO_COMMIT := -X $(BUILDINFO_PATH).commit=$(COMMIT) BUILDINFO?=$(BUILDINFO_VERSION) $(BUILDINFO_DATE) $(BUILDINFO_COMMIT) -BUILD_OPTS?="-ldflags=$(BUILDINFO)" -BUILD_OPTS_DEPLOY?="-ldflags=$(BUILDINFO) -w -s" +BUILD_OPTS?=-mod=vendor "-ldflags=$(BUILDINFO)" +BUILD_OPTS_DEPLOY?=-mod=vendor "-ldflags=$(BUILDINFO) -w -s" check: lint test ## Run linters and tests @@ -47,13 +49,12 @@ test: ## Run tests ${OPTS} go test ${TEST_OPTS} ./... install-linters: ## Install linters - - VERSION=1.23.1 ./ci_scripts/install-golangci-lint.sh # GO111MODULE=off go get -u github.com/FiloSottile/vendorcheck # For some reason this install method is not recommended, see https://github.com/golangci/golangci-lint#install # However, they suggest `curl ... | bash` which we should not do - # ${OPTS} go get -u github.com/golangci/golangci-lint/cmd/golangci-lint + ${OPTS} go get -u github.com/golangci/golangci-lint/cmd/golangci-lint ${OPTS} go get -u golang.org/x/tools/cmd/goimports - ${OPTS} go get -u github.com/incu6us/goimports-reviser + ${OPTS} go get -u github.com/incu6us/goimports-reviser/v2 format: ## Formats the code. Must have goimports and goimports-reviser installed (use make install-linters). ${OPTS} goimports -w -local ${DMSG_REPO} . 
@@ -62,6 +63,9 @@ dep: ## Sorts dependencies ${OPTS} go mod download ${OPTS} go mod tidy -v +install: ## Install `dmsg-discovery`, `dmsg-server`, `dmsgget`,`dmsgpty-cli`, `dmsgpty-host`, `dmsgpty-ui` + ${OPTS} go install ${BUILD_OPTS} ./cmd/* + build: ## Build binaries into ./bin mkdir -p ${BIN}; go build ${BUILD_OPTS} -o ${BIN} ./cmd/* @@ -69,6 +73,9 @@ build-deploy: ## Build for deployment Docker images go build -tags netgo ${BUILD_OPTS_DEPLOY} -o /release/dmsg-discovery ./cmd/dmsg-discovery go build -tags netgo ${BUILD_OPTS_DEPLOY} -o /release/dmsg-server ./cmd/dmsg-server +build-docker: + ./docker/scripts/docker-push.sh -t "develop" -b + start-db: ## Init local database env. source ./integration/env.sh && init_redis diff --git a/README.md b/README.md index b55cd438f..3c892831f 100644 --- a/README.md +++ b/README.md @@ -28,7 +28,7 @@ The connection between a `dmsg.Client` and `dmsg.Server` is called a `dmsg.Sessi ## Dmsg tools and libraries - [`dmsgget`](./docs/dmsgget.md) - Simplified `wget` over `dmsg`. - +- [`dmsgpty`](./docs/dmsgpty.md) - Simplified `SSH` over `dmsg`. ## Additional resources - [`dmsg` examples.](./examples) - [`dmsg.Discovery` documentation.](./cmd/dmsg-discovery/README.md) diff --git a/ci_scripts/install-golangci-lint.sh b/ci_scripts/install-golangci-lint.sh deleted file mode 100755 index cfe033782..000000000 --- a/ci_scripts/install-golangci-lint.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env bash - -set -e -o pipefail - -if [[ -z "$VERSION" ]]; then - echo "VERSION must be set" - exit 1 -fi - -if [[ -z "$GOBIN" ]]; then - export GOBIN="$HOME/go/bin" -fi - -# In alpine linux (as it does not come with curl by default) -wget -O - -q https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $GOBIN v$VERSION - -golangci-lint --version diff --git a/cmd/dmsg-discovery/commands/root.go b/cmd/dmsg-discovery/commands/root.go index 68366a902..002bf4303 100644 --- a/cmd/dmsg-discovery/commands/root.go +++ b/cmd/dmsg-discovery/commands/root.go @@ -17,7 +17,6 @@ import ( "github.com/skycoin/dmsg/cmd/dmsg-discovery/internal/store" "github.com/skycoin/dmsg/cmdutil" "github.com/skycoin/dmsg/discmetrics" - "github.com/skycoin/dmsg/discord" "github.com/skycoin/dmsg/metricsutil" ) @@ -52,15 +51,6 @@ var rootCmd = &cobra.Command{ log := sf.Logger() - if discordWebhookURL := discord.GetWebhookURLFromEnv(); discordWebhookURL != "" { - // Workaround for Discord logger hook. Actually, it's Info. - log.Error(discord.StartLogMessage) - defer log.Error(discord.StopLogMessage) - } else { - log.Info(discord.StartLogMessage) - defer log.Info(discord.StopLogMessage) - } - metricsutil.ServeHTTPMetrics(log, sf.MetricsAddr) db := prepareDB(log) diff --git a/cmd/dmsg-discovery/internal/store/storer.go b/cmd/dmsg-discovery/internal/store/storer.go index 463725e38..b2d61d72e 100644 --- a/cmd/dmsg-discovery/internal/store/storer.go +++ b/cmd/dmsg-discovery/internal/store/storer.go @@ -45,7 +45,7 @@ type Config struct { // Config defaults. const ( DefaultURL = "redis://localhost:6379" - DefaultTimeout = time.Minute + DefaultTimeout = time.Minute * 3 ) // DefaultConfig returns a config with default values. 
diff --git a/cmd/dmsg-discovery/internal/store/testing.go b/cmd/dmsg-discovery/internal/store/testing.go index 0bc9ae7e2..52a084848 100644 --- a/cmd/dmsg-discovery/internal/store/testing.go +++ b/cmd/dmsg-discovery/internal/store/testing.go @@ -117,8 +117,8 @@ func (ms *MockStore) AvailableServers(ctx context.Context, maxCount int) ([]*dis // CountEntries implements Storer CountEntries method for MockStore func (ms *MockStore) CountEntries(ctx context.Context) (int64, int64, error) { - var numberOfServers int64 = 0 - var numberOfClients int64 = 0 + var numberOfServers int64 + var numberOfClients int64 ms.serversLock.RLock() defer ms.serversLock.RUnlock() diff --git a/cmd/dmsg-server/commands/root.go b/cmd/dmsg-server/commands/root.go index fcf2594fc..094b8af42 100644 --- a/cmd/dmsg-server/commands/root.go +++ b/cmd/dmsg-server/commands/root.go @@ -2,6 +2,9 @@ package commands import ( "context" + "fmt" + "io" + "io/ioutil" "log" "net" "os" @@ -9,6 +12,7 @@ import ( "github.com/go-chi/chi" "github.com/go-chi/chi/middleware" + jsoniter "github.com/json-iterator/go" "github.com/pires/go-proxyproto" "github.com/spf13/cobra" @@ -18,17 +22,22 @@ import ( "github.com/skycoin/dmsg/cmd/dmsg-server/internal/api" "github.com/skycoin/dmsg/cmdutil" "github.com/skycoin/dmsg/disc" - "github.com/skycoin/dmsg/discord" "github.com/skycoin/dmsg/metricsutil" "github.com/skycoin/dmsg/servermetrics" ) +const ( + defaultDiscoveryURL = "https://dmsg.discovery.skywire.skycoin.com" + defaultPort = ":8081" + defaultConfigPath = "config.json" +) + var ( sf cmdutil.ServiceFlags ) func init() { - sf.Init(rootCmd, "dmsg_srv", "config.json") + sf.Init(rootCmd, "dmsg_srv", defaultConfigPath) } var rootCmd = &cobra.Command{ @@ -42,18 +51,9 @@ var rootCmd = &cobra.Command{ log := sf.Logger() - if discordWebhookURL := discord.GetWebhookURLFromEnv(); discordWebhookURL != "" { - // Workaround for Discord logger hook. Actually, it's Info. 
- log.Error(discord.StartLogMessage) - defer log.Error(discord.StopLogMessage) - } else { - log.Info(discord.StartLogMessage) - defer log.Info(discord.StopLogMessage) - } - var conf Config - if err := sf.ParseConfig(os.Args, true, &conf); err != nil { - log.WithError(err).Fatal() + if err := sf.ParseConfig(os.Args, true, &conf, genDefaultConfig); err != nil { + log.WithError(err).Fatal("parsing config failed, generating default one...") } var m servermetrics.Metrics @@ -71,15 +71,19 @@ var rootCmd = &cobra.Command{ r.Use(middleware.Logger) r.Use(middleware.Recoverer) - a := api.New(r, log, m) - r.Get("/health", a.Health) + api := api.New(r, log, m) ln, err := net.Listen("tcp", conf.LocalAddress) if err != nil { log.Fatalf("Error listening on %s: %v", conf.LocalAddress, err) } lis := &proxyproto.Listener{Listener: ln} - defer lis.Close() // nolint:errcheck + defer func(lis *proxyproto.Listener) { + err = lis.Close() + if err != nil { + log.Warnf("Error closing listener: %v", err) + } + }(lis) if err != nil { log.Fatalf("Error creating proxy on %s: %v", conf.LocalAddress, err) @@ -92,15 +96,15 @@ var rootCmd = &cobra.Command{ srv := dmsg.NewServer(conf.PubKey, conf.SecKey, disc.NewHTTP(conf.Discovery), &srvConf, m) srv.SetLogger(log) - a.SetDmsgServer(srv) - defer func() { log.WithError(srv.Close()).Info("Closed server.") }() + api.SetDmsgServer(srv) + defer func() { log.WithError(api.Close()).Info("Closed server.") }() ctx, cancel := cmdutil.SignalContext(context.Background(), log) defer cancel() - go a.RunBackgroundTasks(ctx) + go api.RunBackgroundTasks(ctx) go func() { - if err := srv.Serve(lis, conf.PublicAddress); err != nil { + if err := api.Serve(lis, conf.PublicAddress); err != nil { log.Errorf("Serve: %v", err) cancel() } @@ -122,6 +126,31 @@ type Config struct { LogLevel string `json:"log_level"` } +func genDefaultConfig() (io.ReadCloser, error) { + pk, sk := cipher.GenerateKeyPair() + + cfg := Config{ + PubKey: pk, + SecKey: sk, + Discovery: defaultDiscoveryURL, + LocalAddress: fmt.Sprintf("localhost%s", defaultPort), + PublicAddress: defaultPort, + MaxSessions: 2048, + LogLevel: "info", + } + + configData, err := jsoniter.MarshalIndent(&cfg, "", " ") + if err != nil { + return nil, fmt.Errorf("failed to marshal default json config: %v", err) + } + + if err = ioutil.WriteFile(defaultConfigPath, configData, 0600); err != nil { + return nil, err + } + + return os.Open(defaultConfigPath) +} + // Execute executes root CLI command. 
func Execute() { if err := rootCmd.Execute(); err != nil { diff --git a/cmd/dmsg-server/internal/api/api.go b/cmd/dmsg-server/internal/api/api.go index 2aa5dbba3..eecebbcaa 100644 --- a/cmd/dmsg-server/internal/api/api.go +++ b/cmd/dmsg-server/internal/api/api.go @@ -5,6 +5,7 @@ import ( "encoding/json" "math" "math/big" + "net" "net/http" "sync" "time" @@ -30,6 +31,7 @@ type API struct { minuteEncValues map[*dmsg.SessionCommon]uint64 secondDecValues map[*dmsg.SessionCommon]uint64 secondEncValues map[*dmsg.SessionCommon]uint64 + router *chi.Mux } // HealthCheckResponse is struct of /health endpoint @@ -47,8 +49,10 @@ func New(r *chi.Mux, log *logging.Logger, m servermetrics.Metrics) *API { minuteEncValues: make(map[*dmsg.SessionCommon]uint64), secondDecValues: make(map[*dmsg.SessionCommon]uint64), secondEncValues: make(map[*dmsg.SessionCommon]uint64), + router: r, } r.Use(httputil.SetLoggerMiddleware(log)) + r.Get("/health", api.health) return api } @@ -80,8 +84,28 @@ func (a *API) SetDmsgServer(srv *dmsg.Server) { a.dmsgServer = srv } +// Serve runs dmsg Serve function alongside health endpoint in the same port +func (a *API) Serve(lis net.Listener, addr string) error { + errCh := make(chan error) + + go func(l net.Listener, address string) { + if err := a.dmsgServer.Serve(l, address); err != nil { + errCh <- err + } + }(lis, addr) + if err := http.Serve(lis, a.router); err != nil { + errCh <- err + } + return <-errCh +} + +// Close closes connection to both http server and dmsg server +func (a *API) Close() error { + return a.dmsgServer.Close() +} + // Health serves health page -func (a *API) Health(w http.ResponseWriter, r *http.Request) { +func (a *API) health(w http.ResponseWriter, r *http.Request) { info := buildinfo.Get() a.writeJSON(w, r, http.StatusOK, HealthCheckResponse{ BuildInfo: info, diff --git a/cmd/dmsgpty-cli/commands/root.go b/cmd/dmsgpty-cli/commands/root.go index b6bd24398..17ba29558 100644 --- a/cmd/dmsgpty-cli/commands/root.go +++ b/cmd/dmsgpty-cli/commands/root.go @@ -2,7 +2,10 @@ package commands import ( "context" + "encoding/json" + "io/ioutil" "log" + "os" "github.com/spf13/cobra" @@ -20,13 +23,24 @@ func init() { rootCmd.PersistentFlags().StringVar(&cli.Addr, "cliaddr", cli.Addr, "address to use for dialing to dmsgpty-host") + + rootCmd.PersistentFlags().StringVar(&confPath, "confpath", confPath, + "config path") } +// conf to update whitelists +var conf dmsgpty.Config = dmsgpty.DefaultConfig() + +// path for config file ( required for whitelists ) +var confPath = "config.json" + var remoteAddr dmsg.Addr var cmdName = dmsgpty.DefaultCmd var cmdArgs []string func init() { + + cobra.OnInitialize(initConfig) rootCmd.Flags().Var(&remoteAddr, "addr", "remote dmsg address of format 'pk:port'. 
If unspecified, the pty will start locally") @@ -35,6 +49,48 @@ func init() { rootCmd.Flags().StringSliceVarP(&cmdArgs, "args", "a", cmdArgs, "command arguments") + +} + +// initConfig sources whitelist from config file +// by default : it will look for (default "config.json") +// +// case 1 : config file is new (does not contain a "wl" key) +// - create a "wl" key within the config file +// +// case 2 : config file is old (already contains "wl" key) +// - load config file into memory to manipulate whitelists +// - writes changes back to config file +func initConfig() { + + println(confPath) + + if _, err := os.Stat(confPath); err != nil { + cli.Log.Fatalln("Default config file \"config.json\" not found.") + } + + // read file using ioutil + file, err := ioutil.ReadFile(confPath) + if err != nil { + cli.Log.Fatalln("Unable to read ", confPath, err) + } + + // store config.json into conf to manipulate whitelists + err = json.Unmarshal(file, &conf) + if err != nil { + cli.Log.Errorln(err) + // ignoring this error + b, err := json.MarshalIndent(conf, "", " ") + if err != nil { + cli.Log.Fatalln("Unable to marshal conf") + } + + // write to config.json + err = ioutil.WriteFile(confPath, b, 0600) + if err != nil { + cli.Log.Fatalln("Unable to write", confPath, err) + } + } } var rootCmd = &cobra.Command{ diff --git a/cmd/dmsgpty-cli/commands/whitelist.go b/cmd/dmsgpty-cli/commands/whitelist.go index b9c72112c..ff4a538a4 100644 --- a/cmd/dmsgpty-cli/commands/whitelist.go +++ b/cmd/dmsgpty-cli/commands/whitelist.go @@ -2,6 +2,7 @@ package commands import ( "fmt" + "log" "github.com/spf13/cobra" @@ -27,8 +28,12 @@ var whitelistCmd = &cobra.Command{ if err != nil { return err } - for _, pk := range pks { - fmt.Println(pk) + if len(pks) == 0 { + log.Println("Whitelist Empty") + } else { + for _, pk := range pks { + fmt.Println(pk) + } } return nil }, @@ -39,15 +44,22 @@ var whitelistAddCmd = &cobra.Command{ Short: "adds public key(s) to the whitelist", Args: cobra.MinimumNArgs(1), RunE: func(_ *cobra.Command, args []string) error { + pks, err := pksFromArgs(args) if err != nil { return err } + wlC, err := cli.WhitelistClient() if err != nil { return err } - return wlC.WhitelistAdd(pks...) + err = wlC.WhitelistAdd(pks...) 
+ if err != nil { + fmt.Println(err) + return nil + } + return nil }, } @@ -56,10 +68,12 @@ var whitelistRemoveCmd = &cobra.Command{ Short: "removes public key(s) from the whitelist", Args: cobra.MinimumNArgs(1), RunE: func(_ *cobra.Command, args []string) error { + pks, err := pksFromArgs(args) if err != nil { return err } + wlC, err := cli.WhitelistClient() if err != nil { return err diff --git a/cmd/dmsgpty-host/commands/confgen.go b/cmd/dmsgpty-host/commands/confgen.go index 0c7264d8c..941481874 100644 --- a/cmd/dmsgpty-host/commands/confgen.go +++ b/cmd/dmsgpty-host/commands/confgen.go @@ -1,8 +1,12 @@ package commands import ( + "fmt" + "github.com/spf13/cobra" - "github.com/spf13/viper" + + "github.com/skycoin/dmsg/dmsgpty" + "github.com/skycoin/dmsg/fsutil" ) var unsafe = false @@ -17,12 +21,32 @@ func init() { var confgenCmd = &cobra.Command{ Use: "confgen ", Short: "generates config file", - Args: cobra.ExactArgs(1), - PreRun: prepareVariables, + Args: cobra.MaximumNArgs(1), + PreRun: func(cmd *cobra.Command, args []string) {}, RunE: func(cmd *cobra.Command, args []string) error { + + if len(args) == 0 { + confPath = "./config.json" + } else { + confPath = args[0] + } + + conf, err := getConfig(cmd, true) + if err != nil { + return fmt.Errorf("failed to get config: %w", err) + } if unsafe { - return viper.WriteConfigAs(args[0]) + return dmsgpty.WriteConfig(conf, confPath) + } + + exists, err := fsutil.Exists(confPath) + if err != nil { + return fmt.Errorf("failed to check if config file exists: %w", err) } - return viper.SafeWriteConfigAs(args[0]) + if exists { + return fmt.Errorf("config file %s already exists", confPath) + } + + return dmsgpty.WriteConfig(conf, confPath) }, } diff --git a/cmd/dmsgpty-host/commands/root.go b/cmd/dmsgpty-host/commands/root.go index 1414b3eea..aa0797954 100644 --- a/cmd/dmsgpty-host/commands/root.go +++ b/cmd/dmsgpty-host/commands/root.go @@ -1,21 +1,19 @@ package commands import ( - "bytes" "context" "fmt" stdlog "log" "net" "os" + "strconv" + "strings" "sync" jsoniter "github.com/json-iterator/go" "github.com/sirupsen/logrus" "github.com/skycoin/skycoin/src/util/logging" - "github.com/spf13/cast" "github.com/spf13/cobra" - "github.com/spf13/pflag" - "github.com/spf13/viper" "github.com/skycoin/dmsg" "github.com/skycoin/dmsg/buildinfo" @@ -33,36 +31,31 @@ var json = jsoniter.ConfigFastest // variables var ( - // persistent flags (with viper references) - sk cipher.SecKey - wlPath = "" + // persistent flags dmsgDisc = dmsg.DefaultDiscAddr dmsgSessions = dmsg.DefaultMinSessions dmsgPort = dmsgpty.DefaultPort cliNet = dmsgpty.DefaultCLINet cliAddr = dmsgpty.DefaultCLIAddr + sk cipher.SecKey + pk cipher.PubKey + wl cipher.PubKeys - // persistent flags (without viper references) - skGen = false + // persistent flags envPrefix = defaultEnvPrefix - // root command flags (without viper references) + // root command flags confStdin = false - confPath = "" + confPath = "./config.json" ) // init prepares flags. -// Some flags are persistent, and some need to be bound with env/config references (via viper). func init() { // Prepare flags with env/config references. - // We will bind flags to associated viper values so that they can be set with envs and config file. 
- - rootCmd.PersistentFlags().Var(&sk, "sk", - "secret key of the dmsgpty-host") - rootCmd.PersistentFlags().StringVar(&wlPath, "wl", wlPath, - "path of json whitelist file (if unspecified, a memory whitelist will be used)") + rootCmd.PersistentFlags().Var(&wl, "wl", + "whitelist of the dmsgpty-host") rootCmd.PersistentFlags().StringVar(&dmsgDisc, "dmsgdisc", dmsgDisc, "dmsg discovery address") @@ -79,140 +72,233 @@ func init() { rootCmd.PersistentFlags().StringVar(&cliAddr, "cliaddr", cliAddr, "address used for listening for cli connections") - cmdutil.Catch(viper.BindPFlags(rootCmd.PersistentFlags())) // Bind above flags with env/config references. - // Prepare flags without associated env/config references. - rootCmd.PersistentFlags().BoolVar(&skGen, "skgen", skGen, - "if set, a random secret key will be generated") - rootCmd.PersistentFlags().StringVar(&envPrefix, "envprefix", envPrefix, "env prefix") rootCmd.Flags().BoolVar(&confStdin, "confstdin", confStdin, "config will be read from stdin if set") - rootCmd.Flags().StringVar(&confPath, "confpath", confPath, + rootCmd.Flags().StringVarP(&confPath, "confpath", "c", confPath, "config path") } -// prepareVariables sources variables in the following precedence order: flags, env, config, default. -// -// The following actions are performed: -// - Prepare how envs are sourced. -// - Prepare how config is to be sourced. -// - Grab final values of variables. -// Viper uses the following precedence order: flags, env, config, default. -// Source: https://github.com/spf13/viper#why-viper -// -// Panics are called via `cmdutil.Catch` or `cmdutil.CatchWithMsg`. -// These are recovered in a defer statement where the help message is printed. -func prepareVariables(cmd *cobra.Command, _ []string) { - - // Recover and print help on panic. - defer func() { - if r := recover(); r != nil { - cmd.PrintErrln("Error:", r) - fmt.Print("Help:\n ") - if err := cmd.Help(); err != nil { - panic(err) - } - os.Exit(1) - } - }() - - // Prepare how ENVs are sourced. 
- viper.SetEnvPrefix(envPrefix) - viper.AutomaticEnv() +func configFromJSON(conf dmsgpty.Config) (dmsgpty.Config, error) { + var jsonConf dmsgpty.Config + if confStdin { + if err := json.NewDecoder(os.Stdin).Decode(&jsonConf); err != nil { + return dmsgpty.Config{}, fmt.Errorf("flag 'confstdin' is set, but config read from stdin is invalid: %w", err) + } + } + + if confPath != "" { + f, err := os.Open(confPath) + if err != nil { + return dmsgpty.Config{}, fmt.Errorf("failed to open config file: %w", err) + } + if err := json.NewDecoder(f).Decode(&jsonConf); err != nil { + return dmsgpty.Config{}, fmt.Errorf("flag 'confpath' is set, but we failed to read config from specified path: %w", err) + } + } + + if jsonConf.SK != "" { + if err := sk.Set(jsonConf.SK); err != nil { + return dmsgpty.Config{}, fmt.Errorf("provided SK is invalid: %w", err) + } + } + + if !sk.Null() { + conf.SK = jsonConf.SK + } + + if jsonConf.PK != "" { + if err := pk.Set(jsonConf.PK); err != nil { + return dmsgpty.Config{}, fmt.Errorf("provided PK is invalid: %w", err) + } + } + + if !pk.Null() { + conf.PK = jsonConf.PK + } + + if len(jsonConf.WL) > 0 { + ustString := strings.Join(jsonConf.WL, ",") + if err := wl.Set(ustString); err != nil { + return dmsgpty.Config{}, fmt.Errorf("provided WL's are invalid: %w", err) + } + } + + if len(wl) > 0 { + conf.WL = jsonConf.WL + } + + if jsonConf.DmsgDisc != "" { + conf.DmsgDisc = jsonConf.DmsgDisc + } + + if conf.DmsgSessions != 0 { + conf.DmsgSessions = jsonConf.DmsgSessions + } + + if conf.DmsgPort != 0 { + conf.DmsgPort = jsonConf.DmsgPort + } + + if conf.CLINet != "" { + conf.CLINet = jsonConf.CLINet + } + + if conf.CLIAddr != "" { + conf.CLIAddr = jsonConf.CLIAddr + } + + return conf, nil +} + +func fillConfigFromENV(conf dmsgpty.Config) (dmsgpty.Config, error) { + + if val, ok := os.LookupEnv(envPrefix + "_DMSGDISC"); ok { + conf.DmsgDisc = val + } + + if val, ok := os.LookupEnv(envPrefix + "_DMSGSESSIONS"); ok { + dmsgSessions, err := strconv.Atoi(val) + if err != nil { + return conf, fmt.Errorf("failed to parse dmsg sessions: %w", err) + } + + conf.DmsgSessions = dmsgSessions + } + + if val, ok := os.LookupEnv(envPrefix + "_DMSGPORT"); ok { + dmsgPort, err := strconv.ParseUint(val, 10, 16) + if err != nil { + return conf, fmt.Errorf("failed to parse dmsg port: %w", err) + } + + conf.DmsgPort = uint16(dmsgPort) + } + + if val, ok := os.LookupEnv(envPrefix + "_CLINET"); ok { + conf.CLINet = val + } + + if val, ok := os.LookupEnv(envPrefix + "_CLIADDR"); ok { + conf.CLIAddr = val + } + + return conf, nil +} + +func fillConfigFromFlags(conf dmsgpty.Config) dmsgpty.Config { + if dmsgDisc != dmsg.DefaultDiscAddr { + conf.DmsgDisc = dmsgDisc + } + + if dmsgSessions != dmsg.DefaultMinSessions { + conf.DmsgSessions = dmsgSessions + } + + if dmsgPort != dmsgpty.DefaultPort { + conf.DmsgPort = dmsgPort + } + + if cliNet != dmsgpty.DefaultCLINet { + conf.CLINet = cliNet + } + + if cliAddr != dmsgpty.DefaultCLIAddr { + conf.CLIAddr = cliAddr + } + + return conf +} + +// getConfig sources variables in the following precedence order: flags, env, config, default. +func getConfig(cmd *cobra.Command, skGen bool) (dmsgpty.Config, error) { + conf := dmsgpty.DefaultConfig() + + var err error // Prepare how config file is sourced (if root command). 
if cmd.Name() == cmdutil.RootCmdName() { - viper.SetConfigName("config") - viper.SetConfigType("json") - if confStdin { - v := make(map[string]interface{}) - buf := new(bytes.Buffer) - cmdutil.CatchWithMsg("flag 'confstdin' is set, but config read from stdin is invalid", - json.NewDecoder(os.Stdin).Decode(&v), - json.NewEncoder(buf).Encode(v), - viper.ReadConfig(buf)) - } else if confPath != "" { - viper.SetConfigFile(confPath) - cmdutil.CatchWithMsg("flag 'confpath' is set, but we failed to read config from specified path", - viper.ReadInConfig()) + conf, err = configFromJSON(conf) + if err != nil { + return dmsgpty.Config{}, fmt.Errorf("failed to read config from JSON: %w", err) } } + conf, err = fillConfigFromENV(conf) + if err != nil { + return conf, fmt.Errorf("failed to fill config from ENV: %w", err) + } - // Grab final values of variables. - - // Grab secret key (from 'sk' and 'skgen' flags). if skGen { - if !sk.Null() { - log.Fatal("Values 'skgen' and 'sk' cannot be both set.") - } - var pk cipher.PubKey pk, sk = cipher.GenerateKeyPair() log.WithField("pubkey", pk). WithField("seckey", sk). Info("Generating key pair as 'skgen' is set.") - viper.Set("sk", sk) + conf.SK = sk.Hex() + conf.PK = pk.Hex() } - skStr := viper.GetString("sk") - cmdutil.CatchWithMsg("value 'seckey' is invalid", sk.Set(skStr)) + conf = fillConfigFromFlags(conf) - wlPath = viper.GetString("wl") - dmsgDisc = viper.GetString("dmsgdisc") - dmsgSessions = viper.GetInt("dmsgsessions") - dmsgPort = cast.ToUint16(viper.Get("dmsgport")) - cliNet = viper.GetString("clinet") - cliAddr = viper.GetString("cliaddr") + if sk.Null() { + return conf, fmt.Errorf("value 'seckey' is invalid") + } // Print values. pLog := logrus.FieldLogger(log) - cmd.Flags().VisitAll(func(flag *pflag.Flag) { - if v := viper.Get(flag.Name); v != nil { - pLog = pLog.WithField(flag.Name, v) - } - }) + pLog = pLog.WithField("dmsgdisc", conf.DmsgDisc) + pLog = pLog.WithField("dmsgsessions", conf.DmsgSessions) + pLog = pLog.WithField("dmsgport", conf.DmsgPort) + pLog = pLog.WithField("clinet", conf.CLINet) + pLog = pLog.WithField("cliaddr", conf.CLIAddr) + pLog = pLog.WithField("pk", conf.PK) + pLog = pLog.WithField("wl", conf.WL) pLog.Info("Init complete.") + + return conf, nil } var rootCmd = &cobra.Command{ Use: cmdutil.RootCmdName(), Short: "runs a standalone dmsgpty-host instance", - PreRun: prepareVariables, - Run: func(cmd *cobra.Command, args []string) { + PreRun: func(cmd *cobra.Command, args []string) {}, + RunE: func(cmd *cobra.Command, args []string) error { + conf, err := getConfig(cmd, false) + if err != nil { + return fmt.Errorf("failed to get config: %w", err) + } + if _, err := buildinfo.Get().WriteTo(stdlog.Writer()); err != nil { log.Printf("Failed to output build info: %v", err) } - log := logging.MustGetLogger("dmsgpty-host") - ctx, cancel := cmdutil.SignalContext(context.Background(), log) defer cancel() pk, err := sk.PubKey() - cmdutil.CatchWithLog(log, "failed to derive public key from secret key", err) + if err != nil { + return fmt.Errorf("failed to derive public key from secret key: %w", err) + } // Prepare and serve dmsg client and wait until ready. 
- dmsgC := dmsg.NewClient(pk, sk, disc.NewHTTP(dmsgDisc), &dmsg.Config{ - MinSessions: dmsgSessions, + dmsgC := dmsg.NewClient(pk, sk, disc.NewHTTP(conf.DmsgDisc), &dmsg.Config{ + MinSessions: conf.DmsgSessions, }) go dmsgC.Serve(context.Background()) select { case <-ctx.Done(): - cmdutil.CatchWithLog(log, "failed to wait unti dmsg client to be ready", ctx.Err()) + return fmt.Errorf("failed to wait dmsg client to be ready: %w", ctx.Err()) case <-dmsgC.Ready(): } // Prepare whitelist. - var wl dmsgpty.Whitelist - if wlPath == "" { - wl = dmsgpty.NewMemoryWhitelist() - } else { - var err error - wl, err = dmsgpty.NewJSONFileWhiteList(wlPath) - cmdutil.CatchWithLog(log, "failed to init whitelist", err) + // var wl dmsgpty.Whitelist + wl, err := dmsgpty.NewConfigWhitelist(confPath) + if err != nil { + return fmt.Errorf("failed to init whitelist: %w", err) } // Prepare dmsgpty host. @@ -221,11 +307,13 @@ var rootCmd = &cobra.Command{ wg.Add(2) // Prepare CLI. - if cliNet == "unix" { - _ = os.Remove(cliAddr) //nolint:errcheck + if conf.CLINet == "unix" { + _ = os.Remove(conf.CLIAddr) //nolint:errcheck + } + cliL, err := net.Listen(conf.CLINet, conf.CLIAddr) + if err != nil { + return fmt.Errorf("failed to serve CLI: %w", err) } - cliL, err := net.Listen(cliNet, cliAddr) - cmdutil.CatchWithLog(log, "failed to serve CLI", err) log.WithField("addr", cliL.Addr()).Info("Listening for CLI connections.") go func() { log.WithError(host.ServeCLI(ctx, cliL)). @@ -234,15 +322,16 @@ var rootCmd = &cobra.Command{ }() // Serve dmsgpty. - log.WithField("port", dmsgPort). + log.WithField("port", conf.DmsgPort). Info("Listening for dmsg streams.") go func() { - log.WithError(host.ListenAndServe(ctx, dmsgPort)). + log.WithError(host.ListenAndServe(ctx, conf.DmsgPort)). Info("Stopped serving dmsgpty-host.") wg.Done() }() wg.Wait() + return nil }, } diff --git a/cmdutil/service_flags.go b/cmdutil/service_flags.go index 94d07bb17..6fbaa75e5 100644 --- a/cmdutil/service_flags.go +++ b/cmdutil/service_flags.go @@ -5,18 +5,13 @@ import ( "fmt" "io" "io/ioutil" - "log/syslog" "os" "strings" "unicode" jsoniter "github.com/json-iterator/go" - "github.com/sirupsen/logrus" - logrussyslog "github.com/sirupsen/logrus/hooks/syslog" "github.com/skycoin/skycoin/src/util/logging" "github.com/spf13/cobra" - - "github.com/skycoin/dmsg/discord" ) // Associated errors. @@ -134,20 +129,7 @@ func (sf *ServiceFlags) Logger() *logging.Logger { logging.SetLevel(logLvl) if sf.Syslog != "" { - hook, err := logrussyslog.NewSyslogHook(sf.SyslogNet, sf.Syslog, sysLvl, sf.Tag) - if err != nil { - log.WithError(err). - WithField("net", sf.SyslogNet). - WithField("addr", sf.Syslog). - Fatal("Failed to connect to syslog daemon.") - } - logging.AddHook(hook) - } - - if discordWebhookURL := discord.GetWebhookURLFromEnv(); discordWebhookURL != "" { - discordOpts := discord.GetDefaultOpts() - hook := discord.NewHook(sf.Tag, discordWebhookURL, discordOpts...) - logging.AddHook(hook) + sf.sysLogHook(log, sysLvl) } return log @@ -155,13 +137,13 @@ func (sf *ServiceFlags) Logger() *logging.Logger { // ParseConfig parses config from service tags. // If checkArgs is set, we additionally parse os.Args to find a config path. 
-func (sf *ServiceFlags) ParseConfig(args []string, checkArgs bool, v interface{}) error { - r, err := sf.obtainConfigReader(args, checkArgs) +func (sf *ServiceFlags) ParseConfig(args []string, checkArgs bool, v interface{}, genDefaultFunc func() (io.ReadCloser, error)) error { + r, err := sf.obtainConfigReader(args, checkArgs, genDefaultFunc) if err != nil { return err } defer func() { - if err := r.Close(); err != nil { + if err = r.Close(); err != nil { sf.logger.WithError(err).Warn("Failed to close config source.") } }() @@ -171,7 +153,7 @@ func (sf *ServiceFlags) ParseConfig(args []string, checkArgs bool, v interface{} return fmt.Errorf("failed to read from config source: %w", err) } - if err := json.Unmarshal(b, v); err != nil { + if err = json.Unmarshal(b, v); err != nil { return fmt.Errorf("failed to decode config file: %w", err) } @@ -184,7 +166,7 @@ func (sf *ServiceFlags) ParseConfig(args []string, checkArgs bool, v interface{} return nil } -func (sf *ServiceFlags) obtainConfigReader(args []string, checkArgs bool) (io.ReadCloser, error) { +func (sf *ServiceFlags) obtainConfigReader(args []string, checkArgs bool, genDefaultFunc func() (io.ReadCloser, error)) (io.ReadCloser, error) { switch { case sf.Stdin || strings.ToLower(sf.Config) == stdinConfig: stdin := ioutil.NopCloser(os.Stdin) // ensure stdin is not closed @@ -192,12 +174,14 @@ func (sf *ServiceFlags) obtainConfigReader(args []string, checkArgs bool) (io.Re case checkArgs: if len(args) == 1 { - break + return genDefaultFunc() } for i, arg := range args { if strings.HasSuffix(arg, ".json") && i > 0 && !strings.HasPrefix(args[i-1], "-") { - f, err := os.Open(arg) //nolint:gosec + var f io.ReadCloser + var err error + f, err = os.Open(arg) //nolint:gosec if err != nil { return nil, fmt.Errorf("failed to open config file: %w", err) } @@ -211,6 +195,7 @@ func (sf *ServiceFlags) obtainConfigReader(args []string, checkArgs bool) (io.Re return nil, fmt.Errorf("failed to open config file: %w", err) } return f, nil + } return nil, errors.New("no config location specified") @@ -247,26 +232,6 @@ func ValidTag(tag string) error { return nil } -// LevelFromString returns a logrus.Level and syslog.Priority from a string identifier. -func LevelFromString(s string) (logrus.Level, syslog.Priority, error) { - switch strings.ToLower(s) { - case "debug": - return logrus.DebugLevel, syslog.LOG_DEBUG, nil - case "info", "notice": - return logrus.InfoLevel, syslog.LOG_INFO, nil - case "warn", "warning": - return logrus.WarnLevel, syslog.LOG_WARNING, nil - case "error": - return logrus.ErrorLevel, syslog.LOG_ERR, nil - case "fatal", "critical": - return logrus.FatalLevel, syslog.LOG_CRIT, nil - case "panic": - return logrus.PanicLevel, syslog.LOG_EMERG, nil - default: - return logrus.DebugLevel, syslog.LOG_DEBUG, ErrInvalidLogString - } -} - func alreadyDone(done *bool) bool { if *done { return true diff --git a/cmdutil/signal_context.go b/cmdutil/signal_context.go index 950cad181..56291a482 100644 --- a/cmdutil/signal_context.go +++ b/cmdutil/signal_context.go @@ -4,7 +4,6 @@ import ( "context" "os" "os/signal" - "syscall" "github.com/sirupsen/logrus" ) @@ -18,7 +17,8 @@ func SignalContext(ctx context.Context, log logrus.FieldLogger) (context.Context ctx, cancel := context.WithCancel(ctx) ch := make(chan os.Signal) - signal.Notify(ch, []os.Signal{syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT}...) + listenSigs := listenSignals() + signal.Notify(ch, listenSigs...) 
go func() { select { diff --git a/cmdutil/signal_unix.go b/cmdutil/signal_unix.go new file mode 100644 index 000000000..c2978a761 --- /dev/null +++ b/cmdutil/signal_unix.go @@ -0,0 +1,13 @@ +// +build !windows + +package cmdutil + +import ( + "os" + + "golang.org/x/sys/unix" +) + +func listenSignals() []os.Signal { + return []os.Signal{unix.SIGINT, unix.SIGTERM, unix.SIGQUIT} +} diff --git a/cmdutil/signal_windows.go b/cmdutil/signal_windows.go new file mode 100644 index 000000000..4dff9d835 --- /dev/null +++ b/cmdutil/signal_windows.go @@ -0,0 +1,13 @@ +// +build windows + +package cmdutil + +import ( + "os" + + "golang.org/x/sys/windows" +) + +func listenSignals() []os.Signal { + return []os.Signal{windows.SIGINT, windows.SIGTERM, windows.SIGQUIT} +} diff --git a/cmdutil/sysloghook_unix.go b/cmdutil/sysloghook_unix.go new file mode 100644 index 000000000..49eee1a72 --- /dev/null +++ b/cmdutil/sysloghook_unix.go @@ -0,0 +1,43 @@ +// +build !windows + +package cmdutil + +import ( + "log/syslog" + "strings" + + "github.com/sirupsen/logrus" + logrussyslog "github.com/sirupsen/logrus/hooks/syslog" + "github.com/skycoin/skycoin/src/util/logging" +) + +func (sf *ServiceFlags) sysLogHook(log *logging.Logger, sysLvl int) { + hook, err := logrussyslog.NewSyslogHook(sf.SyslogNet, sf.Syslog, syslog.Priority(sysLvl), sf.Tag) + if err != nil { + log.WithError(err). + WithField("net", sf.SyslogNet). + WithField("addr", sf.Syslog). + Fatal("Failed to connect to syslog daemon.") + } + logging.AddHook(hook) +} + +// LevelFromString returns a logrus.Level and syslog.Priority from a string identifier. +func LevelFromString(s string) (logrus.Level, int, error) { + switch strings.ToLower(s) { + case "debug": + return logrus.DebugLevel, int(syslog.LOG_DEBUG), nil + case "info", "notice": + return logrus.InfoLevel, int(syslog.LOG_INFO), nil + case "warn", "warning": + return logrus.WarnLevel, int(syslog.LOG_WARNING), nil + case "error": + return logrus.ErrorLevel, int(syslog.LOG_ERR), nil + case "fatal", "critical": + return logrus.FatalLevel, int(syslog.LOG_CRIT), nil + case "panic": + return logrus.PanicLevel, int(syslog.LOG_EMERG), nil + default: + return logrus.DebugLevel, int(syslog.LOG_DEBUG), ErrInvalidLogString + } +} diff --git a/cmdutil/sysloghook_windows.go b/cmdutil/sysloghook_windows.go new file mode 100644 index 000000000..b2cfe7486 --- /dev/null +++ b/cmdutil/sysloghook_windows.go @@ -0,0 +1,33 @@ +// +build windows + +package cmdutil + +import ( + "strings" + + "github.com/sirupsen/logrus" + "github.com/skycoin/skycoin/src/util/logging" +) + +func (sf *ServiceFlags) sysLogHook(_ *logging.Logger, _ int) { +} + +// LevelFromString returns a logrus.Level and syslog.Priority from a string identifier. +func LevelFromString(s string) (logrus.Level, int, error) { + switch strings.ToLower(s) { + case "debug": + return logrus.DebugLevel, 0, nil + case "info", "notice": + return logrus.InfoLevel, 0, nil + case "warn", "warning": + return logrus.WarnLevel, 0, nil + case "error": + return logrus.ErrorLevel, 0, nil + case "fatal", "critical": + return logrus.FatalLevel, 0, nil + case "panic": + return logrus.PanicLevel, 0, nil + default: + return logrus.DebugLevel, 0, ErrInvalidLogString + } +} diff --git a/const.go b/const.go index ac7e69a58..60500f989 100644 --- a/const.go +++ b/const.go @@ -4,12 +4,11 @@ import "time" // Constants. 
const ( - // TODO(evanlinjin): Reference the production address on release - DefaultDiscAddr = "http://dmsg.discovery.skywire.cc" + DefaultDiscAddr = "http://dmsg.discovery.skywire.skycoin.com" DefaultMinSessions = 1 - DefaultUpdateInterval = time.Second * 15 + DefaultUpdateInterval = time.Minute DefaultMaxSessions = 100 ) diff --git a/disc/client.go b/disc/client.go index 8632d09db..ba1fa19ab 100644 --- a/disc/client.go +++ b/disc/client.go @@ -57,8 +57,6 @@ func (c *httpClient) Entry(ctx context.Context, publicKey cipher.PubKey) (*Entry return nil, err } - addKeepAlive(req) - req = req.WithContext(ctx) resp, err := c.client.Do(req) @@ -106,7 +104,6 @@ func (c *httpClient) PostEntry(ctx context.Context, e *Entry) error { return err } - addKeepAlive(req) req.Header.Set("Content-Type", "application/json") // Since v0.3.0 visors send ?timeout=true, before v0.3.0 do not. @@ -190,7 +187,7 @@ func (c *httpClient) AvailableServers(ctx context.Context) ([]*Entry, error) { if err != nil { return nil, err } - addKeepAlive(req) + req = req.WithContext(ctx) resp, err := c.client.Do(req) @@ -223,7 +220,3 @@ func (c *httpClient) AvailableServers(ctx context.Context) ([]*Entry, error) { return entries, nil } - -func addKeepAlive(req *http.Request) { - req.Header.Add("Connection", "keep-alive") -} diff --git a/discord/hook.go b/discord/hook.go deleted file mode 100644 index 514ee3dad..000000000 --- a/discord/hook.go +++ /dev/null @@ -1,120 +0,0 @@ -package discord - -import ( - "os" - "time" - - "github.com/kz/discordrus" - "github.com/sirupsen/logrus" -) - -const webhookURLEnvName = "DISCORD_WEBHOOK_URL" - -const ( - loggedLevel = logrus.ErrorLevel - startStopLogLevel = logrus.InfoLevel -) - -const ( - // StartLogMessage defines a message on binary starting. - StartLogMessage = "Starting" - // StopLogMessage defines a message on binary stopping. - StopLogMessage = "Stopping" -) - -// DefaultRateLimiterThreshold defines default rate limiter threshold. -const DefaultRateLimiterThreshold = 10 * time.Minute - -// Hook is a Discord logger hook. -type Hook struct { - *discordrus.Hook - limit time.Duration - timestamps map[string]time.Time -} - -// Option defines an option for Discord logger hook. -type Option func(*Hook) - -// WithLimit enables logger rate limiter with specified limit. -func WithLimit(limit time.Duration) Option { - return func(h *Hook) { - h.limit = limit - h.timestamps = make(map[string]time.Time) - } -} - -// WithAuthor sets log entry author. -func WithAuthor(author string) Option { - return func(h *Hook) { - h.Hook.Opts.Author = author - } -} - -// NewHook returns a new Hook. -func NewHook(tag, webHookURL string, opts ...Option) logrus.Hook { - parent := discordrus.NewHook(webHookURL, loggedLevel, discordOpts(tag)) - - hook := &Hook{ - Hook: parent, - } - - for _, opt := range opts { - opt(hook) - } - - return hook -} - -// Fire checks whether rate is fine and fires the underlying hook. -func (h *Hook) Fire(entry *logrus.Entry) error { - switch entry.Message { - case StartLogMessage, StopLogMessage: - // Start and stop messages should be logged by Hook but they should have Info level. - // With Info level, they would not be passed to hook. - // So we can use Error level in the codebase and change level to Info in the hook, - // then it appears as Info in logs. 
- entry.Level = startStopLogLevel - } - - if h.shouldFire(entry) { - return h.Hook.Fire(entry) - } - - return nil -} - -func (h *Hook) shouldFire(entry *logrus.Entry) bool { - if h.limit != 0 && h.timestamps != nil { - v, ok := h.timestamps[entry.Message] - if ok && entry.Time.Sub(v) < h.limit { - return false - } - - h.timestamps[entry.Message] = entry.Time - } - - return true -} - -func discordOpts(tag string) *discordrus.Opts { - return &discordrus.Opts{ - Username: tag, - TimestampFormat: time.RFC3339, - TimestampLocale: time.UTC, - } -} - -// GetWebhookURLFromEnv extracts webhook URL from an environment variable. -func GetWebhookURLFromEnv() string { - return os.Getenv(webhookURLEnvName) -} - -// GetDefaultOpts returns default options. -func GetDefaultOpts() []Option { - hostname, err := os.Hostname() - if err != nil { - hostname = "" - } - - return []Option{WithLimit(DefaultRateLimiterThreshold), WithAuthor(hostname)} -} diff --git a/discord/hook_test.go b/discord/hook_test.go deleted file mode 100644 index e0fada3ab..000000000 --- a/discord/hook_test.go +++ /dev/null @@ -1,73 +0,0 @@ -package discord - -import ( - "testing" - "time" - - "github.com/sirupsen/logrus" - "github.com/stretchr/testify/assert" -) - -func TestHook_shouldFire(t *testing.T) { - hook := &Hook{ - limit: 1 * time.Millisecond, - timestamps: make(map[string]time.Time), - } - - ts := time.Now() - - tests := []struct { - name string - message string - timestamp time.Time - want bool - }{ - { - name: "Case 1", - message: "Message 1", - timestamp: ts, - want: true, - }, - { - name: "Case 2", - message: "Message 2", - timestamp: ts, - want: true, - }, - { - name: "Case 3", - message: "Message 1", - timestamp: ts, - want: false, - }, - { - name: "Case 4", - message: "Message 1", - timestamp: ts.Add(500 * time.Microsecond), - want: false, - }, - { - name: "Case 5", - message: "Message 1", - timestamp: ts.Add(1500 * time.Microsecond), - want: true, - }, - { - name: "Case 6", - message: "Message 1", - timestamp: ts.Add(2000 * time.Microsecond), - want: false, - }, - } - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - entry := &logrus.Entry{ - Time: tt.timestamp, - Message: tt.message, - } - - assert.Equal(t, tt.want, hook.shouldFire(entry)) - }) - } -} diff --git a/dmsgget/dmsgget.go b/dmsgget/dmsgget.go index 6ec508bcb..b159e5063 100644 --- a/dmsgget/dmsgget.go +++ b/dmsgget/dmsgget.go @@ -119,7 +119,7 @@ func (dg *DmsgGet) Run(ctx context.Context, log logrus.FieldLogger, skStr string return fmt.Errorf("failed to reset file: %w", err) } - if err := Download(log, &httpC, file, u.URL.String()); err != nil { + if err := Download(ctx, log, &httpC, file, u.URL.String()); err != nil { log.WithError(err).Error() select { case <-ctx.Done(): @@ -215,7 +215,7 @@ func (dg *DmsgGet) startDmsg(ctx context.Context, log logrus.FieldLogger, pk cip } // Download downloads a file from the given URL into 'w'. 
-func Download(log logrus.FieldLogger, httpC *http.Client, w io.Writer, urlStr string) error { +func Download(ctx context.Context, log logrus.FieldLogger, httpC *http.Client, w io.Writer, urlStr string) error { req, err := http.NewRequest(http.MethodGet, urlStr, nil) if err != nil { log.WithError(err).Fatal("Failed to formulate HTTP request.") @@ -225,15 +225,40 @@ func Download(log logrus.FieldLogger, httpC *http.Client, w io.Writer, urlStr st if err != nil { return fmt.Errorf("failed to connect to HTTP server: %w", err) } + n, err := CancellableCopy(ctx, w, resp.Body, resp.ContentLength) + if err != nil { + return fmt.Errorf("download failed at %d/%dB: %w", n, resp.ContentLength, err) + } defer func() { if err := resp.Body.Close(); err != nil { log.WithError(err).Warn("HTTP Response body closed with non-nil error.") } }() - n, err := io.Copy(io.MultiWriter(w, &ProgressWriter{Total: resp.ContentLength}), resp.Body) - if err != nil { - return fmt.Errorf("download failed at %d/%dB: %w", n, resp.ContentLength, err) - } return nil } + +type readerFunc func(p []byte) (n int, err error) + +func (rf readerFunc) Read(p []byte) (n int, err error) { return rf(p) } + +// CancellableCopy will call the Reader and Writer interface multiple time, in order +// to copy by chunk (avoiding loading the whole file in memory). +func CancellableCopy(ctx context.Context, w io.Writer, body io.ReadCloser, length int64) (int64, error) { + + n, err := io.Copy(io.MultiWriter(w, &ProgressWriter{Total: length}), readerFunc(func(p []byte) (int, error) { + + // golang non-blocking channel: https://gobyexample.com/non-blocking-channel-operations + select { + + // if context has been canceled + case <-ctx.Done(): + // stop process and propagate "Download Canceled" error + return 0, errors.New("Download Canceled") + default: + // otherwise just run default io.Reader implementation + return body.Read(p) + } + })) + return n, err +} diff --git a/dmsgget/dmsgget_test.go b/dmsgget/dmsgget_test.go index b583902d0..5139f778d 100644 --- a/dmsgget/dmsgget_test.go +++ b/dmsgget/dmsgget_test.go @@ -17,6 +17,7 @@ import ( "github.com/skycoin/dmsg" "github.com/skycoin/dmsg/cipher" + "github.com/skycoin/dmsg/cmdutil" "github.com/skycoin/dmsg/disc" "github.com/skycoin/dmsg/dmsghttp" ) @@ -63,7 +64,9 @@ func TestDownload(t *testing.T) { for i := 0; i < dlClients; i++ { func(i int) { log := logging.MustGetLogger(fmt.Sprintf("dl_client_%d", i)) - err := Download(log, newHTTPClient(t, dc), dsts[i], hsAddr) + ctx, cancel := cmdutil.SignalContext(context.Background(), log) + defer cancel() + err := Download(ctx, log, newHTTPClient(t, dc), dsts[i], hsAddr) errs[i] <- err close(errs[i]) diff --git a/dmsgpty/cli.go b/dmsgpty/cli.go index f460ad677..c2d7e0175 100644 --- a/dmsgpty/cli.go +++ b/dmsgpty/cli.go @@ -12,7 +12,7 @@ import ( "github.com/creack/pty" "github.com/sirupsen/logrus" "github.com/skycoin/skycoin/src/util/logging" - "golang.org/x/crypto/ssh/terminal" + terminal "golang.org/x/term" "github.com/skycoin/dmsg/cipher" ) diff --git a/dmsgpty/conf.go b/dmsgpty/conf.go new file mode 100644 index 000000000..84f2a7a2d --- /dev/null +++ b/dmsgpty/conf.go @@ -0,0 +1,42 @@ +package dmsgpty + +import ( + "fmt" + "os" + + "github.com/skycoin/dmsg" +) + +// Config struct is used to read the values from the config.json file +type Config struct { + DmsgDisc string `json:"dmsgdisc"` + DmsgSessions int `json:"dmsgsessions"` + DmsgPort uint16 `json:"dmsgport"` + CLINet string `json:"clinet"` + CLIAddr string `json:"cliaddr"` + SK string `json:"sk"` + 
PK string `json:"pk"` + WL []string `json:"wl"` +} + +// DefaultConfig is used to populate the config struct with its default values +func DefaultConfig() Config { + return Config{ + DmsgDisc: dmsg.DefaultDiscAddr, + DmsgSessions: dmsg.DefaultMinSessions, + DmsgPort: DefaultPort, + CLINet: DefaultCLINet, + CLIAddr: DefaultCLIAddr, + } +} + +// WriteConfig write the config struct to the provided path +func WriteConfig(conf Config, path string) error { + f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) //nolint:gosec + if err != nil { + return fmt.Errorf("failed to open config file: %w", err) + } + enc := json.NewEncoder(f) + enc.SetIndent("", " ") + return enc.Encode(&conf) +} diff --git a/dmsgpty/host_test.go b/dmsgpty/host_test.go index d480162ed..83bdd3ad0 100644 --- a/dmsgpty/host_test.go +++ b/dmsgpty/host_test.go @@ -1,36 +1,55 @@ package dmsgpty +import ( + "context" + "fmt" + "io" + "io/ioutil" + "net" + "os" + "testing" + + "github.com/stretchr/testify/require" + "golang.org/x/net/nettest" + + "github.com/skycoin/skycoin/src/util/logging" + + "github.com/skycoin/dmsg" + "github.com/skycoin/dmsg/cipher" + "github.com/skycoin/dmsg/dmsgtest" +) + // TODO(evanlinjin): fix failing tests -/* + func TestHost(t *testing.T) { const port = uint16(22) - defaultConf := dmsg.Config{MinSessions: 2} // Prepare dmsg env. env := dmsgtest.NewEnv(t, dmsgtest.DefaultTimeout) - require.NoError(t, env.Startup(2, 2, 1, &defaultConf)) + defaultConf := dmsg.Config{MinSessions: 2} + require.NoError(t, env.Startup(dmsgtest.DefaultTimeout, 2, 2, &defaultConf)) + t.Cleanup(env.Shutdown) dcA := env.AllClients()[0] dcB := env.AllClients()[1] - // Prepare whitelist. - wl, delWhitelist := tempWhitelist(t) - require.NoError(t, wl.Add(dcA.LocalPK())) - require.NoError(t, wl.Add(dcB.LocalPK())) - + // Prepare whitelists. + wlA, delWhitelistA := tempWhitelist(t, dcA) + wlB, delWhitelistB := tempWhitelist(t, dcB) + require.NoError(t, wlB.Add(dcA.LocalPK())) + require.NoError(t, wlA.Add(dcB.LocalPK())) t.Run("serveConn_whitelist", func(t *testing.T) { ctx, cancel := context.WithCancel(context.TODO()) connH, connC := net.Pipe() - - host := NewHost(dcA, wl) + host := NewHost(dcA, wlA) hMux := cliEndpoints(host) go host.serveConn(ctx, logging.MustGetLogger("host_conn"), &hMux, connH) - wlC, err := NewWhitelistClient(connC) + wlCli, err := NewWhitelistClient(connC) require.NoError(t, err) - checkWhitelist(t, wlC, 2, 10) + checkWhitelist(t, wlCli, 1, 10) // Closing logic. 
cancel() @@ -43,7 +62,7 @@ func TestHost(t *testing.T) { connH, connC := net.Pipe() - host := NewHost(dcA, wl) + host := NewHost(dcA, wlA) hMux := cliEndpoints(host) go host.serveConn(ctx, logging.MustGetLogger("host_conn"), &hMux, connH) @@ -63,14 +82,14 @@ func TestHost(t *testing.T) { connB, connCLI := net.Pipe() - hostA := NewHost(dcA, wl) + hostA := NewHost(dcA, wlA) errA := make(chan error, 1) go func() { errA <- hostA.ListenAndServe(ctx, port) close(errA) }() - hostB := NewHost(dcB, wl) + hostB := NewHost(dcB, wlB) hBMux := cliEndpoints(hostB) go hostB.serveConn(ctx, logging.MustGetLogger("hostB_conn"), &hBMux, connB) @@ -92,14 +111,14 @@ func TestHost(t *testing.T) { cliL, err := nettest.NewLocalListener("tcp") require.NoError(t, err) - hostA := NewHost(dcA, wl) + hostA := NewHost(dcA, wlA) errA := make(chan error, 1) go func() { errA <- hostA.ListenAndServe(ctx, port) close(errA) }() - hostB := NewHost(dcB, wl) + hostB := NewHost(dcB, wlB) errB := make(chan error, 1) go func() { errB <- hostB.ServeCLI(ctx, cliL) @@ -113,10 +132,10 @@ func TestHost(t *testing.T) { } t.Run("endpoint_whitelist", func(t *testing.T) { - wlC, err := cliB.WhitelistClient() + wlCli, err := cliB.WhitelistClient() require.NoError(t, err) - checkWhitelist(t, wlC, 2, 10) + checkWhitelist(t, wlCli, 1, 10) }) t.Run("endpoint_pty", func(t *testing.T) { @@ -152,11 +171,12 @@ func TestHost(t *testing.T) { dcC, err := env.NewClient(&defaultConf) require.NoError(t, err) + wlC, delWhitelistC := tempWhitelist(t, dcC) lisC, err := nettest.NewLocalListener("tcp") require.NoError(t, err) ctx, cancel := context.WithCancel(ctx) - hostC := NewHost(dcC, wl) + hostC := NewHost(dcC, wlC) cErr := make(chan error, 1) go func() { cErr <- hostC.ServeCLI(ctx, lisC) @@ -177,6 +197,7 @@ func TestHost(t *testing.T) { // Closing logic. cancel() + delWhitelistC() require.NoError(t, <-cErr) }) @@ -187,18 +208,24 @@ func TestHost(t *testing.T) { }) // Closing logic. 
- delWhitelist() + delWhitelistA() + delWhitelistB() env.Shutdown() } -func tempWhitelist(t *testing.T) (Whitelist, func()) { +func tempWhitelist(t *testing.T, c *dmsg.Client) (Whitelist, func()) { f, err := ioutil.TempFile(os.TempDir(), "") require.NoError(t, err) fName := f.Name() require.NoError(t, f.Close()) - wl, err := NewJSONFileWhiteList(fName) + conf := getConfig(c) + err = WriteConfig(conf, fName) + require.NoError(t, err) + + t.Log(fName) + wl, err := NewConfigWhitelist(fName) require.NoError(t, err) return wl, func() { @@ -218,27 +245,41 @@ func checkPty(t *testing.T, ptyC *PtyClient, msg string) { require.NoError(t, ptyC.Stop()) } -func checkWhitelist(t *testing.T, wlC *WhitelistClient, initN, rounds int) { - pks, err := wlC.ViewWhitelist() +func checkWhitelist(t *testing.T, wlCli *WhitelistClient, initN, rounds int) { + pks, err := wlCli.ViewWhitelist() require.NoError(t, err) require.Len(t, pks, initN) newPKS := make([]cipher.PubKey, rounds) for i := 0; i < rounds; i++ { pk, _ := cipher.GenerateKeyPair() - require.NoError(t, wlC.WhitelistAdd(pk), i) + require.NoError(t, wlCli.WhitelistAdd(pk), i) newPKS[i] = pk - pks, err := wlC.ViewWhitelist() + pks, err := wlCli.ViewWhitelist() require.NoError(t, err) require.Len(t, pks, initN+i+1) } for i, newPK := range newPKS { - require.NoError(t, wlC.WhitelistRemove(newPK)) + require.NoError(t, wlCli.WhitelistRemove(newPK)) - pks, err := wlC.ViewWhitelist() + pks, err := wlCli.ViewWhitelist() require.NoError(t, err) require.Len(t, pks, initN+len(newPKS)-i-1) } } -*/ + +func getConfig(c *dmsg.Client) Config { + conf := DefaultConfig() + conf.SK = c.LocalSK().Hex() + conf.PK = c.LocalPK().Hex() + return conf +} + +// NewHost creates a new dmsgpty.Host with a given dmsg.Client and whitelist. +// func ewHost(dmsgC *dmsg.Client, wl Whitelist) *Host { +// host := new(Host) +// host.dmsgC = dmsgC +// host.wl = wl +// return host +// } diff --git a/dmsgpty/ui.go b/dmsgpty/ui.go index 5315a615b..bf1b5d4a1 100644 --- a/dmsgpty/ui.go +++ b/dmsgpty/ui.go @@ -156,6 +156,8 @@ func (ui *UI) Handler() http.HandlerFunc { defer func() { log.WithError(ptyC.Close()).Debug("Closed ptyC.") }() if err := ptyC.StartWithSize(ui.conf.CmdName, ui.conf.CmdArgs, &pty.Winsize{Rows: wsRows, Cols: wsCols}); err != nil { + log.Print("xxxx") + writeWSError(log, wsConn, err) return } diff --git a/dmsgpty/whitelist.go b/dmsgpty/whitelist.go index c679d31a6..027cd27c3 100644 --- a/dmsgpty/whitelist.go +++ b/dmsgpty/whitelist.go @@ -1,9 +1,14 @@ package dmsgpty import ( + "errors" "fmt" + "io/fs" + "io/ioutil" + "log" "os" "path/filepath" + "strings" "sync" jsoniter "github.com/json-iterator/go" @@ -11,7 +16,10 @@ import ( "github.com/skycoin/dmsg/cipher" ) -var json = jsoniter.ConfigFastest +var ( + json = jsoniter.ConfigFastest + wl cipher.PubKeys +) // Whitelist represents a whitelist of public keys. type Whitelist interface { @@ -21,93 +29,182 @@ type Whitelist interface { Remove(pks ...cipher.PubKey) error } -// NewJSONFileWhiteList creates a JSON file implementation of a whitelist. -func NewJSONFileWhiteList(fileName string) (Whitelist, error) { - fileName, err := filepath.Abs(fileName) +// conf to update whitelists +var conf = Config{} + +// NewConfigWhitelist creates a config file implementation of a whitelist. 
+func NewConfigWhitelist(confPath string) (Whitelist, error) { + confPath, err := filepath.Abs(confPath) if err != nil { return nil, err } - - if err := os.MkdirAll(filepath.Dir(fileName), 0750); err != nil { + if err = os.MkdirAll(filepath.Dir(confPath), 0750); err != nil { return nil, err } - return &jsonFileWhitelist{fileName: fileName}, nil + return &configWhitelist{confPath: confPath}, nil } -type jsonFileWhitelist struct { - fileName string +type configWhitelist struct { + confPath string } -func (w *jsonFileWhitelist) Get(pk cipher.PubKey) (bool, error) { +func (w *configWhitelist) Get(pk cipher.PubKey) (bool, error) { var ok bool - err := w.open(os.O_RDONLY|os.O_CREATE, func(pkMap map[cipher.PubKey]bool, _ *os.File) error { - ok = pkMap[pk] - return nil - }) - return ok, jsonFileErr(err) + err := w.open() + if err != nil { + return ok, err + } + for _, k := range wl { + if k == pk { + ok = true + } + } + return ok, nil } -func (w *jsonFileWhitelist) All() (map[cipher.PubKey]bool, error) { - var out map[cipher.PubKey]bool - err := w.open(os.O_RDONLY|os.O_CREATE, func(pkMap map[cipher.PubKey]bool, _ *os.File) error { - out = pkMap - return nil - }) - return out, jsonFileErr(err) +func (w *configWhitelist) All() (map[cipher.PubKey]bool, error) { + err := w.open() + if err != nil { + return nil, err + } + out := make(map[cipher.PubKey]bool) + for _, k := range wl { + out[k] = true + } + return out, nil } -func (w *jsonFileWhitelist) Add(pks ...cipher.PubKey) error { - return jsonFileErr(w.open(os.O_RDWR|os.O_CREATE, func(pkMap map[cipher.PubKey]bool, f *os.File) error { - for _, pk := range pks { - pkMap[pk] = true +func (w *configWhitelist) Add(pks ...cipher.PubKey) error { + err := w.open() + if err != nil { + return err + } + // duplicate flag + var dFlag bool + + // append new pks to the whitelist slice within the config file + // for each pk to be added + var pke []string + for _, k := range pks { + + dFlag = false + // check if the pk already exists + for _, p := range wl { + + // if it does + if p == k { + // flag it + dFlag = true + pke = append(pke, p.String()) + fmt.Printf("skipping append for %v. Already exists", k) + break + } } - return json.NewEncoder(f).Encode(pkMap) - })) -} -func (w *jsonFileWhitelist) Remove(pks ...cipher.PubKey) error { - return jsonFileErr(w.open(os.O_RDWR|os.O_CREATE, func(pkMap map[cipher.PubKey]bool, f *os.File) error { - for _, pk := range pks { - delete(pkMap, pk) + // if pk does already not exist + if !dFlag { + // append it + wl = append(wl, k) + conf.WL = append(conf.WL, k.Hex()) } - return json.NewEncoder(f).Encode(pkMap) - })) + + } + + // write the changes back to the config file + err = updateFile(w.confPath) + if err != nil { + log.Println("unable to update config file") + return err + } + if len(pke) != 0 { + return errors.New("skipping append for " + strings.Join(pke, ",") + ". Already exists") + } + return nil } -func (w *jsonFileWhitelist) open(perm int, fn func(pkMap map[cipher.PubKey]bool, f *os.File) error) error { - f, err := os.OpenFile(w.fileName, perm, 0600) // nolint:gosec +func (w *configWhitelist) Remove(pks ...cipher.PubKey) error { + err := w.open() + if err != nil { + return err + } + + // for each pubkey to be removed + for _, k := range pks { + + // find occurrence of pubkey in config whitelist + for i := 0; i < len(wl); i++ { + + // if an occurrence is found + if k == wl[i] { + // remove element + wl = append(wl[:i], wl[i+1:]...) + conf.WL = append(conf.WL[:i], conf.WL[i+1:]...) 
+ break + } + } + } + // write changes back to the config file + err = updateFile(w.confPath) if err != nil { + log.Println("unable to update config file") return err } - defer func() { _ = f.Close() }() //nolint:errcheck + return nil +} - // get file size - info, err := f.Stat() +func (w *configWhitelist) open() error { + info, err := os.Stat(w.confPath) if err != nil { + if errors.Is(err, fs.ErrNotExist) { + _, err = os.Create(w.confPath) + if err != nil { + return err + } + } return err } - // read public key map from file - pks := make(map[cipher.PubKey]bool) - if info.Size() > 0 { - if err := json.NewDecoder(f).Decode(&pks); err != nil { + if info.Size() == 0 { + if err = updateFile(w.confPath); err != nil { return err } } - // seek back to start of file - if _, err := f.Seek(0, 0); err != nil { + // read file using ioutil + file, err := ioutil.ReadFile(w.confPath) + if err != nil { return err } - - return fn(pks, f) + // store config.json into conf to manipulate whitelists + err = json.Unmarshal(file, &conf) + if err != nil { + return err + } + // convert []string to cipher.PubKeys + if len(conf.WL) > 0 { + ustString := strings.Join(conf.WL, ",") + if err := wl.Set(ustString); err != nil { + return err + } + } + return nil } -func jsonFileErr(err error) error { +// updateFile writes changes to config file +func updateFile(confPath string) error { + + // marshal content + b, err := json.MarshalIndent(conf, "", " ") + if err != nil { + return err + } + // write to config file + err = ioutil.WriteFile(confPath, b, 0600) if err != nil { - return fmt.Errorf("json file whitelist: %v", err) + return err } + return nil } @@ -157,78 +254,3 @@ func (w *memoryWhitelist) Remove(pks ...cipher.PubKey) error { w.mux.Unlock() return nil } - -// NewCombinedWhitelist returns a combined whitelist. -// 'modI' defines the index of the internal whitelist in which Add and Remove operations are performed on. -// If 'modI < 0', Add and Remove operations are performed on all internal whitelists. -func NewCombinedWhitelist(modI int, lists ...Whitelist) Whitelist { - if modI >= len(lists) { - panic(fmt.Errorf("NewCombinedWhitelist: modI > len(lists)")) - } - return &combinedWhitelist{ - modI: modI, - lists: lists, - } -} - -type combinedWhitelist struct { - modI int - lists []Whitelist -} - -func (w *combinedWhitelist) Get(pk cipher.PubKey) (bool, error) { - for _, list := range w.lists { - ok, err := list.Get(pk) - if err != nil { - return false, err - } - if ok { - return true, nil - } - } - return false, nil -} - -func (w *combinedWhitelist) All() (map[cipher.PubKey]bool, error) { - all := make(map[cipher.PubKey]bool) - for _, list := range w.lists { - pks, err := list.All() - if err != nil { - return nil, err - } - for pk, ok := range pks { - if ok { - all[pk] = ok - } - } - } - return all, nil -} - -func (w *combinedWhitelist) Add(pks ...cipher.PubKey) error { - // Add to all internal whitelists if modI < 0 - if w.modI < 0 { - for _, list := range w.lists { - if err := list.Add(pks...); err != nil { - return err - } - } - return nil - } - // Otherwise, add to the specified internal whitelist at index. - return w.lists[w.modI].Add(pks...) -} - -func (w *combinedWhitelist) Remove(pks ...cipher.PubKey) error { - // Remove from all internal whitelists if modI < 0 - if w.modI < 0 { - for _, list := range w.lists { - if err := list.Remove(pks...); err != nil { - return err - } - } - return nil - } - // Otherwise, remove from the specified internal whitelist at index. - return w.lists[w.modI].Remove(pks...) 
-} diff --git a/docker/images/dmsg-discovery/Dockerfile b/docker/images/dmsg-discovery/Dockerfile index 7e2234eb6..2b37d2a41 100755 --- a/docker/images/dmsg-discovery/Dockerfile +++ b/docker/images/dmsg-discovery/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.13-alpine AS builder +FROM golang:1.16-alpine AS builder ARG CGO_ENABLED=0 ENV CGO_ENABLED=${CGO_ENABLED} \ @@ -7,14 +7,17 @@ ENV CGO_ENABLED=${CGO_ENABLED} \ GO111MODULE=on COPY . /dmsg - WORKDIR /dmsg # Build dmsg discovery -RUN go build -o /release/dmsg-discovery ./cmd/dmsg-discovery +RUN apk add --no-cache make bash git && \ + make build-deploy # Build image -FROM scratch +FROM alpine:latest COPY --from=builder /release/dmsg-discovery /usr/local/bin/dmsg-discovery + +EXPOSE 9090 +STOPSIGNAL SIGINT ENTRYPOINT [ "dmsg-discovery" ] diff --git a/docker/images/dmsg-server/Dockerfile b/docker/images/dmsg-server/Dockerfile index 6257f1cbb..58fc5a1af 100755 --- a/docker/images/dmsg-server/Dockerfile +++ b/docker/images/dmsg-server/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.13-alpine AS builder +FROM golang:1.16-alpine AS builder ARG CGO_ENABLED=0 ENV CGO_ENABLED=${CGO_ENABLED} \ @@ -7,14 +7,17 @@ ENV CGO_ENABLED=${CGO_ENABLED} \ GO111MODULE=on COPY . /dmsg - WORKDIR /dmsg # Build dmsg server -RUN go build -o /release/dmsg-server ./cmd/dmsg-server +RUN apk add --no-cache bash make git && \ + make build-deploy # Build image -FROM scratch +FROM alpine:latest COPY --from=builder /release/dmsg-server /usr/local/bin/dmsg-server -ENTRYPOINT [ "dmsg-server" ] + +STOPSIGNAL SIGINT + +ENTRYPOINT [ "sh", "-c", "dmsg-server" ] diff --git a/docker/images/dmsg-server/entrypoint.sh b/docker/images/dmsg-server/entrypoint.sh new file mode 100755 index 000000000..b1aafb920 --- /dev/null +++ b/docker/images/dmsg-server/entrypoint.sh @@ -0,0 +1,3 @@ +#!/usr/bin/env sh + +dmsg-server "$@" diff --git a/docker/scripts/docker-push.sh b/docker/scripts/docker-push.sh new file mode 100755 index 000000000..5cd8ec8f6 --- /dev/null +++ b/docker/scripts/docker-push.sh @@ -0,0 +1,48 @@ +#!/usr/bin/env bash + +function print_usage() { + echo "Use: $0 [-t ] [-p | -b]" + echo "use -p for push (it builds and push the image)" + echo "use -b for build image locally" +} + +function docker_build() { + docker image build \ + --tag=skycoinpro/dmsg-server:"$tag" \ + -f ./docker/images/dmsg-server/Dockerfile . + + docker image build \ + --tag=skycoinpro/dmsg-discovery:"$tag" \ + -f ./docker/images/dmsg-discovery/Dockerfile . +} + +function docker_push() { + docker login -u "$DOCKER_USERNAME" -p "$DOCKER_PASSWORD" + docker tag skycoinpro/dmsg-server:"$tag" skycoinpro/dmsg-server:"$tag" + docker tag skycoinpro/dmsg-discovery:"$tag" skycoinpro/dmsg-discovery:"$tag" + docker image push skycoinpro/dmsg-server:"$tag" + docker image push skycoinpro/dmsg-discovery:"$tag" +} + +while getopts ":t:pb" o; do + case "${o}" in + t) + tag="$(echo "${OPTARG}" | tr -d '[:space:]')" + if [[ $tag == "develop" ]]; then + tag="test" + elif [[ $tag == "master" ]]; then + tag="latest" + fi + ;; + p) + docker_build + docker_push + ;; + b) + docker_build + ;; + *) + print_usage + ;; + esac +done diff --git a/docs/dmsgpty.md b/docs/dmsgpty.md new file mode 100644 index 000000000..86dda19a7 --- /dev/null +++ b/docs/dmsgpty.md @@ -0,0 +1,217 @@ +# Dmsgpty +`dmsgpty` is a remote shell utility over `dmsg` (similar concept to SSH) to connect to the servers hosted over the `dmsg` network. 
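+
+The examples below assume the `dmsgpty` binaries are already present in `./bin`;
+if they are not, they can be built from the repository root (for instance via the
+project's `make build` target):
+
+```shell script
+$ make build
+```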
+ +``` +$ ./bin/dmsgpty-host --help + runs a standalone dmsgpty-host instance + + Usage: + dmsgpty-host [flags] + dmsgpty-host [command] + + Available Commands: + confgen generates config file + help Help about any command + + Flags: + --cliaddr string address used for listening for cli connections (default "/tmp/dmsgpty.sock") + --clinet string network used for listening for cli connections (default "unix") + -c, --confpath string config path (default "./config.json") + --confstdin config will be read from stdin if set + --dmsgdisc string dmsg discovery address (default "http://dmsg.discovery.skywire.skycoin.com") + --dmsgport uint16 dmsg port for listening for remote hosts (default 22) + --dmsgsessions int minimum number of dmsg sessions to ensure (default 1) + --envprefix string env prefix (default "DMSGPTY") + -h, --help help for dmsgpty-host + --sk cipher.SecKey secret key of the dmsgpty-host (default 0000000000000000000000000000000000000000000000000000000000000000) + --wl cipher.PubKeys whitelist of the dmsgpty-host (default public keys: + ) + + Use "dmsgpty-host [command] --help" for more information about a command. +``` + +``` +$ ./bin/dmsgpty-cli --help + Run commands over dmsg + + Usage: + dmsgpty-cli [flags] + dmsgpty-cli [command] + + Available Commands: + help Help about any command + whitelist lists all whitelisted public keys + whitelist-add adds public key(s) to the whitelist + whitelist-remove removes public key(s) from the whitelist + + Flags: + --addr dmsg.Addr remote dmsg address of format 'pk:port'. If unspecified, the pty will start locally (default 000000000000000000000000000000000000000000000000000000000000000000:~) + -a, --args strings command arguments + --cliaddr string address to use for dialing to dmsgpty-host (default "/tmp/dmsgpty.sock") + --clinet string network to use for dialing to dmsgpty-host (default "unix") + -c, --cmd string name of command to run (default "/bin/bash") + --confpath string config path (default "config.json") + -h, --help help for dmsgpty-cli + + Use "dmsgpty-cli [command] --help" for more information about a command. + +``` + +## Example usage +In this example, we will use the `dmsg` network where the `dmsg.Discovery` address is `http://dmsg.discovery.skywire.skycoin.com`. However, any `dmsg.Discovery` would work. + +### Example 1 +Setting up remote command execution over dmsg. + +First, let's generate a config file for the dmsgpty-host. + +```shell script +// Generate config file +$ ./bin/dmsgpty-host confgen +``` +Config file will be generated. +```JSON +{ + "dmsgdisc": "http://dmsg.discovery.skywire.skycoin.com", + "dmsgsessions": 1, + "dmsgport": 22, + "clinet": "unix", + "cliaddr": "/tmp/dmsgpty.sock", + "sk": "8770be1ae64aa22a6d442086dc5870339a4d402c10e30499fa8a53d34413d412", + "pk": "03d3d3744f7d6a943b3d467fce8477ccc580b7568160346b8d8bbd95e343ad6be4", + "wl": null +} +``` +To start the `dmsgpty-host` simply run + +```shell script +$ ./bin/dmsgpty-host +``` +To interact with this host use `dmsgpty-cli`.
+`dmsgpty-cli` can be used to view, add or remove whitelist. +To view the whitelist run the following in a new terminal. +```shell script +$ ./bin/dmsgpty-cli whitelist +``` + +To add a whitelist use the following command with a Public key of a node you want to whitelist. +```shell script +$ ./bin/dmsgpty-cli whitelist-add 0278a4adc9071c695992d27123c5be7075abe369b1ef6cb4ee2716ac9151843d00 +``` + +To remove a whitelist use the following command with a Public key of a node you want to remove. +```shell script +$ ./bin/dmsgpty-cli whitelist-remove 0278a4adc9071c695992d27123c5be7075abe369b1ef6cb4ee2716ac9151843d00 +``` + +To start the `dmsgpty-ui` simply run + +```shell script +$ ./bin/dmsgpty-ui +``` + +And open the browser at http://127.0.0.1:8080/ + +### Example 2 +Connect two remote machines with each other via DMSGPTY +#### Step 1: Config Generation +First, lets generate a config file for the `dmsgpty-host` both machines if its not already generated. +```shell script +// Generate config file +host-1&2$ ./bin/dmsgpty-host confgen +``` +Config file will be generated for the `dmsgpty-host`. +`dmsgpty-host 1` +```JSON +{ + "dmsgdisc": "http://dmsg.discovery.skywire.skycoin.com", + "dmsgsessions": 1, + "dmsgport": 22, + "clinet": "unix", + "cliaddr": "/tmp/dmsgpty.sock", + "sk": "8770be1ae64aa22a6d442086dc5870339a4d402c10e30499fa8a53d34413d412", + "pk": "03d3d3744f7d6a943b3d467fce8477ccc580b7568160346b8d8bbd95e343ad6be4", + "wl": null +} +``` +`dmsgpty-host 2` +```JSON +{ + "dmsgdisc": "http://dmsg.discovery.skywire.skycoin.com", + "dmsgsessions": 1, + "dmsgport": 22, + "clinet": "unix", + "cliaddr": "/tmp/dmsgpty.sock", + "sk": "76cc80ea9dcc8cbbb54d5463cea8797dd4ed27693daf176878a8d0929a4466d3", + "pk": "024e804f8e8fc3c4fc8562a5e58c4897323e527dace63ec36badfb66b65d4606d7", + "wl": null +} +``` + +#### Step 2: Start `dmsgpty-host` + +To start the `dmsgpty-host` on both machines simply run +```shell script +host-1&2$ ./bin/dmsgpty-host +``` + +#### Step 3: Start whitelist with `dmsgpty-cli` +To interact with the hosts, use `dmsgpty-cli` in a new terminal.
+`dmsgpty-cli` can be used to view the whitelist and to add or remove whitelisted public keys.
+
+Now whitelist the public key of `dmsgpty-host 1` on `dmsgpty-host 2`.
+So that `dmsgpty-host 2` will accept connection request from `dmsgpty-host 1` +```shell script +host-2$ ./bin/dmsgpty-cli whitelist-add 03d3d3744f7d6a943b3d467fce8477ccc580b7568160346b8d8bbd95e343ad6be4 +``` + +#### Step 4: Connect to dmsgpty-host 2 from dmsgpty-host 1 +Now connect to the shell of `dmsgpty-host 2` FROM `dmsgpty-host 1` run +```shell script +host-1$ ./bin/dmsgpty-cli --addr 024e804f8e8fc3c4fc8562a5e58c4897323e527dace63ec36badfb66b65d4606d7 +``` + +To exit from the shell of `dmsgpty-host 2` simply run +```shell script +$ exit +``` + +`If you are using the same machine for both hosts, there are some changes for the second host.` +#### Step 1: Config Generation +We are changing the cliaddress of `dmsgpty-host 2` since both the hosts are on the same machine, and the same cliaddr will clash. +```shell script +// Generate config file +host-2$ ./bin/dmsgpty-host confgen config2.json --cliaddr /tmp/dmsgpty2.sock +``` +Config file will be generated for the `dmsgpty-host 2`. +```JSON +{ + "dmsgdisc": "http://dmsg.discovery.skywire.skycoin.com", + "dmsgsessions": 1, + "dmsgport": 22, + "clinet": "unix", + "cliaddr": "/tmp/dmsgpty2.sock", + "sk": "76cc80ea9dcc8cbbb54d5463cea8797dd4ed27693daf176878a8d0929a4466d3", + "pk": "024e804f8e8fc3c4fc8562a5e58c4897323e527dace63ec36badfb66b65d4606d7", + "wl": null +} +``` + +#### Step 2: Start `dmsgpty-host` +To start the `dmsgpty-host 2`, simply run the following in a new terminal. +```shell script +host-2$ ./bin/dmsgpty-host -c ./config2.json +``` + +#### Step 3: Start whitelist with `dmsgpty-cli` +To interact with the hosts, use `dmsgpty-cli` in a new terminal.
+`dmsgpty-cli` can be used to view the whitelist and to add or remove whitelisted public keys.
+
+Now whitelist the public key of `dmsgpty-host 1` on `dmsgpty-host 2`.
+So that `dmsgpty-host 2` will accept connection request from `dmsgpty-host 1` +```shell script +host-2$ ./bin/dmsgpty-cli whitelist-add 03d3d3744f7d6a943b3d467fce8477ccc580b7568160346b8d8bbd95e343ad6be4 --cliaddr /tmp/dmsgpty2.sock +``` + +#### Step 4: +Same as above \ No newline at end of file diff --git a/docs/dockerized.md b/docs/dockerized.md new file mode 100644 index 000000000..126c4c76f --- /dev/null +++ b/docs/dockerized.md @@ -0,0 +1,32 @@ +## Dockerized dmsg-server and dmsg-discovery + +### Requirements + +- Docker / Docker-CE +- bash or compatible shell +- redis (dockerized or not) + +### How to + +1. Clone this repository +2. Run this command to build `dmsg-server` and `dmsg-discovery` images +```bash +$ ./docker/scripts/docker-push.sh -t develop -b +``` +3. Create a new docker network +```bash +$ docker network create -d bridge br-dmsg0 +``` +4. Run redis +```bash +$ docker run --network="br-dmsg0" --rm --name=redis -d -p 6379:6379 redis:alpine +``` +5. Run `dmsg-discovery` and `dmsg-server` +```bash +$ docker run --rm --network="br-dmsg0" --name=dmsg-discovery skycoinpro/dmsg-discovery:test --redis redis://redis:6379 +# Run dmsg-server with default config (default points to production server) +$ docker run --network="br-dmsg0" --rm --name=dmsg-server skycoinpro/dmsg-server:test +# or run it with your own config +$ docker run -v :/etc/dmsg --network="br-dmsg0" --rm --name=dmsg-server \ + skycoinpro/dmsg-server:test / +``` diff --git a/fsutil/fsutil.go b/fsutil/fsutil.go new file mode 100644 index 000000000..afdac52d2 --- /dev/null +++ b/fsutil/fsutil.go @@ -0,0 +1,17 @@ +package fsutil + +import ( + "os" +) + +// Exists checks if file exists at `path`. +func Exists(path string) (bool, error) { + _, err := os.Stat(path) + if err == nil { + return true, nil + } + if os.IsNotExist(err) { + return false, nil + } + return false, err +} diff --git a/go.mod b/go.mod index 6cd89cfde..665bfaa5b 100644 --- a/go.mod +++ b/go.mod @@ -1,37 +1,31 @@ module github.com/skycoin/dmsg -go 1.14 +go 1.16 require ( github.com/VictoriaMetrics/metrics v1.12.3 - github.com/creack/pty v1.1.9 + github.com/creack/pty v1.1.10 github.com/go-chi/chi v4.1.2+incompatible - github.com/go-redis/redis v6.15.6+incompatible + github.com/go-redis/redis v6.15.8+incompatible + github.com/google/go-cmp v0.5.5 // indirect github.com/json-iterator/go v1.1.10 - github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect - github.com/kr/pretty v0.2.0 // indirect - github.com/kz/discordrus v1.2.0 - github.com/mattn/go-colorable v0.1.6 // indirect + github.com/klauspost/compress v1.11.0 // indirect + github.com/kr/pretty v0.2.1 // indirect + github.com/kr/text v0.2.0 // indirect + github.com/mattn/go-colorable v0.1.8 // indirect github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b // indirect - github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.1 // indirect - github.com/onsi/ginkgo v1.12.0 // indirect - github.com/onsi/gomega v1.9.0 // indirect + github.com/nxadm/tail v1.4.8 // indirect + github.com/onsi/ginkgo v1.15.0 // indirect + github.com/onsi/gomega v1.10.5 // indirect github.com/pires/go-proxyproto v0.3.3 - github.com/sirupsen/logrus v1.4.2 + github.com/sirupsen/logrus v1.8.1 github.com/skycoin/noise v0.0.0-20180327030543-2492fe189ae6 github.com/skycoin/skycoin v0.26.0 github.com/skycoin/yamux v0.0.0-20200803175205-571ceb89da9f - github.com/spf13/cast v1.3.0 github.com/spf13/cobra v0.0.5 - github.com/spf13/pflag v1.0.3 - 
github.com/spf13/viper v1.6.2 github.com/stretchr/testify v1.4.0 - golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073 - golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553 - golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527 // indirect - golang.org/x/text v0.3.2 // indirect + golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb + golang.org/x/term v0.0.0-20210406210042-72f3dc4e9b72 gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect - gopkg.in/ini.v1 v1.51.1 // indirect nhooyr.io/websocket v1.8.2 ) diff --git a/go.sum b/go.sum index 4e54f5676..ac9db4222 100644 --- a/go.sum +++ b/go.sum @@ -1,262 +1,183 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/VictoriaMetrics/metrics v1.12.3 h1:Fe6JHC6MSEKa+BtLhPN8WIvS+HKPzMc2evEpNeCGy7I= github.com/VictoriaMetrics/metrics v1.12.3/go.mod h1:Z1tSfPfngDn12bTfZSCqArT3OPY3u88J12hSoOhuiRE= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= -github.com/creack/pty v1.1.9 h1:uDmaGzcdjhF4i/plgjmEsriH11Y0o7RKapEf/LDaM3w= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/creack/pty v1.1.10 h1:Xv3/hZlzZeTSMk5upBEt3iFdxWaPS3xYIm+BBySIqlY= +github.com/creack/pty v1.1.10/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod 
h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/go-chi/chi v4.1.2+incompatible h1:fGFk2Gmi/YKXk0OmGfBh0WgmN3XB8lVnEyNz34tQRec= github.com/go-chi/chi v4.1.2+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-redis/redis v6.15.6+incompatible h1:H9evprGPLI8+ci7fxQx6WNZHJSb7be8FqJQRhdQZ5Sg= -github.com/go-redis/redis v6.15.6+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-redis/redis v6.15.8+incompatible h1:BKZuG6mCnRj5AOaWJXoCgf6rqTYnYJLe4en2hxT7r9o= +github.com/go-redis/redis v6.15.8+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee h1:s+21KNqlpePfkah2I+gwHF8xmJWRjooY+5248k6m4A0= github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= github.com/gobwas/pool v0.2.0 h1:QEmUOlnSjWtnpRGHF3SauEiOsy82Cup83Vf2LcMlnc8= github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= github.com/gobwas/ws v1.0.2 h1:CoAavW/wd/kulfZmSIBt6p24n4j7tHgNVCjsfHVNUbo= github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3 h1:gyjaxf+svBWX08ZjK86iN9geUJF0H6gp2IRKX6Nf6/I= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= +github.com/golang/protobuf 
v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.1 h1:q7AeDBpnBk8AogcD4DSag/Ukw/KV+YhzLj2bP5HvKCM= github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.10.0 h1:92XGj1AcYzA6UrVdd4qIIBrT8OroryvRvdmg/IfmC7Y= github.com/klauspost/compress v1.10.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s= -github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= 
+github.com/klauspost/compress v1.11.0 h1:wJbzvpYMVGG9iTI9VxpnNZfd4DzMPoCWze3GgSqz8yg= +github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kz/discordrus v1.2.0 h1:r5uplKozPR+TIJ1NUZT758Lv7eukf8+fp3L4uRj+6xs= -github.com/kz/discordrus v1.2.0/go.mod h1:cJ3TiJUUuY5Gm3DNYHnnaUa3iol8VBRPzztAeZm7exc= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4= -github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= -github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8= +github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b h1:j7+1HpAFS1zy5+Q4qx1fWh90gTKwiN4QCGoY9TWyyO4= github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8 
h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.0 h1:Iw5WCbBcaAAd0fpRb1c9r5YCylv4XDoCSigm1zLevwU= -github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.15.0 h1:1V1NfVQR87RtWAgp1lv9JZJ5Jap+XFGKPi00andXGi4= +github.com/onsi/ginkgo v1.15.0/go.mod h1:hF8qUzuuC8DJGygJH3726JnCZX4MYbRB8yFfISqnKUg= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.9.0 h1:R1uwffexN6Pr340GtYRIdZmAiN4J+iw6WG4wog1DUXg= -github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= -github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.10.5 h1:7n6FEkpFmfCoo2t+YYqXH0evK+a9ICQz0xcAy9dYcaQ= +github.com/onsi/gomega v1.10.5/go.mod h1:gza4q3jKQJijlu05nKWRCW/GavJumGt8aNRxWg7mt48= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pires/go-proxyproto v0.3.3 h1:jOXGrsAfSQVFiD1hWg1aiHpLYsd6SJw/8cLN594sB7Q= github.com/pires/go-proxyproto v0.3.3/go.mod h1:Odh9VFOZJCf9G8cLW5o435Xf1J95Jw9Gw5rnCjcwzAY= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/skycoin/noise v0.0.0-20180327030543-2492fe189ae6 h1:1Nc5EBY6pjfw1kwW0duwyG+7WliWz5u9kgk1h5MnLuA= github.com/skycoin/noise 
v0.0.0-20180327030543-2492fe189ae6/go.mod h1:UXghlricA7J3aRD/k7p/zBObQfmBawwCxIVPVjz2Q3o= github.com/skycoin/skycoin v0.26.0 h1:xDxe2r8AclMntZ550Y/vUQgwgLtwrf9Wu5UYiYcN5/o= github.com/skycoin/skycoin v0.26.0/go.mod h1:78nHjQzd8KG0jJJVL/j0xMmrihXi70ti63fh8vXScJw= github.com/skycoin/yamux v0.0.0-20200803175205-571ceb89da9f h1:A5dEM1OE9YhN3LciZU9qPjo7fJ46JeHNi3JCroDkK0Y= github.com/skycoin/yamux v0.0.0-20200803175205-571ceb89da9f/go.mod h1:48cleOxgkiLbgv322LOg2Vrxtu180Mb8GG1HbuhmFYM= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= -github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= -github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= -github.com/spf13/viper v1.6.2 h1:7aKfF+e8/k68gda3LOjo5RxiUqddoFxVq4BKBPrxk5E= -github.com/spf13/viper v1.6.2/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= -github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/valyala/fastrand v1.0.0 h1:LUKT9aKer2dVQNUi3waewTbKV+7H17kvWFNKs2ObdkI= github.com/valyala/fastrand v1.0.0/go.mod h1:HWqCzkrkg6QXT8V2EXWvXCoow7vLwOFN002oeRzjapQ= github.com/valyala/histogram v1.1.2 h1:vOk5VrGjMBIoPR5k6wA8vBaC8toeJ8XO0yfRjFEc1h8= github.com/valyala/histogram v1.1.2/go.mod 
h1:CZAr6gK9dbD7hYx2s8WSPh0p5x5wETjC+2b3PJVtEdg= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073 h1:xMPOj6Pz6UipU1wXLkrtqpHbR0AVFnyPEQq/wRWz9lM= -golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191204025024-5ee1b9f4859a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553 h1:efeOvDhwQ29Dj3SdAV/MJf8oukgn+8D8WgaCaRMchF8= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb h1:eBmm0M9fYhWpKZLjQUUKka/LtIxf46G4fxeEz5KJr9U= +golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod 
h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527 h1:uYVVQ9WP/Ds2ROhcaGPeIdVq0RIXVLwsHlnvJ+cT1So= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091 h1:DMyOG0U+gKfu8JZzg2UQe9MeaC1X+xQWlAKcRnjxjCw= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/term v0.0.0-20210406210042-72f3dc4e9b72 h1:VqE9gduFZ4dbR7XoL77lHFp0/DyDUBKSXK7CMFkVcV0= +golang.org/x/term v0.0.0-20210406210042-72f3dc4e9b72/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/text v0.3.3 
h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod 
h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/ini.v1 v1.51.1 h1:GyboHr4UqMiLUybYjd22ZjQIKEJEpgtLXtuGbR21Oho= -gopkg.in/ini.v1 v1.51.1/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= nhooyr.io/websocket v1.8.2 h1:LwdzfyyOZKtVFoXay6A39Acu03KmidSZ3YUUvPa13PA= nhooyr.io/websocket v1.8.2/go.mod h1:LiqdCg1Cu7TPWxEvPjPa0TGYxCsy4pHNTN9gGluwBpQ= diff --git a/integration/README.md b/integration/README.md index a0c0f8efa..790fb08ed 100644 --- a/integration/README.md +++ b/integration/README.md @@ -6,15 +6,19 @@ ```bash $ make build ``` -2. Ensure `redis` is running and listening on port 6379. +2. Ensure `redis-server` is installed on the system. If not alredy installed install it. (e.g. for Linux) ```bash - $ redis-server + $ sudo apt install redis-server ``` -3. Start `dmsg-discovery`. +3. Ensure `redis` is running and listening on port 6379. ```bash - $ ./bin/dmsg-discovery + $ redis-server ``` -4. Start `dmsg-server`. +4. Start `dmsg-discovery` in testing mode. + ```bash + $ ./bin/dmsg-discovery -t + ``` +5. Start `dmsg-server`. ```bash $ ./bin/dmsg-server ./integration/configs/dmsgserver1.json ``` diff --git a/integration/env.sh b/integration/env.sh index 807e84677..81c8ea810 100644 --- a/integration/env.sh +++ b/integration/env.sh @@ -173,7 +173,7 @@ function init_dmsg() { func_print "Running ${DMSG_SRV2}..." tmux send-keys -t ${DMSG_SRV2} './bin/dmsg-server ./integration/configs/dmsgserver2.json' C-m catch_ec $? - + sleep 1 func_print "${DMSG} session started successfully." tmux select-window -t bash } diff --git a/vendor/github.com/VictoriaMetrics/metrics/LICENSE b/vendor/github.com/VictoriaMetrics/metrics/LICENSE new file mode 100644 index 000000000..539b7a4c0 --- /dev/null +++ b/vendor/github.com/VictoriaMetrics/metrics/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2019 VictoriaMetrics + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/VictoriaMetrics/metrics/README.md b/vendor/github.com/VictoriaMetrics/metrics/README.md new file mode 100644 index 000000000..4f1283abb --- /dev/null +++ b/vendor/github.com/VictoriaMetrics/metrics/README.md @@ -0,0 +1,104 @@ +[![Build Status](https://github.com/VictoriaMetrics/metrics/workflows/main/badge.svg)](https://github.com/VictoriaMetrics/metrics/actions) +[![GoDoc](https://godoc.org/github.com/VictoriaMetrics/metrics?status.svg)](http://godoc.org/github.com/VictoriaMetrics/metrics) +[![Go Report](https://goreportcard.com/badge/github.com/VictoriaMetrics/metrics)](https://goreportcard.com/report/github.com/VictoriaMetrics/metrics) +[![codecov](https://codecov.io/gh/VictoriaMetrics/metrics/branch/master/graph/badge.svg)](https://codecov.io/gh/VictoriaMetrics/metrics) + + +# metrics - lightweight package for exporting metrics in Prometheus format + + +### Features + +* Lightweight. Has minimal number of third-party dependencies and all these deps are small. + See [this article](https://medium.com/@valyala/stripping-dependency-bloat-in-victoriametrics-docker-image-983fb5912b0d) for details. +* Easy to use. See the [API docs](http://godoc.org/github.com/VictoriaMetrics/metrics). +* Fast. +* Allows exporting distinct metric sets via distinct endpoints. See [Set](http://godoc.org/github.com/VictoriaMetrics/metrics#Set). +* Supports [easy-to-use histograms](http://godoc.org/github.com/VictoriaMetrics/metrics#Histogram), which just work without any tuning. + Read more about VictoriaMetrics histograms at [this article](https://medium.com/@valyala/improving-histogram-usability-for-prometheus-and-grafana-bc7e5df0e350). + + +### Limitations + +* It doesn't implement advanced functionality from [github.com/prometheus/client_golang](https://godoc.org/github.com/prometheus/client_golang). + + +### Usage + +```go +import "github.com/VictoriaMetrics/metrics" + +// Register various time series. +// Time series name may contain labels in Prometheus format - see below. +var ( + // Register counter without labels. + requestsTotal = metrics.NewCounter("requests_total") + + // Register summary with a single label. + requestDuration = metrics.NewSummary(`requests_duration_seconds{path="/foobar/baz"}`) + + // Register gauge with two labels. + queueSize = metrics.NewGauge(`queue_size{queue="foobar",topic="baz"}`, func() float64 { + return float64(foobarQueue.Len()) + }) + + // Register histogram with a single label. + responseSize = metrics.NewHistogram(`response_size{path="/foo/bar"}`) +) + +// ... +func requestHandler() { + // Increment requestTotal counter. + requestsTotal.Inc() + + startTime := time.Now() + processRequest() + // Update requestDuration summary. + requestDuration.UpdateDuration(startTime) + + // Update responseSize histogram. + responseSize.Update(responseSize) +} + +// Expose the registered metrics at `/metrics` path. 
+http.HandleFunc("/metrics", func(w http.ResponseWriter, req *http.Request) { + metrics.WritePrometheus(w, true) +}) +``` + +See [docs](http://godoc.org/github.com/VictoriaMetrics/metrics) for more info. + + +### Users + +* `Metrics` has been extracted from [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics) sources. + See [this article](https://medium.com/devopslinks/victoriametrics-creating-the-best-remote-storage-for-prometheus-5d92d66787ac) + for more info about `VictoriaMetrics`. + + +### FAQ + +#### Why the `metrics` API isn't compatible with `github.com/prometheus/client_golang`? + +Because the `github.com/prometheus/client_golang` is too complex and is hard to use. + + +#### Why the `metrics.WritePrometheus` doesn't expose documentation for each metric? + +Because this documentation is ignored by Prometheus. The documentation is for users. +Just add comments in the source code or in other suitable place explaining each metric +exposed from your application. + + +#### How to implement [CounterVec](https://godoc.org/github.com/prometheus/client_golang/prometheus#CounterVec) in `metrics`? + +Just use [GetOrCreateCounter](http://godoc.org/github.com/VictoriaMetrics/metrics#GetOrCreateCounter) +instead of `CounterVec.With`. See [this example](https://godoc.org/github.com/VictoriaMetrics/metrics#example-Counter--Vec) for details. + + +#### Why [Histogram](http://godoc.org/github.com/VictoriaMetrics/metrics#Histogram) buckets contain `vmrange` labels instead of `le` labels like in Prometheus histograms? + +Buckets with `vmrange` labels occupy less disk space comparing to Promethes-style buckets with `le` labels, +because `vmrange` buckets don't include counters for the previous ranges. [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics) provides `prometheus_buckets` +function, which converts `vmrange` buckets to Prometheus-style buckets with `le` labels. This is useful for building heatmaps in Grafana. +Additionally, its' `histogram_quantile` function transparently handles histogram buckets with `vmrange` labels. diff --git a/vendor/github.com/VictoriaMetrics/metrics/counter.go b/vendor/github.com/VictoriaMetrics/metrics/counter.go new file mode 100644 index 000000000..a7d954923 --- /dev/null +++ b/vendor/github.com/VictoriaMetrics/metrics/counter.go @@ -0,0 +1,77 @@ +package metrics + +import ( + "fmt" + "io" + "sync/atomic" +) + +// NewCounter registers and returns new counter with the given name. +// +// name must be valid Prometheus-compatible metric with possible labels. +// For instance, +// +// * foo +// * foo{bar="baz"} +// * foo{bar="baz",aaa="b"} +// +// The returned counter is safe to use from concurrent goroutines. +func NewCounter(name string) *Counter { + return defaultSet.NewCounter(name) +} + +// Counter is a counter. +// +// It may be used as a gauge if Dec and Set are called. +type Counter struct { + n uint64 +} + +// Inc increments c. +func (c *Counter) Inc() { + atomic.AddUint64(&c.n, 1) +} + +// Dec decrements c. +func (c *Counter) Dec() { + atomic.AddUint64(&c.n, ^uint64(0)) +} + +// Add adds n to c. +func (c *Counter) Add(n int) { + atomic.AddUint64(&c.n, uint64(n)) +} + +// Get returns the current value for c. +func (c *Counter) Get() uint64 { + return atomic.LoadUint64(&c.n) +} + +// Set sets c value to n. +func (c *Counter) Set(n uint64) { + atomic.StoreUint64(&c.n, n) +} + +// marshalTo marshals c with the given prefix to w. 
+func (c *Counter) marshalTo(prefix string, w io.Writer) { + v := c.Get() + fmt.Fprintf(w, "%s %d\n", prefix, v) +} + +// GetOrCreateCounter returns registered counter with the given name +// or creates new counter if the registry doesn't contain counter with +// the given name. +// +// name must be valid Prometheus-compatible metric with possible labels. +// For instance, +// +// * foo +// * foo{bar="baz"} +// * foo{bar="baz",aaa="b"} +// +// The returned counter is safe to use from concurrent goroutines. +// +// Performance tip: prefer NewCounter instead of GetOrCreateCounter. +func GetOrCreateCounter(name string) *Counter { + return defaultSet.GetOrCreateCounter(name) +} diff --git a/vendor/github.com/VictoriaMetrics/metrics/floatcounter.go b/vendor/github.com/VictoriaMetrics/metrics/floatcounter.go new file mode 100644 index 000000000..d01dd851e --- /dev/null +++ b/vendor/github.com/VictoriaMetrics/metrics/floatcounter.go @@ -0,0 +1,82 @@ +package metrics + +import ( + "fmt" + "io" + "sync" +) + +// NewFloatCounter registers and returns new counter of float64 type with the given name. +// +// name must be valid Prometheus-compatible metric with possible labels. +// For instance, +// +// * foo +// * foo{bar="baz"} +// * foo{bar="baz",aaa="b"} +// +// The returned counter is safe to use from concurrent goroutines. +func NewFloatCounter(name string) *FloatCounter { + return defaultSet.NewFloatCounter(name) +} + +// FloatCounter is a float64 counter guarded by RWmutex. +// +// It may be used as a gauge if Add and Sub are called. +type FloatCounter struct { + mu sync.Mutex + n float64 +} + +// Add adds n to fc. +func (fc *FloatCounter) Add(n float64) { + fc.mu.Lock() + fc.n += n + fc.mu.Unlock() +} + +// Sub substracts n from fc. +func (fc *FloatCounter) Sub(n float64) { + fc.mu.Lock() + fc.n -= n + fc.mu.Unlock() +} + +// Get returns the current value for fc. +func (fc *FloatCounter) Get() float64 { + fc.mu.Lock() + n := fc.n + fc.mu.Unlock() + return n +} + +// Set sets fc value to n. +func (fc *FloatCounter) Set(n float64) { + fc.mu.Lock() + fc.n = n + fc.mu.Unlock() +} + +// marshalTo marshals fc with the given prefix to w. +func (fc *FloatCounter) marshalTo(prefix string, w io.Writer) { + v := fc.Get() + fmt.Fprintf(w, "%s %g\n", prefix, v) +} + +// GetOrCreateFloatCounter returns registered FloatCounter with the given name +// or creates new FloatCounter if the registry doesn't contain FloatCounter with +// the given name. +// +// name must be valid Prometheus-compatible metric with possible labels. +// For instance, +// +// * foo +// * foo{bar="baz"} +// * foo{bar="baz",aaa="b"} +// +// The returned FloatCounter is safe to use from concurrent goroutines. +// +// Performance tip: prefer NewFloatCounter instead of GetOrCreateFloatCounter. +func GetOrCreateFloatCounter(name string) *FloatCounter { + return defaultSet.GetOrCreateFloatCounter(name) +} diff --git a/vendor/github.com/VictoriaMetrics/metrics/gauge.go b/vendor/github.com/VictoriaMetrics/metrics/gauge.go new file mode 100644 index 000000000..05bf1473f --- /dev/null +++ b/vendor/github.com/VictoriaMetrics/metrics/gauge.go @@ -0,0 +1,67 @@ +package metrics + +import ( + "fmt" + "io" +) + +// NewGauge registers and returns gauge with the given name, which calls f +// to obtain gauge value. +// +// name must be valid Prometheus-compatible metric with possible labels. +// For instance, +// +// * foo +// * foo{bar="baz"} +// * foo{bar="baz",aaa="b"} +// +// f must be safe for concurrent calls. 
+// +// The returned gauge is safe to use from concurrent goroutines. +// +// See also FloatCounter for working with floating-point values. +func NewGauge(name string, f func() float64) *Gauge { + return defaultSet.NewGauge(name, f) +} + +// Gauge is a float64 gauge. +// +// See also Counter, which could be used as a gauge with Set and Dec calls. +type Gauge struct { + f func() float64 +} + +// Get returns the current value for g. +func (g *Gauge) Get() float64 { + return g.f() +} + +func (g *Gauge) marshalTo(prefix string, w io.Writer) { + v := g.f() + if float64(int64(v)) == v { + // Marshal integer values without scientific notation + fmt.Fprintf(w, "%s %d\n", prefix, int64(v)) + } else { + fmt.Fprintf(w, "%s %g\n", prefix, v) + } +} + +// GetOrCreateGauge returns registered gauge with the given name +// or creates new gauge if the registry doesn't contain gauge with +// the given name. +// +// name must be valid Prometheus-compatible metric with possible labels. +// For instance, +// +// * foo +// * foo{bar="baz"} +// * foo{bar="baz",aaa="b"} +// +// The returned gauge is safe to use from concurrent goroutines. +// +// Performance tip: prefer NewGauge instead of GetOrCreateGauge. +// +// See also FloatCounter for working with floating-point values. +func GetOrCreateGauge(name string, f func() float64) *Gauge { + return defaultSet.GetOrCreateGauge(name, f) +} diff --git a/vendor/github.com/VictoriaMetrics/metrics/go.mod b/vendor/github.com/VictoriaMetrics/metrics/go.mod new file mode 100644 index 000000000..a66c19bd3 --- /dev/null +++ b/vendor/github.com/VictoriaMetrics/metrics/go.mod @@ -0,0 +1,5 @@ +module github.com/VictoriaMetrics/metrics + +require github.com/valyala/histogram v1.1.2 + +go 1.12 diff --git a/vendor/github.com/VictoriaMetrics/metrics/go.sum b/vendor/github.com/VictoriaMetrics/metrics/go.sum new file mode 100644 index 000000000..b1219448d --- /dev/null +++ b/vendor/github.com/VictoriaMetrics/metrics/go.sum @@ -0,0 +1,4 @@ +github.com/valyala/fastrand v1.0.0 h1:LUKT9aKer2dVQNUi3waewTbKV+7H17kvWFNKs2ObdkI= +github.com/valyala/fastrand v1.0.0/go.mod h1:HWqCzkrkg6QXT8V2EXWvXCoow7vLwOFN002oeRzjapQ= +github.com/valyala/histogram v1.1.2 h1:vOk5VrGjMBIoPR5k6wA8vBaC8toeJ8XO0yfRjFEc1h8= +github.com/valyala/histogram v1.1.2/go.mod h1:CZAr6gK9dbD7hYx2s8WSPh0p5x5wETjC+2b3PJVtEdg= diff --git a/vendor/github.com/VictoriaMetrics/metrics/go_metrics.go b/vendor/github.com/VictoriaMetrics/metrics/go_metrics.go new file mode 100644 index 000000000..f8b606731 --- /dev/null +++ b/vendor/github.com/VictoriaMetrics/metrics/go_metrics.go @@ -0,0 +1,64 @@ +package metrics + +import ( + "fmt" + "io" + "runtime" + + "github.com/valyala/histogram" +) + +func writeGoMetrics(w io.Writer) { + var ms runtime.MemStats + runtime.ReadMemStats(&ms) + fmt.Fprintf(w, "go_memstats_alloc_bytes %d\n", ms.Alloc) + fmt.Fprintf(w, "go_memstats_alloc_bytes_total %d\n", ms.TotalAlloc) + fmt.Fprintf(w, "go_memstats_buck_hash_sys_bytes %d\n", ms.BuckHashSys) + fmt.Fprintf(w, "go_memstats_frees_total %d\n", ms.Frees) + fmt.Fprintf(w, "go_memstats_gc_cpu_fraction %g\n", ms.GCCPUFraction) + fmt.Fprintf(w, "go_memstats_gc_sys_bytes %d\n", ms.GCSys) + fmt.Fprintf(w, "go_memstats_heap_alloc_bytes %d\n", ms.HeapAlloc) + fmt.Fprintf(w, "go_memstats_heap_idle_bytes %d\n", ms.HeapIdle) + fmt.Fprintf(w, "go_memstats_heap_inuse_bytes %d\n", ms.HeapInuse) + fmt.Fprintf(w, "go_memstats_heap_objects %d\n", ms.HeapObjects) + fmt.Fprintf(w, "go_memstats_heap_released_bytes %d\n", ms.HeapReleased) + fmt.Fprintf(w, 
"go_memstats_heap_sys_bytes %d\n", ms.HeapSys) + fmt.Fprintf(w, "go_memstats_last_gc_time_seconds %g\n", float64(ms.LastGC)/1e9) + fmt.Fprintf(w, "go_memstats_lookups_total %d\n", ms.Lookups) + fmt.Fprintf(w, "go_memstats_mallocs_total %d\n", ms.Mallocs) + fmt.Fprintf(w, "go_memstats_mcache_inuse_bytes %d\n", ms.MCacheInuse) + fmt.Fprintf(w, "go_memstats_mcache_sys_bytes %d\n", ms.MCacheSys) + fmt.Fprintf(w, "go_memstats_mspan_inuse_bytes %d\n", ms.MSpanInuse) + fmt.Fprintf(w, "go_memstats_mspan_sys_bytes %d\n", ms.MSpanSys) + fmt.Fprintf(w, "go_memstats_next_gc_bytes %d\n", ms.NextGC) + fmt.Fprintf(w, "go_memstats_other_sys_bytes %d\n", ms.OtherSys) + fmt.Fprintf(w, "go_memstats_stack_inuse_bytes %d\n", ms.StackInuse) + fmt.Fprintf(w, "go_memstats_stack_sys_bytes %d\n", ms.StackSys) + fmt.Fprintf(w, "go_memstats_sys_bytes %d\n", ms.Sys) + + fmt.Fprintf(w, "go_cgo_calls_count %d\n", runtime.NumCgoCall()) + fmt.Fprintf(w, "go_cpu_count %d\n", runtime.NumCPU()) + + gcPauses := histogram.NewFast() + for _, pauseNs := range ms.PauseNs[:] { + gcPauses.Update(float64(pauseNs) / 1e9) + } + phis := []float64{0, 0.25, 0.5, 0.75, 1} + quantiles := make([]float64, 0, len(phis)) + for i, q := range gcPauses.Quantiles(quantiles[:0], phis) { + fmt.Fprintf(w, `go_gc_duration_seconds{quantile="%g"} %g`+"\n", phis[i], q) + } + fmt.Fprintf(w, `go_gc_duration_seconds_sum %g`+"\n", float64(ms.PauseTotalNs)/1e9) + fmt.Fprintf(w, `go_gc_duration_seconds_count %d`+"\n", ms.NumGC) + fmt.Fprintf(w, `go_gc_forced_count %d`+"\n", ms.NumForcedGC) + + fmt.Fprintf(w, `go_gomaxprocs %d`+"\n", runtime.GOMAXPROCS(0)) + fmt.Fprintf(w, `go_goroutines %d`+"\n", runtime.NumGoroutine()) + numThread, _ := runtime.ThreadCreateProfile(nil) + fmt.Fprintf(w, `go_threads %d`+"\n", numThread) + + // Export build details. + fmt.Fprintf(w, "go_info{version=%q} 1\n", runtime.Version()) + fmt.Fprintf(w, "go_info_ext{compiler=%q, GOARCH=%q, GOOS=%q, GOROOT=%q} 1\n", + runtime.Compiler, runtime.GOARCH, runtime.GOOS, runtime.GOROOT()) +} diff --git a/vendor/github.com/VictoriaMetrics/metrics/histogram.go b/vendor/github.com/VictoriaMetrics/metrics/histogram.go new file mode 100644 index 000000000..c1879996a --- /dev/null +++ b/vendor/github.com/VictoriaMetrics/metrics/histogram.go @@ -0,0 +1,306 @@ +package metrics + +import ( + "fmt" + "io" + "math" + "sync" + "time" +) + +const ( + e10Min = -9 + e10Max = 18 + decimalMultiplier = 2 + bucketSize = 9 * decimalMultiplier + bucketsCount = e10Max - e10Min + decimalPrecision = 1e-12 +) + +// Histogram is a histogram for non-negative values with automatically created buckets. +// +// See https://medium.com/@valyala/improving-histogram-usability-for-prometheus-and-grafana-bc7e5df0e350 +// +// Each bucket contains a counter for values in the given range. +// Each non-empty bucket is exposed via the following metric: +// +// _bucket{,vmrange="..."} +// +// Where: +// +// - is the metric name passed to NewHistogram +// - is optional tags for the , which are passed to NewHistogram +// - and - start and end values for the given bucket +// - - the number of hits to the given bucket during Update* calls +// +// Histogram buckets can be converted to Prometheus-like buckets with `le` labels +// with `prometheus_buckets(_bucket)` function from PromQL extensions in VictoriaMetrics. 
+// (see https://github.com/VictoriaMetrics/VictoriaMetrics/wiki/MetricsQL ): +// +// prometheus_buckets(request_duration_bucket) +// +// Time series produced by the Histogram have better compression ratio comparing to +// Prometheus histogram buckets with `le` labels, since they don't include counters +// for all the previous buckets. +// +// Zero histogram is usable. +type Histogram struct { + // Mu gurantees synchronous update for all the counters and sum. + mu sync.Mutex + + buckets [bucketsCount]*histogramBucket + + zeros uint64 + lower uint64 + upper uint64 + + sum float64 +} + +// Reset resets the given histogram. +func (h *Histogram) Reset() { + h.mu.Lock() + h.resetLocked() + h.mu.Unlock() +} + +func (h *Histogram) resetLocked() { + for _, hb := range h.buckets[:] { + if hb == nil { + continue + } + for offset := range hb.counts[:] { + hb.counts[offset] = 0 + } + } + h.zeros = 0 + h.lower = 0 + h.upper = 0 +} + +// Update updates h with v. +// +// Negative values and NaNs are ignored. +func (h *Histogram) Update(v float64) { + if math.IsNaN(v) || v < 0 { + // Skip NaNs and negative values. + return + } + bucketIdx, offset := getBucketIdxAndOffset(v) + h.mu.Lock() + h.updateLocked(v, bucketIdx, offset) + h.mu.Unlock() +} + +func (h *Histogram) updateLocked(v float64, bucketIdx int, offset uint) { + h.sum += v + if bucketIdx < 0 { + // Special cases for zero, too small or too big value + if offset == 0 { + h.zeros++ + } else if offset == 1 { + h.lower++ + } else { + h.upper++ + } + return + } + hb := h.buckets[bucketIdx] + if hb == nil { + hb = &histogramBucket{} + h.buckets[bucketIdx] = hb + } + hb.counts[offset]++ +} + +// VisitNonZeroBuckets calls f for all buckets with non-zero counters. +// +// vmrange contains "..." string with bucket bounds. The lower bound +// isn't included in the bucket, while the upper bound is included. +// This is required to be compatible with Prometheus-style histogram buckets +// with `le` (less or equal) labels. +func (h *Histogram) VisitNonZeroBuckets(f func(vmrange string, count uint64)) { + h.mu.Lock() + h.visitNonZeroBucketsLocked(f) + h.mu.Unlock() +} + +func (h *Histogram) visitNonZeroBucketsLocked(f func(vmrange string, count uint64)) { + if h.zeros > 0 { + vmrange := getVMRange(-1, 0) + f(vmrange, h.zeros) + } + if h.lower > 0 { + vmrange := getVMRange(-1, 1) + f(vmrange, h.lower) + } + for bucketIdx, hb := range h.buckets[:] { + if hb == nil { + continue + } + for offset, count := range hb.counts[:] { + if count > 0 { + vmrange := getVMRange(bucketIdx, uint(offset)) + f(vmrange, count) + } + } + } + if h.upper > 0 { + vmrange := getVMRange(-1, 2) + f(vmrange, h.upper) + } +} + +type histogramBucket struct { + counts [bucketSize]uint64 +} + +// NewHistogram creates and returns new histogram with the given name. +// +// name must be valid Prometheus-compatible metric with possible labels. +// For instance, +// +// * foo +// * foo{bar="baz"} +// * foo{bar="baz",aaa="b"} +// +// The returned histogram is safe to use from concurrent goroutines. +func NewHistogram(name string) *Histogram { + return defaultSet.NewHistogram(name) +} + +// GetOrCreateHistogram returns registered histogram with the given name +// or creates new histogram if the registry doesn't contain histogram with +// the given name. +// +// name must be valid Prometheus-compatible metric with possible labels. +// For instance, +// +// * foo +// * foo{bar="baz"} +// * foo{bar="baz",aaa="b"} +// +// The returned histogram is safe to use from concurrent goroutines. 
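As a rough sketch of how an application might feed the histogram API defined above; the metric name, `path` label, and simulated work are placeholders:

```go
package main

import (
	"os"
	"time"

	"github.com/VictoriaMetrics/metrics"
)

// request_duration_seconds and the /api path label are illustrative names.
var requestDuration = metrics.NewHistogram(`request_duration_seconds{path="/api"}`)

func handleRequest() {
	start := time.Now()
	time.Sleep(10 * time.Millisecond) // stand-in for real work
	// Records time.Since(start).Seconds() into the automatic vmrange buckets.
	requestDuration.UpdateDuration(start)
}

func main() {
	handleRequest()

	// Emits request_duration_seconds_bucket{path="/api",vmrange="..."} series
	// plus the matching _sum and _count series.
	metrics.WritePrometheus(os.Stdout, false)
}
```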
+// +// Performance tip: prefer NewHistogram instead of GetOrCreateHistogram. +func GetOrCreateHistogram(name string) *Histogram { + return defaultSet.GetOrCreateHistogram(name) +} + +// UpdateDuration updates request duration based on the given startTime. +func (h *Histogram) UpdateDuration(startTime time.Time) { + d := time.Since(startTime).Seconds() + h.Update(d) +} + +func getVMRange(bucketIdx int, offset uint) string { + bucketRangesOnce.Do(initBucketRanges) + if bucketIdx < 0 { + if offset > 2 { + panic(fmt.Errorf("BUG: offset must be in range [0...2] for negative bucketIdx; got %d", offset)) + } + return bucketRanges[offset] + } + idx := 3 + uint(bucketIdx)*bucketSize + offset + return bucketRanges[idx] +} + +func initBucketRanges() { + bucketRanges[0] = "0...0" + bucketRanges[1] = fmt.Sprintf("0...%.1fe%d", 1.0, e10Min) + bucketRanges[2] = fmt.Sprintf("%.1fe%d...+Inf", 1.0, e10Max) + idx := 3 + start := fmt.Sprintf("%.1fe%d", 1.0, e10Min) + for bucketIdx := 0; bucketIdx < bucketsCount; bucketIdx++ { + for offset := 0; offset < bucketSize; offset++ { + e10 := e10Min + bucketIdx + m := 1 + float64(offset+1)/decimalMultiplier + if math.Abs(m-10) < decimalPrecision { + m = 1 + e10++ + } + end := fmt.Sprintf("%.1fe%d", m, e10) + bucketRanges[idx] = start + "..." + end + idx++ + start = end + } + } +} + +var ( + // 3 additional buckets for zero, lower and upper. + bucketRanges [3 + bucketsCount*bucketSize]string + bucketRangesOnce sync.Once +) + +func (h *Histogram) marshalTo(prefix string, w io.Writer) { + countTotal := uint64(0) + h.VisitNonZeroBuckets(func(vmrange string, count uint64) { + tag := fmt.Sprintf("vmrange=%q", vmrange) + metricName := addTag(prefix, tag) + name, filters := splitMetricName(metricName) + fmt.Fprintf(w, "%s_bucket%s %d\n", name, filters, count) + countTotal += count + }) + if countTotal == 0 { + return + } + name, filters := splitMetricName(prefix) + sum := h.getSum() + if float64(int64(sum)) == sum { + fmt.Fprintf(w, "%s_sum%s %d\n", name, filters, int64(sum)) + } else { + fmt.Fprintf(w, "%s_sum%s %g\n", name, filters, sum) + } + fmt.Fprintf(w, "%s_count%s %d\n", name, filters, countTotal) +} + +func (h *Histogram) getSum() float64 { + h.mu.Lock() + sum := h.sum + h.mu.Unlock() + return sum +} + +func getBucketIdxAndOffset(v float64) (int, uint) { + if v < 0 { + panic(fmt.Errorf("BUG: v must be positive; got %g", v)) + } + if v == 0 { + return -1, 0 + } + if math.IsInf(v, 1) { + return -1, 2 + } + e10 := int(math.Floor(math.Log10(v))) + bucketIdx := e10 - e10Min + if bucketIdx < 0 { + return -1, 1 + } + if bucketIdx >= bucketsCount { + if bucketIdx == bucketsCount && math.Abs(math.Pow10(e10)-v) < decimalPrecision { + // Adjust m to be on par with Prometheus 'le' buckets (aka 'less or equal') + return bucketsCount - 1, bucketSize - 1 + } + return -1, 2 + } + m := ((v / math.Pow10(e10)) - 1) * decimalMultiplier + offset := int(m) + if offset < 0 { + offset = 0 + } else if offset >= bucketSize { + offset = bucketSize - 1 + } + if math.Abs(float64(offset)-m) < decimalPrecision { + // Adjust offset to be on par with Prometheus 'le' buckets (aka 'less or equal') + offset-- + if offset < 0 { + bucketIdx-- + offset = bucketSize - 1 + if bucketIdx < 0 { + return -1, 1 + } + } + } + return bucketIdx, uint(offset) +} diff --git a/vendor/github.com/VictoriaMetrics/metrics/metrics.go b/vendor/github.com/VictoriaMetrics/metrics/metrics.go new file mode 100644 index 000000000..962ceef5b --- /dev/null +++ b/vendor/github.com/VictoriaMetrics/metrics/metrics.go @@ -0,0 +1,69 
@@ +// Package metrics implements Prometheus-compatible metrics for applications. +// +// This package is lightweight alternative to https://github.com/prometheus/client_golang +// with simpler API and smaller dependencies. +// +// Usage: +// +// 1. Register the required metrics via New* functions. +// 2. Expose them to `/metrics` page via WritePrometheus. +// 3. Update the registered metrics during application lifetime. +// +// The package has been extracted from https://victoriametrics.com/ +package metrics + +import ( + "io" +) + +type namedMetric struct { + name string + metric metric +} + +type metric interface { + marshalTo(prefix string, w io.Writer) +} + +var defaultSet = NewSet() + +// WritePrometheus writes all the registered metrics in Prometheus format to w. +// +// If exposeProcessMetrics is true, then various `go_*` and `process_*` metrics +// are exposed for the current process. +// +// The WritePrometheus func is usually called inside "/metrics" handler: +// +// http.HandleFunc("/metrics", func(w http.ResponseWriter, req *http.Request) { +// metrics.WritePrometheus(w, true) +// }) +// +func WritePrometheus(w io.Writer, exposeProcessMetrics bool) { + defaultSet.WritePrometheus(w) + if exposeProcessMetrics { + WriteProcessMetrics(w) + } +} + +// WriteProcessMetrics writes additional process metrics in Prometheus format to w. +// +// Various `go_*` and `process_*` metrics are exposed for the currently +// running process. +// +// The WriteProcessMetrics func is usually called in combination with writing Set metrics +// inside "/metrics" handler: +// +// http.HandleFunc("/metrics", func(w http.ResponseWriter, req *http.Request) { +// mySet.WritePrometheus(w) +// metrics.WriteProcessMetrics(w) +// }) +// +func WriteProcessMetrics(w io.Writer) { + writeGoMetrics(w) + writeProcessMetrics(w) +} + +// UnregisterMetric removes metric with the given name from default set. +func UnregisterMetric(name string) bool { + return defaultSet.UnregisterMetric(name) +} diff --git a/vendor/github.com/VictoriaMetrics/metrics/process_metrics_linux.go b/vendor/github.com/VictoriaMetrics/metrics/process_metrics_linux.go new file mode 100644 index 000000000..444299278 --- /dev/null +++ b/vendor/github.com/VictoriaMetrics/metrics/process_metrics_linux.go @@ -0,0 +1,83 @@ +package metrics + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "log" + "time" +) + +const statFilepath = "/proc/self/stat" + +// See https://github.com/prometheus/procfs/blob/a4ac0826abceb44c40fc71daed2b301db498b93e/proc_stat.go#L40 . +const userHZ = 100 + +// See http://man7.org/linux/man-pages/man5/proc.5.html +type procStat struct { + State byte + Ppid int + Pgrp int + Session int + TtyNr int + Tpgid int + Flags uint + Minflt uint + Cminflt uint + Majflt uint + Cmajflt uint + Utime uint + Stime uint + Cutime int + Cstime int + Priority int + Nice int + NumThreads int + ItrealValue int + Starttime uint64 + Vsize uint + Rss int +} + +func writeProcessMetrics(w io.Writer) { + data, err := ioutil.ReadFile(statFilepath) + if err != nil { + log.Printf("ERROR: cannot open %s: %s", statFilepath, err) + return + } + // Search for the end of command. 
+ n := bytes.LastIndex(data, []byte(") ")) + if n < 0 { + log.Printf("ERROR: cannot find command in parentheses in %q read from %s", data, statFilepath) + return + } + data = data[n+2:] + + var p procStat + bb := bytes.NewBuffer(data) + _, err = fmt.Fscanf(bb, "%c %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d", + &p.State, &p.Ppid, &p.Pgrp, &p.Session, &p.TtyNr, &p.Tpgid, &p.Flags, &p.Minflt, &p.Cminflt, &p.Majflt, &p.Cmajflt, + &p.Utime, &p.Stime, &p.Cutime, &p.Cstime, &p.Priority, &p.Nice, &p.NumThreads, &p.ItrealValue, &p.Starttime, &p.Vsize, &p.Rss) + if err != nil { + log.Printf("ERROR: cannot parse %q read from %s: %s", data, statFilepath, err) + return + } + + // It is expensive obtaining `process_open_fds` when big number of file descriptors is opened, + // don't do it here. + + utime := float64(p.Utime) / userHZ + stime := float64(p.Stime) / userHZ + fmt.Fprintf(w, "process_cpu_seconds_system_total %g\n", stime) + fmt.Fprintf(w, "process_cpu_seconds_total %g\n", utime+stime) + fmt.Fprintf(w, "process_cpu_seconds_user_total %g\n", utime) + fmt.Fprintf(w, "process_major_pagefaults_total %d\n", p.Majflt) + fmt.Fprintf(w, "process_minor_pagefaults_total %d\n", p.Minflt) + fmt.Fprintf(w, "process_num_threads %d\n", p.NumThreads) + fmt.Fprintf(w, "process_resident_memory_bytes %d\n", p.Rss*4096) + fmt.Fprintf(w, "process_start_time_seconds %d\n", startTimeSeconds) + fmt.Fprintf(w, "process_virtual_memory_bytes %d\n", p.Vsize) +} + +var startTimeSeconds = time.Now().Unix() diff --git a/vendor/github.com/VictoriaMetrics/metrics/process_metrics_other.go b/vendor/github.com/VictoriaMetrics/metrics/process_metrics_other.go new file mode 100644 index 000000000..6874de33e --- /dev/null +++ b/vendor/github.com/VictoriaMetrics/metrics/process_metrics_other.go @@ -0,0 +1,11 @@ +// +build !linux + +package metrics + +import ( + "io" +) + +func writeProcessMetrics(w io.Writer) { + // TODO: implement it +} diff --git a/vendor/github.com/VictoriaMetrics/metrics/set.go b/vendor/github.com/VictoriaMetrics/metrics/set.go new file mode 100644 index 000000000..69b4de866 --- /dev/null +++ b/vendor/github.com/VictoriaMetrics/metrics/set.go @@ -0,0 +1,519 @@ +package metrics + +import ( + "bytes" + "fmt" + "io" + "sort" + "sync" + "time" +) + +// Set is a set of metrics. +// +// Metrics belonging to a set are exported separately from global metrics. +// +// Set.WritePrometheus must be called for exporting metrics from the set. +type Set struct { + mu sync.Mutex + a []*namedMetric + m map[string]*namedMetric + summaries []*Summary +} + +// NewSet creates new set of metrics. +func NewSet() *Set { + return &Set{ + m: make(map[string]*namedMetric), + } +} + +// WritePrometheus writes all the metrics from s to w in Prometheus format. +func (s *Set) WritePrometheus(w io.Writer) { + // Collect all the metrics in in-memory buffer in order to prevent from long locking due to slow w. + var bb bytes.Buffer + lessFunc := func(i, j int) bool { + return s.a[i].name < s.a[j].name + } + s.mu.Lock() + for _, sm := range s.summaries { + sm.updateQuantiles() + } + if !sort.SliceIsSorted(s.a, lessFunc) { + sort.Slice(s.a, lessFunc) + } + sa := append([]*namedMetric(nil), s.a...) + s.mu.Unlock() + + // Call marshalTo without the global lock, since certain metric types such as Gauge + // can call a callback, which, in turn, can try calling s.mu.Lock again. 
+ for _, nm := range sa { + nm.metric.marshalTo(nm.name, &bb) + } + w.Write(bb.Bytes()) +} + +// NewHistogram creates and returns new histogram in s with the given name. +// +// name must be valid Prometheus-compatible metric with possible labels. +// For instance, +// +// * foo +// * foo{bar="baz"} +// * foo{bar="baz",aaa="b"} +// +// The returned histogram is safe to use from concurrent goroutines. +func (s *Set) NewHistogram(name string) *Histogram { + h := &Histogram{} + s.registerMetric(name, h) + return h +} + +// GetOrCreateHistogram returns registered histogram in s with the given name +// or creates new histogram if s doesn't contain histogram with the given name. +// +// name must be valid Prometheus-compatible metric with possible labels. +// For instance, +// +// * foo +// * foo{bar="baz"} +// * foo{bar="baz",aaa="b"} +// +// The returned histogram is safe to use from concurrent goroutines. +// +// Performance tip: prefer NewHistogram instead of GetOrCreateHistogram. +func (s *Set) GetOrCreateHistogram(name string) *Histogram { + s.mu.Lock() + nm := s.m[name] + s.mu.Unlock() + if nm == nil { + // Slow path - create and register missing histogram. + if err := validateMetric(name); err != nil { + panic(fmt.Errorf("BUG: invalid metric name %q: %s", name, err)) + } + nmNew := &namedMetric{ + name: name, + metric: &Histogram{}, + } + s.mu.Lock() + nm = s.m[name] + if nm == nil { + nm = nmNew + s.m[name] = nm + s.a = append(s.a, nm) + } + s.mu.Unlock() + } + h, ok := nm.metric.(*Histogram) + if !ok { + panic(fmt.Errorf("BUG: metric %q isn't a Histogram. It is %T", name, nm.metric)) + } + return h +} + +// NewCounter registers and returns new counter with the given name in the s. +// +// name must be valid Prometheus-compatible metric with possible labels. +// For instance, +// +// * foo +// * foo{bar="baz"} +// * foo{bar="baz",aaa="b"} +// +// The returned counter is safe to use from concurrent goroutines. +func (s *Set) NewCounter(name string) *Counter { + c := &Counter{} + s.registerMetric(name, c) + return c +} + +// GetOrCreateCounter returns registered counter in s with the given name +// or creates new counter if s doesn't contain counter with the given name. +// +// name must be valid Prometheus-compatible metric with possible labels. +// For instance, +// +// * foo +// * foo{bar="baz"} +// * foo{bar="baz",aaa="b"} +// +// The returned counter is safe to use from concurrent goroutines. +// +// Performance tip: prefer NewCounter instead of GetOrCreateCounter. +func (s *Set) GetOrCreateCounter(name string) *Counter { + s.mu.Lock() + nm := s.m[name] + s.mu.Unlock() + if nm == nil { + // Slow path - create and register missing counter. + if err := validateMetric(name); err != nil { + panic(fmt.Errorf("BUG: invalid metric name %q: %s", name, err)) + } + nmNew := &namedMetric{ + name: name, + metric: &Counter{}, + } + s.mu.Lock() + nm = s.m[name] + if nm == nil { + nm = nmNew + s.m[name] = nm + s.a = append(s.a, nm) + } + s.mu.Unlock() + } + c, ok := nm.metric.(*Counter) + if !ok { + panic(fmt.Errorf("BUG: metric %q isn't a Counter. It is %T", name, nm.metric)) + } + return c +} + +// NewFloatCounter registers and returns new FloatCounter with the given name in the s. +// +// name must be valid Prometheus-compatible metric with possible labels. +// For instance, +// +// * foo +// * foo{bar="baz"} +// * foo{bar="baz",aaa="b"} +// +// The returned FloatCounter is safe to use from concurrent goroutines. 
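The `Set` methods being defined here back the "distinct metric sets via distinct endpoints" feature mentioned in the README. A minimal sketch, assuming a hypothetical `/metrics/jobs` endpoint and `jobs_done_total` metric:

```go
package main

import (
	"log"
	"net/http"

	"github.com/VictoriaMetrics/metrics"
)

func main() {
	// A dedicated set, exported independently of the default/global metrics.
	jobSet := metrics.NewSet()
	jobsDone := jobSet.NewCounter(`jobs_done_total{queue="default"}`)
	jobsDone.Add(3)

	// Expose only this set on its own endpoint.
	http.HandleFunc("/metrics/jobs", func(w http.ResponseWriter, _ *http.Request) {
		jobSet.WritePrometheus(w)
	})
	log.Fatal(http.ListenAndServe(":8080", nil)) // placeholder listen address
}
```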
+func (s *Set) NewFloatCounter(name string) *FloatCounter { + c := &FloatCounter{} + s.registerMetric(name, c) + return c +} + +// GetOrCreateFloatCounter returns registered FloatCounter in s with the given name +// or creates new FloatCounter if s doesn't contain FloatCounter with the given name. +// +// name must be valid Prometheus-compatible metric with possible labels. +// For instance, +// +// * foo +// * foo{bar="baz"} +// * foo{bar="baz",aaa="b"} +// +// The returned FloatCounter is safe to use from concurrent goroutines. +// +// Performance tip: prefer NewFloatCounter instead of GetOrCreateFloatCounter. +func (s *Set) GetOrCreateFloatCounter(name string) *FloatCounter { + s.mu.Lock() + nm := s.m[name] + s.mu.Unlock() + if nm == nil { + // Slow path - create and register missing counter. + if err := validateMetric(name); err != nil { + panic(fmt.Errorf("BUG: invalid metric name %q: %s", name, err)) + } + nmNew := &namedMetric{ + name: name, + metric: &FloatCounter{}, + } + s.mu.Lock() + nm = s.m[name] + if nm == nil { + nm = nmNew + s.m[name] = nm + s.a = append(s.a, nm) + } + s.mu.Unlock() + } + c, ok := nm.metric.(*FloatCounter) + if !ok { + panic(fmt.Errorf("BUG: metric %q isn't a Counter. It is %T", name, nm.metric)) + } + return c +} + +// NewGauge registers and returns gauge with the given name in s, which calls f +// to obtain gauge value. +// +// name must be valid Prometheus-compatible metric with possible labels. +// For instance, +// +// * foo +// * foo{bar="baz"} +// * foo{bar="baz",aaa="b"} +// +// f must be safe for concurrent calls. +// +// The returned gauge is safe to use from concurrent goroutines. +func (s *Set) NewGauge(name string, f func() float64) *Gauge { + if f == nil { + panic(fmt.Errorf("BUG: f cannot be nil")) + } + g := &Gauge{ + f: f, + } + s.registerMetric(name, g) + return g +} + +// GetOrCreateGauge returns registered gauge with the given name in s +// or creates new gauge if s doesn't contain gauge with the given name. +// +// name must be valid Prometheus-compatible metric with possible labels. +// For instance, +// +// * foo +// * foo{bar="baz"} +// * foo{bar="baz",aaa="b"} +// +// The returned gauge is safe to use from concurrent goroutines. +// +// Performance tip: prefer NewGauge instead of GetOrCreateGauge. +func (s *Set) GetOrCreateGauge(name string, f func() float64) *Gauge { + s.mu.Lock() + nm := s.m[name] + s.mu.Unlock() + if nm == nil { + // Slow path - create and register missing gauge. + if f == nil { + panic(fmt.Errorf("BUG: f cannot be nil")) + } + if err := validateMetric(name); err != nil { + panic(fmt.Errorf("BUG: invalid metric name %q: %s", name, err)) + } + nmNew := &namedMetric{ + name: name, + metric: &Gauge{ + f: f, + }, + } + s.mu.Lock() + nm = s.m[name] + if nm == nil { + nm = nmNew + s.m[name] = nm + s.a = append(s.a, nm) + } + s.mu.Unlock() + } + g, ok := nm.metric.(*Gauge) + if !ok { + panic(fmt.Errorf("BUG: metric %q isn't a Gauge. It is %T", name, nm.metric)) + } + return g +} + +// NewSummary creates and returns new summary with the given name in s. +// +// name must be valid Prometheus-compatible metric with possible labels. +// For instance, +// +// * foo +// * foo{bar="baz"} +// * foo{bar="baz",aaa="b"} +// +// The returned summary is safe to use from concurrent goroutines. +func (s *Set) NewSummary(name string) *Summary { + return s.NewSummaryExt(name, defaultSummaryWindow, defaultSummaryQuantiles) +} + +// NewSummaryExt creates and returns new summary in s with the given name, +// window and quantiles. 
+// +// name must be valid Prometheus-compatible metric with possible labels. +// For instance, +// +// * foo +// * foo{bar="baz"} +// * foo{bar="baz",aaa="b"} +// +// The returned summary is safe to use from concurrent goroutines. +func (s *Set) NewSummaryExt(name string, window time.Duration, quantiles []float64) *Summary { + if err := validateMetric(name); err != nil { + panic(fmt.Errorf("BUG: invalid metric name %q: %s", name, err)) + } + sm := newSummary(window, quantiles) + + s.mu.Lock() + // defer will unlock in case of panic + // checks in tests + defer s.mu.Unlock() + + s.mustRegisterLocked(name, sm) + registerSummaryLocked(sm) + s.registerSummaryQuantilesLocked(name, sm) + s.summaries = append(s.summaries, sm) + return sm +} + +// GetOrCreateSummary returns registered summary with the given name in s +// or creates new summary if s doesn't contain summary with the given name. +// +// name must be valid Prometheus-compatible metric with possible labels. +// For instance, +// +// * foo +// * foo{bar="baz"} +// * foo{bar="baz",aaa="b"} +// +// The returned summary is safe to use from concurrent goroutines. +// +// Performance tip: prefer NewSummary instead of GetOrCreateSummary. +func (s *Set) GetOrCreateSummary(name string) *Summary { + return s.GetOrCreateSummaryExt(name, defaultSummaryWindow, defaultSummaryQuantiles) +} + +// GetOrCreateSummaryExt returns registered summary with the given name, +// window and quantiles in s or creates new summary if s doesn't +// contain summary with the given name. +// +// name must be valid Prometheus-compatible metric with possible labels. +// For instance, +// +// * foo +// * foo{bar="baz"} +// * foo{bar="baz",aaa="b"} +// +// The returned summary is safe to use from concurrent goroutines. +// +// Performance tip: prefer NewSummaryExt instead of GetOrCreateSummaryExt. +func (s *Set) GetOrCreateSummaryExt(name string, window time.Duration, quantiles []float64) *Summary { + s.mu.Lock() + nm := s.m[name] + s.mu.Unlock() + if nm == nil { + // Slow path - create and register missing summary. + if err := validateMetric(name); err != nil { + panic(fmt.Errorf("BUG: invalid metric name %q: %s", name, err)) + } + sm := newSummary(window, quantiles) + nmNew := &namedMetric{ + name: name, + metric: sm, + } + s.mu.Lock() + nm = s.m[name] + if nm == nil { + nm = nmNew + s.m[name] = nm + s.a = append(s.a, nm) + registerSummaryLocked(sm) + s.registerSummaryQuantilesLocked(name, sm) + } + s.summaries = append(s.summaries, sm) + s.mu.Unlock() + } + sm, ok := nm.metric.(*Summary) + if !ok { + panic(fmt.Errorf("BUG: metric %q isn't a Summary. 
It is %T", name, nm.metric)) + } + if sm.window != window { + panic(fmt.Errorf("BUG: invalid window requested for the summary %q; requested %s; need %s", name, window, sm.window)) + } + if !isEqualQuantiles(sm.quantiles, quantiles) { + panic(fmt.Errorf("BUG: invalid quantiles requested from the summary %q; requested %v; need %v", name, quantiles, sm.quantiles)) + } + return sm +} + +func (s *Set) registerSummaryQuantilesLocked(name string, sm *Summary) { + for i, q := range sm.quantiles { + quantileValueName := addTag(name, fmt.Sprintf(`quantile="%g"`, q)) + qv := &quantileValue{ + sm: sm, + idx: i, + } + s.mustRegisterLocked(quantileValueName, qv) + } +} + +func (s *Set) registerMetric(name string, m metric) { + if err := validateMetric(name); err != nil { + panic(fmt.Errorf("BUG: invalid metric name %q: %s", name, err)) + } + s.mu.Lock() + // defer will unlock in case of panic + // checks in test + defer s.mu.Unlock() + s.mustRegisterLocked(name, m) +} + +// mustRegisterLocked registers given metric with +// the given name. Panics if the given name was +// already registered before. +func (s *Set) mustRegisterLocked(name string, m metric) { + nm, ok := s.m[name] + if !ok { + nm = &namedMetric{ + name: name, + metric: m, + } + s.m[name] = nm + s.a = append(s.a, nm) + } + if ok { + panic(fmt.Errorf("BUG: metric %q is already registered", name)) + } +} + +// UnregisterMetric removes metric with the given name from s. +// +// True is returned if the metric has been removed. +// False is returned if the given metric is missing in s. +func (s *Set) UnregisterMetric(name string) bool { + s.mu.Lock() + defer s.mu.Unlock() + + nm, ok := s.m[name] + if !ok { + return false + } + m := nm.metric + + delete(s.m, name) + + deleteFromList := func(metricName string) { + for i, nm := range s.a { + if nm.name == metricName { + s.a = append(s.a[:i], s.a[i+1:]...) + return + } + } + panic(fmt.Errorf("BUG: cannot find metric %q in the list of registered metrics", name)) + } + + // remove metric from s.a + deleteFromList(name) + + sm, ok := m.(*Summary) + if !ok { + // There is no need in cleaning up summary. + return true + } + + // cleanup registry from per-quantile metrics + for _, q := range sm.quantiles { + quantileValueName := addTag(name, fmt.Sprintf(`quantile="%g"`, q)) + delete(s.m, quantileValueName) + deleteFromList(quantileValueName) + } + + // Remove sm from s.summaries + found := false + for i, xsm := range s.summaries { + if xsm == sm { + s.summaries = append(s.summaries[:i], s.summaries[i+1:]...) + found = true + break + } + } + if !found { + panic(fmt.Errorf("BUG: cannot find summary %q in the list of registered summaries", name)) + } + unregisterSummary(sm) + return true +} + +// ListMetricNames returns a list of all the metrics in s. +func (s *Set) ListMetricNames() []string { + var list []string + for name := range s.m { + list = append(list, name) + } + return list +} diff --git a/vendor/github.com/VictoriaMetrics/metrics/summary.go b/vendor/github.com/VictoriaMetrics/metrics/summary.go new file mode 100644 index 000000000..0f01e9ae1 --- /dev/null +++ b/vendor/github.com/VictoriaMetrics/metrics/summary.go @@ -0,0 +1,254 @@ +package metrics + +import ( + "fmt" + "io" + "math" + "strings" + "sync" + "time" + + "github.com/valyala/histogram" +) + +const defaultSummaryWindow = 5 * time.Minute + +var defaultSummaryQuantiles = []float64{0.5, 0.9, 0.97, 0.99, 1} + +// Summary implements summary. 
+type Summary struct { + mu sync.Mutex + + curr *histogram.Fast + next *histogram.Fast + + quantiles []float64 + quantileValues []float64 + + sum float64 + count uint64 + + window time.Duration +} + +// NewSummary creates and returns new summary with the given name. +// +// name must be valid Prometheus-compatible metric with possible labels. +// For instance, +// +// * foo +// * foo{bar="baz"} +// * foo{bar="baz",aaa="b"} +// +// The returned summary is safe to use from concurrent goroutines. +func NewSummary(name string) *Summary { + return defaultSet.NewSummary(name) +} + +// NewSummaryExt creates and returns new summary with the given name, +// window and quantiles. +// +// name must be valid Prometheus-compatible metric with possible labels. +// For instance, +// +// * foo +// * foo{bar="baz"} +// * foo{bar="baz",aaa="b"} +// +// The returned summary is safe to use from concurrent goroutines. +func NewSummaryExt(name string, window time.Duration, quantiles []float64) *Summary { + return defaultSet.NewSummaryExt(name, window, quantiles) +} + +func newSummary(window time.Duration, quantiles []float64) *Summary { + // Make a copy of quantiles in order to prevent from their modification by the caller. + quantiles = append([]float64{}, quantiles...) + validateQuantiles(quantiles) + sm := &Summary{ + curr: histogram.NewFast(), + next: histogram.NewFast(), + quantiles: quantiles, + quantileValues: make([]float64, len(quantiles)), + window: window, + } + return sm +} + +func validateQuantiles(quantiles []float64) { + for _, q := range quantiles { + if q < 0 || q > 1 { + panic(fmt.Errorf("BUG: quantile must be in the range [0..1]; got %v", q)) + } + } +} + +// Update updates the summary. +func (sm *Summary) Update(v float64) { + sm.mu.Lock() + sm.curr.Update(v) + sm.next.Update(v) + sm.sum += v + sm.count++ + sm.mu.Unlock() +} + +// UpdateDuration updates request duration based on the given startTime. +func (sm *Summary) UpdateDuration(startTime time.Time) { + d := time.Since(startTime).Seconds() + sm.Update(d) +} + +func (sm *Summary) marshalTo(prefix string, w io.Writer) { + // Marshal only *_sum and *_count values. + // Quantile values should be already updated by the caller via sm.updateQuantiles() call. + // sm.quantileValues will be marshaled later via quantileValue.marshalTo. + sm.mu.Lock() + sum := sm.sum + count := sm.count + sm.mu.Unlock() + + if count > 0 { + name, filters := splitMetricName(prefix) + if float64(int64(sum)) == sum { + // Marshal integer sum without scientific notation + fmt.Fprintf(w, "%s_sum%s %d\n", name, filters, int64(sum)) + } else { + fmt.Fprintf(w, "%s_sum%s %g\n", name, filters, sum) + } + fmt.Fprintf(w, "%s_count%s %d\n", name, filters, count) + } +} + +func splitMetricName(name string) (string, string) { + n := strings.IndexByte(name, '{') + if n < 0 { + return name, "" + } + return name[:n], name[n:] +} + +func (sm *Summary) updateQuantiles() { + sm.mu.Lock() + sm.quantileValues = sm.curr.Quantiles(sm.quantileValues[:0], sm.quantiles) + sm.mu.Unlock() +} + +// GetOrCreateSummary returns registered summary with the given name +// or creates new summary if the registry doesn't contain summary with +// the given name. +// +// name must be valid Prometheus-compatible metric with possible labels. +// For instance, +// +// * foo +// * foo{bar="baz"} +// * foo{bar="baz",aaa="b"} +// +// The returned summary is safe to use from concurrent goroutines. +// +// Performance tip: prefer NewSummary instead of GetOrCreateSummary. 
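A small sketch of the `Summary` API defined here, using `NewSummaryExt` with an explicit window and quantile list; the metric name, window, and simulated query are illustrative:

```go
package main

import (
	"os"
	"time"

	"github.com/VictoriaMetrics/metrics"
)

// A 1-minute sliding window with three tracked quantiles.
var queryLatency = metrics.NewSummaryExt(
	`query_latency_seconds{db="main"}`, time.Minute, []float64{0.5, 0.9, 0.99})

func main() {
	start := time.Now()
	time.Sleep(5 * time.Millisecond) // stand-in for a real query
	queryLatency.UpdateDuration(start)

	// Exposes per-quantile series for query_latency_seconds along with
	// the _sum and _count series.
	metrics.WritePrometheus(os.Stdout, false)
}
```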
+func GetOrCreateSummary(name string) *Summary { + return defaultSet.GetOrCreateSummary(name) +} + +// GetOrCreateSummaryExt returns registered summary with the given name, +// window and quantiles or creates new summary if the registry doesn't +// contain summary with the given name. +// +// name must be valid Prometheus-compatible metric with possible labels. +// For instance, +// +// * foo +// * foo{bar="baz"} +// * foo{bar="baz",aaa="b"} +// +// The returned summary is safe to use from concurrent goroutines. +// +// Performance tip: prefer NewSummaryExt instead of GetOrCreateSummaryExt. +func GetOrCreateSummaryExt(name string, window time.Duration, quantiles []float64) *Summary { + return defaultSet.GetOrCreateSummaryExt(name, window, quantiles) +} + +func isEqualQuantiles(a, b []float64) bool { + // Do not use relfect.DeepEqual, since it is slower than the direct comparison. + if len(a) != len(b) { + return false + } + for i := range a { + if a[i] != b[i] { + return false + } + } + return true +} + +type quantileValue struct { + sm *Summary + idx int +} + +func (qv *quantileValue) marshalTo(prefix string, w io.Writer) { + qv.sm.mu.Lock() + v := qv.sm.quantileValues[qv.idx] + qv.sm.mu.Unlock() + if !math.IsNaN(v) { + fmt.Fprintf(w, "%s %g\n", prefix, v) + } +} + +func addTag(name, tag string) string { + if len(name) == 0 || name[len(name)-1] != '}' { + return fmt.Sprintf("%s{%s}", name, tag) + } + return fmt.Sprintf("%s,%s}", name[:len(name)-1], tag) +} + +func registerSummaryLocked(sm *Summary) { + window := sm.window + summariesLock.Lock() + summaries[window] = append(summaries[window], sm) + if len(summaries[window]) == 1 { + go summariesSwapCron(window) + } + summariesLock.Unlock() +} + +func unregisterSummary(sm *Summary) { + window := sm.window + summariesLock.Lock() + sms := summaries[window] + found := false + for i, xsm := range sms { + if xsm == sm { + sms = append(sms[:i], sms[i+1:]...) 
+ found = true + break + } + } + if !found { + panic(fmt.Errorf("BUG: cannot find registered summary %p", sm)) + } + summaries[window] = sms + summariesLock.Unlock() +} + +func summariesSwapCron(window time.Duration) { + for { + time.Sleep(window / 2) + summariesLock.Lock() + for _, sm := range summaries[window] { + sm.mu.Lock() + tmp := sm.curr + sm.curr = sm.next + sm.next = tmp + sm.next.Reset() + sm.mu.Unlock() + } + summariesLock.Unlock() + } +} + +var ( + summaries = map[time.Duration][]*Summary{} + summariesLock sync.Mutex +) diff --git a/vendor/github.com/VictoriaMetrics/metrics/validator.go b/vendor/github.com/VictoriaMetrics/metrics/validator.go new file mode 100644 index 000000000..27e88ca33 --- /dev/null +++ b/vendor/github.com/VictoriaMetrics/metrics/validator.go @@ -0,0 +1,84 @@ +package metrics + +import ( + "fmt" + "regexp" + "strings" +) + +func validateMetric(s string) error { + if len(s) == 0 { + return fmt.Errorf("metric cannot be empty") + } + n := strings.IndexByte(s, '{') + if n < 0 { + return validateIdent(s) + } + ident := s[:n] + s = s[n+1:] + if err := validateIdent(ident); err != nil { + return err + } + if len(s) == 0 || s[len(s)-1] != '}' { + return fmt.Errorf("missing closing curly brace at the end of %q", ident) + } + return validateTags(s[:len(s)-1]) +} + +func validateTags(s string) error { + if len(s) == 0 { + return nil + } + for { + n := strings.IndexByte(s, '=') + if n < 0 { + return fmt.Errorf("missing `=` after %q", s) + } + ident := s[:n] + s = s[n+1:] + if err := validateIdent(ident); err != nil { + return err + } + if len(s) == 0 || s[0] != '"' { + return fmt.Errorf("missing starting `\"` for %q value; tail=%q", ident, s) + } + s = s[1:] + again: + n = strings.IndexByte(s, '"') + if n < 0 { + return fmt.Errorf("missing trailing `\"` for %q value; tail=%q", ident, s) + } + m := n + for m > 0 && s[m-1] == '\\' { + m-- + } + if (n-m)%2 == 1 { + s = s[n+1:] + goto again + } + s = s[n+1:] + if len(s) == 0 { + return nil + } + if !strings.HasPrefix(s, ",") { + return fmt.Errorf("missing `,` after %q value; tail=%q", ident, s) + } + s = skipSpace(s[1:]) + } +} + +func skipSpace(s string) string { + for len(s) > 0 && s[0] == ' ' { + s = s[1:] + } + return s +} + +func validateIdent(s string) error { + if !identRegexp.MatchString(s) { + return fmt.Errorf("invalid identifier %q", s) + } + return nil +} + +var identRegexp = regexp.MustCompile("^[a-zA-Z_:][a-zA-Z0-9_:]*$") diff --git a/vendor/github.com/creack/pty/.gitignore b/vendor/github.com/creack/pty/.gitignore new file mode 100644 index 000000000..1f0a99f2f --- /dev/null +++ b/vendor/github.com/creack/pty/.gitignore @@ -0,0 +1,4 @@ +[568].out +_go* +_test* +_obj diff --git a/vendor/github.com/creack/pty/Dockerfile.riscv b/vendor/github.com/creack/pty/Dockerfile.riscv new file mode 100644 index 000000000..adfdf82c8 --- /dev/null +++ b/vendor/github.com/creack/pty/Dockerfile.riscv @@ -0,0 +1,14 @@ +FROM golang:1.13 + +# Clone and complie a riscv compatible version of the go compiler. +RUN git clone https://review.gerrithub.io/riscv/riscv-go /riscv-go +# riscvdev branch HEAD as of 2019-06-29. +RUN cd /riscv-go && git checkout 04885fddd096d09d4450726064d06dd107e374bf +ENV PATH=/riscv-go/misc/riscv:/riscv-go/bin:$PATH +RUN cd /riscv-go/src && GOROOT_BOOTSTRAP=$(go env GOROOT) ./make.bash +ENV GOROOT=/riscv-go + +# Make sure we compile. +WORKDIR pty +ADD . . 
+RUN GOOS=linux GOARCH=riscv go build diff --git a/vendor/github.com/creack/pty/LICENSE b/vendor/github.com/creack/pty/LICENSE new file mode 100644 index 000000000..6b7558b6b --- /dev/null +++ b/vendor/github.com/creack/pty/LICENSE @@ -0,0 +1,23 @@ +Copyright (c) 2011 Keith Rarick + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, +sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall +be included in all copies or substantial portions of the +Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY +KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE +WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS +OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR +OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/creack/pty/README.md b/vendor/github.com/creack/pty/README.md new file mode 100644 index 000000000..5275014a7 --- /dev/null +++ b/vendor/github.com/creack/pty/README.md @@ -0,0 +1,100 @@ +# pty + +Pty is a Go package for using unix pseudo-terminals. + +## Install + + go get github.com/creack/pty + +## Example + +### Command + +```go +package main + +import ( + "github.com/creack/pty" + "io" + "os" + "os/exec" +) + +func main() { + c := exec.Command("grep", "--color=auto", "bar") + f, err := pty.Start(c) + if err != nil { + panic(err) + } + + go func() { + f.Write([]byte("foo\n")) + f.Write([]byte("bar\n")) + f.Write([]byte("baz\n")) + f.Write([]byte{4}) // EOT + }() + io.Copy(os.Stdout, f) +} +``` + +### Shell + +```go +package main + +import ( + "io" + "log" + "os" + "os/exec" + "os/signal" + "syscall" + + "github.com/creack/pty" + "golang.org/x/crypto/ssh/terminal" +) + +func test() error { + // Create arbitrary command. + c := exec.Command("bash") + + // Start the command with a pty. + ptmx, err := pty.Start(c) + if err != nil { + return err + } + // Make sure to close the pty at the end. + defer func() { _ = ptmx.Close() }() // Best effort. + + // Handle pty size. + ch := make(chan os.Signal, 1) + signal.Notify(ch, syscall.SIGWINCH) + go func() { + for range ch { + if err := pty.InheritSize(os.Stdin, ptmx); err != nil { + log.Printf("error resizing pty: %s", err) + } + } + }() + ch <- syscall.SIGWINCH // Initial resize. + + // Set stdin in raw mode. + oldState, err := terminal.MakeRaw(int(os.Stdin.Fd())) + if err != nil { + panic(err) + } + defer func() { _ = terminal.Restore(int(os.Stdin.Fd()), oldState) }() // Best effort. + + // Copy stdin to the pty and the pty to stdout. + go func() { _, _ = io.Copy(ptmx, os.Stdin) }() + _, _ = io.Copy(os.Stdout, ptmx) + + return nil +} + +func main() { + if err := test(); err != nil { + log.Fatal(err) + } +} +``` diff --git a/vendor/github.com/creack/pty/doc.go b/vendor/github.com/creack/pty/doc.go new file mode 100644 index 000000000..190cfbea9 --- /dev/null +++ b/vendor/github.com/creack/pty/doc.go @@ -0,0 +1,16 @@ +// Package pty provides functions for working with Unix terminals. 
+package pty + +import ( + "errors" + "os" +) + +// ErrUnsupported is returned if a function is not +// available on the current platform. +var ErrUnsupported = errors.New("unsupported") + +// Opens a pty and its corresponding tty. +func Open() (pty, tty *os.File, err error) { + return open() +} diff --git a/vendor/github.com/creack/pty/go.mod b/vendor/github.com/creack/pty/go.mod new file mode 100644 index 000000000..e48decaf4 --- /dev/null +++ b/vendor/github.com/creack/pty/go.mod @@ -0,0 +1,4 @@ +module github.com/creack/pty + +go 1.13 + diff --git a/vendor/github.com/creack/pty/ioctl.go b/vendor/github.com/creack/pty/ioctl.go new file mode 100644 index 000000000..c85cdcd14 --- /dev/null +++ b/vendor/github.com/creack/pty/ioctl.go @@ -0,0 +1,13 @@ +// +build !windows,!solaris + +package pty + +import "syscall" + +func ioctl(fd, cmd, ptr uintptr) error { + _, _, e := syscall.Syscall(syscall.SYS_IOCTL, fd, cmd, ptr) + if e != 0 { + return e + } + return nil +} diff --git a/vendor/github.com/creack/pty/ioctl_bsd.go b/vendor/github.com/creack/pty/ioctl_bsd.go new file mode 100644 index 000000000..73b12c53c --- /dev/null +++ b/vendor/github.com/creack/pty/ioctl_bsd.go @@ -0,0 +1,39 @@ +// +build darwin dragonfly freebsd netbsd openbsd + +package pty + +// from +const ( + _IOC_VOID uintptr = 0x20000000 + _IOC_OUT uintptr = 0x40000000 + _IOC_IN uintptr = 0x80000000 + _IOC_IN_OUT uintptr = _IOC_OUT | _IOC_IN + _IOC_DIRMASK = _IOC_VOID | _IOC_OUT | _IOC_IN + + _IOC_PARAM_SHIFT = 13 + _IOC_PARAM_MASK = (1 << _IOC_PARAM_SHIFT) - 1 +) + +func _IOC_PARM_LEN(ioctl uintptr) uintptr { + return (ioctl >> 16) & _IOC_PARAM_MASK +} + +func _IOC(inout uintptr, group byte, ioctl_num uintptr, param_len uintptr) uintptr { + return inout | (param_len&_IOC_PARAM_MASK)<<16 | uintptr(group)<<8 | ioctl_num +} + +func _IO(group byte, ioctl_num uintptr) uintptr { + return _IOC(_IOC_VOID, group, ioctl_num, 0) +} + +func _IOR(group byte, ioctl_num uintptr, param_len uintptr) uintptr { + return _IOC(_IOC_OUT, group, ioctl_num, param_len) +} + +func _IOW(group byte, ioctl_num uintptr, param_len uintptr) uintptr { + return _IOC(_IOC_IN, group, ioctl_num, param_len) +} + +func _IOWR(group byte, ioctl_num uintptr, param_len uintptr) uintptr { + return _IOC(_IOC_IN_OUT, group, ioctl_num, param_len) +} diff --git a/vendor/github.com/creack/pty/ioctl_solaris.go b/vendor/github.com/creack/pty/ioctl_solaris.go new file mode 100644 index 000000000..249686cff --- /dev/null +++ b/vendor/github.com/creack/pty/ioctl_solaris.go @@ -0,0 +1,31 @@ +package pty + +import ( + "unsafe" + + "golang.org/x/sys/unix" +) + +const ( + // see /usr/include/sys/stropts.h + I_PUSH = uintptr((int32('S')<<8 | 002)) + I_STR = uintptr((int32('S')<<8 | 010)) + I_FIND = uintptr((int32('S')<<8 | 013)) + // see /usr/include/sys/ptms.h + ISPTM = (int32('P') << 8) | 1 + UNLKPT = (int32('P') << 8) | 2 + PTSSTTY = (int32('P') << 8) | 3 + ZONEPT = (int32('P') << 8) | 4 + OWNERPT = (int32('P') << 8) | 5 +) + +type strioctl struct { + ic_cmd int32 + ic_timout int32 + ic_len int32 + ic_dp unsafe.Pointer +} + +func ioctl(fd, cmd, ptr uintptr) error { + return unix.IoctlSetInt(int(fd), uint(cmd), int(ptr)) +} diff --git a/vendor/github.com/creack/pty/mktypes.bash b/vendor/github.com/creack/pty/mktypes.bash new file mode 100644 index 000000000..82ee16721 --- /dev/null +++ b/vendor/github.com/creack/pty/mktypes.bash @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +GOOSARCH="${GOOS}_${GOARCH}" +case "$GOOSARCH" in +_* | *_ | _) + echo 'undefined $GOOS_$GOARCH:' "$GOOSARCH" 1>&2 
+ exit 1 + ;; +esac + +GODEFS="go tool cgo -godefs" + +$GODEFS types.go |gofmt > ztypes_$GOARCH.go + +case $GOOS in +freebsd|dragonfly|openbsd) + $GODEFS types_$GOOS.go |gofmt > ztypes_$GOOSARCH.go + ;; +esac diff --git a/vendor/github.com/creack/pty/pty_darwin.go b/vendor/github.com/creack/pty/pty_darwin.go new file mode 100644 index 000000000..6344b6b0e --- /dev/null +++ b/vendor/github.com/creack/pty/pty_darwin.go @@ -0,0 +1,65 @@ +package pty + +import ( + "errors" + "os" + "syscall" + "unsafe" +) + +func open() (pty, tty *os.File, err error) { + pFD, err := syscall.Open("/dev/ptmx", syscall.O_RDWR|syscall.O_CLOEXEC, 0) + if err != nil { + return nil, nil, err + } + p := os.NewFile(uintptr(pFD), "/dev/ptmx") + // In case of error after this point, make sure we close the ptmx fd. + defer func() { + if err != nil { + _ = p.Close() // Best effort. + } + }() + + sname, err := ptsname(p) + if err != nil { + return nil, nil, err + } + + if err := grantpt(p); err != nil { + return nil, nil, err + } + + if err := unlockpt(p); err != nil { + return nil, nil, err + } + + t, err := os.OpenFile(sname, os.O_RDWR, 0) + if err != nil { + return nil, nil, err + } + return p, t, nil +} + +func ptsname(f *os.File) (string, error) { + n := make([]byte, _IOC_PARM_LEN(syscall.TIOCPTYGNAME)) + + err := ioctl(f.Fd(), syscall.TIOCPTYGNAME, uintptr(unsafe.Pointer(&n[0]))) + if err != nil { + return "", err + } + + for i, c := range n { + if c == 0 { + return string(n[:i]), nil + } + } + return "", errors.New("TIOCPTYGNAME string not NUL-terminated") +} + +func grantpt(f *os.File) error { + return ioctl(f.Fd(), syscall.TIOCPTYGRANT, 0) +} + +func unlockpt(f *os.File) error { + return ioctl(f.Fd(), syscall.TIOCPTYUNLK, 0) +} diff --git a/vendor/github.com/creack/pty/pty_dragonfly.go b/vendor/github.com/creack/pty/pty_dragonfly.go new file mode 100644 index 000000000..b7d1f20f2 --- /dev/null +++ b/vendor/github.com/creack/pty/pty_dragonfly.go @@ -0,0 +1,80 @@ +package pty + +import ( + "errors" + "os" + "strings" + "syscall" + "unsafe" +) + +// same code as pty_darwin.go +func open() (pty, tty *os.File, err error) { + p, err := os.OpenFile("/dev/ptmx", os.O_RDWR, 0) + if err != nil { + return nil, nil, err + } + // In case of error after this point, make sure we close the ptmx fd. + defer func() { + if err != nil { + _ = p.Close() // Best effort. 
+ } + }() + + sname, err := ptsname(p) + if err != nil { + return nil, nil, err + } + + if err := grantpt(p); err != nil { + return nil, nil, err + } + + if err := unlockpt(p); err != nil { + return nil, nil, err + } + + t, err := os.OpenFile(sname, os.O_RDWR, 0) + if err != nil { + return nil, nil, err + } + return p, t, nil +} + +func grantpt(f *os.File) error { + _, err := isptmaster(f.Fd()) + return err +} + +func unlockpt(f *os.File) error { + _, err := isptmaster(f.Fd()) + return err +} + +func isptmaster(fd uintptr) (bool, error) { + err := ioctl(fd, syscall.TIOCISPTMASTER, 0) + return err == nil, err +} + +var ( + emptyFiodgnameArg fiodgnameArg + ioctl_FIODNAME = _IOW('f', 120, unsafe.Sizeof(emptyFiodgnameArg)) +) + +func ptsname(f *os.File) (string, error) { + name := make([]byte, _C_SPECNAMELEN) + fa := fiodgnameArg{Name: (*byte)(unsafe.Pointer(&name[0])), Len: _C_SPECNAMELEN, Pad_cgo_0: [4]byte{0, 0, 0, 0}} + + err := ioctl(f.Fd(), ioctl_FIODNAME, uintptr(unsafe.Pointer(&fa))) + if err != nil { + return "", err + } + + for i, c := range name { + if c == 0 { + s := "/dev/" + string(name[:i]) + return strings.Replace(s, "ptm", "pts", -1), nil + } + } + return "", errors.New("TIOCPTYGNAME string not NUL-terminated") +} diff --git a/vendor/github.com/creack/pty/pty_freebsd.go b/vendor/github.com/creack/pty/pty_freebsd.go new file mode 100644 index 000000000..63b6d9133 --- /dev/null +++ b/vendor/github.com/creack/pty/pty_freebsd.go @@ -0,0 +1,78 @@ +package pty + +import ( + "errors" + "os" + "syscall" + "unsafe" +) + +func posixOpenpt(oflag int) (fd int, err error) { + r0, _, e1 := syscall.Syscall(syscall.SYS_POSIX_OPENPT, uintptr(oflag), 0, 0) + fd = int(r0) + if e1 != 0 { + err = e1 + } + return fd, err +} + +func open() (pty, tty *os.File, err error) { + fd, err := posixOpenpt(syscall.O_RDWR | syscall.O_CLOEXEC) + if err != nil { + return nil, nil, err + } + p := os.NewFile(uintptr(fd), "/dev/pts") + // In case of error after this point, make sure we close the pts fd. + defer func() { + if err != nil { + _ = p.Close() // Best effort. 
+ } + }() + + sname, err := ptsname(p) + if err != nil { + return nil, nil, err + } + + t, err := os.OpenFile("/dev/"+sname, os.O_RDWR, 0) + if err != nil { + return nil, nil, err + } + return p, t, nil +} + +func isptmaster(fd uintptr) (bool, error) { + err := ioctl(fd, syscall.TIOCPTMASTER, 0) + return err == nil, err +} + +var ( + emptyFiodgnameArg fiodgnameArg + ioctlFIODGNAME = _IOW('f', 120, unsafe.Sizeof(emptyFiodgnameArg)) +) + +func ptsname(f *os.File) (string, error) { + master, err := isptmaster(f.Fd()) + if err != nil { + return "", err + } + if !master { + return "", syscall.EINVAL + } + + const n = _C_SPECNAMELEN + 1 + var ( + buf = make([]byte, n) + arg = fiodgnameArg{Len: n, Buf: (*byte)(unsafe.Pointer(&buf[0]))} + ) + if err := ioctl(f.Fd(), ioctlFIODGNAME, uintptr(unsafe.Pointer(&arg))); err != nil { + return "", err + } + + for i, c := range buf { + if c == 0 { + return string(buf[:i]), nil + } + } + return "", errors.New("FIODGNAME string not NUL-terminated") +} diff --git a/vendor/github.com/creack/pty/pty_linux.go b/vendor/github.com/creack/pty/pty_linux.go new file mode 100644 index 000000000..4a833de18 --- /dev/null +++ b/vendor/github.com/creack/pty/pty_linux.go @@ -0,0 +1,51 @@ +package pty + +import ( + "os" + "strconv" + "syscall" + "unsafe" +) + +func open() (pty, tty *os.File, err error) { + p, err := os.OpenFile("/dev/ptmx", os.O_RDWR, 0) + if err != nil { + return nil, nil, err + } + // In case of error after this point, make sure we close the ptmx fd. + defer func() { + if err != nil { + _ = p.Close() // Best effort. + } + }() + + sname, err := ptsname(p) + if err != nil { + return nil, nil, err + } + + if err := unlockpt(p); err != nil { + return nil, nil, err + } + + t, err := os.OpenFile(sname, os.O_RDWR|syscall.O_NOCTTY, 0) + if err != nil { + return nil, nil, err + } + return p, t, nil +} + +func ptsname(f *os.File) (string, error) { + var n _C_uint + err := ioctl(f.Fd(), syscall.TIOCGPTN, uintptr(unsafe.Pointer(&n))) + if err != nil { + return "", err + } + return "/dev/pts/" + strconv.Itoa(int(n)), nil +} + +func unlockpt(f *os.File) error { + var u _C_int + // use TIOCSPTLCK with a pointer to zero to clear the lock + return ioctl(f.Fd(), syscall.TIOCSPTLCK, uintptr(unsafe.Pointer(&u))) +} diff --git a/vendor/github.com/creack/pty/pty_openbsd.go b/vendor/github.com/creack/pty/pty_openbsd.go new file mode 100644 index 000000000..a6a35d1e6 --- /dev/null +++ b/vendor/github.com/creack/pty/pty_openbsd.go @@ -0,0 +1,33 @@ +package pty + +import ( + "os" + "syscall" + "unsafe" +) + +func open() (pty, tty *os.File, err error) { + /* + * from ptm(4): + * The PTMGET command allocates a free pseudo terminal, changes its + * ownership to the caller, revokes the access privileges for all previous + * users, opens the file descriptors for the pty and tty devices and + * returns them to the caller in struct ptmget. 
+ */ + + p, err := os.OpenFile("/dev/ptm", os.O_RDWR|syscall.O_CLOEXEC, 0) + if err != nil { + return nil, nil, err + } + defer p.Close() + + var ptm ptmget + if err := ioctl(p.Fd(), uintptr(ioctl_PTMGET), uintptr(unsafe.Pointer(&ptm))); err != nil { + return nil, nil, err + } + + pty = os.NewFile(uintptr(ptm.Cfd), "/dev/ptm") + tty = os.NewFile(uintptr(ptm.Sfd), "/dev/ptm") + + return pty, tty, nil +} diff --git a/vendor/github.com/creack/pty/pty_solaris.go b/vendor/github.com/creack/pty/pty_solaris.go new file mode 100644 index 000000000..5c3797259 --- /dev/null +++ b/vendor/github.com/creack/pty/pty_solaris.go @@ -0,0 +1,140 @@ +package pty + +/* based on: +http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libc/port/gen/pt.c +*/ + +import ( + "errors" + "os" + "strconv" + "syscall" + "unsafe" + + "golang.org/x/sys/unix" +) + +const NODEV = ^uint64(0) + +func open() (pty, tty *os.File, err error) { + masterfd, err := syscall.Open("/dev/ptmx", syscall.O_RDWR|unix.O_NOCTTY, 0) + //masterfd, err := syscall.Open("/dev/ptmx", syscall.O_RDWR|syscall.O_CLOEXEC|unix.O_NOCTTY, 0) + if err != nil { + return nil, nil, err + } + p := os.NewFile(uintptr(masterfd), "/dev/ptmx") + + sname, err := ptsname(p) + if err != nil { + return nil, nil, err + } + + err = grantpt(p) + if err != nil { + return nil, nil, err + } + + err = unlockpt(p) + if err != nil { + return nil, nil, err + } + + slavefd, err := syscall.Open(sname, os.O_RDWR|unix.O_NOCTTY, 0) + if err != nil { + return nil, nil, err + } + t := os.NewFile(uintptr(slavefd), sname) + + // pushing terminal driver STREAMS modules as per pts(7) + for _, mod := range []string{"ptem", "ldterm", "ttcompat"} { + err = streams_push(t, mod) + if err != nil { + return nil, nil, err + } + } + + return p, t, nil +} + +func minor(x uint64) uint64 { + return x & 0377 +} + +func ptsdev(fd uintptr) uint64 { + istr := strioctl{ISPTM, 0, 0, nil} + err := ioctl(fd, I_STR, uintptr(unsafe.Pointer(&istr))) + if err != nil { + return NODEV + } + var status unix.Stat_t + err = unix.Fstat(int(fd), &status) + if err != nil { + return NODEV + } + return uint64(minor(status.Rdev)) +} + +func ptsname(f *os.File) (string, error) { + dev := ptsdev(f.Fd()) + if dev == NODEV { + return "", errors.New("not a master pty") + } + fn := "/dev/pts/" + strconv.FormatInt(int64(dev), 10) + // access(2) creates the slave device (if the pty exists) + // F_OK == 0 (unistd.h) + err := unix.Access(fn, 0) + if err != nil { + return "", err + } + return fn, nil +} + +type pt_own struct { + pto_ruid int32 + pto_rgid int32 +} + +func grantpt(f *os.File) error { + if ptsdev(f.Fd()) == NODEV { + return errors.New("not a master pty") + } + var pto pt_own + pto.pto_ruid = int32(os.Getuid()) + // XXX should first attempt to get gid of DEFAULT_TTY_GROUP="tty" + pto.pto_rgid = int32(os.Getgid()) + var istr strioctl + istr.ic_cmd = OWNERPT + istr.ic_timout = 0 + istr.ic_len = int32(unsafe.Sizeof(istr)) + istr.ic_dp = unsafe.Pointer(&pto) + err := ioctl(f.Fd(), I_STR, uintptr(unsafe.Pointer(&istr))) + if err != nil { + return errors.New("access denied") + } + return nil +} + +func unlockpt(f *os.File) error { + istr := strioctl{UNLKPT, 0, 0, nil} + return ioctl(f.Fd(), I_STR, uintptr(unsafe.Pointer(&istr))) +} + +// push STREAMS modules if not already done so +func streams_push(f *os.File, mod string) error { + var err error + buf := []byte(mod) + // XXX I_FIND is not returning an error when the module + // is already pushed even though truss reports a return + // value of 1. 
A bug in the Go Solaris syscall interface? + // XXX without this we are at risk of the issue + // https://www.illumos.org/issues/9042 + // but since we are not using libc or XPG4.2, we should not be + // double-pushing modules + + err = ioctl(f.Fd(), I_FIND, uintptr(unsafe.Pointer(&buf[0]))) + if err != nil { + return nil + } + err = ioctl(f.Fd(), I_PUSH, uintptr(unsafe.Pointer(&buf[0]))) + return err +} diff --git a/vendor/github.com/creack/pty/pty_unsupported.go b/vendor/github.com/creack/pty/pty_unsupported.go new file mode 100644 index 000000000..ceb425b19 --- /dev/null +++ b/vendor/github.com/creack/pty/pty_unsupported.go @@ -0,0 +1,11 @@ +// +build !linux,!darwin,!freebsd,!dragonfly,!openbsd,!solaris + +package pty + +import ( + "os" +) + +func open() (pty, tty *os.File, err error) { + return nil, nil, ErrUnsupported +} diff --git a/vendor/github.com/creack/pty/run.go b/vendor/github.com/creack/pty/run.go new file mode 100644 index 000000000..b07942514 --- /dev/null +++ b/vendor/github.com/creack/pty/run.go @@ -0,0 +1,74 @@ +// +build !windows + +package pty + +import ( + "os" + "os/exec" + "syscall" +) + +// Start assigns a pseudo-terminal tty os.File to c.Stdin, c.Stdout, +// and c.Stderr, calls c.Start, and returns the File of the tty's +// corresponding pty. +// +// Starts the process in a new session and sets the controlling terminal. +func Start(c *exec.Cmd) (pty *os.File, err error) { + return StartWithSize(c, nil) +} + +// StartWithSize assigns a pseudo-terminal tty os.File to c.Stdin, c.Stdout, +// and c.Stderr, calls c.Start, and returns the File of the tty's +// corresponding pty. +// +// This will resize the pty to the specified size before starting the command. +// Starts the process in a new session and sets the controlling terminal. +func StartWithSize(c *exec.Cmd, sz *Winsize) (pty *os.File, err error) { + if c.SysProcAttr == nil { + c.SysProcAttr = &syscall.SysProcAttr{} + } + c.SysProcAttr.Setsid = true + c.SysProcAttr.Setctty = true + return StartWithAttrs(c, sz, c.SysProcAttr) +} + +// StartWithAttrs assigns a pseudo-terminal tty os.File to c.Stdin, c.Stdout, +// and c.Stderr, calls c.Start, and returns the File of the tty's +// corresponding pty. +// +// This will resize the pty to the specified size before starting the command if a size is provided. +// The `attrs` parameter overrides the one set in c.SysProcAttr. +// +// This should generally not be needed. Used in some edge cases where it is needed to create a pty +// without a controlling terminal. +func StartWithAttrs(c *exec.Cmd, sz *Winsize, attrs *syscall.SysProcAttr) (pty *os.File, err error) { + pty, tty, err := Open() + if err != nil { + return nil, err + } + defer tty.Close() + + if sz != nil { + if err := Setsize(pty, sz); err != nil { + pty.Close() + return nil, err + } + } + if c.Stdout == nil { + c.Stdout = tty + } + if c.Stderr == nil { + c.Stderr = tty + } + if c.Stdin == nil { + c.Stdin = tty + } + + c.SysProcAttr = attrs + + if err := c.Start(); err != nil { + _ = pty.Close() + return nil, err + } + return pty, err +} diff --git a/vendor/github.com/creack/pty/test_crosscompile.sh b/vendor/github.com/creack/pty/test_crosscompile.sh new file mode 100644 index 000000000..f0b1dcac0 --- /dev/null +++ b/vendor/github.com/creack/pty/test_crosscompile.sh @@ -0,0 +1,50 @@ +#!/usr/bin/env sh + +# Test script checking that all expected os/arch compile properly. +# Does not actually test the logic, just the compilation so we make sure we don't break code depending on the lib. 
+ +echo2() { + echo $@ >&2 +} + +trap end 0 +end() { + [ "$?" = 0 ] && echo2 "Pass." || (echo2 "Fail."; exit 1) +} + +cross() { + os=$1 + shift + echo2 "Build for $os." + for arch in $@; do + echo2 " - $os/$arch" + GOOS=$os GOARCH=$arch go build + done + echo2 +} + +set -e + +cross linux amd64 386 arm arm64 ppc64 ppc64le s390x mips mipsle mips64 mips64le +cross darwin amd64 386 arm arm64 +cross freebsd amd64 386 arm +cross netbsd amd64 386 arm +cross openbsd amd64 386 +cross dragonfly amd64 +cross solaris amd64 + +# Not expected to work but should still compile. +cross windows amd64 386 arm + +# TODO: Fix compilation error on openbsd/arm. +# TODO: Merge the solaris PR. + +# Some os/arch require a different compiler. Run in docker. +if ! hash docker; then + # If docker is not present, stop here. + return +fi + +echo2 "Build for linux." +echo2 " - linux/riscv" +docker build -t test -f Dockerfile.riscv . diff --git a/vendor/github.com/creack/pty/util.go b/vendor/github.com/creack/pty/util.go new file mode 100644 index 000000000..8fdde0bab --- /dev/null +++ b/vendor/github.com/creack/pty/util.go @@ -0,0 +1,64 @@ +// +build !windows,!solaris + +package pty + +import ( + "os" + "syscall" + "unsafe" +) + +// InheritSize applies the terminal size of pty to tty. This should be run +// in a signal handler for syscall.SIGWINCH to automatically resize the tty when +// the pty receives a window size change notification. +func InheritSize(pty, tty *os.File) error { + size, err := GetsizeFull(pty) + if err != nil { + return err + } + err = Setsize(tty, size) + if err != nil { + return err + } + return nil +} + +// Setsize resizes t to s. +func Setsize(t *os.File, ws *Winsize) error { + return windowRectCall(ws, t.Fd(), syscall.TIOCSWINSZ) +} + +// GetsizeFull returns the full terminal size description. +func GetsizeFull(t *os.File) (size *Winsize, err error) { + var ws Winsize + err = windowRectCall(&ws, t.Fd(), syscall.TIOCGWINSZ) + return &ws, err +} + +// Getsize returns the number of rows (lines) and cols (positions +// in each line) in terminal t. +func Getsize(t *os.File) (rows, cols int, err error) { + ws, err := GetsizeFull(t) + return int(ws.Rows), int(ws.Cols), err +} + +// Winsize describes the terminal size. +type Winsize struct { + Rows uint16 // ws_row: Number of rows (in cells) + Cols uint16 // ws_col: Number of columns (in cells) + X uint16 // ws_xpixel: Width in pixels + Y uint16 // ws_ypixel: Height in pixels +} + +func windowRectCall(ws *Winsize, fd, a2 uintptr) error { + _, _, errno := syscall.Syscall( + syscall.SYS_IOCTL, + fd, + a2, + uintptr(unsafe.Pointer(ws)), + ) + if errno != 0 { + return syscall.Errno(errno) + } + return nil +} diff --git a/vendor/github.com/creack/pty/util_solaris.go b/vendor/github.com/creack/pty/util_solaris.go new file mode 100644 index 000000000..8f9731c63 --- /dev/null +++ b/vendor/github.com/creack/pty/util_solaris.go @@ -0,0 +1,52 @@ +// + +package pty + +import ( + "os" + + "golang.org/x/sys/unix" +) + +const ( + TIOCGWINSZ = 21608 // 'T' << 8 | 104 + TIOCSWINSZ = 21607 // 'T' << 8 | 103 +) + +// Winsize describes the terminal size. +type Winsize struct { + Rows uint16 // ws_row: Number of rows (in cells) + Cols uint16 // ws_col: Number of columns (in cells) + X uint16 // ws_xpixel: Width in pixels + Y uint16 // ws_ypixel: Height in pixels +} + +// GetsizeFull returns the full terminal size description. 
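// ---------------------------------------------------------------------------
// Editor's note: illustrative sketch only, not part of the vendored
// creack/pty sources. It shows how the Start helper from run.go and the
// InheritSize/Setsize helpers from util.go above are typically combined to
// run a command on a pty and keep its window size in sync with the caller's
// terminal. The command name and error handling are assumptions made for the
// example; real callers usually also put the local terminal into raw mode.
// ---------------------------------------------------------------------------
package main

import (
	"io"
	"log"
	"os"
	"os/exec"
	"os/signal"
	"syscall"

	"github.com/creack/pty"
)

func main() {
	c := exec.Command("bash")

	// Start opens a pty/tty pair, wires the tty to the command's stdio and
	// returns the pty side.
	ptmx, err := pty.Start(c)
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = ptmx.Close() }() // Best effort.

	// Forward window-size changes: InheritSize copies the size of the first
	// file (our terminal) onto the second (the pty).
	ch := make(chan os.Signal, 1)
	signal.Notify(ch, syscall.SIGWINCH)
	go func() {
		for range ch {
			if err := pty.InheritSize(os.Stdin, ptmx); err != nil {
				log.Printf("error resizing pty: %s", err)
			}
		}
	}()
	ch <- syscall.SIGWINCH // Trigger an initial resize.

	// Pipe the pty to our stdio until the command exits.
	go func() { _, _ = io.Copy(ptmx, os.Stdin) }()
	_, _ = io.Copy(os.Stdout, ptmx)
}
// ---------------------------------------------------------------------------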
+func GetsizeFull(t *os.File) (size *Winsize, err error) { + var wsz *unix.Winsize + wsz, err = unix.IoctlGetWinsize(int(t.Fd()), TIOCGWINSZ) + + if err != nil { + return nil, err + } else { + return &Winsize{wsz.Row, wsz.Col, wsz.Xpixel, wsz.Ypixel}, nil + } +} + +// Get Windows Size +func Getsize(t *os.File) (rows, cols int, err error) { + var wsz *unix.Winsize + wsz, err = unix.IoctlGetWinsize(int(t.Fd()), TIOCGWINSZ) + + if err != nil { + return 80, 25, err + } else { + return int(wsz.Row), int(wsz.Col), nil + } +} + +// Setsize resizes t to s. +func Setsize(t *os.File, ws *Winsize) error { + wsz := unix.Winsize{ws.Rows, ws.Cols, ws.X, ws.Y} + return unix.IoctlSetWinsize(int(t.Fd()), TIOCSWINSZ, &wsz) +} diff --git a/vendor/github.com/creack/pty/ztypes_386.go b/vendor/github.com/creack/pty/ztypes_386.go new file mode 100644 index 000000000..ff0b8fd83 --- /dev/null +++ b/vendor/github.com/creack/pty/ztypes_386.go @@ -0,0 +1,9 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types.go + +package pty + +type ( + _C_int int32 + _C_uint uint32 +) diff --git a/vendor/github.com/creack/pty/ztypes_amd64.go b/vendor/github.com/creack/pty/ztypes_amd64.go new file mode 100644 index 000000000..ff0b8fd83 --- /dev/null +++ b/vendor/github.com/creack/pty/ztypes_amd64.go @@ -0,0 +1,9 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types.go + +package pty + +type ( + _C_int int32 + _C_uint uint32 +) diff --git a/vendor/github.com/creack/pty/ztypes_arm.go b/vendor/github.com/creack/pty/ztypes_arm.go new file mode 100644 index 000000000..ff0b8fd83 --- /dev/null +++ b/vendor/github.com/creack/pty/ztypes_arm.go @@ -0,0 +1,9 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types.go + +package pty + +type ( + _C_int int32 + _C_uint uint32 +) diff --git a/vendor/github.com/creack/pty/ztypes_arm64.go b/vendor/github.com/creack/pty/ztypes_arm64.go new file mode 100644 index 000000000..6c29a4b91 --- /dev/null +++ b/vendor/github.com/creack/pty/ztypes_arm64.go @@ -0,0 +1,11 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types.go + +// +build arm64 + +package pty + +type ( + _C_int int32 + _C_uint uint32 +) diff --git a/vendor/github.com/creack/pty/ztypes_dragonfly_amd64.go b/vendor/github.com/creack/pty/ztypes_dragonfly_amd64.go new file mode 100644 index 000000000..6b0ba037f --- /dev/null +++ b/vendor/github.com/creack/pty/ztypes_dragonfly_amd64.go @@ -0,0 +1,14 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_dragonfly.go + +package pty + +const ( + _C_SPECNAMELEN = 0x3f +) + +type fiodgnameArg struct { + Name *byte + Len uint32 + Pad_cgo_0 [4]byte +} diff --git a/vendor/github.com/creack/pty/ztypes_freebsd_386.go b/vendor/github.com/creack/pty/ztypes_freebsd_386.go new file mode 100644 index 000000000..d9975374e --- /dev/null +++ b/vendor/github.com/creack/pty/ztypes_freebsd_386.go @@ -0,0 +1,13 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_freebsd.go + +package pty + +const ( + _C_SPECNAMELEN = 0x3f +) + +type fiodgnameArg struct { + Len int32 + Buf *byte +} diff --git a/vendor/github.com/creack/pty/ztypes_freebsd_amd64.go b/vendor/github.com/creack/pty/ztypes_freebsd_amd64.go new file mode 100644 index 000000000..5fa102fcd --- /dev/null +++ b/vendor/github.com/creack/pty/ztypes_freebsd_amd64.go @@ -0,0 +1,14 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_freebsd.go + +package pty + +const ( + _C_SPECNAMELEN = 0x3f +) + +type fiodgnameArg struct { + Len int32 + Pad_cgo_0 [4]byte + Buf *byte +} diff --git 
a/vendor/github.com/creack/pty/ztypes_freebsd_arm.go b/vendor/github.com/creack/pty/ztypes_freebsd_arm.go new file mode 100644 index 000000000..d9975374e --- /dev/null +++ b/vendor/github.com/creack/pty/ztypes_freebsd_arm.go @@ -0,0 +1,13 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_freebsd.go + +package pty + +const ( + _C_SPECNAMELEN = 0x3f +) + +type fiodgnameArg struct { + Len int32 + Buf *byte +} diff --git a/vendor/github.com/creack/pty/ztypes_freebsd_arm64.go b/vendor/github.com/creack/pty/ztypes_freebsd_arm64.go new file mode 100644 index 000000000..4418139b2 --- /dev/null +++ b/vendor/github.com/creack/pty/ztypes_freebsd_arm64.go @@ -0,0 +1,13 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs types_freebsd.go + +package pty + +const ( + _C_SPECNAMELEN = 0xff +) + +type fiodgnameArg struct { + Len int32 + Buf *byte +} diff --git a/vendor/github.com/creack/pty/ztypes_mipsx.go b/vendor/github.com/creack/pty/ztypes_mipsx.go new file mode 100644 index 000000000..f0ce74086 --- /dev/null +++ b/vendor/github.com/creack/pty/ztypes_mipsx.go @@ -0,0 +1,12 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types.go + +// +build linux +// +build mips mipsle mips64 mips64le + +package pty + +type ( + _C_int int32 + _C_uint uint32 +) diff --git a/vendor/github.com/creack/pty/ztypes_openbsd_386.go b/vendor/github.com/creack/pty/ztypes_openbsd_386.go new file mode 100644 index 000000000..e67051688 --- /dev/null +++ b/vendor/github.com/creack/pty/ztypes_openbsd_386.go @@ -0,0 +1,13 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_openbsd.go + +package pty + +type ptmget struct { + Cfd int32 + Sfd int32 + Cn [16]int8 + Sn [16]int8 +} + +var ioctl_PTMGET = 0x40287401 diff --git a/vendor/github.com/creack/pty/ztypes_openbsd_amd64.go b/vendor/github.com/creack/pty/ztypes_openbsd_amd64.go new file mode 100644 index 000000000..e67051688 --- /dev/null +++ b/vendor/github.com/creack/pty/ztypes_openbsd_amd64.go @@ -0,0 +1,13 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_openbsd.go + +package pty + +type ptmget struct { + Cfd int32 + Sfd int32 + Cn [16]int8 + Sn [16]int8 +} + +var ioctl_PTMGET = 0x40287401 diff --git a/vendor/github.com/creack/pty/ztypes_ppc64.go b/vendor/github.com/creack/pty/ztypes_ppc64.go new file mode 100644 index 000000000..4e1af8431 --- /dev/null +++ b/vendor/github.com/creack/pty/ztypes_ppc64.go @@ -0,0 +1,11 @@ +// +build ppc64 + +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types.go + +package pty + +type ( + _C_int int32 + _C_uint uint32 +) diff --git a/vendor/github.com/creack/pty/ztypes_ppc64le.go b/vendor/github.com/creack/pty/ztypes_ppc64le.go new file mode 100644 index 000000000..e6780f4e2 --- /dev/null +++ b/vendor/github.com/creack/pty/ztypes_ppc64le.go @@ -0,0 +1,11 @@ +// +build ppc64le + +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types.go + +package pty + +type ( + _C_int int32 + _C_uint uint32 +) diff --git a/vendor/github.com/creack/pty/ztypes_riscvx.go b/vendor/github.com/creack/pty/ztypes_riscvx.go new file mode 100644 index 000000000..99eec8ecb --- /dev/null +++ b/vendor/github.com/creack/pty/ztypes_riscvx.go @@ -0,0 +1,11 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs types.go + +// +build riscv riscv64 + +package pty + +type ( + _C_int int32 + _C_uint uint32 +) diff --git a/vendor/github.com/creack/pty/ztypes_s390x.go b/vendor/github.com/creack/pty/ztypes_s390x.go new file mode 100644 index 000000000..a7452b61c --- /dev/null +++ b/vendor/github.com/creack/pty/ztypes_s390x.go @@ -0,0 +1,11 @@ +// +build s390x + +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types.go + +package pty + +type ( + _C_int int32 + _C_uint uint32 +) diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE new file mode 100644 index 000000000..bc52e96f2 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/LICENSE @@ -0,0 +1,15 @@ +ISC License + +Copyright (c) 2012-2016 Dave Collins + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go new file mode 100644 index 000000000..792994785 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/bypass.go @@ -0,0 +1,145 @@ +// Copyright (c) 2015-2016 Dave Collins +// +// Permission to use, copy, modify, and distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +// NOTE: Due to the following build constraints, this file will only be compiled +// when the code is not running on Google App Engine, compiled by GopherJS, and +// "-tags safe" is not added to the go build command line. The "disableunsafe" +// tag is deprecated and thus should not be used. +// Go versions prior to 1.4 are disabled because they use a different layout +// for interfaces which make the implementation of unsafeReflectValue more complex. +// +build !js,!appengine,!safe,!disableunsafe,go1.4 + +package spew + +import ( + "reflect" + "unsafe" +) + +const ( + // UnsafeDisabled is a build-time constant which specifies whether or + // not access to the unsafe package is available. + UnsafeDisabled = false + + // ptrSize is the size of a pointer on the current arch. + ptrSize = unsafe.Sizeof((*byte)(nil)) +) + +type flag uintptr + +var ( + // flagRO indicates whether the value field of a reflect.Value + // is read-only. 
+ flagRO flag + + // flagAddr indicates whether the address of the reflect.Value's + // value may be taken. + flagAddr flag +) + +// flagKindMask holds the bits that make up the kind +// part of the flags field. In all the supported versions, +// it is in the lower 5 bits. +const flagKindMask = flag(0x1f) + +// Different versions of Go have used different +// bit layouts for the flags type. This table +// records the known combinations. +var okFlags = []struct { + ro, addr flag +}{{ + // From Go 1.4 to 1.5 + ro: 1 << 5, + addr: 1 << 7, +}, { + // Up to Go tip. + ro: 1<<5 | 1<<6, + addr: 1 << 8, +}} + +var flagValOffset = func() uintptr { + field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag") + if !ok { + panic("reflect.Value has no flag field") + } + return field.Offset +}() + +// flagField returns a pointer to the flag field of a reflect.Value. +func flagField(v *reflect.Value) *flag { + return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset)) +} + +// unsafeReflectValue converts the passed reflect.Value into a one that bypasses +// the typical safety restrictions preventing access to unaddressable and +// unexported data. It works by digging the raw pointer to the underlying +// value out of the protected value and generating a new unprotected (unsafe) +// reflect.Value to it. +// +// This allows us to check for implementations of the Stringer and error +// interfaces to be used for pretty printing ordinarily unaddressable and +// inaccessible values such as unexported struct fields. +func unsafeReflectValue(v reflect.Value) reflect.Value { + if !v.IsValid() || (v.CanInterface() && v.CanAddr()) { + return v + } + flagFieldPtr := flagField(&v) + *flagFieldPtr &^= flagRO + *flagFieldPtr |= flagAddr + return v +} + +// Sanity checks against future reflect package changes +// to the type or semantics of the Value.flag field. +func init() { + field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag") + if !ok { + panic("reflect.Value has no flag field") + } + if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() { + panic("reflect.Value flag field has changed kind") + } + type t0 int + var t struct { + A t0 + // t0 will have flagEmbedRO set. + t0 + // a will have flagStickyRO set + a t0 + } + vA := reflect.ValueOf(t).FieldByName("A") + va := reflect.ValueOf(t).FieldByName("a") + vt0 := reflect.ValueOf(t).FieldByName("t0") + + // Infer flagRO from the difference between the flags + // for the (otherwise identical) fields in t. + flagPublic := *flagField(&vA) + flagWithRO := *flagField(&va) | *flagField(&vt0) + flagRO = flagPublic ^ flagWithRO + + // Infer flagAddr from the difference between a value + // taken from a pointer and not. + vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A") + flagNoPtr := *flagField(&vA) + flagPtr := *flagField(&vPtrA) + flagAddr = flagNoPtr ^ flagPtr + + // Check that the inferred flags tally with one of the known versions. 
+ for _, f := range okFlags { + if flagRO == f.ro && flagAddr == f.addr { + return + } + } + panic("reflect.Value read-only flag has changed semantics") +} diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go new file mode 100644 index 000000000..205c28d68 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go @@ -0,0 +1,38 @@ +// Copyright (c) 2015-2016 Dave Collins +// +// Permission to use, copy, modify, and distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +// NOTE: Due to the following build constraints, this file will only be compiled +// when the code is running on Google App Engine, compiled by GopherJS, or +// "-tags safe" is added to the go build command line. The "disableunsafe" +// tag is deprecated and thus should not be used. +// +build js appengine safe disableunsafe !go1.4 + +package spew + +import "reflect" + +const ( + // UnsafeDisabled is a build-time constant which specifies whether or + // not access to the unsafe package is available. + UnsafeDisabled = true +) + +// unsafeReflectValue typically converts the passed reflect.Value into a one +// that bypasses the typical safety restrictions preventing access to +// unaddressable and unexported data. However, doing this relies on access to +// the unsafe package. This is a stub version which simply returns the passed +// reflect.Value when the unsafe package is not available. +func unsafeReflectValue(v reflect.Value) reflect.Value { + return v +} diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go new file mode 100644 index 000000000..1be8ce945 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/common.go @@ -0,0 +1,341 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "fmt" + "io" + "reflect" + "sort" + "strconv" +) + +// Some constants in the form of bytes to avoid string overhead. This mirrors +// the technique used in the fmt package. 
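// ---------------------------------------------------------------------------
// Editor's note: illustrative sketch only, not part of the vendored go-spew
// sources. The unsafe flag manipulation in bypass.go above is what lets spew
// descend into ordinarily inaccessible values such as unexported struct
// fields (bypasssafe.go is the graceful fallback when the "safe" build tag,
// GopherJS, or App Engine rules out package unsafe). The type below is
// invented for the example.
// ---------------------------------------------------------------------------
package main

import "github.com/davecgh/go-spew/spew"

// secretBox has only unexported fields.
type secretBox struct {
	label string
	data  []byte
}

func main() {
	b := secretBox{label: "token", data: []byte{0xde, 0xad, 0xbe, 0xef}}

	// Dump walks into the unexported fields and renders the byte slice in
	// hexdump -C style, as described in doc.go further down this diff.
	spew.Dump(b)
}
// ---------------------------------------------------------------------------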
+var ( + panicBytes = []byte("(PANIC=") + plusBytes = []byte("+") + iBytes = []byte("i") + trueBytes = []byte("true") + falseBytes = []byte("false") + interfaceBytes = []byte("(interface {})") + commaNewlineBytes = []byte(",\n") + newlineBytes = []byte("\n") + openBraceBytes = []byte("{") + openBraceNewlineBytes = []byte("{\n") + closeBraceBytes = []byte("}") + asteriskBytes = []byte("*") + colonBytes = []byte(":") + colonSpaceBytes = []byte(": ") + openParenBytes = []byte("(") + closeParenBytes = []byte(")") + spaceBytes = []byte(" ") + pointerChainBytes = []byte("->") + nilAngleBytes = []byte("") + maxNewlineBytes = []byte("\n") + maxShortBytes = []byte("") + circularBytes = []byte("") + circularShortBytes = []byte("") + invalidAngleBytes = []byte("") + openBracketBytes = []byte("[") + closeBracketBytes = []byte("]") + percentBytes = []byte("%") + precisionBytes = []byte(".") + openAngleBytes = []byte("<") + closeAngleBytes = []byte(">") + openMapBytes = []byte("map[") + closeMapBytes = []byte("]") + lenEqualsBytes = []byte("len=") + capEqualsBytes = []byte("cap=") +) + +// hexDigits is used to map a decimal value to a hex digit. +var hexDigits = "0123456789abcdef" + +// catchPanic handles any panics that might occur during the handleMethods +// calls. +func catchPanic(w io.Writer, v reflect.Value) { + if err := recover(); err != nil { + w.Write(panicBytes) + fmt.Fprintf(w, "%v", err) + w.Write(closeParenBytes) + } +} + +// handleMethods attempts to call the Error and String methods on the underlying +// type the passed reflect.Value represents and outputes the result to Writer w. +// +// It handles panics in any called methods by catching and displaying the error +// as the formatted value. +func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) { + // We need an interface to check if the type implements the error or + // Stringer interface. However, the reflect package won't give us an + // interface on certain things like unexported struct fields in order + // to enforce visibility rules. We use unsafe, when it's available, + // to bypass these restrictions since this package does not mutate the + // values. + if !v.CanInterface() { + if UnsafeDisabled { + return false + } + + v = unsafeReflectValue(v) + } + + // Choose whether or not to do error and Stringer interface lookups against + // the base type or a pointer to the base type depending on settings. + // Technically calling one of these methods with a pointer receiver can + // mutate the value, however, types which choose to satisify an error or + // Stringer interface with a pointer receiver should not be mutating their + // state inside these interface methods. + if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() { + v = unsafeReflectValue(v) + } + if v.CanAddr() { + v = v.Addr() + } + + // Is it an error or Stringer? + switch iface := v.Interface().(type) { + case error: + defer catchPanic(w, v) + if cs.ContinueOnMethod { + w.Write(openParenBytes) + w.Write([]byte(iface.Error())) + w.Write(closeParenBytes) + w.Write(spaceBytes) + return false + } + + w.Write([]byte(iface.Error())) + return true + + case fmt.Stringer: + defer catchPanic(w, v) + if cs.ContinueOnMethod { + w.Write(openParenBytes) + w.Write([]byte(iface.String())) + w.Write(closeParenBytes) + w.Write(spaceBytes) + return false + } + w.Write([]byte(iface.String())) + return true + } + return false +} + +// printBool outputs a boolean value as true or false to Writer w. 
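// ---------------------------------------------------------------------------
// Editor's note: illustrative sketch only, not part of the vendored go-spew
// sources. It exercises the handleMethods path shown above: values whose
// types implement fmt.Stringer (or error) are printed via those methods by
// default, and the DisableMethods option on ConfigState (defined later in
// this diff) turns that off. The tempC type is invented for the example.
// ---------------------------------------------------------------------------
package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

// tempC implements fmt.Stringer.
type tempC float64

func (t tempC) String() string { return fmt.Sprintf("%.1f deg C", float64(t)) }

func main() {
	v := tempC(21.5)

	// Default configuration: handleMethods finds the Stringer and uses it.
	fmt.Print(spew.Sdump(v))

	// With DisableMethods set, the raw underlying float is dumped instead.
	cfg := spew.ConfigState{Indent: " ", DisableMethods: true}
	fmt.Print(cfg.Sdump(v))
}
// ---------------------------------------------------------------------------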
+func printBool(w io.Writer, val bool) { + if val { + w.Write(trueBytes) + } else { + w.Write(falseBytes) + } +} + +// printInt outputs a signed integer value to Writer w. +func printInt(w io.Writer, val int64, base int) { + w.Write([]byte(strconv.FormatInt(val, base))) +} + +// printUint outputs an unsigned integer value to Writer w. +func printUint(w io.Writer, val uint64, base int) { + w.Write([]byte(strconv.FormatUint(val, base))) +} + +// printFloat outputs a floating point value using the specified precision, +// which is expected to be 32 or 64bit, to Writer w. +func printFloat(w io.Writer, val float64, precision int) { + w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision))) +} + +// printComplex outputs a complex value using the specified float precision +// for the real and imaginary parts to Writer w. +func printComplex(w io.Writer, c complex128, floatPrecision int) { + r := real(c) + w.Write(openParenBytes) + w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision))) + i := imag(c) + if i >= 0 { + w.Write(plusBytes) + } + w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision))) + w.Write(iBytes) + w.Write(closeParenBytes) +} + +// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x' +// prefix to Writer w. +func printHexPtr(w io.Writer, p uintptr) { + // Null pointer. + num := uint64(p) + if num == 0 { + w.Write(nilAngleBytes) + return + } + + // Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix + buf := make([]byte, 18) + + // It's simpler to construct the hex string right to left. + base := uint64(16) + i := len(buf) - 1 + for num >= base { + buf[i] = hexDigits[num%base] + num /= base + i-- + } + buf[i] = hexDigits[num] + + // Add '0x' prefix. + i-- + buf[i] = 'x' + i-- + buf[i] = '0' + + // Strip unused leading bytes. + buf = buf[i:] + w.Write(buf) +} + +// valuesSorter implements sort.Interface to allow a slice of reflect.Value +// elements to be sorted. +type valuesSorter struct { + values []reflect.Value + strings []string // either nil or same len and values + cs *ConfigState +} + +// newValuesSorter initializes a valuesSorter instance, which holds a set of +// surrogate keys on which the data should be sorted. It uses flags in +// ConfigState to decide if and how to populate those surrogate keys. +func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface { + vs := &valuesSorter{values: values, cs: cs} + if canSortSimply(vs.values[0].Kind()) { + return vs + } + if !cs.DisableMethods { + vs.strings = make([]string, len(values)) + for i := range vs.values { + b := bytes.Buffer{} + if !handleMethods(cs, &b, vs.values[i]) { + vs.strings = nil + break + } + vs.strings[i] = b.String() + } + } + if vs.strings == nil && cs.SpewKeys { + vs.strings = make([]string, len(values)) + for i := range vs.values { + vs.strings[i] = Sprintf("%#v", vs.values[i].Interface()) + } + } + return vs +} + +// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted +// directly, or whether it should be considered for sorting by surrogate keys +// (if the ConfigState allows it). +func canSortSimply(kind reflect.Kind) bool { + // This switch parallels valueSortLess, except for the default case. 
+ switch kind { + case reflect.Bool: + return true + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return true + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + return true + case reflect.Float32, reflect.Float64: + return true + case reflect.String: + return true + case reflect.Uintptr: + return true + case reflect.Array: + return true + } + return false +} + +// Len returns the number of values in the slice. It is part of the +// sort.Interface implementation. +func (s *valuesSorter) Len() int { + return len(s.values) +} + +// Swap swaps the values at the passed indices. It is part of the +// sort.Interface implementation. +func (s *valuesSorter) Swap(i, j int) { + s.values[i], s.values[j] = s.values[j], s.values[i] + if s.strings != nil { + s.strings[i], s.strings[j] = s.strings[j], s.strings[i] + } +} + +// valueSortLess returns whether the first value should sort before the second +// value. It is used by valueSorter.Less as part of the sort.Interface +// implementation. +func valueSortLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Bool: + return !a.Bool() && b.Bool() + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return a.Int() < b.Int() + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + return a.Uint() < b.Uint() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.String: + return a.String() < b.String() + case reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Array: + // Compare the contents of both arrays. + l := a.Len() + for i := 0; i < l; i++ { + av := a.Index(i) + bv := b.Index(i) + if av.Interface() == bv.Interface() { + continue + } + return valueSortLess(av, bv) + } + } + return a.String() < b.String() +} + +// Less returns whether the value at index i should sort before the +// value at index j. It is part of the sort.Interface implementation. +func (s *valuesSorter) Less(i, j int) bool { + if s.strings == nil { + return valueSortLess(s.values[i], s.values[j]) + } + return s.strings[i] < s.strings[j] +} + +// sortValues is a sort function that handles both native types and any type that +// can be converted to error or Stringer. Other inputs are sorted according to +// their Value.String() value to ensure display stability. +func sortValues(values []reflect.Value, cs *ConfigState) { + if len(values) == 0 { + return + } + sort.Sort(newValuesSorter(values, cs)) +} diff --git a/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/davecgh/go-spew/spew/config.go new file mode 100644 index 000000000..2e3d22f31 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/config.go @@ -0,0 +1,306 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "fmt" + "io" + "os" +) + +// ConfigState houses the configuration options used by spew to format and +// display values. There is a global instance, Config, that is used to control +// all top-level Formatter and Dump functionality. Each ConfigState instance +// provides methods equivalent to the top-level functions. +// +// The zero value for ConfigState provides no indentation. You would typically +// want to set it to a space or a tab. +// +// Alternatively, you can use NewDefaultConfig to get a ConfigState instance +// with default settings. See the documentation of NewDefaultConfig for default +// values. +type ConfigState struct { + // Indent specifies the string to use for each indentation level. The + // global config instance that all top-level functions use set this to a + // single space by default. If you would like more indentation, you might + // set this to a tab with "\t" or perhaps two spaces with " ". + Indent string + + // MaxDepth controls the maximum number of levels to descend into nested + // data structures. The default, 0, means there is no limit. + // + // NOTE: Circular data structures are properly detected, so it is not + // necessary to set this value unless you specifically want to limit deeply + // nested data structures. + MaxDepth int + + // DisableMethods specifies whether or not error and Stringer interfaces are + // invoked for types that implement them. + DisableMethods bool + + // DisablePointerMethods specifies whether or not to check for and invoke + // error and Stringer interfaces on types which only accept a pointer + // receiver when the current type is not a pointer. + // + // NOTE: This might be an unsafe action since calling one of these methods + // with a pointer receiver could technically mutate the value, however, + // in practice, types which choose to satisify an error or Stringer + // interface with a pointer receiver should not be mutating their state + // inside these interface methods. As a result, this option relies on + // access to the unsafe package, so it will not have any effect when + // running in environments without access to the unsafe package such as + // Google App Engine or with the "safe" build tag specified. + DisablePointerMethods bool + + // DisablePointerAddresses specifies whether to disable the printing of + // pointer addresses. This is useful when diffing data structures in tests. + DisablePointerAddresses bool + + // DisableCapacities specifies whether to disable the printing of capacities + // for arrays, slices, maps and channels. This is useful when diffing + // data structures in tests. + DisableCapacities bool + + // ContinueOnMethod specifies whether or not recursion should continue once + // a custom error or Stringer interface is invoked. The default, false, + // means it will print the results of invoking the custom error or Stringer + // interface and return immediately instead of continuing to recurse into + // the internals of the data type. + // + // NOTE: This flag does not have any effect if method invocation is disabled + // via the DisableMethods or DisablePointerMethods options. 
+ ContinueOnMethod bool + + // SortKeys specifies map keys should be sorted before being printed. Use + // this to have a more deterministic, diffable output. Note that only + // native types (bool, int, uint, floats, uintptr and string) and types + // that support the error or Stringer interfaces (if methods are + // enabled) are supported, with other types sorted according to the + // reflect.Value.String() output which guarantees display stability. + SortKeys bool + + // SpewKeys specifies that, as a last resort attempt, map keys should + // be spewed to strings and sorted by those strings. This is only + // considered if SortKeys is true. + SpewKeys bool +} + +// Config is the active configuration of the top-level functions. +// The configuration can be changed by modifying the contents of spew.Config. +var Config = ConfigState{Indent: " "} + +// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the formatted string as a value that satisfies error. See NewFormatter +// for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) { + return fmt.Errorf(format, c.convertArgs(a)...) +} + +// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprint(w, c.convertArgs(a)...) +} + +// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { + return fmt.Fprintf(w, format, c.convertArgs(a)...) +} + +// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it +// passed with a Formatter interface returned by c.NewFormatter. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprintln(w, c.convertArgs(a)...) +} + +// Print is a wrapper for fmt.Print that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Print(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Print(a ...interface{}) (n int, err error) { + return fmt.Print(c.convertArgs(a)...) +} + +// Printf is a wrapper for fmt.Printf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. 
It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) { + return fmt.Printf(format, c.convertArgs(a)...) +} + +// Println is a wrapper for fmt.Println that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Println(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Println(a ...interface{}) (n int, err error) { + return fmt.Println(c.convertArgs(a)...) +} + +// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Sprint(a ...interface{}) string { + return fmt.Sprint(c.convertArgs(a)...) +} + +// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Sprintf(format string, a ...interface{}) string { + return fmt.Sprintf(format, c.convertArgs(a)...) +} + +// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it +// were passed with a Formatter interface returned by c.NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Sprintln(a ...interface{}) string { + return fmt.Sprintln(c.convertArgs(a)...) +} + +/* +NewFormatter returns a custom formatter that satisfies the fmt.Formatter +interface. As a result, it integrates cleanly with standard fmt package +printing functions. The formatter is useful for inline printing of smaller data +types similar to the standard %v format specifier. + +The custom formatter only responds to the %v (most compact), %+v (adds pointer +addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb +combinations. Any other verbs such as %x and %q will be sent to the the +standard fmt package for formatting. In addition, the custom formatter ignores +the width and precision arguments (however they will still work on the format +specifiers not handled by the custom formatter). + +Typically this function shouldn't be called directly. It is much easier to make +use of the custom formatter by calling one of the convenience functions such as +c.Printf, c.Println, or c.Printf. +*/ +func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter { + return newFormatter(c, v) +} + +// Fdump formats and displays the passed arguments to io.Writer w. It formats +// exactly the same as Dump. +func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) { + fdump(c, w, a...) 
+} + +/* +Dump displays the passed parameters to standard out with newlines, customizable +indentation, and additional debug information such as complete types and all +pointer addresses used to indirect to the final value. It provides the +following features over the built-in printing facilities provided by the fmt +package: + + * Pointers are dereferenced and followed + * Circular data structures are detected and handled properly + * Custom Stringer/error interfaces are optionally invoked, including + on unexported types + * Custom types which only implement the Stringer/error interfaces via + a pointer receiver are optionally invoked when passing non-pointer + variables + * Byte arrays and slices are dumped like the hexdump -C command which + includes offsets, byte values in hex, and ASCII output + +The configuration options are controlled by modifying the public members +of c. See ConfigState for options documentation. + +See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to +get the formatted result as a string. +*/ +func (c *ConfigState) Dump(a ...interface{}) { + fdump(c, os.Stdout, a...) +} + +// Sdump returns a string with the passed arguments formatted exactly the same +// as Dump. +func (c *ConfigState) Sdump(a ...interface{}) string { + var buf bytes.Buffer + fdump(c, &buf, a...) + return buf.String() +} + +// convertArgs accepts a slice of arguments and returns a slice of the same +// length with each argument converted to a spew Formatter interface using +// the ConfigState associated with s. +func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) { + formatters = make([]interface{}, len(args)) + for index, arg := range args { + formatters[index] = newFormatter(c, arg) + } + return formatters +} + +// NewDefaultConfig returns a ConfigState with the following default settings. +// +// Indent: " " +// MaxDepth: 0 +// DisableMethods: false +// DisablePointerMethods: false +// ContinueOnMethod: false +// SortKeys: false +func NewDefaultConfig() *ConfigState { + return &ConfigState{Indent: " "} +} diff --git a/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/davecgh/go-spew/spew/doc.go new file mode 100644 index 000000000..aacaac6f1 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/doc.go @@ -0,0 +1,211 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* +Package spew implements a deep pretty printer for Go data structures to aid in +debugging. 
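// ---------------------------------------------------------------------------
// Editor's note: illustrative sketch only, not part of the vendored go-spew
// sources. It shows the ConfigState knobs from config.go above that its own
// comments recommend for stable, diffable output in tests: sorted map keys
// plus suppressed pointer addresses and capacities. The map contents are
// invented for the example.
// ---------------------------------------------------------------------------
package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

func main() {
	m := map[string]int{"b": 2, "a": 1, "c": 3}

	// Start from the documented defaults, then opt in to deterministic
	// output so two dumps of equal values compare byte-for-byte.
	cfg := spew.NewDefaultConfig()
	cfg.SortKeys = true
	cfg.DisablePointerAddresses = true
	cfg.DisableCapacities = true
	cfg.Indent = "\t"

	fmt.Print(cfg.Sdump(m))
}
// ---------------------------------------------------------------------------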
+ +A quick overview of the additional features spew provides over the built-in +printing facilities for Go data types are as follows: + + * Pointers are dereferenced and followed + * Circular data structures are detected and handled properly + * Custom Stringer/error interfaces are optionally invoked, including + on unexported types + * Custom types which only implement the Stringer/error interfaces via + a pointer receiver are optionally invoked when passing non-pointer + variables + * Byte arrays and slices are dumped like the hexdump -C command which + includes offsets, byte values in hex, and ASCII output (only when using + Dump style) + +There are two different approaches spew allows for dumping Go data structures: + + * Dump style which prints with newlines, customizable indentation, + and additional debug information such as types and all pointer addresses + used to indirect to the final value + * A custom Formatter interface that integrates cleanly with the standard fmt + package and replaces %v, %+v, %#v, and %#+v to provide inline printing + similar to the default %v while providing the additional functionality + outlined above and passing unsupported format verbs such as %x and %q + along to fmt + +Quick Start + +This section demonstrates how to quickly get started with spew. See the +sections below for further details on formatting and configuration options. + +To dump a variable with full newlines, indentation, type, and pointer +information use Dump, Fdump, or Sdump: + spew.Dump(myVar1, myVar2, ...) + spew.Fdump(someWriter, myVar1, myVar2, ...) + str := spew.Sdump(myVar1, myVar2, ...) + +Alternatively, if you would prefer to use format strings with a compacted inline +printing style, use the convenience wrappers Printf, Fprintf, etc with +%v (most compact), %+v (adds pointer addresses), %#v (adds types), or +%#+v (adds types and pointer addresses): + spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + +Configuration Options + +Configuration of spew is handled by fields in the ConfigState type. For +convenience, all of the top-level functions use a global state available +via the spew.Config global. + +It is also possible to create a ConfigState instance that provides methods +equivalent to the top-level functions. This allows concurrent configuration +options. See the ConfigState documentation for more details. + +The following configuration options are available: + * Indent + String to use for each indentation level for Dump functions. + It is a single space by default. A popular alternative is "\t". + + * MaxDepth + Maximum number of levels to descend into nested data structures. + There is no limit by default. + + * DisableMethods + Disables invocation of error and Stringer interface methods. + Method invocation is enabled by default. + + * DisablePointerMethods + Disables invocation of error and Stringer interface methods on types + which only accept pointer receivers from non-pointer variables. + Pointer method invocation is enabled by default. + + * DisablePointerAddresses + DisablePointerAddresses specifies whether to disable the printing of + pointer addresses. This is useful when diffing data structures in tests. + + * DisableCapacities + DisableCapacities specifies whether to disable the printing of + capacities for arrays, slices, maps and channels. 
This is useful when + diffing data structures in tests. + + * ContinueOnMethod + Enables recursion into types after invoking error and Stringer interface + methods. Recursion after method invocation is disabled by default. + + * SortKeys + Specifies map keys should be sorted before being printed. Use + this to have a more deterministic, diffable output. Note that + only native types (bool, int, uint, floats, uintptr and string) + and types which implement error or Stringer interfaces are + supported with other types sorted according to the + reflect.Value.String() output which guarantees display + stability. Natural map order is used by default. + + * SpewKeys + Specifies that, as a last resort attempt, map keys should be + spewed to strings and sorted by those strings. This is only + considered if SortKeys is true. + +Dump Usage + +Simply call spew.Dump with a list of variables you want to dump: + + spew.Dump(myVar1, myVar2, ...) + +You may also call spew.Fdump if you would prefer to output to an arbitrary +io.Writer. For example, to dump to standard error: + + spew.Fdump(os.Stderr, myVar1, myVar2, ...) + +A third option is to call spew.Sdump to get the formatted output as a string: + + str := spew.Sdump(myVar1, myVar2, ...) + +Sample Dump Output + +See the Dump example for details on the setup of the types and variables being +shown here. + + (main.Foo) { + unexportedField: (*main.Bar)(0xf84002e210)({ + flag: (main.Flag) flagTwo, + data: (uintptr) + }), + ExportedField: (map[interface {}]interface {}) (len=1) { + (string) (len=3) "one": (bool) true + } + } + +Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C +command as shown. + ([]uint8) (len=32 cap=32) { + 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... | + 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0| + 00000020 31 32 |12| + } + +Custom Formatter + +Spew provides a custom formatter that implements the fmt.Formatter interface +so that it integrates cleanly with standard fmt package printing functions. The +formatter is useful for inline printing of smaller data types similar to the +standard %v format specifier. + +The custom formatter only responds to the %v (most compact), %+v (adds pointer +addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb +combinations. Any other verbs such as %x and %q will be sent to the the +standard fmt package for formatting. In addition, the custom formatter ignores +the width and precision arguments (however they will still work on the format +specifiers not handled by the custom formatter). + +Custom Formatter Usage + +The simplest way to make use of the spew custom formatter is to call one of the +convenience functions such as spew.Printf, spew.Println, or spew.Printf. The +functions have syntax you are most likely already familiar with: + + spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + spew.Println(myVar, myVar2) + spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + +See the Index for the full list convenience functions. 
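+
+The same convenience methods are also available on a ConfigState instance,
+which is handy when different parts of a program need different settings.
+A minimal sketch, with illustrative variable names and option values:
+
+	cs := spew.ConfigState{Indent: "\t", SortKeys: true}
+	cs.Dump(myVar1, myVar2)
+	str := cs.Sdump(myVar1)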
+ +Sample Formatter Output + +Double pointer to a uint8: + %v: <**>5 + %+v: <**>(0xf8400420d0->0xf8400420c8)5 + %#v: (**uint8)5 + %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5 + +Pointer to circular struct with a uint8 field and a pointer to itself: + %v: <*>{1 <*>} + %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)} + %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)} + %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)} + +See the Printf example for details on the setup of variables being shown +here. + +Errors + +Since it is possible for custom Stringer/error interfaces to panic, spew +detects them and handles them internally by printing the panic information +inline with the output. Since spew is intended to provide deep pretty printing +capabilities on structures, it intentionally does not return any errors. +*/ +package spew diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go new file mode 100644 index 000000000..f78d89fc1 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/dump.go @@ -0,0 +1,509 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "encoding/hex" + "fmt" + "io" + "os" + "reflect" + "regexp" + "strconv" + "strings" +) + +var ( + // uint8Type is a reflect.Type representing a uint8. It is used to + // convert cgo types to uint8 slices for hexdumping. + uint8Type = reflect.TypeOf(uint8(0)) + + // cCharRE is a regular expression that matches a cgo char. + // It is used to detect character arrays to hexdump them. + cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`) + + // cUnsignedCharRE is a regular expression that matches a cgo unsigned + // char. It is used to detect unsigned character arrays to hexdump + // them. + cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`) + + // cUint8tCharRE is a regular expression that matches a cgo uint8_t. + // It is used to detect uint8_t arrays to hexdump them. + cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`) +) + +// dumpState contains information about the state of a dump operation. +type dumpState struct { + w io.Writer + depth int + pointers map[uintptr]int + ignoreNextType bool + ignoreNextIndent bool + cs *ConfigState +} + +// indent performs indentation according to the depth level and cs.Indent +// option. +func (d *dumpState) indent() { + if d.ignoreNextIndent { + d.ignoreNextIndent = false + return + } + d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth)) +} + +// unpackValue returns values inside of non-nil interfaces when possible. +// This is useful for data types like structs, arrays, slices, and maps which +// can contain varying types packed inside an interface. 
+func (d *dumpState) unpackValue(v reflect.Value) reflect.Value { + if v.Kind() == reflect.Interface && !v.IsNil() { + v = v.Elem() + } + return v +} + +// dumpPtr handles formatting of pointers by indirecting them as necessary. +func (d *dumpState) dumpPtr(v reflect.Value) { + // Remove pointers at or below the current depth from map used to detect + // circular refs. + for k, depth := range d.pointers { + if depth >= d.depth { + delete(d.pointers, k) + } + } + + // Keep list of all dereferenced pointers to show later. + pointerChain := make([]uintptr, 0) + + // Figure out how many levels of indirection there are by dereferencing + // pointers and unpacking interfaces down the chain while detecting circular + // references. + nilFound := false + cycleFound := false + indirects := 0 + ve := v + for ve.Kind() == reflect.Ptr { + if ve.IsNil() { + nilFound = true + break + } + indirects++ + addr := ve.Pointer() + pointerChain = append(pointerChain, addr) + if pd, ok := d.pointers[addr]; ok && pd < d.depth { + cycleFound = true + indirects-- + break + } + d.pointers[addr] = d.depth + + ve = ve.Elem() + if ve.Kind() == reflect.Interface { + if ve.IsNil() { + nilFound = true + break + } + ve = ve.Elem() + } + } + + // Display type information. + d.w.Write(openParenBytes) + d.w.Write(bytes.Repeat(asteriskBytes, indirects)) + d.w.Write([]byte(ve.Type().String())) + d.w.Write(closeParenBytes) + + // Display pointer information. + if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 { + d.w.Write(openParenBytes) + for i, addr := range pointerChain { + if i > 0 { + d.w.Write(pointerChainBytes) + } + printHexPtr(d.w, addr) + } + d.w.Write(closeParenBytes) + } + + // Display dereferenced value. + d.w.Write(openParenBytes) + switch { + case nilFound: + d.w.Write(nilAngleBytes) + + case cycleFound: + d.w.Write(circularBytes) + + default: + d.ignoreNextType = true + d.dump(ve) + } + d.w.Write(closeParenBytes) +} + +// dumpSlice handles formatting of arrays and slices. Byte (uint8 under +// reflection) arrays and slices are dumped in hexdump -C fashion. +func (d *dumpState) dumpSlice(v reflect.Value) { + // Determine whether this type should be hex dumped or not. Also, + // for types which should be hexdumped, try to use the underlying data + // first, then fall back to trying to convert them to a uint8 slice. + var buf []uint8 + doConvert := false + doHexDump := false + numEntries := v.Len() + if numEntries > 0 { + vt := v.Index(0).Type() + vts := vt.String() + switch { + // C types that need to be converted. + case cCharRE.MatchString(vts): + fallthrough + case cUnsignedCharRE.MatchString(vts): + fallthrough + case cUint8tCharRE.MatchString(vts): + doConvert = true + + // Try to use existing uint8 slices and fall back to converting + // and copying if that fails. + case vt.Kind() == reflect.Uint8: + // We need an addressable interface to convert the type + // to a byte slice. However, the reflect package won't + // give us an interface on certain things like + // unexported struct fields in order to enforce + // visibility rules. We use unsafe, when available, to + // bypass these restrictions since this package does not + // mutate the values. + vs := v + if !vs.CanInterface() || !vs.CanAddr() { + vs = unsafeReflectValue(vs) + } + if !UnsafeDisabled { + vs = vs.Slice(0, numEntries) + + // Use the existing uint8 slice if it can be + // type asserted. 
+ iface := vs.Interface() + if slice, ok := iface.([]uint8); ok { + buf = slice + doHexDump = true + break + } + } + + // The underlying data needs to be converted if it can't + // be type asserted to a uint8 slice. + doConvert = true + } + + // Copy and convert the underlying type if needed. + if doConvert && vt.ConvertibleTo(uint8Type) { + // Convert and copy each element into a uint8 byte + // slice. + buf = make([]uint8, numEntries) + for i := 0; i < numEntries; i++ { + vv := v.Index(i) + buf[i] = uint8(vv.Convert(uint8Type).Uint()) + } + doHexDump = true + } + } + + // Hexdump the entire slice as needed. + if doHexDump { + indent := strings.Repeat(d.cs.Indent, d.depth) + str := indent + hex.Dump(buf) + str = strings.Replace(str, "\n", "\n"+indent, -1) + str = strings.TrimRight(str, d.cs.Indent) + d.w.Write([]byte(str)) + return + } + + // Recursively call dump for each item. + for i := 0; i < numEntries; i++ { + d.dump(d.unpackValue(v.Index(i))) + if i < (numEntries - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } +} + +// dump is the main workhorse for dumping a value. It uses the passed reflect +// value to figure out what kind of object we are dealing with and formats it +// appropriately. It is a recursive function, however circular data structures +// are detected and handled properly. +func (d *dumpState) dump(v reflect.Value) { + // Handle invalid reflect values immediately. + kind := v.Kind() + if kind == reflect.Invalid { + d.w.Write(invalidAngleBytes) + return + } + + // Handle pointers specially. + if kind == reflect.Ptr { + d.indent() + d.dumpPtr(v) + return + } + + // Print type information unless already handled elsewhere. + if !d.ignoreNextType { + d.indent() + d.w.Write(openParenBytes) + d.w.Write([]byte(v.Type().String())) + d.w.Write(closeParenBytes) + d.w.Write(spaceBytes) + } + d.ignoreNextType = false + + // Display length and capacity if the built-in len and cap functions + // work with the value's kind and the len/cap itself is non-zero. + valueLen, valueCap := 0, 0 + switch v.Kind() { + case reflect.Array, reflect.Slice, reflect.Chan: + valueLen, valueCap = v.Len(), v.Cap() + case reflect.Map, reflect.String: + valueLen = v.Len() + } + if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 { + d.w.Write(openParenBytes) + if valueLen != 0 { + d.w.Write(lenEqualsBytes) + printInt(d.w, int64(valueLen), 10) + } + if !d.cs.DisableCapacities && valueCap != 0 { + if valueLen != 0 { + d.w.Write(spaceBytes) + } + d.w.Write(capEqualsBytes) + printInt(d.w, int64(valueCap), 10) + } + d.w.Write(closeParenBytes) + d.w.Write(spaceBytes) + } + + // Call Stringer/error interfaces if they exist and the handle methods flag + // is enabled + if !d.cs.DisableMethods { + if (kind != reflect.Invalid) && (kind != reflect.Interface) { + if handled := handleMethods(d.cs, d.w, v); handled { + return + } + } + } + + switch kind { + case reflect.Invalid: + // Do nothing. We should never get here since invalid has already + // been handled above. 
+ + case reflect.Bool: + printBool(d.w, v.Bool()) + + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + printInt(d.w, v.Int(), 10) + + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + printUint(d.w, v.Uint(), 10) + + case reflect.Float32: + printFloat(d.w, v.Float(), 32) + + case reflect.Float64: + printFloat(d.w, v.Float(), 64) + + case reflect.Complex64: + printComplex(d.w, v.Complex(), 32) + + case reflect.Complex128: + printComplex(d.w, v.Complex(), 64) + + case reflect.Slice: + if v.IsNil() { + d.w.Write(nilAngleBytes) + break + } + fallthrough + + case reflect.Array: + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + d.dumpSlice(v) + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.String: + d.w.Write([]byte(strconv.Quote(v.String()))) + + case reflect.Interface: + // The only time we should get here is for nil interfaces due to + // unpackValue calls. + if v.IsNil() { + d.w.Write(nilAngleBytes) + } + + case reflect.Ptr: + // Do nothing. We should never get here since pointers have already + // been handled above. + + case reflect.Map: + // nil maps should be indicated as different than empty maps + if v.IsNil() { + d.w.Write(nilAngleBytes) + break + } + + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + numEntries := v.Len() + keys := v.MapKeys() + if d.cs.SortKeys { + sortValues(keys, d.cs) + } + for i, key := range keys { + d.dump(d.unpackValue(key)) + d.w.Write(colonSpaceBytes) + d.ignoreNextIndent = true + d.dump(d.unpackValue(v.MapIndex(key))) + if i < (numEntries - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.Struct: + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + vt := v.Type() + numFields := v.NumField() + for i := 0; i < numFields; i++ { + d.indent() + vtf := vt.Field(i) + d.w.Write([]byte(vtf.Name)) + d.w.Write(colonSpaceBytes) + d.ignoreNextIndent = true + d.dump(d.unpackValue(v.Field(i))) + if i < (numFields - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.Uintptr: + printHexPtr(d.w, uintptr(v.Uint())) + + case reflect.UnsafePointer, reflect.Chan, reflect.Func: + printHexPtr(d.w, v.Pointer()) + + // There were not any other types at the time this code was written, but + // fall back to letting the default fmt package handle it in case any new + // types are added. + default: + if v.CanInterface() { + fmt.Fprintf(d.w, "%v", v.Interface()) + } else { + fmt.Fprintf(d.w, "%v", v.String()) + } + } +} + +// fdump is a helper function to consolidate the logic from the various public +// methods which take varying writers and config states. +func fdump(cs *ConfigState, w io.Writer, a ...interface{}) { + for _, arg := range a { + if arg == nil { + w.Write(interfaceBytes) + w.Write(spaceBytes) + w.Write(nilAngleBytes) + w.Write(newlineBytes) + continue + } + + d := dumpState{w: w, cs: cs} + d.pointers = make(map[uintptr]int) + d.dump(reflect.ValueOf(arg)) + d.w.Write(newlineBytes) + } +} + +// Fdump formats and displays the passed arguments to io.Writer w. 
It formats +// exactly the same as Dump. +func Fdump(w io.Writer, a ...interface{}) { + fdump(&Config, w, a...) +} + +// Sdump returns a string with the passed arguments formatted exactly the same +// as Dump. +func Sdump(a ...interface{}) string { + var buf bytes.Buffer + fdump(&Config, &buf, a...) + return buf.String() +} + +/* +Dump displays the passed parameters to standard out with newlines, customizable +indentation, and additional debug information such as complete types and all +pointer addresses used to indirect to the final value. It provides the +following features over the built-in printing facilities provided by the fmt +package: + + * Pointers are dereferenced and followed + * Circular data structures are detected and handled properly + * Custom Stringer/error interfaces are optionally invoked, including + on unexported types + * Custom types which only implement the Stringer/error interfaces via + a pointer receiver are optionally invoked when passing non-pointer + variables + * Byte arrays and slices are dumped like the hexdump -C command which + includes offsets, byte values in hex, and ASCII output + +The configuration options are controlled by an exported package global, +spew.Config. See ConfigState for options documentation. + +See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to +get the formatted result as a string. +*/ +func Dump(a ...interface{}) { + fdump(&Config, os.Stdout, a...) +} diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go new file mode 100644 index 000000000..b04edb7d7 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/format.go @@ -0,0 +1,419 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "fmt" + "reflect" + "strconv" + "strings" +) + +// supportedFlags is a list of all the character flags supported by fmt package. +const supportedFlags = "0-+# " + +// formatState implements the fmt.Formatter interface and contains information +// about the state of a formatting operation. The NewFormatter function can +// be used to get a new Formatter which can be used directly as arguments +// in standard fmt package printing calls. +type formatState struct { + value interface{} + fs fmt.State + depth int + pointers map[uintptr]int + ignoreNextType bool + cs *ConfigState +} + +// buildDefaultFormat recreates the original format string without precision +// and width information to pass in to fmt.Sprintf in the case of an +// unrecognized type. Unless new types are added to the language, this +// function won't ever be called. 
+func (f *formatState) buildDefaultFormat() (format string) { + buf := bytes.NewBuffer(percentBytes) + + for _, flag := range supportedFlags { + if f.fs.Flag(int(flag)) { + buf.WriteRune(flag) + } + } + + buf.WriteRune('v') + + format = buf.String() + return format +} + +// constructOrigFormat recreates the original format string including precision +// and width information to pass along to the standard fmt package. This allows +// automatic deferral of all format strings this package doesn't support. +func (f *formatState) constructOrigFormat(verb rune) (format string) { + buf := bytes.NewBuffer(percentBytes) + + for _, flag := range supportedFlags { + if f.fs.Flag(int(flag)) { + buf.WriteRune(flag) + } + } + + if width, ok := f.fs.Width(); ok { + buf.WriteString(strconv.Itoa(width)) + } + + if precision, ok := f.fs.Precision(); ok { + buf.Write(precisionBytes) + buf.WriteString(strconv.Itoa(precision)) + } + + buf.WriteRune(verb) + + format = buf.String() + return format +} + +// unpackValue returns values inside of non-nil interfaces when possible and +// ensures that types for values which have been unpacked from an interface +// are displayed when the show types flag is also set. +// This is useful for data types like structs, arrays, slices, and maps which +// can contain varying types packed inside an interface. +func (f *formatState) unpackValue(v reflect.Value) reflect.Value { + if v.Kind() == reflect.Interface { + f.ignoreNextType = false + if !v.IsNil() { + v = v.Elem() + } + } + return v +} + +// formatPtr handles formatting of pointers by indirecting them as necessary. +func (f *formatState) formatPtr(v reflect.Value) { + // Display nil if top level pointer is nil. + showTypes := f.fs.Flag('#') + if v.IsNil() && (!showTypes || f.ignoreNextType) { + f.fs.Write(nilAngleBytes) + return + } + + // Remove pointers at or below the current depth from map used to detect + // circular refs. + for k, depth := range f.pointers { + if depth >= f.depth { + delete(f.pointers, k) + } + } + + // Keep list of all dereferenced pointers to possibly show later. + pointerChain := make([]uintptr, 0) + + // Figure out how many levels of indirection there are by derferencing + // pointers and unpacking interfaces down the chain while detecting circular + // references. + nilFound := false + cycleFound := false + indirects := 0 + ve := v + for ve.Kind() == reflect.Ptr { + if ve.IsNil() { + nilFound = true + break + } + indirects++ + addr := ve.Pointer() + pointerChain = append(pointerChain, addr) + if pd, ok := f.pointers[addr]; ok && pd < f.depth { + cycleFound = true + indirects-- + break + } + f.pointers[addr] = f.depth + + ve = ve.Elem() + if ve.Kind() == reflect.Interface { + if ve.IsNil() { + nilFound = true + break + } + ve = ve.Elem() + } + } + + // Display type or indirection level depending on flags. + if showTypes && !f.ignoreNextType { + f.fs.Write(openParenBytes) + f.fs.Write(bytes.Repeat(asteriskBytes, indirects)) + f.fs.Write([]byte(ve.Type().String())) + f.fs.Write(closeParenBytes) + } else { + if nilFound || cycleFound { + indirects += strings.Count(ve.Type().String(), "*") + } + f.fs.Write(openAngleBytes) + f.fs.Write([]byte(strings.Repeat("*", indirects))) + f.fs.Write(closeAngleBytes) + } + + // Display pointer information depending on flags. 
+ if f.fs.Flag('+') && (len(pointerChain) > 0) { + f.fs.Write(openParenBytes) + for i, addr := range pointerChain { + if i > 0 { + f.fs.Write(pointerChainBytes) + } + printHexPtr(f.fs, addr) + } + f.fs.Write(closeParenBytes) + } + + // Display dereferenced value. + switch { + case nilFound: + f.fs.Write(nilAngleBytes) + + case cycleFound: + f.fs.Write(circularShortBytes) + + default: + f.ignoreNextType = true + f.format(ve) + } +} + +// format is the main workhorse for providing the Formatter interface. It +// uses the passed reflect value to figure out what kind of object we are +// dealing with and formats it appropriately. It is a recursive function, +// however circular data structures are detected and handled properly. +func (f *formatState) format(v reflect.Value) { + // Handle invalid reflect values immediately. + kind := v.Kind() + if kind == reflect.Invalid { + f.fs.Write(invalidAngleBytes) + return + } + + // Handle pointers specially. + if kind == reflect.Ptr { + f.formatPtr(v) + return + } + + // Print type information unless already handled elsewhere. + if !f.ignoreNextType && f.fs.Flag('#') { + f.fs.Write(openParenBytes) + f.fs.Write([]byte(v.Type().String())) + f.fs.Write(closeParenBytes) + } + f.ignoreNextType = false + + // Call Stringer/error interfaces if they exist and the handle methods + // flag is enabled. + if !f.cs.DisableMethods { + if (kind != reflect.Invalid) && (kind != reflect.Interface) { + if handled := handleMethods(f.cs, f.fs, v); handled { + return + } + } + } + + switch kind { + case reflect.Invalid: + // Do nothing. We should never get here since invalid has already + // been handled above. + + case reflect.Bool: + printBool(f.fs, v.Bool()) + + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + printInt(f.fs, v.Int(), 10) + + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + printUint(f.fs, v.Uint(), 10) + + case reflect.Float32: + printFloat(f.fs, v.Float(), 32) + + case reflect.Float64: + printFloat(f.fs, v.Float(), 64) + + case reflect.Complex64: + printComplex(f.fs, v.Complex(), 32) + + case reflect.Complex128: + printComplex(f.fs, v.Complex(), 64) + + case reflect.Slice: + if v.IsNil() { + f.fs.Write(nilAngleBytes) + break + } + fallthrough + + case reflect.Array: + f.fs.Write(openBracketBytes) + f.depth++ + if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { + f.fs.Write(maxShortBytes) + } else { + numEntries := v.Len() + for i := 0; i < numEntries; i++ { + if i > 0 { + f.fs.Write(spaceBytes) + } + f.ignoreNextType = true + f.format(f.unpackValue(v.Index(i))) + } + } + f.depth-- + f.fs.Write(closeBracketBytes) + + case reflect.String: + f.fs.Write([]byte(v.String())) + + case reflect.Interface: + // The only time we should get here is for nil interfaces due to + // unpackValue calls. + if v.IsNil() { + f.fs.Write(nilAngleBytes) + } + + case reflect.Ptr: + // Do nothing. We should never get here since pointers have already + // been handled above. 
+ + case reflect.Map: + // nil maps should be indicated as different than empty maps + if v.IsNil() { + f.fs.Write(nilAngleBytes) + break + } + + f.fs.Write(openMapBytes) + f.depth++ + if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { + f.fs.Write(maxShortBytes) + } else { + keys := v.MapKeys() + if f.cs.SortKeys { + sortValues(keys, f.cs) + } + for i, key := range keys { + if i > 0 { + f.fs.Write(spaceBytes) + } + f.ignoreNextType = true + f.format(f.unpackValue(key)) + f.fs.Write(colonBytes) + f.ignoreNextType = true + f.format(f.unpackValue(v.MapIndex(key))) + } + } + f.depth-- + f.fs.Write(closeMapBytes) + + case reflect.Struct: + numFields := v.NumField() + f.fs.Write(openBraceBytes) + f.depth++ + if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { + f.fs.Write(maxShortBytes) + } else { + vt := v.Type() + for i := 0; i < numFields; i++ { + if i > 0 { + f.fs.Write(spaceBytes) + } + vtf := vt.Field(i) + if f.fs.Flag('+') || f.fs.Flag('#') { + f.fs.Write([]byte(vtf.Name)) + f.fs.Write(colonBytes) + } + f.format(f.unpackValue(v.Field(i))) + } + } + f.depth-- + f.fs.Write(closeBraceBytes) + + case reflect.Uintptr: + printHexPtr(f.fs, uintptr(v.Uint())) + + case reflect.UnsafePointer, reflect.Chan, reflect.Func: + printHexPtr(f.fs, v.Pointer()) + + // There were not any other types at the time this code was written, but + // fall back to letting the default fmt package handle it if any get added. + default: + format := f.buildDefaultFormat() + if v.CanInterface() { + fmt.Fprintf(f.fs, format, v.Interface()) + } else { + fmt.Fprintf(f.fs, format, v.String()) + } + } +} + +// Format satisfies the fmt.Formatter interface. See NewFormatter for usage +// details. +func (f *formatState) Format(fs fmt.State, verb rune) { + f.fs = fs + + // Use standard formatting for verbs that are not v. + if verb != 'v' { + format := f.constructOrigFormat(verb) + fmt.Fprintf(fs, format, f.value) + return + } + + if f.value == nil { + if fs.Flag('#') { + fs.Write(interfaceBytes) + } + fs.Write(nilAngleBytes) + return + } + + f.format(reflect.ValueOf(f.value)) +} + +// newFormatter is a helper function to consolidate the logic from the various +// public methods which take varying config states. +func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter { + fs := &formatState{value: v, cs: cs} + fs.pointers = make(map[uintptr]int) + return fs +} + +/* +NewFormatter returns a custom formatter that satisfies the fmt.Formatter +interface. As a result, it integrates cleanly with standard fmt package +printing functions. The formatter is useful for inline printing of smaller data +types similar to the standard %v format specifier. + +The custom formatter only responds to the %v (most compact), %+v (adds pointer +addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb +combinations. Any other verbs such as %x and %q will be sent to the the +standard fmt package for formatting. In addition, the custom formatter ignores +the width and precision arguments (however they will still work on the format +specifiers not handled by the custom formatter). + +Typically this function shouldn't be called directly. It is much easier to make +use of the custom formatter by calling one of the convenience functions such as +Printf, Println, or Fprintf. 
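+
+That said, when spew-formatted arguments need to be mixed with plain fmt
+arguments, the formatter can be passed explicitly. A minimal sketch, with
+illustrative variable names:
+
+	fmt.Printf("cfg: %+v, attempts: %d\n", spew.NewFormatter(cfg), attempts)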
+*/ +func NewFormatter(v interface{}) fmt.Formatter { + return newFormatter(&Config, v) +} diff --git a/vendor/github.com/davecgh/go-spew/spew/spew.go b/vendor/github.com/davecgh/go-spew/spew/spew.go new file mode 100644 index 000000000..32c0e3388 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/spew.go @@ -0,0 +1,148 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "fmt" + "io" +) + +// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the formatted string as a value that satisfies error. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Errorf(format string, a ...interface{}) (err error) { + return fmt.Errorf(format, convertArgs(a)...) +} + +// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b)) +func Fprint(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprint(w, convertArgs(a)...) +} + +// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { + return fmt.Fprintf(w, format, convertArgs(a)...) +} + +// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it +// passed with a default Formatter interface returned by NewFormatter. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b)) +func Fprintln(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprintln(w, convertArgs(a)...) +} + +// Print is a wrapper for fmt.Print that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. 
+// +// This function is shorthand for the following syntax: +// +// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b)) +func Print(a ...interface{}) (n int, err error) { + return fmt.Print(convertArgs(a)...) +} + +// Printf is a wrapper for fmt.Printf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Printf(format string, a ...interface{}) (n int, err error) { + return fmt.Printf(format, convertArgs(a)...) +} + +// Println is a wrapper for fmt.Println that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b)) +func Println(a ...interface{}) (n int, err error) { + return fmt.Println(convertArgs(a)...) +} + +// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprint(a ...interface{}) string { + return fmt.Sprint(convertArgs(a)...) +} + +// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprintf(format string, a ...interface{}) string { + return fmt.Sprintf(format, convertArgs(a)...) +} + +// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it +// were passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprintln(a ...interface{}) string { + return fmt.Sprintln(convertArgs(a)...) +} + +// convertArgs accepts a slice of arguments and returns a slice of the same +// length with each argument converted to a default spew Formatter interface. +func convertArgs(args []interface{}) (formatters []interface{}) { + formatters = make([]interface{}, len(args)) + for index, arg := range args { + formatters[index] = NewFormatter(arg) + } + return formatters +} diff --git a/vendor/github.com/go-chi/chi/.gitignore b/vendor/github.com/go-chi/chi/.gitignore new file mode 100644 index 000000000..ba22c99a9 --- /dev/null +++ b/vendor/github.com/go-chi/chi/.gitignore @@ -0,0 +1,3 @@ +.idea +*.sw? +.vscode diff --git a/vendor/github.com/go-chi/chi/.travis.yml b/vendor/github.com/go-chi/chi/.travis.yml new file mode 100644 index 000000000..7b8e26bce --- /dev/null +++ b/vendor/github.com/go-chi/chi/.travis.yml @@ -0,0 +1,20 @@ +language: go + +go: + - 1.10.x + - 1.11.x + - 1.12.x + - 1.13.x + - 1.14.x + +script: + - go get -d -t ./... + - go vet ./... 
+ - go test ./... + - > + go_version=$(go version); + if [ ${go_version:13:4} = "1.12" ]; then + go get -u golang.org/x/tools/cmd/goimports; + goimports -d -e ./ | grep '.*' && { echo; echo "Aborting due to non-empty goimports output."; exit 1; } || :; + fi + diff --git a/vendor/github.com/go-chi/chi/CHANGELOG.md b/vendor/github.com/go-chi/chi/CHANGELOG.md new file mode 100644 index 000000000..9a64a72ee --- /dev/null +++ b/vendor/github.com/go-chi/chi/CHANGELOG.md @@ -0,0 +1,190 @@ +# Changelog + +## v4.1.2 (2020-06-02) + +- fix that handles MethodNotAllowed with path variables, thank you @caseyhadden for your contribution +- fix to replace nested wildcards correctly in RoutePattern, thank you @@unmultimedio for your contribution +- History of changes: see https://github.com/go-chi/chi/compare/v4.1.1...v4.1.2 + + +## v4.1.1 (2020-04-16) + +- fix for issue https://github.com/go-chi/chi/issues/411 which allows for overlapping regexp + route to the correct handler through a recursive tree search, thanks to @Jahaja for the PR/fix! +- new middleware.RouteHeaders as a simple router for request headers with wildcard support +- History of changes: see https://github.com/go-chi/chi/compare/v4.1.0...v4.1.1 + + +## v4.1.0 (2020-04-1) + +- middleware.LogEntry: Write method on interface now passes the response header + and an extra interface type useful for custom logger implementations. +- middleware.WrapResponseWriter: minor fix +- middleware.Recoverer: a bit prettier +- History of changes: see https://github.com/go-chi/chi/compare/v4.0.4...v4.1.0 + + +## v4.0.4 (2020-03-24) + +- middleware.Recoverer: new pretty stack trace printing (https://github.com/go-chi/chi/pull/496) +- a few minor improvements and fixes +- History of changes: see https://github.com/go-chi/chi/compare/v4.0.3...v4.0.4 + + +## v4.0.3 (2020-01-09) + +- core: fix regexp routing to include default value when param is not matched +- middleware: rewrite of middleware.Compress +- middleware: suppress http.ErrAbortHandler in middleware.Recoverer +- History of changes: see https://github.com/go-chi/chi/compare/v4.0.2...v4.0.3 + + +## v4.0.2 (2019-02-26) + +- Minor fixes +- History of changes: see https://github.com/go-chi/chi/compare/v4.0.1...v4.0.2 + + +## v4.0.1 (2019-01-21) + +- Fixes issue with compress middleware: #382 #385 +- History of changes: see https://github.com/go-chi/chi/compare/v4.0.0...v4.0.1 + + +## v4.0.0 (2019-01-10) + +- chi v4 requires Go 1.10.3+ (or Go 1.9.7+) - we have deprecated support for Go 1.7 and 1.8 +- router: respond with 404 on router with no routes (#362) +- router: additional check to ensure wildcard is at the end of a url pattern (#333) +- middleware: deprecate use of http.CloseNotifier (#347) +- middleware: fix RedirectSlashes to include query params on redirect (#334) +- History of changes: see https://github.com/go-chi/chi/compare/v3.3.4...v4.0.0 + + +## v3.3.4 (2019-01-07) + +- Minor middleware improvements. No changes to core library/router. Moving v3 into its +- own branch as a version of chi for Go 1.7, 1.8, 1.9, 1.10, 1.11 +- History of changes: see https://github.com/go-chi/chi/compare/v3.3.3...v3.3.4 + + +## v3.3.3 (2018-08-27) + +- Minor release +- See https://github.com/go-chi/chi/compare/v3.3.2...v3.3.3 + + +## v3.3.2 (2017-12-22) + +- Support to route trailing slashes on mounted sub-routers (#281) +- middleware: new `ContentCharset` to check matching charsets. Thank you + @csucu for your community contribution! 
+ + +## v3.3.1 (2017-11-20) + +- middleware: new `AllowContentType` handler for explicit whitelist of accepted request Content-Types +- middleware: new `SetHeader` handler for short-hand middleware to set a response header key/value +- Minor bug fixes + + +## v3.3.0 (2017-10-10) + +- New chi.RegisterMethod(method) to add support for custom HTTP methods, see _examples/custom-method for usage +- Deprecated LINK and UNLINK methods from the default list, please use `chi.RegisterMethod("LINK")` and `chi.RegisterMethod("UNLINK")` in an `init()` function + + +## v3.2.1 (2017-08-31) + +- Add new `Match(rctx *Context, method, path string) bool` method to `Routes` interface + and `Mux`. Match searches the mux's routing tree for a handler that matches the method/path +- Add new `RouteMethod` to `*Context` +- Add new `Routes` pointer to `*Context` +- Add new `middleware.GetHead` to route missing HEAD requests to GET handler +- Updated benchmarks (see README) + + +## v3.1.5 (2017-08-02) + +- Setup golint and go vet for the project +- As per golint, we've redefined `func ServerBaseContext(h http.Handler, baseCtx context.Context) http.Handler` + to `func ServerBaseContext(baseCtx context.Context, h http.Handler) http.Handler` + + +## v3.1.0 (2017-07-10) + +- Fix a few minor issues after v3 release +- Move `docgen` sub-pkg to https://github.com/go-chi/docgen +- Move `render` sub-pkg to https://github.com/go-chi/render +- Add new `URLFormat` handler to chi/middleware sub-pkg to make working with url mime + suffixes easier, ie. parsing `/articles/1.json` and `/articles/1.xml`. See comments in + https://github.com/go-chi/chi/blob/master/middleware/url_format.go for example usage. + + +## v3.0.0 (2017-06-21) + +- Major update to chi library with many exciting updates, but also some *breaking changes* +- URL parameter syntax changed from `/:id` to `/{id}` for even more flexible routing, such as + `/articles/{month}-{day}-{year}-{slug}`, `/articles/{id}`, and `/articles/{id}.{ext}` on the + same router +- Support for regexp for routing patterns, in the form of `/{paramKey:regExp}` for example: + `r.Get("/articles/{name:[a-z]+}", h)` and `chi.URLParam(r, "name")` +- Add `Method` and `MethodFunc` to `chi.Router` to allow routing definitions such as + `r.Method("GET", "/", h)` which provides a cleaner interface for custom handlers like + in `_examples/custom-handler` +- Deprecating `mux#FileServer` helper function. Instead, we encourage users to create their + own using file handler with the stdlib, see `_examples/fileserver` for an example +- Add support for LINK/UNLINK http methods via `r.Method()` and `r.MethodFunc()` +- Moved the chi project to its own organization, to allow chi-related community packages to + be easily discovered and supported, at: https://github.com/go-chi +- *NOTE:* please update your import paths to `"github.com/go-chi/chi"` +- *NOTE:* chi v2 is still available at https://github.com/go-chi/chi/tree/v2 + + +## v2.1.0 (2017-03-30) + +- Minor improvements and update to the chi core library +- Introduced a brand new `chi/render` sub-package to complete the story of building + APIs to offer a pattern for managing well-defined request / response payloads. Please + check out the updated `_examples/rest` example for how it works. 
+- Added `MethodNotAllowed(h http.HandlerFunc)` to chi.Router interface
+
+
+## v2.0.0 (2017-01-06)
+
+- After many months of v2 being in an RC state, with many companies and users running it in
+  production, and with the inclusion of some improvements to the middlewares, we are very
+  pleased to announce v2.0.0 of chi.
+
+
+## v2.0.0-rc1 (2016-07-26)
+
+- Huge update! chi v2 is a large refactor targeting Go 1.7+. As of Go 1.7, the popular
+  community `"net/context"` package has been included in the standard library as `"context"` and
+  utilized by `"net/http"` and `http.Request` to manage deadlines, cancelation signals and other
+  request-scoped values. We're very excited about the new context addition and are proud to
+  introduce chi v2, a minimal and powerful routing package for building large HTTP services,
+  with zero external dependencies. Chi focuses on idiomatic design and encourages the use of
+  stdlib HTTP handlers and middlewares.
+- chi v2 deprecates its `chi.Handler` interface and requires `http.Handler` or `http.HandlerFunc`
+- chi v2 stores URL routing parameters and patterns in the standard request context: `r.Context()`
+- chi v2 lower-level routing context is accessible by `chi.RouteContext(r.Context()) *chi.Context`,
+  which provides direct access to URL routing parameters, the routing path and the matching
+  routing patterns.
+- Users upgrading from chi v1 to v2 need to:
+  1. Update the old chi.Handler signature, `func(ctx context.Context, w http.ResponseWriter, r *http.Request)` to
+     the standard http.Handler: `func(w http.ResponseWriter, r *http.Request)`
+  2. Use `chi.URLParam(r *http.Request, paramKey string) string`
+     or `URLParamFromCtx(ctx context.Context, paramKey string) string` to access a url parameter value
+
+
+## v1.0.0 (2016-07-01)
+
+- Released chi v1 stable https://github.com/go-chi/chi/tree/v1.0.0 for Go 1.6 and older.
+
+
+## v0.9.0 (2016-03-31)
+
+- Reuse context objects via sync.Pool for zero-allocation routing [#33](https://github.com/go-chi/chi/pull/33)
+- BREAKING NOTE: due to subtle API changes, previously `chi.URLParams(ctx)["id"]` used to access url parameters
+  has changed to: `chi.URLParam(ctx, "id")`
diff --git a/vendor/github.com/go-chi/chi/CONTRIBUTING.md b/vendor/github.com/go-chi/chi/CONTRIBUTING.md
new file mode 100644
index 000000000..c0ac2dfe8
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/CONTRIBUTING.md
@@ -0,0 +1,31 @@
+# Contributing
+
+## Prerequisites
+
+1. [Install Go][go-install].
+2. Download the sources and switch the working directory:
+
+    ```bash
+    go get -u -d github.com/go-chi/chi
+    cd $GOPATH/src/github.com/go-chi/chi
+    ```
+
+## Submitting a Pull Request
+
+A typical workflow is:
+
+1. [Fork the repository.][fork] [This tip may also be helpful.][go-fork-tip]
+2. [Create a topic branch.][branch]
+3. Add tests for your change.
+4. Run `go test`. If the new tests already pass, return to step 3; they should fail until step 5.
+5. Implement the change and ensure the tests from step 4 now pass.
+6. Run `goimports -w .` to ensure the new code conforms to the Go formatting guidelines.
+7. [Add, commit and push your changes.][git-help]
+8. 
[Submit a pull request.][pull-req] + +[go-install]: https://golang.org/doc/install +[go-fork-tip]: http://blog.campoy.cat/2014/03/github-and-go-forking-pull-requests-and.html +[fork]: https://help.github.com/articles/fork-a-repo +[branch]: http://learn.github.com/p/branching.html +[git-help]: https://guides.github.com +[pull-req]: https://help.github.com/articles/using-pull-requests diff --git a/vendor/github.com/go-chi/chi/LICENSE b/vendor/github.com/go-chi/chi/LICENSE new file mode 100644 index 000000000..d99f02ffa --- /dev/null +++ b/vendor/github.com/go-chi/chi/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2015-present Peter Kieltyka (https://github.com/pkieltyka), Google Inc. + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/go-chi/chi/README.md b/vendor/github.com/go-chi/chi/README.md new file mode 100644 index 000000000..5a8fc9d09 --- /dev/null +++ b/vendor/github.com/go-chi/chi/README.md @@ -0,0 +1,441 @@ +# chi + + +[![GoDoc Widget]][GoDoc] [![Travis Widget]][Travis] + +`chi` is a lightweight, idiomatic and composable router for building Go HTTP services. It's +especially good at helping you write large REST API services that are kept maintainable as your +project grows and changes. `chi` is built on the new `context` package introduced in Go 1.7 to +handle signaling, cancelation and request-scoped values across a handler chain. + +The focus of the project has been to seek out an elegant and comfortable design for writing +REST API servers, written during the development of the Pressly API service that powers our +public API service, which in turn powers all of our client-side applications. + +The key considerations of chi's design are: project structure, maintainability, standard http +handlers (stdlib-only), developer productivity, and deconstructing a large system into many small +parts. The core router `github.com/go-chi/chi` is quite small (less than 1000 LOC), but we've also +included some useful/optional subpackages: [middleware](/middleware), [render](https://github.com/go-chi/render) and [docgen](https://github.com/go-chi/docgen). We hope you enjoy it too! 
+ +## Install + +`go get -u github.com/go-chi/chi` + + +## Features + +* **Lightweight** - cloc'd in ~1000 LOC for the chi router +* **Fast** - yes, see [benchmarks](#benchmarks) +* **100% compatible with net/http** - use any http or middleware pkg in the ecosystem that is also compatible with `net/http` +* **Designed for modular/composable APIs** - middlewares, inline middlewares, route groups and subrouter mounting +* **Context control** - built on new `context` package, providing value chaining, cancellations and timeouts +* **Robust** - in production at Pressly, CloudFlare, Heroku, 99Designs, and many others (see [discussion](https://github.com/go-chi/chi/issues/91)) +* **Doc generation** - `docgen` auto-generates routing documentation from your source to JSON or Markdown +* **No external dependencies** - plain ol' Go stdlib + net/http + + +## Examples + +See [_examples/](https://github.com/go-chi/chi/blob/master/_examples/) for a variety of examples. + + +**As easy as:** + +```go +package main + +import ( + "net/http" + + "github.com/go-chi/chi" + "github.com/go-chi/chi/middleware" +) + +func main() { + r := chi.NewRouter() + r.Use(middleware.Logger) + r.Get("/", func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte("welcome")) + }) + http.ListenAndServe(":3000", r) +} +``` + +**REST Preview:** + +Here is a little preview of how routing looks like with chi. Also take a look at the generated routing docs +in JSON ([routes.json](https://github.com/go-chi/chi/blob/master/_examples/rest/routes.json)) and in +Markdown ([routes.md](https://github.com/go-chi/chi/blob/master/_examples/rest/routes.md)). + +I highly recommend reading the source of the [examples](https://github.com/go-chi/chi/blob/master/_examples/) listed +above, they will show you all the features of chi and serve as a good form of documentation. + +```go +import ( + //... + "context" + "github.com/go-chi/chi" + "github.com/go-chi/chi/middleware" +) + +func main() { + r := chi.NewRouter() + + // A good base middleware stack + r.Use(middleware.RequestID) + r.Use(middleware.RealIP) + r.Use(middleware.Logger) + r.Use(middleware.Recoverer) + + // Set a timeout value on the request context (ctx), that will signal + // through ctx.Done() that the request has timed out and further + // processing should be stopped. 
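+	// Note that the middleware only cancels the request context; handlers must
+	// still watch ctx.Done() and return early for the timeout to take effect.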
+ r.Use(middleware.Timeout(60 * time.Second)) + + r.Get("/", func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte("hi")) + }) + + // RESTy routes for "articles" resource + r.Route("/articles", func(r chi.Router) { + r.With(paginate).Get("/", listArticles) // GET /articles + r.With(paginate).Get("/{month}-{day}-{year}", listArticlesByDate) // GET /articles/01-16-2017 + + r.Post("/", createArticle) // POST /articles + r.Get("/search", searchArticles) // GET /articles/search + + // Regexp url parameters: + r.Get("/{articleSlug:[a-z-]+}", getArticleBySlug) // GET /articles/home-is-toronto + + // Subrouters: + r.Route("/{articleID}", func(r chi.Router) { + r.Use(ArticleCtx) + r.Get("/", getArticle) // GET /articles/123 + r.Put("/", updateArticle) // PUT /articles/123 + r.Delete("/", deleteArticle) // DELETE /articles/123 + }) + }) + + // Mount the admin sub-router + r.Mount("/admin", adminRouter()) + + http.ListenAndServe(":3333", r) +} + +func ArticleCtx(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + articleID := chi.URLParam(r, "articleID") + article, err := dbGetArticle(articleID) + if err != nil { + http.Error(w, http.StatusText(404), 404) + return + } + ctx := context.WithValue(r.Context(), "article", article) + next.ServeHTTP(w, r.WithContext(ctx)) + }) +} + +func getArticle(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + article, ok := ctx.Value("article").(*Article) + if !ok { + http.Error(w, http.StatusText(422), 422) + return + } + w.Write([]byte(fmt.Sprintf("title:%s", article.Title))) +} + +// A completely separate router for administrator routes +func adminRouter() http.Handler { + r := chi.NewRouter() + r.Use(AdminOnly) + r.Get("/", adminIndex) + r.Get("/accounts", adminListAccounts) + return r +} + +func AdminOnly(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + perm, ok := ctx.Value("acl.permission").(YourPermissionType) + if !ok || !perm.IsAdmin() { + http.Error(w, http.StatusText(403), 403) + return + } + next.ServeHTTP(w, r) + }) +} +``` + + +## Router design + +chi's router is based on a kind of [Patricia Radix trie](https://en.wikipedia.org/wiki/Radix_tree). +The router is fully compatible with `net/http`. + +Built on top of the tree is the `Router` interface: + +```go +// Router consisting of the core routing methods used by chi's Mux, +// using only the standard net/http. +type Router interface { + http.Handler + Routes + + // Use appends one or more middlewares onto the Router stack. + Use(middlewares ...func(http.Handler) http.Handler) + + // With adds inline middlewares for an endpoint handler. + With(middlewares ...func(http.Handler) http.Handler) Router + + // Group adds a new inline-Router along the current routing + // path, with a fresh middleware stack for the inline-Router. + Group(fn func(r Router)) Router + + // Route mounts a sub-Router along a `pattern`` string. + Route(pattern string, fn func(r Router)) Router + + // Mount attaches another http.Handler along ./pattern/* + Mount(pattern string, h http.Handler) + + // Handle and HandleFunc adds routes for `pattern` that matches + // all HTTP methods. + Handle(pattern string, h http.Handler) + HandleFunc(pattern string, h http.HandlerFunc) + + // Method and MethodFunc adds routes for `pattern` that matches + // the `method` HTTP method. 
+ Method(method, pattern string, h http.Handler) + MethodFunc(method, pattern string, h http.HandlerFunc) + + // HTTP-method routing along `pattern` + Connect(pattern string, h http.HandlerFunc) + Delete(pattern string, h http.HandlerFunc) + Get(pattern string, h http.HandlerFunc) + Head(pattern string, h http.HandlerFunc) + Options(pattern string, h http.HandlerFunc) + Patch(pattern string, h http.HandlerFunc) + Post(pattern string, h http.HandlerFunc) + Put(pattern string, h http.HandlerFunc) + Trace(pattern string, h http.HandlerFunc) + + // NotFound defines a handler to respond whenever a route could + // not be found. + NotFound(h http.HandlerFunc) + + // MethodNotAllowed defines a handler to respond whenever a method is + // not allowed. + MethodNotAllowed(h http.HandlerFunc) +} + +// Routes interface adds two methods for router traversal, which is also +// used by the github.com/go-chi/docgen package to generate documentation for Routers. +type Routes interface { + // Routes returns the routing tree in an easily traversable structure. + Routes() []Route + + // Middlewares returns the list of middlewares in use by the router. + Middlewares() Middlewares + + // Match searches the routing tree for a handler that matches + // the method/path - similar to routing a http request, but without + // executing the handler thereafter. + Match(rctx *Context, method, path string) bool +} +``` + +Each routing method accepts a URL `pattern` and chain of `handlers`. The URL pattern +supports named params (ie. `/users/{userID}`) and wildcards (ie. `/admin/*`). URL parameters +can be fetched at runtime by calling `chi.URLParam(r, "userID")` for named parameters +and `chi.URLParam(r, "*")` for a wildcard parameter. + + +### Middleware handlers + +chi's middlewares are just stdlib net/http middleware handlers. There is nothing special +about them, which means the router and all the tooling is designed to be compatible and +friendly with any middleware in the community. This offers much better extensibility and reuse +of packages and is at the heart of chi's purpose. + +Here is an example of a standard net/http middleware handler using the new request context +available in Go. This middleware sets a hypothetical user identifier on the request +context and calls the next handler in the chain. + +```go +// HTTP middleware setting a value on the request context +func MyMiddleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx := context.WithValue(r.Context(), "user", "123") + next.ServeHTTP(w, r.WithContext(ctx)) + }) +} +``` + + +### Request handlers + +chi uses standard net/http request handlers. This little snippet is an example of a http.Handler +func that reads a user identifier from the request context - hypothetically, identifying +the user sending an authenticated request, validated+set by a previous middleware handler. + +```go +// HTTP handler accessing data from the request context. +func MyRequestHandler(w http.ResponseWriter, r *http.Request) { + user := r.Context().Value("user").(string) + w.Write([]byte(fmt.Sprintf("hi %s", user))) +} +``` + + +### URL parameters + +chi's router parses and stores URL parameters right onto the request context. Here is +an example of how to access URL params in your net/http handlers. And of course, middlewares +are able to access the same information. + +```go +// HTTP handler accessing the url routing parameters. 
+func MyRequestHandler(w http.ResponseWriter, r *http.Request) {
+	userID := chi.URLParam(r, "userID") // from a route like /users/{userID}
+
+	ctx := r.Context()
+	key := ctx.Value("key").(string)
+
+	w.Write([]byte(fmt.Sprintf("hi %v, %v", userID, key)))
+}
+```
+
+
+## Middlewares
+
+chi comes equipped with an optional `middleware` package, providing a suite of standard
+`net/http` middlewares. Please note that any middleware in the ecosystem that is also compatible
+with `net/http` can be used with chi's mux.
+
+### Core middlewares
+
+-----------------------------------------------------------------------------------------------------------
+| chi/middleware Handler | description                                                                     |
+|:----------------------|:---------------------------------------------------------------------------------
+| AllowContentType | Explicit whitelist of accepted request Content-Types |
+| BasicAuth | Basic HTTP authentication |
+| Compress | Gzip compression for clients that accept compressed responses |
+| GetHead | Automatically routes undefined HEAD requests to GET handlers |
+| Heartbeat | Monitoring endpoint to check the server's pulse |
+| Logger | Logs the start and end of each request with the elapsed processing time |
+| NoCache | Sets response headers to prevent clients from caching |
+| Profiler | Easily attaches net/http/pprof to your routers |
+| RealIP | Sets an http.Request's RemoteAddr to either X-Forwarded-For or X-Real-IP |
+| Recoverer | Gracefully absorbs panics and prints the stack trace |
+| RequestID | Injects a request ID into the context of each request |
+| RedirectSlashes | Redirects routing paths with a trailing slash to the same path without it |
+| SetHeader | Short-hand middleware to set a response header key/value |
+| StripSlashes | Strips trailing slashes from routing paths |
+| Throttle | Puts a ceiling on the number of concurrent requests |
+| Timeout | Signals to the request context when the timeout deadline is reached |
+| URLFormat | Parses the extension from the URL and puts it on the request context |
+| WithValue | Short-hand middleware to set a key/value on the request context |
+-----------------------------------------------------------------------------------------------------------
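+
+Here is a minimal sketch wiring a few of the core middlewares above; the chosen
+compression level, content types and timeout are illustrative values only, not
+recommendations:
+
+```go
+package main
+
+import (
+	"net/http"
+	"time"
+
+	"github.com/go-chi/chi"
+	"github.com/go-chi/chi/middleware"
+)
+
+func main() {
+	r := chi.NewRouter()
+
+	// Request-scoped basics: request ID, client IP, logging and panic recovery.
+	// RealIP is mounted early so later middlewares (e.g. Logger) see the intended address.
+	r.Use(middleware.RequestID)
+	r.Use(middleware.RealIP)
+	r.Use(middleware.Logger)
+	r.Use(middleware.Recoverer)
+
+	// Operational helpers: a /ping health endpoint, response compression for
+	// selected content types, and a per-request deadline.
+	r.Use(middleware.Heartbeat("/ping"))
+	r.Use(middleware.Compress(5, "application/json", "text/html"))
+	r.Use(middleware.Timeout(30 * time.Second))
+
+	r.Get("/", func(w http.ResponseWriter, r *http.Request) {
+		w.Write([]byte("ok"))
+	})
+
+	http.ListenAndServe(":3333", r)
+}
+```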
+
+### Extra middlewares & packages
+
+Please see https://github.com/go-chi for additional packages.
+
+--------------------------------------------------------------------------------------------------------------------
+| package | description |
+|:---------------------------------------------------|:-------------------------------------------------------------
+| [cors](https://github.com/go-chi/cors) | Cross-origin resource sharing (CORS) |
+| [docgen](https://github.com/go-chi/docgen) | Print chi.Router routes at runtime |
+| [jwtauth](https://github.com/go-chi/jwtauth) | JWT authentication |
+| [hostrouter](https://github.com/go-chi/hostrouter) | Domain/host based request routing |
+| [httplog](https://github.com/go-chi/httplog) | Small but powerful structured HTTP request logging |
+| [httprate](https://github.com/go-chi/httprate) | HTTP request rate limiter |
+| [httptracer](https://github.com/go-chi/httptracer) | HTTP request performance tracing library |
+| [httpvcr](https://github.com/go-chi/httpvcr) | Write deterministic tests for external sources |
+| [stampede](https://github.com/go-chi/stampede) | HTTP request coalescer |
+--------------------------------------------------------------------------------------------------------------------
+
+Please [submit a PR](./CONTRIBUTING.md) if you'd like to include a link to a chi-compatible middleware.
+
+
+## context?
+
+`context` is a tiny package that provides a simple interface for signaling context across call stacks
+and goroutines. It was originally written by [Sameer Ajmani](https://github.com/Sajmani)
+and has been part of the stdlib since Go 1.7.
+
+Learn more at https://blog.golang.org/context
+
+See also:
+* Docs: https://golang.org/pkg/context
+* Source: https://github.com/golang/go/tree/master/src/context
+
+
+## Benchmarks
+
+The benchmark suite: https://github.com/pkieltyka/go-http-routing-benchmark
+
+Results as of Jan 9, 2019 with Go 1.11.4 on a Linux X1 Carbon laptop:
+
+```shell
+BenchmarkChi_Param            3000000       475 ns/op      432 B/op      3 allocs/op
+BenchmarkChi_Param5           2000000       696 ns/op      432 B/op      3 allocs/op
+BenchmarkChi_Param20          1000000      1275 ns/op      432 B/op      3 allocs/op
+BenchmarkChi_ParamWrite       3000000       505 ns/op      432 B/op      3 allocs/op
+BenchmarkChi_GithubStatic     3000000       508 ns/op      432 B/op      3 allocs/op
+BenchmarkChi_GithubParam      2000000       669 ns/op      432 B/op      3 allocs/op
+BenchmarkChi_GithubAll          10000    134627 ns/op    87699 B/op    609 allocs/op
+BenchmarkChi_GPlusStatic      3000000       402 ns/op      432 B/op      3 allocs/op
+BenchmarkChi_GPlusParam       3000000       500 ns/op      432 B/op      3 allocs/op
+BenchmarkChi_GPlus2Params     3000000       586 ns/op      432 B/op      3 allocs/op
+BenchmarkChi_GPlusAll          200000      7237 ns/op     5616 B/op     39 allocs/op
+BenchmarkChi_ParseStatic      3000000       408 ns/op      432 B/op      3 allocs/op
+BenchmarkChi_ParseParam       3000000       488 ns/op      432 B/op      3 allocs/op
+BenchmarkChi_Parse2Params     3000000       551 ns/op      432 B/op      3 allocs/op
+BenchmarkChi_ParseAll          100000     13508 ns/op    11232 B/op     78 allocs/op
+BenchmarkChi_StaticAll          20000     81933 ns/op    67826 B/op    471 allocs/op
+```
+
+Comparison with other routers: https://gist.github.com/pkieltyka/123032f12052520aaccab752bd3e78cc
+
+NOTE: the allocs in the benchmark above are from the calls to http.Request's
+`WithContext(context.Context)` method that clones the http.Request, sets the `Context()`
+on the duplicated (alloc'd) request, and returns the new request object. This is just
+how setting context on a request in Go works.
+
+
+## Credits
+
+* Carl Jackson for https://github.com/zenazn/goji
+  * Parts of chi's thinking come from goji, and chi's middleware package
+    is sourced from goji.
+* Armon Dadgar for https://github.com/armon/go-radix +* Contributions: [@VojtechVitek](https://github.com/VojtechVitek) + +We'll be more than happy to see [your contributions](./CONTRIBUTING.md)! + + +## Beyond REST + +chi is just a http router that lets you decompose request handling into many smaller layers. +Many companies use chi to write REST services for their public APIs. But, REST is just a convention +for managing state via HTTP, and there's a lot of other pieces required to write a complete client-server +system or network of microservices. + +Looking beyond REST, I also recommend some newer works in the field: +* [webrpc](https://github.com/webrpc/webrpc) - Web-focused RPC client+server framework with code-gen +* [gRPC](https://github.com/grpc/grpc-go) - Google's RPC framework via protobufs +* [graphql](https://github.com/99designs/gqlgen) - Declarative query language +* [NATS](https://nats.io) - lightweight pub-sub + + +## License + +Copyright (c) 2015-present [Peter Kieltyka](https://github.com/pkieltyka) + +Licensed under [MIT License](./LICENSE) + +[GoDoc]: https://godoc.org/github.com/go-chi/chi +[GoDoc Widget]: https://godoc.org/github.com/go-chi/chi?status.svg +[Travis]: https://travis-ci.org/go-chi/chi +[Travis Widget]: https://travis-ci.org/go-chi/chi.svg?branch=master diff --git a/vendor/github.com/go-chi/chi/chain.go b/vendor/github.com/go-chi/chi/chain.go new file mode 100644 index 000000000..88e684613 --- /dev/null +++ b/vendor/github.com/go-chi/chi/chain.go @@ -0,0 +1,49 @@ +package chi + +import "net/http" + +// Chain returns a Middlewares type from a slice of middleware handlers. +func Chain(middlewares ...func(http.Handler) http.Handler) Middlewares { + return Middlewares(middlewares) +} + +// Handler builds and returns a http.Handler from the chain of middlewares, +// with `h http.Handler` as the final handler. +func (mws Middlewares) Handler(h http.Handler) http.Handler { + return &ChainHandler{mws, h, chain(mws, h)} +} + +// HandlerFunc builds and returns a http.Handler from the chain of middlewares, +// with `h http.Handler` as the final handler. +func (mws Middlewares) HandlerFunc(h http.HandlerFunc) http.Handler { + return &ChainHandler{mws, h, chain(mws, h)} +} + +// ChainHandler is a http.Handler with support for handler composition and +// execution. +type ChainHandler struct { + Middlewares Middlewares + Endpoint http.Handler + chain http.Handler +} + +func (c *ChainHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + c.chain.ServeHTTP(w, r) +} + +// chain builds a http.Handler composed of an inline middleware stack and endpoint +// handler in the order they are passed. +func chain(middlewares []func(http.Handler) http.Handler, endpoint http.Handler) http.Handler { + // Return ahead of time if there aren't any middlewares for the chain + if len(middlewares) == 0 { + return endpoint + } + + // Wrap the end handler with the middleware chain + h := middlewares[len(middlewares)-1](endpoint) + for i := len(middlewares) - 2; i >= 0; i-- { + h = middlewares[i](h) + } + + return h +} diff --git a/vendor/github.com/go-chi/chi/chi.go b/vendor/github.com/go-chi/chi/chi.go new file mode 100644 index 000000000..b7063dc29 --- /dev/null +++ b/vendor/github.com/go-chi/chi/chi.go @@ -0,0 +1,134 @@ +// +// Package chi is a small, idiomatic and composable router for building HTTP services. +// +// chi requires Go 1.10 or newer. 
+// +// Example: +// package main +// +// import ( +// "net/http" +// +// "github.com/go-chi/chi" +// "github.com/go-chi/chi/middleware" +// ) +// +// func main() { +// r := chi.NewRouter() +// r.Use(middleware.Logger) +// r.Use(middleware.Recoverer) +// +// r.Get("/", func(w http.ResponseWriter, r *http.Request) { +// w.Write([]byte("root.")) +// }) +// +// http.ListenAndServe(":3333", r) +// } +// +// See github.com/go-chi/chi/_examples/ for more in-depth examples. +// +// URL patterns allow for easy matching of path components in HTTP +// requests. The matching components can then be accessed using +// chi.URLParam(). All patterns must begin with a slash. +// +// A simple named placeholder {name} matches any sequence of characters +// up to the next / or the end of the URL. Trailing slashes on paths must +// be handled explicitly. +// +// A placeholder with a name followed by a colon allows a regular +// expression match, for example {number:\\d+}. The regular expression +// syntax is Go's normal regexp RE2 syntax, except that regular expressions +// including { or } are not supported, and / will never be +// matched. An anonymous regexp pattern is allowed, using an empty string +// before the colon in the placeholder, such as {:\\d+} +// +// The special placeholder of asterisk matches the rest of the requested +// URL. Any trailing characters in the pattern are ignored. This is the only +// placeholder which will match / characters. +// +// Examples: +// "/user/{name}" matches "/user/jsmith" but not "/user/jsmith/info" or "/user/jsmith/" +// "/user/{name}/info" matches "/user/jsmith/info" +// "/page/*" matches "/page/intro/latest" +// "/page/*/index" also matches "/page/intro/latest" +// "/date/{yyyy:\\d\\d\\d\\d}/{mm:\\d\\d}/{dd:\\d\\d}" matches "/date/2017/04/01" +// +package chi + +import "net/http" + +// NewRouter returns a new Mux object that implements the Router interface. +func NewRouter() *Mux { + return NewMux() +} + +// Router consisting of the core routing methods used by chi's Mux, +// using only the standard net/http. +type Router interface { + http.Handler + Routes + + // Use appends one or more middlewares onto the Router stack. + Use(middlewares ...func(http.Handler) http.Handler) + + // With adds inline middlewares for an endpoint handler. + With(middlewares ...func(http.Handler) http.Handler) Router + + // Group adds a new inline-Router along the current routing + // path, with a fresh middleware stack for the inline-Router. + Group(fn func(r Router)) Router + + // Route mounts a sub-Router along a `pattern`` string. + Route(pattern string, fn func(r Router)) Router + + // Mount attaches another http.Handler along ./pattern/* + Mount(pattern string, h http.Handler) + + // Handle and HandleFunc adds routes for `pattern` that matches + // all HTTP methods. + Handle(pattern string, h http.Handler) + HandleFunc(pattern string, h http.HandlerFunc) + + // Method and MethodFunc adds routes for `pattern` that matches + // the `method` HTTP method. 
+ Method(method, pattern string, h http.Handler) + MethodFunc(method, pattern string, h http.HandlerFunc) + + // HTTP-method routing along `pattern` + Connect(pattern string, h http.HandlerFunc) + Delete(pattern string, h http.HandlerFunc) + Get(pattern string, h http.HandlerFunc) + Head(pattern string, h http.HandlerFunc) + Options(pattern string, h http.HandlerFunc) + Patch(pattern string, h http.HandlerFunc) + Post(pattern string, h http.HandlerFunc) + Put(pattern string, h http.HandlerFunc) + Trace(pattern string, h http.HandlerFunc) + + // NotFound defines a handler to respond whenever a route could + // not be found. + NotFound(h http.HandlerFunc) + + // MethodNotAllowed defines a handler to respond whenever a method is + // not allowed. + MethodNotAllowed(h http.HandlerFunc) +} + +// Routes interface adds two methods for router traversal, which is also +// used by the `docgen` subpackage to generation documentation for Routers. +type Routes interface { + // Routes returns the routing tree in an easily traversable structure. + Routes() []Route + + // Middlewares returns the list of middlewares in use by the router. + Middlewares() Middlewares + + // Match searches the routing tree for a handler that matches + // the method/path - similar to routing a http request, but without + // executing the handler thereafter. + Match(rctx *Context, method, path string) bool +} + +// Middlewares type is a slice of standard middleware handlers with methods +// to compose middleware chains and http.Handler's. +type Middlewares []func(http.Handler) http.Handler diff --git a/vendor/github.com/go-chi/chi/context.go b/vendor/github.com/go-chi/chi/context.go new file mode 100644 index 000000000..26c609ea2 --- /dev/null +++ b/vendor/github.com/go-chi/chi/context.go @@ -0,0 +1,172 @@ +package chi + +import ( + "context" + "net" + "net/http" + "strings" +) + +// URLParam returns the url parameter from a http.Request object. +func URLParam(r *http.Request, key string) string { + if rctx := RouteContext(r.Context()); rctx != nil { + return rctx.URLParam(key) + } + return "" +} + +// URLParamFromCtx returns the url parameter from a http.Request Context. +func URLParamFromCtx(ctx context.Context, key string) string { + if rctx := RouteContext(ctx); rctx != nil { + return rctx.URLParam(key) + } + return "" +} + +// RouteContext returns chi's routing Context object from a +// http.Request Context. +func RouteContext(ctx context.Context) *Context { + val, _ := ctx.Value(RouteCtxKey).(*Context) + return val +} + +// ServerBaseContext wraps an http.Handler to set the request context to the +// `baseCtx`. +func ServerBaseContext(baseCtx context.Context, h http.Handler) http.Handler { + fn := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + baseCtx := baseCtx + + // Copy over default net/http server context keys + if v, ok := ctx.Value(http.ServerContextKey).(*http.Server); ok { + baseCtx = context.WithValue(baseCtx, http.ServerContextKey, v) + } + if v, ok := ctx.Value(http.LocalAddrContextKey).(net.Addr); ok { + baseCtx = context.WithValue(baseCtx, http.LocalAddrContextKey, v) + } + + h.ServeHTTP(w, r.WithContext(baseCtx)) + }) + return fn +} + +// NewRouteContext returns a new routing Context object. +func NewRouteContext() *Context { + return &Context{} +} + +var ( + // RouteCtxKey is the context.Context key to store the request context. 
+ RouteCtxKey = &contextKey{"RouteContext"} +) + +// Context is the default routing context set on the root node of a +// request context to track route patterns, URL parameters and +// an optional routing path. +type Context struct { + Routes Routes + + // Routing path/method override used during the route search. + // See Mux#routeHTTP method. + RoutePath string + RouteMethod string + + // Routing pattern stack throughout the lifecycle of the request, + // across all connected routers. It is a record of all matching + // patterns across a stack of sub-routers. + RoutePatterns []string + + // URLParams are the stack of routeParams captured during the + // routing lifecycle across a stack of sub-routers. + URLParams RouteParams + + // The endpoint routing pattern that matched the request URI path + // or `RoutePath` of the current sub-router. This value will update + // during the lifecycle of a request passing through a stack of + // sub-routers. + routePattern string + + // Route parameters matched for the current sub-router. It is + // intentionally unexported so it cant be tampered. + routeParams RouteParams + + // methodNotAllowed hint + methodNotAllowed bool +} + +// Reset a routing context to its initial state. +func (x *Context) Reset() { + x.Routes = nil + x.RoutePath = "" + x.RouteMethod = "" + x.RoutePatterns = x.RoutePatterns[:0] + x.URLParams.Keys = x.URLParams.Keys[:0] + x.URLParams.Values = x.URLParams.Values[:0] + + x.routePattern = "" + x.routeParams.Keys = x.routeParams.Keys[:0] + x.routeParams.Values = x.routeParams.Values[:0] + x.methodNotAllowed = false +} + +// URLParam returns the corresponding URL parameter value from the request +// routing context. +func (x *Context) URLParam(key string) string { + for k := len(x.URLParams.Keys) - 1; k >= 0; k-- { + if x.URLParams.Keys[k] == key { + return x.URLParams.Values[k] + } + } + return "" +} + +// RoutePattern builds the routing pattern string for the particular +// request, at the particular point during routing. This means, the value +// will change throughout the execution of a request in a router. That is +// why its advised to only use this value after calling the next handler. +// +// For example, +// +// func Instrument(next http.Handler) http.Handler { +// return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { +// next.ServeHTTP(w, r) +// routePattern := chi.RouteContext(r.Context()).RoutePattern() +// measure(w, r, routePattern) +// }) +// } +func (x *Context) RoutePattern() string { + routePattern := strings.Join(x.RoutePatterns, "") + return replaceWildcards(routePattern) +} + +// replaceWildcards takes a route pattern and recursively replaces all +// occurrences of "/*/" to "/". +func replaceWildcards(p string) string { + if strings.Contains(p, "/*/") { + return replaceWildcards(strings.Replace(p, "/*/", "/", -1)) + } + + return p +} + +// RouteParams is a structure to track URL routing parameters efficiently. +type RouteParams struct { + Keys, Values []string +} + +// Add will append a URL parameter to the end of the route param +func (s *RouteParams) Add(key, value string) { + s.Keys = append(s.Keys, key) + s.Values = append(s.Values, value) +} + +// contextKey is a value for use with context.WithValue. It's used as +// a pointer so it fits in an interface{} without allocation. This technique +// for defining context keys was copied from Go 1.7's new use of context in net/http. 
+type contextKey struct { + name string +} + +func (k *contextKey) String() string { + return "chi context value " + k.name +} diff --git a/vendor/github.com/go-chi/chi/middleware/basic_auth.go b/vendor/github.com/go-chi/chi/middleware/basic_auth.go new file mode 100644 index 000000000..87b2641a6 --- /dev/null +++ b/vendor/github.com/go-chi/chi/middleware/basic_auth.go @@ -0,0 +1,32 @@ +package middleware + +import ( + "fmt" + "net/http" +) + +// BasicAuth implements a simple middleware handler for adding basic http auth to a route. +func BasicAuth(realm string, creds map[string]string) func(next http.Handler) http.Handler { + return func(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + user, pass, ok := r.BasicAuth() + if !ok { + basicAuthFailed(w, realm) + return + } + + credPass, credUserOk := creds[user] + if !credUserOk || pass != credPass { + basicAuthFailed(w, realm) + return + } + + next.ServeHTTP(w, r) + }) + } +} + +func basicAuthFailed(w http.ResponseWriter, realm string) { + w.Header().Add("WWW-Authenticate", fmt.Sprintf(`Basic realm="%s"`, realm)) + w.WriteHeader(http.StatusUnauthorized) +} diff --git a/vendor/github.com/go-chi/chi/middleware/compress.go b/vendor/github.com/go-chi/chi/middleware/compress.go new file mode 100644 index 000000000..2f40cc15a --- /dev/null +++ b/vendor/github.com/go-chi/chi/middleware/compress.go @@ -0,0 +1,399 @@ +package middleware + +import ( + "bufio" + "compress/flate" + "compress/gzip" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "strings" + "sync" +) + +var defaultCompressibleContentTypes = []string{ + "text/html", + "text/css", + "text/plain", + "text/javascript", + "application/javascript", + "application/x-javascript", + "application/json", + "application/atom+xml", + "application/rss+xml", + "image/svg+xml", +} + +// Compress is a middleware that compresses response +// body of a given content types to a data format based +// on Accept-Encoding request header. It uses a given +// compression level. +// +// NOTE: make sure to set the Content-Type header on your response +// otherwise this middleware will not compress the response body. For ex, in +// your handler you should set w.Header().Set("Content-Type", http.DetectContentType(yourBody)) +// or set it manually. +// +// Passing a compression level of 5 is sensible value +func Compress(level int, types ...string) func(next http.Handler) http.Handler { + compressor := NewCompressor(level, types...) + return compressor.Handler +} + +// Compressor represents a set of encoding configurations. +type Compressor struct { + level int // The compression level. + // The mapping of encoder names to encoder functions. + encoders map[string]EncoderFunc + // The mapping of pooled encoders to pools. + pooledEncoders map[string]*sync.Pool + // The set of content types allowed to be compressed. + allowedTypes map[string]struct{} + allowedWildcards map[string]struct{} + // The list of encoders in order of decreasing precedence. + encodingPrecedence []string +} + +// NewCompressor creates a new Compressor that will handle encoding responses. +// +// The level should be one of the ones defined in the flate package. +// The types are the content types that are allowed to be compressed. +func NewCompressor(level int, types ...string) *Compressor { + // If types are provided, set those as the allowed types. If none are + // provided, use the default list. 
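+	// Types given as "<type>/*" (for example "text/*") are registered as
+	// wildcards for the whole group; any other use of '*' panics below.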
+ allowedTypes := make(map[string]struct{}) + allowedWildcards := make(map[string]struct{}) + if len(types) > 0 { + for _, t := range types { + if strings.Contains(strings.TrimSuffix(t, "/*"), "*") { + panic(fmt.Sprintf("middleware/compress: Unsupported content-type wildcard pattern '%s'. Only '/*' supported", t)) + } + if strings.HasSuffix(t, "/*") { + allowedWildcards[strings.TrimSuffix(t, "/*")] = struct{}{} + } else { + allowedTypes[t] = struct{}{} + } + } + } else { + for _, t := range defaultCompressibleContentTypes { + allowedTypes[t] = struct{}{} + } + } + + c := &Compressor{ + level: level, + encoders: make(map[string]EncoderFunc), + pooledEncoders: make(map[string]*sync.Pool), + allowedTypes: allowedTypes, + allowedWildcards: allowedWildcards, + } + + // Set the default encoders. The precedence order uses the reverse + // ordering that the encoders were added. This means adding new encoders + // will move them to the front of the order. + // + // TODO: + // lzma: Opera. + // sdch: Chrome, Android. Gzip output + dictionary header. + // br: Brotli, see https://github.com/go-chi/chi/pull/326 + + // HTTP 1.1 "deflate" (RFC 2616) stands for DEFLATE data (RFC 1951) + // wrapped with zlib (RFC 1950). The zlib wrapper uses Adler-32 + // checksum compared to CRC-32 used in "gzip" and thus is faster. + // + // But.. some old browsers (MSIE, Safari 5.1) incorrectly expect + // raw DEFLATE data only, without the mentioned zlib wrapper. + // Because of this major confusion, most modern browsers try it + // both ways, first looking for zlib headers. + // Quote by Mark Adler: http://stackoverflow.com/a/9186091/385548 + // + // The list of browsers having problems is quite big, see: + // http://zoompf.com/blog/2012/02/lose-the-wait-http-compression + // https://web.archive.org/web/20120321182910/http://www.vervestudios.co/projects/compression-tests/results + // + // That's why we prefer gzip over deflate. It's just more reliable + // and not significantly slower than gzip. + c.SetEncoder("deflate", encoderDeflate) + + // TODO: Exception for old MSIE browsers that can't handle non-HTML? + // https://zoompf.com/blog/2012/02/lose-the-wait-http-compression + c.SetEncoder("gzip", encoderGzip) + + // NOTE: Not implemented, intentionally: + // case "compress": // LZW. Deprecated. + // case "bzip2": // Too slow on-the-fly. + // case "zopfli": // Too slow on-the-fly. + // case "xz": // Too slow on-the-fly. + return c +} + +// SetEncoder can be used to set the implementation of a compression algorithm. +// +// The encoding should be a standardised identifier. See: +// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Accept-Encoding +// +// For example, add the Brotli algortithm: +// +// import brotli_enc "gopkg.in/kothar/brotli-go.v0/enc" +// +// compressor := middleware.NewCompressor(5, "text/html") +// compressor.SetEncoder("br", func(w http.ResponseWriter, level int) io.Writer { +// params := brotli_enc.NewBrotliParams() +// params.SetQuality(level) +// return brotli_enc.NewBrotliWriter(params, w) +// }) +func (c *Compressor) SetEncoder(encoding string, fn EncoderFunc) { + encoding = strings.ToLower(encoding) + if encoding == "" { + panic("the encoding can not be empty") + } + if fn == nil { + panic("attempted to set a nil encoder function") + } + + // If we are adding a new encoder that is already registered, we have to + // clear that one out first. 
+ if _, ok := c.pooledEncoders[encoding]; ok { + delete(c.pooledEncoders, encoding) + } + if _, ok := c.encoders[encoding]; ok { + delete(c.encoders, encoding) + } + + // If the encoder supports Resetting (IoReseterWriter), then it can be pooled. + encoder := fn(ioutil.Discard, c.level) + if encoder != nil { + if _, ok := encoder.(ioResetterWriter); ok { + pool := &sync.Pool{ + New: func() interface{} { + return fn(ioutil.Discard, c.level) + }, + } + c.pooledEncoders[encoding] = pool + } + } + // If the encoder is not in the pooledEncoders, add it to the normal encoders. + if _, ok := c.pooledEncoders[encoding]; !ok { + c.encoders[encoding] = fn + } + + for i, v := range c.encodingPrecedence { + if v == encoding { + c.encodingPrecedence = append(c.encodingPrecedence[:i], c.encodingPrecedence[i+1:]...) + } + } + + c.encodingPrecedence = append([]string{encoding}, c.encodingPrecedence...) +} + +// Handler returns a new middleware that will compress the response based on the +// current Compressor. +func (c *Compressor) Handler(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + encoder, encoding, cleanup := c.selectEncoder(r.Header, w) + + cw := &compressResponseWriter{ + ResponseWriter: w, + w: w, + contentTypes: c.allowedTypes, + contentWildcards: c.allowedWildcards, + encoding: encoding, + compressable: false, // determined in post-handler + } + if encoder != nil { + cw.w = encoder + } + // Re-add the encoder to the pool if applicable. + defer cleanup() + defer cw.Close() + + next.ServeHTTP(cw, r) + }) +} + +// selectEncoder returns the encoder, the name of the encoder, and a closer function. +func (c *Compressor) selectEncoder(h http.Header, w io.Writer) (io.Writer, string, func()) { + header := h.Get("Accept-Encoding") + + // Parse the names of all accepted algorithms from the header. + accepted := strings.Split(strings.ToLower(header), ",") + + // Find supported encoder by accepted list by precedence + for _, name := range c.encodingPrecedence { + if matchAcceptEncoding(accepted, name) { + if pool, ok := c.pooledEncoders[name]; ok { + encoder := pool.Get().(ioResetterWriter) + cleanup := func() { + pool.Put(encoder) + } + encoder.Reset(w) + return encoder, name, cleanup + + } + if fn, ok := c.encoders[name]; ok { + return fn(w, c.level), name, func() {} + } + } + + } + + // No encoder found to match the accepted encoding + return nil, "", func() {} +} + +func matchAcceptEncoding(accepted []string, encoding string) bool { + for _, v := range accepted { + if strings.Contains(v, encoding) { + return true + } + } + return false +} + +// An EncoderFunc is a function that wraps the provided io.Writer with a +// streaming compression algorithm and returns it. +// +// In case of failure, the function should return nil. +type EncoderFunc func(w io.Writer, level int) io.Writer + +// Interface for types that allow resetting io.Writers. +type ioResetterWriter interface { + io.Writer + Reset(w io.Writer) +} + +type compressResponseWriter struct { + http.ResponseWriter + + // The streaming encoder writer to be used if there is one. Otherwise, + // this is just the normal writer. + w io.Writer + encoding string + contentTypes map[string]struct{} + contentWildcards map[string]struct{} + wroteHeader bool + compressable bool +} + +func (cw *compressResponseWriter) isCompressable() bool { + // Parse the first part of the Content-Type response header. 
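+	// For example, "text/html; charset=utf-8" is reduced to "text/html".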
+ contentType := cw.Header().Get("Content-Type") + if idx := strings.Index(contentType, ";"); idx >= 0 { + contentType = contentType[0:idx] + } + + // Is the content type compressable? + if _, ok := cw.contentTypes[contentType]; ok { + return true + } + if idx := strings.Index(contentType, "/"); idx > 0 { + contentType = contentType[0:idx] + _, ok := cw.contentWildcards[contentType] + return ok + } + return false +} + +func (cw *compressResponseWriter) WriteHeader(code int) { + if cw.wroteHeader { + cw.ResponseWriter.WriteHeader(code) // Allow multiple calls to propagate. + return + } + cw.wroteHeader = true + defer cw.ResponseWriter.WriteHeader(code) + + // Already compressed data? + if cw.Header().Get("Content-Encoding") != "" { + return + } + + if !cw.isCompressable() { + cw.compressable = false + return + } + + if cw.encoding != "" { + cw.compressable = true + cw.Header().Set("Content-Encoding", cw.encoding) + cw.Header().Set("Vary", "Accept-Encoding") + + // The content-length after compression is unknown + cw.Header().Del("Content-Length") + } +} + +func (cw *compressResponseWriter) Write(p []byte) (int, error) { + if !cw.wroteHeader { + cw.WriteHeader(http.StatusOK) + } + + return cw.writer().Write(p) +} + +func (cw *compressResponseWriter) writer() io.Writer { + if cw.compressable { + return cw.w + } else { + return cw.ResponseWriter + } +} + +type compressFlusher interface { + Flush() error +} + +func (cw *compressResponseWriter) Flush() { + if f, ok := cw.writer().(http.Flusher); ok { + f.Flush() + } + // If the underlying writer has a compression flush signature, + // call this Flush() method instead + if f, ok := cw.writer().(compressFlusher); ok { + f.Flush() + + // Also flush the underlying response writer + if f, ok := cw.ResponseWriter.(http.Flusher); ok { + f.Flush() + } + } +} + +func (cw *compressResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { + if hj, ok := cw.writer().(http.Hijacker); ok { + return hj.Hijack() + } + return nil, nil, errors.New("chi/middleware: http.Hijacker is unavailable on the writer") +} + +func (cw *compressResponseWriter) Push(target string, opts *http.PushOptions) error { + if ps, ok := cw.writer().(http.Pusher); ok { + return ps.Push(target, opts) + } + return errors.New("chi/middleware: http.Pusher is unavailable on the writer") +} + +func (cw *compressResponseWriter) Close() error { + if c, ok := cw.writer().(io.WriteCloser); ok { + return c.Close() + } + return errors.New("chi/middleware: io.WriteCloser is unavailable on the writer") +} + +func encoderGzip(w io.Writer, level int) io.Writer { + gw, err := gzip.NewWriterLevel(w, level) + if err != nil { + return nil + } + return gw +} + +func encoderDeflate(w io.Writer, level int) io.Writer { + dw, err := flate.NewWriter(w, level) + if err != nil { + return nil + } + return dw +} diff --git a/vendor/github.com/go-chi/chi/middleware/content_charset.go b/vendor/github.com/go-chi/chi/middleware/content_charset.go new file mode 100644 index 000000000..07b5ce6f6 --- /dev/null +++ b/vendor/github.com/go-chi/chi/middleware/content_charset.go @@ -0,0 +1,51 @@ +package middleware + +import ( + "net/http" + "strings" +) + +// ContentCharset generates a handler that writes a 415 Unsupported Media Type response if none of the charsets match. +// An empty charset will allow requests with no Content-Type header or no specified charset. 
+func ContentCharset(charsets ...string) func(next http.Handler) http.Handler { + for i, c := range charsets { + charsets[i] = strings.ToLower(c) + } + + return func(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if !contentEncoding(r.Header.Get("Content-Type"), charsets...) { + w.WriteHeader(http.StatusUnsupportedMediaType) + return + } + + next.ServeHTTP(w, r) + }) + } +} + +// Check the content encoding against a list of acceptable values. +func contentEncoding(ce string, charsets ...string) bool { + _, ce = split(strings.ToLower(ce), ";") + _, ce = split(ce, "charset=") + ce, _ = split(ce, ";") + for _, c := range charsets { + if ce == c { + return true + } + } + + return false +} + +// Split a string in two parts, cleaning any whitespace. +func split(str, sep string) (string, string) { + var a, b string + var parts = strings.SplitN(str, sep, 2) + a = strings.TrimSpace(parts[0]) + if len(parts) == 2 { + b = strings.TrimSpace(parts[1]) + } + + return a, b +} diff --git a/vendor/github.com/go-chi/chi/middleware/content_encoding.go b/vendor/github.com/go-chi/chi/middleware/content_encoding.go new file mode 100644 index 000000000..e0b9ccc08 --- /dev/null +++ b/vendor/github.com/go-chi/chi/middleware/content_encoding.go @@ -0,0 +1,34 @@ +package middleware + +import ( + "net/http" + "strings" +) + +// AllowContentEncoding enforces a whitelist of request Content-Encoding otherwise responds +// with a 415 Unsupported Media Type status. +func AllowContentEncoding(contentEncoding ...string) func(next http.Handler) http.Handler { + allowedEncodings := make(map[string]struct{}, len(contentEncoding)) + for _, encoding := range contentEncoding { + allowedEncodings[strings.TrimSpace(strings.ToLower(encoding))] = struct{}{} + } + return func(next http.Handler) http.Handler { + fn := func(w http.ResponseWriter, r *http.Request) { + requestEncodings := r.Header["Content-Encoding"] + // skip check for empty content body or no Content-Encoding + if r.ContentLength == 0 { + next.ServeHTTP(w, r) + return + } + // All encodings in the request must be allowed + for _, encoding := range requestEncodings { + if _, ok := allowedEncodings[strings.TrimSpace(strings.ToLower(encoding))]; !ok { + w.WriteHeader(http.StatusUnsupportedMediaType) + return + } + } + next.ServeHTTP(w, r) + } + return http.HandlerFunc(fn) + } +} diff --git a/vendor/github.com/go-chi/chi/middleware/content_type.go b/vendor/github.com/go-chi/chi/middleware/content_type.go new file mode 100644 index 000000000..ee4957874 --- /dev/null +++ b/vendor/github.com/go-chi/chi/middleware/content_type.go @@ -0,0 +1,51 @@ +package middleware + +import ( + "net/http" + "strings" +) + +// SetHeader is a convenience handler to set a response header key/value +func SetHeader(key, value string) func(next http.Handler) http.Handler { + return func(next http.Handler) http.Handler { + fn := func(w http.ResponseWriter, r *http.Request) { + w.Header().Set(key, value) + next.ServeHTTP(w, r) + } + return http.HandlerFunc(fn) + } +} + +// AllowContentType enforces a whitelist of request Content-Types otherwise responds +// with a 415 Unsupported Media Type status. 
+func AllowContentType(contentTypes ...string) func(next http.Handler) http.Handler { + cT := []string{} + for _, t := range contentTypes { + cT = append(cT, strings.ToLower(t)) + } + + return func(next http.Handler) http.Handler { + fn := func(w http.ResponseWriter, r *http.Request) { + if r.ContentLength == 0 { + // skip check for empty content body + next.ServeHTTP(w, r) + return + } + + s := strings.ToLower(strings.TrimSpace(r.Header.Get("Content-Type"))) + if i := strings.Index(s, ";"); i > -1 { + s = s[0:i] + } + + for _, t := range cT { + if t == s { + next.ServeHTTP(w, r) + return + } + } + + w.WriteHeader(http.StatusUnsupportedMediaType) + } + return http.HandlerFunc(fn) + } +} diff --git a/vendor/github.com/go-chi/chi/middleware/get_head.go b/vendor/github.com/go-chi/chi/middleware/get_head.go new file mode 100644 index 000000000..86068a96d --- /dev/null +++ b/vendor/github.com/go-chi/chi/middleware/get_head.go @@ -0,0 +1,39 @@ +package middleware + +import ( + "net/http" + + "github.com/go-chi/chi" +) + +// GetHead automatically route undefined HEAD requests to GET handlers. +func GetHead(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method == "HEAD" { + rctx := chi.RouteContext(r.Context()) + routePath := rctx.RoutePath + if routePath == "" { + if r.URL.RawPath != "" { + routePath = r.URL.RawPath + } else { + routePath = r.URL.Path + } + } + + // Temporary routing context to look-ahead before routing the request + tctx := chi.NewRouteContext() + + // Attempt to find a HEAD handler for the routing path, if not found, traverse + // the router as through its a GET route, but proceed with the request + // with the HEAD method. + if !rctx.Routes.Match(tctx, "HEAD", routePath) { + rctx.RouteMethod = "GET" + rctx.RoutePath = routePath + next.ServeHTTP(w, r) + return + } + } + + next.ServeHTTP(w, r) + }) +} diff --git a/vendor/github.com/go-chi/chi/middleware/heartbeat.go b/vendor/github.com/go-chi/chi/middleware/heartbeat.go new file mode 100644 index 000000000..fe822fb53 --- /dev/null +++ b/vendor/github.com/go-chi/chi/middleware/heartbeat.go @@ -0,0 +1,26 @@ +package middleware + +import ( + "net/http" + "strings" +) + +// Heartbeat endpoint middleware useful to setting up a path like +// `/ping` that load balancers or uptime testing external services +// can make a request before hitting any routes. It's also convenient +// to place this above ACL middlewares as well. +func Heartbeat(endpoint string) func(http.Handler) http.Handler { + f := func(h http.Handler) http.Handler { + fn := func(w http.ResponseWriter, r *http.Request) { + if r.Method == "GET" && strings.EqualFold(r.URL.Path, endpoint) { + w.Header().Set("Content-Type", "text/plain") + w.WriteHeader(http.StatusOK) + w.Write([]byte(".")) + return + } + h.ServeHTTP(w, r) + } + return http.HandlerFunc(fn) + } + return f +} diff --git a/vendor/github.com/go-chi/chi/middleware/logger.go b/vendor/github.com/go-chi/chi/middleware/logger.go new file mode 100644 index 000000000..158a6a390 --- /dev/null +++ b/vendor/github.com/go-chi/chi/middleware/logger.go @@ -0,0 +1,155 @@ +package middleware + +import ( + "bytes" + "context" + "log" + "net/http" + "os" + "time" +) + +var ( + // LogEntryCtxKey is the context.Context key to store the request log entry. + LogEntryCtxKey = &contextKey{"LogEntry"} + + // DefaultLogger is called by the Logger middleware handler to log each request. 
+ // Its made a package-level variable so that it can be reconfigured for custom + // logging configurations. + DefaultLogger = RequestLogger(&DefaultLogFormatter{Logger: log.New(os.Stdout, "", log.LstdFlags), NoColor: false}) +) + +// Logger is a middleware that logs the start and end of each request, along +// with some useful data about what was requested, what the response status was, +// and how long it took to return. When standard output is a TTY, Logger will +// print in color, otherwise it will print in black and white. Logger prints a +// request ID if one is provided. +// +// Alternatively, look at https://github.com/goware/httplog for a more in-depth +// http logger with structured logging support. +func Logger(next http.Handler) http.Handler { + return DefaultLogger(next) +} + +// RequestLogger returns a logger handler using a custom LogFormatter. +func RequestLogger(f LogFormatter) func(next http.Handler) http.Handler { + return func(next http.Handler) http.Handler { + fn := func(w http.ResponseWriter, r *http.Request) { + entry := f.NewLogEntry(r) + ww := NewWrapResponseWriter(w, r.ProtoMajor) + + t1 := time.Now() + defer func() { + entry.Write(ww.Status(), ww.BytesWritten(), ww.Header(), time.Since(t1), nil) + }() + + next.ServeHTTP(ww, WithLogEntry(r, entry)) + } + return http.HandlerFunc(fn) + } +} + +// LogFormatter initiates the beginning of a new LogEntry per request. +// See DefaultLogFormatter for an example implementation. +type LogFormatter interface { + NewLogEntry(r *http.Request) LogEntry +} + +// LogEntry records the final log when a request completes. +// See defaultLogEntry for an example implementation. +type LogEntry interface { + Write(status, bytes int, header http.Header, elapsed time.Duration, extra interface{}) + Panic(v interface{}, stack []byte) +} + +// GetLogEntry returns the in-context LogEntry for a request. +func GetLogEntry(r *http.Request) LogEntry { + entry, _ := r.Context().Value(LogEntryCtxKey).(LogEntry) + return entry +} + +// WithLogEntry sets the in-context LogEntry for a request. +func WithLogEntry(r *http.Request, entry LogEntry) *http.Request { + r = r.WithContext(context.WithValue(r.Context(), LogEntryCtxKey, entry)) + return r +} + +// LoggerInterface accepts printing to stdlib logger or compatible logger. +type LoggerInterface interface { + Print(v ...interface{}) +} + +// DefaultLogFormatter is a simple logger that implements a LogFormatter. +type DefaultLogFormatter struct { + Logger LoggerInterface + NoColor bool +} + +// NewLogEntry creates a new LogEntry for the request. 
+func (l *DefaultLogFormatter) NewLogEntry(r *http.Request) LogEntry { + useColor := !l.NoColor + entry := &defaultLogEntry{ + DefaultLogFormatter: l, + request: r, + buf: &bytes.Buffer{}, + useColor: useColor, + } + + reqID := GetReqID(r.Context()) + if reqID != "" { + cW(entry.buf, useColor, nYellow, "[%s] ", reqID) + } + cW(entry.buf, useColor, nCyan, "\"") + cW(entry.buf, useColor, bMagenta, "%s ", r.Method) + + scheme := "http" + if r.TLS != nil { + scheme = "https" + } + cW(entry.buf, useColor, nCyan, "%s://%s%s %s\" ", scheme, r.Host, r.RequestURI, r.Proto) + + entry.buf.WriteString("from ") + entry.buf.WriteString(r.RemoteAddr) + entry.buf.WriteString(" - ") + + return entry +} + +type defaultLogEntry struct { + *DefaultLogFormatter + request *http.Request + buf *bytes.Buffer + useColor bool +} + +func (l *defaultLogEntry) Write(status, bytes int, header http.Header, elapsed time.Duration, extra interface{}) { + switch { + case status < 200: + cW(l.buf, l.useColor, bBlue, "%03d", status) + case status < 300: + cW(l.buf, l.useColor, bGreen, "%03d", status) + case status < 400: + cW(l.buf, l.useColor, bCyan, "%03d", status) + case status < 500: + cW(l.buf, l.useColor, bYellow, "%03d", status) + default: + cW(l.buf, l.useColor, bRed, "%03d", status) + } + + cW(l.buf, l.useColor, bBlue, " %dB", bytes) + + l.buf.WriteString(" in ") + if elapsed < 500*time.Millisecond { + cW(l.buf, l.useColor, nGreen, "%s", elapsed) + } else if elapsed < 5*time.Second { + cW(l.buf, l.useColor, nYellow, "%s", elapsed) + } else { + cW(l.buf, l.useColor, nRed, "%s", elapsed) + } + + l.Logger.Print(l.buf.String()) +} + +func (l *defaultLogEntry) Panic(v interface{}, stack []byte) { + PrintPrettyStack(v) +} diff --git a/vendor/github.com/go-chi/chi/middleware/middleware.go b/vendor/github.com/go-chi/chi/middleware/middleware.go new file mode 100644 index 000000000..cc371e00a --- /dev/null +++ b/vendor/github.com/go-chi/chi/middleware/middleware.go @@ -0,0 +1,23 @@ +package middleware + +import "net/http" + +// New will create a new middleware handler from a http.Handler. +func New(h http.Handler) func(next http.Handler) http.Handler { + return func(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + h.ServeHTTP(w, r) + }) + } +} + +// contextKey is a value for use with context.WithValue. It's used as +// a pointer so it fits in an interface{} without allocation. This technique +// for defining context keys was copied from Go 1.7's new use of context in net/http. 
+type contextKey struct { + name string +} + +func (k *contextKey) String() string { + return "chi/middleware context value " + k.name +} diff --git a/vendor/github.com/go-chi/chi/middleware/nocache.go b/vendor/github.com/go-chi/chi/middleware/nocache.go new file mode 100644 index 000000000..2412829e1 --- /dev/null +++ b/vendor/github.com/go-chi/chi/middleware/nocache.go @@ -0,0 +1,58 @@ +package middleware + +// Ported from Goji's middleware, source: +// https://github.com/zenazn/goji/tree/master/web/middleware + +import ( + "net/http" + "time" +) + +// Unix epoch time +var epoch = time.Unix(0, 0).Format(time.RFC1123) + +// Taken from https://github.com/mytrile/nocache +var noCacheHeaders = map[string]string{ + "Expires": epoch, + "Cache-Control": "no-cache, no-store, no-transform, must-revalidate, private, max-age=0", + "Pragma": "no-cache", + "X-Accel-Expires": "0", +} + +var etagHeaders = []string{ + "ETag", + "If-Modified-Since", + "If-Match", + "If-None-Match", + "If-Range", + "If-Unmodified-Since", +} + +// NoCache is a simple piece of middleware that sets a number of HTTP headers to prevent +// a router (or subrouter) from being cached by an upstream proxy and/or client. +// +// As per http://wiki.nginx.org/HttpProxyModule - NoCache sets: +// Expires: Thu, 01 Jan 1970 00:00:00 UTC +// Cache-Control: no-cache, private, max-age=0 +// X-Accel-Expires: 0 +// Pragma: no-cache (for HTTP/1.0 proxies/clients) +func NoCache(h http.Handler) http.Handler { + fn := func(w http.ResponseWriter, r *http.Request) { + + // Delete any ETag headers that may have been set + for _, v := range etagHeaders { + if r.Header.Get(v) != "" { + r.Header.Del(v) + } + } + + // Set our NoCache headers + for k, v := range noCacheHeaders { + w.Header().Set(k, v) + } + + h.ServeHTTP(w, r) + } + + return http.HandlerFunc(fn) +} diff --git a/vendor/github.com/go-chi/chi/middleware/profiler.go b/vendor/github.com/go-chi/chi/middleware/profiler.go new file mode 100644 index 000000000..1d44b8259 --- /dev/null +++ b/vendor/github.com/go-chi/chi/middleware/profiler.go @@ -0,0 +1,55 @@ +package middleware + +import ( + "expvar" + "fmt" + "net/http" + "net/http/pprof" + + "github.com/go-chi/chi" +) + +// Profiler is a convenient subrouter used for mounting net/http/pprof. ie. +// +// func MyService() http.Handler { +// r := chi.NewRouter() +// // ..middlewares +// r.Mount("/debug", middleware.Profiler()) +// // ..routes +// return r +// } +func Profiler() http.Handler { + r := chi.NewRouter() + r.Use(NoCache) + + r.Get("/", func(w http.ResponseWriter, r *http.Request) { + http.Redirect(w, r, r.RequestURI+"/pprof/", 301) + }) + r.HandleFunc("/pprof", func(w http.ResponseWriter, r *http.Request) { + http.Redirect(w, r, r.RequestURI+"/", 301) + }) + + r.HandleFunc("/pprof/*", pprof.Index) + r.HandleFunc("/pprof/cmdline", pprof.Cmdline) + r.HandleFunc("/pprof/profile", pprof.Profile) + r.HandleFunc("/pprof/symbol", pprof.Symbol) + r.HandleFunc("/pprof/trace", pprof.Trace) + r.HandleFunc("/vars", expVars) + + return r +} + +// Replicated from expvar.go as not public. 
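+// expVars writes all published expvar variables as a single JSON object,
+// mirroring the behavior of expvar.Handler.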
+func expVars(w http.ResponseWriter, r *http.Request) { + first := true + w.Header().Set("Content-Type", "application/json") + fmt.Fprintf(w, "{\n") + expvar.Do(func(kv expvar.KeyValue) { + if !first { + fmt.Fprintf(w, ",\n") + } + first = false + fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value) + }) + fmt.Fprintf(w, "\n}\n") +} diff --git a/vendor/github.com/go-chi/chi/middleware/realip.go b/vendor/github.com/go-chi/chi/middleware/realip.go new file mode 100644 index 000000000..72db6ca9f --- /dev/null +++ b/vendor/github.com/go-chi/chi/middleware/realip.go @@ -0,0 +1,54 @@ +package middleware + +// Ported from Goji's middleware, source: +// https://github.com/zenazn/goji/tree/master/web/middleware + +import ( + "net/http" + "strings" +) + +var xForwardedFor = http.CanonicalHeaderKey("X-Forwarded-For") +var xRealIP = http.CanonicalHeaderKey("X-Real-IP") + +// RealIP is a middleware that sets a http.Request's RemoteAddr to the results +// of parsing either the X-Forwarded-For header or the X-Real-IP header (in that +// order). +// +// This middleware should be inserted fairly early in the middleware stack to +// ensure that subsequent layers (e.g., request loggers) which examine the +// RemoteAddr will see the intended value. +// +// You should only use this middleware if you can trust the headers passed to +// you (in particular, the two headers this middleware uses), for example +// because you have placed a reverse proxy like HAProxy or nginx in front of +// chi. If your reverse proxies are configured to pass along arbitrary header +// values from the client, or if you use this middleware without a reverse +// proxy, malicious clients will be able to make you very sad (or, depending on +// how you're using RemoteAddr, vulnerable to an attack of some sort). +func RealIP(h http.Handler) http.Handler { + fn := func(w http.ResponseWriter, r *http.Request) { + if rip := realIP(r); rip != "" { + r.RemoteAddr = rip + } + h.ServeHTTP(w, r) + } + + return http.HandlerFunc(fn) +} + +func realIP(r *http.Request) string { + var ip string + + if xrip := r.Header.Get(xRealIP); xrip != "" { + ip = xrip + } else if xff := r.Header.Get(xForwardedFor); xff != "" { + i := strings.Index(xff, ", ") + if i == -1 { + i = len(xff) + } + ip = xff[:i] + } + + return ip +} diff --git a/vendor/github.com/go-chi/chi/middleware/recoverer.go b/vendor/github.com/go-chi/chi/middleware/recoverer.go new file mode 100644 index 000000000..785b18c52 --- /dev/null +++ b/vendor/github.com/go-chi/chi/middleware/recoverer.go @@ -0,0 +1,192 @@ +package middleware + +// The original work was derived from Goji's middleware, source: +// https://github.com/zenazn/goji/tree/master/web/middleware + +import ( + "bytes" + "errors" + "fmt" + "net/http" + "os" + "runtime/debug" + "strings" +) + +// Recoverer is a middleware that recovers from panics, logs the panic (and a +// backtrace), and returns a HTTP 500 (Internal Server Error) status if +// possible. Recoverer prints a request ID if one is provided. +// +// Alternatively, look at https://github.com/pressly/lg middleware pkgs. 
+func Recoverer(next http.Handler) http.Handler { + fn := func(w http.ResponseWriter, r *http.Request) { + defer func() { + if rvr := recover(); rvr != nil && rvr != http.ErrAbortHandler { + + logEntry := GetLogEntry(r) + if logEntry != nil { + logEntry.Panic(rvr, debug.Stack()) + } else { + PrintPrettyStack(rvr) + } + + w.WriteHeader(http.StatusInternalServerError) + } + }() + + next.ServeHTTP(w, r) + } + + return http.HandlerFunc(fn) +} + +func PrintPrettyStack(rvr interface{}) { + debugStack := debug.Stack() + s := prettyStack{} + out, err := s.parse(debugStack, rvr) + if err == nil { + os.Stderr.Write(out) + } else { + // print stdlib output as a fallback + os.Stderr.Write(debugStack) + } +} + +type prettyStack struct { +} + +func (s prettyStack) parse(debugStack []byte, rvr interface{}) ([]byte, error) { + var err error + useColor := true + buf := &bytes.Buffer{} + + cW(buf, false, bRed, "\n") + cW(buf, useColor, bCyan, " panic: ") + cW(buf, useColor, bBlue, "%v", rvr) + cW(buf, false, bWhite, "\n \n") + + // process debug stack info + stack := strings.Split(string(debugStack), "\n") + lines := []string{} + + // locate panic line, as we may have nested panics + for i := len(stack) - 1; i > 0; i-- { + lines = append(lines, stack[i]) + if strings.HasPrefix(stack[i], "panic(0x") { + lines = lines[0 : len(lines)-2] // remove boilerplate + break + } + } + + // reverse + for i := len(lines)/2 - 1; i >= 0; i-- { + opp := len(lines) - 1 - i + lines[i], lines[opp] = lines[opp], lines[i] + } + + // decorate + for i, line := range lines { + lines[i], err = s.decorateLine(line, useColor, i) + if err != nil { + return nil, err + } + } + + for _, l := range lines { + fmt.Fprintf(buf, "%s", l) + } + return buf.Bytes(), nil +} + +func (s prettyStack) decorateLine(line string, useColor bool, num int) (string, error) { + line = strings.TrimSpace(line) + if strings.HasPrefix(line, "\t") || strings.Contains(line, ".go:") { + return s.decorateSourceLine(line, useColor, num) + } else if strings.HasSuffix(line, ")") { + return s.decorateFuncCallLine(line, useColor, num) + } else { + if strings.HasPrefix(line, "\t") { + return strings.Replace(line, "\t", " ", 1), nil + } else { + return fmt.Sprintf(" %s\n", line), nil + } + } +} + +func (s prettyStack) decorateFuncCallLine(line string, useColor bool, num int) (string, error) { + idx := strings.LastIndex(line, "(") + if idx < 0 { + return "", errors.New("not a func call line") + } + + buf := &bytes.Buffer{} + pkg := line[0:idx] + // addr := line[idx:] + method := "" + + idx = strings.LastIndex(pkg, string(os.PathSeparator)) + if idx < 0 { + idx = strings.Index(pkg, ".") + method = pkg[idx:] + pkg = pkg[0:idx] + } else { + method = pkg[idx+1:] + pkg = pkg[0 : idx+1] + idx = strings.Index(method, ".") + pkg += method[0:idx] + method = method[idx:] + } + pkgColor := nYellow + methodColor := bGreen + + if num == 0 { + cW(buf, useColor, bRed, " -> ") + pkgColor = bMagenta + methodColor = bRed + } else { + cW(buf, useColor, bWhite, " ") + } + cW(buf, useColor, pkgColor, "%s", pkg) + cW(buf, useColor, methodColor, "%s\n", method) + // cW(buf, useColor, nBlack, "%s", addr) + return buf.String(), nil +} + +func (s prettyStack) decorateSourceLine(line string, useColor bool, num int) (string, error) { + idx := strings.LastIndex(line, ".go:") + if idx < 0 { + return "", errors.New("not a source line") + } + + buf := &bytes.Buffer{} + path := line[0 : idx+3] + lineno := line[idx+3:] + + idx = strings.LastIndex(path, string(os.PathSeparator)) + dir := path[0 : idx+1] + 
file := path[idx+1:] + + idx = strings.Index(lineno, " ") + if idx > 0 { + lineno = lineno[0:idx] + } + fileColor := bCyan + lineColor := bGreen + + if num == 1 { + cW(buf, useColor, bRed, " -> ") + fileColor = bRed + lineColor = bMagenta + } else { + cW(buf, false, bWhite, " ") + } + cW(buf, useColor, bWhite, "%s", dir) + cW(buf, useColor, fileColor, "%s", file) + cW(buf, useColor, lineColor, "%s", lineno) + if num == 1 { + cW(buf, false, bWhite, "\n") + } + cW(buf, false, bWhite, "\n") + + return buf.String(), nil +} diff --git a/vendor/github.com/go-chi/chi/middleware/request_id.go b/vendor/github.com/go-chi/chi/middleware/request_id.go new file mode 100644 index 000000000..4903ecc21 --- /dev/null +++ b/vendor/github.com/go-chi/chi/middleware/request_id.go @@ -0,0 +1,96 @@ +package middleware + +// Ported from Goji's middleware, source: +// https://github.com/zenazn/goji/tree/master/web/middleware + +import ( + "context" + "crypto/rand" + "encoding/base64" + "fmt" + "net/http" + "os" + "strings" + "sync/atomic" +) + +// Key to use when setting the request ID. +type ctxKeyRequestID int + +// RequestIDKey is the key that holds the unique request ID in a request context. +const RequestIDKey ctxKeyRequestID = 0 + +// RequestIDHeader is the name of the HTTP Header which contains the request id. +// Exported so that it can be changed by developers +var RequestIDHeader = "X-Request-Id" + +var prefix string +var reqid uint64 + +// A quick note on the statistics here: we're trying to calculate the chance that +// two randomly generated base62 prefixes will collide. We use the formula from +// http://en.wikipedia.org/wiki/Birthday_problem +// +// P[m, n] \approx 1 - e^{-m^2/2n} +// +// We ballpark an upper bound for $m$ by imagining (for whatever reason) a server +// that restarts every second over 10 years, for $m = 86400 * 365 * 10 = 315360000$ +// +// For a $k$ character base-62 identifier, we have $n(k) = 62^k$ +// +// Plugging this in, we find $P[m, n(10)] \approx 5.75%$, which is good enough for +// our purposes, and is surely more than anyone would ever need in practice -- a +// process that is rebooted a handful of times a day for a hundred years has less +// than a millionth of a percent chance of generating two colliding IDs. + +func init() { + hostname, err := os.Hostname() + if hostname == "" || err != nil { + hostname = "localhost" + } + var buf [12]byte + var b64 string + for len(b64) < 10 { + rand.Read(buf[:]) + b64 = base64.StdEncoding.EncodeToString(buf[:]) + b64 = strings.NewReplacer("+", "", "/", "").Replace(b64) + } + + prefix = fmt.Sprintf("%s/%s", hostname, b64[0:10]) +} + +// RequestID is a middleware that injects a request ID into the context of each +// request. A request ID is a string of the form "host.example.com/random-0001", +// where "random" is a base62 random string that uniquely identifies this go +// process, and where the last number is an atomically incremented request +// counter. +func RequestID(next http.Handler) http.Handler { + fn := func(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + requestID := r.Header.Get(RequestIDHeader) + if requestID == "" { + myid := atomic.AddUint64(&reqid, 1) + requestID = fmt.Sprintf("%s-%06d", prefix, myid) + } + ctx = context.WithValue(ctx, RequestIDKey, requestID) + next.ServeHTTP(w, r.WithContext(ctx)) + } + return http.HandlerFunc(fn) +} + +// GetReqID returns a request ID from the given context if one is present. +// Returns the empty string if a request ID cannot be found. 
+func GetReqID(ctx context.Context) string { + if ctx == nil { + return "" + } + if reqID, ok := ctx.Value(RequestIDKey).(string); ok { + return reqID + } + return "" +} + +// NextRequestID generates the next request ID in the sequence. +func NextRequestID() uint64 { + return atomic.AddUint64(&reqid, 1) +} diff --git a/vendor/github.com/go-chi/chi/middleware/route_headers.go b/vendor/github.com/go-chi/chi/middleware/route_headers.go new file mode 100644 index 000000000..7ee30c877 --- /dev/null +++ b/vendor/github.com/go-chi/chi/middleware/route_headers.go @@ -0,0 +1,160 @@ +package middleware + +import ( + "net/http" + "strings" +) + +// RouteHeaders is a neat little header-based router that allows you to direct +// the flow of a request through a middleware stack based on a request header. +// +// For example, lets say you'd like to setup multiple routers depending on the +// request Host header, you could then do something as so: +// +// r := chi.NewRouter() +// rSubdomain := chi.NewRouter() +// +// r.Use(middleware.RouteHeaders(). +// Route("Host", "example.com", middleware.New(r)). +// Route("Host", "*.example.com", middleware.New(rSubdomain)). +// Handler) +// +// r.Get("/", h) +// rSubdomain.Get("/", h2) +// +// +// Another example, imagine you want to setup multiple CORS handlers, where for +// your origin servers you allow authorized requests, but for third-party public +// requests, authorization is disabled. +// +// r := chi.NewRouter() +// +// r.Use(middleware.RouteHeaders(). +// Route("Origin", "https://app.skyweaver.net", cors.Handler(cors.Options{ +// AllowedOrigins: []string{"https://api.skyweaver.net"}, +// AllowedMethods: []string{"GET", "POST", "PUT", "DELETE", "OPTIONS"}, +// AllowedHeaders: []string{"Accept", "Authorization", "Content-Type"}, +// AllowCredentials: true, // <----------<<< allow credentials +// })). +// Route("Origin", "*", cors.Handler(cors.Options{ +// AllowedOrigins: []string{"*"}, +// AllowedMethods: []string{"GET", "POST", "PUT", "DELETE", "OPTIONS"}, +// AllowedHeaders: []string{"Accept", "Content-Type"}, +// AllowCredentials: false, // <----------<<< do not allow credentials +// })). 
+// Handler) +// +func RouteHeaders() HeaderRouter { + return HeaderRouter{} +} + +type HeaderRouter map[string][]HeaderRoute + +func (hr HeaderRouter) Route(header string, match string, middlewareHandler func(next http.Handler) http.Handler) HeaderRouter { + header = strings.ToLower(header) + k := hr[header] + if k == nil { + hr[header] = []HeaderRoute{} + } + hr[header] = append(hr[header], HeaderRoute{MatchOne: NewPattern(match), Middleware: middlewareHandler}) + return hr +} + +func (hr HeaderRouter) RouteAny(header string, match []string, middlewareHandler func(next http.Handler) http.Handler) HeaderRouter { + header = strings.ToLower(header) + k := hr[header] + if k == nil { + hr[header] = []HeaderRoute{} + } + patterns := []Pattern{} + for _, m := range match { + patterns = append(patterns, NewPattern(m)) + } + hr[header] = append(hr[header], HeaderRoute{MatchAny: patterns, Middleware: middlewareHandler}) + return hr +} + +func (hr HeaderRouter) RouteDefault(handler func(next http.Handler) http.Handler) HeaderRouter { + hr["*"] = []HeaderRoute{{Middleware: handler}} + return hr +} + +func (hr HeaderRouter) Handler(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if len(hr) == 0 { + // skip if no routes set + next.ServeHTTP(w, r) + } + + // find first matching header route, and continue + for header, matchers := range hr { + headerValue := r.Header.Get(header) + if headerValue == "" { + continue + } + headerValue = strings.ToLower(headerValue) + for _, matcher := range matchers { + if matcher.IsMatch(headerValue) { + matcher.Middleware(next).ServeHTTP(w, r) + return + } + } + } + + // if no match, check for "*" default route + matcher, ok := hr["*"] + if !ok || matcher[0].Middleware == nil { + next.ServeHTTP(w, r) + return + } + matcher[0].Middleware(next).ServeHTTP(w, r) + }) +} + +type HeaderRoute struct { + MatchAny []Pattern + MatchOne Pattern + Middleware func(next http.Handler) http.Handler +} + +func (r HeaderRoute) IsMatch(value string) bool { + if len(r.MatchAny) > 0 { + for _, m := range r.MatchAny { + if m.Match(value) { + return true + } + } + } else if r.MatchOne.Match(value) { + return true + } + return false +} + +type Pattern struct { + prefix string + suffix string + wildcard bool +} + +func NewPattern(value string) Pattern { + p := Pattern{} + if i := strings.IndexByte(value, '*'); i >= 0 { + p.wildcard = true + p.prefix = value[0:i] + p.suffix = value[i+1:] + } else { + p.prefix = value + } + return p +} + +func (p Pattern) Match(v string) bool { + if !p.wildcard { + if p.prefix == v { + return true + } else { + return false + } + } + return len(v) >= len(p.prefix+p.suffix) && strings.HasPrefix(v, p.prefix) && strings.HasSuffix(v, p.suffix) +} diff --git a/vendor/github.com/go-chi/chi/middleware/strip.go b/vendor/github.com/go-chi/chi/middleware/strip.go new file mode 100644 index 000000000..2b8b1842a --- /dev/null +++ b/vendor/github.com/go-chi/chi/middleware/strip.go @@ -0,0 +1,56 @@ +package middleware + +import ( + "fmt" + "net/http" + + "github.com/go-chi/chi" +) + +// StripSlashes is a middleware that will match request paths with a trailing +// slash, strip it from the path and continue routing through the mux, if a route +// matches, then it will serve the handler. 
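The prefix/suffix wildcard handled by Pattern above is easiest to see with concrete values; note that Handler lowercases the incoming header value before matching, so patterns are best written in lower case. The hostnames below are illustrative.

```go
package main

import (
	"fmt"

	"github.com/go-chi/chi/middleware"
)

func main() {
	// A single '*' splits the pattern into a required prefix and suffix.
	p := middleware.NewPattern("*.example.com")

	fmt.Println(p.Match("api.example.com")) // true: carries the ".example.com" suffix
	fmt.Println(p.Match("example.com"))     // false: too short to satisfy prefix+suffix
	fmt.Println(p.Match("api.example.org")) // false: suffix does not match
}
```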
+func StripSlashes(next http.Handler) http.Handler { + fn := func(w http.ResponseWriter, r *http.Request) { + var path string + rctx := chi.RouteContext(r.Context()) + if rctx.RoutePath != "" { + path = rctx.RoutePath + } else { + path = r.URL.Path + } + if len(path) > 1 && path[len(path)-1] == '/' { + rctx.RoutePath = path[:len(path)-1] + } + next.ServeHTTP(w, r) + } + return http.HandlerFunc(fn) +} + +// RedirectSlashes is a middleware that will match request paths with a trailing +// slash and redirect to the same path, less the trailing slash. +// +// NOTE: RedirectSlashes middleware is *incompatible* with http.FileServer, +// see https://github.com/go-chi/chi/issues/343 +func RedirectSlashes(next http.Handler) http.Handler { + fn := func(w http.ResponseWriter, r *http.Request) { + var path string + rctx := chi.RouteContext(r.Context()) + if rctx.RoutePath != "" { + path = rctx.RoutePath + } else { + path = r.URL.Path + } + if len(path) > 1 && path[len(path)-1] == '/' { + if r.URL.RawQuery != "" { + path = fmt.Sprintf("%s?%s", path[:len(path)-1], r.URL.RawQuery) + } else { + path = path[:len(path)-1] + } + http.Redirect(w, r, path, 301) + return + } + next.ServeHTTP(w, r) + } + return http.HandlerFunc(fn) +} diff --git a/vendor/github.com/go-chi/chi/middleware/terminal.go b/vendor/github.com/go-chi/chi/middleware/terminal.go new file mode 100644 index 000000000..5ead7b924 --- /dev/null +++ b/vendor/github.com/go-chi/chi/middleware/terminal.go @@ -0,0 +1,63 @@ +package middleware + +// Ported from Goji's middleware, source: +// https://github.com/zenazn/goji/tree/master/web/middleware + +import ( + "fmt" + "io" + "os" +) + +var ( + // Normal colors + nBlack = []byte{'\033', '[', '3', '0', 'm'} + nRed = []byte{'\033', '[', '3', '1', 'm'} + nGreen = []byte{'\033', '[', '3', '2', 'm'} + nYellow = []byte{'\033', '[', '3', '3', 'm'} + nBlue = []byte{'\033', '[', '3', '4', 'm'} + nMagenta = []byte{'\033', '[', '3', '5', 'm'} + nCyan = []byte{'\033', '[', '3', '6', 'm'} + nWhite = []byte{'\033', '[', '3', '7', 'm'} + // Bright colors + bBlack = []byte{'\033', '[', '3', '0', ';', '1', 'm'} + bRed = []byte{'\033', '[', '3', '1', ';', '1', 'm'} + bGreen = []byte{'\033', '[', '3', '2', ';', '1', 'm'} + bYellow = []byte{'\033', '[', '3', '3', ';', '1', 'm'} + bBlue = []byte{'\033', '[', '3', '4', ';', '1', 'm'} + bMagenta = []byte{'\033', '[', '3', '5', ';', '1', 'm'} + bCyan = []byte{'\033', '[', '3', '6', ';', '1', 'm'} + bWhite = []byte{'\033', '[', '3', '7', ';', '1', 'm'} + + reset = []byte{'\033', '[', '0', 'm'} +) + +var IsTTY bool + +func init() { + // This is sort of cheating: if stdout is a character device, we assume + // that means it's a TTY. Unfortunately, there are many non-TTY + // character devices, but fortunately stdout is rarely set to any of + // them. + // + // We could solve this properly by pulling in a dependency on + // code.google.com/p/go.crypto/ssh/terminal, for instance, but as a + // heuristic for whether to print in color or in black-and-white, I'd + // really rather not. + fi, err := os.Stdout.Stat() + if err == nil { + m := os.ModeDevice | os.ModeCharDevice + IsTTY = fi.Mode()&m == m + } +} + +// colorWrite +func cW(w io.Writer, useColor bool, color []byte, s string, args ...interface{}) { + if IsTTY && useColor { + w.Write(color) + } + fmt.Fprintf(w, s, args...) 
+ if IsTTY && useColor { + w.Write(reset) + } +} diff --git a/vendor/github.com/go-chi/chi/middleware/throttle.go b/vendor/github.com/go-chi/chi/middleware/throttle.go new file mode 100644 index 000000000..fdedd3c12 --- /dev/null +++ b/vendor/github.com/go-chi/chi/middleware/throttle.go @@ -0,0 +1,132 @@ +package middleware + +import ( + "net/http" + "strconv" + "time" +) + +const ( + errCapacityExceeded = "Server capacity exceeded." + errTimedOut = "Timed out while waiting for a pending request to complete." + errContextCanceled = "Context was canceled." +) + +var ( + defaultBacklogTimeout = time.Second * 60 +) + +// ThrottleOpts represents a set of throttling options. +type ThrottleOpts struct { + Limit int + BacklogLimit int + BacklogTimeout time.Duration + RetryAfterFn func(ctxDone bool) time.Duration +} + +// Throttle is a middleware that limits number of currently processed requests +// at a time across all users. Note: Throttle is not a rate-limiter per user, +// instead it just puts a ceiling on the number of currentl in-flight requests +// being processed from the point from where the Throttle middleware is mounted. +func Throttle(limit int) func(http.Handler) http.Handler { + return ThrottleWithOpts(ThrottleOpts{Limit: limit, BacklogTimeout: defaultBacklogTimeout}) +} + +// ThrottleBacklog is a middleware that limits number of currently processed +// requests at a time and provides a backlog for holding a finite number of +// pending requests. +func ThrottleBacklog(limit int, backlogLimit int, backlogTimeout time.Duration) func(http.Handler) http.Handler { + return ThrottleWithOpts(ThrottleOpts{Limit: limit, BacklogLimit: backlogLimit, BacklogTimeout: backlogTimeout}) +} + +// ThrottleWithOpts is a middleware that limits number of currently processed requests using passed ThrottleOpts. +func ThrottleWithOpts(opts ThrottleOpts) func(http.Handler) http.Handler { + if opts.Limit < 1 { + panic("chi/middleware: Throttle expects limit > 0") + } + + if opts.BacklogLimit < 0 { + panic("chi/middleware: Throttle expects backlogLimit to be positive") + } + + t := throttler{ + tokens: make(chan token, opts.Limit), + backlogTokens: make(chan token, opts.Limit+opts.BacklogLimit), + backlogTimeout: opts.BacklogTimeout, + retryAfterFn: opts.RetryAfterFn, + } + + // Filling tokens. + for i := 0; i < opts.Limit+opts.BacklogLimit; i++ { + if i < opts.Limit { + t.tokens <- token{} + } + t.backlogTokens <- token{} + } + + return func(next http.Handler) http.Handler { + fn := func(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + select { + + case <-ctx.Done(): + t.setRetryAfterHeaderIfNeeded(w, true) + http.Error(w, errContextCanceled, http.StatusServiceUnavailable) + return + + case btok := <-t.backlogTokens: + timer := time.NewTimer(t.backlogTimeout) + + defer func() { + t.backlogTokens <- btok + }() + + select { + case <-timer.C: + t.setRetryAfterHeaderIfNeeded(w, false) + http.Error(w, errTimedOut, http.StatusServiceUnavailable) + return + case <-ctx.Done(): + timer.Stop() + t.setRetryAfterHeaderIfNeeded(w, true) + http.Error(w, errContextCanceled, http.StatusServiceUnavailable) + return + case tok := <-t.tokens: + defer func() { + timer.Stop() + t.tokens <- tok + }() + next.ServeHTTP(w, r) + } + return + + default: + t.setRetryAfterHeaderIfNeeded(w, false) + http.Error(w, errCapacityExceeded, http.StatusServiceUnavailable) + return + } + } + + return http.HandlerFunc(fn) + } +} + +// token represents a request that is being processed. 
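A minimal sketch of mounting the throttling middleware above; the limits, timeout and address are illustrative. Requests beyond limit+backlog, and backlogged requests that outwait the timeout, receive 503 responses as implemented above.

```go
package main

import (
	"net/http"
	"time"

	"github.com/go-chi/chi"
	"github.com/go-chi/chi/middleware"
)

func main() {
	r := chi.NewRouter()

	// At most 100 requests are processed concurrently; a further 50 may wait
	// in the backlog for up to 5 seconds before being rejected.
	r.Use(middleware.ThrottleBacklog(100, 50, 5*time.Second))

	r.Get("/", func(w http.ResponseWriter, _ *http.Request) {
		_, _ = w.Write([]byte("ok"))
	})

	_ = http.ListenAndServe(":8080", r)
}
```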
+type token struct{} + +// throttler limits number of currently processed requests at a time. +type throttler struct { + tokens chan token + backlogTokens chan token + backlogTimeout time.Duration + retryAfterFn func(ctxDone bool) time.Duration +} + +// setRetryAfterHeaderIfNeeded sets Retry-After HTTP header if corresponding retryAfterFn option of throttler is initialized. +func (t throttler) setRetryAfterHeaderIfNeeded(w http.ResponseWriter, ctxDone bool) { + if t.retryAfterFn == nil { + return + } + w.Header().Set("Retry-After", strconv.Itoa(int(t.retryAfterFn(ctxDone).Seconds()))) +} diff --git a/vendor/github.com/go-chi/chi/middleware/timeout.go b/vendor/github.com/go-chi/chi/middleware/timeout.go new file mode 100644 index 000000000..8e373536c --- /dev/null +++ b/vendor/github.com/go-chi/chi/middleware/timeout.go @@ -0,0 +1,49 @@ +package middleware + +import ( + "context" + "net/http" + "time" +) + +// Timeout is a middleware that cancels ctx after a given timeout and return +// a 504 Gateway Timeout error to the client. +// +// It's required that you select the ctx.Done() channel to check for the signal +// if the context has reached its deadline and return, otherwise the timeout +// signal will be just ignored. +// +// ie. a route/handler may look like: +// +// r.Get("/long", func(w http.ResponseWriter, r *http.Request) { +// ctx := r.Context() +// processTime := time.Duration(rand.Intn(4)+1) * time.Second +// +// select { +// case <-ctx.Done(): +// return +// +// case <-time.After(processTime): +// // The above channel simulates some hard work. +// } +// +// w.Write([]byte("done")) +// }) +// +func Timeout(timeout time.Duration) func(next http.Handler) http.Handler { + return func(next http.Handler) http.Handler { + fn := func(w http.ResponseWriter, r *http.Request) { + ctx, cancel := context.WithTimeout(r.Context(), timeout) + defer func() { + cancel() + if ctx.Err() == context.DeadlineExceeded { + w.WriteHeader(http.StatusGatewayTimeout) + } + }() + + r = r.WithContext(ctx) + next.ServeHTTP(w, r) + } + return http.HandlerFunc(fn) + } +} diff --git a/vendor/github.com/go-chi/chi/middleware/url_format.go b/vendor/github.com/go-chi/chi/middleware/url_format.go new file mode 100644 index 000000000..5749e4f32 --- /dev/null +++ b/vendor/github.com/go-chi/chi/middleware/url_format.go @@ -0,0 +1,72 @@ +package middleware + +import ( + "context" + "net/http" + "strings" + + "github.com/go-chi/chi" +) + +var ( + // URLFormatCtxKey is the context.Context key to store the URL format data + // for a request. + URLFormatCtxKey = &contextKey{"URLFormat"} +) + +// URLFormat is a middleware that parses the url extension from a request path and stores it +// on the context as a string under the key `middleware.URLFormatCtxKey`. The middleware will +// trim the suffix from the routing path and continue routing. +// +// Routers should not include a url parameter for the suffix when using this middleware. +// +// Sample usage.. 
for url paths: `/articles/1`, `/articles/1.json` and `/articles/1.xml` +// +// func routes() http.Handler { +// r := chi.NewRouter() +// r.Use(middleware.URLFormat) +// +// r.Get("/articles/{id}", ListArticles) +// +// return r +// } +// +// func ListArticles(w http.ResponseWriter, r *http.Request) { +// urlFormat, _ := r.Context().Value(middleware.URLFormatCtxKey).(string) +// +// switch urlFormat { +// case "json": +// render.JSON(w, r, articles) +// case "xml:" +// render.XML(w, r, articles) +// default: +// render.JSON(w, r, articles) +// } +// } +// +func URLFormat(next http.Handler) http.Handler { + fn := func(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + var format string + path := r.URL.Path + + if strings.Index(path, ".") > 0 { + base := strings.LastIndex(path, "/") + idx := strings.Index(path[base:], ".") + + if idx > 0 { + idx += base + format = path[idx+1:] + + rctx := chi.RouteContext(r.Context()) + rctx.RoutePath = path[:idx] + } + } + + r = r.WithContext(context.WithValue(ctx, URLFormatCtxKey, format)) + + next.ServeHTTP(w, r) + } + return http.HandlerFunc(fn) +} diff --git a/vendor/github.com/go-chi/chi/middleware/value.go b/vendor/github.com/go-chi/chi/middleware/value.go new file mode 100644 index 000000000..fbbd0393f --- /dev/null +++ b/vendor/github.com/go-chi/chi/middleware/value.go @@ -0,0 +1,17 @@ +package middleware + +import ( + "context" + "net/http" +) + +// WithValue is a middleware that sets a given key/value in a context chain. +func WithValue(key interface{}, val interface{}) func(next http.Handler) http.Handler { + return func(next http.Handler) http.Handler { + fn := func(w http.ResponseWriter, r *http.Request) { + r = r.WithContext(context.WithValue(r.Context(), key, val)) + next.ServeHTTP(w, r) + } + return http.HandlerFunc(fn) + } +} diff --git a/vendor/github.com/go-chi/chi/middleware/wrap_writer.go b/vendor/github.com/go-chi/chi/middleware/wrap_writer.go new file mode 100644 index 000000000..382a523e4 --- /dev/null +++ b/vendor/github.com/go-chi/chi/middleware/wrap_writer.go @@ -0,0 +1,180 @@ +package middleware + +// The original work was derived from Goji's middleware, source: +// https://github.com/zenazn/goji/tree/master/web/middleware + +import ( + "bufio" + "io" + "net" + "net/http" +) + +// NewWrapResponseWriter wraps an http.ResponseWriter, returning a proxy that allows you to +// hook into various parts of the response process. +func NewWrapResponseWriter(w http.ResponseWriter, protoMajor int) WrapResponseWriter { + _, fl := w.(http.Flusher) + + bw := basicWriter{ResponseWriter: w} + + if protoMajor == 2 { + _, ps := w.(http.Pusher) + if fl && ps { + return &http2FancyWriter{bw} + } + } else { + _, hj := w.(http.Hijacker) + _, rf := w.(io.ReaderFrom) + if fl && hj && rf { + return &httpFancyWriter{bw} + } + } + if fl { + return &flushWriter{bw} + } + + return &bw +} + +// WrapResponseWriter is a proxy around an http.ResponseWriter that allows you to hook +// into various parts of the response process. +type WrapResponseWriter interface { + http.ResponseWriter + // Status returns the HTTP status of the request, or 0 if one has not + // yet been sent. + Status() int + // BytesWritten returns the total number of bytes sent to the client. + BytesWritten() int + // Tee causes the response body to be written to the given io.Writer in + // addition to proxying the writes through. Only one io.Writer can be + // tee'd to at once: setting a second one will overwrite the first. 
+ // Writes will be sent to the proxy before being written to this + // io.Writer. It is illegal for the tee'd writer to be modified + // concurrently with writes. + Tee(io.Writer) + // Unwrap returns the original proxied target. + Unwrap() http.ResponseWriter +} + +// basicWriter wraps a http.ResponseWriter that implements the minimal +// http.ResponseWriter interface. +type basicWriter struct { + http.ResponseWriter + wroteHeader bool + code int + bytes int + tee io.Writer +} + +func (b *basicWriter) WriteHeader(code int) { + if !b.wroteHeader { + b.code = code + b.wroteHeader = true + b.ResponseWriter.WriteHeader(code) + } +} + +func (b *basicWriter) Write(buf []byte) (int, error) { + b.maybeWriteHeader() + n, err := b.ResponseWriter.Write(buf) + if b.tee != nil { + _, err2 := b.tee.Write(buf[:n]) + // Prefer errors generated by the proxied writer. + if err == nil { + err = err2 + } + } + b.bytes += n + return n, err +} + +func (b *basicWriter) maybeWriteHeader() { + if !b.wroteHeader { + b.WriteHeader(http.StatusOK) + } +} + +func (b *basicWriter) Status() int { + return b.code +} + +func (b *basicWriter) BytesWritten() int { + return b.bytes +} + +func (b *basicWriter) Tee(w io.Writer) { + b.tee = w +} + +func (b *basicWriter) Unwrap() http.ResponseWriter { + return b.ResponseWriter +} + +type flushWriter struct { + basicWriter +} + +func (f *flushWriter) Flush() { + f.wroteHeader = true + fl := f.basicWriter.ResponseWriter.(http.Flusher) + fl.Flush() +} + +var _ http.Flusher = &flushWriter{} + +// httpFancyWriter is a HTTP writer that additionally satisfies +// http.Flusher, http.Hijacker, and io.ReaderFrom. It exists for the common case +// of wrapping the http.ResponseWriter that package http gives you, in order to +// make the proxied object support the full method set of the proxied object. +type httpFancyWriter struct { + basicWriter +} + +func (f *httpFancyWriter) Flush() { + f.wroteHeader = true + fl := f.basicWriter.ResponseWriter.(http.Flusher) + fl.Flush() +} + +func (f *httpFancyWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { + hj := f.basicWriter.ResponseWriter.(http.Hijacker) + return hj.Hijack() +} + +func (f *http2FancyWriter) Push(target string, opts *http.PushOptions) error { + return f.basicWriter.ResponseWriter.(http.Pusher).Push(target, opts) +} + +func (f *httpFancyWriter) ReadFrom(r io.Reader) (int64, error) { + if f.basicWriter.tee != nil { + n, err := io.Copy(&f.basicWriter, r) + f.basicWriter.bytes += int(n) + return n, err + } + rf := f.basicWriter.ResponseWriter.(io.ReaderFrom) + f.basicWriter.maybeWriteHeader() + n, err := rf.ReadFrom(r) + f.basicWriter.bytes += int(n) + return n, err +} + +var _ http.Flusher = &httpFancyWriter{} +var _ http.Hijacker = &httpFancyWriter{} +var _ http.Pusher = &http2FancyWriter{} +var _ io.ReaderFrom = &httpFancyWriter{} + +// http2FancyWriter is a HTTP2 writer that additionally satisfies +// http.Flusher, and io.ReaderFrom. It exists for the common case +// of wrapping the http.ResponseWriter that package http gives you, in order to +// make the proxied object support the full method set of the proxied object. 
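NewWrapResponseWriter is mostly used from logging or metrics middleware that needs the status code and byte count after the wrapped handler returns. A small sketch follows; the log format and route are illustrative.

```go
package main

import (
	"log"
	"net/http"
	"time"

	"github.com/go-chi/chi"
	"github.com/go-chi/chi/middleware"
)

// statusLogger wraps the ResponseWriter, serves the request, then reads the
// recorded Status and BytesWritten values.
func statusLogger(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		ww := middleware.NewWrapResponseWriter(w, r.ProtoMajor)
		start := time.Now()

		next.ServeHTTP(ww, r)

		log.Printf("%s %s -> %d (%d bytes) in %s",
			r.Method, r.URL.Path, ww.Status(), ww.BytesWritten(), time.Since(start))
	})
}

func main() {
	r := chi.NewRouter()
	r.Use(statusLogger)
	r.Get("/", func(w http.ResponseWriter, _ *http.Request) {
		_, _ = w.Write([]byte("hello"))
	})
	_ = http.ListenAndServe(":8080", r)
}
```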
+type http2FancyWriter struct { + basicWriter +} + +func (f *http2FancyWriter) Flush() { + f.wroteHeader = true + fl := f.basicWriter.ResponseWriter.(http.Flusher) + fl.Flush() +} + +var _ http.Flusher = &http2FancyWriter{} diff --git a/vendor/github.com/go-chi/chi/mux.go b/vendor/github.com/go-chi/chi/mux.go new file mode 100644 index 000000000..52950e97b --- /dev/null +++ b/vendor/github.com/go-chi/chi/mux.go @@ -0,0 +1,466 @@ +package chi + +import ( + "context" + "fmt" + "net/http" + "strings" + "sync" +) + +var _ Router = &Mux{} + +// Mux is a simple HTTP route multiplexer that parses a request path, +// records any URL params, and executes an end handler. It implements +// the http.Handler interface and is friendly with the standard library. +// +// Mux is designed to be fast, minimal and offer a powerful API for building +// modular and composable HTTP services with a large set of handlers. It's +// particularly useful for writing large REST API services that break a handler +// into many smaller parts composed of middlewares and end handlers. +type Mux struct { + // The radix trie router + tree *node + + // The middleware stack + middlewares []func(http.Handler) http.Handler + + // Controls the behaviour of middleware chain generation when a mux + // is registered as an inline group inside another mux. + inline bool + parent *Mux + + // The computed mux handler made of the chained middleware stack and + // the tree router + handler http.Handler + + // Routing context pool + pool *sync.Pool + + // Custom route not found handler + notFoundHandler http.HandlerFunc + + // Custom method not allowed handler + methodNotAllowedHandler http.HandlerFunc +} + +// NewMux returns a newly initialized Mux object that implements the Router +// interface. +func NewMux() *Mux { + mux := &Mux{tree: &node{}, pool: &sync.Pool{}} + mux.pool.New = func() interface{} { + return NewRouteContext() + } + return mux +} + +// ServeHTTP is the single method of the http.Handler interface that makes +// Mux interoperable with the standard library. It uses a sync.Pool to get and +// reuse routing contexts for each request. +func (mx *Mux) ServeHTTP(w http.ResponseWriter, r *http.Request) { + // Ensure the mux has some routes defined on the mux + if mx.handler == nil { + mx.NotFoundHandler().ServeHTTP(w, r) + return + } + + // Check if a routing context already exists from a parent router. + rctx, _ := r.Context().Value(RouteCtxKey).(*Context) + if rctx != nil { + mx.handler.ServeHTTP(w, r) + return + } + + // Fetch a RouteContext object from the sync pool, and call the computed + // mx.handler that is comprised of mx.middlewares + mx.routeHTTP. + // Once the request is finished, reset the routing context and put it back + // into the pool for reuse from another request. + rctx = mx.pool.Get().(*Context) + rctx.Reset() + rctx.Routes = mx + + // NOTE: r.WithContext() causes 2 allocations and context.WithValue() causes 1 allocation + r = r.WithContext(context.WithValue(r.Context(), RouteCtxKey, rctx)) + + // Serve the request and once its done, put the request context back in the sync pool + mx.handler.ServeHTTP(w, r) + mx.pool.Put(rctx) +} + +// Use appends a middleware handler to the Mux middleware stack. +// +// The middleware stack for any Mux will execute before searching for a matching +// route to a specific handler, which provides opportunity to respond early, +// change the course of the request execution, or set request-scoped values for +// the next http.Handler. 
+func (mx *Mux) Use(middlewares ...func(http.Handler) http.Handler) { + if mx.handler != nil { + panic("chi: all middlewares must be defined before routes on a mux") + } + mx.middlewares = append(mx.middlewares, middlewares...) +} + +// Handle adds the route `pattern` that matches any http method to +// execute the `handler` http.Handler. +func (mx *Mux) Handle(pattern string, handler http.Handler) { + mx.handle(mALL, pattern, handler) +} + +// HandleFunc adds the route `pattern` that matches any http method to +// execute the `handlerFn` http.HandlerFunc. +func (mx *Mux) HandleFunc(pattern string, handlerFn http.HandlerFunc) { + mx.handle(mALL, pattern, handlerFn) +} + +// Method adds the route `pattern` that matches `method` http method to +// execute the `handler` http.Handler. +func (mx *Mux) Method(method, pattern string, handler http.Handler) { + m, ok := methodMap[strings.ToUpper(method)] + if !ok { + panic(fmt.Sprintf("chi: '%s' http method is not supported.", method)) + } + mx.handle(m, pattern, handler) +} + +// MethodFunc adds the route `pattern` that matches `method` http method to +// execute the `handlerFn` http.HandlerFunc. +func (mx *Mux) MethodFunc(method, pattern string, handlerFn http.HandlerFunc) { + mx.Method(method, pattern, handlerFn) +} + +// Connect adds the route `pattern` that matches a CONNECT http method to +// execute the `handlerFn` http.HandlerFunc. +func (mx *Mux) Connect(pattern string, handlerFn http.HandlerFunc) { + mx.handle(mCONNECT, pattern, handlerFn) +} + +// Delete adds the route `pattern` that matches a DELETE http method to +// execute the `handlerFn` http.HandlerFunc. +func (mx *Mux) Delete(pattern string, handlerFn http.HandlerFunc) { + mx.handle(mDELETE, pattern, handlerFn) +} + +// Get adds the route `pattern` that matches a GET http method to +// execute the `handlerFn` http.HandlerFunc. +func (mx *Mux) Get(pattern string, handlerFn http.HandlerFunc) { + mx.handle(mGET, pattern, handlerFn) +} + +// Head adds the route `pattern` that matches a HEAD http method to +// execute the `handlerFn` http.HandlerFunc. +func (mx *Mux) Head(pattern string, handlerFn http.HandlerFunc) { + mx.handle(mHEAD, pattern, handlerFn) +} + +// Options adds the route `pattern` that matches a OPTIONS http method to +// execute the `handlerFn` http.HandlerFunc. +func (mx *Mux) Options(pattern string, handlerFn http.HandlerFunc) { + mx.handle(mOPTIONS, pattern, handlerFn) +} + +// Patch adds the route `pattern` that matches a PATCH http method to +// execute the `handlerFn` http.HandlerFunc. +func (mx *Mux) Patch(pattern string, handlerFn http.HandlerFunc) { + mx.handle(mPATCH, pattern, handlerFn) +} + +// Post adds the route `pattern` that matches a POST http method to +// execute the `handlerFn` http.HandlerFunc. +func (mx *Mux) Post(pattern string, handlerFn http.HandlerFunc) { + mx.handle(mPOST, pattern, handlerFn) +} + +// Put adds the route `pattern` that matches a PUT http method to +// execute the `handlerFn` http.HandlerFunc. +func (mx *Mux) Put(pattern string, handlerFn http.HandlerFunc) { + mx.handle(mPUT, pattern, handlerFn) +} + +// Trace adds the route `pattern` that matches a TRACE http method to +// execute the `handlerFn` http.HandlerFunc. +func (mx *Mux) Trace(pattern string, handlerFn http.HandlerFunc) { + mx.handle(mTRACE, pattern, handlerFn) +} + +// NotFound sets a custom http.HandlerFunc for routing paths that could +// not be found. The default 404 handler is `http.NotFound`. 
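Overriding the default 404 and 405 responders described here looks roughly like the sketch below; the response messages are illustrative.

```go
package main

import (
	"net/http"

	"github.com/go-chi/chi"
)

func main() {
	r := chi.NewRouter()

	r.Get("/ping", func(w http.ResponseWriter, _ *http.Request) {
		_, _ = w.Write([]byte("pong"))
	})

	// Replace the default http.NotFound and empty-body 405 handlers.
	r.NotFound(func(w http.ResponseWriter, _ *http.Request) {
		http.Error(w, "no such route", http.StatusNotFound)
	})
	r.MethodNotAllowed(func(w http.ResponseWriter, _ *http.Request) {
		http.Error(w, "method not supported on this route", http.StatusMethodNotAllowed)
	})

	_ = http.ListenAndServe(":8080", r)
}
```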
+func (mx *Mux) NotFound(handlerFn http.HandlerFunc) { + // Build NotFound handler chain + m := mx + hFn := handlerFn + if mx.inline && mx.parent != nil { + m = mx.parent + hFn = Chain(mx.middlewares...).HandlerFunc(hFn).ServeHTTP + } + + // Update the notFoundHandler from this point forward + m.notFoundHandler = hFn + m.updateSubRoutes(func(subMux *Mux) { + if subMux.notFoundHandler == nil { + subMux.NotFound(hFn) + } + }) +} + +// MethodNotAllowed sets a custom http.HandlerFunc for routing paths where the +// method is unresolved. The default handler returns a 405 with an empty body. +func (mx *Mux) MethodNotAllowed(handlerFn http.HandlerFunc) { + // Build MethodNotAllowed handler chain + m := mx + hFn := handlerFn + if mx.inline && mx.parent != nil { + m = mx.parent + hFn = Chain(mx.middlewares...).HandlerFunc(hFn).ServeHTTP + } + + // Update the methodNotAllowedHandler from this point forward + m.methodNotAllowedHandler = hFn + m.updateSubRoutes(func(subMux *Mux) { + if subMux.methodNotAllowedHandler == nil { + subMux.MethodNotAllowed(hFn) + } + }) +} + +// With adds inline middlewares for an endpoint handler. +func (mx *Mux) With(middlewares ...func(http.Handler) http.Handler) Router { + // Similarly as in handle(), we must build the mux handler once additional + // middleware registration isn't allowed for this stack, like now. + if !mx.inline && mx.handler == nil { + mx.buildRouteHandler() + } + + // Copy middlewares from parent inline muxs + var mws Middlewares + if mx.inline { + mws = make(Middlewares, len(mx.middlewares)) + copy(mws, mx.middlewares) + } + mws = append(mws, middlewares...) + + im := &Mux{ + pool: mx.pool, inline: true, parent: mx, tree: mx.tree, middlewares: mws, + notFoundHandler: mx.notFoundHandler, methodNotAllowedHandler: mx.methodNotAllowedHandler, + } + + return im +} + +// Group creates a new inline-Mux with a fresh middleware stack. It's useful +// for a group of handlers along the same routing path that use an additional +// set of middlewares. See _examples/. +func (mx *Mux) Group(fn func(r Router)) Router { + im := mx.With().(*Mux) + if fn != nil { + fn(im) + } + return im +} + +// Route creates a new Mux with a fresh middleware stack and mounts it +// along the `pattern` as a subrouter. Effectively, this is a short-hand +// call to Mount. See _examples/. +func (mx *Mux) Route(pattern string, fn func(r Router)) Router { + subRouter := NewRouter() + if fn != nil { + fn(subRouter) + } + mx.Mount(pattern, subRouter) + return subRouter +} + +// Mount attaches another http.Handler or chi Router as a subrouter along a routing +// path. It's very useful to split up a large API as many independent routers and +// compose them as a single service using Mount. See _examples/. +// +// Note that Mount() simply sets a wildcard along the `pattern` that will continue +// routing at the `handler`, which in most cases is another chi.Router. As a result, +// if you define two Mount() routes on the exact same pattern the mount will panic. +func (mx *Mux) Mount(pattern string, handler http.Handler) { + // Provide runtime safety for ensuring a pattern isn't mounted on an existing + // routing pattern. + if mx.tree.findPattern(pattern+"*") || mx.tree.findPattern(pattern+"/*") { + panic(fmt.Sprintf("chi: attempting to Mount() a handler on an existing path, '%s'", pattern)) + } + + // Assign sub-Router's with the parent not found & method not allowed handler if not specified. 
+ subr, ok := handler.(*Mux) + if ok && subr.notFoundHandler == nil && mx.notFoundHandler != nil { + subr.NotFound(mx.notFoundHandler) + } + if ok && subr.methodNotAllowedHandler == nil && mx.methodNotAllowedHandler != nil { + subr.MethodNotAllowed(mx.methodNotAllowedHandler) + } + + mountHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + rctx := RouteContext(r.Context()) + rctx.RoutePath = mx.nextRoutePath(rctx) + handler.ServeHTTP(w, r) + }) + + if pattern == "" || pattern[len(pattern)-1] != '/' { + mx.handle(mALL|mSTUB, pattern, mountHandler) + mx.handle(mALL|mSTUB, pattern+"/", mountHandler) + pattern += "/" + } + + method := mALL + subroutes, _ := handler.(Routes) + if subroutes != nil { + method |= mSTUB + } + n := mx.handle(method, pattern+"*", mountHandler) + + if subroutes != nil { + n.subroutes = subroutes + } +} + +// Routes returns a slice of routing information from the tree, +// useful for traversing available routes of a router. +func (mx *Mux) Routes() []Route { + return mx.tree.routes() +} + +// Middlewares returns a slice of middleware handler functions. +func (mx *Mux) Middlewares() Middlewares { + return mx.middlewares +} + +// Match searches the routing tree for a handler that matches the method/path. +// It's similar to routing a http request, but without executing the handler +// thereafter. +// +// Note: the *Context state is updated during execution, so manage +// the state carefully or make a NewRouteContext(). +func (mx *Mux) Match(rctx *Context, method, path string) bool { + m, ok := methodMap[method] + if !ok { + return false + } + + node, _, h := mx.tree.FindRoute(rctx, m, path) + + if node != nil && node.subroutes != nil { + rctx.RoutePath = mx.nextRoutePath(rctx) + return node.subroutes.Match(rctx, method, rctx.RoutePath) + } + + return h != nil +} + +// NotFoundHandler returns the default Mux 404 responder whenever a route +// cannot be found. +func (mx *Mux) NotFoundHandler() http.HandlerFunc { + if mx.notFoundHandler != nil { + return mx.notFoundHandler + } + return http.NotFound +} + +// MethodNotAllowedHandler returns the default Mux 405 responder whenever +// a method cannot be resolved for a route. +func (mx *Mux) MethodNotAllowedHandler() http.HandlerFunc { + if mx.methodNotAllowedHandler != nil { + return mx.methodNotAllowedHandler + } + return methodNotAllowedHandler +} + +// buildRouteHandler builds the single mux handler that is a chain of the middleware +// stack, as defined by calls to Use(), and the tree router (Mux) itself. After this +// point, no other middlewares can be registered on this Mux's stack. But you can still +// compose additional middlewares via Group()'s or using a chained middleware handler. +func (mx *Mux) buildRouteHandler() { + mx.handler = chain(mx.middlewares, http.HandlerFunc(mx.routeHTTP)) +} + +// handle registers a http.Handler in the routing tree for a particular http method +// and routing pattern. +func (mx *Mux) handle(method methodTyp, pattern string, handler http.Handler) *node { + if len(pattern) == 0 || pattern[0] != '/' { + panic(fmt.Sprintf("chi: routing pattern must begin with '/' in '%s'", pattern)) + } + + // Build the computed routing handler for this routing pattern. 
+ if !mx.inline && mx.handler == nil { + mx.buildRouteHandler() + } + + // Build endpoint handler with inline middlewares for the route + var h http.Handler + if mx.inline { + mx.handler = http.HandlerFunc(mx.routeHTTP) + h = Chain(mx.middlewares...).Handler(handler) + } else { + h = handler + } + + // Add the endpoint to the tree and return the node + return mx.tree.InsertRoute(method, pattern, h) +} + +// routeHTTP routes a http.Request through the Mux routing tree to serve +// the matching handler for a particular http method. +func (mx *Mux) routeHTTP(w http.ResponseWriter, r *http.Request) { + // Grab the route context object + rctx := r.Context().Value(RouteCtxKey).(*Context) + + // The request routing path + routePath := rctx.RoutePath + if routePath == "" { + if r.URL.RawPath != "" { + routePath = r.URL.RawPath + } else { + routePath = r.URL.Path + } + } + + // Check if method is supported by chi + if rctx.RouteMethod == "" { + rctx.RouteMethod = r.Method + } + method, ok := methodMap[rctx.RouteMethod] + if !ok { + mx.MethodNotAllowedHandler().ServeHTTP(w, r) + return + } + + // Find the route + if _, _, h := mx.tree.FindRoute(rctx, method, routePath); h != nil { + h.ServeHTTP(w, r) + return + } + if rctx.methodNotAllowed { + mx.MethodNotAllowedHandler().ServeHTTP(w, r) + } else { + mx.NotFoundHandler().ServeHTTP(w, r) + } +} + +func (mx *Mux) nextRoutePath(rctx *Context) string { + routePath := "/" + nx := len(rctx.routeParams.Keys) - 1 // index of last param in list + if nx >= 0 && rctx.routeParams.Keys[nx] == "*" && len(rctx.routeParams.Values) > nx { + routePath = "/" + rctx.routeParams.Values[nx] + } + return routePath +} + +// Recursively update data on child routers. +func (mx *Mux) updateSubRoutes(fn func(subMux *Mux)) { + for _, r := range mx.tree.routes() { + subMux, ok := r.SubRoutes.(*Mux) + if !ok { + continue + } + fn(subMux) + } +} + +// methodNotAllowedHandler is a helper function to respond with a 405, +// method not allowed. +func methodNotAllowedHandler(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(405) + w.Write(nil) +} diff --git a/vendor/github.com/go-chi/chi/tree.go b/vendor/github.com/go-chi/chi/tree.go new file mode 100644 index 000000000..59b5b5f7b --- /dev/null +++ b/vendor/github.com/go-chi/chi/tree.go @@ -0,0 +1,865 @@ +package chi + +// Radix tree implementation below is a based on the original work by +// Armon Dadgar in https://github.com/armon/go-radix/blob/master/radix.go +// (MIT licensed). It's been heavily modified for use as a HTTP routing tree. 
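To connect the routing tree below with the Mux API above: `{param}` and `{param:regexp}` segments become param and regexp nodes, while Mount registers its handler under a trailing wildcard. A short sketch, with illustrative paths and handlers:

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/go-chi/chi"
)

func main() {
	r := chi.NewRouter()

	// A regexp param node: only numeric ids reach this handler.
	r.Get("/users/{id:[0-9]+}", func(w http.ResponseWriter, req *http.Request) {
		fmt.Fprintf(w, "user %s", chi.URLParam(req, "id"))
	})

	// Mount places the sub-router under a catch-all edge at /admin/*.
	admin := chi.NewRouter()
	admin.Get("/stats", func(w http.ResponseWriter, _ *http.Request) {
		_, _ = w.Write([]byte("admin stats"))
	})
	r.Mount("/admin", admin)

	_ = http.ListenAndServe(":8080", r)
}
```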
+ +import ( + "fmt" + "math" + "net/http" + "regexp" + "sort" + "strconv" + "strings" +) + +type methodTyp int + +const ( + mSTUB methodTyp = 1 << iota + mCONNECT + mDELETE + mGET + mHEAD + mOPTIONS + mPATCH + mPOST + mPUT + mTRACE +) + +var mALL = mCONNECT | mDELETE | mGET | mHEAD | + mOPTIONS | mPATCH | mPOST | mPUT | mTRACE + +var methodMap = map[string]methodTyp{ + http.MethodConnect: mCONNECT, + http.MethodDelete: mDELETE, + http.MethodGet: mGET, + http.MethodHead: mHEAD, + http.MethodOptions: mOPTIONS, + http.MethodPatch: mPATCH, + http.MethodPost: mPOST, + http.MethodPut: mPUT, + http.MethodTrace: mTRACE, +} + +// RegisterMethod adds support for custom HTTP method handlers, available +// via Router#Method and Router#MethodFunc +func RegisterMethod(method string) { + if method == "" { + return + } + method = strings.ToUpper(method) + if _, ok := methodMap[method]; ok { + return + } + n := len(methodMap) + if n > strconv.IntSize { + panic(fmt.Sprintf("chi: max number of methods reached (%d)", strconv.IntSize)) + } + mt := methodTyp(math.Exp2(float64(n))) + methodMap[method] = mt + mALL |= mt +} + +type nodeTyp uint8 + +const ( + ntStatic nodeTyp = iota // /home + ntRegexp // /{id:[0-9]+} + ntParam // /{user} + ntCatchAll // /api/v1/* +) + +type node struct { + // node type: static, regexp, param, catchAll + typ nodeTyp + + // first byte of the prefix + label byte + + // first byte of the child prefix + tail byte + + // prefix is the common prefix we ignore + prefix string + + // regexp matcher for regexp nodes + rex *regexp.Regexp + + // HTTP handler endpoints on the leaf node + endpoints endpoints + + // subroutes on the leaf node + subroutes Routes + + // child nodes should be stored in-order for iteration, + // in groups of the node type. + children [ntCatchAll + 1]nodes +} + +// endpoints is a mapping of http method constants to handlers +// for a given route. +type endpoints map[methodTyp]*endpoint + +type endpoint struct { + // endpoint handler + handler http.Handler + + // pattern is the routing pattern for handler nodes + pattern string + + // parameter keys recorded on handler nodes + paramKeys []string +} + +func (s endpoints) Value(method methodTyp) *endpoint { + mh, ok := s[method] + if !ok { + mh = &endpoint{} + s[method] = mh + } + return mh +} + +func (n *node) InsertRoute(method methodTyp, pattern string, handler http.Handler) *node { + var parent *node + search := pattern + + for { + // Handle key exhaustion + if len(search) == 0 { + // Insert or update the node's leaf handler + n.setEndpoint(method, handler, pattern) + return n + } + + // We're going to be searching for a wild node next, + // in this case, we need to get the tail + var label = search[0] + var segTail byte + var segEndIdx int + var segTyp nodeTyp + var segRexpat string + if label == '{' || label == '*' { + segTyp, _, segRexpat, segTail, _, segEndIdx = patNextSegment(search) + } + + var prefix string + if segTyp == ntRegexp { + prefix = segRexpat + } + + // Look for the edge to attach to + parent = n + n = n.getEdge(segTyp, label, segTail, prefix) + + // No edge, create one + if n == nil { + child := &node{label: label, tail: segTail, prefix: search} + hn := parent.addChild(child, search) + hn.setEndpoint(method, handler, pattern) + + return hn + } + + // Found an edge to match the pattern + + if n.typ > ntStatic { + // We found a param node, trim the param from the search path and continue. 
+ // This param/wild pattern segment would already be on the tree from a previous + // call to addChild when creating a new node. + search = search[segEndIdx:] + continue + } + + // Static nodes fall below here. + // Determine longest prefix of the search key on match. + commonPrefix := longestPrefix(search, n.prefix) + if commonPrefix == len(n.prefix) { + // the common prefix is as long as the current node's prefix we're attempting to insert. + // keep the search going. + search = search[commonPrefix:] + continue + } + + // Split the node + child := &node{ + typ: ntStatic, + prefix: search[:commonPrefix], + } + parent.replaceChild(search[0], segTail, child) + + // Restore the existing node + n.label = n.prefix[commonPrefix] + n.prefix = n.prefix[commonPrefix:] + child.addChild(n, n.prefix) + + // If the new key is a subset, set the method/handler on this node and finish. + search = search[commonPrefix:] + if len(search) == 0 { + child.setEndpoint(method, handler, pattern) + return child + } + + // Create a new edge for the node + subchild := &node{ + typ: ntStatic, + label: search[0], + prefix: search, + } + hn := child.addChild(subchild, search) + hn.setEndpoint(method, handler, pattern) + return hn + } +} + +// addChild appends the new `child` node to the tree using the `pattern` as the trie key. +// For a URL router like chi's, we split the static, param, regexp and wildcard segments +// into different nodes. In addition, addChild will recursively call itself until every +// pattern segment is added to the url pattern tree as individual nodes, depending on type. +func (n *node) addChild(child *node, prefix string) *node { + search := prefix + + // handler leaf node added to the tree is the child. + // this may be overridden later down the flow + hn := child + + // Parse next segment + segTyp, _, segRexpat, segTail, segStartIdx, segEndIdx := patNextSegment(search) + + // Add child depending on next up segment + switch segTyp { + + case ntStatic: + // Search prefix is all static (that is, has no params in path) + // noop + + default: + // Search prefix contains a param, regexp or wildcard + + if segTyp == ntRegexp { + rex, err := regexp.Compile(segRexpat) + if err != nil { + panic(fmt.Sprintf("chi: invalid regexp pattern '%s' in route param", segRexpat)) + } + child.prefix = segRexpat + child.rex = rex + } + + if segStartIdx == 0 { + // Route starts with a param + child.typ = segTyp + + if segTyp == ntCatchAll { + segStartIdx = -1 + } else { + segStartIdx = segEndIdx + } + if segStartIdx < 0 { + segStartIdx = len(search) + } + child.tail = segTail // for params, we set the tail + + if segStartIdx != len(search) { + // add static edge for the remaining part, split the end. + // its not possible to have adjacent param nodes, so its certainly + // going to be a static node next. 
+ + search = search[segStartIdx:] // advance search position + + nn := &node{ + typ: ntStatic, + label: search[0], + prefix: search, + } + hn = child.addChild(nn, search) + } + + } else if segStartIdx > 0 { + // Route has some param + + // starts with a static segment + child.typ = ntStatic + child.prefix = search[:segStartIdx] + child.rex = nil + + // add the param edge node + search = search[segStartIdx:] + + nn := &node{ + typ: segTyp, + label: search[0], + tail: segTail, + } + hn = child.addChild(nn, search) + + } + } + + n.children[child.typ] = append(n.children[child.typ], child) + n.children[child.typ].Sort() + return hn +} + +func (n *node) replaceChild(label, tail byte, child *node) { + for i := 0; i < len(n.children[child.typ]); i++ { + if n.children[child.typ][i].label == label && n.children[child.typ][i].tail == tail { + n.children[child.typ][i] = child + n.children[child.typ][i].label = label + n.children[child.typ][i].tail = tail + return + } + } + panic("chi: replacing missing child") +} + +func (n *node) getEdge(ntyp nodeTyp, label, tail byte, prefix string) *node { + nds := n.children[ntyp] + for i := 0; i < len(nds); i++ { + if nds[i].label == label && nds[i].tail == tail { + if ntyp == ntRegexp && nds[i].prefix != prefix { + continue + } + return nds[i] + } + } + return nil +} + +func (n *node) setEndpoint(method methodTyp, handler http.Handler, pattern string) { + // Set the handler for the method type on the node + if n.endpoints == nil { + n.endpoints = make(endpoints) + } + + paramKeys := patParamKeys(pattern) + + if method&mSTUB == mSTUB { + n.endpoints.Value(mSTUB).handler = handler + } + if method&mALL == mALL { + h := n.endpoints.Value(mALL) + h.handler = handler + h.pattern = pattern + h.paramKeys = paramKeys + for _, m := range methodMap { + h := n.endpoints.Value(m) + h.handler = handler + h.pattern = pattern + h.paramKeys = paramKeys + } + } else { + h := n.endpoints.Value(method) + h.handler = handler + h.pattern = pattern + h.paramKeys = paramKeys + } +} + +func (n *node) FindRoute(rctx *Context, method methodTyp, path string) (*node, endpoints, http.Handler) { + // Reset the context routing pattern and params + rctx.routePattern = "" + rctx.routeParams.Keys = rctx.routeParams.Keys[:0] + rctx.routeParams.Values = rctx.routeParams.Values[:0] + + // Find the routing handlers for the path + rn := n.findRoute(rctx, method, path) + if rn == nil { + return nil, nil, nil + } + + // Record the routing params in the request lifecycle + rctx.URLParams.Keys = append(rctx.URLParams.Keys, rctx.routeParams.Keys...) + rctx.URLParams.Values = append(rctx.URLParams.Values, rctx.routeParams.Values...) + + // Record the routing pattern in the request lifecycle + if rn.endpoints[method].pattern != "" { + rctx.routePattern = rn.endpoints[method].pattern + rctx.RoutePatterns = append(rctx.RoutePatterns, rctx.routePattern) + } + + return rn, rn.endpoints, rn.endpoints[method].handler +} + +// Recursive edge traversal by checking all nodeTyp groups along the way. +// It's like searching through a multi-dimensional radix trie. 
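FindRoute is exercised indirectly through the public Match API, which is handy for checking a route table in tests without serving a request. A small sketch; the route and paths are illustrative.

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/go-chi/chi"
)

func main() {
	r := chi.NewRouter()
	r.Get("/articles/{id}", func(http.ResponseWriter, *http.Request) {})

	// Match walks the tree without invoking the handler; URL params and the
	// matched pattern are recorded on the supplied routing context.
	rctx := chi.NewRouteContext()
	fmt.Println(r.Match(rctx, http.MethodGet, "/articles/42")) // true
	fmt.Println(rctx.URLParam("id"))                           // 42
}
```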
+func (n *node) findRoute(rctx *Context, method methodTyp, path string) *node { + nn := n + search := path + + for t, nds := range nn.children { + ntyp := nodeTyp(t) + if len(nds) == 0 { + continue + } + + var xn *node + xsearch := search + + var label byte + if search != "" { + label = search[0] + } + + switch ntyp { + case ntStatic: + xn = nds.findEdge(label) + if xn == nil || !strings.HasPrefix(xsearch, xn.prefix) { + continue + } + xsearch = xsearch[len(xn.prefix):] + + case ntParam, ntRegexp: + // short-circuit and return no matching route for empty param values + if xsearch == "" { + continue + } + + // serially loop through each node grouped by the tail delimiter + for idx := 0; idx < len(nds); idx++ { + xn = nds[idx] + + // label for param nodes is the delimiter byte + p := strings.IndexByte(xsearch, xn.tail) + + if p < 0 { + if xn.tail == '/' { + p = len(xsearch) + } else { + continue + } + } + + if ntyp == ntRegexp && xn.rex != nil { + if !xn.rex.Match([]byte(xsearch[:p])) { + continue + } + } else if strings.IndexByte(xsearch[:p], '/') != -1 { + // avoid a match across path segments + continue + } + + prevlen := len(rctx.routeParams.Values) + rctx.routeParams.Values = append(rctx.routeParams.Values, xsearch[:p]) + xsearch = xsearch[p:] + + if len(xsearch) == 0 { + if xn.isLeaf() { + h := xn.endpoints[method] + if h != nil && h.handler != nil { + rctx.routeParams.Keys = append(rctx.routeParams.Keys, h.paramKeys...) + return xn + } + + // flag that the routing context found a route, but not a corresponding + // supported method + rctx.methodNotAllowed = true + } + } + + // recursively find the next node on this branch + fin := xn.findRoute(rctx, method, xsearch) + if fin != nil { + return fin + } + + // not found on this branch, reset vars + rctx.routeParams.Values = rctx.routeParams.Values[:prevlen] + xsearch = search + } + + rctx.routeParams.Values = append(rctx.routeParams.Values, "") + + default: + // catch-all nodes + rctx.routeParams.Values = append(rctx.routeParams.Values, search) + xn = nds[0] + xsearch = "" + } + + if xn == nil { + continue + } + + // did we find it yet? + if len(xsearch) == 0 { + if xn.isLeaf() { + h := xn.endpoints[method] + if h != nil && h.handler != nil { + rctx.routeParams.Keys = append(rctx.routeParams.Keys, h.paramKeys...) + return xn + } + + // flag that the routing context found a route, but not a corresponding + // supported method + rctx.methodNotAllowed = true + } + } + + // recursively find the next node.. 
+ fin := xn.findRoute(rctx, method, xsearch) + if fin != nil { + return fin + } + + // Did not find final handler, let's remove the param here if it was set + if xn.typ > ntStatic { + if len(rctx.routeParams.Values) > 0 { + rctx.routeParams.Values = rctx.routeParams.Values[:len(rctx.routeParams.Values)-1] + } + } + + } + + return nil +} + +func (n *node) findEdge(ntyp nodeTyp, label byte) *node { + nds := n.children[ntyp] + num := len(nds) + idx := 0 + + switch ntyp { + case ntStatic, ntParam, ntRegexp: + i, j := 0, num-1 + for i <= j { + idx = i + (j-i)/2 + if label > nds[idx].label { + i = idx + 1 + } else if label < nds[idx].label { + j = idx - 1 + } else { + i = num // breaks cond + } + } + if nds[idx].label != label { + return nil + } + return nds[idx] + + default: // catch all + return nds[idx] + } +} + +func (n *node) isLeaf() bool { + return n.endpoints != nil +} + +func (n *node) findPattern(pattern string) bool { + nn := n + for _, nds := range nn.children { + if len(nds) == 0 { + continue + } + + n = nn.findEdge(nds[0].typ, pattern[0]) + if n == nil { + continue + } + + var idx int + var xpattern string + + switch n.typ { + case ntStatic: + idx = longestPrefix(pattern, n.prefix) + if idx < len(n.prefix) { + continue + } + + case ntParam, ntRegexp: + idx = strings.IndexByte(pattern, '}') + 1 + + case ntCatchAll: + idx = longestPrefix(pattern, "*") + + default: + panic("chi: unknown node type") + } + + xpattern = pattern[idx:] + if len(xpattern) == 0 { + return true + } + + return n.findPattern(xpattern) + } + return false +} + +func (n *node) routes() []Route { + rts := []Route{} + + n.walk(func(eps endpoints, subroutes Routes) bool { + if eps[mSTUB] != nil && eps[mSTUB].handler != nil && subroutes == nil { + return false + } + + // Group methodHandlers by unique patterns + pats := make(map[string]endpoints) + + for mt, h := range eps { + if h.pattern == "" { + continue + } + p, ok := pats[h.pattern] + if !ok { + p = endpoints{} + pats[h.pattern] = p + } + p[mt] = h + } + + for p, mh := range pats { + hs := make(map[string]http.Handler) + if mh[mALL] != nil && mh[mALL].handler != nil { + hs["*"] = mh[mALL].handler + } + + for mt, h := range mh { + if h.handler == nil { + continue + } + m := methodTypString(mt) + if m == "" { + continue + } + hs[m] = h.handler + } + + rt := Route{p, hs, subroutes} + rts = append(rts, rt) + } + + return false + }) + + return rts +} + +func (n *node) walk(fn func(eps endpoints, subroutes Routes) bool) bool { + // Visit the leaf values if any + if (n.endpoints != nil || n.subroutes != nil) && fn(n.endpoints, n.subroutes) { + return true + } + + // Recurse on the children + for _, ns := range n.children { + for _, cn := range ns { + if cn.walk(fn) { + return true + } + } + } + return false +} + +// patNextSegment returns the next segment details from a pattern: +// node type, param key, regexp string, param tail byte, param starting index, param ending index +func patNextSegment(pattern string) (nodeTyp, string, string, byte, int, int) { + ps := strings.Index(pattern, "{") + ws := strings.Index(pattern, "*") + + if ps < 0 && ws < 0 { + return ntStatic, "", "", 0, 0, len(pattern) // we return the entire thing + } + + // Sanity check + if ps >= 0 && ws >= 0 && ws < ps { + panic("chi: wildcard '*' must be the last pattern in a route, otherwise use a '{param}'") + } + + var tail byte = '/' // Default endpoint tail to / byte + + if ps >= 0 { + // Param/Regexp pattern is next + nt := ntParam + + // Read to closing } taking into account opens and closes in 
curl count (cc) + cc := 0 + pe := ps + for i, c := range pattern[ps:] { + if c == '{' { + cc++ + } else if c == '}' { + cc-- + if cc == 0 { + pe = ps + i + break + } + } + } + if pe == ps { + panic("chi: route param closing delimiter '}' is missing") + } + + key := pattern[ps+1 : pe] + pe++ // set end to next position + + if pe < len(pattern) { + tail = pattern[pe] + } + + var rexpat string + if idx := strings.Index(key, ":"); idx >= 0 { + nt = ntRegexp + rexpat = key[idx+1:] + key = key[:idx] + } + + if len(rexpat) > 0 { + if rexpat[0] != '^' { + rexpat = "^" + rexpat + } + if rexpat[len(rexpat)-1] != '$' { + rexpat += "$" + } + } + + return nt, key, rexpat, tail, ps, pe + } + + // Wildcard pattern as finale + if ws < len(pattern)-1 { + panic("chi: wildcard '*' must be the last value in a route. trim trailing text or use a '{param}' instead") + } + return ntCatchAll, "*", "", 0, ws, len(pattern) +} + +func patParamKeys(pattern string) []string { + pat := pattern + paramKeys := []string{} + for { + ptyp, paramKey, _, _, _, e := patNextSegment(pat) + if ptyp == ntStatic { + return paramKeys + } + for i := 0; i < len(paramKeys); i++ { + if paramKeys[i] == paramKey { + panic(fmt.Sprintf("chi: routing pattern '%s' contains duplicate param key, '%s'", pattern, paramKey)) + } + } + paramKeys = append(paramKeys, paramKey) + pat = pat[e:] + } +} + +// longestPrefix finds the length of the shared prefix +// of two strings +func longestPrefix(k1, k2 string) int { + max := len(k1) + if l := len(k2); l < max { + max = l + } + var i int + for i = 0; i < max; i++ { + if k1[i] != k2[i] { + break + } + } + return i +} + +func methodTypString(method methodTyp) string { + for s, t := range methodMap { + if method == t { + return s + } + } + return "" +} + +type nodes []*node + +// Sort the list of nodes by label +func (ns nodes) Sort() { sort.Sort(ns); ns.tailSort() } +func (ns nodes) Len() int { return len(ns) } +func (ns nodes) Swap(i, j int) { ns[i], ns[j] = ns[j], ns[i] } +func (ns nodes) Less(i, j int) bool { return ns[i].label < ns[j].label } + +// tailSort pushes nodes with '/' as the tail to the end of the list for param nodes. +// The list order determines the traversal order. +func (ns nodes) tailSort() { + for i := len(ns) - 1; i >= 0; i-- { + if ns[i].typ > ntStatic && ns[i].tail == '/' { + ns.Swap(i, len(ns)-1) + return + } + } +} + +func (ns nodes) findEdge(label byte) *node { + num := len(ns) + idx := 0 + i, j := 0, num-1 + for i <= j { + idx = i + (j-i)/2 + if label > ns[idx].label { + i = idx + 1 + } else if label < ns[idx].label { + j = idx - 1 + } else { + i = num // breaks cond + } + } + if ns[idx].label != label { + return nil + } + return ns[idx] +} + +// Route describes the details of a routing handler. +// Handlers map key is an HTTP method +type Route struct { + Pattern string + Handlers map[string]http.Handler + SubRoutes Routes +} + +// WalkFunc is the type of the function called for each method and route visited by Walk. +type WalkFunc func(method string, route string, handler http.Handler, middlewares ...func(http.Handler) http.Handler) error + +// Walk walks any router tree that implements Routes interface. +func Walk(r Routes, walkFn WalkFunc) error { + return walk(r, walkFn, "") +} + +func walk(r Routes, walkFn WalkFunc, parentRoute string, parentMw ...func(http.Handler) http.Handler) error { + for _, route := range r.Routes() { + mws := make([]func(http.Handler) http.Handler, len(parentMw)) + copy(mws, parentMw) + mws = append(mws, r.Middlewares()...) 
+ + if route.SubRoutes != nil { + if err := walk(route.SubRoutes, walkFn, parentRoute+route.Pattern, mws...); err != nil { + return err + } + continue + } + + for method, handler := range route.Handlers { + if method == "*" { + // Ignore a "catchAll" method, since we pass down all the specific methods for each route. + continue + } + + fullRoute := parentRoute + route.Pattern + fullRoute = strings.Replace(fullRoute, "/*/", "/", -1) + + if chain, ok := handler.(*ChainHandler); ok { + if err := walkFn(method, fullRoute, chain.Endpoint, append(mws, chain.Middlewares...)...); err != nil { + return err + } + } else { + if err := walkFn(method, fullRoute, handler, mws...); err != nil { + return err + } + } + } + } + + return nil +} diff --git a/vendor/github.com/go-redis/redis/.gitignore b/vendor/github.com/go-redis/redis/.gitignore new file mode 100644 index 000000000..ebfe903bc --- /dev/null +++ b/vendor/github.com/go-redis/redis/.gitignore @@ -0,0 +1,2 @@ +*.rdb +testdata/*/ diff --git a/vendor/github.com/go-redis/redis/.travis.yml b/vendor/github.com/go-redis/redis/.travis.yml new file mode 100644 index 000000000..06d7897b4 --- /dev/null +++ b/vendor/github.com/go-redis/redis/.travis.yml @@ -0,0 +1,20 @@ +sudo: false +language: go + +services: + - redis-server + +go: + - 1.9.x + - 1.10.x + - 1.11.x + - 1.12.x + - tip + +matrix: + allow_failures: + - go: tip + +install: + - go get github.com/onsi/ginkgo + - go get github.com/onsi/gomega diff --git a/vendor/github.com/go-redis/redis/CHANGELOG.md b/vendor/github.com/go-redis/redis/CHANGELOG.md new file mode 100644 index 000000000..19645661a --- /dev/null +++ b/vendor/github.com/go-redis/redis/CHANGELOG.md @@ -0,0 +1,25 @@ +# Changelog + +## Unreleased + +- Cluster and Ring pipelines process commands for each node in its own goroutine. + +## 6.14 + +- Added Options.MinIdleConns. +- Added Options.MaxConnAge. +- PoolStats.FreeConns is renamed to PoolStats.IdleConns. +- Add Client.Do to simplify creating custom commands. +- Add Cmd.String, Cmd.Int, Cmd.Int64, Cmd.Uint64, Cmd.Float64, and Cmd.Bool helpers. +- Lower memory usage. + +## v6.13 + +- Ring got new options called `HashReplicas` and `Hash`. It is recommended to set `HashReplicas = 1000` for better keys distribution between shards. +- Cluster client was optimized to use much less memory when reloading cluster state. +- PubSub.ReceiveMessage is re-worked to not use ReceiveTimeout so it does not lose data when timeout occurres. In most cases it is recommended to use PubSub.Channel instead. +- Dialer.KeepAlive is set to 5 minutes by default. + +## v6.12 + +- ClusterClient got new option called `ClusterSlots` which allows to build cluster of normal Redis Servers that don't have cluster mode enabled. See https://godoc.org/github.com/go-redis/redis#example-NewClusterClient--ManualSetup diff --git a/vendor/github.com/go-redis/redis/LICENSE b/vendor/github.com/go-redis/redis/LICENSE new file mode 100644 index 000000000..298bed9be --- /dev/null +++ b/vendor/github.com/go-redis/redis/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2013 The github.com/go-redis/redis Authors. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/go-redis/redis/Makefile b/vendor/github.com/go-redis/redis/Makefile new file mode 100644 index 000000000..fa3b4e004 --- /dev/null +++ b/vendor/github.com/go-redis/redis/Makefile @@ -0,0 +1,22 @@ +all: testdeps + go test ./... + go test ./... -short -race + env GOOS=linux GOARCH=386 go test ./... + go vet + go get github.com/gordonklaus/ineffassign + ineffassign . + +testdeps: testdata/redis/src/redis-server + +bench: testdeps + go test ./... -test.run=NONE -test.bench=. -test.benchmem + +.PHONY: all test testdeps bench + +testdata/redis: + mkdir -p $@ + wget -qO- https://github.com/antirez/redis/archive/5.0.tar.gz | tar xvz --strip-components=1 -C $@ + +testdata/redis/src/redis-server: testdata/redis + sed -i.bak 's/libjemalloc.a/libjemalloc.a -lrt/g' $ +} + +func ExampleClient() { + err := client.Set("key", "value", 0).Err() + if err != nil { + panic(err) + } + + val, err := client.Get("key").Result() + if err != nil { + panic(err) + } + fmt.Println("key", val) + + val2, err := client.Get("key2").Result() + if err == redis.Nil { + fmt.Println("key2 does not exist") + } else if err != nil { + panic(err) + } else { + fmt.Println("key2", val2) + } + // Output: key value + // key2 does not exist +} +``` + +## Howto + +Please go through [examples](https://godoc.org/github.com/go-redis/redis#pkg-examples) to get an idea how to use this package. 
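+
+For commands that have no typed wrapper, the generic `Do` command combined with the `Cmd` helpers (added in v6.14) may be used. A minimal sketch, assuming the `client` shown in the example above (the key name is arbitrary):
+
+```go
+// INCR through the generic command path; Cmd.Int converts the integer reply.
+views, err := client.Do("incr", "page:views").Int()
+if err != nil {
+	panic(err)
+}
+fmt.Println("page views:", views)
+```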
+ +## Look and feel + +Some corner cases: + +```go +// SET key value EX 10 NX +set, err := client.SetNX("key", "value", 10*time.Second).Result() + +// SORT list LIMIT 0 2 ASC +vals, err := client.Sort("list", redis.Sort{Offset: 0, Count: 2, Order: "ASC"}).Result() + +// ZRANGEBYSCORE zset -inf +inf WITHSCORES LIMIT 0 2 +vals, err := client.ZRangeByScoreWithScores("zset", redis.ZRangeBy{ + Min: "-inf", + Max: "+inf", + Offset: 0, + Count: 2, +}).Result() + +// ZINTERSTORE out 2 zset1 zset2 WEIGHTS 2 3 AGGREGATE SUM +vals, err := client.ZInterStore("out", redis.ZStore{Weights: []int64{2, 3}}, "zset1", "zset2").Result() + +// EVAL "return {KEYS[1],ARGV[1]}" 1 "key" "hello" +vals, err := client.Eval("return {KEYS[1],ARGV[1]}", []string{"key"}, "hello").Result() +``` + +## Benchmark + +go-redis vs redigo: + +``` +BenchmarkSetGoRedis10Conns64Bytes-4 200000 7621 ns/op 210 B/op 6 allocs/op +BenchmarkSetGoRedis100Conns64Bytes-4 200000 7554 ns/op 210 B/op 6 allocs/op +BenchmarkSetGoRedis10Conns1KB-4 200000 7697 ns/op 210 B/op 6 allocs/op +BenchmarkSetGoRedis100Conns1KB-4 200000 7688 ns/op 210 B/op 6 allocs/op +BenchmarkSetGoRedis10Conns10KB-4 200000 9214 ns/op 210 B/op 6 allocs/op +BenchmarkSetGoRedis100Conns10KB-4 200000 9181 ns/op 210 B/op 6 allocs/op +BenchmarkSetGoRedis10Conns1MB-4 2000 583242 ns/op 2337 B/op 6 allocs/op +BenchmarkSetGoRedis100Conns1MB-4 2000 583089 ns/op 2338 B/op 6 allocs/op +BenchmarkSetRedigo10Conns64Bytes-4 200000 7576 ns/op 208 B/op 7 allocs/op +BenchmarkSetRedigo100Conns64Bytes-4 200000 7782 ns/op 208 B/op 7 allocs/op +BenchmarkSetRedigo10Conns1KB-4 200000 7958 ns/op 208 B/op 7 allocs/op +BenchmarkSetRedigo100Conns1KB-4 200000 7725 ns/op 208 B/op 7 allocs/op +BenchmarkSetRedigo10Conns10KB-4 100000 18442 ns/op 208 B/op 7 allocs/op +BenchmarkSetRedigo100Conns10KB-4 100000 18818 ns/op 208 B/op 7 allocs/op +BenchmarkSetRedigo10Conns1MB-4 2000 668829 ns/op 226 B/op 7 allocs/op +BenchmarkSetRedigo100Conns1MB-4 2000 679542 ns/op 226 B/op 7 allocs/op +``` + +Redis Cluster: + +``` +BenchmarkRedisPing-4 200000 6983 ns/op 116 B/op 4 allocs/op +BenchmarkRedisClusterPing-4 100000 11535 ns/op 117 B/op 4 allocs/op +``` + +## See also + +- [Golang PostgreSQL ORM](https://github.com/go-pg/pg) +- [Golang msgpack](https://github.com/vmihailenco/msgpack) +- [Golang message task queue](https://github.com/vmihailenco/taskq) diff --git a/vendor/github.com/go-redis/redis/cluster.go b/vendor/github.com/go-redis/redis/cluster.go new file mode 100644 index 000000000..ab2c76f05 --- /dev/null +++ b/vendor/github.com/go-redis/redis/cluster.go @@ -0,0 +1,1627 @@ +package redis + +import ( + "context" + "crypto/tls" + "fmt" + "math" + "math/rand" + "net" + "runtime" + "sort" + "sync" + "sync/atomic" + "time" + + "github.com/go-redis/redis/internal" + "github.com/go-redis/redis/internal/hashtag" + "github.com/go-redis/redis/internal/pool" + "github.com/go-redis/redis/internal/proto" +) + +var errClusterNoNodes = fmt.Errorf("redis: cluster has no nodes") + +// ClusterOptions are used to configure a cluster client and should be +// passed to NewClusterClient. +type ClusterOptions struct { + // A seed list of host:port addresses of cluster nodes. + Addrs []string + + // The maximum number of retries before giving up. Command is retried + // on network errors and MOVED/ASK redirects. + // Default is 8 retries. + MaxRedirects int + + // Enables read-only commands on slave nodes. + ReadOnly bool + // Allows routing read-only commands to the closest master or slave node. + // It automatically enables ReadOnly. 
+ RouteByLatency bool + // Allows routing read-only commands to the random master or slave node. + // It automatically enables ReadOnly. + RouteRandomly bool + + // Optional function that returns cluster slots information. + // It is useful to manually create cluster of standalone Redis servers + // and load-balance read/write operations between master and slaves. + // It can use service like ZooKeeper to maintain configuration information + // and Cluster.ReloadState to manually trigger state reloading. + ClusterSlots func() ([]ClusterSlot, error) + + // Optional hook that is called when a new node is created. + OnNewNode func(*Client) + + // Following options are copied from Options struct. + + OnConnect func(*Conn) error + + Password string + + MaxRetries int + MinRetryBackoff time.Duration + MaxRetryBackoff time.Duration + + DialTimeout time.Duration + ReadTimeout time.Duration + WriteTimeout time.Duration + + // PoolSize applies per cluster node and not for the whole cluster. + PoolSize int + MinIdleConns int + MaxConnAge time.Duration + PoolTimeout time.Duration + IdleTimeout time.Duration + IdleCheckFrequency time.Duration + + TLSConfig *tls.Config +} + +func (opt *ClusterOptions) init() { + if opt.MaxRedirects == -1 { + opt.MaxRedirects = 0 + } else if opt.MaxRedirects == 0 { + opt.MaxRedirects = 8 + } + + if (opt.RouteByLatency || opt.RouteRandomly) && opt.ClusterSlots == nil { + opt.ReadOnly = true + } + + if opt.PoolSize == 0 { + opt.PoolSize = 5 * runtime.NumCPU() + } + + switch opt.ReadTimeout { + case -1: + opt.ReadTimeout = 0 + case 0: + opt.ReadTimeout = 3 * time.Second + } + switch opt.WriteTimeout { + case -1: + opt.WriteTimeout = 0 + case 0: + opt.WriteTimeout = opt.ReadTimeout + } + + switch opt.MinRetryBackoff { + case -1: + opt.MinRetryBackoff = 0 + case 0: + opt.MinRetryBackoff = 8 * time.Millisecond + } + switch opt.MaxRetryBackoff { + case -1: + opt.MaxRetryBackoff = 0 + case 0: + opt.MaxRetryBackoff = 512 * time.Millisecond + } +} + +func (opt *ClusterOptions) clientOptions() *Options { + const disableIdleCheck = -1 + + return &Options{ + OnConnect: opt.OnConnect, + + MaxRetries: opt.MaxRetries, + MinRetryBackoff: opt.MinRetryBackoff, + MaxRetryBackoff: opt.MaxRetryBackoff, + Password: opt.Password, + readOnly: opt.ReadOnly, + + DialTimeout: opt.DialTimeout, + ReadTimeout: opt.ReadTimeout, + WriteTimeout: opt.WriteTimeout, + + PoolSize: opt.PoolSize, + MinIdleConns: opt.MinIdleConns, + MaxConnAge: opt.MaxConnAge, + PoolTimeout: opt.PoolTimeout, + IdleTimeout: opt.IdleTimeout, + IdleCheckFrequency: disableIdleCheck, + + TLSConfig: opt.TLSConfig, + } +} + +//------------------------------------------------------------------------------ + +type clusterNode struct { + Client *Client + + latency uint32 // atomic + generation uint32 // atomic + loading uint32 // atomic +} + +func newClusterNode(clOpt *ClusterOptions, addr string) *clusterNode { + opt := clOpt.clientOptions() + opt.Addr = addr + node := clusterNode{ + Client: NewClient(opt), + } + + node.latency = math.MaxUint32 + if clOpt.RouteByLatency { + go node.updateLatency() + } + + if clOpt.OnNewNode != nil { + clOpt.OnNewNode(node.Client) + } + + return &node +} + +func (n *clusterNode) String() string { + return n.Client.String() +} + +func (n *clusterNode) Close() error { + return n.Client.Close() +} + +func (n *clusterNode) updateLatency() { + const probes = 10 + + var latency uint32 + for i := 0; i < probes; i++ { + start := time.Now() + n.Client.Ping() + probe := uint32(time.Since(start) / time.Microsecond) 
+ latency = (latency + probe) / 2 + } + atomic.StoreUint32(&n.latency, latency) +} + +func (n *clusterNode) Latency() time.Duration { + latency := atomic.LoadUint32(&n.latency) + return time.Duration(latency) * time.Microsecond +} + +func (n *clusterNode) MarkAsLoading() { + atomic.StoreUint32(&n.loading, uint32(time.Now().Unix())) +} + +func (n *clusterNode) Loading() bool { + const minute = int64(time.Minute / time.Second) + + loading := atomic.LoadUint32(&n.loading) + if loading == 0 { + return false + } + if time.Now().Unix()-int64(loading) < minute { + return true + } + atomic.StoreUint32(&n.loading, 0) + return false +} + +func (n *clusterNode) Generation() uint32 { + return atomic.LoadUint32(&n.generation) +} + +func (n *clusterNode) SetGeneration(gen uint32) { + for { + v := atomic.LoadUint32(&n.generation) + if gen < v || atomic.CompareAndSwapUint32(&n.generation, v, gen) { + break + } + } +} + +//------------------------------------------------------------------------------ + +type clusterNodes struct { + opt *ClusterOptions + + mu sync.RWMutex + allAddrs []string + allNodes map[string]*clusterNode + clusterAddrs []string + closed bool + + _generation uint32 // atomic +} + +func newClusterNodes(opt *ClusterOptions) *clusterNodes { + return &clusterNodes{ + opt: opt, + + allAddrs: opt.Addrs, + allNodes: make(map[string]*clusterNode), + } +} + +func (c *clusterNodes) Close() error { + c.mu.Lock() + defer c.mu.Unlock() + + if c.closed { + return nil + } + c.closed = true + + var firstErr error + for _, node := range c.allNodes { + if err := node.Client.Close(); err != nil && firstErr == nil { + firstErr = err + } + } + + c.allNodes = nil + c.clusterAddrs = nil + + return firstErr +} + +func (c *clusterNodes) Addrs() ([]string, error) { + var addrs []string + c.mu.RLock() + closed := c.closed + if !closed { + if len(c.clusterAddrs) > 0 { + addrs = c.clusterAddrs + } else { + addrs = c.allAddrs + } + } + c.mu.RUnlock() + + if closed { + return nil, pool.ErrClosed + } + if len(addrs) == 0 { + return nil, errClusterNoNodes + } + return addrs, nil +} + +func (c *clusterNodes) NextGeneration() uint32 { + return atomic.AddUint32(&c._generation, 1) +} + +// GC removes unused nodes. 
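+// Nodes whose generation is older than the given one are closed and dropped.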
+func (c *clusterNodes) GC(generation uint32) { + var collected []*clusterNode + c.mu.Lock() + for addr, node := range c.allNodes { + if node.Generation() >= generation { + continue + } + + c.clusterAddrs = remove(c.clusterAddrs, addr) + delete(c.allNodes, addr) + collected = append(collected, node) + } + c.mu.Unlock() + + for _, node := range collected { + _ = node.Client.Close() + } +} + +func (c *clusterNodes) Get(addr string) (*clusterNode, error) { + var node *clusterNode + var err error + c.mu.RLock() + if c.closed { + err = pool.ErrClosed + } else { + node = c.allNodes[addr] + } + c.mu.RUnlock() + return node, err +} + +func (c *clusterNodes) GetOrCreate(addr string) (*clusterNode, error) { + node, err := c.Get(addr) + if err != nil { + return nil, err + } + if node != nil { + return node, nil + } + + c.mu.Lock() + defer c.mu.Unlock() + + if c.closed { + return nil, pool.ErrClosed + } + + node, ok := c.allNodes[addr] + if ok { + return node, err + } + + node = newClusterNode(c.opt, addr) + + c.allAddrs = appendIfNotExists(c.allAddrs, addr) + c.clusterAddrs = append(c.clusterAddrs, addr) + c.allNodes[addr] = node + + return node, err +} + +func (c *clusterNodes) All() ([]*clusterNode, error) { + c.mu.RLock() + defer c.mu.RUnlock() + + if c.closed { + return nil, pool.ErrClosed + } + + cp := make([]*clusterNode, 0, len(c.allNodes)) + for _, node := range c.allNodes { + cp = append(cp, node) + } + return cp, nil +} + +func (c *clusterNodes) Random() (*clusterNode, error) { + addrs, err := c.Addrs() + if err != nil { + return nil, err + } + + n := rand.Intn(len(addrs)) + return c.GetOrCreate(addrs[n]) +} + +//------------------------------------------------------------------------------ + +type clusterSlot struct { + start, end int + nodes []*clusterNode +} + +type clusterSlotSlice []*clusterSlot + +func (p clusterSlotSlice) Len() int { + return len(p) +} + +func (p clusterSlotSlice) Less(i, j int) bool { + return p[i].start < p[j].start +} + +func (p clusterSlotSlice) Swap(i, j int) { + p[i], p[j] = p[j], p[i] +} + +type clusterState struct { + nodes *clusterNodes + Masters []*clusterNode + Slaves []*clusterNode + + slots []*clusterSlot + + generation uint32 + createdAt time.Time +} + +func newClusterState( + nodes *clusterNodes, slots []ClusterSlot, origin string, +) (*clusterState, error) { + c := clusterState{ + nodes: nodes, + + slots: make([]*clusterSlot, 0, len(slots)), + + generation: nodes.NextGeneration(), + createdAt: time.Now(), + } + + originHost, _, _ := net.SplitHostPort(origin) + isLoopbackOrigin := isLoopback(originHost) + + for _, slot := range slots { + var nodes []*clusterNode + for i, slotNode := range slot.Nodes { + addr := slotNode.Addr + if !isLoopbackOrigin { + addr = replaceLoopbackHost(addr, originHost) + } + + node, err := c.nodes.GetOrCreate(addr) + if err != nil { + return nil, err + } + + node.SetGeneration(c.generation) + nodes = append(nodes, node) + + if i == 0 { + c.Masters = appendUniqueNode(c.Masters, node) + } else { + c.Slaves = appendUniqueNode(c.Slaves, node) + } + } + + c.slots = append(c.slots, &clusterSlot{ + start: slot.Start, + end: slot.End, + nodes: nodes, + }) + } + + sort.Sort(clusterSlotSlice(c.slots)) + + time.AfterFunc(time.Minute, func() { + nodes.GC(c.generation) + }) + + return &c, nil +} + +func replaceLoopbackHost(nodeAddr, originHost string) string { + nodeHost, nodePort, err := net.SplitHostPort(nodeAddr) + if err != nil { + return nodeAddr + } + + nodeIP := net.ParseIP(nodeHost) + if nodeIP == nil { + return nodeAddr + } + + 
if !nodeIP.IsLoopback() { + return nodeAddr + } + + // Use origin host which is not loopback and node port. + return net.JoinHostPort(originHost, nodePort) +} + +func isLoopback(host string) bool { + ip := net.ParseIP(host) + if ip == nil { + return true + } + return ip.IsLoopback() +} + +func (c *clusterState) slotMasterNode(slot int) (*clusterNode, error) { + nodes := c.slotNodes(slot) + if len(nodes) > 0 { + return nodes[0], nil + } + return c.nodes.Random() +} + +func (c *clusterState) slotSlaveNode(slot int) (*clusterNode, error) { + nodes := c.slotNodes(slot) + switch len(nodes) { + case 0: + return c.nodes.Random() + case 1: + return nodes[0], nil + case 2: + if slave := nodes[1]; !slave.Loading() { + return slave, nil + } + return nodes[0], nil + default: + var slave *clusterNode + for i := 0; i < 10; i++ { + n := rand.Intn(len(nodes)-1) + 1 + slave = nodes[n] + if !slave.Loading() { + return slave, nil + } + } + + // All slaves are loading - use master. + return nodes[0], nil + } +} + +func (c *clusterState) slotClosestNode(slot int) (*clusterNode, error) { + const threshold = time.Millisecond + + nodes := c.slotNodes(slot) + if len(nodes) == 0 { + return c.nodes.Random() + } + + var node *clusterNode + for _, n := range nodes { + if n.Loading() { + continue + } + if node == nil || node.Latency()-n.Latency() > threshold { + node = n + } + } + return node, nil +} + +func (c *clusterState) slotRandomNode(slot int) *clusterNode { + nodes := c.slotNodes(slot) + n := rand.Intn(len(nodes)) + return nodes[n] +} + +func (c *clusterState) slotNodes(slot int) []*clusterNode { + i := sort.Search(len(c.slots), func(i int) bool { + return c.slots[i].end >= slot + }) + if i >= len(c.slots) { + return nil + } + x := c.slots[i] + if slot >= x.start && slot <= x.end { + return x.nodes + } + return nil +} + +//------------------------------------------------------------------------------ + +type clusterStateHolder struct { + load func() (*clusterState, error) + + state atomic.Value + reloading uint32 // atomic +} + +func newClusterStateHolder(fn func() (*clusterState, error)) *clusterStateHolder { + return &clusterStateHolder{ + load: fn, + } +} + +func (c *clusterStateHolder) Reload() (*clusterState, error) { + state, err := c.load() + if err != nil { + return nil, err + } + c.state.Store(state) + return state, nil +} + +func (c *clusterStateHolder) LazyReload() { + if !atomic.CompareAndSwapUint32(&c.reloading, 0, 1) { + return + } + go func() { + defer atomic.StoreUint32(&c.reloading, 0) + + _, err := c.Reload() + if err != nil { + return + } + time.Sleep(100 * time.Millisecond) + }() +} + +func (c *clusterStateHolder) Get() (*clusterState, error) { + v := c.state.Load() + if v != nil { + state := v.(*clusterState) + if time.Since(state.createdAt) > time.Minute { + c.LazyReload() + } + return state, nil + } + return c.Reload() +} + +func (c *clusterStateHolder) ReloadOrGet() (*clusterState, error) { + state, err := c.Reload() + if err == nil { + return state, nil + } + return c.Get() +} + +//------------------------------------------------------------------------------ + +// ClusterClient is a Redis Cluster client representing a pool of zero +// or more underlying connections. It's safe for concurrent use by +// multiple goroutines. 
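+//
+// A minimal construction sketch (the addresses are placeholders):
+//
+//	client := redis.NewClusterClient(&redis.ClusterOptions{
+//		Addrs: []string{":7000", ":7001", ":7002"},
+//	})
+//	if err := client.Ping().Err(); err != nil {
+//		// handle the connection error
+//	}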
+type ClusterClient struct { + cmdable + + ctx context.Context + + opt *ClusterOptions + nodes *clusterNodes + state *clusterStateHolder + cmdsInfoCache *cmdsInfoCache + + process func(Cmder) error + processPipeline func([]Cmder) error + processTxPipeline func([]Cmder) error +} + +// NewClusterClient returns a Redis Cluster client as described in +// http://redis.io/topics/cluster-spec. +func NewClusterClient(opt *ClusterOptions) *ClusterClient { + opt.init() + + c := &ClusterClient{ + opt: opt, + nodes: newClusterNodes(opt), + } + c.state = newClusterStateHolder(c.loadState) + c.cmdsInfoCache = newCmdsInfoCache(c.cmdsInfo) + + c.process = c.defaultProcess + c.processPipeline = c.defaultProcessPipeline + c.processTxPipeline = c.defaultProcessTxPipeline + + c.init() + if opt.IdleCheckFrequency > 0 { + go c.reaper(opt.IdleCheckFrequency) + } + + return c +} + +func (c *ClusterClient) init() { + c.cmdable.setProcessor(c.Process) +} + +// ReloadState reloads cluster state. If available it calls ClusterSlots func +// to get cluster slots information. +func (c *ClusterClient) ReloadState() error { + _, err := c.state.Reload() + return err +} + +func (c *ClusterClient) Context() context.Context { + if c.ctx != nil { + return c.ctx + } + return context.Background() +} + +func (c *ClusterClient) WithContext(ctx context.Context) *ClusterClient { + if ctx == nil { + panic("nil context") + } + c2 := c.clone() + c2.ctx = ctx + return c2 +} + +func (c *ClusterClient) clone() *ClusterClient { + cp := *c + cp.init() + return &cp +} + +// Options returns read-only Options that were used to create the client. +func (c *ClusterClient) Options() *ClusterOptions { + return c.opt +} + +func (c *ClusterClient) retryBackoff(attempt int) time.Duration { + return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff) +} + +func (c *ClusterClient) cmdsInfo() (map[string]*CommandInfo, error) { + addrs, err := c.nodes.Addrs() + if err != nil { + return nil, err + } + + var firstErr error + for _, addr := range addrs { + node, err := c.nodes.Get(addr) + if err != nil { + return nil, err + } + if node == nil { + continue + } + + info, err := node.Client.Command().Result() + if err == nil { + return info, nil + } + if firstErr == nil { + firstErr = err + } + } + return nil, firstErr +} + +func (c *ClusterClient) cmdInfo(name string) *CommandInfo { + cmdsInfo, err := c.cmdsInfoCache.Get() + if err != nil { + return nil + } + + info := cmdsInfo[name] + if info == nil { + internal.Logf("info for cmd=%s not found", name) + } + return info +} + +func cmdSlot(cmd Cmder, pos int) int { + if pos == 0 { + return hashtag.RandomSlot() + } + firstKey := cmd.stringArg(pos) + return hashtag.Slot(firstKey) +} + +func (c *ClusterClient) cmdSlot(cmd Cmder) int { + args := cmd.Args() + if args[0] == "cluster" && args[1] == "getkeysinslot" { + return args[2].(int) + } + + cmdInfo := c.cmdInfo(cmd.Name()) + return cmdSlot(cmd, cmdFirstKeyPos(cmd, cmdInfo)) +} + +func (c *ClusterClient) cmdSlotAndNode(cmd Cmder) (int, *clusterNode, error) { + state, err := c.state.Get() + if err != nil { + return 0, nil, err + } + + cmdInfo := c.cmdInfo(cmd.Name()) + slot := c.cmdSlot(cmd) + + if c.opt.ReadOnly && cmdInfo != nil && cmdInfo.ReadOnly { + if c.opt.RouteByLatency { + node, err := state.slotClosestNode(slot) + return slot, node, err + } + + if c.opt.RouteRandomly { + node := state.slotRandomNode(slot) + return slot, node, nil + } + + node, err := state.slotSlaveNode(slot) + return slot, node, err + } + + node, err := 
state.slotMasterNode(slot) + return slot, node, err +} + +func (c *ClusterClient) slotMasterNode(slot int) (*clusterNode, error) { + state, err := c.state.Get() + if err != nil { + return nil, err + } + + nodes := state.slotNodes(slot) + if len(nodes) > 0 { + return nodes[0], nil + } + return c.nodes.Random() +} + +func (c *ClusterClient) Watch(fn func(*Tx) error, keys ...string) error { + if len(keys) == 0 { + return fmt.Errorf("redis: Watch requires at least one key") + } + + slot := hashtag.Slot(keys[0]) + for _, key := range keys[1:] { + if hashtag.Slot(key) != slot { + err := fmt.Errorf("redis: Watch requires all keys to be in the same slot") + return err + } + } + + node, err := c.slotMasterNode(slot) + if err != nil { + return err + } + + for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ { + if attempt > 0 { + time.Sleep(c.retryBackoff(attempt)) + } + + err = node.Client.Watch(fn, keys...) + if err == nil { + break + } + if err != Nil { + c.state.LazyReload() + } + + moved, ask, addr := internal.IsMovedError(err) + if moved || ask { + node, err = c.nodes.GetOrCreate(addr) + if err != nil { + return err + } + continue + } + + if err == pool.ErrClosed || internal.IsReadOnlyError(err) { + node, err = c.slotMasterNode(slot) + if err != nil { + return err + } + continue + } + + if internal.IsRetryableError(err, true) { + continue + } + + return err + } + + return err +} + +// Close closes the cluster client, releasing any open resources. +// +// It is rare to Close a ClusterClient, as the ClusterClient is meant +// to be long-lived and shared between many goroutines. +func (c *ClusterClient) Close() error { + return c.nodes.Close() +} + +// Do creates a Cmd from the args and processes the cmd. +func (c *ClusterClient) Do(args ...interface{}) *Cmd { + cmd := NewCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *ClusterClient) WrapProcess( + fn func(oldProcess func(Cmder) error) func(Cmder) error, +) { + c.process = fn(c.process) +} + +func (c *ClusterClient) Process(cmd Cmder) error { + return c.process(cmd) +} + +func (c *ClusterClient) defaultProcess(cmd Cmder) error { + var node *clusterNode + var ask bool + for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ { + if attempt > 0 { + time.Sleep(c.retryBackoff(attempt)) + } + + if node == nil { + var err error + _, node, err = c.cmdSlotAndNode(cmd) + if err != nil { + cmd.setErr(err) + break + } + } + + var err error + if ask { + pipe := node.Client.Pipeline() + _ = pipe.Process(NewCmd("ASKING")) + _ = pipe.Process(cmd) + _, err = pipe.Exec() + _ = pipe.Close() + ask = false + } else { + err = node.Client.Process(cmd) + } + + // If there is no error - we are done. + if err == nil { + break + } + if err != Nil { + c.state.LazyReload() + } + + // If slave is loading - pick another node. + if c.opt.ReadOnly && internal.IsLoadingError(err) { + node.MarkAsLoading() + node = nil + continue + } + + var moved bool + var addr string + moved, ask, addr = internal.IsMovedError(err) + if moved || ask { + node, err = c.nodes.GetOrCreate(addr) + if err != nil { + break + } + continue + } + + if err == pool.ErrClosed || internal.IsReadOnlyError(err) { + node = nil + continue + } + + if internal.IsRetryableError(err, true) { + // First retry the same node. + if attempt == 0 { + continue + } + + // Second try random node. + node, err = c.nodes.Random() + if err != nil { + break + } + continue + } + + break + } + + return cmd.Err() +} + +// ForEachMaster concurrently calls the fn on each master node in the cluster. 
+// It returns the first error if any. +func (c *ClusterClient) ForEachMaster(fn func(client *Client) error) error { + state, err := c.state.ReloadOrGet() + if err != nil { + return err + } + + var wg sync.WaitGroup + errCh := make(chan error, 1) + for _, master := range state.Masters { + wg.Add(1) + go func(node *clusterNode) { + defer wg.Done() + err := fn(node.Client) + if err != nil { + select { + case errCh <- err: + default: + } + } + }(master) + } + wg.Wait() + + select { + case err := <-errCh: + return err + default: + return nil + } +} + +// ForEachSlave concurrently calls the fn on each slave node in the cluster. +// It returns the first error if any. +func (c *ClusterClient) ForEachSlave(fn func(client *Client) error) error { + state, err := c.state.ReloadOrGet() + if err != nil { + return err + } + + var wg sync.WaitGroup + errCh := make(chan error, 1) + for _, slave := range state.Slaves { + wg.Add(1) + go func(node *clusterNode) { + defer wg.Done() + err := fn(node.Client) + if err != nil { + select { + case errCh <- err: + default: + } + } + }(slave) + } + wg.Wait() + + select { + case err := <-errCh: + return err + default: + return nil + } +} + +// ForEachNode concurrently calls the fn on each known node in the cluster. +// It returns the first error if any. +func (c *ClusterClient) ForEachNode(fn func(client *Client) error) error { + state, err := c.state.ReloadOrGet() + if err != nil { + return err + } + + var wg sync.WaitGroup + errCh := make(chan error, 1) + worker := func(node *clusterNode) { + defer wg.Done() + err := fn(node.Client) + if err != nil { + select { + case errCh <- err: + default: + } + } + } + + for _, node := range state.Masters { + wg.Add(1) + go worker(node) + } + for _, node := range state.Slaves { + wg.Add(1) + go worker(node) + } + + wg.Wait() + select { + case err := <-errCh: + return err + default: + return nil + } +} + +// PoolStats returns accumulated connection pool stats. +func (c *ClusterClient) PoolStats() *PoolStats { + var acc PoolStats + + state, _ := c.state.Get() + if state == nil { + return &acc + } + + for _, node := range state.Masters { + s := node.Client.connPool.Stats() + acc.Hits += s.Hits + acc.Misses += s.Misses + acc.Timeouts += s.Timeouts + + acc.TotalConns += s.TotalConns + acc.IdleConns += s.IdleConns + acc.StaleConns += s.StaleConns + } + + for _, node := range state.Slaves { + s := node.Client.connPool.Stats() + acc.Hits += s.Hits + acc.Misses += s.Misses + acc.Timeouts += s.Timeouts + + acc.TotalConns += s.TotalConns + acc.IdleConns += s.IdleConns + acc.StaleConns += s.StaleConns + } + + return &acc +} + +func (c *ClusterClient) loadState() (*clusterState, error) { + if c.opt.ClusterSlots != nil { + slots, err := c.opt.ClusterSlots() + if err != nil { + return nil, err + } + return newClusterState(c.nodes, slots, "") + } + + addrs, err := c.nodes.Addrs() + if err != nil { + return nil, err + } + + var firstErr error + for _, addr := range addrs { + node, err := c.nodes.GetOrCreate(addr) + if err != nil { + if firstErr == nil { + firstErr = err + } + continue + } + + slots, err := node.Client.ClusterSlots().Result() + if err != nil { + if firstErr == nil { + firstErr = err + } + continue + } + + return newClusterState(c.nodes, slots, node.Client.opt.Addr) + } + + return nil, firstErr +} + +// reaper closes idle connections to the cluster. 
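+// It runs in its own goroutine on a ticker and reaps stale connections from every node's pool.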
+func (c *ClusterClient) reaper(idleCheckFrequency time.Duration) { + ticker := time.NewTicker(idleCheckFrequency) + defer ticker.Stop() + + for range ticker.C { + nodes, err := c.nodes.All() + if err != nil { + break + } + + for _, node := range nodes { + _, err := node.Client.connPool.(*pool.ConnPool).ReapStaleConns() + if err != nil { + internal.Logf("ReapStaleConns failed: %s", err) + } + } + } +} + +func (c *ClusterClient) Pipeline() Pipeliner { + pipe := Pipeline{ + exec: c.processPipeline, + } + pipe.statefulCmdable.setProcessor(pipe.Process) + return &pipe +} + +func (c *ClusterClient) Pipelined(fn func(Pipeliner) error) ([]Cmder, error) { + return c.Pipeline().Pipelined(fn) +} + +func (c *ClusterClient) WrapProcessPipeline( + fn func(oldProcess func([]Cmder) error) func([]Cmder) error, +) { + c.processPipeline = fn(c.processPipeline) + c.processTxPipeline = fn(c.processTxPipeline) +} + +func (c *ClusterClient) defaultProcessPipeline(cmds []Cmder) error { + cmdsMap := newCmdsMap() + err := c.mapCmdsByNode(cmds, cmdsMap) + if err != nil { + setCmdsErr(cmds, err) + return err + } + + for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ { + if attempt > 0 { + time.Sleep(c.retryBackoff(attempt)) + } + + failedCmds := newCmdsMap() + var wg sync.WaitGroup + + for node, cmds := range cmdsMap.m { + wg.Add(1) + go func(node *clusterNode, cmds []Cmder) { + defer wg.Done() + + cn, err := node.Client.getConn() + if err != nil { + if err == pool.ErrClosed { + c.mapCmdsByNode(cmds, failedCmds) + } else { + setCmdsErr(cmds, err) + } + return + } + + err = c.pipelineProcessCmds(node, cn, cmds, failedCmds) + node.Client.releaseConnStrict(cn, err) + }(node, cmds) + } + + wg.Wait() + if len(failedCmds.m) == 0 { + break + } + cmdsMap = failedCmds + } + + return cmdsFirstErr(cmds) +} + +type cmdsMap struct { + mu sync.Mutex + m map[*clusterNode][]Cmder +} + +func newCmdsMap() *cmdsMap { + return &cmdsMap{ + m: make(map[*clusterNode][]Cmder), + } +} + +func (c *ClusterClient) mapCmdsByNode(cmds []Cmder, cmdsMap *cmdsMap) error { + state, err := c.state.Get() + if err != nil { + setCmdsErr(cmds, err) + return err + } + + cmdsAreReadOnly := c.cmdsAreReadOnly(cmds) + for _, cmd := range cmds { + var node *clusterNode + var err error + if cmdsAreReadOnly { + _, node, err = c.cmdSlotAndNode(cmd) + } else { + slot := c.cmdSlot(cmd) + node, err = state.slotMasterNode(slot) + } + if err != nil { + return err + } + cmdsMap.mu.Lock() + cmdsMap.m[node] = append(cmdsMap.m[node], cmd) + cmdsMap.mu.Unlock() + } + return nil +} + +func (c *ClusterClient) cmdsAreReadOnly(cmds []Cmder) bool { + for _, cmd := range cmds { + cmdInfo := c.cmdInfo(cmd.Name()) + if cmdInfo == nil || !cmdInfo.ReadOnly { + return false + } + } + return true +} + +func (c *ClusterClient) pipelineProcessCmds( + node *clusterNode, cn *pool.Conn, cmds []Cmder, failedCmds *cmdsMap, +) error { + err := cn.WithWriter(c.opt.WriteTimeout, func(wr *proto.Writer) error { + return writeCmd(wr, cmds...) 
+ }) + if err != nil { + setCmdsErr(cmds, err) + failedCmds.mu.Lock() + failedCmds.m[node] = cmds + failedCmds.mu.Unlock() + return err + } + + err = cn.WithReader(c.opt.ReadTimeout, func(rd *proto.Reader) error { + return c.pipelineReadCmds(node, rd, cmds, failedCmds) + }) + return err +} + +func (c *ClusterClient) pipelineReadCmds( + node *clusterNode, rd *proto.Reader, cmds []Cmder, failedCmds *cmdsMap, +) error { + var firstErr error + for _, cmd := range cmds { + err := cmd.readReply(rd) + if err == nil { + continue + } + + if c.checkMovedErr(cmd, err, failedCmds) { + continue + } + + if internal.IsRedisError(err) { + continue + } + + failedCmds.mu.Lock() + failedCmds.m[node] = append(failedCmds.m[node], cmd) + failedCmds.mu.Unlock() + if firstErr == nil { + firstErr = err + } + } + return firstErr +} + +func (c *ClusterClient) checkMovedErr( + cmd Cmder, err error, failedCmds *cmdsMap, +) bool { + moved, ask, addr := internal.IsMovedError(err) + + if moved { + c.state.LazyReload() + + node, err := c.nodes.GetOrCreate(addr) + if err != nil { + return false + } + + failedCmds.mu.Lock() + failedCmds.m[node] = append(failedCmds.m[node], cmd) + failedCmds.mu.Unlock() + return true + } + + if ask { + node, err := c.nodes.GetOrCreate(addr) + if err != nil { + return false + } + + failedCmds.mu.Lock() + failedCmds.m[node] = append(failedCmds.m[node], NewCmd("ASKING"), cmd) + failedCmds.mu.Unlock() + return true + } + + return false +} + +// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC. +func (c *ClusterClient) TxPipeline() Pipeliner { + pipe := Pipeline{ + exec: c.processTxPipeline, + } + pipe.statefulCmdable.setProcessor(pipe.Process) + return &pipe +} + +func (c *ClusterClient) TxPipelined(fn func(Pipeliner) error) ([]Cmder, error) { + return c.TxPipeline().Pipelined(fn) +} + +func (c *ClusterClient) defaultProcessTxPipeline(cmds []Cmder) error { + state, err := c.state.Get() + if err != nil { + return err + } + + cmdsMap := c.mapCmdsBySlot(cmds) + for slot, cmds := range cmdsMap { + node, err := state.slotMasterNode(slot) + if err != nil { + setCmdsErr(cmds, err) + continue + } + cmdsMap := map[*clusterNode][]Cmder{node: cmds} + + for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ { + if attempt > 0 { + time.Sleep(c.retryBackoff(attempt)) + } + + failedCmds := newCmdsMap() + var wg sync.WaitGroup + + for node, cmds := range cmdsMap { + wg.Add(1) + go func(node *clusterNode, cmds []Cmder) { + defer wg.Done() + + cn, err := node.Client.getConn() + if err != nil { + if err == pool.ErrClosed { + c.mapCmdsByNode(cmds, failedCmds) + } else { + setCmdsErr(cmds, err) + } + return + } + + err = c.txPipelineProcessCmds(node, cn, cmds, failedCmds) + node.Client.releaseConnStrict(cn, err) + }(node, cmds) + } + + wg.Wait() + if len(failedCmds.m) == 0 { + break + } + cmdsMap = failedCmds.m + } + } + + return cmdsFirstErr(cmds) +} + +func (c *ClusterClient) mapCmdsBySlot(cmds []Cmder) map[int][]Cmder { + cmdsMap := make(map[int][]Cmder) + for _, cmd := range cmds { + slot := c.cmdSlot(cmd) + cmdsMap[slot] = append(cmdsMap[slot], cmd) + } + return cmdsMap +} + +func (c *ClusterClient) txPipelineProcessCmds( + node *clusterNode, cn *pool.Conn, cmds []Cmder, failedCmds *cmdsMap, +) error { + err := cn.WithWriter(c.opt.WriteTimeout, func(wr *proto.Writer) error { + return txPipelineWriteMulti(wr, cmds) + }) + if err != nil { + setCmdsErr(cmds, err) + failedCmds.mu.Lock() + failedCmds.m[node] = cmds + failedCmds.mu.Unlock() + return err + } + + err = 
cn.WithReader(c.opt.ReadTimeout, func(rd *proto.Reader) error { + err := c.txPipelineReadQueued(rd, cmds, failedCmds) + if err != nil { + setCmdsErr(cmds, err) + return err + } + return pipelineReadCmds(rd, cmds) + }) + return err +} + +func (c *ClusterClient) txPipelineReadQueued( + rd *proto.Reader, cmds []Cmder, failedCmds *cmdsMap, +) error { + // Parse queued replies. + var statusCmd StatusCmd + if err := statusCmd.readReply(rd); err != nil { + return err + } + + for _, cmd := range cmds { + err := statusCmd.readReply(rd) + if err == nil { + continue + } + + if c.checkMovedErr(cmd, err, failedCmds) || internal.IsRedisError(err) { + continue + } + + return err + } + + // Parse number of replies. + line, err := rd.ReadLine() + if err != nil { + if err == Nil { + err = TxFailedErr + } + return err + } + + switch line[0] { + case proto.ErrorReply: + err := proto.ParseErrorReply(line) + for _, cmd := range cmds { + if !c.checkMovedErr(cmd, err, failedCmds) { + break + } + } + return err + case proto.ArrayReply: + // ok + default: + err := fmt.Errorf("redis: expected '*', but got line %q", line) + return err + } + + return nil +} + +func (c *ClusterClient) pubSub() *PubSub { + var node *clusterNode + pubsub := &PubSub{ + opt: c.opt.clientOptions(), + + newConn: func(channels []string) (*pool.Conn, error) { + if node != nil { + panic("node != nil") + } + + var err error + if len(channels) > 0 { + slot := hashtag.Slot(channels[0]) + node, err = c.slotMasterNode(slot) + } else { + node, err = c.nodes.Random() + } + if err != nil { + return nil, err + } + + cn, err := node.Client.newConn() + if err != nil { + node = nil + + return nil, err + } + + return cn, nil + }, + closeConn: func(cn *pool.Conn) error { + err := node.Client.connPool.CloseConn(cn) + node = nil + return err + }, + } + pubsub.init() + + return pubsub +} + +// Subscribe subscribes the client to the specified channels. +// Channels can be omitted to create empty subscription. +func (c *ClusterClient) Subscribe(channels ...string) *PubSub { + pubsub := c.pubSub() + if len(channels) > 0 { + _ = pubsub.Subscribe(channels...) + } + return pubsub +} + +// PSubscribe subscribes the client to the given patterns. +// Patterns can be omitted to create empty subscription. +func (c *ClusterClient) PSubscribe(channels ...string) *PubSub { + pubsub := c.pubSub() + if len(channels) > 0 { + _ = pubsub.PSubscribe(channels...) + } + return pubsub +} + +func appendUniqueNode(nodes []*clusterNode, node *clusterNode) []*clusterNode { + for _, n := range nodes { + if n == node { + return nodes + } + } + return append(nodes, node) +} + +func appendIfNotExists(ss []string, es ...string) []string { +loop: + for _, e := range es { + for _, s := range ss { + if s == e { + continue loop + } + } + ss = append(ss, e) + } + return ss +} + +func remove(ss []string, es ...string) []string { + if len(es) == 0 { + return ss[:0] + } + for _, e := range es { + for i, s := range ss { + if s == e { + ss = append(ss[:i], ss[i+1:]...) 
+ break + } + } + } + return ss +} diff --git a/vendor/github.com/go-redis/redis/cluster_commands.go b/vendor/github.com/go-redis/redis/cluster_commands.go new file mode 100644 index 000000000..dff62c902 --- /dev/null +++ b/vendor/github.com/go-redis/redis/cluster_commands.go @@ -0,0 +1,22 @@ +package redis + +import "sync/atomic" + +func (c *ClusterClient) DBSize() *IntCmd { + cmd := NewIntCmd("dbsize") + var size int64 + err := c.ForEachMaster(func(master *Client) error { + n, err := master.DBSize().Result() + if err != nil { + return err + } + atomic.AddInt64(&size, n) + return nil + }) + if err != nil { + cmd.setErr(err) + return cmd + } + cmd.val = size + return cmd +} diff --git a/vendor/github.com/go-redis/redis/command.go b/vendor/github.com/go-redis/redis/command.go new file mode 100644 index 000000000..c70973d3b --- /dev/null +++ b/vendor/github.com/go-redis/redis/command.go @@ -0,0 +1,1972 @@ +package redis + +import ( + "fmt" + "net" + "strconv" + "strings" + "time" + + "github.com/go-redis/redis/internal" + "github.com/go-redis/redis/internal/proto" +) + +type Cmder interface { + Name() string + Args() []interface{} + stringArg(int) string + + readReply(rd *proto.Reader) error + setErr(error) + + readTimeout() *time.Duration + + Err() error +} + +func setCmdsErr(cmds []Cmder, e error) { + for _, cmd := range cmds { + if cmd.Err() == nil { + cmd.setErr(e) + } + } +} + +func cmdsFirstErr(cmds []Cmder) error { + for _, cmd := range cmds { + if err := cmd.Err(); err != nil { + return err + } + } + return nil +} + +func writeCmd(wr *proto.Writer, cmds ...Cmder) error { + for _, cmd := range cmds { + err := wr.WriteArgs(cmd.Args()) + if err != nil { + return err + } + } + return nil +} + +func cmdString(cmd Cmder, val interface{}) string { + var ss []string + for _, arg := range cmd.Args() { + ss = append(ss, fmt.Sprint(arg)) + } + s := strings.Join(ss, " ") + if err := cmd.Err(); err != nil { + return s + ": " + err.Error() + } + if val != nil { + switch vv := val.(type) { + case []byte: + return s + ": " + string(vv) + default: + return s + ": " + fmt.Sprint(val) + } + } + return s + +} + +func cmdFirstKeyPos(cmd Cmder, info *CommandInfo) int { + switch cmd.Name() { + case "eval", "evalsha": + if cmd.stringArg(2) != "0" { + return 3 + } + + return 0 + case "publish": + return 1 + } + if info == nil { + return 0 + } + return int(info.FirstKeyPos) +} + +//------------------------------------------------------------------------------ + +type baseCmd struct { + _args []interface{} + err error + + _readTimeout *time.Duration +} + +var _ Cmder = (*Cmd)(nil) + +func (cmd *baseCmd) Err() error { + return cmd.err +} + +func (cmd *baseCmd) Args() []interface{} { + return cmd._args +} + +func (cmd *baseCmd) stringArg(pos int) string { + if pos < 0 || pos >= len(cmd._args) { + return "" + } + s, _ := cmd._args[pos].(string) + return s +} + +func (cmd *baseCmd) Name() string { + if len(cmd._args) > 0 { + // Cmd name must be lower cased. 
+ s := internal.ToLower(cmd.stringArg(0)) + cmd._args[0] = s + return s + } + return "" +} + +func (cmd *baseCmd) readTimeout() *time.Duration { + return cmd._readTimeout +} + +func (cmd *baseCmd) setReadTimeout(d time.Duration) { + cmd._readTimeout = &d +} + +func (cmd *baseCmd) setErr(e error) { + cmd.err = e +} + +//------------------------------------------------------------------------------ + +type Cmd struct { + baseCmd + + val interface{} +} + +func NewCmd(args ...interface{}) *Cmd { + return &Cmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *Cmd) Val() interface{} { + return cmd.val +} + +func (cmd *Cmd) Result() (interface{}, error) { + return cmd.val, cmd.err +} + +func (cmd *Cmd) String() (string, error) { + if cmd.err != nil { + return "", cmd.err + } + switch val := cmd.val.(type) { + case string: + return val, nil + default: + err := fmt.Errorf("redis: unexpected type=%T for String", val) + return "", err + } +} + +func (cmd *Cmd) Int() (int, error) { + if cmd.err != nil { + return 0, cmd.err + } + switch val := cmd.val.(type) { + case int64: + return int(val), nil + case string: + return strconv.Atoi(val) + default: + err := fmt.Errorf("redis: unexpected type=%T for Int", val) + return 0, err + } +} + +func (cmd *Cmd) Int64() (int64, error) { + if cmd.err != nil { + return 0, cmd.err + } + switch val := cmd.val.(type) { + case int64: + return val, nil + case string: + return strconv.ParseInt(val, 10, 64) + default: + err := fmt.Errorf("redis: unexpected type=%T for Int64", val) + return 0, err + } +} + +func (cmd *Cmd) Uint64() (uint64, error) { + if cmd.err != nil { + return 0, cmd.err + } + switch val := cmd.val.(type) { + case int64: + return uint64(val), nil + case string: + return strconv.ParseUint(val, 10, 64) + default: + err := fmt.Errorf("redis: unexpected type=%T for Uint64", val) + return 0, err + } +} + +func (cmd *Cmd) Float32() (float32, error) { + if cmd.err != nil { + return 0, cmd.err + } + switch val := cmd.val.(type) { + case int64: + return float32(val), nil + case string: + f, err := strconv.ParseFloat(val, 32) + if err != nil { + return 0, err + } + return float32(f), nil + default: + err := fmt.Errorf("redis: unexpected type=%T for Float32", val) + return 0, err + } +} + +func (cmd *Cmd) Float64() (float64, error) { + if cmd.err != nil { + return 0, cmd.err + } + switch val := cmd.val.(type) { + case int64: + return float64(val), nil + case string: + return strconv.ParseFloat(val, 64) + default: + err := fmt.Errorf("redis: unexpected type=%T for Float64", val) + return 0, err + } +} + +func (cmd *Cmd) Bool() (bool, error) { + if cmd.err != nil { + return false, cmd.err + } + switch val := cmd.val.(type) { + case int64: + return val != 0, nil + case string: + return strconv.ParseBool(val) + default: + err := fmt.Errorf("redis: unexpected type=%T for Bool", val) + return false, err + } +} + +func (cmd *Cmd) readReply(rd *proto.Reader) error { + cmd.val, cmd.err = rd.ReadReply(sliceParser) + return cmd.err +} + +// Implements proto.MultiBulkParse +func sliceParser(rd *proto.Reader, n int64) (interface{}, error) { + vals := make([]interface{}, 0, n) + for i := int64(0); i < n; i++ { + v, err := rd.ReadReply(sliceParser) + if err != nil { + if err == Nil { + vals = append(vals, nil) + continue + } + if err, ok := err.(proto.RedisError); ok { + vals = append(vals, err) + continue + } + return nil, err + } + + switch v := v.(type) { + case string: + vals = append(vals, v) + default: + vals = append(vals, v) + } + } + return vals, nil +} + 
+//------------------------------------------------------------------------------ + +type SliceCmd struct { + baseCmd + + val []interface{} +} + +var _ Cmder = (*SliceCmd)(nil) + +func NewSliceCmd(args ...interface{}) *SliceCmd { + return &SliceCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *SliceCmd) Val() []interface{} { + return cmd.val +} + +func (cmd *SliceCmd) Result() ([]interface{}, error) { + return cmd.val, cmd.err +} + +func (cmd *SliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *SliceCmd) readReply(rd *proto.Reader) error { + var v interface{} + v, cmd.err = rd.ReadArrayReply(sliceParser) + if cmd.err != nil { + return cmd.err + } + cmd.val = v.([]interface{}) + return nil +} + +//------------------------------------------------------------------------------ + +type StatusCmd struct { + baseCmd + + val string +} + +var _ Cmder = (*StatusCmd)(nil) + +func NewStatusCmd(args ...interface{}) *StatusCmd { + return &StatusCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *StatusCmd) Val() string { + return cmd.val +} + +func (cmd *StatusCmd) Result() (string, error) { + return cmd.val, cmd.err +} + +func (cmd *StatusCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *StatusCmd) readReply(rd *proto.Reader) error { + cmd.val, cmd.err = rd.ReadString() + return cmd.err +} + +//------------------------------------------------------------------------------ + +type IntCmd struct { + baseCmd + + val int64 +} + +var _ Cmder = (*IntCmd)(nil) + +func NewIntCmd(args ...interface{}) *IntCmd { + return &IntCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *IntCmd) Val() int64 { + return cmd.val +} + +func (cmd *IntCmd) Result() (int64, error) { + return cmd.val, cmd.err +} + +func (cmd *IntCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *IntCmd) readReply(rd *proto.Reader) error { + cmd.val, cmd.err = rd.ReadIntReply() + return cmd.err +} + +//------------------------------------------------------------------------------ + +type DurationCmd struct { + baseCmd + + val time.Duration + precision time.Duration +} + +var _ Cmder = (*DurationCmd)(nil) + +func NewDurationCmd(precision time.Duration, args ...interface{}) *DurationCmd { + return &DurationCmd{ + baseCmd: baseCmd{_args: args}, + precision: precision, + } +} + +func (cmd *DurationCmd) Val() time.Duration { + return cmd.val +} + +func (cmd *DurationCmd) Result() (time.Duration, error) { + return cmd.val, cmd.err +} + +func (cmd *DurationCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *DurationCmd) readReply(rd *proto.Reader) error { + var n int64 + n, cmd.err = rd.ReadIntReply() + if cmd.err != nil { + return cmd.err + } + cmd.val = time.Duration(n) * cmd.precision + return nil +} + +//------------------------------------------------------------------------------ + +type TimeCmd struct { + baseCmd + + val time.Time +} + +var _ Cmder = (*TimeCmd)(nil) + +func NewTimeCmd(args ...interface{}) *TimeCmd { + return &TimeCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *TimeCmd) Val() time.Time { + return cmd.val +} + +func (cmd *TimeCmd) Result() (time.Time, error) { + return cmd.val, cmd.err +} + +func (cmd *TimeCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *TimeCmd) readReply(rd *proto.Reader) error { + var v interface{} + v, cmd.err = rd.ReadArrayReply(timeParser) + if cmd.err != nil { + return cmd.err + } + cmd.val = v.(time.Time) + return nil +} + +// Implements proto.MultiBulkParse 
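+// timeParser converts the two-element TIME reply (unix seconds and microseconds) into a time.Time.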
+func timeParser(rd *proto.Reader, n int64) (interface{}, error) { + if n != 2 { + return nil, fmt.Errorf("got %d elements, expected 2", n) + } + + sec, err := rd.ReadInt() + if err != nil { + return nil, err + } + + microsec, err := rd.ReadInt() + if err != nil { + return nil, err + } + + return time.Unix(sec, microsec*1000), nil +} + +//------------------------------------------------------------------------------ + +type BoolCmd struct { + baseCmd + + val bool +} + +var _ Cmder = (*BoolCmd)(nil) + +func NewBoolCmd(args ...interface{}) *BoolCmd { + return &BoolCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *BoolCmd) Val() bool { + return cmd.val +} + +func (cmd *BoolCmd) Result() (bool, error) { + return cmd.val, cmd.err +} + +func (cmd *BoolCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *BoolCmd) readReply(rd *proto.Reader) error { + var v interface{} + v, cmd.err = rd.ReadReply(nil) + // `SET key value NX` returns nil when key already exists. But + // `SETNX key value` returns bool (0/1). So convert nil to bool. + // TODO: is this okay? + if cmd.err == Nil { + cmd.val = false + cmd.err = nil + return nil + } + if cmd.err != nil { + return cmd.err + } + switch v := v.(type) { + case int64: + cmd.val = v == 1 + return nil + case string: + cmd.val = v == "OK" + return nil + default: + cmd.err = fmt.Errorf("got %T, wanted int64 or string", v) + return cmd.err + } +} + +//------------------------------------------------------------------------------ + +type StringCmd struct { + baseCmd + + val string +} + +var _ Cmder = (*StringCmd)(nil) + +func NewStringCmd(args ...interface{}) *StringCmd { + return &StringCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *StringCmd) Val() string { + return cmd.val +} + +func (cmd *StringCmd) Result() (string, error) { + return cmd.Val(), cmd.err +} + +func (cmd *StringCmd) Bytes() ([]byte, error) { + return []byte(cmd.val), cmd.err +} + +func (cmd *StringCmd) Int() (int, error) { + if cmd.err != nil { + return 0, cmd.err + } + return strconv.Atoi(cmd.Val()) +} + +func (cmd *StringCmd) Int64() (int64, error) { + if cmd.err != nil { + return 0, cmd.err + } + return strconv.ParseInt(cmd.Val(), 10, 64) +} + +func (cmd *StringCmd) Uint64() (uint64, error) { + if cmd.err != nil { + return 0, cmd.err + } + return strconv.ParseUint(cmd.Val(), 10, 64) +} + +func (cmd *StringCmd) Float32() (float32, error) { + if cmd.err != nil { + return 0, cmd.err + } + f, err := strconv.ParseFloat(cmd.Val(), 32) + if err != nil { + return 0, err + } + return float32(f), nil +} + +func (cmd *StringCmd) Float64() (float64, error) { + if cmd.err != nil { + return 0, cmd.err + } + return strconv.ParseFloat(cmd.Val(), 64) +} + +func (cmd *StringCmd) Scan(val interface{}) error { + if cmd.err != nil { + return cmd.err + } + return proto.Scan([]byte(cmd.val), val) +} + +func (cmd *StringCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *StringCmd) readReply(rd *proto.Reader) error { + cmd.val, cmd.err = rd.ReadString() + return cmd.err +} + +//------------------------------------------------------------------------------ + +type FloatCmd struct { + baseCmd + + val float64 +} + +var _ Cmder = (*FloatCmd)(nil) + +func NewFloatCmd(args ...interface{}) *FloatCmd { + return &FloatCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *FloatCmd) Val() float64 { + return cmd.val +} + +func (cmd *FloatCmd) Result() (float64, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *FloatCmd) String() string { + return 
cmdString(cmd, cmd.val) +} + +func (cmd *FloatCmd) readReply(rd *proto.Reader) error { + cmd.val, cmd.err = rd.ReadFloatReply() + return cmd.err +} + +//------------------------------------------------------------------------------ + +type StringSliceCmd struct { + baseCmd + + val []string +} + +var _ Cmder = (*StringSliceCmd)(nil) + +func NewStringSliceCmd(args ...interface{}) *StringSliceCmd { + return &StringSliceCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *StringSliceCmd) Val() []string { + return cmd.val +} + +func (cmd *StringSliceCmd) Result() ([]string, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *StringSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *StringSliceCmd) ScanSlice(container interface{}) error { + return proto.ScanSlice(cmd.Val(), container) +} + +func (cmd *StringSliceCmd) readReply(rd *proto.Reader) error { + var v interface{} + v, cmd.err = rd.ReadArrayReply(stringSliceParser) + if cmd.err != nil { + return cmd.err + } + cmd.val = v.([]string) + return nil +} + +// Implements proto.MultiBulkParse +func stringSliceParser(rd *proto.Reader, n int64) (interface{}, error) { + ss := make([]string, 0, n) + for i := int64(0); i < n; i++ { + switch s, err := rd.ReadString(); { + case err == Nil: + ss = append(ss, "") + case err != nil: + return nil, err + default: + ss = append(ss, s) + } + } + return ss, nil +} + +//------------------------------------------------------------------------------ + +type BoolSliceCmd struct { + baseCmd + + val []bool +} + +var _ Cmder = (*BoolSliceCmd)(nil) + +func NewBoolSliceCmd(args ...interface{}) *BoolSliceCmd { + return &BoolSliceCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *BoolSliceCmd) Val() []bool { + return cmd.val +} + +func (cmd *BoolSliceCmd) Result() ([]bool, error) { + return cmd.val, cmd.err +} + +func (cmd *BoolSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *BoolSliceCmd) readReply(rd *proto.Reader) error { + var v interface{} + v, cmd.err = rd.ReadArrayReply(boolSliceParser) + if cmd.err != nil { + return cmd.err + } + cmd.val = v.([]bool) + return nil +} + +// Implements proto.MultiBulkParse +func boolSliceParser(rd *proto.Reader, n int64) (interface{}, error) { + bools := make([]bool, 0, n) + for i := int64(0); i < n; i++ { + n, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + bools = append(bools, n == 1) + } + return bools, nil +} + +//------------------------------------------------------------------------------ + +type StringStringMapCmd struct { + baseCmd + + val map[string]string +} + +var _ Cmder = (*StringStringMapCmd)(nil) + +func NewStringStringMapCmd(args ...interface{}) *StringStringMapCmd { + return &StringStringMapCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *StringStringMapCmd) Val() map[string]string { + return cmd.val +} + +func (cmd *StringStringMapCmd) Result() (map[string]string, error) { + return cmd.val, cmd.err +} + +func (cmd *StringStringMapCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *StringStringMapCmd) readReply(rd *proto.Reader) error { + var v interface{} + v, cmd.err = rd.ReadArrayReply(stringStringMapParser) + if cmd.err != nil { + return cmd.err + } + cmd.val = v.(map[string]string) + return nil +} + +// Implements proto.MultiBulkParse +func stringStringMapParser(rd *proto.Reader, n int64) (interface{}, error) { + m := make(map[string]string, n/2) + for i := int64(0); i < n; i += 2 { + key, err := rd.ReadString() + if err != nil { + return 
nil, err + } + + value, err := rd.ReadString() + if err != nil { + return nil, err + } + + m[key] = value + } + return m, nil +} + +//------------------------------------------------------------------------------ + +type StringIntMapCmd struct { + baseCmd + + val map[string]int64 +} + +var _ Cmder = (*StringIntMapCmd)(nil) + +func NewStringIntMapCmd(args ...interface{}) *StringIntMapCmd { + return &StringIntMapCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *StringIntMapCmd) Val() map[string]int64 { + return cmd.val +} + +func (cmd *StringIntMapCmd) Result() (map[string]int64, error) { + return cmd.val, cmd.err +} + +func (cmd *StringIntMapCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *StringIntMapCmd) readReply(rd *proto.Reader) error { + var v interface{} + v, cmd.err = rd.ReadArrayReply(stringIntMapParser) + if cmd.err != nil { + return cmd.err + } + cmd.val = v.(map[string]int64) + return nil +} + +// Implements proto.MultiBulkParse +func stringIntMapParser(rd *proto.Reader, n int64) (interface{}, error) { + m := make(map[string]int64, n/2) + for i := int64(0); i < n; i += 2 { + key, err := rd.ReadString() + if err != nil { + return nil, err + } + + n, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + + m[key] = n + } + return m, nil +} + +//------------------------------------------------------------------------------ + +type StringStructMapCmd struct { + baseCmd + + val map[string]struct{} +} + +var _ Cmder = (*StringStructMapCmd)(nil) + +func NewStringStructMapCmd(args ...interface{}) *StringStructMapCmd { + return &StringStructMapCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *StringStructMapCmd) Val() map[string]struct{} { + return cmd.val +} + +func (cmd *StringStructMapCmd) Result() (map[string]struct{}, error) { + return cmd.val, cmd.err +} + +func (cmd *StringStructMapCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *StringStructMapCmd) readReply(rd *proto.Reader) error { + var v interface{} + v, cmd.err = rd.ReadArrayReply(stringStructMapParser) + if cmd.err != nil { + return cmd.err + } + cmd.val = v.(map[string]struct{}) + return nil +} + +// Implements proto.MultiBulkParse +func stringStructMapParser(rd *proto.Reader, n int64) (interface{}, error) { + m := make(map[string]struct{}, n) + for i := int64(0); i < n; i++ { + key, err := rd.ReadString() + if err != nil { + return nil, err + } + + m[key] = struct{}{} + } + return m, nil +} + +//------------------------------------------------------------------------------ + +type XMessage struct { + ID string + Values map[string]interface{} +} + +type XMessageSliceCmd struct { + baseCmd + + val []XMessage +} + +var _ Cmder = (*XMessageSliceCmd)(nil) + +func NewXMessageSliceCmd(args ...interface{}) *XMessageSliceCmd { + return &XMessageSliceCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *XMessageSliceCmd) Val() []XMessage { + return cmd.val +} + +func (cmd *XMessageSliceCmd) Result() ([]XMessage, error) { + return cmd.val, cmd.err +} + +func (cmd *XMessageSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *XMessageSliceCmd) readReply(rd *proto.Reader) error { + var v interface{} + v, cmd.err = rd.ReadArrayReply(xMessageSliceParser) + if cmd.err != nil { + return cmd.err + } + cmd.val = v.([]XMessage) + return nil +} + +// Implements proto.MultiBulkParse +func xMessageSliceParser(rd *proto.Reader, n int64) (interface{}, error) { + msgs := make([]XMessage, 0, n) + for i := int64(0); i < n; i++ { + _, err := 
rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + id, err := rd.ReadString() + if err != nil { + return nil, err + } + + var values map[string]interface{} + + v, err := rd.ReadArrayReply(stringInterfaceMapParser) + if err != nil { + if err != proto.Nil { + return nil, err + } + } else { + values = v.(map[string]interface{}) + } + + msgs = append(msgs, XMessage{ + ID: id, + Values: values, + }) + return nil, nil + }) + if err != nil { + return nil, err + } + } + return msgs, nil +} + +// Implements proto.MultiBulkParse +func stringInterfaceMapParser(rd *proto.Reader, n int64) (interface{}, error) { + m := make(map[string]interface{}, n/2) + for i := int64(0); i < n; i += 2 { + key, err := rd.ReadString() + if err != nil { + return nil, err + } + + value, err := rd.ReadString() + if err != nil { + return nil, err + } + + m[key] = value + } + return m, nil +} + +//------------------------------------------------------------------------------ + +type XStream struct { + Stream string + Messages []XMessage +} + +type XStreamSliceCmd struct { + baseCmd + + val []XStream +} + +var _ Cmder = (*XStreamSliceCmd)(nil) + +func NewXStreamSliceCmd(args ...interface{}) *XStreamSliceCmd { + return &XStreamSliceCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *XStreamSliceCmd) Val() []XStream { + return cmd.val +} + +func (cmd *XStreamSliceCmd) Result() ([]XStream, error) { + return cmd.val, cmd.err +} + +func (cmd *XStreamSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *XStreamSliceCmd) readReply(rd *proto.Reader) error { + var v interface{} + v, cmd.err = rd.ReadArrayReply(xStreamSliceParser) + if cmd.err != nil { + return cmd.err + } + cmd.val = v.([]XStream) + return nil +} + +// Implements proto.MultiBulkParse +func xStreamSliceParser(rd *proto.Reader, n int64) (interface{}, error) { + ret := make([]XStream, 0, n) + for i := int64(0); i < n; i++ { + _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + if n != 2 { + return nil, fmt.Errorf("got %d, wanted 2", n) + } + + stream, err := rd.ReadString() + if err != nil { + return nil, err + } + + v, err := rd.ReadArrayReply(xMessageSliceParser) + if err != nil { + return nil, err + } + + ret = append(ret, XStream{ + Stream: stream, + Messages: v.([]XMessage), + }) + return nil, nil + }) + if err != nil { + return nil, err + } + } + return ret, nil +} + +//------------------------------------------------------------------------------ + +type XPending struct { + Count int64 + Lower string + Higher string + Consumers map[string]int64 +} + +type XPendingCmd struct { + baseCmd + val *XPending +} + +var _ Cmder = (*XPendingCmd)(nil) + +func NewXPendingCmd(args ...interface{}) *XPendingCmd { + return &XPendingCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *XPendingCmd) Val() *XPending { + return cmd.val +} + +func (cmd *XPendingCmd) Result() (*XPending, error) { + return cmd.val, cmd.err +} + +func (cmd *XPendingCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *XPendingCmd) readReply(rd *proto.Reader) error { + var info interface{} + info, cmd.err = rd.ReadArrayReply(xPendingParser) + if cmd.err != nil { + return cmd.err + } + cmd.val = info.(*XPending) + return nil +} + +func xPendingParser(rd *proto.Reader, n int64) (interface{}, error) { + if n != 4 { + return nil, fmt.Errorf("got %d, wanted 4", n) + } + + count, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + + lower, err := rd.ReadString() + if err != nil && err 
!= Nil { + return nil, err + } + + higher, err := rd.ReadString() + if err != nil && err != Nil { + return nil, err + } + + pending := &XPending{ + Count: count, + Lower: lower, + Higher: higher, + } + _, err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + for i := int64(0); i < n; i++ { + _, err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + if n != 2 { + return nil, fmt.Errorf("got %d, wanted 2", n) + } + + consumerName, err := rd.ReadString() + if err != nil { + return nil, err + } + + consumerPending, err := rd.ReadInt() + if err != nil { + return nil, err + } + + if pending.Consumers == nil { + pending.Consumers = make(map[string]int64) + } + pending.Consumers[consumerName] = consumerPending + + return nil, nil + }) + if err != nil { + return nil, err + } + } + return nil, nil + }) + if err != nil && err != Nil { + return nil, err + } + + return pending, nil +} + +//------------------------------------------------------------------------------ + +type XPendingExt struct { + Id string + Consumer string + Idle time.Duration + RetryCount int64 +} + +type XPendingExtCmd struct { + baseCmd + val []XPendingExt +} + +var _ Cmder = (*XPendingExtCmd)(nil) + +func NewXPendingExtCmd(args ...interface{}) *XPendingExtCmd { + return &XPendingExtCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *XPendingExtCmd) Val() []XPendingExt { + return cmd.val +} + +func (cmd *XPendingExtCmd) Result() ([]XPendingExt, error) { + return cmd.val, cmd.err +} + +func (cmd *XPendingExtCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *XPendingExtCmd) readReply(rd *proto.Reader) error { + var info interface{} + info, cmd.err = rd.ReadArrayReply(xPendingExtSliceParser) + if cmd.err != nil { + return cmd.err + } + cmd.val = info.([]XPendingExt) + return nil +} + +func xPendingExtSliceParser(rd *proto.Reader, n int64) (interface{}, error) { + ret := make([]XPendingExt, 0, n) + for i := int64(0); i < n; i++ { + _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + if n != 4 { + return nil, fmt.Errorf("got %d, wanted 4", n) + } + + id, err := rd.ReadString() + if err != nil { + return nil, err + } + + consumer, err := rd.ReadString() + if err != nil && err != Nil { + return nil, err + } + + idle, err := rd.ReadIntReply() + if err != nil && err != Nil { + return nil, err + } + + retryCount, err := rd.ReadIntReply() + if err != nil && err != Nil { + return nil, err + } + + ret = append(ret, XPendingExt{ + Id: id, + Consumer: consumer, + Idle: time.Duration(idle) * time.Millisecond, + RetryCount: retryCount, + }) + return nil, nil + }) + if err != nil { + return nil, err + } + } + return ret, nil +} + +//------------------------------------------------------------------------------ + +//------------------------------------------------------------------------------ + +type ZSliceCmd struct { + baseCmd + + val []Z +} + +var _ Cmder = (*ZSliceCmd)(nil) + +func NewZSliceCmd(args ...interface{}) *ZSliceCmd { + return &ZSliceCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *ZSliceCmd) Val() []Z { + return cmd.val +} + +func (cmd *ZSliceCmd) Result() ([]Z, error) { + return cmd.val, cmd.err +} + +func (cmd *ZSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *ZSliceCmd) readReply(rd *proto.Reader) error { + var v interface{} + v, cmd.err = rd.ReadArrayReply(zSliceParser) + if cmd.err != nil { + return cmd.err + } + cmd.val = v.([]Z) + return nil +} + +// Implements 
proto.MultiBulkParse +func zSliceParser(rd *proto.Reader, n int64) (interface{}, error) { + zz := make([]Z, n/2) + for i := int64(0); i < n; i += 2 { + var err error + + z := &zz[i/2] + + z.Member, err = rd.ReadString() + if err != nil { + return nil, err + } + + z.Score, err = rd.ReadFloatReply() + if err != nil { + return nil, err + } + } + return zz, nil +} + +//------------------------------------------------------------------------------ + +type ZWithKeyCmd struct { + baseCmd + + val ZWithKey +} + +var _ Cmder = (*ZWithKeyCmd)(nil) + +func NewZWithKeyCmd(args ...interface{}) *ZWithKeyCmd { + return &ZWithKeyCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *ZWithKeyCmd) Val() ZWithKey { + return cmd.val +} + +func (cmd *ZWithKeyCmd) Result() (ZWithKey, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *ZWithKeyCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *ZWithKeyCmd) readReply(rd *proto.Reader) error { + var v interface{} + v, cmd.err = rd.ReadArrayReply(zWithKeyParser) + if cmd.err != nil { + return cmd.err + } + cmd.val = v.(ZWithKey) + return nil +} + +// Implements proto.MultiBulkParse +func zWithKeyParser(rd *proto.Reader, n int64) (interface{}, error) { + if n != 3 { + return nil, fmt.Errorf("got %d elements, expected 3", n) + } + + var z ZWithKey + var err error + + z.Key, err = rd.ReadString() + if err != nil { + return nil, err + } + z.Member, err = rd.ReadString() + if err != nil { + return nil, err + } + z.Score, err = rd.ReadFloatReply() + if err != nil { + return nil, err + } + return z, nil +} + +//------------------------------------------------------------------------------ + +type ScanCmd struct { + baseCmd + + page []string + cursor uint64 + + process func(cmd Cmder) error +} + +var _ Cmder = (*ScanCmd)(nil) + +func NewScanCmd(process func(cmd Cmder) error, args ...interface{}) *ScanCmd { + return &ScanCmd{ + baseCmd: baseCmd{_args: args}, + process: process, + } +} + +func (cmd *ScanCmd) Val() (keys []string, cursor uint64) { + return cmd.page, cmd.cursor +} + +func (cmd *ScanCmd) Result() (keys []string, cursor uint64, err error) { + return cmd.page, cmd.cursor, cmd.err +} + +func (cmd *ScanCmd) String() string { + return cmdString(cmd, cmd.page) +} + +func (cmd *ScanCmd) readReply(rd *proto.Reader) error { + cmd.page, cmd.cursor, cmd.err = rd.ReadScanReply() + return cmd.err +} + +// Iterator creates a new ScanIterator. 
+func (cmd *ScanCmd) Iterator() *ScanIterator { + return &ScanIterator{ + cmd: cmd, + } +} + +//------------------------------------------------------------------------------ + +type ClusterNode struct { + Id string + Addr string +} + +type ClusterSlot struct { + Start int + End int + Nodes []ClusterNode +} + +type ClusterSlotsCmd struct { + baseCmd + + val []ClusterSlot +} + +var _ Cmder = (*ClusterSlotsCmd)(nil) + +func NewClusterSlotsCmd(args ...interface{}) *ClusterSlotsCmd { + return &ClusterSlotsCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *ClusterSlotsCmd) Val() []ClusterSlot { + return cmd.val +} + +func (cmd *ClusterSlotsCmd) Result() ([]ClusterSlot, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *ClusterSlotsCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *ClusterSlotsCmd) readReply(rd *proto.Reader) error { + var v interface{} + v, cmd.err = rd.ReadArrayReply(clusterSlotsParser) + if cmd.err != nil { + return cmd.err + } + cmd.val = v.([]ClusterSlot) + return nil +} + +// Implements proto.MultiBulkParse +func clusterSlotsParser(rd *proto.Reader, n int64) (interface{}, error) { + slots := make([]ClusterSlot, n) + for i := 0; i < len(slots); i++ { + n, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + if n < 2 { + err := fmt.Errorf("redis: got %d elements in cluster info, expected at least 2", n) + return nil, err + } + + start, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + + end, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + + nodes := make([]ClusterNode, n-2) + for j := 0; j < len(nodes); j++ { + n, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + if n != 2 && n != 3 { + err := fmt.Errorf("got %d elements in cluster info address, expected 2 or 3", n) + return nil, err + } + + ip, err := rd.ReadString() + if err != nil { + return nil, err + } + + port, err := rd.ReadString() + if err != nil { + return nil, err + } + + nodes[j].Addr = net.JoinHostPort(ip, port) + + if n == 3 { + id, err := rd.ReadString() + if err != nil { + return nil, err + } + nodes[j].Id = id + } + } + + slots[i] = ClusterSlot{ + Start: int(start), + End: int(end), + Nodes: nodes, + } + } + return slots, nil +} + +//------------------------------------------------------------------------------ + +// GeoLocation is used with GeoAdd to add geospatial location. +type GeoLocation struct { + Name string + Longitude, Latitude, Dist float64 + GeoHash int64 +} + +// GeoRadiusQuery is used with GeoRadius to query geospatial index. +type GeoRadiusQuery struct { + Radius float64 + // Can be m, km, ft, or mi. Default is km. + Unit string + WithCoord bool + WithDist bool + WithGeoHash bool + Count int + // Can be ASC or DESC. Default is no sort order. 
+ Sort string + Store string + StoreDist string +} + +type GeoLocationCmd struct { + baseCmd + + q *GeoRadiusQuery + locations []GeoLocation +} + +var _ Cmder = (*GeoLocationCmd)(nil) + +func NewGeoLocationCmd(q *GeoRadiusQuery, args ...interface{}) *GeoLocationCmd { + args = append(args, q.Radius) + if q.Unit != "" { + args = append(args, q.Unit) + } else { + args = append(args, "km") + } + if q.WithCoord { + args = append(args, "withcoord") + } + if q.WithDist { + args = append(args, "withdist") + } + if q.WithGeoHash { + args = append(args, "withhash") + } + if q.Count > 0 { + args = append(args, "count", q.Count) + } + if q.Sort != "" { + args = append(args, q.Sort) + } + if q.Store != "" { + args = append(args, "store") + args = append(args, q.Store) + } + if q.StoreDist != "" { + args = append(args, "storedist") + args = append(args, q.StoreDist) + } + return &GeoLocationCmd{ + baseCmd: baseCmd{_args: args}, + q: q, + } +} + +func (cmd *GeoLocationCmd) Val() []GeoLocation { + return cmd.locations +} + +func (cmd *GeoLocationCmd) Result() ([]GeoLocation, error) { + return cmd.locations, cmd.err +} + +func (cmd *GeoLocationCmd) String() string { + return cmdString(cmd, cmd.locations) +} + +func (cmd *GeoLocationCmd) readReply(rd *proto.Reader) error { + var v interface{} + v, cmd.err = rd.ReadArrayReply(newGeoLocationSliceParser(cmd.q)) + if cmd.err != nil { + return cmd.err + } + cmd.locations = v.([]GeoLocation) + return nil +} + +func newGeoLocationParser(q *GeoRadiusQuery) proto.MultiBulkParse { + return func(rd *proto.Reader, n int64) (interface{}, error) { + var loc GeoLocation + var err error + + loc.Name, err = rd.ReadString() + if err != nil { + return nil, err + } + if q.WithDist { + loc.Dist, err = rd.ReadFloatReply() + if err != nil { + return nil, err + } + } + if q.WithGeoHash { + loc.GeoHash, err = rd.ReadIntReply() + if err != nil { + return nil, err + } + } + if q.WithCoord { + n, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + if n != 2 { + return nil, fmt.Errorf("got %d coordinates, expected 2", n) + } + + loc.Longitude, err = rd.ReadFloatReply() + if err != nil { + return nil, err + } + loc.Latitude, err = rd.ReadFloatReply() + if err != nil { + return nil, err + } + } + + return &loc, nil + } +} + +func newGeoLocationSliceParser(q *GeoRadiusQuery) proto.MultiBulkParse { + return func(rd *proto.Reader, n int64) (interface{}, error) { + locs := make([]GeoLocation, 0, n) + for i := int64(0); i < n; i++ { + v, err := rd.ReadReply(newGeoLocationParser(q)) + if err != nil { + return nil, err + } + switch vv := v.(type) { + case string: + locs = append(locs, GeoLocation{ + Name: vv, + }) + case *GeoLocation: + locs = append(locs, *vv) + default: + return nil, fmt.Errorf("got %T, expected string or *GeoLocation", v) + } + } + return locs, nil + } +} + +//------------------------------------------------------------------------------ + +type GeoPos struct { + Longitude, Latitude float64 +} + +type GeoPosCmd struct { + baseCmd + + positions []*GeoPos +} + +var _ Cmder = (*GeoPosCmd)(nil) + +func NewGeoPosCmd(args ...interface{}) *GeoPosCmd { + return &GeoPosCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *GeoPosCmd) Val() []*GeoPos { + return cmd.positions +} + +func (cmd *GeoPosCmd) Result() ([]*GeoPos, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *GeoPosCmd) String() string { + return cmdString(cmd, cmd.positions) +} + +func (cmd *GeoPosCmd) readReply(rd *proto.Reader) error { + var v interface{} + v, cmd.err = 
rd.ReadArrayReply(geoPosSliceParser) + if cmd.err != nil { + return cmd.err + } + cmd.positions = v.([]*GeoPos) + return nil +} + +func geoPosSliceParser(rd *proto.Reader, n int64) (interface{}, error) { + positions := make([]*GeoPos, 0, n) + for i := int64(0); i < n; i++ { + v, err := rd.ReadReply(geoPosParser) + if err != nil { + if err == Nil { + positions = append(positions, nil) + continue + } + return nil, err + } + switch v := v.(type) { + case *GeoPos: + positions = append(positions, v) + default: + return nil, fmt.Errorf("got %T, expected *GeoPos", v) + } + } + return positions, nil +} + +func geoPosParser(rd *proto.Reader, n int64) (interface{}, error) { + var pos GeoPos + var err error + + pos.Longitude, err = rd.ReadFloatReply() + if err != nil { + return nil, err + } + + pos.Latitude, err = rd.ReadFloatReply() + if err != nil { + return nil, err + } + + return &pos, nil +} + +//------------------------------------------------------------------------------ + +type CommandInfo struct { + Name string + Arity int8 + Flags []string + FirstKeyPos int8 + LastKeyPos int8 + StepCount int8 + ReadOnly bool +} + +type CommandsInfoCmd struct { + baseCmd + + val map[string]*CommandInfo +} + +var _ Cmder = (*CommandsInfoCmd)(nil) + +func NewCommandsInfoCmd(args ...interface{}) *CommandsInfoCmd { + return &CommandsInfoCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *CommandsInfoCmd) Val() map[string]*CommandInfo { + return cmd.val +} + +func (cmd *CommandsInfoCmd) Result() (map[string]*CommandInfo, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *CommandsInfoCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *CommandsInfoCmd) readReply(rd *proto.Reader) error { + var v interface{} + v, cmd.err = rd.ReadArrayReply(commandInfoSliceParser) + if cmd.err != nil { + return cmd.err + } + cmd.val = v.(map[string]*CommandInfo) + return nil +} + +// Implements proto.MultiBulkParse +func commandInfoSliceParser(rd *proto.Reader, n int64) (interface{}, error) { + m := make(map[string]*CommandInfo, n) + for i := int64(0); i < n; i++ { + v, err := rd.ReadReply(commandInfoParser) + if err != nil { + return nil, err + } + vv := v.(*CommandInfo) + m[vv.Name] = vv + + } + return m, nil +} + +func commandInfoParser(rd *proto.Reader, n int64) (interface{}, error) { + var cmd CommandInfo + var err error + + if n != 6 { + return nil, fmt.Errorf("redis: got %d elements in COMMAND reply, wanted 6", n) + } + + cmd.Name, err = rd.ReadString() + if err != nil { + return nil, err + } + + arity, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + cmd.Arity = int8(arity) + + flags, err := rd.ReadReply(stringSliceParser) + if err != nil { + return nil, err + } + cmd.Flags = flags.([]string) + + firstKeyPos, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + cmd.FirstKeyPos = int8(firstKeyPos) + + lastKeyPos, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + cmd.LastKeyPos = int8(lastKeyPos) + + stepCount, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + cmd.StepCount = int8(stepCount) + + for _, flag := range cmd.Flags { + if flag == "readonly" { + cmd.ReadOnly = true + break + } + } + + return &cmd, nil +} + +//------------------------------------------------------------------------------ + +type cmdsInfoCache struct { + fn func() (map[string]*CommandInfo, error) + + once internal.Once + cmds map[string]*CommandInfo +} + +func newCmdsInfoCache(fn func() (map[string]*CommandInfo, error)) *cmdsInfoCache { + return 
&cmdsInfoCache{ + fn: fn, + } +} + +func (c *cmdsInfoCache) Get() (map[string]*CommandInfo, error) { + err := c.once.Do(func() error { + cmds, err := c.fn() + if err != nil { + return err + } + c.cmds = cmds + return nil + }) + return c.cmds, err +} diff --git a/vendor/github.com/go-redis/redis/commands.go b/vendor/github.com/go-redis/redis/commands.go new file mode 100644 index 000000000..653e4abe9 --- /dev/null +++ b/vendor/github.com/go-redis/redis/commands.go @@ -0,0 +1,2583 @@ +package redis + +import ( + "errors" + "io" + "time" + + "github.com/go-redis/redis/internal" +) + +func usePrecise(dur time.Duration) bool { + return dur < time.Second || dur%time.Second != 0 +} + +func formatMs(dur time.Duration) int64 { + if dur > 0 && dur < time.Millisecond { + internal.Logf( + "specified duration is %s, but minimal supported value is %s", + dur, time.Millisecond, + ) + } + return int64(dur / time.Millisecond) +} + +func formatSec(dur time.Duration) int64 { + if dur > 0 && dur < time.Second { + internal.Logf( + "specified duration is %s, but minimal supported value is %s", + dur, time.Second, + ) + } + return int64(dur / time.Second) +} + +func appendArgs(dst, src []interface{}) []interface{} { + if len(src) == 1 { + if ss, ok := src[0].([]string); ok { + for _, s := range ss { + dst = append(dst, s) + } + return dst + } + } + + for _, v := range src { + dst = append(dst, v) + } + return dst +} + +type Cmdable interface { + Pipeline() Pipeliner + Pipelined(fn func(Pipeliner) error) ([]Cmder, error) + + TxPipelined(fn func(Pipeliner) error) ([]Cmder, error) + TxPipeline() Pipeliner + + Command() *CommandsInfoCmd + ClientGetName() *StringCmd + Echo(message interface{}) *StringCmd + Ping() *StatusCmd + Quit() *StatusCmd + Del(keys ...string) *IntCmd + Unlink(keys ...string) *IntCmd + Dump(key string) *StringCmd + Exists(keys ...string) *IntCmd + Expire(key string, expiration time.Duration) *BoolCmd + ExpireAt(key string, tm time.Time) *BoolCmd + Keys(pattern string) *StringSliceCmd + Migrate(host, port, key string, db int64, timeout time.Duration) *StatusCmd + Move(key string, db int64) *BoolCmd + ObjectRefCount(key string) *IntCmd + ObjectEncoding(key string) *StringCmd + ObjectIdleTime(key string) *DurationCmd + Persist(key string) *BoolCmd + PExpire(key string, expiration time.Duration) *BoolCmd + PExpireAt(key string, tm time.Time) *BoolCmd + PTTL(key string) *DurationCmd + RandomKey() *StringCmd + Rename(key, newkey string) *StatusCmd + RenameNX(key, newkey string) *BoolCmd + Restore(key string, ttl time.Duration, value string) *StatusCmd + RestoreReplace(key string, ttl time.Duration, value string) *StatusCmd + Sort(key string, sort *Sort) *StringSliceCmd + SortStore(key, store string, sort *Sort) *IntCmd + SortInterfaces(key string, sort *Sort) *SliceCmd + Touch(keys ...string) *IntCmd + TTL(key string) *DurationCmd + Type(key string) *StatusCmd + Scan(cursor uint64, match string, count int64) *ScanCmd + SScan(key string, cursor uint64, match string, count int64) *ScanCmd + HScan(key string, cursor uint64, match string, count int64) *ScanCmd + ZScan(key string, cursor uint64, match string, count int64) *ScanCmd + Append(key, value string) *IntCmd + BitCount(key string, bitCount *BitCount) *IntCmd + BitOpAnd(destKey string, keys ...string) *IntCmd + BitOpOr(destKey string, keys ...string) *IntCmd + BitOpXor(destKey string, keys ...string) *IntCmd + BitOpNot(destKey string, key string) *IntCmd + BitPos(key string, bit int64, pos ...int64) *IntCmd + Decr(key string) *IntCmd + DecrBy(key 
string, decrement int64) *IntCmd + Get(key string) *StringCmd + GetBit(key string, offset int64) *IntCmd + GetRange(key string, start, end int64) *StringCmd + GetSet(key string, value interface{}) *StringCmd + Incr(key string) *IntCmd + IncrBy(key string, value int64) *IntCmd + IncrByFloat(key string, value float64) *FloatCmd + MGet(keys ...string) *SliceCmd + MSet(pairs ...interface{}) *StatusCmd + MSetNX(pairs ...interface{}) *BoolCmd + Set(key string, value interface{}, expiration time.Duration) *StatusCmd + SetBit(key string, offset int64, value int) *IntCmd + SetNX(key string, value interface{}, expiration time.Duration) *BoolCmd + SetXX(key string, value interface{}, expiration time.Duration) *BoolCmd + SetRange(key string, offset int64, value string) *IntCmd + StrLen(key string) *IntCmd + HDel(key string, fields ...string) *IntCmd + HExists(key, field string) *BoolCmd + HGet(key, field string) *StringCmd + HGetAll(key string) *StringStringMapCmd + HIncrBy(key, field string, incr int64) *IntCmd + HIncrByFloat(key, field string, incr float64) *FloatCmd + HKeys(key string) *StringSliceCmd + HLen(key string) *IntCmd + HMGet(key string, fields ...string) *SliceCmd + HMSet(key string, fields map[string]interface{}) *StatusCmd + HSet(key, field string, value interface{}) *BoolCmd + HSetNX(key, field string, value interface{}) *BoolCmd + HVals(key string) *StringSliceCmd + BLPop(timeout time.Duration, keys ...string) *StringSliceCmd + BRPop(timeout time.Duration, keys ...string) *StringSliceCmd + BRPopLPush(source, destination string, timeout time.Duration) *StringCmd + LIndex(key string, index int64) *StringCmd + LInsert(key, op string, pivot, value interface{}) *IntCmd + LInsertBefore(key string, pivot, value interface{}) *IntCmd + LInsertAfter(key string, pivot, value interface{}) *IntCmd + LLen(key string) *IntCmd + LPop(key string) *StringCmd + LPush(key string, values ...interface{}) *IntCmd + LPushX(key string, value interface{}) *IntCmd + LRange(key string, start, stop int64) *StringSliceCmd + LRem(key string, count int64, value interface{}) *IntCmd + LSet(key string, index int64, value interface{}) *StatusCmd + LTrim(key string, start, stop int64) *StatusCmd + RPop(key string) *StringCmd + RPopLPush(source, destination string) *StringCmd + RPush(key string, values ...interface{}) *IntCmd + RPushX(key string, value interface{}) *IntCmd + SAdd(key string, members ...interface{}) *IntCmd + SCard(key string) *IntCmd + SDiff(keys ...string) *StringSliceCmd + SDiffStore(destination string, keys ...string) *IntCmd + SInter(keys ...string) *StringSliceCmd + SInterStore(destination string, keys ...string) *IntCmd + SIsMember(key string, member interface{}) *BoolCmd + SMembers(key string) *StringSliceCmd + SMembersMap(key string) *StringStructMapCmd + SMove(source, destination string, member interface{}) *BoolCmd + SPop(key string) *StringCmd + SPopN(key string, count int64) *StringSliceCmd + SRandMember(key string) *StringCmd + SRandMemberN(key string, count int64) *StringSliceCmd + SRem(key string, members ...interface{}) *IntCmd + SUnion(keys ...string) *StringSliceCmd + SUnionStore(destination string, keys ...string) *IntCmd + XAdd(a *XAddArgs) *StringCmd + XDel(stream string, ids ...string) *IntCmd + XLen(stream string) *IntCmd + XRange(stream, start, stop string) *XMessageSliceCmd + XRangeN(stream, start, stop string, count int64) *XMessageSliceCmd + XRevRange(stream string, start, stop string) *XMessageSliceCmd + XRevRangeN(stream string, start, stop string, count int64) 
*XMessageSliceCmd + XRead(a *XReadArgs) *XStreamSliceCmd + XReadStreams(streams ...string) *XStreamSliceCmd + XGroupCreate(stream, group, start string) *StatusCmd + XGroupCreateMkStream(stream, group, start string) *StatusCmd + XGroupSetID(stream, group, start string) *StatusCmd + XGroupDestroy(stream, group string) *IntCmd + XGroupDelConsumer(stream, group, consumer string) *IntCmd + XReadGroup(a *XReadGroupArgs) *XStreamSliceCmd + XAck(stream, group string, ids ...string) *IntCmd + XPending(stream, group string) *XPendingCmd + XPendingExt(a *XPendingExtArgs) *XPendingExtCmd + XClaim(a *XClaimArgs) *XMessageSliceCmd + XClaimJustID(a *XClaimArgs) *StringSliceCmd + XTrim(key string, maxLen int64) *IntCmd + XTrimApprox(key string, maxLen int64) *IntCmd + BZPopMax(timeout time.Duration, keys ...string) *ZWithKeyCmd + BZPopMin(timeout time.Duration, keys ...string) *ZWithKeyCmd + ZAdd(key string, members ...Z) *IntCmd + ZAddNX(key string, members ...Z) *IntCmd + ZAddXX(key string, members ...Z) *IntCmd + ZAddCh(key string, members ...Z) *IntCmd + ZAddNXCh(key string, members ...Z) *IntCmd + ZAddXXCh(key string, members ...Z) *IntCmd + ZIncr(key string, member Z) *FloatCmd + ZIncrNX(key string, member Z) *FloatCmd + ZIncrXX(key string, member Z) *FloatCmd + ZCard(key string) *IntCmd + ZCount(key, min, max string) *IntCmd + ZLexCount(key, min, max string) *IntCmd + ZIncrBy(key string, increment float64, member string) *FloatCmd + ZInterStore(destination string, store ZStore, keys ...string) *IntCmd + ZPopMax(key string, count ...int64) *ZSliceCmd + ZPopMin(key string, count ...int64) *ZSliceCmd + ZRange(key string, start, stop int64) *StringSliceCmd + ZRangeWithScores(key string, start, stop int64) *ZSliceCmd + ZRangeByScore(key string, opt ZRangeBy) *StringSliceCmd + ZRangeByLex(key string, opt ZRangeBy) *StringSliceCmd + ZRangeByScoreWithScores(key string, opt ZRangeBy) *ZSliceCmd + ZRank(key, member string) *IntCmd + ZRem(key string, members ...interface{}) *IntCmd + ZRemRangeByRank(key string, start, stop int64) *IntCmd + ZRemRangeByScore(key, min, max string) *IntCmd + ZRemRangeByLex(key, min, max string) *IntCmd + ZRevRange(key string, start, stop int64) *StringSliceCmd + ZRevRangeWithScores(key string, start, stop int64) *ZSliceCmd + ZRevRangeByScore(key string, opt ZRangeBy) *StringSliceCmd + ZRevRangeByLex(key string, opt ZRangeBy) *StringSliceCmd + ZRevRangeByScoreWithScores(key string, opt ZRangeBy) *ZSliceCmd + ZRevRank(key, member string) *IntCmd + ZScore(key, member string) *FloatCmd + ZUnionStore(dest string, store ZStore, keys ...string) *IntCmd + PFAdd(key string, els ...interface{}) *IntCmd + PFCount(keys ...string) *IntCmd + PFMerge(dest string, keys ...string) *StatusCmd + BgRewriteAOF() *StatusCmd + BgSave() *StatusCmd + ClientKill(ipPort string) *StatusCmd + ClientKillByFilter(keys ...string) *IntCmd + ClientList() *StringCmd + ClientPause(dur time.Duration) *BoolCmd + ClientID() *IntCmd + ConfigGet(parameter string) *SliceCmd + ConfigResetStat() *StatusCmd + ConfigSet(parameter, value string) *StatusCmd + ConfigRewrite() *StatusCmd + DBSize() *IntCmd + FlushAll() *StatusCmd + FlushAllAsync() *StatusCmd + FlushDB() *StatusCmd + FlushDBAsync() *StatusCmd + Info(section ...string) *StringCmd + LastSave() *IntCmd + Save() *StatusCmd + Shutdown() *StatusCmd + ShutdownSave() *StatusCmd + ShutdownNoSave() *StatusCmd + SlaveOf(host, port string) *StatusCmd + Time() *TimeCmd + Eval(script string, keys []string, args ...interface{}) *Cmd + EvalSha(sha1 string, keys []string, args 
...interface{}) *Cmd + ScriptExists(hashes ...string) *BoolSliceCmd + ScriptFlush() *StatusCmd + ScriptKill() *StatusCmd + ScriptLoad(script string) *StringCmd + DebugObject(key string) *StringCmd + Publish(channel string, message interface{}) *IntCmd + PubSubChannels(pattern string) *StringSliceCmd + PubSubNumSub(channels ...string) *StringIntMapCmd + PubSubNumPat() *IntCmd + ClusterSlots() *ClusterSlotsCmd + ClusterNodes() *StringCmd + ClusterMeet(host, port string) *StatusCmd + ClusterForget(nodeID string) *StatusCmd + ClusterReplicate(nodeID string) *StatusCmd + ClusterResetSoft() *StatusCmd + ClusterResetHard() *StatusCmd + ClusterInfo() *StringCmd + ClusterKeySlot(key string) *IntCmd + ClusterGetKeysInSlot(slot int, count int) *StringSliceCmd + ClusterCountFailureReports(nodeID string) *IntCmd + ClusterCountKeysInSlot(slot int) *IntCmd + ClusterDelSlots(slots ...int) *StatusCmd + ClusterDelSlotsRange(min, max int) *StatusCmd + ClusterSaveConfig() *StatusCmd + ClusterSlaves(nodeID string) *StringSliceCmd + ClusterFailover() *StatusCmd + ClusterAddSlots(slots ...int) *StatusCmd + ClusterAddSlotsRange(min, max int) *StatusCmd + GeoAdd(key string, geoLocation ...*GeoLocation) *IntCmd + GeoPos(key string, members ...string) *GeoPosCmd + GeoRadius(key string, longitude, latitude float64, query *GeoRadiusQuery) *GeoLocationCmd + GeoRadiusRO(key string, longitude, latitude float64, query *GeoRadiusQuery) *GeoLocationCmd + GeoRadiusByMember(key, member string, query *GeoRadiusQuery) *GeoLocationCmd + GeoRadiusByMemberRO(key, member string, query *GeoRadiusQuery) *GeoLocationCmd + GeoDist(key string, member1, member2, unit string) *FloatCmd + GeoHash(key string, members ...string) *StringSliceCmd + ReadOnly() *StatusCmd + ReadWrite() *StatusCmd + MemoryUsage(key string, samples ...int) *IntCmd +} + +type StatefulCmdable interface { + Cmdable + Auth(password string) *StatusCmd + Select(index int) *StatusCmd + SwapDB(index1, index2 int) *StatusCmd + ClientSetName(name string) *BoolCmd +} + +var _ Cmdable = (*Client)(nil) +var _ Cmdable = (*Tx)(nil) +var _ Cmdable = (*Ring)(nil) +var _ Cmdable = (*ClusterClient)(nil) + +type cmdable struct { + process func(cmd Cmder) error +} + +func (c *cmdable) setProcessor(fn func(Cmder) error) { + c.process = fn +} + +type statefulCmdable struct { + cmdable + process func(cmd Cmder) error +} + +func (c *statefulCmdable) setProcessor(fn func(Cmder) error) { + c.process = fn + c.cmdable.setProcessor(fn) +} + +//------------------------------------------------------------------------------ + +func (c *statefulCmdable) Auth(password string) *StatusCmd { + cmd := NewStatusCmd("auth", password) + c.process(cmd) + return cmd +} + +func (c *cmdable) Echo(message interface{}) *StringCmd { + cmd := NewStringCmd("echo", message) + c.process(cmd) + return cmd +} + +func (c *cmdable) Ping() *StatusCmd { + cmd := NewStatusCmd("ping") + c.process(cmd) + return cmd +} + +func (c *cmdable) Wait(numSlaves int, timeout time.Duration) *IntCmd { + cmd := NewIntCmd("wait", numSlaves, int(timeout/time.Millisecond)) + c.process(cmd) + return cmd +} + +func (c *cmdable) Quit() *StatusCmd { + panic("not implemented") +} + +func (c *statefulCmdable) Select(index int) *StatusCmd { + cmd := NewStatusCmd("select", index) + c.process(cmd) + return cmd +} + +func (c *statefulCmdable) SwapDB(index1, index2 int) *StatusCmd { + cmd := NewStatusCmd("swapdb", index1, index2) + c.process(cmd) + return cmd +} + +//------------------------------------------------------------------------------ + 
+func (c *cmdable) Command() *CommandsInfoCmd { + cmd := NewCommandsInfoCmd("command") + c.process(cmd) + return cmd +} + +func (c *cmdable) Del(keys ...string) *IntCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "del" + for i, key := range keys { + args[1+i] = key + } + cmd := NewIntCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) Unlink(keys ...string) *IntCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "unlink" + for i, key := range keys { + args[1+i] = key + } + cmd := NewIntCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) Dump(key string) *StringCmd { + cmd := NewStringCmd("dump", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) Exists(keys ...string) *IntCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "exists" + for i, key := range keys { + args[1+i] = key + } + cmd := NewIntCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) Expire(key string, expiration time.Duration) *BoolCmd { + cmd := NewBoolCmd("expire", key, formatSec(expiration)) + c.process(cmd) + return cmd +} + +func (c *cmdable) ExpireAt(key string, tm time.Time) *BoolCmd { + cmd := NewBoolCmd("expireat", key, tm.Unix()) + c.process(cmd) + return cmd +} + +func (c *cmdable) Keys(pattern string) *StringSliceCmd { + cmd := NewStringSliceCmd("keys", pattern) + c.process(cmd) + return cmd +} + +func (c *cmdable) Migrate(host, port, key string, db int64, timeout time.Duration) *StatusCmd { + cmd := NewStatusCmd( + "migrate", + host, + port, + key, + db, + formatMs(timeout), + ) + cmd.setReadTimeout(timeout) + c.process(cmd) + return cmd +} + +func (c *cmdable) Move(key string, db int64) *BoolCmd { + cmd := NewBoolCmd("move", key, db) + c.process(cmd) + return cmd +} + +func (c *cmdable) ObjectRefCount(key string) *IntCmd { + cmd := NewIntCmd("object", "refcount", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) ObjectEncoding(key string) *StringCmd { + cmd := NewStringCmd("object", "encoding", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) ObjectIdleTime(key string) *DurationCmd { + cmd := NewDurationCmd(time.Second, "object", "idletime", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) Persist(key string) *BoolCmd { + cmd := NewBoolCmd("persist", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) PExpire(key string, expiration time.Duration) *BoolCmd { + cmd := NewBoolCmd("pexpire", key, formatMs(expiration)) + c.process(cmd) + return cmd +} + +func (c *cmdable) PExpireAt(key string, tm time.Time) *BoolCmd { + cmd := NewBoolCmd( + "pexpireat", + key, + tm.UnixNano()/int64(time.Millisecond), + ) + c.process(cmd) + return cmd +} + +func (c *cmdable) PTTL(key string) *DurationCmd { + cmd := NewDurationCmd(time.Millisecond, "pttl", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) RandomKey() *StringCmd { + cmd := NewStringCmd("randomkey") + c.process(cmd) + return cmd +} + +func (c *cmdable) Rename(key, newkey string) *StatusCmd { + cmd := NewStatusCmd("rename", key, newkey) + c.process(cmd) + return cmd +} + +func (c *cmdable) RenameNX(key, newkey string) *BoolCmd { + cmd := NewBoolCmd("renamenx", key, newkey) + c.process(cmd) + return cmd +} + +func (c *cmdable) Restore(key string, ttl time.Duration, value string) *StatusCmd { + cmd := NewStatusCmd( + "restore", + key, + formatMs(ttl), + value, + ) + c.process(cmd) + return cmd +} + +func (c *cmdable) RestoreReplace(key string, ttl time.Duration, value string) *StatusCmd { + cmd := NewStatusCmd( + "restore", + key, + 
formatMs(ttl), + value, + "replace", + ) + c.process(cmd) + return cmd +} + +type Sort struct { + By string + Offset, Count int64 + Get []string + Order string + Alpha bool +} + +func (sort *Sort) args(key string) []interface{} { + args := []interface{}{"sort", key} + if sort.By != "" { + args = append(args, "by", sort.By) + } + if sort.Offset != 0 || sort.Count != 0 { + args = append(args, "limit", sort.Offset, sort.Count) + } + for _, get := range sort.Get { + args = append(args, "get", get) + } + if sort.Order != "" { + args = append(args, sort.Order) + } + if sort.Alpha { + args = append(args, "alpha") + } + return args +} + +func (c *cmdable) Sort(key string, sort *Sort) *StringSliceCmd { + cmd := NewStringSliceCmd(sort.args(key)...) + c.process(cmd) + return cmd +} + +func (c *cmdable) SortStore(key, store string, sort *Sort) *IntCmd { + args := sort.args(key) + if store != "" { + args = append(args, "store", store) + } + cmd := NewIntCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) SortInterfaces(key string, sort *Sort) *SliceCmd { + cmd := NewSliceCmd(sort.args(key)...) + c.process(cmd) + return cmd +} + +func (c *cmdable) Touch(keys ...string) *IntCmd { + args := make([]interface{}, len(keys)+1) + args[0] = "touch" + for i, key := range keys { + args[i+1] = key + } + cmd := NewIntCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) TTL(key string) *DurationCmd { + cmd := NewDurationCmd(time.Second, "ttl", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) Type(key string) *StatusCmd { + cmd := NewStatusCmd("type", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) Scan(cursor uint64, match string, count int64) *ScanCmd { + args := []interface{}{"scan", cursor} + if match != "" { + args = append(args, "match", match) + } + if count > 0 { + args = append(args, "count", count) + } + cmd := NewScanCmd(c.process, args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) SScan(key string, cursor uint64, match string, count int64) *ScanCmd { + args := []interface{}{"sscan", key, cursor} + if match != "" { + args = append(args, "match", match) + } + if count > 0 { + args = append(args, "count", count) + } + cmd := NewScanCmd(c.process, args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) HScan(key string, cursor uint64, match string, count int64) *ScanCmd { + args := []interface{}{"hscan", key, cursor} + if match != "" { + args = append(args, "match", match) + } + if count > 0 { + args = append(args, "count", count) + } + cmd := NewScanCmd(c.process, args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) ZScan(key string, cursor uint64, match string, count int64) *ScanCmd { + args := []interface{}{"zscan", key, cursor} + if match != "" { + args = append(args, "match", match) + } + if count > 0 { + args = append(args, "count", count) + } + cmd := NewScanCmd(c.process, args...) + c.process(cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c *cmdable) Append(key, value string) *IntCmd { + cmd := NewIntCmd("append", key, value) + c.process(cmd) + return cmd +} + +type BitCount struct { + Start, End int64 +} + +func (c *cmdable) BitCount(key string, bitCount *BitCount) *IntCmd { + args := []interface{}{"bitcount", key} + if bitCount != nil { + args = append( + args, + bitCount.Start, + bitCount.End, + ) + } + cmd := NewIntCmd(args...) 
+ c.process(cmd) + return cmd +} + +func (c *cmdable) bitOp(op, destKey string, keys ...string) *IntCmd { + args := make([]interface{}, 3+len(keys)) + args[0] = "bitop" + args[1] = op + args[2] = destKey + for i, key := range keys { + args[3+i] = key + } + cmd := NewIntCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) BitOpAnd(destKey string, keys ...string) *IntCmd { + return c.bitOp("and", destKey, keys...) +} + +func (c *cmdable) BitOpOr(destKey string, keys ...string) *IntCmd { + return c.bitOp("or", destKey, keys...) +} + +func (c *cmdable) BitOpXor(destKey string, keys ...string) *IntCmd { + return c.bitOp("xor", destKey, keys...) +} + +func (c *cmdable) BitOpNot(destKey string, key string) *IntCmd { + return c.bitOp("not", destKey, key) +} + +func (c *cmdable) BitPos(key string, bit int64, pos ...int64) *IntCmd { + args := make([]interface{}, 3+len(pos)) + args[0] = "bitpos" + args[1] = key + args[2] = bit + switch len(pos) { + case 0: + case 1: + args[3] = pos[0] + case 2: + args[3] = pos[0] + args[4] = pos[1] + default: + panic("too many arguments") + } + cmd := NewIntCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) Decr(key string) *IntCmd { + cmd := NewIntCmd("decr", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) DecrBy(key string, decrement int64) *IntCmd { + cmd := NewIntCmd("decrby", key, decrement) + c.process(cmd) + return cmd +} + +// Redis `GET key` command. It returns redis.Nil error when key does not exist. +func (c *cmdable) Get(key string) *StringCmd { + cmd := NewStringCmd("get", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) GetBit(key string, offset int64) *IntCmd { + cmd := NewIntCmd("getbit", key, offset) + c.process(cmd) + return cmd +} + +func (c *cmdable) GetRange(key string, start, end int64) *StringCmd { + cmd := NewStringCmd("getrange", key, start, end) + c.process(cmd) + return cmd +} + +func (c *cmdable) GetSet(key string, value interface{}) *StringCmd { + cmd := NewStringCmd("getset", key, value) + c.process(cmd) + return cmd +} + +func (c *cmdable) Incr(key string) *IntCmd { + cmd := NewIntCmd("incr", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) IncrBy(key string, value int64) *IntCmd { + cmd := NewIntCmd("incrby", key, value) + c.process(cmd) + return cmd +} + +func (c *cmdable) IncrByFloat(key string, value float64) *FloatCmd { + cmd := NewFloatCmd("incrbyfloat", key, value) + c.process(cmd) + return cmd +} + +func (c *cmdable) MGet(keys ...string) *SliceCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "mget" + for i, key := range keys { + args[1+i] = key + } + cmd := NewSliceCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) MSet(pairs ...interface{}) *StatusCmd { + args := make([]interface{}, 1, 1+len(pairs)) + args[0] = "mset" + args = appendArgs(args, pairs) + cmd := NewStatusCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) MSetNX(pairs ...interface{}) *BoolCmd { + args := make([]interface{}, 1, 1+len(pairs)) + args[0] = "msetnx" + args = appendArgs(args, pairs) + cmd := NewBoolCmd(args...) + c.process(cmd) + return cmd +} + +// Redis `SET key value [expiration]` command. +// +// Use expiration for `SETEX`-like behavior. +// Zero expiration means the key has no expiration time. 
+func (c *cmdable) Set(key string, value interface{}, expiration time.Duration) *StatusCmd { + args := make([]interface{}, 3, 4) + args[0] = "set" + args[1] = key + args[2] = value + if expiration > 0 { + if usePrecise(expiration) { + args = append(args, "px", formatMs(expiration)) + } else { + args = append(args, "ex", formatSec(expiration)) + } + } + cmd := NewStatusCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) SetBit(key string, offset int64, value int) *IntCmd { + cmd := NewIntCmd( + "setbit", + key, + offset, + value, + ) + c.process(cmd) + return cmd +} + +// Redis `SET key value [expiration] NX` command. +// +// Zero expiration means the key has no expiration time. +func (c *cmdable) SetNX(key string, value interface{}, expiration time.Duration) *BoolCmd { + var cmd *BoolCmd + if expiration == 0 { + // Use old `SETNX` to support old Redis versions. + cmd = NewBoolCmd("setnx", key, value) + } else { + if usePrecise(expiration) { + cmd = NewBoolCmd("set", key, value, "px", formatMs(expiration), "nx") + } else { + cmd = NewBoolCmd("set", key, value, "ex", formatSec(expiration), "nx") + } + } + c.process(cmd) + return cmd +} + +// Redis `SET key value [expiration] XX` command. +// +// Zero expiration means the key has no expiration time. +func (c *cmdable) SetXX(key string, value interface{}, expiration time.Duration) *BoolCmd { + var cmd *BoolCmd + if expiration == 0 { + cmd = NewBoolCmd("set", key, value, "xx") + } else { + if usePrecise(expiration) { + cmd = NewBoolCmd("set", key, value, "px", formatMs(expiration), "xx") + } else { + cmd = NewBoolCmd("set", key, value, "ex", formatSec(expiration), "xx") + } + } + c.process(cmd) + return cmd +} + +func (c *cmdable) SetRange(key string, offset int64, value string) *IntCmd { + cmd := NewIntCmd("setrange", key, offset, value) + c.process(cmd) + return cmd +} + +func (c *cmdable) StrLen(key string) *IntCmd { + cmd := NewIntCmd("strlen", key) + c.process(cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c *cmdable) HDel(key string, fields ...string) *IntCmd { + args := make([]interface{}, 2+len(fields)) + args[0] = "hdel" + args[1] = key + for i, field := range fields { + args[2+i] = field + } + cmd := NewIntCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) HExists(key, field string) *BoolCmd { + cmd := NewBoolCmd("hexists", key, field) + c.process(cmd) + return cmd +} + +func (c *cmdable) HGet(key, field string) *StringCmd { + cmd := NewStringCmd("hget", key, field) + c.process(cmd) + return cmd +} + +func (c *cmdable) HGetAll(key string) *StringStringMapCmd { + cmd := NewStringStringMapCmd("hgetall", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) HIncrBy(key, field string, incr int64) *IntCmd { + cmd := NewIntCmd("hincrby", key, field, incr) + c.process(cmd) + return cmd +} + +func (c *cmdable) HIncrByFloat(key, field string, incr float64) *FloatCmd { + cmd := NewFloatCmd("hincrbyfloat", key, field, incr) + c.process(cmd) + return cmd +} + +func (c *cmdable) HKeys(key string) *StringSliceCmd { + cmd := NewStringSliceCmd("hkeys", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) HLen(key string) *IntCmd { + cmd := NewIntCmd("hlen", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) HMGet(key string, fields ...string) *SliceCmd { + args := make([]interface{}, 2+len(fields)) + args[0] = "hmget" + args[1] = key + for i, field := range fields { + args[2+i] = field + } + cmd := NewSliceCmd(args...) 
+ c.process(cmd) + return cmd +} + +func (c *cmdable) HMSet(key string, fields map[string]interface{}) *StatusCmd { + args := make([]interface{}, 2+len(fields)*2) + args[0] = "hmset" + args[1] = key + i := 2 + for k, v := range fields { + args[i] = k + args[i+1] = v + i += 2 + } + cmd := NewStatusCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) HSet(key, field string, value interface{}) *BoolCmd { + cmd := NewBoolCmd("hset", key, field, value) + c.process(cmd) + return cmd +} + +func (c *cmdable) HSetNX(key, field string, value interface{}) *BoolCmd { + cmd := NewBoolCmd("hsetnx", key, field, value) + c.process(cmd) + return cmd +} + +func (c *cmdable) HVals(key string) *StringSliceCmd { + cmd := NewStringSliceCmd("hvals", key) + c.process(cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c *cmdable) BLPop(timeout time.Duration, keys ...string) *StringSliceCmd { + args := make([]interface{}, 1+len(keys)+1) + args[0] = "blpop" + for i, key := range keys { + args[1+i] = key + } + args[len(args)-1] = formatSec(timeout) + cmd := NewStringSliceCmd(args...) + cmd.setReadTimeout(timeout) + c.process(cmd) + return cmd +} + +func (c *cmdable) BRPop(timeout time.Duration, keys ...string) *StringSliceCmd { + args := make([]interface{}, 1+len(keys)+1) + args[0] = "brpop" + for i, key := range keys { + args[1+i] = key + } + args[len(keys)+1] = formatSec(timeout) + cmd := NewStringSliceCmd(args...) + cmd.setReadTimeout(timeout) + c.process(cmd) + return cmd +} + +func (c *cmdable) BRPopLPush(source, destination string, timeout time.Duration) *StringCmd { + cmd := NewStringCmd( + "brpoplpush", + source, + destination, + formatSec(timeout), + ) + cmd.setReadTimeout(timeout) + c.process(cmd) + return cmd +} + +func (c *cmdable) LIndex(key string, index int64) *StringCmd { + cmd := NewStringCmd("lindex", key, index) + c.process(cmd) + return cmd +} + +func (c *cmdable) LInsert(key, op string, pivot, value interface{}) *IntCmd { + cmd := NewIntCmd("linsert", key, op, pivot, value) + c.process(cmd) + return cmd +} + +func (c *cmdable) LInsertBefore(key string, pivot, value interface{}) *IntCmd { + cmd := NewIntCmd("linsert", key, "before", pivot, value) + c.process(cmd) + return cmd +} + +func (c *cmdable) LInsertAfter(key string, pivot, value interface{}) *IntCmd { + cmd := NewIntCmd("linsert", key, "after", pivot, value) + c.process(cmd) + return cmd +} + +func (c *cmdable) LLen(key string) *IntCmd { + cmd := NewIntCmd("llen", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) LPop(key string) *StringCmd { + cmd := NewStringCmd("lpop", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) LPush(key string, values ...interface{}) *IntCmd { + args := make([]interface{}, 2, 2+len(values)) + args[0] = "lpush" + args[1] = key + args = appendArgs(args, values) + cmd := NewIntCmd(args...) 
+ c.process(cmd) + return cmd +} + +func (c *cmdable) LPushX(key string, value interface{}) *IntCmd { + cmd := NewIntCmd("lpushx", key, value) + c.process(cmd) + return cmd +} + +func (c *cmdable) LRange(key string, start, stop int64) *StringSliceCmd { + cmd := NewStringSliceCmd( + "lrange", + key, + start, + stop, + ) + c.process(cmd) + return cmd +} + +func (c *cmdable) LRem(key string, count int64, value interface{}) *IntCmd { + cmd := NewIntCmd("lrem", key, count, value) + c.process(cmd) + return cmd +} + +func (c *cmdable) LSet(key string, index int64, value interface{}) *StatusCmd { + cmd := NewStatusCmd("lset", key, index, value) + c.process(cmd) + return cmd +} + +func (c *cmdable) LTrim(key string, start, stop int64) *StatusCmd { + cmd := NewStatusCmd( + "ltrim", + key, + start, + stop, + ) + c.process(cmd) + return cmd +} + +func (c *cmdable) RPop(key string) *StringCmd { + cmd := NewStringCmd("rpop", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) RPopLPush(source, destination string) *StringCmd { + cmd := NewStringCmd("rpoplpush", source, destination) + c.process(cmd) + return cmd +} + +func (c *cmdable) RPush(key string, values ...interface{}) *IntCmd { + args := make([]interface{}, 2, 2+len(values)) + args[0] = "rpush" + args[1] = key + args = appendArgs(args, values) + cmd := NewIntCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) RPushX(key string, value interface{}) *IntCmd { + cmd := NewIntCmd("rpushx", key, value) + c.process(cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c *cmdable) SAdd(key string, members ...interface{}) *IntCmd { + args := make([]interface{}, 2, 2+len(members)) + args[0] = "sadd" + args[1] = key + args = appendArgs(args, members) + cmd := NewIntCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) SCard(key string) *IntCmd { + cmd := NewIntCmd("scard", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) SDiff(keys ...string) *StringSliceCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "sdiff" + for i, key := range keys { + args[1+i] = key + } + cmd := NewStringSliceCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) SDiffStore(destination string, keys ...string) *IntCmd { + args := make([]interface{}, 2+len(keys)) + args[0] = "sdiffstore" + args[1] = destination + for i, key := range keys { + args[2+i] = key + } + cmd := NewIntCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) SInter(keys ...string) *StringSliceCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "sinter" + for i, key := range keys { + args[1+i] = key + } + cmd := NewStringSliceCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) SInterStore(destination string, keys ...string) *IntCmd { + args := make([]interface{}, 2+len(keys)) + args[0] = "sinterstore" + args[1] = destination + for i, key := range keys { + args[2+i] = key + } + cmd := NewIntCmd(args...) 
+ c.process(cmd) + return cmd +} + +func (c *cmdable) SIsMember(key string, member interface{}) *BoolCmd { + cmd := NewBoolCmd("sismember", key, member) + c.process(cmd) + return cmd +} + +// Redis `SMEMBERS key` command output as a slice +func (c *cmdable) SMembers(key string) *StringSliceCmd { + cmd := NewStringSliceCmd("smembers", key) + c.process(cmd) + return cmd +} + +// Redis `SMEMBERS key` command output as a map +func (c *cmdable) SMembersMap(key string) *StringStructMapCmd { + cmd := NewStringStructMapCmd("smembers", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) SMove(source, destination string, member interface{}) *BoolCmd { + cmd := NewBoolCmd("smove", source, destination, member) + c.process(cmd) + return cmd +} + +// Redis `SPOP key` command. +func (c *cmdable) SPop(key string) *StringCmd { + cmd := NewStringCmd("spop", key) + c.process(cmd) + return cmd +} + +// Redis `SPOP key count` command. +func (c *cmdable) SPopN(key string, count int64) *StringSliceCmd { + cmd := NewStringSliceCmd("spop", key, count) + c.process(cmd) + return cmd +} + +// Redis `SRANDMEMBER key` command. +func (c *cmdable) SRandMember(key string) *StringCmd { + cmd := NewStringCmd("srandmember", key) + c.process(cmd) + return cmd +} + +// Redis `SRANDMEMBER key count` command. +func (c *cmdable) SRandMemberN(key string, count int64) *StringSliceCmd { + cmd := NewStringSliceCmd("srandmember", key, count) + c.process(cmd) + return cmd +} + +func (c *cmdable) SRem(key string, members ...interface{}) *IntCmd { + args := make([]interface{}, 2, 2+len(members)) + args[0] = "srem" + args[1] = key + args = appendArgs(args, members) + cmd := NewIntCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) SUnion(keys ...string) *StringSliceCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "sunion" + for i, key := range keys { + args[1+i] = key + } + cmd := NewStringSliceCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) SUnionStore(destination string, keys ...string) *IntCmd { + args := make([]interface{}, 2+len(keys)) + args[0] = "sunionstore" + args[1] = destination + for i, key := range keys { + args[2+i] = key + } + cmd := NewIntCmd(args...) + c.process(cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +type XAddArgs struct { + Stream string + MaxLen int64 // MAXLEN N + MaxLenApprox int64 // MAXLEN ~ N + ID string + Values map[string]interface{} +} + +func (c *cmdable) XAdd(a *XAddArgs) *StringCmd { + args := make([]interface{}, 0, 6+len(a.Values)*2) + args = append(args, "xadd") + args = append(args, a.Stream) + if a.MaxLen > 0 { + args = append(args, "maxlen", a.MaxLen) + } else if a.MaxLenApprox > 0 { + args = append(args, "maxlen", "~", a.MaxLenApprox) + } + if a.ID != "" { + args = append(args, a.ID) + } else { + args = append(args, "*") + } + for k, v := range a.Values { + args = append(args, k) + args = append(args, v) + } + + cmd := NewStringCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) XDel(stream string, ids ...string) *IntCmd { + args := []interface{}{"xdel", stream} + for _, id := range ids { + args = append(args, id) + } + cmd := NewIntCmd(args...) 
+ c.process(cmd) + return cmd +} + +func (c *cmdable) XLen(stream string) *IntCmd { + cmd := NewIntCmd("xlen", stream) + c.process(cmd) + return cmd +} + +func (c *cmdable) XRange(stream, start, stop string) *XMessageSliceCmd { + cmd := NewXMessageSliceCmd("xrange", stream, start, stop) + c.process(cmd) + return cmd +} + +func (c *cmdable) XRangeN(stream, start, stop string, count int64) *XMessageSliceCmd { + cmd := NewXMessageSliceCmd("xrange", stream, start, stop, "count", count) + c.process(cmd) + return cmd +} + +func (c *cmdable) XRevRange(stream, start, stop string) *XMessageSliceCmd { + cmd := NewXMessageSliceCmd("xrevrange", stream, start, stop) + c.process(cmd) + return cmd +} + +func (c *cmdable) XRevRangeN(stream, start, stop string, count int64) *XMessageSliceCmd { + cmd := NewXMessageSliceCmd("xrevrange", stream, start, stop, "count", count) + c.process(cmd) + return cmd +} + +type XReadArgs struct { + Streams []string + Count int64 + Block time.Duration +} + +func (c *cmdable) XRead(a *XReadArgs) *XStreamSliceCmd { + args := make([]interface{}, 0, 5+len(a.Streams)) + args = append(args, "xread") + if a.Count > 0 { + args = append(args, "count") + args = append(args, a.Count) + } + if a.Block >= 0 { + args = append(args, "block") + args = append(args, int64(a.Block/time.Millisecond)) + } + args = append(args, "streams") + for _, s := range a.Streams { + args = append(args, s) + } + + cmd := NewXStreamSliceCmd(args...) + if a.Block >= 0 { + cmd.setReadTimeout(a.Block) + } + c.process(cmd) + return cmd +} + +func (c *cmdable) XReadStreams(streams ...string) *XStreamSliceCmd { + return c.XRead(&XReadArgs{ + Streams: streams, + Block: -1, + }) +} + +func (c *cmdable) XGroupCreate(stream, group, start string) *StatusCmd { + cmd := NewStatusCmd("xgroup", "create", stream, group, start) + c.process(cmd) + return cmd +} + +func (c *cmdable) XGroupCreateMkStream(stream, group, start string) *StatusCmd { + cmd := NewStatusCmd("xgroup", "create", stream, group, start, "mkstream") + c.process(cmd) + return cmd +} + +func (c *cmdable) XGroupSetID(stream, group, start string) *StatusCmd { + cmd := NewStatusCmd("xgroup", "setid", stream, group, start) + c.process(cmd) + return cmd +} + +func (c *cmdable) XGroupDestroy(stream, group string) *IntCmd { + cmd := NewIntCmd("xgroup", "destroy", stream, group) + c.process(cmd) + return cmd +} + +func (c *cmdable) XGroupDelConsumer(stream, group, consumer string) *IntCmd { + cmd := NewIntCmd("xgroup", "delconsumer", stream, group, consumer) + c.process(cmd) + return cmd +} + +type XReadGroupArgs struct { + Group string + Consumer string + // List of streams and ids. + Streams []string + Count int64 + Block time.Duration + NoAck bool +} + +func (c *cmdable) XReadGroup(a *XReadGroupArgs) *XStreamSliceCmd { + args := make([]interface{}, 0, 8+len(a.Streams)) + args = append(args, "xreadgroup", "group", a.Group, a.Consumer) + if a.Count > 0 { + args = append(args, "count", a.Count) + } + if a.Block >= 0 { + args = append(args, "block", int64(a.Block/time.Millisecond)) + } + if a.NoAck { + args = append(args, "noack") + } + args = append(args, "streams") + for _, s := range a.Streams { + args = append(args, s) + } + + cmd := NewXStreamSliceCmd(args...) + if a.Block >= 0 { + cmd.setReadTimeout(a.Block) + } + c.process(cmd) + return cmd +} + +func (c *cmdable) XAck(stream, group string, ids ...string) *IntCmd { + args := []interface{}{"xack", stream, group} + for _, id := range ids { + args = append(args, id) + } + cmd := NewIntCmd(args...) 
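+	// Illustrative consumer-group sketch for XGroupCreateMkStream/XReadGroup
+	// above (assumes a connected client `rdb`; the stream, group and consumer
+	// names are hypothetical):
+	//
+	//	rdb.XGroupCreateMkStream("events", "workers", "$") // "$" = only new entries
+	//	streams, err := rdb.XReadGroup(&XReadGroupArgs{
+	//		Group:    "workers",
+	//		Consumer: "worker-1",
+	//		Streams:  []string{"events", ">"}, // ">" = undelivered messages
+	//		Count:    10,
+	//		Block:    time.Second,
+	//	}).Result()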
+ c.process(cmd) + return cmd +} + +func (c *cmdable) XPending(stream, group string) *XPendingCmd { + cmd := NewXPendingCmd("xpending", stream, group) + c.process(cmd) + return cmd +} + +type XPendingExtArgs struct { + Stream string + Group string + Start string + End string + Count int64 + Consumer string +} + +func (c *cmdable) XPendingExt(a *XPendingExtArgs) *XPendingExtCmd { + args := make([]interface{}, 0, 7) + args = append(args, "xpending", a.Stream, a.Group, a.Start, a.End, a.Count) + if a.Consumer != "" { + args = append(args, a.Consumer) + } + cmd := NewXPendingExtCmd(args...) + c.process(cmd) + return cmd +} + +type XClaimArgs struct { + Stream string + Group string + Consumer string + MinIdle time.Duration + Messages []string +} + +func (c *cmdable) XClaim(a *XClaimArgs) *XMessageSliceCmd { + args := xClaimArgs(a) + cmd := NewXMessageSliceCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) XClaimJustID(a *XClaimArgs) *StringSliceCmd { + args := xClaimArgs(a) + args = append(args, "justid") + cmd := NewStringSliceCmd(args...) + c.process(cmd) + return cmd +} + +func xClaimArgs(a *XClaimArgs) []interface{} { + args := make([]interface{}, 0, 4+len(a.Messages)) + args = append(args, + "xclaim", + a.Stream, + a.Group, a.Consumer, + int64(a.MinIdle/time.Millisecond)) + for _, id := range a.Messages { + args = append(args, id) + } + return args +} + +func (c *cmdable) XTrim(key string, maxLen int64) *IntCmd { + cmd := NewIntCmd("xtrim", key, "maxlen", maxLen) + c.process(cmd) + return cmd +} + +func (c *cmdable) XTrimApprox(key string, maxLen int64) *IntCmd { + cmd := NewIntCmd("xtrim", key, "maxlen", "~", maxLen) + c.process(cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +// Z represents sorted set member. +type Z struct { + Score float64 + Member interface{} +} + +// ZWithKey represents sorted set member including the name of the key where it was popped. +type ZWithKey struct { + Z + Key string +} + +// ZStore is used as an arg to ZInterStore and ZUnionStore. +type ZStore struct { + Weights []float64 + // Can be SUM, MIN or MAX. + Aggregate string +} + +// Redis `BZPOPMAX key [key ...] timeout` command. +func (c *cmdable) BZPopMax(timeout time.Duration, keys ...string) *ZWithKeyCmd { + args := make([]interface{}, 1+len(keys)+1) + args[0] = "bzpopmax" + for i, key := range keys { + args[1+i] = key + } + args[len(args)-1] = formatSec(timeout) + cmd := NewZWithKeyCmd(args...) + cmd.setReadTimeout(timeout) + c.process(cmd) + return cmd +} + +// Redis `BZPOPMIN key [key ...] timeout` command. +func (c *cmdable) BZPopMin(timeout time.Duration, keys ...string) *ZWithKeyCmd { + args := make([]interface{}, 1+len(keys)+1) + args[0] = "bzpopmin" + for i, key := range keys { + args[1+i] = key + } + args[len(args)-1] = formatSec(timeout) + cmd := NewZWithKeyCmd(args...) + cmd.setReadTimeout(timeout) + c.process(cmd) + return cmd +} + +func (c *cmdable) zAdd(a []interface{}, n int, members ...Z) *IntCmd { + for i, m := range members { + a[n+2*i] = m.Score + a[n+2*i+1] = m.Member + } + cmd := NewIntCmd(a...) + c.process(cmd) + return cmd +} + +// Redis `ZADD key score member [score member ...]` command. +func (c *cmdable) ZAdd(key string, members ...Z) *IntCmd { + const n = 2 + a := make([]interface{}, n+2*len(members)) + a[0], a[1] = "zadd", key + return c.zAdd(a, n, members...) +} + +// Redis `ZADD key NX score member [score member ...]` command. 
+func (c *cmdable) ZAddNX(key string, members ...Z) *IntCmd { + const n = 3 + a := make([]interface{}, n+2*len(members)) + a[0], a[1], a[2] = "zadd", key, "nx" + return c.zAdd(a, n, members...) +} + +// Redis `ZADD key XX score member [score member ...]` command. +func (c *cmdable) ZAddXX(key string, members ...Z) *IntCmd { + const n = 3 + a := make([]interface{}, n+2*len(members)) + a[0], a[1], a[2] = "zadd", key, "xx" + return c.zAdd(a, n, members...) +} + +// Redis `ZADD key CH score member [score member ...]` command. +func (c *cmdable) ZAddCh(key string, members ...Z) *IntCmd { + const n = 3 + a := make([]interface{}, n+2*len(members)) + a[0], a[1], a[2] = "zadd", key, "ch" + return c.zAdd(a, n, members...) +} + +// Redis `ZADD key NX CH score member [score member ...]` command. +func (c *cmdable) ZAddNXCh(key string, members ...Z) *IntCmd { + const n = 4 + a := make([]interface{}, n+2*len(members)) + a[0], a[1], a[2], a[3] = "zadd", key, "nx", "ch" + return c.zAdd(a, n, members...) +} + +// Redis `ZADD key XX CH score member [score member ...]` command. +func (c *cmdable) ZAddXXCh(key string, members ...Z) *IntCmd { + const n = 4 + a := make([]interface{}, n+2*len(members)) + a[0], a[1], a[2], a[3] = "zadd", key, "xx", "ch" + return c.zAdd(a, n, members...) +} + +func (c *cmdable) zIncr(a []interface{}, n int, members ...Z) *FloatCmd { + for i, m := range members { + a[n+2*i] = m.Score + a[n+2*i+1] = m.Member + } + cmd := NewFloatCmd(a...) + c.process(cmd) + return cmd +} + +// Redis `ZADD key INCR score member` command. +func (c *cmdable) ZIncr(key string, member Z) *FloatCmd { + const n = 3 + a := make([]interface{}, n+2) + a[0], a[1], a[2] = "zadd", key, "incr" + return c.zIncr(a, n, member) +} + +// Redis `ZADD key NX INCR score member` command. +func (c *cmdable) ZIncrNX(key string, member Z) *FloatCmd { + const n = 4 + a := make([]interface{}, n+2) + a[0], a[1], a[2], a[3] = "zadd", key, "incr", "nx" + return c.zIncr(a, n, member) +} + +// Redis `ZADD key XX INCR score member` command. +func (c *cmdable) ZIncrXX(key string, member Z) *FloatCmd { + const n = 4 + a := make([]interface{}, n+2) + a[0], a[1], a[2], a[3] = "zadd", key, "incr", "xx" + return c.zIncr(a, n, member) +} + +func (c *cmdable) ZCard(key string) *IntCmd { + cmd := NewIntCmd("zcard", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) ZCount(key, min, max string) *IntCmd { + cmd := NewIntCmd("zcount", key, min, max) + c.process(cmd) + return cmd +} + +func (c *cmdable) ZLexCount(key, min, max string) *IntCmd { + cmd := NewIntCmd("zlexcount", key, min, max) + c.process(cmd) + return cmd +} + +func (c *cmdable) ZIncrBy(key string, increment float64, member string) *FloatCmd { + cmd := NewFloatCmd("zincrby", key, increment, member) + c.process(cmd) + return cmd +} + +func (c *cmdable) ZInterStore(destination string, store ZStore, keys ...string) *IntCmd { + args := make([]interface{}, 3+len(keys)) + args[0] = "zinterstore" + args[1] = destination + args[2] = len(keys) + for i, key := range keys { + args[3+i] = key + } + if len(store.Weights) > 0 { + args = append(args, "weights") + for _, weight := range store.Weights { + args = append(args, weight) + } + } + if store.Aggregate != "" { + args = append(args, "aggregate", store.Aggregate) + } + cmd := NewIntCmd(args...) 
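+	// Illustrative usage sketch for the ZADD/ZINCRBY wrappers above (assumes a
+	// connected client `rdb`; key and member names are hypothetical):
+	//
+	//	rdb.ZAdd("scores", Z{Score: 1, Member: "alice"}, Z{Score: 2, Member: "bob"})
+	//	newScore, err := rdb.ZIncrBy("scores", 5, "alice").Result() // 6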
+ c.process(cmd) + return cmd +} + +func (c *cmdable) ZPopMax(key string, count ...int64) *ZSliceCmd { + args := []interface{}{ + "zpopmax", + key, + } + + switch len(count) { + case 0: + break + case 1: + args = append(args, count[0]) + default: + panic("too many arguments") + } + + cmd := NewZSliceCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) ZPopMin(key string, count ...int64) *ZSliceCmd { + args := []interface{}{ + "zpopmin", + key, + } + + switch len(count) { + case 0: + break + case 1: + args = append(args, count[0]) + default: + panic("too many arguments") + } + + cmd := NewZSliceCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) zRange(key string, start, stop int64, withScores bool) *StringSliceCmd { + args := []interface{}{ + "zrange", + key, + start, + stop, + } + if withScores { + args = append(args, "withscores") + } + cmd := NewStringSliceCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) ZRange(key string, start, stop int64) *StringSliceCmd { + return c.zRange(key, start, stop, false) +} + +func (c *cmdable) ZRangeWithScores(key string, start, stop int64) *ZSliceCmd { + cmd := NewZSliceCmd("zrange", key, start, stop, "withscores") + c.process(cmd) + return cmd +} + +type ZRangeBy struct { + Min, Max string + Offset, Count int64 +} + +func (c *cmdable) zRangeBy(zcmd, key string, opt ZRangeBy, withScores bool) *StringSliceCmd { + args := []interface{}{zcmd, key, opt.Min, opt.Max} + if withScores { + args = append(args, "withscores") + } + if opt.Offset != 0 || opt.Count != 0 { + args = append( + args, + "limit", + opt.Offset, + opt.Count, + ) + } + cmd := NewStringSliceCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) ZRangeByScore(key string, opt ZRangeBy) *StringSliceCmd { + return c.zRangeBy("zrangebyscore", key, opt, false) +} + +func (c *cmdable) ZRangeByLex(key string, opt ZRangeBy) *StringSliceCmd { + return c.zRangeBy("zrangebylex", key, opt, false) +} + +func (c *cmdable) ZRangeByScoreWithScores(key string, opt ZRangeBy) *ZSliceCmd { + args := []interface{}{"zrangebyscore", key, opt.Min, opt.Max, "withscores"} + if opt.Offset != 0 || opt.Count != 0 { + args = append( + args, + "limit", + opt.Offset, + opt.Count, + ) + } + cmd := NewZSliceCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) ZRank(key, member string) *IntCmd { + cmd := NewIntCmd("zrank", key, member) + c.process(cmd) + return cmd +} + +func (c *cmdable) ZRem(key string, members ...interface{}) *IntCmd { + args := make([]interface{}, 2, 2+len(members)) + args[0] = "zrem" + args[1] = key + args = appendArgs(args, members) + cmd := NewIntCmd(args...) 
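+	// Illustrative usage sketch for the ZRANGEBYSCORE wrappers above (assumes a
+	// connected client `rdb`; the key name is hypothetical):
+	//
+	//	members, err := rdb.ZRangeByScoreWithScores("scores", ZRangeBy{
+	//		Min:    "1",
+	//		Max:    "+inf",
+	//		Offset: 0,
+	//		Count:  10, // LIMIT 0 10
+	//	}).Result()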
+ c.process(cmd) + return cmd +} + +func (c *cmdable) ZRemRangeByRank(key string, start, stop int64) *IntCmd { + cmd := NewIntCmd( + "zremrangebyrank", + key, + start, + stop, + ) + c.process(cmd) + return cmd +} + +func (c *cmdable) ZRemRangeByScore(key, min, max string) *IntCmd { + cmd := NewIntCmd("zremrangebyscore", key, min, max) + c.process(cmd) + return cmd +} + +func (c *cmdable) ZRemRangeByLex(key, min, max string) *IntCmd { + cmd := NewIntCmd("zremrangebylex", key, min, max) + c.process(cmd) + return cmd +} + +func (c *cmdable) ZRevRange(key string, start, stop int64) *StringSliceCmd { + cmd := NewStringSliceCmd("zrevrange", key, start, stop) + c.process(cmd) + return cmd +} + +func (c *cmdable) ZRevRangeWithScores(key string, start, stop int64) *ZSliceCmd { + cmd := NewZSliceCmd("zrevrange", key, start, stop, "withscores") + c.process(cmd) + return cmd +} + +func (c *cmdable) zRevRangeBy(zcmd, key string, opt ZRangeBy) *StringSliceCmd { + args := []interface{}{zcmd, key, opt.Max, opt.Min} + if opt.Offset != 0 || opt.Count != 0 { + args = append( + args, + "limit", + opt.Offset, + opt.Count, + ) + } + cmd := NewStringSliceCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) ZRevRangeByScore(key string, opt ZRangeBy) *StringSliceCmd { + return c.zRevRangeBy("zrevrangebyscore", key, opt) +} + +func (c *cmdable) ZRevRangeByLex(key string, opt ZRangeBy) *StringSliceCmd { + return c.zRevRangeBy("zrevrangebylex", key, opt) +} + +func (c *cmdable) ZRevRangeByScoreWithScores(key string, opt ZRangeBy) *ZSliceCmd { + args := []interface{}{"zrevrangebyscore", key, opt.Max, opt.Min, "withscores"} + if opt.Offset != 0 || opt.Count != 0 { + args = append( + args, + "limit", + opt.Offset, + opt.Count, + ) + } + cmd := NewZSliceCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) ZRevRank(key, member string) *IntCmd { + cmd := NewIntCmd("zrevrank", key, member) + c.process(cmd) + return cmd +} + +func (c *cmdable) ZScore(key, member string) *FloatCmd { + cmd := NewFloatCmd("zscore", key, member) + c.process(cmd) + return cmd +} + +func (c *cmdable) ZUnionStore(dest string, store ZStore, keys ...string) *IntCmd { + args := make([]interface{}, 3+len(keys)) + args[0] = "zunionstore" + args[1] = dest + args[2] = len(keys) + for i, key := range keys { + args[3+i] = key + } + if len(store.Weights) > 0 { + args = append(args, "weights") + for _, weight := range store.Weights { + args = append(args, weight) + } + } + if store.Aggregate != "" { + args = append(args, "aggregate", store.Aggregate) + } + cmd := NewIntCmd(args...) + c.process(cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c *cmdable) PFAdd(key string, els ...interface{}) *IntCmd { + args := make([]interface{}, 2, 2+len(els)) + args[0] = "pfadd" + args[1] = key + args = appendArgs(args, els) + cmd := NewIntCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) PFCount(keys ...string) *IntCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "pfcount" + for i, key := range keys { + args[1+i] = key + } + cmd := NewIntCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) PFMerge(dest string, keys ...string) *StatusCmd { + args := make([]interface{}, 2+len(keys)) + args[0] = "pfmerge" + args[1] = dest + for i, key := range keys { + args[2+i] = key + } + cmd := NewStatusCmd(args...) 
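+	// Illustrative usage sketch for the HyperLogLog wrappers above (assumes a
+	// connected client `rdb`; key names are hypothetical):
+	//
+	//	rdb.PFAdd("visitors:today", "user-1", "user-2")
+	//	approx, err := rdb.PFCount("visitors:today").Result() // ~2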
+	c.process(cmd)
+	return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c *cmdable) BgRewriteAOF() *StatusCmd {
+	cmd := NewStatusCmd("bgrewriteaof")
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) BgSave() *StatusCmd {
+	cmd := NewStatusCmd("bgsave")
+	c.process(cmd)
+	return cmd
+}
+
+func (c *cmdable) ClientKill(ipPort string) *StatusCmd {
+	cmd := NewStatusCmd("client", "kill", ipPort)
+	c.process(cmd)
+	return cmd
+}
+
+// ClientKillByFilter uses the new-style syntax, while ClientKill uses the old one.
+// CLIENT KILL