From cd24f6e8488aececb75c59402eea4d3451a506f2 Mon Sep 17 00:00:00 2001
From: mitchell
Date: Wed, 22 May 2019 08:22:40 -0700
Subject: [PATCH] Implement encryption functionality of spc and password
 generation; refactor spc and server

---
 Dockerfile                              |   15 +-
 Makefile                                |   18 +-
 cmd/server/server.go                    |   22 +-
 cmd/spc/cmd/init.go                     |   95 ++
 cmd/spc/cmd/root.go                     |  147 +++
 cmd/spc/main.go                         |   13 +
 credentials/cmds/cmds.go                |   21 +
 credentials/cmds/create.go              |  146 +++
 credentials/cmds/get.go                 |   63 ++
 credentials/cmds/list.go                |   58 +
 credentials/protobuf/service.pb.go      |  130 +--
 credentials/protobuf/service.proto      |    5 +-
 credentials/repositories/grpc_client.go |  115 ++
 credentials/repositories/redis.go       |   60 +-
 credentials/service/service.go          |   19 +-
 credentials/transport/encoding.go       |  121 ++-
 credentials/transport/grpc_server.go    |    2 +-
 credentials/types/credential.go         |    6 +-
 credentials/types/interfaces.go         |   10 +
 crypto/cbc.go                           |   99 ++
 docker-compose.yml                      |   21 +
 dual-entry                              |   36 -
 go.mod                                  |   24 +-
 go.sum                                  |  108 +-
 grpcurl.sh                              |    2 +-
 redis.conf                              | 1317 -----------------------
 26 files changed, 1151 insertions(+), 1522 deletions(-)
 create mode 100644 cmd/spc/cmd/init.go
 create mode 100644 cmd/spc/cmd/root.go
 create mode 100644 cmd/spc/main.go
 create mode 100644 credentials/cmds/cmds.go
 create mode 100644 credentials/cmds/create.go
 create mode 100644 credentials/cmds/get.go
 create mode 100644 credentials/cmds/list.go
 create mode 100644 credentials/repositories/grpc_client.go
 create mode 100644 crypto/cbc.go
 create mode 100644 docker-compose.yml
 delete mode 100755 dual-entry
 delete mode 100644 redis.conf

diff --git a/Dockerfile b/Dockerfile
index 4e3b325..90d5513 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,22 +1,11 @@
 FROM golang:1.11.5 as build
 WORKDIR /go/src/github.com/mitchell/selfpass
 COPY . .
-RUN go get -u golang.org/x/tools/cmd/goimports
 ENV GO111MODULE=on
 RUN make build
 
 FROM debian:stable-20190326-slim
-RUN printf "deb http://httpredir.debian.org/debian stretch-backports main non-free\ndeb-src http://httpredir.debian.org/debian stretch-backports main non-free" > /etc/apt/sources.list.d/backports.list
-RUN apt-get update && apt-get install -t stretch-backports -y --no-install-recommends redis-server=5:5.0.3-3~bpo9+2 \
-    && apt-get clean \
-    && rm -rf /var/lib/apt/lists/*
-WORKDIR /usr/bin/selfpass/
+WORKDIR /usr/bin
 COPY --from=build /go/src/github.com/mitchell/selfpass/bin/server .
-COPY --from=build /go/src/github.com/mitchell/selfpass/redis.conf .
-COPY --from=build /go/src/github.com/mitchell/selfpass/db/dump.rdb ./db/dump.rdb
-COPY --from=build /go/src/github.com/mitchell/selfpass/certs/ca.pem ./certs/ca.pem
-COPY --from=build /go/src/github.com/mitchell/selfpass/certs/server.pem ./certs/server.pem
-COPY --from=build /go/src/github.com/mitchell/selfpass/certs/server-key.pem ./certs/server-key.pem
-COPY --from=build /go/src/github.com/mitchell/selfpass/dual-entry ./dual-entry
-ENTRYPOINT ./dual-entry
+ENTRYPOINT ["server"]
 EXPOSE 8080
diff --git a/Makefile b/Makefile
index 0c91b8e..d0c0d1b 100644
--- a/Makefile
+++ b/Makefile
@@ -1,23 +1,23 @@
 .PHONY: all build clean format test docker-build
 
 build: clean format test
-	go build -o ./bin/server ./cmd/server/server.go
-
-docker-build:
-	docker build -t selfpass .
+	go build -o ./bin/server ./cmd/server/server.go
 
 clean:
 	rm -rf ./bin
 	go mod tidy
 
-dev: docker-build
-	docker run -i -t -p 8080:8080 selfpass -v -dev
+docker:
+	docker-compose build
 
-local: docker-build
-	docker run -i -t -p 8080:8080 selfpass
+start:
+	docker-compose up
 
 format:
-	goimports -w -l .
+	gofmt -w -s -l .
+
+install-spc:
+	go install ./cmd/spc
 
 gen-protoc:
 	protoc --go_out=plugins=grpc:. \
diff --git a/cmd/server/server.go b/cmd/server/server.go
index 3d0ae12..c63c4d8 100644
--- a/cmd/server/server.go
+++ b/cmd/server/server.go
@@ -10,16 +10,18 @@ import (
 	"net"
 	"os"
 	"os/signal"
+	"syscall"
 
 	"github.com/go-kit/kit/log"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/credentials"
+
 	"github.com/mitchell/selfpass/credentials/middleware"
 	"github.com/mitchell/selfpass/credentials/protobuf"
 	"github.com/mitchell/selfpass/credentials/repositories"
 	"github.com/mitchell/selfpass/credentials/service"
 	"github.com/mitchell/selfpass/credentials/transport"
 	"github.com/mitchell/selfpass/credentials/types"
-	"google.golang.org/grpc"
-	"google.golang.org/grpc/credentials"
 )
 
 var logger log.Logger
@@ -29,9 +31,9 @@ func main() {
 		stop    = make(chan os.Signal, 1)
 		dev     = flag.Bool("dev", false, "enables dev mode logging")
 		port    = flag.String("port", "8080", "specify the port to listen on")
-		crtFile = flag.String("cert", "./certs/server.pem", "specify the cert file")
-		keyFile = flag.String("key", "./certs/server-key.pem", "specify the private key file")
-		caFile  = flag.String("ca", "./certs/ca.pem", "specify the ca cert file")
+		crtFile = flag.String("cert", "/run/secrets/server_cert", "specify the cert file")
+		keyFile = flag.String("key", "/run/secrets/server_key", "specify the private key file")
+		caFile  = flag.String("ca", "/run/secrets/ca_cert", "specify the ca cert file")
 		verbose = flag.Bool("v", false, "be more verbose")
 		// tableName = flag.String(
 		// 	"credential-table-name",
@@ -39,10 +41,10 @@ func main() {
 		// 	"specify the credential table name on AWS",
 		// )
 	)
-
-	signal.Notify(stop, os.Interrupt)
 	flag.Parse()
 
+	signal.Notify(stop, syscall.SIGINT, syscall.SIGTERM)
+
 	logger = newLogger(os.Stdout, *dev)
 
 	keypair, err := tls.LoadX509KeyPair(*crtFile, *keyFile)
@@ -61,9 +63,7 @@ func main() {
 	})
 
 	// db := repositories.NewDynamoTable(*tableName)
-	db, err := repositories.NewRedisConn(
-		repositories.ConnConfig{NetworkType: "tcp", Address: "localhost:6379", Size: 2},
-	)
+	db, err := repositories.NewRedisConn("tcp", "redis:6379", 2)
 	check(err)
 
 	var svc types.Service
diff --git a/cmd/spc/cmd/init.go b/cmd/spc/cmd/init.go
new file mode 100644
index 0000000..6d10dc3
--- /dev/null
+++ b/cmd/spc/cmd/init.go
@@ -0,0 +1,95 @@
+package cmd
+
+import (
+	"fmt"
+	"io/ioutil"
+	"strings"
+
+	"github.com/google/uuid"
+	"github.com/mitchellh/go-homedir"
+	"github.com/spf13/cobra"
+	"github.com/spf13/viper"
+	"gopkg.in/AlecAivazis/survey.v1"
+
+	"github.com/mitchell/selfpass/credentials/cmds"
+)
+
+func makeInitCmd(cfg *viper.Viper) *cobra.Command {
+	initCmd := &cobra.Command{
+		Use:   "init",
+		Short: "This command initializes SPC for the first time",
+		Long: `This command initializes SPC for the first time, writing the user's private key and
+server certificates to the user configuration (all of which will be encrypted).`,
+		Run: func(cmd *cobra.Command, args []string) {
+			var (
+				hasPK       bool
+				masterpass  string
+				cmasterpass string
+				target      string
+				caFile      string
+				certFile    string
+				keyFile     string
+				prompt      survey.Prompt
+				privateKey  = strings.Replace(uuid.New().String(), "-", "", -1)
+			)
+
+			prompt = &survey.Password{Message: "Master password:"}
+			check(survey.AskOne(prompt, &masterpass, nil))
+
+			prompt = &survey.Password{Message: "Confirm master password:"}
+			check(survey.AskOne(prompt, &cmasterpass, nil))
+			if masterpass != cmasterpass {
+				check(fmt.Errorf("master passwords didn't match"))
+			}
+
+			prompt = &survey.Input{Message: "Selfpass server address:"}
+			check(survey.AskOne(prompt, &target, nil))
+
+			prompt = &survey.Confirm{Message: "Do you have a private key?"}
+			check(survey.AskOne(prompt, &hasPK, nil))
+
+			if hasPK {
+				prompt = &survey.Input{Message: "Private key:"}
+				check(survey.AskOne(prompt, &privateKey, nil))
+				privateKey = strings.Replace(privateKey, "-", "", -1)
+			}
+
+			prompt = &survey.Input{Message: "CA certificate file:"}
+			check(survey.AskOne(prompt, &caFile, nil))
+			ca, err := ioutil.ReadFile(caFile)
+			check(err)
+
+			prompt = &survey.Input{Message: "Client certificate file:"}
+			check(survey.AskOne(prompt, &certFile, nil))
+			cert, err := ioutil.ReadFile(certFile)
+			check(err)
+
+			prompt = &survey.Input{Message: "Client key file:"}
+			check(survey.AskOne(prompt, &keyFile, nil))
+			key, err := ioutil.ReadFile(keyFile)
+			check(err)
+
+			cfg.Set(cmds.KeyConnConfig, map[string]string{
+				"target": target,
+				"ca":     string(ca),
+				"cert":   string(cert),
+				"key":    string(key),
+			})
+
+			cfg.Set(cmds.KeyPrivateKey, privateKey)
+
+			if err := cfg.WriteConfig(); err != nil {
+				home, err := homedir.Dir()
+				check(err)
+
+				check(cfg.WriteConfigAs(home + "/.spc.toml"))
+				cfg.SetConfigFile(home + "/.spc.toml")
+				fmt.Println("Wrote new config to: " + home + "/.spc.toml")
+			}
+
+			encryptConfig(masterpass, cfg)
+		},
+	}
+
+	return initCmd
+}
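A note on the private key that init generates: a v4 UUID rendered as a string is 36 characters, and stripping its four dashes leaves 32 hex characters, which hex-decode to exactly the 16 bytes that crypto.CombinePasswordAndKey expects. A minimal sketch of that round-trip (not part of the patch itself):

package main

import (
	"encoding/hex"
	"fmt"
	"strings"

	"github.com/google/uuid"
)

func main() {
	// Same derivation as makeInitCmd: strip the dashes from a v4 UUID.
	privateKey := strings.Replace(uuid.New().String(), "-", "", -1)

	// create.go and get.go later decode this back into raw key bytes.
	key, err := hex.DecodeString(privateKey)
	if err != nil {
		panic(err)
	}

	fmt.Println(len(privateKey), len(key)) // 32 16
}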
diff --git a/cmd/spc/cmd/root.go b/cmd/spc/cmd/root.go
new file mode 100644
index 0000000..0768f0f
--- /dev/null
+++ b/cmd/spc/cmd/root.go
@@ -0,0 +1,147 @@
+package cmd
+
+import (
+	"context"
+	"fmt"
+	"io/ioutil"
+	"os"
+
+	"github.com/mitchellh/go-homedir"
+	"github.com/spf13/cobra"
+	"github.com/spf13/viper"
+	"gopkg.in/AlecAivazis/survey.v1"
+
+	"github.com/mitchell/selfpass/credentials/cmds"
+	"github.com/mitchell/selfpass/credentials/types"
+	"github.com/mitchell/selfpass/crypto"
+)
+
+func Execute(ctx context.Context, initClient types.CredentialClientInit) {
+	rootCmd := &cobra.Command{
+		Use:   "spc",
+		Short: "This is the CLI client for Selfpass.",
+		Long: `This is the CLI client for Selfpass, the self-hosted password manager. With this tool you
+can interact with the entire Selfpass API.`,
+	}
+
+	cfgFile := rootCmd.PersistentFlags().String("config", "", "config file (default is $HOME/.spc.toml)")
+	rootCmd.PersistentFlags().Parse(os.Args[1:])
+
+	decryptCfg := rootCmd.Flags().Bool("decrypt-cfg", false, "decrypt the config file")
+	rootCmd.Flags().Parse(os.Args[1:])
+
+	encryptCfg := !*decryptCfg
+	masterpass, cfg := openConfig(*cfgFile)
+	if encryptCfg && masterpass != "" {
+		defer encryptConfig(masterpass, cfg)
+	}
+	if *decryptCfg {
+		fmt.Println("Decrypting config file. It will be encrypted again the next time you run spc.")
+		return
+	}
+
+	rootCmd.AddCommand(makeInitCmd(cfg))
+	rootCmd.AddCommand(cmds.MakeListCmd(makeInitClient(cfg, initClient)))
+	rootCmd.AddCommand(cmds.MakeCreateCmd(masterpass, cfg, makeInitClient(cfg, initClient)))
+	rootCmd.AddCommand(cmds.MakeGetCmd(masterpass, cfg, makeInitClient(cfg, initClient)))
+
+	check(rootCmd.Execute())
+}
+
+func makeInitClient(cfg *viper.Viper, initClient types.CredentialClientInit) cmds.CredentialClientInit {
+	return func(ctx context.Context) types.CredentialClient {
+		connConfig := cfg.GetStringMapString(cmds.KeyConnConfig)
+
+		client, err := initClient(
+			ctx,
+			connConfig["target"],
+			connConfig["ca"],
+			connConfig["cert"],
+			connConfig["key"],
+		)
+		if err != nil {
+			fmt.Printf("Please run 'init' command before running API commands.\nError Message: %s\n", err)
+			os.Exit(1)
+		}
+
+		return client
+	}
+}
+
+func openConfig(cfgFile string) (masterpass string, v *viper.Viper) {
+	v = viper.New()
+	v.SetConfigType("toml")
+
+	if cfgFile != "" {
+		// Use config file from the flag.
+		v.SetConfigFile(cfgFile)
+	} else {
+		// Find home directory.
+		home, err := homedir.Dir()
+		check(err)
+
+		// Search config in home directory with name ".spc" (without extension).
+		v.AddConfigPath(home)
+		v.SetConfigName(".spc")
+
+		cfgFile = home + "/.spc.toml"
+	}
+
+	if _, err := os.Open(cfgFile); !os.IsNotExist(err) {
+		prompt := &survey.Password{Message: "Master password:"}
+		check(survey.AskOne(prompt, &masterpass, nil))
+
+		decryptConfig(masterpass, cfgFile)
+	}
+
+	//v.AutomaticEnv() // read in environment variables that match
+
+	// If a config file is found, read it in.
+	if err := v.ReadInConfig(); err == nil {
+		fmt.Println("Using config file:", v.ConfigFileUsed())
+	}
+
+	return masterpass, v
+}
+
+func decryptConfig(masterpass string, cfgFile string) {
+	contents, err := ioutil.ReadFile(cfgFile)
+	check(err)
+
+	passkey, err := crypto.GenerateKeyFromPassword([]byte(masterpass))
+	check(err)
+
+	contents, err = crypto.CBCDecrypt(passkey, contents)
+	if err != nil && err.Error() == "Padding incorrect" {
+		fmt.Println("incorrect master password")
+		os.Exit(1)
+	} else if err != nil && err.Error() == "ciphertext is not a multiple of the block size" {
+		fmt.Println("Config wasn't encrypted.")
+		return
+	}
+	check(err)
+
+	check(ioutil.WriteFile(cfgFile, contents, 0600))
+}
+
+func encryptConfig(masterpass string, cfg *viper.Viper) {
+	contents, err := ioutil.ReadFile(cfg.ConfigFileUsed())
+	if os.IsNotExist(err) {
+		return
+	}
+
+	keypass, err := crypto.GenerateKeyFromPassword([]byte(masterpass))
+	check(err)
+
+	contents, err = crypto.CBCEncrypt(keypass, contents)
+	check(err)
+
+	check(ioutil.WriteFile(cfg.ConfigFileUsed(), contents, 0600))
+}
+
+func check(err error) {
+	if err != nil {
+		fmt.Println(err)
+		os.Exit(1)
+	}
+}
diff --git a/cmd/spc/main.go b/cmd/spc/main.go
new file mode 100644
index 0000000..f0b3a75
--- /dev/null
+++ b/cmd/spc/main.go
@@ -0,0 +1,13 @@
+package main
+
+import (
+	"context"
+
+	"github.com/mitchell/selfpass/cmd/spc/cmd"
+	"github.com/mitchell/selfpass/credentials/repositories"
+)
+
+func main() {
+	ctx := context.Background()
+	cmd.Execute(ctx, repositories.NewCredentialServiceClient)
+}
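The open/exit cycle in root.go reduces to a whole-file CBC round-trip: openConfig decrypts ~/.spc.toml in place, and the deferred encryptConfig writes it back encrypted. A minimal sketch of that cycle over in-memory bytes (the password and config contents here are illustrative):

package main

import (
	"bytes"
	"fmt"

	"github.com/mitchell/selfpass/crypto"
)

func main() {
	// GenerateKeyFromPassword stretches the master password to 32 bytes.
	passkey, err := crypto.GenerateKeyFromPassword([]byte("correct horse battery staple"))
	if err != nil {
		panic(err)
	}

	config := []byte("private_key = \"0123456789abcdef0123456789abcdef\"\n")

	encrypted, err := crypto.CBCEncrypt(passkey, config)
	if err != nil {
		panic(err)
	}

	decrypted, err := crypto.CBCDecrypt(passkey, encrypted)
	if err != nil {
		panic(err)
	}

	fmt.Println(bytes.Equal(config, decrypted)) // true
}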
diff --git a/credentials/cmds/cmds.go b/credentials/cmds/cmds.go
new file mode 100644
index 0000000..63f58e4
--- /dev/null
+++ b/credentials/cmds/cmds.go
@@ -0,0 +1,21 @@
+package cmds
+
+import (
+	"context"
+	"fmt"
+	"os"
+
+	"github.com/mitchell/selfpass/credentials/types"
+)
+
+type CredentialClientInit func(ctx context.Context) (c types.CredentialClient)
+
+func check(err error) {
+	if err != nil {
+		fmt.Println(err)
+		os.Exit(1)
+	}
+}
+
+const KeyConnConfig = "connection"
+const KeyPrivateKey = "private_key"
diff --git a/credentials/cmds/create.go b/credentials/cmds/create.go
new file mode 100644
index 0000000..5929394
--- /dev/null
+++ b/credentials/cmds/create.go
@@ -0,0 +1,146 @@
+package cmds
+
+import (
+	"context"
+	"encoding/base64"
+	"encoding/hex"
+	"encoding/json"
+	"fmt"
+	"math/rand"
+	"os"
+	"time"
+
+	"github.com/atotto/clipboard"
+	"github.com/spf13/cobra"
+	"github.com/spf13/viper"
+	"gopkg.in/AlecAivazis/survey.v1"
+
+	"github.com/mitchell/selfpass/credentials/types"
+	"github.com/mitchell/selfpass/crypto"
+)
+
+func MakeCreateCmd(masterpass string, cfg *viper.Viper, initClient CredentialClientInit) *cobra.Command {
+	createCmd := &cobra.Command{
+		Use:   "create",
+		Short: "Create a credential in Selfpass",
+		Long: `Create a credential in Selfpass, and save it to the server after encrypting the
+password.`,
+
+		Run: func(_ *cobra.Command, args []string) {
+			mdqs := []*survey.Question{
+				{
+					Name:   "primary",
+					Prompt: &survey.Input{Message: "Primary user key:"},
+				},
+				{
+					Name:   "sourceHost",
+					Prompt: &survey.Input{Message: "Source host:"},
+				},
+				{
+					Name:   "loginURL",
+					Prompt: &survey.Input{Message: "Login url:"},
+				},
+				{
+					Name:   "tag",
+					Prompt: &survey.Input{Message: "Tag:"},
+				},
+			}
+			cqs := []*survey.Question{
+				{
+					Name:   "username",
+					Prompt: &survey.Input{Message: "Username:"},
+				},
+				{
+					Name:   "email",
+					Prompt: &survey.Input{Message: "Email:"},
+				},
+			}
+			var ci types.CredentialInput
+
+			check(survey.Ask(mdqs, &ci.MetadataInput))
+			check(survey.Ask(cqs, &ci))
+
+			var newpass bool
+			prompt := &survey.Confirm{Message: "Do you want a random password?", Default: true}
+			check(survey.AskOne(prompt, &newpass, nil))
+
+			if newpass {
+				ci.Password = generatePassword(16, true, true)
+
+				var copypass bool
+				prompt = &survey.Confirm{Message: "Copy new pass to clipboard?", Default: true}
+				check(survey.AskOne(prompt, &copypass, nil))
+
+				if copypass {
+					check(clipboard.WriteAll(ci.Password))
+				}
+			} else {
+				prompt := &survey.Password{Message: "Password: "}
+				check(survey.AskOne(prompt, &ci.Password, nil))
+
+				var cpass string
+				prompt = &survey.Password{Message: "Confirm password: "}
+				check(survey.AskOne(prompt, &cpass, nil))
+
+				if ci.Password != cpass {
+					fmt.Println("passwords didn't match")
+					os.Exit(1)
+				}
+			}
+
+			key, err := hex.DecodeString(cfg.GetString(KeyPrivateKey))
+			check(err)
+
+			keypass, err := crypto.CombinePasswordAndKey([]byte(masterpass), key)
+			check(err)
+
+			cipherpass, err := crypto.CBCEncrypt(keypass, []byte(ci.Password))
+			check(err)
+
+			ci.Password = base64.StdEncoding.EncodeToString(cipherpass)
+
+			ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
+			defer cancel()
+
+			c, err := initClient(ctx).Create(ctx, ci)
+			check(err)
+
+			mdjson, err := json.MarshalIndent(c.Metadata, "", " ")
+			check(err)
+			fmt.Println(string(mdjson))
+		},
+	}
+
+	return createCmd
+}
+
+const alphas = "abcdefghijklmnopqrstuvABCDEFGHIJKLMNOPQRSTUV"
+const alphanumerics = "abcdefghijklmnopqrstuvABCDEFGHIJKLMNOPQRSTUV1234567890"
+const alphasAndSpecials = "abcdefghijklmnopqrstuvABCDEFGHIJKLMNOPQRSTUV!@#$%^&*()"
+const alphanumericsAndSpecials = "abcdefghijklmnopqrstuvABCDEFGHIJKLMNOPQRSTUV1234567890!@#$%^&*()"
+
+func generatePassword(length int, numbers, specials bool) string {
+	rand.Seed(time.Now().UnixNano())
+	pass := make([]byte, length)
+
+	switch {
+	case numbers && specials:
+		for idx := 0; idx < length; idx++ {
+			pass[idx] = alphanumericsAndSpecials[rand.Int63()%int64(len(alphanumericsAndSpecials))]
+		}
+	case numbers:
+		for idx := 0; idx < length; idx++ {
+			pass[idx] = alphanumerics[rand.Int63()%int64(len(alphanumerics))]
+		}
+	case specials:
+		for idx := 0; idx < length; idx++ {
+			pass[idx] = alphasAndSpecials[rand.Int63()%int64(len(alphasAndSpecials))]
+		}
+	default:
+		for idx := 0; idx < length; idx++ {
+			pass[idx] = alphas[rand.Int63()%int64(len(alphas))]
+		}
+	}
+
+	return string(pass)
+}
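One caveat on generatePassword: math/rand seeded with the wall clock is deterministic and predictable, which is a weak source for passwords. A sketch of the same character-set approach on top of crypto/rand (an alternative, not what this patch ships):

package main

import (
	"crypto/rand"
	"fmt"
	"math/big"
)

func generatePassword(length int, charset string) (string, error) {
	pass := make([]byte, length)
	max := big.NewInt(int64(len(charset)))

	for idx := range pass {
		// rand.Int draws uniformly from [0, len(charset)) using the OS CSPRNG.
		n, err := rand.Int(rand.Reader, max)
		if err != nil {
			return "", err
		}
		pass[idx] = charset[n.Int64()]
	}

	return string(pass), nil
}

func main() {
	pass, err := generatePassword(16, "abcdefghijklmnopqrstuvABCDEFGHIJKLMNOPQRSTUV1234567890!@#$%^&*()")
	if err != nil {
		panic(err)
	}
	fmt.Println(pass)
}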
diff --git a/credentials/cmds/get.go b/credentials/cmds/get.go
new file mode 100644
index 0000000..fd7462c
--- /dev/null
+++ b/credentials/cmds/get.go
@@ -0,0 +1,63 @@
+package cmds
+
+import (
+	"context"
+	"encoding/base64"
+	"encoding/hex"
+	"encoding/json"
+	"fmt"
+	"time"
+
+	"github.com/atotto/clipboard"
+	"github.com/spf13/cobra"
+	"github.com/spf13/viper"
+
+	"github.com/mitchell/selfpass/crypto"
+)
+
+func MakeGetCmd(masterpass string, cfg *viper.Viper, initClient CredentialClientInit) *cobra.Command {
+	getCmd := &cobra.Command{
+		Use:   "get [id]",
+		Short: "Get a credential's info and copy its password to the clipboard",
+		Long: `Get a credential's info and copy its password to the clipboard, from the Selfpass
+server, after decrypting the password.`,
+		Args: cobra.ExactArgs(1),
+
+		Run: func(cmd *cobra.Command, args []string) {
+			ctx, cancel := context.WithTimeout(context.Background(), time.Second*3)
+			defer cancel()
+
+			cbcontents, err := clipboard.ReadAll()
+			check(err)
+
+			restore := func(cbcontents string) {
+				time.Sleep(time.Second * 5)
+				clipboard.WriteAll(cbcontents)
+			}
+
+			cred, err := initClient(ctx).Get(ctx, args[0])
+			check(err)
+
+			key, err := hex.DecodeString(cfg.GetString(KeyPrivateKey))
+			check(err)
+
+			passkey, err := crypto.CombinePasswordAndKey([]byte(masterpass), key)
+			check(err)
+
+			passbytes, err := base64.StdEncoding.DecodeString(cred.Password)
+			check(err)
+
+			plainpass, err := crypto.CBCDecrypt(passkey, passbytes)
+			check(err)
+			check(clipboard.WriteAll(string(plainpass)))
+			go restore(cbcontents)
+
+			cjson, err := json.MarshalIndent(cred, "", " ")
+			check(err)
+			fmt.Println(string(cjson))
+			fmt.Println("Wrote password to clipboard.")
+		},
+	}
+
+	return getCmd
+}
diff --git a/credentials/cmds/list.go b/credentials/cmds/list.go
new file mode 100644
index 0000000..84999fa
--- /dev/null
+++ b/credentials/cmds/list.go
@@ -0,0 +1,58 @@
+package cmds
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"time"
+
+	"github.com/spf13/cobra"
+)
+
+func MakeListCmd(initClient CredentialClientInit) *cobra.Command {
+	var sourceHost string
+
+	listCmd := &cobra.Command{
+		Use:   "list",
+		Short: "List the metadata for all credentials",
+		Long: `List the metadata for all credentials, with the option to filter by source host.
Metadata +includes almost all the information but the most sensitive.`, + + Run: func(cmd *cobra.Command, args []string) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*3) + defer cancel() + + mdch, errch := initClient(ctx).GetAllMetadata(ctx, sourceHost) + + receive: + for { + select { + case <-ctx.Done(): + check(fmt.Errorf("context timeout")) + + case err := <-errch: + check(err) + + case md, ok := <-mdch: + if !ok { + break receive + } + + mdjson, err := json.MarshalIndent(md, "", " ") + check(err) + fmt.Println(string(mdjson)) + } + } + }, + } + + listCmd.Flags().StringVarP( + &sourceHost, + "source-host", + "s", + "", + "specify which source host to filter the results by", + ) + + return listCmd +} diff --git a/credentials/protobuf/service.pb.go b/credentials/protobuf/service.pb.go index 62a556b..51b31a6 100644 --- a/credentials/protobuf/service.pb.go +++ b/credentials/protobuf/service.pb.go @@ -266,6 +266,7 @@ type Metadata struct { Primary string `protobuf:"bytes,4,opt,name=primary,proto3" json:"primary,omitempty"` SourceHost string `protobuf:"bytes,5,opt,name=source_host,json=sourceHost,proto3" json:"source_host,omitempty"` LoginUrl string `protobuf:"bytes,6,opt,name=login_url,json=loginUrl,proto3" json:"login_url,omitempty"` + Tag string `protobuf:"bytes,7,opt,name=tag,proto3" json:"tag,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -338,6 +339,13 @@ func (m *Metadata) GetLoginUrl() string { return "" } +func (m *Metadata) GetTag() string { + if m != nil { + return m.Tag + } + return "" +} + type Credential struct { Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` CreatedAt *timestamp.Timestamp `protobuf:"bytes,2,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` @@ -348,6 +356,7 @@ type Credential struct { Password string `protobuf:"bytes,7,opt,name=password,proto3" json:"password,omitempty"` SourceHost string `protobuf:"bytes,8,opt,name=source_host,json=sourceHost,proto3" json:"source_host,omitempty"` LoginUrl string `protobuf:"bytes,9,opt,name=login_url,json=loginUrl,proto3" json:"login_url,omitempty"` + Tag string `protobuf:"bytes,10,opt,name=tag,proto3" json:"tag,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -441,6 +450,13 @@ func (m *Credential) GetLoginUrl() string { return "" } +func (m *Credential) GetTag() string { + if m != nil { + return m.Tag + } + return "" +} + type CredentialRequest struct { Primary string `protobuf:"bytes,1,opt,name=primary,proto3" json:"primary,omitempty"` Username string `protobuf:"bytes,2,opt,name=username,proto3" json:"username,omitempty"` @@ -448,6 +464,7 @@ type CredentialRequest struct { Password string `protobuf:"bytes,4,opt,name=password,proto3" json:"password,omitempty"` SourceHost string `protobuf:"bytes,5,opt,name=source_host,json=sourceHost,proto3" json:"source_host,omitempty"` LoginUrl string `protobuf:"bytes,6,opt,name=login_url,json=loginUrl,proto3" json:"login_url,omitempty"` + Tag string `protobuf:"bytes,7,opt,name=tag,proto3" json:"tag,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -520,6 +537,13 @@ func (m *CredentialRequest) GetLoginUrl() string { return "" } +func (m *CredentialRequest) GetTag() string { + if m != nil { + return m.Tag + } + return "" +} + func init() { proto.RegisterType((*DeleteResponse)(nil), 
"selfpass.credentials.DeleteResponse") proto.RegisterType((*GetAllMetadataRequest)(nil), "selfpass.credentials.GetAllMetadataRequest") @@ -535,41 +559,41 @@ func init() { func init() { proto.RegisterFile("credentials/protobuf/service.proto", fileDescriptor_ad34efc7bbd96e69) } var fileDescriptor_ad34efc7bbd96e69 = []byte{ - // 539 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x54, 0xc1, 0x6e, 0xd3, 0x40, - 0x10, 0x95, 0x93, 0x34, 0x75, 0xa6, 0x21, 0x12, 0xab, 0x22, 0x59, 0xae, 0x44, 0xa2, 0x05, 0x89, - 0xaa, 0x48, 0x2e, 0x2a, 0x17, 0x38, 0x86, 0x16, 0xa5, 0x1c, 0x00, 0xc9, 0xa5, 0x17, 0x2e, 0xd1, - 0xd6, 0x9e, 0xa6, 0x96, 0x6c, 0xaf, 0xd9, 0x5d, 0x83, 0xfa, 0x01, 0xfc, 0x13, 0xfc, 0x11, 0x9f, - 0x81, 0xbc, 0xf6, 0x26, 0xae, 0xeb, 0x34, 0xe1, 0xc6, 0xcd, 0xb3, 0x7e, 0x6f, 0xf5, 0xde, 0xcc, - 0xce, 0x03, 0x1a, 0x08, 0x0c, 0x31, 0x55, 0x11, 0x8b, 0xe5, 0x71, 0x26, 0xb8, 0xe2, 0x57, 0xf9, - 0xf5, 0xb1, 0x44, 0xf1, 0x3d, 0x0a, 0xd0, 0xd3, 0x07, 0x64, 0x5f, 0x62, 0x7c, 0x9d, 0x31, 0x29, - 0xbd, 0x1a, 0xd8, 0x1d, 0x2f, 0x38, 0x5f, 0xc4, 0xb8, 0x22, 0xa9, 0x28, 0x41, 0xa9, 0x58, 0x92, - 0x95, 0x34, 0x7a, 0x04, 0xa3, 0x33, 0x8c, 0x51, 0xa1, 0x8f, 0x32, 0xe3, 0xa9, 0x44, 0xe2, 0xc0, - 0xae, 0xcc, 0x83, 0x00, 0xa5, 0x74, 0xac, 0x89, 0x75, 0x68, 0xfb, 0xa6, 0xa4, 0x6f, 0xe0, 0xc9, - 0x0c, 0xd5, 0x34, 0x8e, 0x3f, 0xa2, 0x62, 0x21, 0x53, 0xcc, 0xc7, 0x6f, 0x39, 0x4a, 0x45, 0xc6, - 0xb0, 0x27, 0x79, 0x2e, 0x02, 0x9c, 0xdf, 0x70, 0xa9, 0x34, 0x6d, 0xe0, 0x43, 0x79, 0x74, 0xce, - 0xa5, 0xa2, 0x07, 0x30, 0xf8, 0x10, 0x1a, 0xf4, 0x08, 0x3a, 0x51, 0x58, 0x81, 0x3a, 0x51, 0x48, - 0x6f, 0xe0, 0xd1, 0x65, 0x16, 0xb2, 0x42, 0x42, 0x2b, 0x80, 0xcc, 0x00, 0x56, 0x9e, 0x9c, 0xce, - 0xc4, 0x3a, 0xdc, 0x3b, 0x79, 0xe1, 0xb5, 0xf9, 0xf5, 0x4e, 0x97, 0xdf, 0xd5, 0x65, 0x7e, 0x8d, - 0x4a, 0x8f, 0x60, 0x78, 0x96, 0x27, 0xd9, 0xd2, 0xaa, 0x0b, 0x76, 0xc0, 0x53, 0x85, 0xa9, 0x2a, - 0xbd, 0x0e, 0xfd, 0x65, 0x4d, 0x47, 0x30, 0x7c, 0x9f, 0x64, 0xea, 0xb6, 0xba, 0x87, 0xfe, 0xb1, - 0xc0, 0x36, 0xbe, 0xef, 0x29, 0x7c, 0xab, 0x15, 0x32, 0x85, 0xe1, 0x9c, 0xa9, 0x4a, 0xa1, 0xeb, - 0x95, 0xbd, 0xf7, 0x4c, 0xef, 0xbd, 0x2f, 0xa6, 0xf7, 0xfe, 0xa0, 0x42, 0x4f, 0x55, 0x41, 0xcd, - 0xb5, 0x7b, 0x4d, 0xed, 0x6e, 0xa6, 0x56, 0xe8, 0xa9, 0x2a, 0x26, 0x95, 0x89, 0x28, 0x61, 0xe2, - 0xd6, 0xe9, 0x69, 0x29, 0xa6, 0x6c, 0x0e, 0x64, 0xa7, 0x39, 0x10, 0x72, 0x00, 0x83, 0x98, 0x2f, - 0xa2, 0x74, 0x9e, 0x8b, 0xd8, 0xe9, 0xeb, 0xdf, 0xb6, 0x3e, 0xb8, 0x14, 0x31, 0xfd, 0xd5, 0x01, - 0x58, 0x35, 0xf2, 0xbf, 0x37, 0xeb, 0x82, 0x9d, 0x4b, 0x14, 0x29, 0x4b, 0xb0, 0x72, 0xba, 0xac, - 0xc9, 0x3e, 0xec, 0x60, 0xc2, 0x22, 0xe3, 0xb1, 0x2c, 0x0a, 0x46, 0xf1, 0x72, 0x7e, 0x70, 0x11, - 0x3a, 0xbb, 0x25, 0xc3, 0xd4, 0xcd, 0xd6, 0xd9, 0x0f, 0xb7, 0x6e, 0xd0, 0x68, 0xdd, 0x6f, 0x0b, - 0x1e, 0xdf, 0x7b, 0x83, 0x75, 0xed, 0xd6, 0x7a, 0xed, 0x9d, 0x75, 0xda, 0xbb, 0xeb, 0xb4, 0xf7, - 0x1e, 0xd6, 0xfe, 0x6f, 0x63, 0x3f, 0xf9, 0xd9, 0xab, 0x6b, 0xbf, 0x28, 0xd3, 0x85, 0xcc, 0x61, - 0x74, 0x77, 0xe9, 0xc9, 0xcb, 0xf6, 0xd5, 0x6b, 0x8d, 0x06, 0xf7, 0x69, 0x3b, 0xd8, 0xc0, 0x5e, - 0x59, 0xe4, 0x1c, 0xba, 0x33, 0x54, 0x64, 0xdc, 0x0e, 0x5c, 0xc6, 0x86, 0x3b, 0xd9, 0xb4, 0xf1, - 0xe4, 0x02, 0xfa, 0xa7, 0xfa, 0xa9, 0x91, 0x6d, 0xd3, 0x61, 0x8b, 0x4b, 0x3f, 0x43, 0xbf, 0x4c, - 0x27, 0xf2, 0xac, 0x1d, 0x7b, 0x27, 0xbb, 0xb6, 0xbb, 0xb0, 0x4c, 0xdc, 0xcd, 0x96, 0x9f, 0xb7, - 0x03, 0x1a, 0x81, 0xfd, 0x09, 0x7a, 0x45, 0xaa, 0x11, 0xda, 0x8e, 0xae, 0xa7, 0x98, 0xbb, 0x06, - 0x53, 0x4f, 0xc5, 0x77, 0xf0, 0xd5, 0x36, 
0xab, 0x78, 0xd5, 0xd7, 0x5f, 0xaf, 0xff, 0x06, 0x00, - 0x00, 0xff, 0xff, 0x39, 0xb2, 0x93, 0x8d, 0x82, 0x06, 0x00, 0x00, + // 535 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x94, 0xcf, 0x6e, 0xd3, 0x40, + 0x10, 0xc6, 0xe5, 0xa4, 0x4d, 0xed, 0x69, 0x88, 0x60, 0x55, 0x24, 0xcb, 0x95, 0x48, 0x64, 0x90, + 0xa8, 0x8a, 0xe4, 0xa2, 0x72, 0x81, 0x63, 0x68, 0x51, 0xca, 0x01, 0x21, 0xb9, 0xf4, 0xc2, 0x25, + 0xda, 0xda, 0xd3, 0xd4, 0x92, 0xed, 0x35, 0xbb, 0x63, 0x50, 0x1f, 0x8c, 0x97, 0x80, 0x37, 0xe2, + 0x84, 0xbc, 0xfe, 0x93, 0x3f, 0x75, 0x9a, 0x48, 0x5c, 0xb8, 0xed, 0xac, 0xbf, 0x59, 0xcd, 0xef, + 0xdb, 0xf5, 0x07, 0x6e, 0x20, 0x31, 0xc4, 0x94, 0x22, 0x1e, 0xab, 0x93, 0x4c, 0x0a, 0x12, 0xd7, + 0xf9, 0xcd, 0x89, 0x42, 0xf9, 0x3d, 0x0a, 0xd0, 0xd3, 0x1b, 0xec, 0x40, 0x61, 0x7c, 0x93, 0x71, + 0xa5, 0xbc, 0x05, 0xb1, 0x33, 0x9c, 0x09, 0x31, 0x8b, 0x71, 0xde, 0x44, 0x51, 0x82, 0x8a, 0x78, + 0x92, 0x95, 0x6d, 0xee, 0x31, 0x0c, 0xce, 0x31, 0x46, 0x42, 0x1f, 0x55, 0x26, 0x52, 0x85, 0xcc, + 0x86, 0x3d, 0x95, 0x07, 0x01, 0x2a, 0x65, 0x1b, 0x23, 0xe3, 0xc8, 0xf4, 0xeb, 0xd2, 0x7d, 0x0b, + 0x4f, 0x27, 0x48, 0xe3, 0x38, 0xfe, 0x84, 0xc4, 0x43, 0x4e, 0xdc, 0xc7, 0x6f, 0x39, 0x2a, 0x62, + 0x43, 0xd8, 0x57, 0x22, 0x97, 0x01, 0x4e, 0x6f, 0x85, 0x22, 0xdd, 0x66, 0xf9, 0x50, 0x6e, 0x5d, + 0x08, 0x45, 0xee, 0x21, 0x58, 0x1f, 0xc3, 0x5a, 0x3d, 0x80, 0x4e, 0x14, 0x56, 0xa2, 0x4e, 0x14, + 0xba, 0xb7, 0xf0, 0xe8, 0x2a, 0x0b, 0x79, 0x31, 0x42, 0xab, 0x80, 0x4d, 0x00, 0xe6, 0x4c, 0x76, + 0x67, 0x64, 0x1c, 0xed, 0x9f, 0xbe, 0xf4, 0xda, 0x78, 0xbd, 0xb3, 0x66, 0x5d, 0x1d, 0xe6, 0x2f, + 0xb4, 0xba, 0xc7, 0xd0, 0x3f, 0xcf, 0x93, 0xac, 0x41, 0x75, 0xc0, 0x0c, 0x44, 0x4a, 0x98, 0x52, + 0xc9, 0xda, 0xf7, 0x9b, 0xda, 0x1d, 0x40, 0xff, 0x43, 0x92, 0xd1, 0x5d, 0x75, 0x8e, 0xfb, 0xc7, + 0x00, 0xb3, 0xe6, 0xbe, 0x37, 0xe1, 0x3b, 0x3d, 0x21, 0x27, 0x0c, 0xa7, 0x9c, 0xaa, 0x09, 0x1d, + 0xaf, 0xf4, 0xde, 0xab, 0xbd, 0xf7, 0xbe, 0xd4, 0xde, 0xfb, 0x56, 0xa5, 0x1e, 0x53, 0xd1, 0x9a, + 0x6b, 0x7a, 0xdd, 0xda, 0xdd, 0xdc, 0x5a, 0xa9, 0xc7, 0x54, 0xdc, 0x54, 0x26, 0xa3, 0x84, 0xcb, + 0x3b, 0x7b, 0x47, 0x8f, 0x52, 0x97, 0xab, 0x17, 0xb2, 0xbb, 0x7a, 0x21, 0xec, 0x10, 0xac, 0x58, + 0xcc, 0xa2, 0x74, 0x9a, 0xcb, 0xd8, 0xee, 0xe9, 0xcf, 0xa6, 0xde, 0xb8, 0x92, 0x31, 0x7b, 0x0c, + 0x5d, 0xe2, 0x33, 0x7b, 0x4f, 0x6f, 0x17, 0x4b, 0xf7, 0x57, 0x07, 0x60, 0x6e, 0xed, 0x7f, 0x8f, + 0xef, 0x80, 0x99, 0x2b, 0x94, 0x29, 0x4f, 0xb0, 0x62, 0x6f, 0x6a, 0x76, 0x00, 0xbb, 0x98, 0xf0, + 0xa8, 0xa6, 0x2e, 0x8b, 0xa2, 0xa3, 0x78, 0x4b, 0x3f, 0x84, 0x0c, 0x2b, 0xee, 0xa6, 0x5e, 0x35, + 0xd3, 0x7c, 0xd8, 0x4c, 0xab, 0xdd, 0x4c, 0x98, 0x9b, 0xf9, 0xdb, 0x80, 0x27, 0xf7, 0xde, 0xe9, + 0x22, 0x8d, 0xb1, 0x9e, 0xa6, 0xb3, 0x8e, 0xa6, 0xbb, 0x8e, 0x66, 0xe7, 0x61, 0x9a, 0x7f, 0x7d, + 0x1a, 0xa7, 0x3f, 0xbb, 0x8b, 0x34, 0x97, 0x65, 0x26, 0xb1, 0x29, 0x0c, 0x96, 0xa3, 0x82, 0xbd, + 0x6a, 0xff, 0x61, 0x5b, 0x03, 0xc5, 0x79, 0xd6, 0x2e, 0xae, 0x65, 0xaf, 0x0d, 0x76, 0x01, 0xdd, + 0x09, 0x12, 0x1b, 0xb6, 0x0b, 0x9b, 0xb0, 0x71, 0x46, 0x9b, 0x72, 0x82, 0x5d, 0x42, 0xef, 0x4c, + 0x3f, 0x47, 0xb6, 0x6d, 0xa6, 0x6c, 0x71, 0xe8, 0x67, 0xe8, 0x95, 0x99, 0xc6, 0x9e, 0xb7, 0x6b, + 0x97, 0x12, 0x6f, 0xbb, 0x03, 0xcb, 0x9c, 0xde, 0x8c, 0xfc, 0xa2, 0x5d, 0xb0, 0x1c, 0xf3, 0xef, + 0xe1, 0xab, 0x59, 0xff, 0x5e, 0xd7, 0x3d, 0xbd, 0x7a, 0xf3, 0x37, 0x00, 0x00, 0xff, 0xff, 0x22, + 0xb6, 0x48, 0x2d, 0x68, 0x06, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
@@ -589,7 +613,6 @@ type CredentialServiceClient interface { Create(ctx context.Context, in *CredentialRequest, opts ...grpc.CallOption) (*Credential, error) Update(ctx context.Context, in *UpdateRequest, opts ...grpc.CallOption) (*Credential, error) Delete(ctx context.Context, in *IdRequest, opts ...grpc.CallOption) (*DeleteResponse, error) - Dump(ctx context.Context, in *EmptyRequest, opts ...grpc.CallOption) (*DumpResponse, error) } type credentialServiceClient struct { @@ -668,15 +691,6 @@ func (c *credentialServiceClient) Delete(ctx context.Context, in *IdRequest, opt return out, nil } -func (c *credentialServiceClient) Dump(ctx context.Context, in *EmptyRequest, opts ...grpc.CallOption) (*DumpResponse, error) { - out := new(DumpResponse) - err := c.cc.Invoke(ctx, "/selfpass.credentials.CredentialService/Dump", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - // CredentialServiceServer is the server API for CredentialService service. type CredentialServiceServer interface { GetAllMetadata(*GetAllMetadataRequest, CredentialService_GetAllMetadataServer) error @@ -684,7 +698,6 @@ type CredentialServiceServer interface { Create(context.Context, *CredentialRequest) (*Credential, error) Update(context.Context, *UpdateRequest) (*Credential, error) Delete(context.Context, *IdRequest) (*DeleteResponse, error) - Dump(context.Context, *EmptyRequest) (*DumpResponse, error) } // UnimplementedCredentialServiceServer can be embedded to have forward compatible implementations. @@ -706,9 +719,6 @@ func (*UnimplementedCredentialServiceServer) Update(ctx context.Context, req *Up func (*UnimplementedCredentialServiceServer) Delete(ctx context.Context, req *IdRequest) (*DeleteResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method Delete not implemented") } -func (*UnimplementedCredentialServiceServer) Dump(ctx context.Context, req *EmptyRequest) (*DumpResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Dump not implemented") -} func RegisterCredentialServiceServer(s *grpc.Server, srv CredentialServiceServer) { s.RegisterService(&_CredentialService_serviceDesc, srv) @@ -807,24 +817,6 @@ func _CredentialService_Delete_Handler(srv interface{}, ctx context.Context, dec return interceptor(ctx, in, info, handler) } -func _CredentialService_Dump_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(EmptyRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(CredentialServiceServer).Dump(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/selfpass.credentials.CredentialService/Dump", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(CredentialServiceServer).Dump(ctx, req.(*EmptyRequest)) - } - return interceptor(ctx, in, info, handler) -} - var _CredentialService_serviceDesc = grpc.ServiceDesc{ ServiceName: "selfpass.credentials.CredentialService", HandlerType: (*CredentialServiceServer)(nil), @@ -845,10 +837,6 @@ var _CredentialService_serviceDesc = grpc.ServiceDesc{ MethodName: "Delete", Handler: _CredentialService_Delete_Handler, }, - { - MethodName: "Dump", - Handler: _CredentialService_Dump_Handler, - }, }, Streams: []grpc.StreamDesc{ { diff --git a/credentials/protobuf/service.proto b/credentials/protobuf/service.proto index 767a12e..6ddc759 100644 --- a/credentials/protobuf/service.proto +++ 
b/credentials/protobuf/service.proto
@@ -12,7 +12,7 @@ service CredentialService {
     rpc Create (CredentialRequest) returns (Credential);
     rpc Update (UpdateRequest) returns (Credential);
     rpc Delete (IdRequest) returns (DeleteResponse);
-    rpc Dump (EmptyRequest) returns (DumpResponse);
+    // rpc Dump (EmptyRequest) returns (DumpResponse);
 }
 
 message DeleteResponse {
@@ -46,6 +46,7 @@ message Metadata {
     string primary = 4;
     string source_host = 5;
     string login_url = 6;
+    string tag = 7;
 }
 
 message Credential {
@@ -58,6 +59,7 @@ message Credential {
     string password = 7;
     string source_host = 8;
     string login_url = 9;
+    string tag = 10;
 }
 
 message CredentialRequest {
@@ -67,4 +69,5 @@ message CredentialRequest {
     string password = 4;
     string source_host = 5;
     string login_url = 6;
+    string tag = 7;
 }
diff --git a/credentials/repositories/grpc_client.go b/credentials/repositories/grpc_client.go
new file mode 100644
index 0000000..b21273b
--- /dev/null
+++ b/credentials/repositories/grpc_client.go
@@ -0,0 +1,115 @@
+package repositories
+
+import (
+	"context"
+	"crypto/tls"
+	"crypto/x509"
+	"fmt"
+	"io"
+
+	"github.com/mitchell/selfpass/credentials/endpoints"
+	"github.com/mitchell/selfpass/credentials/protobuf"
+	"github.com/mitchell/selfpass/credentials/transport"
+	"github.com/mitchell/selfpass/credentials/types"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/credentials"
+)
+
+func NewCredentialServiceClient(ctx context.Context, target, ca, cert, key string) (types.CredentialClient, error) {
+	keypair, err := tls.X509KeyPair([]byte(cert), []byte(key))
+	if err != nil {
+		return nil, err
+	}
+
+	capool := x509.NewCertPool()
+	capool.AppendCertsFromPEM([]byte(ca))
+
+	creds := credentials.NewTLS(&tls.Config{
+		RootCAs:      capool,
+		Certificates: []tls.Certificate{keypair},
+	})
+
+	conn, err := grpc.DialContext(ctx, target, grpc.WithTransportCredentials(creds), grpc.WithBlock())
+	if err != nil {
+		return nil, err
+	}
+
+	return CredentialServiceClient{
+		client: protobuf.NewCredentialServiceClient(conn),
+	}, nil
+}
+
+type CredentialServiceClient struct {
+	client protobuf.CredentialServiceClient
+}
+
+func (c CredentialServiceClient) GetAllMetadata(ctx context.Context, sourceHost string) (output <-chan types.Metadata, errch chan error) {
+	pbmdch := make(chan protobuf.Metadata, 1)
+	errch = make(chan error, 1)
+
+	stream, _ := transport.DecodeMetdataStreamResponse(ctx, transport.ProtobufMetadataStream{
+		Metadata: pbmdch,
+		Errors:   errch,
+	})
+
+	srv, err := c.client.GetAllMetadata(ctx, &protobuf.GetAllMetadataRequest{SourceHost: sourceHost})
+	if err != nil {
+		errch <- err
+		return nil, errch
+	}
+
+	go func() {
+		defer close(pbmdch)
+
+		for {
+			select {
+			case <-ctx.Done():
+				errch <- fmt.Errorf("context timeout")
+				return
+			default:
+			}
+
+			pbmd, err := srv.Recv()
+			if err == io.EOF {
+				return
+			} else if err != nil {
+				errch <- err
+				return
+			}
+
+			pbmdch <- *pbmd
+		}
+	}()
+
+	return stream.Metadata, stream.Errors
+}
+
+func (c CredentialServiceClient) Get(ctx context.Context, id string) (output types.Credential, err error) {
+	req := transport.EncodeIdRequest(endpoints.IDRequest{ID: id})
+
+	res, err := c.client.Get(ctx, &req)
+	if err != nil {
+		return output, err
+	}
+
+	return transport.DecodeCredential(*res)
+}
+
+func (c CredentialServiceClient) Create(ctx context.Context, ci types.CredentialInput) (output types.Credential, err error) {
+	req := transport.EncodeCredentialRequest(ci)
+
+	res, err := c.client.Create(ctx, &req)
+	if err != nil {
+		return output, err
+	}
+
+	return transport.DecodeCredential(*res)
+}
+
+func (c CredentialServiceClient) Update(ctx context.Context, id string, ci types.CredentialInput) (output types.Credential, err error) {
+	panic("implement me")
+}
+
+func (c CredentialServiceClient) Delete(ctx context.Context, id string) (err error) {
+	panic("implement me")
+}
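For context, this is roughly how a caller dials the client above; the target, credential id, and PEM contents here are placeholders for values spc normally pulls from its encrypted config:

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/mitchell/selfpass/credentials/repositories"
)

func main() {
	// Placeholder PEM contents; with real certificates these would be the
	// file contents of ca.pem, client.pem, and client-key.pem.
	var target, ca, cert, key = "localhost:8080", "<ca.pem>", "<client.pem>", "<client-key.pem>"

	ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
	defer cancel()

	client, err := repositories.NewCredentialServiceClient(ctx, target, ca, cert, key)
	if err != nil {
		panic(err) // fails here unless valid key material is supplied
	}

	cred, err := client.Get(ctx, "cred-example.com-me")
	if err != nil {
		panic(err)
	}
	fmt.Println(cred.Primary)
}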
diff --git a/credentials/repositories/redis.go b/credentials/repositories/redis.go
index 819851d..65806d8 100644
--- a/credentials/repositories/redis.go
+++ b/credentials/repositories/redis.go
@@ -7,18 +7,11 @@ import (
 	"github.com/mitchell/selfpass/credentials/types"
 )
 
-func NewRedisConn(cfg ConnConfig) (c RedisConn, err error) {
-	p, err := radix.NewPool(cfg.NetworkType, cfg.Address, int(cfg.Size), cfg.Options...)
+func NewRedisConn(networkType, address string, connCount uint, options ...radix.PoolOpt) (c RedisConn, err error) {
+	p, err := radix.NewPool(networkType, address, int(connCount), options...)
 	return RedisConn{p: p}, err
 }
 
-type ConnConfig struct {
-	NetworkType string
-	Address     string
-	Size        uint
-	Options     []radix.PoolOpt
-}
-
 type RedisConn struct {
 	p *radix.Pool
 }
@@ -30,22 +23,23 @@ func (conn RedisConn) GetAllMetadata(ctx context.Context, sourceHost string, err
 		defer close(mdch)
 		var key string
 
-		scr := radix.NewScanner(conn.p, radix.ScanOpts{Command: scan, Pattern: sourceHost + star})
+		scr := radix.NewScanner(conn.p, radix.ScanOpts{Command: scan, Pattern: types.TypePrefixCred + dash + sourceHost + star})
 
 		for scr.Next(&key) {
 			select {
 			case <-ctx.Done():
 				return
 			default:
-				var md types.Metadata
-
-				if err := conn.p.Do(radix.Cmd(&md, hGetAll, key)); err != nil {
-					errch <- err
-					return
-				}
-
-				mdch <- md
 			}
+
+			var md types.Metadata
+
+			if err := conn.p.Do(radix.Cmd(&md, hGetAll, key)); err != nil {
+				errch <- err
+				return
+			}
+
+			mdch <- md
 		}
 	}()
 
@@ -53,41 +47,17 @@ func (conn RedisConn) GetAllMetadata(ctx context.Context, sourceHost string, err
 }
 
 func (conn RedisConn) Get(ctx context.Context, id string) (output types.Credential, err error) {
-	var key string
-	scr := radix.NewScanner(conn.p, radix.ScanOpts{Command: scan, Pattern: star + id, Count: 1})
-
-	if !scr.Next(&key) {
-		return output, nil
-	}
-
-	if err = scr.Close(); err != nil {
-		return output, err
-	}
-
-	err = conn.p.Do(radix.Cmd(&output, hGetAll, key))
-
+	err = conn.p.Do(radix.Cmd(&output, hGetAll, id))
 	return output, err
 }
 
 func (conn RedisConn) Put(ctx context.Context, c types.Credential) (err error) {
-	err = conn.p.Do(radix.FlatCmd(nil, hMSet, c.SourceHost+dash+c.ID, c))
+	err = conn.p.Do(radix.FlatCmd(nil, hMSet, c.ID, c))
 	return err
 }
 
 func (conn RedisConn) Delete(ctx context.Context, id string) (err error) {
-	var key string
-	scr := radix.NewScanner(conn.p, radix.ScanOpts{Command: scan, Pattern: star + id, Count: 1})
-
-	if !scr.Next(&key) {
-		return nil
-	}
-
-	if err = scr.Close(); err != nil {
-		return err
-	}
-
-	err = conn.p.Do(radix.Cmd(nil, del, key))
-
+	err = conn.p.Do(radix.Cmd(nil, del, id))
 	return err
 }
diff --git a/credentials/service/service.go b/credentials/service/service.go
index e815e89..3386bea 100644
--- a/credentials/service/service.go
+++ b/credentials/service/service.go
@@ -5,7 +5,6 @@ import (
 	"fmt"
 	"time"
 
-	"github.com/google/uuid"
 	"github.com/mitchell/selfpass/credentials/types"
 )
 
@@ -38,7 +37,7 @@ func (svc Credentials) Create(ctx context.Context, ci types.CredentialInput) (ou
 	}
 
 	var c types.Credential
-	c.ID = "cred-" + uuid.New().String()
+	c.ID = generateID(ci)
 	c.CreatedAt = time.Now()
 	c.UpdatedAt = time.Now()
 	c.Primary = ci.Primary
@@ -47,6 +46,7 @@ func (svc
Credentials) Create(ctx context.Context, ci types.CredentialInput) (ou c.Username = ci.Username c.Email = ci.Email c.Password = ci.Password + c.Tag = ci.Tag err = svc.repo.Put(ctx, c) @@ -57,6 +57,8 @@ func validateCredentialInput(c types.CredentialInput) (err error) { switch { case c.SourceHost == "": return fmt.Errorf("%s must specify source host", types.InvalidArgument) + case c.Primary == "": + return fmt.Errorf("%s must specify primary user key", types.InvalidArgument) case c.Password == "": return fmt.Errorf("%s must specify password", types.InvalidArgument) } @@ -64,6 +66,17 @@ func validateCredentialInput(c types.CredentialInput) (err error) { return err } +func generateID(ci types.CredentialInput) string { + idFormat := types.TypePrefixCred + "-%s-%s" + + if ci.Tag != "" { + idFormat += "-%s" + return fmt.Sprintf(idFormat, ci.SourceHost, ci.Primary, ci.Tag) + } + + return fmt.Sprintf(idFormat, ci.SourceHost, ci.Primary) +} + func (svc Credentials) Update(ctx context.Context, id string, ci types.CredentialInput) (output types.Credential, err error) { if err = validateCredentialInput(ci); err != nil { return output, err @@ -78,6 +91,7 @@ func (svc Credentials) Update(ctx context.Context, id string, ci types.Credentia return output, err } + c.ID = generateID(ci) c.UpdatedAt = time.Now() c.Primary = ci.Primary c.LoginURL = ci.LoginURL @@ -85,6 +99,7 @@ func (svc Credentials) Update(ctx context.Context, id string, ci types.Credentia c.Password = ci.Password c.Email = ci.Email c.Username = ci.Username + c.Tag = ci.Tag return c, svc.repo.Put(ctx, c) } diff --git a/credentials/transport/encoding.go b/credentials/transport/encoding.go index dd0ba31..1431186 100644 --- a/credentials/transport/encoding.go +++ b/credentials/transport/encoding.go @@ -16,6 +16,13 @@ func decodeGetAllMetadataRequest(ctx context.Context, request interface{}) (inte }, nil } +func EncodeGetAllMetadataRequest(ctx context.Context, request interface{}) (interface{}, error) { + r := request.(endpoints.GetAllMetadataRequest) + return protobuf.GetAllMetadataRequest{ + SourceHost: r.SourceHost, + }, nil +} + func encodeDumpResponse(ctx context.Context, response interface{}) (interface{}, error) { r := response.(endpoints.DumpResponse) return protobuf.DumpResponse{ @@ -50,28 +57,69 @@ func encodeMetadataStreamResponse(ctx context.Context, response interface{}) (in SourceHost: md.SourceHost, Primary: md.Primary, LoginUrl: md.LoginURL, + Tag: md.Tag, } } }() - return protobufMetadataStream{ + return ProtobufMetadataStream{ Metadata: pbmdch, Errors: r.Errors, }, nil } -type protobufMetadataStream struct { +func DecodeMetdataStreamResponse(ctx context.Context, r ProtobufMetadataStream) (endpoints.MetadataStream, error) { + mdch := make(chan types.Metadata, 1) + errch := make(chan error, 1) + + go func() { + defer close(mdch) + + for pbmd := range r.Metadata { + createdAt, err := ptypes.Timestamp(pbmd.CreatedAt) + if err != nil { + errch <- err + return + } + + updatedAt, err := ptypes.Timestamp(pbmd.UpdatedAt) + if err != nil { + errch <- err + return + } + + mdch <- types.Metadata{ + ID: pbmd.Id, + SourceHost: pbmd.SourceHost, + CreatedAt: createdAt, + UpdatedAt: updatedAt, + Primary: pbmd.Primary, + LoginURL: pbmd.LoginUrl, + Tag: pbmd.Tag, + } + } + }() + + return endpoints.MetadataStream{ + Metadata: mdch, + Errors: errch, + }, nil +} + +type ProtobufMetadataStream struct { Metadata <-chan protobuf.Metadata Errors chan error } func decodeCredentialRequest(ctx context.Context, request interface{}) (interface{}, error) { r 
:= request.(protobuf.CredentialRequest) + return types.CredentialInput{ MetadataInput: types.MetadataInput{ Primary: r.Primary, LoginURL: r.LoginUrl, SourceHost: r.SourceHost, + Tag: r.Tag, }, Username: r.Username, Email: r.Email, @@ -79,6 +127,18 @@ func decodeCredentialRequest(ctx context.Context, request interface{}) (interfac }, nil } +func EncodeCredentialRequest(r types.CredentialInput) protobuf.CredentialRequest { + return protobuf.CredentialRequest{ + Primary: r.Primary, + Username: r.Username, + Email: r.Email, + Password: r.Password, + SourceHost: r.SourceHost, + LoginUrl: r.LoginURL, + Tag: r.Tag, + } +} + func encodeCredentialResponse(ctx context.Context, response interface{}) (interface{}, error) { r := response.(types.Credential) @@ -99,14 +159,44 @@ func encodeCredentialResponse(ctx context.Context, response interface{}) (interf Primary: r.Primary, SourceHost: r.SourceHost, LoginUrl: r.LoginURL, + Tag: r.Tag, Username: r.Username, Email: r.Email, Password: r.Password, }, nil } +func DecodeCredential(r protobuf.Credential) (c types.Credential, err error) { + + createdAt, err := ptypes.Timestamp(r.CreatedAt) + if err != nil { + return c, err + } + + updatedAt, err := ptypes.Timestamp(r.UpdatedAt) + if err != nil { + return c, err + } + + return types.Credential{ + Metadata: types.Metadata{ + ID: r.Id, + SourceHost: r.SourceHost, + CreatedAt: createdAt, + UpdatedAt: updatedAt, + Primary: r.Primary, + LoginURL: r.LoginUrl, + Tag: r.Tag, + }, + Username: r.Username, + Email: r.Email, + Password: r.Password, + }, nil +} + func decodeUpdateRequest(ctx context.Context, request interface{}) (interface{}, error) { r := request.(protobuf.UpdateRequest) + return endpoints.UpdateRequest{ ID: r.Id, Credential: types.CredentialInput{ @@ -114,6 +204,7 @@ func decodeUpdateRequest(ctx context.Context, request interface{}) (interface{}, Primary: r.Credential.Primary, SourceHost: r.Credential.SourceHost, LoginURL: r.Credential.LoginUrl, + Tag: r.Credential.Tag, }, Username: r.Credential.Username, Email: r.Credential.Email, @@ -122,6 +213,24 @@ func decodeUpdateRequest(ctx context.Context, request interface{}) (interface{}, }, nil } +func EncodeUpdateRequest(ctx context.Context, request interface{}) (interface{}, error) { + r := request.(endpoints.UpdateRequest) + + c := r.Credential + return protobuf.UpdateRequest{ + Id: r.ID, + Credential: &protobuf.CredentialRequest{ + Primary: c.Primary, + Username: c.Username, + Email: c.Email, + Password: c.Password, + SourceHost: c.SourceHost, + LoginUrl: c.LoginURL, + Tag: c.Tag, + }, + }, nil +} + func decodeIdRequest(ctx context.Context, request interface{}) (interface{}, error) { r := request.(protobuf.IdRequest) return endpoints.IDRequest{ @@ -129,6 +238,12 @@ func decodeIdRequest(ctx context.Context, request interface{}) (interface{}, err }, nil } -func noOp(ctx context.Context, request interface{}) (interface{}, error) { +func EncodeIdRequest(r endpoints.IDRequest) protobuf.IdRequest { + return protobuf.IdRequest{ + Id: r.ID, + } +} + +func noOp(context.Context, interface{}) (interface{}, error) { return nil, nil } diff --git a/credentials/transport/grpc_server.go b/credentials/transport/grpc_server.go index b7aa569..2acd9ae 100644 --- a/credentials/transport/grpc_server.go +++ b/credentials/transport/grpc_server.go @@ -75,7 +75,7 @@ func (s GRPCServer) GetAllMetadata(r *protobuf.GetAllMetadataRequest, srv protob return err } - mds := i.(protobufMetadataStream) + mds := i.(ProtobufMetadataStream) receiveLoop: for { diff --git 
a/credentials/types/credential.go b/credentials/types/credential.go
index 7edee51..dedb919 100644
--- a/credentials/types/credential.go
+++ b/credentials/types/credential.go
@@ -4,11 +4,13 @@ import (
 	"time"
 )
 
+const TypePrefixCred = "cred"
+
 type Credential struct {
 	Metadata
 	Username string
 	Email    string
-	Password string
+	Password string `json:"-"`
 }
 
 type CredentialInput struct {
@@ -25,10 +27,12 @@ type Metadata struct {
 	UpdatedAt  time.Time
 	Primary    string
 	LoginURL   string
+	Tag        string
 }
 
 type MetadataInput struct {
 	Primary    string
 	SourceHost string
 	LoginURL   string
+	Tag        string
 }
diff --git a/credentials/types/interfaces.go b/credentials/types/interfaces.go
index fe26334..7e78e25 100644
--- a/credentials/types/interfaces.go
+++ b/credentials/types/interfaces.go
@@ -18,3 +18,13 @@ type CredentialRepo interface {
 	Delete(ctx context.Context, id string) (err error)
 	DumpDB(ctx context.Context) (bs []byte, err error)
 }
+
+type CredentialClientInit func(ctx context.Context, target, ca, cert, key string) (c CredentialClient, err error)
+
+type CredentialClient interface {
+	GetAllMetadata(ctx context.Context, sourceHost string) (output <-chan Metadata, errch chan error)
+	Get(ctx context.Context, id string) (output Credential, err error)
+	Create(ctx context.Context, ci CredentialInput) (output Credential, err error)
+	Update(ctx context.Context, id string, ci CredentialInput) (output Credential, err error)
+	Delete(ctx context.Context, id string) (err error)
+}
diff --git a/crypto/cbc.go b/crypto/cbc.go
new file mode 100644
index 0000000..cf644f2
--- /dev/null
+++ b/crypto/cbc.go
@@ -0,0 +1,99 @@
+package crypto
+
+import (
+	"crypto/aes"
+	"crypto/cipher"
+	"crypto/rand"
+	"fmt"
+	"io"
+
+	"github.com/cloudflare/redoctober/padding"
+)
+
+func GenerateKeyFromPassword(pass []byte) ([]byte, error) {
+	if len(pass) < 8 {
+		return nil, fmt.Errorf("master password must be at least 8 characters")
+	}
+
+	for idx := 0; len(pass) < 32; idx++ {
+		pass = append(pass, pass[idx])
+
+		if idx == len(pass) {
+			idx = 0
+		}
+	}
+
+	return pass, nil
+}
+
+func CombinePasswordAndKey(pass, key []byte) ([]byte, error) {
+	if len(pass) < 8 {
+		return nil, fmt.Errorf("master password must be at least 8 characters")
+	}
+	if len(key) != 16 {
+		return nil, fmt.Errorf("key was not of length 16")
+	}
+
+	for idx := 0; len(pass) < 16; idx++ {
+		pass = append(pass, pass[idx])
+	}
+
+	return append(pass[:16], key...), nil
+}
+
+func CBCEncrypt(key []byte, plaintext []byte) ([]byte, error) {
+	if len(key) != 32 {
+		return nil, fmt.Errorf("key is not 32 bytes")
+	}
+
+	plaintext = padding.AddPadding(plaintext)
+
+	if len(plaintext)%aes.BlockSize != 0 {
+		return nil, fmt.Errorf("plaintext is not a multiple of the block size")
+	}
+
+	block, err := aes.NewCipher(key)
+	if err != nil {
+		return nil, err
+	}
+
+	ciphertext := make([]byte, aes.BlockSize+len(plaintext))
+	iv := ciphertext[:aes.BlockSize]
+
+	if _, err := io.ReadFull(rand.Reader, iv); err != nil {
+		return nil, err
+	}
+
+	mode := cipher.NewCBCEncrypter(block, iv)
+	mode.CryptBlocks(ciphertext[aes.BlockSize:], plaintext)
+
+	return ciphertext, nil
+}
+
+func CBCDecrypt(key []byte, ciphertext []byte) ([]byte, error) {
+	if len(key) != 32 {
+		return nil, fmt.Errorf("key is not 32 bytes")
+	}
+
+	block, err := aes.NewCipher(key)
+	if err != nil {
+		return nil, err
+	}
+
+	if len(ciphertext) < aes.BlockSize {
+		return nil, fmt.Errorf("ciphertext is shorter than one block")
+	}
+
+	iv := ciphertext[:aes.BlockSize]
+
+	ciphertext = ciphertext[aes.BlockSize:]
+
+	if len(ciphertext)%aes.BlockSize != 0 {
+		return nil, fmt.Errorf("ciphertext is not a multiple of the block size")
+	}
+
+	mode := cipher.NewCBCDecrypter(block, iv)
+	mode.CryptBlocks(ciphertext, ciphertext)
+
+	return padding.RemovePadding(ciphertext)
+}
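A note on GenerateKeyFromPassword: stretching the password by repetition adds no entropy, so the derived key is only as strong as the password itself. A memory-hard KDF such as scrypt could derive the 32-byte key instead; the sketch below assumes x/crypto/scrypt and a stored random salt, neither of which is part of this patch:

package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/scrypt"
)

func main() {
	// The salt would be generated once and stored next to the config.
	salt := make([]byte, 16)
	if _, err := rand.Read(salt); err != nil {
		panic(err)
	}

	// Memory-hard derivation of a 32-byte key, usable with CBCEncrypt/CBCDecrypt.
	key, err := scrypt.Key([]byte("master password"), salt, 1<<15, 8, 1, 32)
	if err != nil {
		panic(err)
	}

	fmt.Println(len(key)) // 32
}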
fmt.Errorf("ciphertext is not a multiple of the block size") + } + + mode := cipher.NewCBCDecrypter(block, iv) + mode.CryptBlocks(ciphertext, ciphertext) + + return padding.RemovePadding(ciphertext) +} diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 0000000..fdbecc3 --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,21 @@ +version: "3.7" +services: + redis: + image: "redis:alpine" + selfpass: + build: . + entrypoint: + - server + ports: + - "8080:8080" + secrets: + - ca_cert + - server_cert + - server_key +secrets: + ca_cert: + file: ./certs/ca.pem + server_cert: + file: ./certs/server.pem + server_key: + file: ./certs/server-key.pem diff --git a/dual-entry b/dual-entry deleted file mode 100755 index 747dea1..0000000 --- a/dual-entry +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/bash - -# Start the first process -redis-server ./redis.conf & -status=$? -if [ $status -ne 0 ]; then - echo "Failed to start my_first_process: $status" - exit $status -fi - -# Start the second process -./server --dev -v & -status=$? -if [ $status -ne 0 ]; then - echo "Failed to start my_second_process: $status" - exit $status -fi - -# Naive check runs checks once a minute to see if either of the processes exited. -# This illustrates part of the heavy lifting you need to do if you want to run -# more than one service in a container. The container exits with an error -# if it detects that either of the processes has exited. -# Otherwise it loops forever, waking up every 60 seconds - -while sleep 60; do - ps aux |grep my_first_process |grep -q -v grep - PROCESS_1_STATUS=$? - ps aux |grep my_second_process |grep -q -v grep - PROCESS_2_STATUS=$? - # If the greps above find anything, they exit with 0 status - # If they are not both 0, then something is wrong - if [ $PROCESS_1_STATUS -ne 0 -o $PROCESS_2_STATUS -ne 0 ]; then - echo "One of the processes has already exited." 
- exit 1 - fi -done diff --git a/go.mod b/go.mod index 9a3a438..f2f84c4 100644 --- a/go.mod +++ b/go.mod @@ -1,15 +1,33 @@ module github.com/mitchell/selfpass require ( + github.com/Netflix/go-expect v0.0.0-20180928190340-9d1f4485533b // indirect + github.com/atotto/clipboard v0.1.2 github.com/aws/aws-sdk-go-v2 v0.7.0 + github.com/cloudflare/redoctober v0.0.0-20180928214028-3f826eedb692 github.com/go-kit/kit v0.8.0 github.com/go-logfmt/logfmt v0.4.0 // indirect github.com/go-stack/stack v1.8.0 // indirect github.com/golang/protobuf v1.3.1 github.com/google/uuid v1.1.1 + github.com/hinshun/vt10x v0.0.0-20180809195222-d55458df857c // indirect + github.com/inconshreveable/mousetrap v1.0.0 // indirect + github.com/kr/pty v1.1.4 // indirect + github.com/mattn/go-colorable v0.1.1 // indirect + github.com/mattn/go-isatty v0.0.7 // indirect github.com/mediocregopher/radix/v3 v3.2.3 + github.com/mitchellh/go-homedir v1.1.0 + github.com/spf13/cobra v0.0.3 + github.com/spf13/pflag v1.0.3 + github.com/spf13/viper v1.3.2 github.com/stretchr/testify v1.3.0 // indirect - golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 // indirect - golang.org/x/sys v0.0.0-20190416152802-12500544f89f // indirect - google.golang.org/grpc v1.20.0 + golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f // indirect + golang.org/x/net v0.0.0-20190514140710-3ec191127204 // indirect + golang.org/x/sync v0.0.0-20181108010431-42b317875d0f // indirect + golang.org/x/sys v0.0.0-20190516110030-61b9204099cb // indirect + golang.org/x/text v0.3.2 // indirect + google.golang.org/appengine v1.4.0 // indirect + google.golang.org/genproto v0.0.0-20190516172635-bb713bdc0e52 // indirect + google.golang.org/grpc v1.20.1 + gopkg.in/AlecAivazis/survey.v1 v1.8.4 ) diff --git a/go.sum b/go.sum index 22c6de1..96505bf 100644 --- a/go.sum +++ b/go.sum @@ -1,12 +1,26 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/Netflix/go-expect v0.0.0-20180615182759-c93bf25de8e8 h1:xzYJEypr/85nBpB11F9br+3HUrpgb+fcm5iADzXXYEw= +github.com/Netflix/go-expect v0.0.0-20180615182759-c93bf25de8e8/go.mod h1:oX5x61PbNXchhh0oikYAH+4Pcfw5LKv21+Jnpr6r6Pc= +github.com/Netflix/go-expect v0.0.0-20180928190340-9d1f4485533b h1:sSQK05nvxs4UkgCJaxihteu+r+6ela3dNMm7NVmsS3c= +github.com/Netflix/go-expect v0.0.0-20180928190340-9d1f4485533b/go.mod h1:oX5x61PbNXchhh0oikYAH+4Pcfw5LKv21+Jnpr6r6Pc= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/atotto/clipboard v0.1.2 h1:YZCtFu5Ie8qX2VmVTBnrqLSiU9XOWwqNRmdT3gIQzbY= +github.com/atotto/clipboard v0.1.2/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn0Yu86PYI= github.com/aws/aws-sdk-go-v2 v0.7.0 h1:a5xRI/tBmUFKuAA0SOyEY2P1YhQb+jVOEI9P/7KfrP0= github.com/aws/aws-sdk-go-v2 v0.7.0/go.mod h1:17MaCZ9g0q5BIMxwzRQeiv8M3c8+W7iuBnlWAEprcxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cloudflare/redoctober v0.0.0-20180928214028-3f826eedb692 h1:eVvf+nrm0mXV3JOh2c9vt+Pemh/TAUOjqRNrp1eyPmk= +github.com/cloudflare/redoctober v0.0.0-20180928214028-3f826eedb692/go.mod h1:6Se34jNoqrd8bTxrmJB2Bg2aoZ2CdSXonils9NsiNgo= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod 
h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/go-kit/kit v0.8.0 h1:Wz+5lgoB0kkuqLEc6NVmwRknTKP6dTGbSqvhZtBI/j0= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA= @@ -14,8 +28,6 @@ github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= -github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= @@ -25,45 +37,123 @@ github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gucumber/gucumber v0.0.0-20180127021336-7d5c79e832a2/go.mod h1:YbdHRK9ViqwGMS0rtRY+1I6faHvVyyurKPIPwifihxI= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hinshun/vt10x v0.0.0-20180616224451-1954e6464174 h1:WlZsjVhE8Af9IcZDGgJGQpNflI3+MJSBhsgT5PCtzBQ= +github.com/hinshun/vt10x v0.0.0-20180616224451-1954e6464174/go.mod h1:DqJ97dSdRW1W22yXSB90986pcOyQ7r45iio1KN2ez1A= +github.com/hinshun/vt10x v0.0.0-20180809195222-d55458df857c h1:kp3AxgXgDOmIJFR7bIwqFhwJ2qWar8tEQSE5XXhCfVk= +github.com/hinshun/vt10x v0.0.0-20180809195222-d55458df857c/go.mod h1:DqJ97dSdRW1W22yXSB90986pcOyQ7r45iio1KN2ez1A= +github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY= 
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= +github.com/kr/pty v1.1.1 h1:VkoXIwSboBpnk99O/KFauAEILuNHv5DVFKZMBN/gUgw= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.4 h1:5Myjjh3JY/NaAi4IsUbHADytDyl1VE1Y9PXDlL+P/VQ= +github.com/kr/pty v1.1.4/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mattn/go-colorable v0.0.9 h1:UVL0vNpWh04HeJXV0KLcaT7r06gOH2l4OW6ddYRUIY4= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.1 h1:G1f5SKeVxmagw/IyvzvtZE4Gybcc4Tr1tf7I8z0XgOg= +github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= +github.com/mattn/go-isatty v0.0.3 h1:ns/ykhmWi7G9O+8a448SecJU3nSMBXJfqQkl0upE1jI= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.7 h1:UvyT9uN+3r7yLEYSlJsbQGdsaB/a0DlgWP3pql6iwOc= +github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mediocregopher/mediocre-go-lib v0.0.0-20181029021733-cb65787f37ed h1:3dQJqqDouawQgl3gBE1PNHKFkJYGEuFb1DbSlaxdosE= github.com/mediocregopher/mediocre-go-lib v0.0.0-20181029021733-cb65787f37ed/go.mod h1:dSsfyI2zABAdhcbvkXqgxOxrCsbYeHCPgrZkku60dSg= github.com/mediocregopher/radix/v3 v3.2.3 h1:TbcGCZdo9zfPYPgevsqRn+OjvCyfOK6TzuXhqzWdCt0= github.com/mediocregopher/radix/v3 v3.2.3/go.mod h1:EmfVyvspXz1uZEyPBMyGK+kjWiKQGvsUt6O3Pj+LDCQ= +github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b h1:j7+1HpAFS1zy5+Q4qx1fWh90gTKwiN4QCGoY9TWyyO4= +github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/shiena/ansicolor v0.0.0-20151119151921-a422bbe96644/go.mod h1:nkxAfR/5quYxwPZhyDxgasBMnRtBZd0FCEpawpjMUFg= +github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8= +github.com/spf13/cobra v0.0.3/go.mod 
h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/viper v1.3.2 h1:VUFqw5KcqRf7i70GOzW7N+Q7+gxVBkSSqiXB12+JQ4M= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190123085648-057139ce5d2b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f h1:R423Cnkcp5JABoeemiGEPlt9tHXFfw5kvc0yqlxRPWo= +golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190514140710-3ec191127204 h1:4yG6GqBtw9C+UrLp6s2wtSniayy/Vd/3F7ffLE427XI= +golang.org/x/net v0.0.0-20190514140710-3ec191127204/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync 
v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180606202747-9527bec2660b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190416152802-12500544f89f h1:1ZH9RnjNgLzh6YrsRp/c6ddZ8Lq0fq9xztNOoWJ2sz4= -golang.org/x/sys v0.0.0-20190416152802-12500544f89f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190516110030-61b9204099cb h1:k07iPOt0d6nEnwXF+kHB+iEg+WSuKe/SOQuFM2QoD+E= +golang.org/x/sys v0.0.0-20190516110030-61b9204099cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/grpc v1.20.0 h1:DlsSIrgEBuZAUFJcta2B5i/lzeHHbnfkNFAfFXLVFYQ= -google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= +google.golang.org/genproto v0.0.0-20190516172635-bb713bdc0e52 h1:LHc/6x2dMeCKkSsrVgo4DY+Z566T1OeoMwLtdfoy8LE= +google.golang.org/genproto v0.0.0-20190516172635-bb713bdc0e52/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1 h1:Hz2g2wirWK7H0qIIhGIqRGTuMwTE8HEKFnDZZ7lm9NU= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +gopkg.in/AlecAivazis/survey.v1 v1.8.4 h1:10xXXN3wgIhPheb5NI58zFgZv32Ana7P3Tl4shW+0Qc= +gopkg.in/AlecAivazis/survey.v1 v1.8.4/go.mod h1:iBNOmqKz/NUbZx3bA+4hAGLRC7fSK7tgtVDT4tB22XA= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= 
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/grpcurl.sh b/grpcurl.sh index 6e8b0ba..e375792 100644 --- a/grpcurl.sh +++ b/grpcurl.sh @@ -3,4 +3,4 @@ grpcurl -cacert ./certs/ca.pem \ -key ./certs/client-key.pem \ -proto ./credentials/protobuf/service.proto \ localhost:8080 \ - selfpass.credentials.CredentialService/Dump + selfpass.credentials.CredentialService/GetAllMetadata diff --git a/redis.conf b/redis.conf deleted file mode 100644 index 15a8235..0000000 --- a/redis.conf +++ /dev/null @@ -1,1317 +0,0 @@ -# Redis configuration file example. -# -# Note that in order to read the configuration file, Redis must be -# started with the file path as first argument: -# -# ./redis-server /path/to/redis.conf - -# Note on units: when memory size is needed, it is possible to specify -# it in the usual form of 1k 5GB 4M and so forth: -# -# 1k => 1000 bytes -# 1kb => 1024 bytes -# 1m => 1000000 bytes -# 1mb => 1024*1024 bytes -# 1g => 1000000000 bytes -# 1gb => 1024*1024*1024 bytes -# -# units are case insensitive so 1GB 1Gb 1gB are all the same. - -################################## INCLUDES ################################### - -# Include one or more other config files here. This is useful if you -# have a standard template that goes to all Redis servers but also need -# to customize a few per-server settings. Include files can include -# other files, so use this wisely. -# -# Notice option "include" won't be rewritten by command "CONFIG REWRITE" -# from admin or Redis Sentinel. Since Redis always uses the last processed -# line as value of a configuration directive, you'd better put includes -# at the beginning of this file to avoid overwriting config change at runtime. -# -# If instead you are interested in using includes to override configuration -# options, it is better to use include as the last line. -# -# include /path/to/local.conf -# include /path/to/other.conf - -################################## MODULES ##################################### - -# Load modules at startup. If the server is not able to load modules -# it will abort. It is possible to use multiple loadmodule directives. -# -# loadmodule /path/to/my_module.so -# loadmodule /path/to/other_module.so - -################################## NETWORK ##################################### - -# By default, if no "bind" configuration directive is specified, Redis listens -# for connections from all the network interfaces available on the server. -# It is possible to listen to just one or multiple selected interfaces using -# the "bind" configuration directive, followed by one or more IP addresses. -# -# Examples: -# -# bind 192.168.1.100 10.0.0.1 -# bind 127.0.0.1 ::1 -# -# ~~~ WARNING ~~~ If the computer running Redis is directly exposed to the -# internet, binding to all the interfaces is dangerous and will expose the -# instance to everybody on the internet. So by default we uncomment the -# following bind directive, that will force Redis to listen only into -# the IPv4 lookback interface address (this means Redis will be able to -# accept connections only from clients running into the same computer it -# is running). 
-# -# IF YOU ARE SURE YOU WANT YOUR INSTANCE TO LISTEN TO ALL THE INTERFACES -# JUST COMMENT THE FOLLOWING LINE. -# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -bind 127.0.0.1 - -# Protected mode is a layer of security protection, in order to avoid that -# Redis instances left open on the internet are accessed and exploited. -# -# When protected mode is on and if: -# -# 1) The server is not binding explicitly to a set of addresses using the -# "bind" directive. -# 2) No password is configured. -# -# The server only accepts connections from clients connecting from the -# IPv4 and IPv6 loopback addresses 127.0.0.1 and ::1, and from Unix domain -# sockets. -# -# By default protected mode is enabled. You should disable it only if -# you are sure you want clients from other hosts to connect to Redis -# even if no authentication is configured, nor a specific set of interfaces -# are explicitly listed using the "bind" directive. -protected-mode yes - -# Accept connections on the specified port, default is 6379 (IANA #815344). -# If port 0 is specified Redis will not listen on a TCP socket. -port 6379 - -# TCP listen() backlog. -# -# In high requests-per-second environments you need an high backlog in order -# to avoid slow clients connections issues. Note that the Linux kernel -# will silently truncate it to the value of /proc/sys/net/core/somaxconn so -# make sure to raise both the value of somaxconn and tcp_max_syn_backlog -# in order to get the desired effect. -tcp-backlog 511 - -# Unix socket. -# -# Specify the path for the Unix socket that will be used to listen for -# incoming connections. There is no default, so Redis will not listen -# on a unix socket when not specified. -# -# unixsocket /tmp/redis.sock -# unixsocketperm 700 - -# Close the connection after a client is idle for N seconds (0 to disable) -timeout 0 - -# TCP keepalive. -# -# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence -# of communication. This is useful for two reasons: -# -# 1) Detect dead peers. -# 2) Take the connection alive from the point of view of network -# equipment in the middle. -# -# On Linux, the specified value (in seconds) is the period used to send ACKs. -# Note that to close the connection the double of the time is needed. -# On other kernels the period depends on the kernel configuration. -# -# A reasonable value for this option is 300 seconds, which is the new -# Redis default starting with Redis 3.2.1. -tcp-keepalive 300 - -################################# GENERAL ##################################### - -# By default Redis does not run as a daemon. Use 'yes' if you need it. -# Note that Redis will write a pid file in /var/run/redis.pid when daemonized. -daemonize no - -# If you run Redis from upstart or systemd, Redis can interact with your -# supervision tree. Options: -# supervised no - no supervision interaction -# supervised upstart - signal upstart by putting Redis into SIGSTOP mode -# supervised systemd - signal systemd by writing READY=1 to $NOTIFY_SOCKET -# supervised auto - detect upstart or systemd method based on -# UPSTART_JOB or NOTIFY_SOCKET environment variables -# Note: these supervision methods only signal "process is ready." -# They do not enable continuous liveness pings back to your supervisor. -supervised no - -# If a pid file is specified, Redis writes it where specified at startup -# and removes it at exit. -# -# When the server runs non daemonized, no pid file is created if none is -# specified in the configuration. 
When the server is daemonized, the pid file -# is used even if not specified, defaulting to "/var/run/redis.pid". -# -# Creating a pid file is best effort: if Redis is not able to create it -# nothing bad happens, the server will start and run normally. -pidfile /var/run/redis_6379.pid - -# Specify the server verbosity level. -# This can be one of: -# debug (a lot of information, useful for development/testing) -# verbose (many rarely useful info, but not a mess like the debug level) -# notice (moderately verbose, what you want in production probably) -# warning (only very important / critical messages are logged) -loglevel debug - -# Specify the log file name. Also the empty string can be used to force -# Redis to log on the standard output. Note that if you use standard -# output for logging but daemonize, logs will be sent to /dev/null -logfile "" - -# To enable logging to the system logger, just set 'syslog-enabled' to yes, -# and optionally update the other syslog parameters to suit your needs. -# syslog-enabled no - -# Specify the syslog identity. -# syslog-ident redis - -# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7. -# syslog-facility local0 - -# Set the number of databases. The default database is DB 0, you can select -# a different one on a per-connection basis using SELECT where -# dbid is a number between 0 and 'databases'-1 -databases 1 - -# By default Redis shows an ASCII art logo only when started to log to the -# standard output and if the standard output is a TTY. Basically this means -# that normally a logo is displayed only in interactive sessions. -# -# However it is possible to force the pre-4.0 behavior and always show a -# ASCII art logo in startup logs by setting the following option to yes. -always-show-logo yes - -################################ SNAPSHOTTING ################################ -# -# Save the DB on disk: -# -# save -# -# Will save the DB if both the given number of seconds and the given -# number of write operations against the DB occurred. -# -# In the example below the behaviour will be to save: -# after 900 sec (15 min) if at least 1 key changed -# after 300 sec (5 min) if at least 10 keys changed -# after 60 sec if at least 10000 keys changed -# -# Note: you can disable saving completely by commenting out all "save" lines. -# -# It is also possible to remove all the previously configured save -# points by adding a save directive with a single empty string argument -# like in the following example: -# -# save "" - -save 900 1 -save 300 10 -save 60 10000 - -# By default Redis will stop accepting writes if RDB snapshots are enabled -# (at least one save point) and the latest background save failed. -# This will make the user aware (in a hard way) that data is not persisting -# on disk properly, otherwise chances are that no one will notice and some -# disaster will happen. -# -# If the background saving process will start working again Redis will -# automatically allow writes again. -# -# However if you have setup your proper monitoring of the Redis server -# and persistence, you may want to disable this feature so that Redis will -# continue to work as usual even if there are problems with disk, -# permissions, and so forth. -stop-writes-on-bgsave-error yes - -# Compress string objects using LZF when dump .rdb databases? -# For default that's set to 'yes' as it's almost always a win. 
-# If you want to save some CPU in the saving child set it to 'no' but -# the dataset will likely be bigger if you have compressible values or keys. -rdbcompression yes - -# Since version 5 of RDB a CRC64 checksum is placed at the end of the file. -# This makes the format more resistant to corruption but there is a performance -# hit to pay (around 10%) when saving and loading RDB files, so you can disable it -# for maximum performances. -# -# RDB files created with checksum disabled have a checksum of zero that will -# tell the loading code to skip the check. -rdbchecksum yes - -# The filename where to dump the DB -dbfilename dump.rdb - -# The working directory. -# -# The DB will be written inside this directory, with the filename specified -# above using the 'dbfilename' configuration directive. -# -# The Append Only File will also be created inside this directory. -# -# Note that you must specify a directory here, not a file name. -dir ./db/ - -################################# REPLICATION ################################# - -# Master-Slave replication. Use slaveof to make a Redis instance a copy of -# another Redis server. A few things to understand ASAP about Redis replication. -# -# 1) Redis replication is asynchronous, but you can configure a master to -# stop accepting writes if it appears to be not connected with at least -# a given number of slaves. -# 2) Redis slaves are able to perform a partial resynchronization with the -# master if the replication link is lost for a relatively small amount of -# time. You may want to configure the replication backlog size (see the next -# sections of this file) with a sensible value depending on your needs. -# 3) Replication is automatic and does not need user intervention. After a -# network partition slaves automatically try to reconnect to masters -# and resynchronize with them. -# -# slaveof - -# If the master is password protected (using the "requirepass" configuration -# directive below) it is possible to tell the slave to authenticate before -# starting the replication synchronization process, otherwise the master will -# refuse the slave request. -# -# masterauth - -# When a slave loses its connection with the master, or when the replication -# is still in progress, the slave can act in two different ways: -# -# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will -# still reply to client requests, possibly with out of date data, or the -# data set may just be empty if this is the first synchronization. -# -# 2) if slave-serve-stale-data is set to 'no' the slave will reply with -# an error "SYNC with master in progress" to all the kind of commands -# but to INFO and SLAVEOF. -# -slave-serve-stale-data yes - -# You can configure a slave instance to accept writes or not. Writing against -# a slave instance may be useful to store some ephemeral data (because data -# written on a slave will be easily deleted after resync with the master) but -# may also cause problems if clients are writing to it because of a -# misconfiguration. -# -# Since Redis 2.6 by default slaves are read-only. -# -# Note: read only slaves are not designed to be exposed to untrusted clients -# on the internet. It's just a protection layer against misuse of the instance. -# Still a read only slave exports by default all the administrative commands -# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve -# security of read only slaves using 'rename-command' to shadow all the -# administrative / dangerous commands. 
-slave-read-only yes - -# Replication SYNC strategy: disk or socket. -# -# ------------------------------------------------------- -# WARNING: DISKLESS REPLICATION IS EXPERIMENTAL CURRENTLY -# ------------------------------------------------------- -# -# New slaves and reconnecting slaves that are not able to continue the replication -# process just receiving differences, need to do what is called a "full -# synchronization". An RDB file is transmitted from the master to the slaves. -# The transmission can happen in two different ways: -# -# 1) Disk-backed: The Redis master creates a new process that writes the RDB -# file on disk. Later the file is transferred by the parent -# process to the slaves incrementally. -# 2) Diskless: The Redis master creates a new process that directly writes the -# RDB file to slave sockets, without touching the disk at all. -# -# With disk-backed replication, while the RDB file is generated, more slaves -# can be queued and served with the RDB file as soon as the current child producing -# the RDB file finishes its work. With diskless replication instead once -# the transfer starts, new slaves arriving will be queued and a new transfer -# will start when the current one terminates. -# -# When diskless replication is used, the master waits a configurable amount of -# time (in seconds) before starting the transfer in the hope that multiple slaves -# will arrive and the transfer can be parallelized. -# -# With slow disks and fast (large bandwidth) networks, diskless replication -# works better. -repl-diskless-sync no - -# When diskless replication is enabled, it is possible to configure the delay -# the server waits in order to spawn the child that transfers the RDB via socket -# to the slaves. -# -# This is important since once the transfer starts, it is not possible to serve -# new slaves arriving, that will be queued for the next RDB transfer, so the server -# waits a delay in order to let more slaves arrive. -# -# The delay is specified in seconds, and by default is 5 seconds. To disable -# it entirely just set it to 0 seconds and the transfer will start ASAP. -repl-diskless-sync-delay 5 - -# Slaves send PINGs to server in a predefined interval. It's possible to change -# this interval with the repl_ping_slave_period option. The default value is 10 -# seconds. -# -# repl-ping-slave-period 10 - -# The following option sets the replication timeout for: -# -# 1) Bulk transfer I/O during SYNC, from the point of view of slave. -# 2) Master timeout from the point of view of slaves (data, pings). -# 3) Slave timeout from the point of view of masters (REPLCONF ACK pings). -# -# It is important to make sure that this value is greater than the value -# specified for repl-ping-slave-period otherwise a timeout will be detected -# every time there is low traffic between the master and the slave. -# -# repl-timeout 60 - -# Disable TCP_NODELAY on the slave socket after SYNC? -# -# If you select "yes" Redis will use a smaller number of TCP packets and -# less bandwidth to send data to slaves. But this can add a delay for -# the data to appear on the slave side, up to 40 milliseconds with -# Linux kernels using a default configuration. -# -# If you select "no" the delay for data to appear on the slave side will -# be reduced but more bandwidth will be used for replication. -# -# By default we optimize for low latency, but in very high traffic conditions -# or when the master and slaves are many hops away, turning this to "yes" may -# be a good idea. 
-repl-disable-tcp-nodelay no - -# Set the replication backlog size. The backlog is a buffer that accumulates -# slave data when slaves are disconnected for some time, so that when a slave -# wants to reconnect again, often a full resync is not needed, but a partial -# resync is enough, just passing the portion of data the slave missed while -# disconnected. -# -# The bigger the replication backlog, the longer the time the slave can be -# disconnected and later be able to perform a partial resynchronization. -# -# The backlog is only allocated once there is at least a slave connected. -# -# repl-backlog-size 1mb - -# After a master has no longer connected slaves for some time, the backlog -# will be freed. The following option configures the amount of seconds that -# need to elapse, starting from the time the last slave disconnected, for -# the backlog buffer to be freed. -# -# Note that slaves never free the backlog for timeout, since they may be -# promoted to masters later, and should be able to correctly "partially -# resynchronize" with the slaves: hence they should always accumulate backlog. -# -# A value of 0 means to never release the backlog. -# -# repl-backlog-ttl 3600 - -# The slave priority is an integer number published by Redis in the INFO output. -# It is used by Redis Sentinel in order to select a slave to promote into a -# master if the master is no longer working correctly. -# -# A slave with a low priority number is considered better for promotion, so -# for instance if there are three slaves with priority 10, 100, 25 Sentinel will -# pick the one with priority 10, that is the lowest. -# -# However a special priority of 0 marks the slave as not able to perform the -# role of master, so a slave with priority of 0 will never be selected by -# Redis Sentinel for promotion. -# -# By default the priority is 100. -slave-priority 100 - -# It is possible for a master to stop accepting writes if there are less than -# N slaves connected, having a lag less or equal than M seconds. -# -# The N slaves need to be in "online" state. -# -# The lag in seconds, that must be <= the specified value, is calculated from -# the last ping received from the slave, that is usually sent every second. -# -# This option does not GUARANTEE that N replicas will accept the write, but -# will limit the window of exposure for lost writes in case not enough slaves -# are available, to the specified number of seconds. -# -# For example to require at least 3 slaves with a lag <= 10 seconds use: -# -# min-slaves-to-write 3 -# min-slaves-max-lag 10 -# -# Setting one or the other to 0 disables the feature. -# -# By default min-slaves-to-write is set to 0 (feature disabled) and -# min-slaves-max-lag is set to 10. - -# A Redis master is able to list the address and port of the attached -# slaves in different ways. For example the "INFO replication" section -# offers this information, which is used, among other tools, by -# Redis Sentinel in order to discover slave instances. -# Another place where this info is available is in the output of the -# "ROLE" command of a master. -# -# The listed IP and address normally reported by a slave is obtained -# in the following way: -# -# IP: The address is auto detected by checking the peer address -# of the socket used by the slave to connect with the master. -# -# Port: The port is communicated by the slave during the replication -# handshake, and is normally the port that the slave is using to -# list for connections. 
-# -# However when port forwarding or Network Address Translation (NAT) is -# used, the slave may be actually reachable via different IP and port -# pairs. The following two options can be used by a slave in order to -# report to its master a specific set of IP and port, so that both INFO -# and ROLE will report those values. -# -# There is no need to use both the options if you need to override just -# the port or the IP address. -# -# slave-announce-ip 5.5.5.5 -# slave-announce-port 1234 - -################################## SECURITY ################################### - -# Require clients to issue AUTH before processing any other -# commands. This might be useful in environments in which you do not trust -# others with access to the host running redis-server. -# -# This should stay commented out for backward compatibility and because most -# people do not need auth (e.g. they run their own servers). -# -# Warning: since Redis is pretty fast an outside user can try up to -# 150k passwords per second against a good box. This means that you should -# use a very strong password otherwise it will be very easy to break. -# -# requirepass foobared - -# Command renaming. -# -# It is possible to change the name of dangerous commands in a shared -# environment. For instance the CONFIG command may be renamed into something -# hard to guess so that it will still be available for internal-use tools -# but not available for general clients. -# -# Example: -# -# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 -# -# It is also possible to completely kill a command by renaming it into -# an empty string: -# -# rename-command CONFIG "" -# -# Please note that changing the name of commands that are logged into the -# AOF file or transmitted to slaves may cause problems. - -################################### CLIENTS #################################### - -# Set the max number of connected clients at the same time. By default -# this limit is set to 10000 clients, however if the Redis server is not -# able to configure the process file limit to allow for the specified limit -# the max number of allowed clients is set to the current file limit -# minus 32 (as Redis reserves a few file descriptors for internal uses). -# -# Once the limit is reached Redis will close all the new connections sending -# an error 'max number of clients reached'. -# -# maxclients 10000 - -############################## MEMORY MANAGEMENT ################################ - -# Set a memory usage limit to the specified amount of bytes. -# When the memory limit is reached Redis will try to remove keys -# according to the eviction policy selected (see maxmemory-policy). -# -# If Redis can't remove keys according to the policy, or if the policy is -# set to 'noeviction', Redis will start to reply with errors to commands -# that would use more memory, like SET, LPUSH, and so on, and will continue -# to reply to read-only commands like GET. -# -# This option is usually useful when using Redis as an LRU or LFU cache, or to -# set a hard memory limit for an instance (using the 'noeviction' policy). 
-# -# WARNING: If you have slaves attached to an instance with maxmemory on, -# the size of the output buffers needed to feed the slaves are subtracted -# from the used memory count, so that network problems / resyncs will -# not trigger a loop where keys are evicted, and in turn the output -# buffer of slaves is full with DELs of keys evicted triggering the deletion -# of more keys, and so forth until the database is completely emptied. -# -# In short... if you have slaves attached it is suggested that you set a lower -# limit for maxmemory so that there is some free RAM on the system for slave -# output buffers (but this is not needed if the policy is 'noeviction'). -# -maxmemory 8096000000 - -# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory -# is reached. You can select among five behaviors: -# -# volatile-lru -> Evict using approximated LRU among the keys with an expire set. -# allkeys-lru -> Evict any key using approximated LRU. -# volatile-lfu -> Evict using approximated LFU among the keys with an expire set. -# allkeys-lfu -> Evict any key using approximated LFU. -# volatile-random -> Remove a random key among the ones with an expire set. -# allkeys-random -> Remove a random key, any key. -# volatile-ttl -> Remove the key with the nearest expire time (minor TTL) -# noeviction -> Don't evict anything, just return an error on write operations. -# -# LRU means Least Recently Used -# LFU means Least Frequently Used -# -# Both LRU, LFU and volatile-ttl are implemented using approximated -# randomized algorithms. -# -# Note: with any of the above policies, Redis will return an error on write -# operations, when there are no suitable keys for eviction. -# -# At the date of writing these commands are: set setnx setex append -# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd -# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby -# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby -# getset mset msetnx exec sort -# -# The default is: -# -maxmemory-policy noeviction - -# LRU, LFU and minimal TTL algorithms are not precise algorithms but approximated -# algorithms (in order to save memory), so you can tune it for speed or -# accuracy. For default Redis will check five keys and pick the one that was -# used less recently, you can change the sample size using the following -# configuration directive. -# -# The default of 5 produces good enough results. 10 Approximates very closely -# true LRU but costs more CPU. 3 is faster but not very accurate. -# -# maxmemory-samples 5 - -############################# LAZY FREEING #################################### - -# Redis has two primitives to delete keys. One is called DEL and is a blocking -# deletion of the object. It means that the server stops processing new commands -# in order to reclaim all the memory associated with an object in a synchronous -# way. If the key deleted is associated with a small object, the time needed -# in order to execute the DEL command is very small and comparable to most other -# O(1) or O(log_N) commands in Redis. However if the key is associated with an -# aggregated value containing millions of elements, the server can block for -# a long time (even seconds) in order to complete the operation. -# -# For the above reasons Redis also offers non blocking deletion primitives -# such as UNLINK (non blocking DEL) and the ASYNC option of FLUSHALL and -# FLUSHDB commands, in order to reclaim memory in background. 
Those commands -# are executed in constant time. Another thread will incrementally free the -# object in the background as fast as possible. -# -# DEL, UNLINK and ASYNC option of FLUSHALL and FLUSHDB are user-controlled. -# It's up to the design of the application to understand when it is a good -# idea to use one or the other. However the Redis server sometimes has to -# delete keys or flush the whole database as a side effect of other operations. -# Specifically Redis deletes objects independently of a user call in the -# following scenarios: -# -# 1) On eviction, because of the maxmemory and maxmemory policy configurations, -# in order to make room for new data, without going over the specified -# memory limit. -# 2) Because of expire: when a key with an associated time to live (see the -# EXPIRE command) must be deleted from memory. -# 3) Because of a side effect of a command that stores data on a key that may -# already exist. For example the RENAME command may delete the old key -# content when it is replaced with another one. Similarly SUNIONSTORE -# or SORT with STORE option may delete existing keys. The SET command -# itself removes any old content of the specified key in order to replace -# it with the specified string. -# 4) During replication, when a slave performs a full resynchronization with -# its master, the content of the whole database is removed in order to -# load the RDB file just transfered. -# -# In all the above cases the default is to delete objects in a blocking way, -# like if DEL was called. However you can configure each case specifically -# in order to instead release memory in a non-blocking way like if UNLINK -# was called, using the following configuration directives: - -lazyfree-lazy-eviction no -lazyfree-lazy-expire no -lazyfree-lazy-server-del no -slave-lazy-flush no - -############################## APPEND ONLY MODE ############################### - -# By default Redis asynchronously dumps the dataset on disk. This mode is -# good enough in many applications, but an issue with the Redis process or -# a power outage may result into a few minutes of writes lost (depending on -# the configured save points). -# -# The Append Only File is an alternative persistence mode that provides -# much better durability. For instance using the default data fsync policy -# (see later in the config file) Redis can lose just one second of writes in a -# dramatic event like a server power outage, or a single write if something -# wrong with the Redis process itself happens, but the operating system is -# still running correctly. -# -# AOF and RDB persistence can be enabled at the same time without problems. -# If the AOF is enabled on startup Redis will load the AOF, that is the file -# with the better durability guarantees. -# -# Please check http://redis.io/topics/persistence for more information. - -appendonly no - -# The name of the append only file (default: "appendonly.aof") - -appendfilename "appendonly.aof" - -# The fsync() call tells the Operating System to actually write data on disk -# instead of waiting for more data in the output buffer. Some OS will really flush -# data on disk, some other OS will just try to do it ASAP. -# -# Redis supports three different modes: -# -# no: don't fsync, just let the OS flush the data when it wants. Faster. -# always: fsync after every write to the append only log. Slow, Safest. -# everysec: fsync only one time every second. Compromise. 
-# -# The default is "everysec", as that's usually the right compromise between -# speed and data safety. It's up to you to understand if you can relax this to -# "no" that will let the operating system flush the output buffer when -# it wants, for better performances (but if you can live with the idea of -# some data loss consider the default persistence mode that's snapshotting), -# or on the contrary, use "always" that's very slow but a bit safer than -# everysec. -# -# More details please check the following article: -# http://antirez.com/post/redis-persistence-demystified.html -# -# If unsure, use "everysec". - -# appendfsync always -appendfsync everysec -# appendfsync no - -# When the AOF fsync policy is set to always or everysec, and a background -# saving process (a background save or AOF log background rewriting) is -# performing a lot of I/O against the disk, in some Linux configurations -# Redis may block too long on the fsync() call. Note that there is no fix for -# this currently, as even performing fsync in a different thread will block -# our synchronous write(2) call. -# -# In order to mitigate this problem it's possible to use the following option -# that will prevent fsync() from being called in the main process while a -# BGSAVE or BGREWRITEAOF is in progress. -# -# This means that while another child is saving, the durability of Redis is -# the same as "appendfsync none". In practical terms, this means that it is -# possible to lose up to 30 seconds of log in the worst scenario (with the -# default Linux settings). -# -# If you have latency problems turn this to "yes". Otherwise leave it as -# "no" that is the safest pick from the point of view of durability. - -no-appendfsync-on-rewrite no - -# Automatic rewrite of the append only file. -# Redis is able to automatically rewrite the log file implicitly calling -# BGREWRITEAOF when the AOF log size grows by the specified percentage. -# -# This is how it works: Redis remembers the size of the AOF file after the -# latest rewrite (if no rewrite has happened since the restart, the size of -# the AOF at startup is used). -# -# This base size is compared to the current size. If the current size is -# bigger than the specified percentage, the rewrite is triggered. Also -# you need to specify a minimal size for the AOF file to be rewritten, this -# is useful to avoid rewriting the AOF file even if the percentage increase -# is reached but it is still pretty small. -# -# Specify a percentage of zero in order to disable the automatic AOF -# rewrite feature. - -auto-aof-rewrite-percentage 100 -auto-aof-rewrite-min-size 64mb - -# An AOF file may be found to be truncated at the end during the Redis -# startup process, when the AOF data gets loaded back into memory. -# This may happen when the system where Redis is running -# crashes, especially when an ext4 filesystem is mounted without the -# data=ordered option (however this can't happen when Redis itself -# crashes or aborts but the operating system still works correctly). -# -# Redis can either exit with an error when this happens, or load as much -# data as possible (the default now) and start if the AOF file is found -# to be truncated at the end. The following option controls this behavior. -# -# If aof-load-truncated is set to yes, a truncated AOF file is loaded and -# the Redis server starts emitting a log to inform the user of the event. -# Otherwise if the option is set to no, the server aborts with an error -# and refuses to start. 
When the option is set to no, the user requires -# to fix the AOF file using the "redis-check-aof" utility before to restart -# the server. -# -# Note that if the AOF file will be found to be corrupted in the middle -# the server will still exit with an error. This option only applies when -# Redis will try to read more data from the AOF file but not enough bytes -# will be found. -aof-load-truncated yes - -# When rewriting the AOF file, Redis is able to use an RDB preamble in the -# AOF file for faster rewrites and recoveries. When this option is turned -# on the rewritten AOF file is composed of two different stanzas: -# -# [RDB file][AOF tail] -# -# When loading Redis recognizes that the AOF file starts with the "REDIS" -# string and loads the prefixed RDB file, and continues loading the AOF -# tail. -# -# This is currently turned off by default in order to avoid the surprise -# of a format change, but will at some point be used as the default. -aof-use-rdb-preamble no - -################################ LUA SCRIPTING ############################### - -# Max execution time of a Lua script in milliseconds. -# -# If the maximum execution time is reached Redis will log that a script is -# still in execution after the maximum allowed time and will start to -# reply to queries with an error. -# -# When a long running script exceeds the maximum execution time only the -# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be -# used to stop a script that did not yet called write commands. The second -# is the only way to shut down the server in the case a write command was -# already issued by the script but the user doesn't want to wait for the natural -# termination of the script. -# -# Set it to 0 or a negative value for unlimited execution without warnings. -lua-time-limit 5000 - -################################ REDIS CLUSTER ############################### -# -# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# WARNING EXPERIMENTAL: Redis Cluster is considered to be stable code, however -# in order to mark it as "mature" we need to wait for a non trivial percentage -# of users to deploy it in production. -# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# -# Normal Redis instances can't be part of a Redis Cluster; only nodes that are -# started as cluster nodes can. In order to start a Redis instance as a -# cluster node enable the cluster support uncommenting the following: -# -# cluster-enabled yes - -# Every cluster node has a cluster configuration file. This file is not -# intended to be edited by hand. It is created and updated by Redis nodes. -# Every Redis Cluster node requires a different cluster configuration file. -# Make sure that instances running in the same system do not have -# overlapping cluster configuration file names. -# -# cluster-config-file nodes-6379.conf - -# Cluster node timeout is the amount of milliseconds a node must be unreachable -# for it to be considered in failure state. -# Most other internal time limits are multiple of the node timeout. -# -# cluster-node-timeout 15000 - -# A slave of a failing master will avoid to start a failover if its data -# looks too old. 
-# -# There is no simple way for a slave to actually have an exact measure of -# its "data age", so the following two checks are performed: -# -# 1) If there are multiple slaves able to failover, they exchange messages -# in order to try to give an advantage to the slave with the best -# replication offset (more data from the master processed). -# Slaves will try to get their rank by offset, and apply to the start -# of the failover a delay proportional to their rank. -# -# 2) Every single slave computes the time of the last interaction with -# its master. This can be the last ping or command received (if the master -# is still in the "connected" state), or the time that elapsed since the -# disconnection with the master (if the replication link is currently down). -# If the last interaction is too old, the slave will not try to failover -# at all. -# -# The point "2" can be tuned by user. Specifically a slave will not perform -# the failover if, since the last interaction with the master, the time -# elapsed is greater than: -# -# (node-timeout * slave-validity-factor) + repl-ping-slave-period -# -# So for example if node-timeout is 30 seconds, and the slave-validity-factor -# is 10, and assuming a default repl-ping-slave-period of 10 seconds, the -# slave will not try to failover if it was not able to talk with the master -# for longer than 310 seconds. -# -# A large slave-validity-factor may allow slaves with too old data to failover -# a master, while a too small value may prevent the cluster from being able to -# elect a slave at all. -# -# For maximum availability, it is possible to set the slave-validity-factor -# to a value of 0, which means, that slaves will always try to failover the -# master regardless of the last time they interacted with the master. -# (However they'll always try to apply a delay proportional to their -# offset rank). -# -# Zero is the only value able to guarantee that when all the partitions heal -# the cluster will always be able to continue. -# -# cluster-slave-validity-factor 10 - -# Cluster slaves are able to migrate to orphaned masters, that are masters -# that are left without working slaves. This improves the cluster ability -# to resist to failures as otherwise an orphaned master can't be failed over -# in case of failure if it has no working slaves. -# -# Slaves migrate to orphaned masters only if there are still at least a -# given number of other working slaves for their old master. This number -# is the "migration barrier". A migration barrier of 1 means that a slave -# will migrate only if there is at least 1 other working slave for its master -# and so forth. It usually reflects the number of slaves you want for every -# master in your cluster. -# -# Default is 1 (slaves migrate only if their masters remain with at least -# one slave). To disable migration just set it to a very large value. -# A value of 0 can be set but is useful only for debugging and dangerous -# in production. -# -# cluster-migration-barrier 1 - -# By default Redis Cluster nodes stop accepting queries if they detect there -# is at least an hash slot uncovered (no available node is serving it). -# This way if the cluster is partially down (for example a range of hash slots -# are no longer covered) all the cluster becomes, eventually, unavailable. -# It automatically returns available as soon as all the slots are covered again. 
-# -# However sometimes you want the subset of the cluster which is working, -# to continue to accept queries for the part of the key space that is still -# covered. In order to do so, just set the cluster-require-full-coverage -# option to no. -# -# cluster-require-full-coverage yes - -# This option, when set to yes, prevents slaves from trying to failover its -# master during master failures. However the master can still perform a -# manual failover, if forced to do so. -# -# This is useful in different scenarios, especially in the case of multiple -# data center operations, where we want one side to never be promoted if not -# in the case of a total DC failure. -# -# cluster-slave-no-failover no - -# In order to setup your cluster make sure to read the documentation -# available at http://redis.io web site. - -########################## CLUSTER DOCKER/NAT support ######################## - -# In certain deployments, Redis Cluster nodes address discovery fails, because -# addresses are NAT-ted or because ports are forwarded (the typical case is -# Docker and other containers). -# -# In order to make Redis Cluster working in such environments, a static -# configuration where each node knows its public address is needed. The -# following two options are used for this scope, and are: -# -# * cluster-announce-ip -# * cluster-announce-port -# * cluster-announce-bus-port -# -# Each instruct the node about its address, client port, and cluster message -# bus port. The information is then published in the header of the bus packets -# so that other nodes will be able to correctly map the address of the node -# publishing the information. -# -# If the above options are not used, the normal Redis Cluster auto-detection -# will be used instead. -# -# Note that when remapped, the bus port may not be at the fixed offset of -# clients port + 10000, so you can specify any port and bus-port depending -# on how they get remapped. If the bus-port is not set, a fixed offset of -# 10000 will be used as usually. -# -# Example: -# -# cluster-announce-ip 10.1.1.5 -# cluster-announce-port 6379 -# cluster-announce-bus-port 6380 - -################################## SLOW LOG ################################### - -# The Redis Slow Log is a system to log queries that exceeded a specified -# execution time. The execution time does not include the I/O operations -# like talking with the client, sending the reply and so forth, -# but just the time needed to actually execute the command (this is the only -# stage of command execution where the thread is blocked and can not serve -# other requests in the meantime). -# -# You can configure the slow log with two parameters: one tells Redis -# what is the execution time, in microseconds, to exceed in order for the -# command to get logged, and the other parameter is the length of the -# slow log. When a new command is logged the oldest one is removed from the -# queue of logged commands. - -# The following time is expressed in microseconds, so 1000000 is equivalent -# to one second. Note that a negative number disables the slow log, while -# a value of zero forces the logging of every command. -slowlog-log-slower-than 10000 - -# There is no limit to this length. Just be aware that it will consume memory. -# You can reclaim memory used by the slow log with SLOWLOG RESET. 
-
-################################ LATENCY MONITOR ##############################
-
-# The Redis latency monitoring subsystem samples different operations
-# at runtime in order to collect data related to possible sources of
-# latency of a Redis instance.
-#
-# Via the LATENCY command this information is available to the user, who can
-# print graphs and obtain reports.
-#
-# The system only logs operations that were performed in a time equal to or
-# greater than the amount of milliseconds specified via the
-# latency-monitor-threshold configuration directive. When its value is set
-# to zero, the latency monitor is turned off.
-#
-# By default latency monitoring is disabled since it is mostly not needed
-# if you don't have latency issues, and collecting data has a performance
-# impact that, while very small, can be measured under heavy load. Latency
-# monitoring can easily be enabled at runtime using the command
-# "CONFIG SET latency-monitor-threshold <milliseconds>" if needed.
-latency-monitor-threshold 0
-
-############################# EVENT NOTIFICATION ##############################
-
-# Redis can notify Pub/Sub clients about events happening in the key space.
-# This feature is documented at http://redis.io/topics/notifications
-#
-# For instance if keyspace events notification is enabled, and a client
-# performs a DEL operation on key "foo" stored in Database 0, two
-# messages will be published via Pub/Sub:
-#
-# PUBLISH __keyspace@0__:foo del
-# PUBLISH __keyevent@0__:del foo
-#
-# It is possible to select the events that Redis will notify among a set
-# of classes. Every class is identified by a single character:
-#
-#  K     Keyspace events, published with __keyspace@<db>__ prefix.
-#  E     Keyevent events, published with __keyevent@<db>__ prefix.
-#  g     Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ...
-#  $     String commands
-#  l     List commands
-#  s     Set commands
-#  h     Hash commands
-#  z     Sorted set commands
-#  x     Expired events (events generated every time a key expires)
-#  e     Evicted events (events generated when a key is evicted for maxmemory)
-#  A     Alias for g$lshzxe, so that the "AKE" string means all the events.
-#
-# The "notify-keyspace-events" directive takes as its argument a string that
-# is composed of zero or multiple characters. The empty string means that
-# notifications are disabled.
-#
-# Example: to enable list and generic events, from the point of view of the
-# event name, use:
-#
-#  notify-keyspace-events Elg
-#
-# Example 2: to get the stream of the expired keys subscribing to the channel
-# name __keyevent@0__:expired, use:
-#
-#  notify-keyspace-events Ex
-#
-# By default all notifications are disabled because most users don't need
-# this feature and the feature has some overhead. Note that if you don't
-# specify at least one of K or E, no events will be delivered.
-notify-keyspace-events ""
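As a worked version of Example 2 above, the following sketch enables expired-key events and subscribes to the corresponding channel. It again assumes the go-redis/v8 client and a local server, neither of which is part of this patch:

    package main

    import (
    	"context"
    	"fmt"

    	"github.com/go-redis/redis/v8"
    )

    func main() {
    	ctx := context.Background()
    	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

    	// Enable keyevent (E) notifications for expired (x) keys, i.e. "Ex".
    	if err := rdb.ConfigSet(ctx, "notify-keyspace-events", "Ex").Err(); err != nil {
    		panic(err)
    	}

    	// Subscribe to the channel named in Example 2; the payload of each
    	// message is the name of the key that expired.
    	pubsub := rdb.Subscribe(ctx, "__keyevent@0__:expired")
    	defer pubsub.Close()

    	for msg := range pubsub.Channel() {
    		fmt.Println("expired key:", msg.Payload)
    	}
    }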
-
-############################### ADVANCED CONFIG ###############################
-
-# Hashes are encoded using a memory efficient data structure when they have a
-# small number of entries, and the biggest entry does not exceed a given
-# threshold. These thresholds can be configured using the following directives.
-hash-max-ziplist-entries 512
-hash-max-ziplist-value 64
-
-# Lists are also encoded in a special way to save a lot of space.
-# The number of entries allowed per internal list node can be specified
-# as a fixed maximum size or a maximum number of elements.
-# For a fixed maximum size, use -5 through -1, meaning:
-# -5: max size: 64 Kb  <-- not recommended for normal workloads
-# -4: max size: 32 Kb  <-- not recommended
-# -3: max size: 16 Kb  <-- probably not recommended
-# -2: max size: 8 Kb   <-- good
-# -1: max size: 4 Kb   <-- good
-# Positive numbers mean store up to _exactly_ that number of elements
-# per list node.
-# The highest performing option is usually -2 (8 Kb size) or -1 (4 Kb size),
-# but if your use case is unique, adjust the settings as necessary.
-list-max-ziplist-size -2
-
-# Lists may also be compressed.
-# Compress depth is the number of quicklist ziplist nodes from *each* side of
-# the list to *exclude* from compression. The head and tail of the list
-# are always uncompressed for fast push/pop operations. Settings are:
-# 0: disable all list compression
-# 1: depth 1 means "don't start compressing until after 1 node into the list,
-#    going from either the head or tail"
-#    So: [head]->node->node->...->node->[tail]
-#    [head], [tail] will always be uncompressed; inner nodes will compress.
-# 2: [head]->[next]->node->node->...->node->[prev]->[tail]
-#    2 here means: don't compress head or head->next or tail->prev or tail,
-#    but compress all nodes between them.
-# 3: [head]->[next]->[next]->node->node->...->node->[prev]->[prev]->[tail]
-#    etc.
-list-compress-depth 0
-
-# Sets have a special encoding in just one case: when a set is composed
-# of just strings that happen to be integers in radix 10 in the range
-# of 64 bit signed integers.
-# The following configuration setting sets the limit on the size of the
-# set in order to use this special memory saving encoding.
-set-max-intset-entries 512
-
-# Similarly to hashes and lists, sorted sets are also specially encoded in
-# order to save a lot of space. This encoding is only used when the length and
-# elements of a sorted set are below the following limits:
-zset-max-ziplist-entries 128
-zset-max-ziplist-value 64
-
-# HyperLogLog sparse representation bytes limit. The limit includes the
-# 16 byte header. When a HyperLogLog using the sparse representation crosses
-# this limit, it is converted into the dense representation.
-#
-# A value greater than 16000 is totally useless, since at that point the
-# dense representation is more memory efficient.
-#
-# The suggested value is ~ 3000 in order to have the benefits of
-# the space efficient encoding without slowing down PFADD too much,
-# which is O(N) with the sparse encoding. The value can be raised to
-# ~ 10000 when CPU is not a concern, but space is, and the data set is
-# composed of many HyperLogLogs with cardinality in the 0 - 15000 range.
-hll-sparse-max-bytes 3000
-
-# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
-# order to help rehashing the main Redis hash table (the one mapping top-level
-# keys to values). The hash table implementation Redis uses (see dict.c)
-# performs a lazy rehashing: the more operations you run against a hash table
-# that is rehashing, the more rehashing "steps" are performed, so if the
-# server is idle the rehashing is never complete and some more memory is used
-# by the hash table.
-#
-# The default is to use this millisecond 10 times every second in order to
-# actively rehash the main dictionaries, freeing memory when possible.
-#
-# If unsure:
-# use "activerehashing no" if you have hard latency requirements and it is
-# not a good thing in your environment that Redis can reply from time to time
-# to queries with a 2 millisecond delay.
-#
-# use "activerehashing yes" if you don't have such hard requirements but
-# want to free memory asap when possible.
-activerehashing yes
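The encoding thresholds above can be observed directly with OBJECT ENCODING. A small illustrative sketch that pushes a hash across the hash-max-ziplist-value limit of 64 bytes; the go-redis/v8 client, the server address, and the key name are all assumptions, not part of this repo:

    package main

    import (
    	"context"
    	"fmt"
    	"strings"

    	"github.com/go-redis/redis/v8"
    )

    func main() {
    	ctx := context.Background()
    	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

    	// A small hash stays in the compact ziplist encoding.
    	rdb.Del(ctx, "enc:test")
    	rdb.HSet(ctx, "enc:test", "field", "short")
    	enc, _ := rdb.ObjectEncoding(ctx, "enc:test").Result()
    	fmt.Println("small hash:", enc) // expected: ziplist

    	// A value longer than hash-max-ziplist-value (64) forces a conversion.
    	rdb.HSet(ctx, "enc:test", "big", strings.Repeat("x", 100))
    	enc, _ = rdb.ObjectEncoding(ctx, "enc:test").Result()
    	fmt.Println("after big value:", enc) // expected: hashtable
    }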
-# -# use "activerehashing yes" if you don't have such hard requirements but -# want to free memory asap when possible. -activerehashing yes - -# The client output buffer limits can be used to force disconnection of clients -# that are not reading data from the server fast enough for some reason (a -# common reason is that a Pub/Sub client can't consume messages as fast as the -# publisher can produce them). -# -# The limit can be set differently for the three different classes of clients: -# -# normal -> normal clients including MONITOR clients -# slave -> slave clients -# pubsub -> clients subscribed to at least one pubsub channel or pattern -# -# The syntax of every client-output-buffer-limit directive is the following: -# -# client-output-buffer-limit -# -# A client is immediately disconnected once the hard limit is reached, or if -# the soft limit is reached and remains reached for the specified number of -# seconds (continuously). -# So for instance if the hard limit is 32 megabytes and the soft limit is -# 16 megabytes / 10 seconds, the client will get disconnected immediately -# if the size of the output buffers reach 32 megabytes, but will also get -# disconnected if the client reaches 16 megabytes and continuously overcomes -# the limit for 10 seconds. -# -# By default normal clients are not limited because they don't receive data -# without asking (in a push way), but just after a request, so only -# asynchronous clients may create a scenario where data is requested faster -# than it can read. -# -# Instead there is a default limit for pubsub and slave clients, since -# subscribers and slaves receive data in a push fashion. -# -# Both the hard or the soft limit can be disabled by setting them to zero. -client-output-buffer-limit normal 0 0 0 -client-output-buffer-limit slave 256mb 64mb 60 -client-output-buffer-limit pubsub 32mb 8mb 60 - -# Client query buffers accumulate new commands. They are limited to a fixed -# amount by default in order to avoid that a protocol desynchronization (for -# instance due to a bug in the client) will lead to unbound memory usage in -# the query buffer. However you can configure it here if you have very special -# needs, such us huge multi/exec requests or alike. -# -# client-query-buffer-limit 1gb - -# In the Redis protocol, bulk requests, that are, elements representing single -# strings, are normally limited ot 512 mb. However you can change this limit -# here. -# -# proto-max-bulk-len 512mb - -# Redis calls an internal function to perform many background tasks, like -# closing connections of clients in timeout, purging expired keys that are -# never requested, and so forth. -# -# Not all tasks are performed with the same frequency, but Redis checks for -# tasks to perform according to the specified "hz" value. -# -# By default "hz" is set to 10. Raising the value will use more CPU when -# Redis is idle, but at the same time will make Redis more responsive when -# there are many keys expiring at the same time, and timeouts may be -# handled with more precision. -# -# The range is between 1 and 500, however a value over 100 is usually not -# a good idea. Most users should use the default of 10 and raise this up to -# 100 only in environments where very low latency is required. -hz 10 - -# When a child rewrites the AOF file, if the following option is enabled -# the file will be fsync-ed every 32 MB of data generated. This is useful -# in order to commit the file to the disk more incrementally and avoid -# big latency spikes. 
-
-# Redis LFU eviction (see the maxmemory setting) can be tuned. However it is
-# a good idea to start with the default settings and only change them after
-# investigating how to improve performance and how the keys' LFU changes over
-# time, which can be inspected via the OBJECT FREQ command.
-#
-# There are two tunable parameters in the Redis LFU implementation: the
-# counter logarithm factor and the counter decay time. It is important to
-# understand what the two parameters mean before changing them.
-#
-# The LFU counter is just 8 bits per key; its maximum value is 255, so Redis
-# uses a probabilistic increment with logarithmic behavior. Given the value
-# of the old counter, when a key is accessed, the counter is incremented in
-# this way:
-#
-# 1. A random number R between 0 and 1 is extracted.
-# 2. A probability P is calculated as 1/(old_value*lfu_log_factor+1).
-# 3. The counter is incremented only if R < P.
-#
-# The default lfu-log-factor is 10. This is a table of how the frequency
-# counter changes with different numbers of accesses and different
-# logarithmic factors:
-#
-# +--------+------------+------------+------------+------------+------------+
-# | factor | 100 hits   | 1000 hits  | 100K hits  | 1M hits    | 10M hits   |
-# +--------+------------+------------+------------+------------+------------+
-# | 0      | 104        | 255        | 255        | 255        | 255        |
-# +--------+------------+------------+------------+------------+------------+
-# | 1      | 18         | 49         | 255        | 255        | 255        |
-# +--------+------------+------------+------------+------------+------------+
-# | 10     | 10         | 18         | 142        | 255        | 255        |
-# +--------+------------+------------+------------+------------+------------+
-# | 100    | 8          | 11         | 49         | 143        | 255        |
-# +--------+------------+------------+------------+------------+------------+
-#
-# NOTE: The above table was obtained by running the following commands:
-#
-#   redis-benchmark -n 1000000 incr foo
-#   redis-cli object freq foo
-#
-# NOTE 2: The counter initial value is 5 in order to give new objects a chance
-# to accumulate hits.
-#
-# The counter decay time is the time, in minutes, that must elapse in order
-# for the key counter to be divided by two (or decremented, if its value is
-# less than or equal to 10).
-#
-# The default value for lfu-decay-time is 1. A special value of 0 means to
-# decay the counter every time it happens to be scanned.
-#
-# lfu-log-factor 10
-# lfu-decay-time 1
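The three-step probabilistic increment above is easy to simulate. This self-contained Go sketch transcribes the comment's formula literally; note that the actual Redis implementation subtracts the initial value of 5 before applying the factor, so the table's exact numbers will not reproduce, but the saturation behavior does:

    package main

    import (
    	"fmt"
    	"math/rand"
    )

    // lfuIncr applies the probabilistic increment described above: bump the
    // 8-bit counter with probability P = 1/(old_value*lfu_log_factor+1).
    func lfuIncr(counter uint8, factor float64) uint8 {
    	if counter == 255 {
    		return counter // the counter saturates at its 8-bit maximum
    	}
    	p := 1.0 / (float64(counter)*factor + 1)
    	if rand.Float64() < p {
    		counter++
    	}
    	return counter
    }

    func main() {
    	for _, hits := range []int{100, 1000, 100000} {
    		counter := uint8(5) // new keys start at 5 (see NOTE 2)
    		for i := 0; i < hits; i++ {
    			counter = lfuIncr(counter, 10) // default lfu-log-factor
    		}
    		fmt.Printf("%7d hits -> counter %d\n", hits, counter)
    	}
    }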
-
-########################### ACTIVE DEFRAGMENTATION #######################
-#
-# WARNING: THIS FEATURE IS EXPERIMENTAL. However it was stress tested
-# even in production and manually tested by multiple engineers for some
-# time.
-#
-# What is active defragmentation?
-# -------------------------------
-#
-# Active (online) defragmentation allows a Redis server to compact the
-# spaces left between small allocations and deallocations of data in memory,
-# thus allowing memory to be reclaimed.
-#
-# Fragmentation is a natural process that happens with every allocator (but
-# less so with Jemalloc, fortunately) and certain workloads. Normally a server
-# restart is needed in order to lower the fragmentation, or at least to flush
-# away all the data and create it again. However thanks to this feature
-# implemented by Oran Agra for Redis 4.0 this process can happen at runtime
-# in a "hot" way, while the server is running.
-#
-# Basically when the fragmentation is over a certain level (see the
-# configuration options below) Redis will start to create new copies of the
-# values in contiguous memory regions by exploiting certain specific Jemalloc
-# features (in order to understand if an allocation is causing fragmentation
-# and to allocate it in a better place), and at the same time, will release
-# the old copies of the data. This process, repeated incrementally for all
-# the keys, will cause the fragmentation to drop back to normal values.
-#
-# Important things to understand:
-#
-# 1. This feature is disabled by default, and only works if you compiled Redis
-#    to use the copy of Jemalloc we ship with the source code of Redis.
-#    This is the default with Linux builds.
-#
-# 2. You never need to enable this feature if you don't have fragmentation
-#    issues.
-#
-# 3. Once you experience fragmentation, you can enable this feature when
-#    needed with the command "CONFIG SET activedefrag yes".
-#
-# The configuration parameters are able to fine-tune the behavior of the
-# defragmentation process. If you are not sure about what they mean it is
-# a good idea to leave the defaults untouched.
-
-# Enable active defragmentation
-# activedefrag yes
-
-# Minimum amount of fragmentation waste to start active defrag
-# active-defrag-ignore-bytes 100mb
-
-# Minimum percentage of fragmentation to start active defrag
-# active-defrag-threshold-lower 10
-
-# Maximum percentage of fragmentation at which we use maximum effort
-# active-defrag-threshold-upper 100
-
-# Minimal effort for defrag in CPU percentage
-# active-defrag-cycle-min 25
-
-# Maximal effort for defrag in CPU percentage
-# active-defrag-cycle-max 75
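Active defrag can be toggled exactly as point 3 above describes, and its effect watched through the mem_fragmentation_ratio field of INFO memory. A hedged sketch, once more assuming the go-redis/v8 client; the CONFIG SET may fail on builds without the bundled Jemalloc, as point 1 warns:

    package main

    import (
    	"context"
    	"fmt"
    	"strings"

    	"github.com/go-redis/redis/v8"
    )

    func main() {
    	ctx := context.Background()
    	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

    	// Enable active defragmentation at runtime.
    	if err := rdb.ConfigSet(ctx, "activedefrag", "yes").Err(); err != nil {
    		fmt.Println("could not enable defrag (Jemalloc build required):", err)
    		return
    	}

    	// INFO memory reports mem_fragmentation_ratio, the signal defrag acts on.
    	info, err := rdb.Info(ctx, "memory").Result()
    	if err != nil {
    		panic(err)
    	}
    	for _, line := range strings.Split(info, "\r\n") {
    		if strings.HasPrefix(line, "mem_fragmentation_ratio:") {
    			fmt.Println(line)
    		}
    	}
    }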