From fcbc25e7896b6ea115d1f62107483c9325b4a305 Mon Sep 17 00:00:00 2001 From: Milos Gajdos Date: Fri, 25 Aug 2023 11:32:23 +0100 Subject: [PATCH] Replace redigo with redis-go We are replacing the very outdated redigo Go module with the official redis Go module, go-redis. Signed-off-by: Milos Gajdos --- configuration/configuration.go | 75 +- configuration/configuration_test.go | 54 +- go.mod | 12 +- go.sum | 34 +- registry/handlers/app.go | 111 +- registry/storage/cache/redis/redis.go | 104 +- registry/storage/cache/redis/redis_test.go | 29 +- .../github.com/dgryski/go-rendezvous/LICENSE | 21 + .../github.com/dgryski/go-rendezvous/rdv.go | 79 + vendor/github.com/go-logr/logr/.golangci.yaml | 26 + vendor/github.com/go-logr/logr/CHANGELOG.md | 6 + .../github.com/go-logr/logr/CONTRIBUTING.md | 17 + .../{gomodule/redigo => go-logr/logr}/LICENSE | 28 +- vendor/github.com/go-logr/logr/README.md | 282 + vendor/github.com/go-logr/logr/discard.go | 24 + vendor/github.com/go-logr/logr/funcr/funcr.go | 804 +++ vendor/github.com/go-logr/logr/logr.go | 550 ++ vendor/github.com/go-logr/stdr/LICENSE | 201 + vendor/github.com/go-logr/stdr/README.md | 6 + vendor/github.com/go-logr/stdr/stdr.go | 170 + .../gomodule/redigo/redis/commandinfo.go | 55 - .../github.com/gomodule/redigo/redis/conn.go | 736 --- .../github.com/gomodule/redigo/redis/doc.go | 177 - .../github.com/gomodule/redigo/redis/go17.go | 29 - .../github.com/gomodule/redigo/redis/go18.go | 9 - .../github.com/gomodule/redigo/redis/log.go | 146 - .../github.com/gomodule/redigo/redis/pool.go | 635 -- .../gomodule/redigo/redis/pubsub.go | 148 - .../github.com/gomodule/redigo/redis/redis.go | 138 - .../github.com/gomodule/redigo/redis/reply.go | 583 -- .../github.com/gomodule/redigo/redis/scan.go | 673 --- .../gomodule/redigo/redis/script.go | 91 - .../redis/go-redis/extra/rediscmd/v9/LICENSE | 25 + .../go-redis/extra/rediscmd/v9/rediscmd.go | 149 + .../redis/go-redis/extra/rediscmd/v9/safe.go | 12 + .../go-redis/extra/rediscmd/v9/unsafe.go | 21 + .../redis/go-redis/extra/redisotel/v9/LICENSE | 25 + .../go-redis/extra/redisotel/v9/README.md | 34 + .../go-redis/extra/redisotel/v9/config.go | 138 + .../go-redis/extra/redisotel/v9/metrics.go | 253 + .../go-redis/extra/redisotel/v9/tracing.go | 215 + .../github.com/redis/go-redis/v9/.gitignore | 4 + .../redis/go-redis/v9/.golangci.yml | 4 + .../redis/go-redis/v9/.prettierrc.yml | 4 + .../github.com/redis/go-redis/v9/CHANGELOG.md | 124 + vendor/github.com/redis/go-redis/v9/LICENSE | 25 + vendor/github.com/redis/go-redis/v9/Makefile | 41 + vendor/github.com/redis/go-redis/v9/README.md | 224 + .../github.com/redis/go-redis/v9/RELEASING.md | 15 + .../github.com/redis/go-redis/v9/cluster.go | 1901 ++++++ .../redis/go-redis/v9/cluster_commands.go | 109 + .../github.com/redis/go-redis/v9/command.go | 5235 +++++++++++++++++ .../github.com/redis/go-redis/v9/commands.go | 3970 +++++++++++++ vendor/github.com/redis/go-redis/v9/doc.go | 4 + vendor/github.com/redis/go-redis/v9/error.go | 155 + .../redis/go-redis/v9/internal/arg.go | 58 + .../go-redis/v9/internal/hashtag/hashtag.go | 78 + .../redis/go-redis/v9/internal/hscan/hscan.go | 207 + .../go-redis/v9/internal/hscan/structmap.go | 121 + .../redis/go-redis/v9/internal/internal.go | 29 + .../redis/go-redis/v9/internal/log.go | 26 + .../redis/go-redis/v9/internal/once.go | 63 + .../redis/go-redis/v9/internal/pool/conn.go | 127 + .../go-redis/v9/internal/pool/conn_check.go | 50 + .../v9/internal/pool/conn_check_dummy.go | 10 + 
.../redis/go-redis/v9/internal/pool/pool.go | 502 ++ .../go-redis/v9/internal/pool/pool_single.go | 58 + .../go-redis/v9/internal/pool/pool_sticky.go | 201 + .../go-redis/v9/internal/proto/reader.go | 552 ++ .../redis/go-redis/v9/internal/proto/scan.go | 185 + .../go-redis/v9/internal/proto/writer.go | 158 + .../redis/go-redis/v9/internal/rand/rand.go | 50 + .../redis/go-redis/v9/internal/util.go | 46 + .../redis/go-redis/v9/internal/util/safe.go | 12 + .../go-redis/v9/internal/util/strconv.go | 19 + .../redis/go-redis/v9/internal/util/unsafe.go | 23 + .../github.com/redis/go-redis/v9/iterator.go | 66 + .../github.com/redis/go-redis/v9/options.go | 505 ++ .../github.com/redis/go-redis/v9/package.json | 8 + .../github.com/redis/go-redis/v9/pipeline.go | 121 + .../redis/go-redis/v9/probabilistic.go | 1433 +++++ vendor/github.com/redis/go-redis/v9/pubsub.go | 729 +++ vendor/github.com/redis/go-redis/v9/redis.go | 827 +++ .../redis/go-redis/v9/redis_gears.go | 161 + vendor/github.com/redis/go-redis/v9/result.go | 188 + vendor/github.com/redis/go-redis/v9/ring.go | 832 +++ vendor/github.com/redis/go-redis/v9/script.go | 84 + .../github.com/redis/go-redis/v9/sentinel.go | 820 +++ vendor/github.com/redis/go-redis/v9/tx.go | 151 + .../github.com/redis/go-redis/v9/universal.go | 231 + .../github.com/redis/go-redis/v9/version.go | 6 + .../go.opentelemetry.io/otel/.codespellignore | 5 + vendor/go.opentelemetry.io/otel/.codespellrc | 10 + .../go.opentelemetry.io/otel/.gitattributes | 3 + vendor/go.opentelemetry.io/otel/.gitignore | 24 + vendor/go.opentelemetry.io/otel/.gitmodules | 3 + vendor/go.opentelemetry.io/otel/.golangci.yml | 246 + vendor/go.opentelemetry.io/otel/.lycheeignore | 6 + .../otel/.markdownlint.yaml | 29 + vendor/go.opentelemetry.io/otel/CHANGELOG.md | 2567 ++++++++ vendor/go.opentelemetry.io/otel/CODEOWNERS | 17 + .../go.opentelemetry.io/otel/CONTRIBUTING.md | 562 ++ vendor/go.opentelemetry.io/otel/LICENSE | 201 + vendor/go.opentelemetry.io/otel/Makefile | 269 + vendor/go.opentelemetry.io/otel/README.md | 109 + vendor/go.opentelemetry.io/otel/RELEASING.md | 126 + vendor/go.opentelemetry.io/otel/VERSIONING.md | 224 + .../go.opentelemetry.io/otel/attribute/doc.go | 16 + .../otel/attribute/encoder.go | 146 + .../otel/attribute/iterator.go | 161 + .../go.opentelemetry.io/otel/attribute/key.go | 134 + .../go.opentelemetry.io/otel/attribute/kv.go | 86 + .../go.opentelemetry.io/otel/attribute/set.go | 436 ++ .../otel/attribute/type_string.go | 31 + .../otel/attribute/value.go | 270 + .../otel/baggage/baggage.go | 562 ++ .../otel/baggage/context.go | 39 + .../go.opentelemetry.io/otel/baggage/doc.go | 20 + .../go.opentelemetry.io/otel/codes/codes.go | 116 + vendor/go.opentelemetry.io/otel/codes/doc.go | 21 + vendor/go.opentelemetry.io/otel/doc.go | 34 + .../go.opentelemetry.io/otel/error_handler.go | 38 + .../go.opentelemetry.io/otel/get_main_pkgs.sh | 41 + vendor/go.opentelemetry.io/otel/handler.go | 48 + .../otel/internal/attribute/attribute.go | 111 + .../otel/internal/baggage/baggage.go | 43 + .../otel/internal/baggage/context.go | 92 + .../otel/internal/global/handler.go | 103 + .../otel/internal/global/instruments.go | 359 ++ .../otel/internal/global/internal_logging.go | 70 + .../otel/internal/global/meter.go | 354 ++ .../otel/internal/global/propagator.go | 82 + .../otel/internal/global/state.go | 156 + .../otel/internal/global/trace.go | 192 + .../otel/internal/rawhelpers.go | 55 + .../otel/internal_logging.go | 26 + vendor/go.opentelemetry.io/otel/metric.go | 53 + 
.../go.opentelemetry.io/otel/metric/LICENSE | 201 + .../otel/metric/asyncfloat64.go | 271 + .../otel/metric/asyncint64.go | 269 + .../go.opentelemetry.io/otel/metric/config.go | 92 + vendor/go.opentelemetry.io/otel/metric/doc.go | 170 + .../otel/metric/embedded/embedded.go | 234 + .../otel/metric/instrument.go | 332 ++ .../go.opentelemetry.io/otel/metric/meter.go | 210 + .../otel/metric/syncfloat64.go | 179 + .../otel/metric/syncint64.go | 179 + .../go.opentelemetry.io/otel/propagation.go | 31 + .../otel/propagation/baggage.go | 58 + .../otel/propagation/doc.go | 24 + .../otel/propagation/propagation.go | 153 + .../otel/propagation/trace_context.go | 159 + .../go.opentelemetry.io/otel/requirements.txt | 1 + .../otel/semconv/internal/http.go | 338 ++ .../otel/semconv/v1.10.0/doc.go | 20 + .../otel/semconv/v1.10.0/exception.go | 20 + .../otel/semconv/v1.10.0/http.go | 114 + .../otel/semconv/v1.10.0/resource.go | 981 +++ .../otel/semconv/v1.10.0/schema.go | 20 + .../otel/semconv/v1.10.0/trace.go | 1700 ++++++ .../otel/semconv/v1.12.0/doc.go | 20 + .../otel/semconv/v1.12.0/exception.go | 20 + .../otel/semconv/v1.12.0/http.go | 114 + .../otel/semconv/v1.12.0/resource.go | 1042 ++++ .../otel/semconv/v1.12.0/schema.go | 20 + .../otel/semconv/v1.12.0/trace.go | 1704 ++++++ vendor/go.opentelemetry.io/otel/trace.go | 47 + vendor/go.opentelemetry.io/otel/trace/LICENSE | 201 + .../go.opentelemetry.io/otel/trace/config.go | 333 ++ .../go.opentelemetry.io/otel/trace/context.go | 61 + vendor/go.opentelemetry.io/otel/trace/doc.go | 66 + .../otel/trace/nonrecording.go | 27 + vendor/go.opentelemetry.io/otel/trace/noop.go | 89 + .../go.opentelemetry.io/otel/trace/trace.go | 551 ++ .../otel/trace/tracestate.go | 212 + .../otel/verify_examples.sh | 85 + vendor/go.opentelemetry.io/otel/version.go | 20 + vendor/go.opentelemetry.io/otel/versions.yaml | 57 + vendor/golang.org/x/sys/unix/ioctl_signed.go | 70 + .../sys/unix/{ioctl.go => ioctl_unsigned.go} | 4 +- vendor/golang.org/x/sys/unix/ioctl_zos.go | 12 +- vendor/golang.org/x/sys/unix/mkerrors.sh | 5 +- vendor/golang.org/x/sys/unix/syscall_aix.go | 4 +- .../golang.org/x/sys/unix/syscall_aix_ppc.go | 1 - .../x/sys/unix/syscall_aix_ppc64.go | 1 - .../golang.org/x/sys/unix/syscall_darwin.go | 3 +- .../x/sys/unix/syscall_dragonfly.go | 1 - .../golang.org/x/sys/unix/syscall_freebsd.go | 1 - vendor/golang.org/x/sys/unix/syscall_linux.go | 10 +- .../x/sys/unix/syscall_linux_386.go | 27 - .../x/sys/unix/syscall_linux_amd64.go | 1 - .../x/sys/unix/syscall_linux_arm.go | 27 - .../x/sys/unix/syscall_linux_arm64.go | 10 - .../x/sys/unix/syscall_linux_loong64.go | 5 - .../x/sys/unix/syscall_linux_mips64x.go | 1 - .../x/sys/unix/syscall_linux_mipsx.go | 27 - .../x/sys/unix/syscall_linux_ppc.go | 27 - .../x/sys/unix/syscall_linux_ppc64x.go | 1 - .../x/sys/unix/syscall_linux_riscv64.go | 1 - .../x/sys/unix/syscall_linux_s390x.go | 1 - .../x/sys/unix/syscall_linux_sparc64.go | 1 - .../golang.org/x/sys/unix/syscall_netbsd.go | 2 - .../golang.org/x/sys/unix/syscall_openbsd.go | 1 - .../golang.org/x/sys/unix/syscall_solaris.go | 21 +- vendor/golang.org/x/sys/unix/syscall_unix.go | 7 + .../x/sys/unix/syscall_zos_s390x.go | 4 +- .../x/sys/unix/zerrors_darwin_amd64.go | 19 + .../x/sys/unix/zerrors_darwin_arm64.go | 19 + vendor/golang.org/x/sys/unix/zerrors_linux.go | 14 + .../golang.org/x/sys/unix/zsyscall_aix_ppc.go | 15 +- .../x/sys/unix/zsyscall_aix_ppc64.go | 18 +- .../x/sys/unix/zsyscall_aix_ppc64_gc.go | 10 - .../x/sys/unix/zsyscall_aix_ppc64_gccgo.go | 10 +- 
.../x/sys/unix/zsyscall_darwin_amd64.go | 39 +- .../x/sys/unix/zsyscall_darwin_amd64.s | 11 +- .../x/sys/unix/zsyscall_darwin_arm64.go | 39 +- .../x/sys/unix/zsyscall_darwin_arm64.s | 11 +- .../x/sys/unix/zsyscall_dragonfly_amd64.go | 10 - .../x/sys/unix/zsyscall_freebsd_386.go | 10 - .../x/sys/unix/zsyscall_freebsd_amd64.go | 10 - .../x/sys/unix/zsyscall_freebsd_arm.go | 10 - .../x/sys/unix/zsyscall_freebsd_arm64.go | 10 - .../x/sys/unix/zsyscall_freebsd_riscv64.go | 10 - .../golang.org/x/sys/unix/zsyscall_linux.go | 10 - .../x/sys/unix/zsyscall_linux_386.go | 10 - .../x/sys/unix/zsyscall_linux_amd64.go | 10 - .../x/sys/unix/zsyscall_linux_arm.go | 10 - .../x/sys/unix/zsyscall_linux_arm64.go | 10 - .../x/sys/unix/zsyscall_linux_mips.go | 10 - .../x/sys/unix/zsyscall_linux_mips64.go | 10 - .../x/sys/unix/zsyscall_linux_mips64le.go | 10 - .../x/sys/unix/zsyscall_linux_mipsle.go | 10 - .../x/sys/unix/zsyscall_linux_ppc.go | 10 - .../x/sys/unix/zsyscall_linux_ppc64.go | 10 - .../x/sys/unix/zsyscall_linux_ppc64le.go | 10 - .../x/sys/unix/zsyscall_linux_riscv64.go | 10 - .../x/sys/unix/zsyscall_linux_s390x.go | 10 - .../x/sys/unix/zsyscall_linux_sparc64.go | 10 - .../x/sys/unix/zsyscall_netbsd_386.go | 10 - .../x/sys/unix/zsyscall_netbsd_amd64.go | 10 - .../x/sys/unix/zsyscall_netbsd_arm.go | 10 - .../x/sys/unix/zsyscall_netbsd_arm64.go | 10 - .../x/sys/unix/zsyscall_openbsd_386.go | 14 - .../x/sys/unix/zsyscall_openbsd_386.s | 5 - .../x/sys/unix/zsyscall_openbsd_amd64.go | 14 - .../x/sys/unix/zsyscall_openbsd_amd64.s | 5 - .../x/sys/unix/zsyscall_openbsd_arm.go | 14 - .../x/sys/unix/zsyscall_openbsd_arm.s | 5 - .../x/sys/unix/zsyscall_openbsd_arm64.go | 14 - .../x/sys/unix/zsyscall_openbsd_arm64.s | 5 - .../x/sys/unix/zsyscall_openbsd_mips64.go | 14 - .../x/sys/unix/zsyscall_openbsd_mips64.s | 5 - .../x/sys/unix/zsyscall_openbsd_ppc64.go | 14 - .../x/sys/unix/zsyscall_openbsd_ppc64.s | 6 - .../x/sys/unix/zsyscall_openbsd_riscv64.go | 14 - .../x/sys/unix/zsyscall_openbsd_riscv64.s | 5 - .../x/sys/unix/zsyscall_solaris_amd64.go | 17 +- .../x/sys/unix/zsyscall_zos_s390x.go | 4 +- .../x/sys/unix/ztypes_darwin_amd64.go | 11 + .../x/sys/unix/ztypes_darwin_arm64.go | 11 + .../golang.org/x/sys/windows/env_windows.go | 6 +- .../golang.org/x/sys/windows/exec_windows.go | 7 +- vendor/golang.org/x/sys/windows/service.go | 7 + .../golang.org/x/sys/windows/types_windows.go | 10 +- .../x/sys/windows/zsyscall_windows.go | 9 + vendor/modules.txt | 52 +- 266 files changed, 42877 insertions(+), 4320 deletions(-) create mode 100644 vendor/github.com/dgryski/go-rendezvous/LICENSE create mode 100644 vendor/github.com/dgryski/go-rendezvous/rdv.go create mode 100644 vendor/github.com/go-logr/logr/.golangci.yaml create mode 100644 vendor/github.com/go-logr/logr/CHANGELOG.md create mode 100644 vendor/github.com/go-logr/logr/CONTRIBUTING.md rename vendor/github.com/{gomodule/redigo => go-logr/logr}/LICENSE (89%) create mode 100644 vendor/github.com/go-logr/logr/README.md create mode 100644 vendor/github.com/go-logr/logr/discard.go create mode 100644 vendor/github.com/go-logr/logr/funcr/funcr.go create mode 100644 vendor/github.com/go-logr/logr/logr.go create mode 100644 vendor/github.com/go-logr/stdr/LICENSE create mode 100644 vendor/github.com/go-logr/stdr/README.md create mode 100644 vendor/github.com/go-logr/stdr/stdr.go delete mode 100644 vendor/github.com/gomodule/redigo/redis/commandinfo.go delete mode 100644 vendor/github.com/gomodule/redigo/redis/conn.go delete mode 100644 
vendor/github.com/gomodule/redigo/redis/doc.go delete mode 100644 vendor/github.com/gomodule/redigo/redis/go17.go delete mode 100644 vendor/github.com/gomodule/redigo/redis/go18.go delete mode 100644 vendor/github.com/gomodule/redigo/redis/log.go delete mode 100644 vendor/github.com/gomodule/redigo/redis/pool.go delete mode 100644 vendor/github.com/gomodule/redigo/redis/pubsub.go delete mode 100644 vendor/github.com/gomodule/redigo/redis/redis.go delete mode 100644 vendor/github.com/gomodule/redigo/redis/reply.go delete mode 100644 vendor/github.com/gomodule/redigo/redis/scan.go delete mode 100644 vendor/github.com/gomodule/redigo/redis/script.go create mode 100644 vendor/github.com/redis/go-redis/extra/rediscmd/v9/LICENSE create mode 100644 vendor/github.com/redis/go-redis/extra/rediscmd/v9/rediscmd.go create mode 100644 vendor/github.com/redis/go-redis/extra/rediscmd/v9/safe.go create mode 100644 vendor/github.com/redis/go-redis/extra/rediscmd/v9/unsafe.go create mode 100644 vendor/github.com/redis/go-redis/extra/redisotel/v9/LICENSE create mode 100644 vendor/github.com/redis/go-redis/extra/redisotel/v9/README.md create mode 100644 vendor/github.com/redis/go-redis/extra/redisotel/v9/config.go create mode 100644 vendor/github.com/redis/go-redis/extra/redisotel/v9/metrics.go create mode 100644 vendor/github.com/redis/go-redis/extra/redisotel/v9/tracing.go create mode 100644 vendor/github.com/redis/go-redis/v9/.gitignore create mode 100644 vendor/github.com/redis/go-redis/v9/.golangci.yml create mode 100644 vendor/github.com/redis/go-redis/v9/.prettierrc.yml create mode 100644 vendor/github.com/redis/go-redis/v9/CHANGELOG.md create mode 100644 vendor/github.com/redis/go-redis/v9/LICENSE create mode 100644 vendor/github.com/redis/go-redis/v9/Makefile create mode 100644 vendor/github.com/redis/go-redis/v9/README.md create mode 100644 vendor/github.com/redis/go-redis/v9/RELEASING.md create mode 100644 vendor/github.com/redis/go-redis/v9/cluster.go create mode 100644 vendor/github.com/redis/go-redis/v9/cluster_commands.go create mode 100644 vendor/github.com/redis/go-redis/v9/command.go create mode 100644 vendor/github.com/redis/go-redis/v9/commands.go create mode 100644 vendor/github.com/redis/go-redis/v9/doc.go create mode 100644 vendor/github.com/redis/go-redis/v9/error.go create mode 100644 vendor/github.com/redis/go-redis/v9/internal/arg.go create mode 100644 vendor/github.com/redis/go-redis/v9/internal/hashtag/hashtag.go create mode 100644 vendor/github.com/redis/go-redis/v9/internal/hscan/hscan.go create mode 100644 vendor/github.com/redis/go-redis/v9/internal/hscan/structmap.go create mode 100644 vendor/github.com/redis/go-redis/v9/internal/internal.go create mode 100644 vendor/github.com/redis/go-redis/v9/internal/log.go create mode 100644 vendor/github.com/redis/go-redis/v9/internal/once.go create mode 100644 vendor/github.com/redis/go-redis/v9/internal/pool/conn.go create mode 100644 vendor/github.com/redis/go-redis/v9/internal/pool/conn_check.go create mode 100644 vendor/github.com/redis/go-redis/v9/internal/pool/conn_check_dummy.go create mode 100644 vendor/github.com/redis/go-redis/v9/internal/pool/pool.go create mode 100644 vendor/github.com/redis/go-redis/v9/internal/pool/pool_single.go create mode 100644 vendor/github.com/redis/go-redis/v9/internal/pool/pool_sticky.go create mode 100644 vendor/github.com/redis/go-redis/v9/internal/proto/reader.go create mode 100644 vendor/github.com/redis/go-redis/v9/internal/proto/scan.go create mode 100644 
vendor/github.com/redis/go-redis/v9/internal/proto/writer.go create mode 100644 vendor/github.com/redis/go-redis/v9/internal/rand/rand.go create mode 100644 vendor/github.com/redis/go-redis/v9/internal/util.go create mode 100644 vendor/github.com/redis/go-redis/v9/internal/util/safe.go create mode 100644 vendor/github.com/redis/go-redis/v9/internal/util/strconv.go create mode 100644 vendor/github.com/redis/go-redis/v9/internal/util/unsafe.go create mode 100644 vendor/github.com/redis/go-redis/v9/iterator.go create mode 100644 vendor/github.com/redis/go-redis/v9/options.go create mode 100644 vendor/github.com/redis/go-redis/v9/package.json create mode 100644 vendor/github.com/redis/go-redis/v9/pipeline.go create mode 100644 vendor/github.com/redis/go-redis/v9/probabilistic.go create mode 100644 vendor/github.com/redis/go-redis/v9/pubsub.go create mode 100644 vendor/github.com/redis/go-redis/v9/redis.go create mode 100644 vendor/github.com/redis/go-redis/v9/redis_gears.go create mode 100644 vendor/github.com/redis/go-redis/v9/result.go create mode 100644 vendor/github.com/redis/go-redis/v9/ring.go create mode 100644 vendor/github.com/redis/go-redis/v9/script.go create mode 100644 vendor/github.com/redis/go-redis/v9/sentinel.go create mode 100644 vendor/github.com/redis/go-redis/v9/tx.go create mode 100644 vendor/github.com/redis/go-redis/v9/universal.go create mode 100644 vendor/github.com/redis/go-redis/v9/version.go create mode 100644 vendor/go.opentelemetry.io/otel/.codespellignore create mode 100644 vendor/go.opentelemetry.io/otel/.codespellrc create mode 100644 vendor/go.opentelemetry.io/otel/.gitattributes create mode 100644 vendor/go.opentelemetry.io/otel/.gitignore create mode 100644 vendor/go.opentelemetry.io/otel/.gitmodules create mode 100644 vendor/go.opentelemetry.io/otel/.golangci.yml create mode 100644 vendor/go.opentelemetry.io/otel/.lycheeignore create mode 100644 vendor/go.opentelemetry.io/otel/.markdownlint.yaml create mode 100644 vendor/go.opentelemetry.io/otel/CHANGELOG.md create mode 100644 vendor/go.opentelemetry.io/otel/CODEOWNERS create mode 100644 vendor/go.opentelemetry.io/otel/CONTRIBUTING.md create mode 100644 vendor/go.opentelemetry.io/otel/LICENSE create mode 100644 vendor/go.opentelemetry.io/otel/Makefile create mode 100644 vendor/go.opentelemetry.io/otel/README.md create mode 100644 vendor/go.opentelemetry.io/otel/RELEASING.md create mode 100644 vendor/go.opentelemetry.io/otel/VERSIONING.md create mode 100644 vendor/go.opentelemetry.io/otel/attribute/doc.go create mode 100644 vendor/go.opentelemetry.io/otel/attribute/encoder.go create mode 100644 vendor/go.opentelemetry.io/otel/attribute/iterator.go create mode 100644 vendor/go.opentelemetry.io/otel/attribute/key.go create mode 100644 vendor/go.opentelemetry.io/otel/attribute/kv.go create mode 100644 vendor/go.opentelemetry.io/otel/attribute/set.go create mode 100644 vendor/go.opentelemetry.io/otel/attribute/type_string.go create mode 100644 vendor/go.opentelemetry.io/otel/attribute/value.go create mode 100644 vendor/go.opentelemetry.io/otel/baggage/baggage.go create mode 100644 vendor/go.opentelemetry.io/otel/baggage/context.go create mode 100644 vendor/go.opentelemetry.io/otel/baggage/doc.go create mode 100644 vendor/go.opentelemetry.io/otel/codes/codes.go create mode 100644 vendor/go.opentelemetry.io/otel/codes/doc.go create mode 100644 vendor/go.opentelemetry.io/otel/doc.go create mode 100644 vendor/go.opentelemetry.io/otel/error_handler.go create mode 100644 
vendor/go.opentelemetry.io/otel/get_main_pkgs.sh create mode 100644 vendor/go.opentelemetry.io/otel/handler.go create mode 100644 vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go create mode 100644 vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go create mode 100644 vendor/go.opentelemetry.io/otel/internal/baggage/context.go create mode 100644 vendor/go.opentelemetry.io/otel/internal/global/handler.go create mode 100644 vendor/go.opentelemetry.io/otel/internal/global/instruments.go create mode 100644 vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go create mode 100644 vendor/go.opentelemetry.io/otel/internal/global/meter.go create mode 100644 vendor/go.opentelemetry.io/otel/internal/global/propagator.go create mode 100644 vendor/go.opentelemetry.io/otel/internal/global/state.go create mode 100644 vendor/go.opentelemetry.io/otel/internal/global/trace.go create mode 100644 vendor/go.opentelemetry.io/otel/internal/rawhelpers.go create mode 100644 vendor/go.opentelemetry.io/otel/internal_logging.go create mode 100644 vendor/go.opentelemetry.io/otel/metric.go create mode 100644 vendor/go.opentelemetry.io/otel/metric/LICENSE create mode 100644 vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go create mode 100644 vendor/go.opentelemetry.io/otel/metric/asyncint64.go create mode 100644 vendor/go.opentelemetry.io/otel/metric/config.go create mode 100644 vendor/go.opentelemetry.io/otel/metric/doc.go create mode 100644 vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go create mode 100644 vendor/go.opentelemetry.io/otel/metric/instrument.go create mode 100644 vendor/go.opentelemetry.io/otel/metric/meter.go create mode 100644 vendor/go.opentelemetry.io/otel/metric/syncfloat64.go create mode 100644 vendor/go.opentelemetry.io/otel/metric/syncint64.go create mode 100644 vendor/go.opentelemetry.io/otel/propagation.go create mode 100644 vendor/go.opentelemetry.io/otel/propagation/baggage.go create mode 100644 vendor/go.opentelemetry.io/otel/propagation/doc.go create mode 100644 vendor/go.opentelemetry.io/otel/propagation/propagation.go create mode 100644 vendor/go.opentelemetry.io/otel/propagation/trace_context.go create mode 100644 vendor/go.opentelemetry.io/otel/requirements.txt create mode 100644 vendor/go.opentelemetry.io/otel/semconv/internal/http.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.10.0/doc.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.10.0/exception.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.10.0/http.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.10.0/resource.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.10.0/schema.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.10.0/trace.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.12.0/doc.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.12.0/exception.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.12.0/http.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.12.0/resource.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.12.0/schema.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.12.0/trace.go create mode 100644 vendor/go.opentelemetry.io/otel/trace.go create mode 100644 vendor/go.opentelemetry.io/otel/trace/LICENSE create mode 100644 vendor/go.opentelemetry.io/otel/trace/config.go create mode 100644 vendor/go.opentelemetry.io/otel/trace/context.go create mode 100644 
vendor/go.opentelemetry.io/otel/trace/doc.go create mode 100644 vendor/go.opentelemetry.io/otel/trace/nonrecording.go create mode 100644 vendor/go.opentelemetry.io/otel/trace/noop.go create mode 100644 vendor/go.opentelemetry.io/otel/trace/trace.go create mode 100644 vendor/go.opentelemetry.io/otel/trace/tracestate.go create mode 100644 vendor/go.opentelemetry.io/otel/verify_examples.sh create mode 100644 vendor/go.opentelemetry.io/otel/version.go create mode 100644 vendor/go.opentelemetry.io/otel/versions.yaml create mode 100644 vendor/golang.org/x/sys/unix/ioctl_signed.go rename vendor/golang.org/x/sys/unix/{ioctl.go => ioctl_unsigned.go} (92%) diff --git a/configuration/configuration.go b/configuration/configuration.go index f9f2574b..7d3fac0b 100644 --- a/configuration/configuration.go +++ b/configuration/configuration.go @@ -167,42 +167,7 @@ type Configuration struct { Notifications Notifications `yaml:"notifications,omitempty"` // Redis configures the redis pool available to the registry webapp. - Redis struct { - // Addr specifies the the redis instance available to the application. - Addr string `yaml:"addr,omitempty"` - - // Usernames can be used as a finer-grained permission control since the introduction of the redis 6.0. - Username string `yaml:"username,omitempty"` - - // Password string to use when making a connection. - Password string `yaml:"password,omitempty"` - - // DB specifies the database to connect to on the redis instance. - DB int `yaml:"db,omitempty"` - - // TLS configures settings for redis in-transit encryption - TLS struct { - Enabled bool `yaml:"enabled,omitempty"` - } `yaml:"tls,omitempty"` - - DialTimeout time.Duration `yaml:"dialtimeout,omitempty"` // timeout for connect - ReadTimeout time.Duration `yaml:"readtimeout,omitempty"` // timeout for reads of data - WriteTimeout time.Duration `yaml:"writetimeout,omitempty"` // timeout for writes of data - - // Pool configures the behavior of the redis connection pool. - Pool struct { - // MaxIdle sets the maximum number of idle connections. - MaxIdle int `yaml:"maxidle,omitempty"` - - // MaxActive sets the maximum number of connections that should be - // opened before blocking a connection request. - MaxActive int `yaml:"maxactive,omitempty"` - - // IdleTimeout sets the amount time to wait before closing - // inactive connections. - IdleTimeout time.Duration `yaml:"idletimeout,omitempty"` - } `yaml:"pool,omitempty"` - } `yaml:"redis,omitempty"` + Redis Redis `yaml:"redis,omitempty"` Health Health `yaml:"health,omitempty"` Catalog Catalog `yaml:"catalog,omitempty"` @@ -330,6 +295,44 @@ type FileChecker struct { Threshold int `yaml:"threshold,omitempty"` } +// Redis configures the redis pool available to the registry webapp. +type Redis struct { + // Addr specifies the the redis instance available to the application. + Addr string `yaml:"addr,omitempty"` + + // Usernames can be used as a finer-grained permission control since the introduction of the redis 6.0. + Username string `yaml:"username,omitempty"` + + // Password string to use when making a connection. + Password string `yaml:"password,omitempty"` + + // DB specifies the database to connect to on the redis instance. 
+ DB int `yaml:"db,omitempty"` + + // TLS configures settings for redis in-transit encryption + TLS struct { + Enabled bool `yaml:"enabled,omitempty"` + } `yaml:"tls,omitempty"` + + DialTimeout time.Duration `yaml:"dialtimeout,omitempty"` // timeout for connect + ReadTimeout time.Duration `yaml:"readtimeout,omitempty"` // timeout for reads of data + WriteTimeout time.Duration `yaml:"writetimeout,omitempty"` // timeout for writes of data + + // Pool configures the behavior of the redis connection pool. + Pool struct { + // MaxIdle sets the maximum number of idle connections. + MaxIdle int `yaml:"maxidle,omitempty"` + + // MaxActive sets the maximum number of connections that should be + // opened before blocking a connection request. + MaxActive int `yaml:"maxactive,omitempty"` + + // IdleTimeout sets the amount time to wait before closing + // inactive connections. + IdleTimeout time.Duration `yaml:"idletimeout,omitempty"` + } `yaml:"pool,omitempty"` +} + // HTTPChecker is a type of entry in the health section for checking HTTP URIs. type HTTPChecker struct { // Timeout is the duration to wait before timing out the HTTP request diff --git a/configuration/configuration_test.go b/configuration/configuration_test.go index aa64acc8..0ac8dbdc 100644 --- a/configuration/configuration_test.go +++ b/configuration/configuration_test.go @@ -126,23 +126,7 @@ var configStruct = Configuration{ Disabled: false, }, }, - Redis: struct { - Addr string `yaml:"addr,omitempty"` - Username string `yaml:"username,omitempty"` - Password string `yaml:"password,omitempty"` - DB int `yaml:"db,omitempty"` - TLS struct { - Enabled bool `yaml:"enabled,omitempty"` - } `yaml:"tls,omitempty"` - DialTimeout time.Duration `yaml:"dialtimeout,omitempty"` - ReadTimeout time.Duration `yaml:"readtimeout,omitempty"` - WriteTimeout time.Duration `yaml:"writetimeout,omitempty"` - Pool struct { - MaxIdle int `yaml:"maxidle,omitempty"` - MaxActive int `yaml:"maxactive,omitempty"` - IdleTimeout time.Duration `yaml:"idletimeout,omitempty"` - } `yaml:"pool,omitempty"` - }{ + Redis: Redis{ Addr: "localhost:6379", Username: "alice", Password: "123456", @@ -279,23 +263,7 @@ func (suite *ConfigSuite) TestParseSimple(c *check.C) { func (suite *ConfigSuite) TestParseInmemory(c *check.C) { suite.expectedConfig.Storage = Storage{"inmemory": Parameters{}} suite.expectedConfig.Log.Fields = nil - suite.expectedConfig.Redis = struct { - Addr string `yaml:"addr,omitempty"` - Username string `yaml:"username,omitempty"` - Password string `yaml:"password,omitempty"` - DB int `yaml:"db,omitempty"` - TLS struct { - Enabled bool `yaml:"enabled,omitempty"` - } `yaml:"tls,omitempty"` - DialTimeout time.Duration `yaml:"dialtimeout,omitempty"` - ReadTimeout time.Duration `yaml:"readtimeout,omitempty"` - WriteTimeout time.Duration `yaml:"writetimeout,omitempty"` - Pool struct { - MaxIdle int `yaml:"maxidle,omitempty"` - MaxActive int `yaml:"maxactive,omitempty"` - IdleTimeout time.Duration `yaml:"idletimeout,omitempty"` - } `yaml:"pool,omitempty"` - }{} + suite.expectedConfig.Redis = Redis{} config, err := Parse(bytes.NewReader([]byte(inmemoryConfigYamlV0_1))) c.Assert(err, check.IsNil) @@ -315,23 +283,7 @@ func (suite *ConfigSuite) TestParseIncomplete(c *check.C) { suite.expectedConfig.Auth = Auth{"silly": Parameters{"realm": "silly"}} suite.expectedConfig.Notifications = Notifications{} suite.expectedConfig.HTTP.Headers = nil - suite.expectedConfig.Redis = struct { - Addr string `yaml:"addr,omitempty"` - Username string `yaml:"username,omitempty"` - 
Password string `yaml:"password,omitempty"` - DB int `yaml:"db,omitempty"` - TLS struct { - Enabled bool `yaml:"enabled,omitempty"` - } `yaml:"tls,omitempty"` - DialTimeout time.Duration `yaml:"dialtimeout,omitempty"` - ReadTimeout time.Duration `yaml:"readtimeout,omitempty"` - WriteTimeout time.Duration `yaml:"writetimeout,omitempty"` - Pool struct { - MaxIdle int `yaml:"maxidle,omitempty"` - MaxActive int `yaml:"maxactive,omitempty"` - IdleTimeout time.Duration `yaml:"idletimeout,omitempty"` - } `yaml:"pool,omitempty"` - }{} + suite.expectedConfig.Redis = Redis{} // Note: this also tests that REGISTRY_STORAGE and // REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY can be used together diff --git a/go.mod b/go.mod index c61f2a9e..120b4473 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,6 @@ require ( github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c github.com/docker/go-metrics v0.0.1 github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1 - github.com/gomodule/redigo v1.8.2 github.com/gorilla/handlers v1.5.1 github.com/gorilla/mux v1.8.0 github.com/hashicorp/golang-lru/arc/v2 v2.0.5 @@ -21,6 +20,8 @@ require ( github.com/mitchellh/mapstructure v1.1.2 github.com/opencontainers/go-digest v1.0.0 github.com/opencontainers/image-spec v1.0.2 + github.com/redis/go-redis/extra/redisotel/v9 v9.0.5 + github.com/redis/go-redis/v9 v9.1.0 github.com/sirupsen/logrus v1.8.1 github.com/spf13/cobra v1.6.1 golang.org/x/crypto v0.7.0 @@ -40,7 +41,10 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/cyphar/filepath-securejoin v0.2.3 // indirect + github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/felixge/httpsnoop v1.0.1 // indirect + github.com/go-logr/logr v1.2.4 // indirect + github.com/go-logr/stdr v1.2.2 // indirect github.com/golang-jwt/jwt/v4 v4.5.0 // indirect github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect github.com/golang/protobuf v1.5.2 // indirect @@ -60,10 +64,14 @@ require ( github.com/prometheus/client_model v0.2.0 // indirect github.com/prometheus/common v0.32.1 // indirect github.com/prometheus/procfs v0.7.3 // indirect + github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5 // indirect github.com/spf13/pflag v1.0.5 // indirect go.opencensus.io v0.24.0 // indirect + go.opentelemetry.io/otel v1.16.0 // indirect + go.opentelemetry.io/otel/metric v1.16.0 // indirect + go.opentelemetry.io/otel/trace v1.16.0 // indirect golang.org/x/net v0.8.0 // indirect; updated for CVE-2022-27664, CVE-2022-41717 - golang.org/x/sys v0.6.0 // indirect + golang.org/x/sys v0.8.0 // indirect golang.org/x/text v0.8.0 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect google.golang.org/appengine v1.6.7 // indirect diff --git a/go.sum b/go.sum index 374c924e..0a8694e2 100644 --- a/go.sum +++ b/go.sum @@ -69,6 +69,10 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bshuster-repo/logrus-logstash-hook v1.0.0 h1:e+C0SB5R1pu//O4MQ3f9cFuPGoOVeF2fE4Og9otCc70= github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= +github.com/bsm/ginkgo/v2 v2.7.0/go.mod h1:AiKlXPm7ItEHNc/2+OkrNG4E0ITzojb9/xWzvQ9XZ9w= +github.com/bsm/ginkgo/v2 v2.9.5 h1:rtVBYPs3+TC5iLUVOis1B9tjLTup7Cj5IfzosKtvTJ0= +github.com/bsm/gomega v1.26.0 h1:LhQm+AFcgV2M0WyKroMASzAzCAJVpAxQXv4SaI9a69Y= 
+github.com/bsm/gomega v1.26.0/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -86,6 +90,8 @@ github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxG github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8= github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= @@ -108,6 +114,11 @@ github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vb github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= +github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= @@ -141,8 +152,6 @@ github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/gomodule/redigo v1.8.2 h1:H5XSIre1MB5NbPYFp+i1NBbb5qN1W8Y8YAQoAYbkm8k= -github.com/gomodule/redigo v1.8.2/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -268,6 +277,13 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= 
+github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5 h1:EaDatTxkdHG+U3Bk4EUr+DZ7fOGwTfezUiUJMaIcaho= +github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5/go.mod h1:fyalQWdtzDBECAQFBJuQe5bzQ02jGd5Qcbgb97Flm7U= +github.com/redis/go-redis/extra/redisotel/v9 v9.0.5 h1:EfpWLLCyXw8PSM2/XNJLjI3Pb27yVE+gIAfeqp8LUCc= +github.com/redis/go-redis/extra/redisotel/v9 v9.0.5/go.mod h1:WZjPDy7VNzn77AAfnAfVjZNvfJTYfPetfZk5yoSTLaQ= +github.com/redis/go-redis/v9 v9.0.5/go.mod h1:WqMKv5vnQbRuZstUwxQI195wHy+t4PuXDOjzMvcuQHk= +github.com/redis/go-redis/v9 v9.1.0 h1:137FnGdk+EQdCbye1FW+qOEcY5S+SpY9T0NiuqvtfMY= +github.com/redis/go-redis/v9 v9.1.0/go.mod h1:urWj3He21Dj5k4TK1y59xH8Uj6ATueP8AH1cY3lZl4c= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= @@ -286,12 +302,11 @@ github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpE github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -303,6 +318,13 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/otel v1.16.0 h1:Z7GVAX/UkAXPKsy94IU+i6thsQS4nb7LviLpnaNeW8s= +go.opentelemetry.io/otel v1.16.0/go.mod h1:vl0h9NUa1D5s1nv3A5vZOYWn8av4K8Ml6JDeHrT/bx4= +go.opentelemetry.io/otel/metric v1.16.0 h1:RbrpwVG1Hfv85LgnZ7+txXioPDoh6EdbZHo26Q3hqOo= +go.opentelemetry.io/otel/metric v1.16.0/go.mod h1:QE47cpOmkwipPiefDwo2wDzwJrlfxxNYodqc4xnGCo4= +go.opentelemetry.io/otel/sdk v1.16.0 h1:Z1Ok1YsijYL0CSJpHt4cS3wDDh7p572grzNrBMiMWgE= +go.opentelemetry.io/otel/trace v1.16.0 h1:8JRpaObFoW0pxuVPapkgH8UhHQj+bJW8jJsCZEu5MQs= +go.opentelemetry.io/otel/trace v1.16.0/go.mod h1:Yt9vYq1SdNz3xdjZZK7wcXv1qv2pwLkqr2QVwea0ef0= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -441,8 +463,8 @@ golang.org/x/sys 
v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= diff --git a/registry/handlers/app.go b/registry/handlers/app.go index 46c5f4ba..8901e196 100644 --- a/registry/handlers/app.go +++ b/registry/handlers/app.go @@ -40,8 +40,9 @@ import ( "github.com/distribution/distribution/v3/version" events "github.com/docker/go-events" "github.com/docker/go-metrics" - "github.com/gomodule/redigo/redis" "github.com/gorilla/mux" + "github.com/redis/go-redis/extra/redisotel/v9" + "github.com/redis/go-redis/v9" "github.com/sirupsen/logrus" ) @@ -76,7 +77,7 @@ type App struct { source notifications.SourceRecord } - redis *redis.Pool + redis *redis.Client // isCache is true if this registry is configured as a pull through cache isCache bool @@ -484,84 +485,23 @@ func (app *App) configureEvents(configuration *configuration.Configuration) { } } -type redisStartAtKey struct{} - -func (app *App) configureRedis(configuration *configuration.Configuration) { - if configuration.Redis.Addr == "" { +func (app *App) configureRedis(cfg *configuration.Configuration) { + if cfg.Redis.Addr == "" { dcontext.GetLogger(app).Infof("redis not configured") return } - pool := &redis.Pool{ - Dial: func() (redis.Conn, error) { - // TODO(stevvooe): Yet another use case for contextual timing. 
- ctx := context.WithValue(app, redisStartAtKey{}, time.Now()) + app.redis = app.createPool(cfg.Redis) - done := func(err error) { - logger := dcontext.GetLoggerWithField(ctx, "redis.connect.duration", - dcontext.Since(ctx, redisStartAtKey{})) - if err != nil { - logger.Errorf("redis: error connecting: %v", err) - } else { - logger.Infof("redis: connect %v", configuration.Redis.Addr) - } - } - - conn, err := redis.Dial("tcp", - configuration.Redis.Addr, - redis.DialConnectTimeout(configuration.Redis.DialTimeout), - redis.DialReadTimeout(configuration.Redis.ReadTimeout), - redis.DialWriteTimeout(configuration.Redis.WriteTimeout), - redis.DialUseTLS(configuration.Redis.TLS.Enabled)) - if err != nil { - dcontext.GetLogger(app).Errorf("error connecting to redis instance %s: %v", - configuration.Redis.Addr, err) - done(err) - return nil, err - } - - // authorize the connection - authArgs := make([]interface{}, 0, 2) - if configuration.Redis.Username != "" { - authArgs = append(authArgs, configuration.Redis.Username) - } - if configuration.Redis.Password != "" { - authArgs = append(authArgs, configuration.Redis.Password) - } - - if len(authArgs) > 0 { - if _, err = conn.Do("AUTH", authArgs...); err != nil { - defer conn.Close() - done(err) - return nil, err - } - } - - // select the database to use - if configuration.Redis.DB != 0 { - if _, err = conn.Do("SELECT", configuration.Redis.DB); err != nil { - defer conn.Close() - done(err) - return nil, err - } - } - - done(nil) - return conn, nil - }, - MaxIdle: configuration.Redis.Pool.MaxIdle, - MaxActive: configuration.Redis.Pool.MaxActive, - IdleTimeout: configuration.Redis.Pool.IdleTimeout, - TestOnBorrow: func(c redis.Conn, t time.Time) error { - // TODO(stevvooe): We can probably do something more interesting - // here with the health package. - _, err := c.Do("PING") - return err - }, - Wait: false, // if a connection is not available, proceed without cache. + // Enable tracing instrumentation. + if err := redisotel.InstrumentTracing(app.redis); err != nil { + dcontext.GetLogger(app).Errorf("failed to instrument tracing on redis: %v", err) } - app.redis = pool + // Enable metrics instrumentation. + if err := redisotel.InstrumentMetrics(app.redis); err != nil { + dcontext.GetLogger(app).Errorf("failed to instrument metrics on redis: %v", err) + } // setup expvar registry := expvar.Get("registry") @@ -570,13 +510,34 @@ func (app *App) configureRedis(configuration *configuration.Configuration) { } registry.(*expvar.Map).Set("redis", expvar.Func(func() interface{} { + stats := app.redis.PoolStats() return map[string]interface{}{ - "Config": configuration.Redis, - "Active": app.redis.ActiveCount(), + "Config": cfg, + "Active": stats.TotalConns - stats.IdleConns, } })) } +func (app *App) createPool(cfg configuration.Redis) *redis.Client { + return redis.NewClient(&redis.Options{ + Addr: cfg.Addr, + OnConnect: func(ctx context.Context, cn *redis.Conn) error { + res := cn.Ping(ctx) + return res.Err() + }, + Password: cfg.Password, + DB: cfg.DB, + MaxRetries: 3, + DialTimeout: cfg.DialTimeout, + ReadTimeout: cfg.ReadTimeout, + WriteTimeout: cfg.WriteTimeout, + PoolFIFO: false, + MaxIdleConns: cfg.Pool.MaxIdle, + PoolSize: cfg.Pool.MaxActive, + ConnMaxIdleTime: cfg.Pool.IdleTimeout, + }) +} + // configureLogHook prepares logging hook parameters. 
func (app *App) configureLogHook(configuration *configuration.Configuration) { entry, ok := dcontext.GetLogger(app).(*logrus.Entry) diff --git a/registry/storage/cache/redis/redis.go b/registry/storage/cache/redis/redis.go index a4b31737..9efbbf22 100644 --- a/registry/storage/cache/redis/redis.go +++ b/registry/storage/cache/redis/redis.go @@ -3,13 +3,14 @@ package redis import ( "context" "fmt" + "strconv" "github.com/distribution/distribution/v3" "github.com/distribution/distribution/v3/reference" "github.com/distribution/distribution/v3/registry/storage/cache" "github.com/distribution/distribution/v3/registry/storage/cache/metrics" - "github.com/gomodule/redigo/redis" "github.com/opencontainers/go-digest" + "github.com/redis/go-redis/v9" ) // redisBlobStatService provides an implementation of @@ -24,7 +25,7 @@ import ( // Note that there is no implied relationship between these two caches. The // layer may exist in one, both or none and the code must be written this way. type redisBlobDescriptorService struct { - pool *redis.Pool + pool *redis.Client // TODO(stevvooe): We use a pool because we don't have great control over // the cache lifecycle to manage connections. A new connection if fetched @@ -34,7 +35,7 @@ type redisBlobDescriptorService struct { // NewRedisBlobDescriptorCacheProvider returns a new redis-based // BlobDescriptorCacheProvider using the provided redis connection pool. -func NewRedisBlobDescriptorCacheProvider(pool *redis.Pool) cache.BlobDescriptorCacheProvider { +func NewRedisBlobDescriptorCacheProvider(pool *redis.Client) cache.BlobDescriptorCacheProvider { return metrics.NewPrometheusCacheProvider( &redisBlobDescriptorService{ pool: pool, @@ -62,10 +63,7 @@ func (rbds *redisBlobDescriptorService) Stat(ctx context.Context, dgst digest.Di return distribution.Descriptor{}, err } - conn := rbds.pool.Get() - defer conn.Close() - - return rbds.stat(ctx, conn, dgst) + return rbds.stat(ctx, rbds.pool, dgst) } func (rbds *redisBlobDescriptorService) Clear(ctx context.Context, dgst digest.Digest) error { @@ -73,26 +71,23 @@ func (rbds *redisBlobDescriptorService) Clear(ctx context.Context, dgst digest.D return err } - conn := rbds.pool.Get() - defer conn.Close() - // Not atomic in redis <= 2.3 - reply, err := conn.Do("HDEL", rbds.blobDescriptorHashKey(dgst), "digest", "size", "mediatype") + cmd := rbds.pool.HDel(ctx, rbds.blobDescriptorHashKey(dgst), "digest", "size", "mediatype") + res, err := cmd.Result() if err != nil { return err } - - if reply == 0 { + if res == 0 { return distribution.ErrBlobUnknown } - return nil } // stat provides an internal stat call that takes a connection parameter. This // allows some internal management of the connection scope. 
-func (rbds *redisBlobDescriptorService) stat(ctx context.Context, conn redis.Conn, dgst digest.Digest) (distribution.Descriptor, error) { - reply, err := redis.Values(conn.Do("HMGET", rbds.blobDescriptorHashKey(dgst), "digest", "size", "mediatype")) +func (rbds *redisBlobDescriptorService) stat(ctx context.Context, conn *redis.Client, dgst digest.Digest) (distribution.Descriptor, error) { + cmd := conn.HMGet(ctx, rbds.blobDescriptorHashKey(dgst), "digest", "size", "mediatype") + reply, err := cmd.Result() if err != nil { return distribution.Descriptor{}, err } @@ -105,10 +100,26 @@ func (rbds *redisBlobDescriptorService) stat(ctx context.Context, conn redis.Con } var desc distribution.Descriptor - if _, err := redis.Scan(reply, &desc.Digest, &desc.Size, &desc.MediaType); err != nil { + digestString, ok := reply[0].(string) + if !ok { + return distribution.Descriptor{}, fmt.Errorf("digest is not a string") + } + desc.Digest = digest.Digest(digestString) + sizeString, ok := reply[1].(string) + if !ok { + return distribution.Descriptor{}, fmt.Errorf("size is not a string") + } + size, err := strconv.ParseInt(sizeString, 10, 64) + if err != nil { return distribution.Descriptor{}, err } - + desc.Size = size + if reply[2] != nil { + mediaType, ok := reply[2].(string) + if ok { + desc.MediaType = mediaType + } + } return desc, nil } @@ -124,25 +135,23 @@ func (rbds *redisBlobDescriptorService) SetDescriptor(ctx context.Context, dgst return err } - conn := rbds.pool.Get() - defer conn.Close() + if err := cache.ValidateDescriptor(desc); err != nil { + return err + } - return rbds.setDescriptor(ctx, conn, dgst, desc) + return rbds.setDescriptor(ctx, rbds.pool, dgst, desc) } -func (rbds *redisBlobDescriptorService) setDescriptor(ctx context.Context, conn redis.Conn, dgst digest.Digest, desc distribution.Descriptor) error { - if _, err := conn.Do("HMSET", rbds.blobDescriptorHashKey(dgst), - "digest", desc.Digest, - "size", desc.Size); err != nil { - return err +func (rbds *redisBlobDescriptorService) setDescriptor(ctx context.Context, conn *redis.Client, dgst digest.Digest, desc distribution.Descriptor) error { + cmd := rbds.pool.HMSet(ctx, rbds.blobDescriptorHashKey(dgst), "digest", desc.Digest.String(), "size", desc.Size) + if cmd.Err() != nil { + return cmd.Err() } - // Only set mediatype if not already set. - if _, err := conn.Do("HSETNX", rbds.blobDescriptorHashKey(dgst), - "mediatype", desc.MediaType); err != nil { - return err + cmd = rbds.pool.HSetNX(ctx, rbds.blobDescriptorHashKey(dgst), "mediatype", desc.MediaType) + if cmd.Err() != nil { + return cmd.Err() } - return nil } @@ -165,31 +174,27 @@ func (rsrbds *repositoryScopedRedisBlobDescriptorService) Stat(ctx context.Conte return distribution.Descriptor{}, err } - conn := rsrbds.upstream.pool.Get() - defer conn.Close() - + pool := rsrbds.upstream.pool // Check membership to repository first - member, err := redis.Bool(conn.Do("SISMEMBER", rsrbds.repositoryBlobSetKey(rsrbds.repo), dgst)) + member, err := pool.SIsMember(ctx, rsrbds.repositoryBlobSetKey(rsrbds.repo), dgst.String()).Result() if err != nil { return distribution.Descriptor{}, err } - if !member { return distribution.Descriptor{}, distribution.ErrBlobUnknown } - upstream, err := rsrbds.upstream.stat(ctx, conn, dgst) + upstream, err := rsrbds.upstream.stat(ctx, pool, dgst) if err != nil { return distribution.Descriptor{}, err } // We allow a per repository mediatype, let's look it up here. 
- mediatype, err := redis.String(conn.Do("HGET", rsrbds.blobDescriptorHashKey(dgst), "mediatype")) + mediatype, err := pool.HGet(ctx, rsrbds.blobDescriptorHashKey(dgst), "mediatype").Result() if err != nil { - if err == redis.ErrNil { + if err == redis.Nil { return distribution.Descriptor{}, distribution.ErrBlobUnknown } - return distribution.Descriptor{}, err } @@ -206,15 +211,11 @@ func (rsrbds *repositoryScopedRedisBlobDescriptorService) Clear(ctx context.Cont return err } - conn := rsrbds.upstream.pool.Get() - defer conn.Close() - // Check membership to repository first - member, err := redis.Bool(conn.Do("SISMEMBER", rsrbds.repositoryBlobSetKey(rsrbds.repo), dgst)) + member, err := rsrbds.upstream.pool.SIsMember(ctx, rsrbds.repositoryBlobSetKey(rsrbds.repo), dgst.String()).Result() if err != nil { return err } - if !member { return distribution.ErrBlobUnknown } @@ -237,14 +238,12 @@ func (rsrbds *repositoryScopedRedisBlobDescriptorService) SetDescriptor(ctx cont } } - conn := rsrbds.upstream.pool.Get() - defer conn.Close() - - return rsrbds.setDescriptor(ctx, conn, dgst, desc) + return rsrbds.setDescriptor(ctx, rsrbds.upstream.pool, dgst, desc) } -func (rsrbds *repositoryScopedRedisBlobDescriptorService) setDescriptor(ctx context.Context, conn redis.Conn, dgst digest.Digest, desc distribution.Descriptor) error { - if _, err := conn.Do("SADD", rsrbds.repositoryBlobSetKey(rsrbds.repo), dgst); err != nil { +func (rsrbds *repositoryScopedRedisBlobDescriptorService) setDescriptor(ctx context.Context, conn *redis.Client, dgst digest.Digest, desc distribution.Descriptor) error { + _, err := conn.SAdd(ctx, rsrbds.repositoryBlobSetKey(rsrbds.repo), dgst.String()).Result() + if err != nil { return err } @@ -253,7 +252,8 @@ func (rsrbds *repositoryScopedRedisBlobDescriptorService) setDescriptor(ctx cont } // Override repository mediatype. - if _, err := conn.Do("HSET", rsrbds.blobDescriptorHashKey(dgst), "mediatype", desc.MediaType); err != nil { + _, err = conn.HSet(ctx, rsrbds.blobDescriptorHashKey(dgst), "mediatype", desc.MediaType).Result() + if err != nil { return err } diff --git a/registry/storage/cache/redis/redis_test.go b/registry/storage/cache/redis/redis_test.go index b69b9cc2..328a2ec9 100644 --- a/registry/storage/cache/redis/redis_test.go +++ b/registry/storage/cache/redis/redis_test.go @@ -1,13 +1,13 @@ package redis import ( + "context" "flag" "os" "testing" - "time" "github.com/distribution/distribution/v3/registry/storage/cache/cachecheck" - "github.com/gomodule/redigo/redis" + "github.com/redis/go-redis/v9" ) var redisAddr string @@ -29,25 +29,22 @@ func TestRedisBlobDescriptorCacheProvider(t *testing.T) { t.Skip("please set -test.registry.storage.cache.redis.addr to test layer info cache against redis") } - pool := &redis.Pool{ - Dial: func() (redis.Conn, error) { - return redis.Dial("tcp", redisAddr) + pool := redis.NewClient(&redis.Options{ + Addr: redisAddr, + OnConnect: func(ctx context.Context, cn *redis.Conn) error { + res := cn.Ping(ctx) + return res.Err() }, - MaxIdle: 1, - MaxActive: 2, - TestOnBorrow: func(c redis.Conn, t time.Time) error { - _, err := c.Do("PING") - return err - }, - Wait: false, // if a connection is not available, proceed without cache. 
- } + MaxRetries: 3, + PoolSize: 2, + }) // Clear the database - conn := pool.Get() - if _, err := conn.Do("FLUSHDB"); err != nil { + ctx := context.Background() + err := pool.FlushDB(ctx).Err() + if err != nil { t.Fatalf("unexpected error flushing redis db: %v", err) } - conn.Close() cachecheck.CheckBlobDescriptorCache(t, NewRedisBlobDescriptorCacheProvider(pool)) } diff --git a/vendor/github.com/dgryski/go-rendezvous/LICENSE b/vendor/github.com/dgryski/go-rendezvous/LICENSE new file mode 100644 index 00000000..22080f73 --- /dev/null +++ b/vendor/github.com/dgryski/go-rendezvous/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2017-2020 Damian Gryski + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/dgryski/go-rendezvous/rdv.go b/vendor/github.com/dgryski/go-rendezvous/rdv.go new file mode 100644 index 00000000..7a6f8203 --- /dev/null +++ b/vendor/github.com/dgryski/go-rendezvous/rdv.go @@ -0,0 +1,79 @@ +package rendezvous + +type Rendezvous struct { + nodes map[string]int + nstr []string + nhash []uint64 + hash Hasher +} + +type Hasher func(s string) uint64 + +func New(nodes []string, hash Hasher) *Rendezvous { + r := &Rendezvous{ + nodes: make(map[string]int, len(nodes)), + nstr: make([]string, len(nodes)), + nhash: make([]uint64, len(nodes)), + hash: hash, + } + + for i, n := range nodes { + r.nodes[n] = i + r.nstr[i] = n + r.nhash[i] = hash(n) + } + + return r +} + +func (r *Rendezvous) Lookup(k string) string { + // short-circuit if we're empty + if len(r.nodes) == 0 { + return "" + } + + khash := r.hash(k) + + var midx int + var mhash = xorshiftMult64(khash ^ r.nhash[0]) + + for i, nhash := range r.nhash[1:] { + if h := xorshiftMult64(khash ^ nhash); h > mhash { + midx = i + 1 + mhash = h + } + } + + return r.nstr[midx] +} + +func (r *Rendezvous) Add(node string) { + r.nodes[node] = len(r.nstr) + r.nstr = append(r.nstr, node) + r.nhash = append(r.nhash, r.hash(node)) +} + +func (r *Rendezvous) Remove(node string) { + // find index of node to remove + nidx := r.nodes[node] + + // remove from the slices + l := len(r.nstr) + r.nstr[nidx] = r.nstr[l] + r.nstr = r.nstr[:l] + + r.nhash[nidx] = r.nhash[l] + r.nhash = r.nhash[:l] + + // update the map + delete(r.nodes, node) + moved := r.nstr[nidx] + r.nodes[moved] = nidx +} + +func xorshiftMult64(x uint64) uint64 { + x ^= x >> 12 // a + x ^= x << 25 // b + x ^= x >> 27 // c + return x * 2685821657736338717 +} diff --git a/vendor/github.com/go-logr/logr/.golangci.yaml b/vendor/github.com/go-logr/logr/.golangci.yaml new 
file mode 100644 index 00000000..0cffafa7 --- /dev/null +++ b/vendor/github.com/go-logr/logr/.golangci.yaml @@ -0,0 +1,26 @@ +run: + timeout: 1m + tests: true + +linters: + disable-all: true + enable: + - asciicheck + - errcheck + - forcetypeassert + - gocritic + - gofmt + - goimports + - gosimple + - govet + - ineffassign + - misspell + - revive + - staticcheck + - typecheck + - unused + +issues: + exclude-use-default: false + max-issues-per-linter: 0 + max-same-issues: 10 diff --git a/vendor/github.com/go-logr/logr/CHANGELOG.md b/vendor/github.com/go-logr/logr/CHANGELOG.md new file mode 100644 index 00000000..c3569600 --- /dev/null +++ b/vendor/github.com/go-logr/logr/CHANGELOG.md @@ -0,0 +1,6 @@ +# CHANGELOG + +## v1.0.0-rc1 + +This is the first logged release. Major changes (including breaking changes) +have occurred since earlier tags. diff --git a/vendor/github.com/go-logr/logr/CONTRIBUTING.md b/vendor/github.com/go-logr/logr/CONTRIBUTING.md new file mode 100644 index 00000000..5d37e294 --- /dev/null +++ b/vendor/github.com/go-logr/logr/CONTRIBUTING.md @@ -0,0 +1,17 @@ +# Contributing + +Logr is open to pull-requests, provided they fit within the intended scope of +the project. Specifically, this library aims to be VERY small and minimalist, +with no external dependencies. + +## Compatibility + +This project intends to follow [semantic versioning](http://semver.org) and +is very strict about compatibility. Any proposed changes MUST follow those +rules. + +## Performance + +As a logging library, logr must be as light-weight as possible. Any proposed +code change must include results of running the [benchmark](./benchmark) +before and after the change. diff --git a/vendor/github.com/gomodule/redigo/LICENSE b/vendor/github.com/go-logr/logr/LICENSE similarity index 89% rename from vendor/github.com/gomodule/redigo/LICENSE rename to vendor/github.com/go-logr/logr/LICENSE index 67db8588..8dada3ed 100644 --- a/vendor/github.com/gomodule/redigo/LICENSE +++ b/vendor/github.com/go-logr/logr/LICENSE @@ -1,4 +1,3 @@ - Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -173,3 +172,30 @@ defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
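One note on the new `github.com/dgryski/go-rendezvous` dependency vendored above: go-redis uses rendezvous (highest-random-weight) hashing in its Ring client to assign keys to shards. Each node's hash is mixed with the key's hash and the node with the largest mixed value wins, so removing a node only remaps the keys that lived on it. A small usage sketch with an FNV-based hasher and made-up shard names:

```
package main

import (
	"fmt"
	"hash/fnv"

	rendezvous "github.com/dgryski/go-rendezvous"
)

func fnvHash(s string) uint64 {
	h := fnv.New64a()
	h.Write([]byte(s))
	return h.Sum64()
}

func main() {
	// Three hypothetical cache shards.
	r := rendezvous.New([]string{"shard-a", "shard-b", "shard-c"}, fnvHash)

	// Keys are consistently routed to whichever shard scores highest for them.
	for _, key := range []string{"repo/alpine", "repo/ubuntu", "repo/nginx"} {
		fmt.Printf("%s -> %s\n", key, r.Lookup(key))
	}
}
```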
diff --git a/vendor/github.com/go-logr/logr/README.md b/vendor/github.com/go-logr/logr/README.md new file mode 100644 index 00000000..ab593118 --- /dev/null +++ b/vendor/github.com/go-logr/logr/README.md @@ -0,0 +1,282 @@ +# A minimal logging API for Go + +[![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/logr.svg)](https://pkg.go.dev/github.com/go-logr/logr) + +logr offers an(other) opinion on how Go programs and libraries can do logging +without becoming coupled to a particular logging implementation. This is not +an implementation of logging - it is an API. In fact it is two APIs with two +different sets of users. + +The `Logger` type is intended for application and library authors. It provides +a relatively small API which can be used everywhere you want to emit logs. It +defers the actual act of writing logs (to files, to stdout, or whatever) to the +`LogSink` interface. + +The `LogSink` interface is intended for logging library implementers. It is a +pure interface which can be implemented by logging frameworks to provide the actual logging +functionality. + +This decoupling allows application and library developers to write code in +terms of `logr.Logger` (which has very low dependency fan-out) while the +implementation of logging is managed "up stack" (e.g. in or near `main()`.) +Application developers can then switch out implementations as necessary. + +Many people assert that libraries should not be logging, and as such efforts +like this are pointless. Those people are welcome to convince the authors of +the tens-of-thousands of libraries that *DO* write logs that they are all +wrong. In the meantime, logr takes a more practical approach. + +## Typical usage + +Somewhere, early in an application's life, it will make a decision about which +logging library (implementation) it actually wants to use. Something like: + +``` + func main() { + // ... other setup code ... + + // Create the "root" logger. We have chosen the "logimpl" implementation, + // which takes some initial parameters and returns a logr.Logger. + logger := logimpl.New(param1, param2) + + // ... other setup code ... +``` + +Most apps will call into other libraries, create structures to govern the flow, +etc. The `logr.Logger` object can be passed to these other libraries, stored +in structs, or even used as a package-global variable, if needed. For example: + +``` + app := createTheAppObject(logger) + app.Run() +``` + +Outside of this early setup, no other packages need to know about the choice of +implementation. They write logs in terms of the `logr.Logger` that they +received: + +``` + type appObject struct { + // ... other fields ... + logger logr.Logger + // ... other fields ... + } + + func (app *appObject) Run() { + app.logger.Info("starting up", "timestamp", time.Now()) + + // ... app code ... +``` + +## Background + +If the Go standard library had defined an interface for logging, this project +probably would not be needed. Alas, here we are. + +### Inspiration + +Before you consider this package, please read [this blog post by the +inimitable Dave Cheney][warning-makes-no-sense]. We really appreciate what +he has to say, and it largely aligns with our own experiences. + +### Differences from Dave's ideas + +The main differences are: + +1. Dave basically proposes doing away with the notion of a logging API in favor +of `fmt.Printf()`. We disagree, especially when you consider things like output +locations, timestamps, file and line decorations, and structured logging. 
This +package restricts the logging API to just 2 types of logs: info and error. + +Info logs are things you want to tell the user which are not errors. Error +logs are, well, errors. If your code receives an `error` from a subordinate +function call and is logging that `error` *and not returning it*, use error +logs. + +2. Verbosity-levels on info logs. This gives developers a chance to indicate +arbitrary grades of importance for info logs, without assigning names with +semantic meaning such as "warning", "trace", and "debug." Superficially this +may feel very similar, but the primary difference is the lack of semantics. +Because verbosity is a numerical value, it's safe to assume that an app running +with higher verbosity means more (and less important) logs will be generated. + +## Implementations (non-exhaustive) + +There are implementations for the following logging libraries: + +- **a function** (can bridge to non-structured libraries): [funcr](https://github.com/go-logr/logr/tree/master/funcr) +- **a testing.T** (for use in Go tests, with JSON-like output): [testr](https://github.com/go-logr/logr/tree/master/testr) +- **github.com/google/glog**: [glogr](https://github.com/go-logr/glogr) +- **k8s.io/klog** (for Kubernetes): [klogr](https://git.k8s.io/klog/klogr) +- **a testing.T** (with klog-like text output): [ktesting](https://git.k8s.io/klog/ktesting) +- **go.uber.org/zap**: [zapr](https://github.com/go-logr/zapr) +- **log** (the Go standard library logger): [stdr](https://github.com/go-logr/stdr) +- **github.com/sirupsen/logrus**: [logrusr](https://github.com/bombsimon/logrusr) +- **github.com/wojas/genericr**: [genericr](https://github.com/wojas/genericr) (makes it easy to implement your own backend) +- **logfmt** (Heroku style [logging](https://www.brandur.org/logfmt)): [logfmtr](https://github.com/iand/logfmtr) +- **github.com/rs/zerolog**: [zerologr](https://github.com/go-logr/zerologr) +- **github.com/go-kit/log**: [gokitlogr](https://github.com/tonglil/gokitlogr) (also compatible with github.com/go-kit/kit/log since v0.12.0) +- **bytes.Buffer** (writing to a buffer): [bufrlogr](https://github.com/tonglil/buflogr) (useful for ensuring values were logged, like during testing) + +## FAQ + +### Conceptual + +#### Why structured logging? + +- **Structured logs are more easily queryable**: Since you've got + key-value pairs, it's much easier to query your structured logs for + particular values by filtering on the contents of a particular key -- + think searching request logs for error codes, Kubernetes reconcilers for + the name and namespace of the reconciled object, etc. + +- **Structured logging makes it easier to have cross-referenceable logs**: + Similarly to searchability, if you maintain conventions around your + keys, it becomes easy to gather all log lines related to a particular + concept. + +- **Structured logs allow better dimensions of filtering**: if you have + structure to your logs, you've got more precise control over how much + information is logged -- you might choose in a particular configuration + to log certain keys but not others, only log lines where a certain key + matches a certain value, etc., instead of just having v-levels and names + to key off of. + +- **Structured logs better represent structured data**: sometimes, the + data that you want to log is inherently structured (think tuple-link + objects.) Structured logs allow you to preserve that structure when + outputting. + +#### Why V-levels? 
+ +**V-levels give operators an easy way to control the chattiness of log +operations**. V-levels provide a way for a given package to distinguish +the relative importance or verbosity of a given log message. Then, if +a particular logger or package is logging too many messages, the user +of the package can simply change the v-levels for that library. + +#### Why not named levels, like Info/Warning/Error? + +Read [Dave Cheney's post][warning-makes-no-sense]. Then read [Differences +from Dave's ideas](#differences-from-daves-ideas). + +#### Why not allow format strings, too? + +**Format strings negate many of the benefits of structured logs**: + +- They're not easily searchable without resorting to fuzzy searching, + regular expressions, etc. + +- They don't store structured data well, since contents are flattened into + a string. + +- They're not cross-referenceable. + +- They don't compress easily, since the message is not constant. + +(Unless you turn positional parameters into key-value pairs with numerical +keys, at which point you've gotten key-value logging with meaningless +keys.) + +### Practical + +#### Why key-value pairs, and not a map? + +Key-value pairs are *much* easier to optimize, especially around +allocations. Zap (a structured logger that inspired logr's interface) has +[performance measurements](https://github.com/uber-go/zap#performance) +that show this quite nicely. + +While the interface ends up being a little less obvious, you get +potentially better performance, plus avoid making users type +`map[string]string{}` every time they want to log. + +#### What if my V-levels differ between libraries? + +That's fine. Control your V-levels on a per-logger basis, and use the +`WithName` method to pass different loggers to different libraries. + +Generally, you should take care to ensure that you have relatively +consistent V-levels within a given logger, however, as this makes deciding +on what verbosity of logs to request easier. + +#### But I really want to use a format string! + +That's not actually a question. Assuming your question is "how do +I convert my mental model of logging with format strings to logging with +constant messages": + +1. Figure out what the error actually is, as you'd write in a TL;DR style, + and use that as a message. + +2. For every place you'd write a format specifier, look to the word before + it, and add that as a key value pair. + +For instance, consider the following examples (all taken from spots in the +Kubernetes codebase): + +- `klog.V(4).Infof("Client is returning errors: code %v, error %v", + responseCode, err)` becomes `logger.Error(err, "client returned an + error", "code", responseCode)` + +- `klog.V(4).Infof("Got a Retry-After %ds response for attempt %d to %v", + seconds, retries, url)` becomes `logger.V(4).Info("got a retry-after + response when requesting url", "attempt", retries, "after + seconds", seconds, "url", url)` + +If you *really* must use a format string, use it in a key's value, and +call `fmt.Sprintf` yourself. For instance: `log.Printf("unable to +reflect over type %T")` becomes `logger.Info("unable to reflect over +type", "type", fmt.Sprintf("%T"))`. In general though, the cases where +this is necessary should be few and far between. + +#### How do I choose my V-levels? + +This is basically the only hard constraint: increase V-levels to denote +more verbose or more debug-y logs. 
+ +Otherwise, you can start out with `0` as "you always want to see this", +`1` as "common logging that you might *possibly* want to turn off", and +`10` as "I would like to performance-test your log collection stack." + +Then gradually choose levels in between as you need them, working your way +down from 10 (for debug and trace style logs) and up from 1 (for chattier +info-type logs.) + +#### How do I choose my keys? + +Keys are fairly flexible, and can hold more or less any string +value. For best compatibility with implementations and consistency +with existing code in other projects, there are a few conventions you +should consider. + +- Make your keys human-readable. +- Constant keys are generally a good idea. +- Be consistent across your codebase. +- Keys should naturally match parts of the message string. +- Use lower case for simple keys and + [lowerCamelCase](https://en.wiktionary.org/wiki/lowerCamelCase) for + more complex ones. Kubernetes is one example of a project that has + [adopted that + convention](https://github.com/kubernetes/community/blob/HEAD/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments). + +While key names are mostly unrestricted (and spaces are acceptable), +it's generally a good idea to stick to printable ascii characters, or at +least match the general character set of your log lines. + +#### Why should keys be constant values? + +The point of structured logging is to make later log processing easier. Your +keys are, effectively, the schema of each log message. If you use different +keys across instances of the same log line, you will make your structured logs +much harder to use. `Sprintf()` is for values, not for keys! + +#### Why is this not a pure interface? + +The Logger type is implemented as a struct in order to allow the Go compiler to +optimize things like high-V `Info` logs that are not triggered. Not all of +these implementations are implemented yet, but this structure was suggested as +a way to ensure they *can* be implemented. All of the real work is behind the +`LogSink` interface. + +[warning-makes-no-sense]: http://dave.cheney.net/2015/11/05/lets-talk-about-logging diff --git a/vendor/github.com/go-logr/logr/discard.go b/vendor/github.com/go-logr/logr/discard.go new file mode 100644 index 00000000..99fe8be9 --- /dev/null +++ b/vendor/github.com/go-logr/logr/discard.go @@ -0,0 +1,24 @@ +/* +Copyright 2020 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logr + +// Discard returns a Logger that discards all messages logged to it. It can be +// used whenever the caller is not interested in the logs. Logger instances +// produced by this function always compare as equal. +func Discard() Logger { + return New(nil) +} diff --git a/vendor/github.com/go-logr/logr/funcr/funcr.go b/vendor/github.com/go-logr/logr/funcr/funcr.go new file mode 100644 index 00000000..e52f0cd0 --- /dev/null +++ b/vendor/github.com/go-logr/logr/funcr/funcr.go @@ -0,0 +1,804 @@ +/* +Copyright 2021 The logr Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package funcr implements formatting of structured log messages and +// optionally captures the call site and timestamp. +// +// The simplest way to use it is via its implementation of a +// github.com/go-logr/logr.LogSink with output through an arbitrary +// "write" function. See New and NewJSON for details. +// +// # Custom LogSinks +// +// For users who need more control, a funcr.Formatter can be embedded inside +// your own custom LogSink implementation. This is useful when the LogSink +// needs to implement additional methods, for example. +// +// # Formatting +// +// This will respect logr.Marshaler, fmt.Stringer, and error interfaces for +// values which are being logged. When rendering a struct, funcr will use Go's +// standard JSON tags (all except "string"). +package funcr + +import ( + "bytes" + "encoding" + "encoding/json" + "fmt" + "path/filepath" + "reflect" + "runtime" + "strconv" + "strings" + "time" + + "github.com/go-logr/logr" +) + +// New returns a logr.Logger which is implemented by an arbitrary function. +func New(fn func(prefix, args string), opts Options) logr.Logger { + return logr.New(newSink(fn, NewFormatter(opts))) +} + +// NewJSON returns a logr.Logger which is implemented by an arbitrary function +// and produces JSON output. +func NewJSON(fn func(obj string), opts Options) logr.Logger { + fnWrapper := func(_, obj string) { + fn(obj) + } + return logr.New(newSink(fnWrapper, NewFormatterJSON(opts))) +} + +// Underlier exposes access to the underlying logging function. Since +// callers only have a logr.Logger, they have to know which +// implementation is in use, so this interface is less of an +// abstraction and more of a way to test type conversion. +type Underlier interface { + GetUnderlying() func(prefix, args string) +} + +func newSink(fn func(prefix, args string), formatter Formatter) logr.LogSink { + l := &fnlogger{ + Formatter: formatter, + write: fn, + } + // For skipping fnlogger.Info and fnlogger.Error. + l.Formatter.AddCallDepth(1) + return l +} + +// Options carries parameters which influence the way logs are generated. +type Options struct { + // LogCaller tells funcr to add a "caller" key to some or all log lines. + // This has some overhead, so some users might not want it. + LogCaller MessageClass + + // LogCallerFunc tells funcr to also log the calling function name. This + // has no effect if caller logging is not enabled (see Options.LogCaller). + LogCallerFunc bool + + // LogTimestamp tells funcr to add a "ts" key to log lines. This has some + // overhead, so some users might not want it. + LogTimestamp bool + + // TimestampFormat tells funcr how to render timestamps when LogTimestamp + // is enabled. If not specified, a default format will be used. For more + // details, see docs for Go's time.Layout. + TimestampFormat string + + // Verbosity tells funcr which V logs to produce. Higher values enable + // more logs. 
Info logs at or below this level will be written, while logs + // above this level will be discarded. + Verbosity int + + // RenderBuiltinsHook allows users to mutate the list of key-value pairs + // while a log line is being rendered. The kvList argument follows logr + // conventions - each pair of slice elements is comprised of a string key + // and an arbitrary value (verified and sanitized before calling this + // hook). The value returned must follow the same conventions. This hook + // can be used to audit or modify logged data. For example, you might want + // to prefix all of funcr's built-in keys with some string. This hook is + // only called for built-in (provided by funcr itself) key-value pairs. + // Equivalent hooks are offered for key-value pairs saved via + // logr.Logger.WithValues or Formatter.AddValues (see RenderValuesHook) and + // for user-provided pairs (see RenderArgsHook). + RenderBuiltinsHook func(kvList []interface{}) []interface{} + + // RenderValuesHook is the same as RenderBuiltinsHook, except that it is + // only called for key-value pairs saved via logr.Logger.WithValues. See + // RenderBuiltinsHook for more details. + RenderValuesHook func(kvList []interface{}) []interface{} + + // RenderArgsHook is the same as RenderBuiltinsHook, except that it is only + // called for key-value pairs passed directly to Info and Error. See + // RenderBuiltinsHook for more details. + RenderArgsHook func(kvList []interface{}) []interface{} + + // MaxLogDepth tells funcr how many levels of nested fields (e.g. a struct + // that contains a struct, etc.) it may log. Every time it finds a struct, + // slice, array, or map the depth is increased by one. When the maximum is + // reached, the value will be converted to a string indicating that the max + // depth has been exceeded. If this field is not specified, a default + // value will be used. + MaxLogDepth int +} + +// MessageClass indicates which category or categories of messages to consider. +type MessageClass int + +const ( + // None ignores all message classes. + None MessageClass = iota + // All considers all message classes. + All + // Info only considers info messages. + Info + // Error only considers error messages. + Error +) + +// fnlogger inherits some of its LogSink implementation from Formatter +// and just needs to add some glue code. +type fnlogger struct { + Formatter + write func(prefix, args string) +} + +func (l fnlogger) WithName(name string) logr.LogSink { + l.Formatter.AddName(name) + return &l +} + +func (l fnlogger) WithValues(kvList ...interface{}) logr.LogSink { + l.Formatter.AddValues(kvList) + return &l +} + +func (l fnlogger) WithCallDepth(depth int) logr.LogSink { + l.Formatter.AddCallDepth(depth) + return &l +} + +func (l fnlogger) Info(level int, msg string, kvList ...interface{}) { + prefix, args := l.FormatInfo(level, msg, kvList) + l.write(prefix, args) +} + +func (l fnlogger) Error(err error, msg string, kvList ...interface{}) { + prefix, args := l.FormatError(err, msg, kvList) + l.write(prefix, args) +} + +func (l fnlogger) GetUnderlying() func(prefix, args string) { + return l.write +} + +// Assert conformance to the interfaces. +var _ logr.LogSink = &fnlogger{} +var _ logr.CallDepthLogSink = &fnlogger{} +var _ Underlier = &fnlogger{} + +// NewFormatter constructs a Formatter which emits a JSON-like key=value format. +func NewFormatter(opts Options) Formatter { + return newFormatter(opts, outputKeyValue) +} + +// NewFormatterJSON constructs a Formatter which emits strict JSON. 
+func NewFormatterJSON(opts Options) Formatter { + return newFormatter(opts, outputJSON) +} + +// Defaults for Options. +const defaultTimestampFormat = "2006-01-02 15:04:05.000000" +const defaultMaxLogDepth = 16 + +func newFormatter(opts Options, outfmt outputFormat) Formatter { + if opts.TimestampFormat == "" { + opts.TimestampFormat = defaultTimestampFormat + } + if opts.MaxLogDepth == 0 { + opts.MaxLogDepth = defaultMaxLogDepth + } + f := Formatter{ + outputFormat: outfmt, + prefix: "", + values: nil, + depth: 0, + opts: &opts, + } + return f +} + +// Formatter is an opaque struct which can be embedded in a LogSink +// implementation. It should be constructed with NewFormatter. Some of +// its methods directly implement logr.LogSink. +type Formatter struct { + outputFormat outputFormat + prefix string + values []interface{} + valuesStr string + depth int + opts *Options +} + +// outputFormat indicates which outputFormat to use. +type outputFormat int + +const ( + // outputKeyValue emits a JSON-like key=value format, but not strict JSON. + outputKeyValue outputFormat = iota + // outputJSON emits strict JSON. + outputJSON +) + +// PseudoStruct is a list of key-value pairs that gets logged as a struct. +type PseudoStruct []interface{} + +// render produces a log line, ready to use. +func (f Formatter) render(builtins, args []interface{}) string { + // Empirically bytes.Buffer is faster than strings.Builder for this. + buf := bytes.NewBuffer(make([]byte, 0, 1024)) + if f.outputFormat == outputJSON { + buf.WriteByte('{') + } + vals := builtins + if hook := f.opts.RenderBuiltinsHook; hook != nil { + vals = hook(f.sanitize(vals)) + } + f.flatten(buf, vals, false, false) // keys are ours, no need to escape + continuing := len(builtins) > 0 + if len(f.valuesStr) > 0 { + if continuing { + if f.outputFormat == outputJSON { + buf.WriteByte(',') + } else { + buf.WriteByte(' ') + } + } + continuing = true + buf.WriteString(f.valuesStr) + } + vals = args + if hook := f.opts.RenderArgsHook; hook != nil { + vals = hook(f.sanitize(vals)) + } + f.flatten(buf, vals, continuing, true) // escape user-provided keys + if f.outputFormat == outputJSON { + buf.WriteByte('}') + } + return buf.String() +} + +// flatten renders a list of key-value pairs into a buffer. If continuing is +// true, it assumes that the buffer has previous values and will emit a +// separator (which depends on the output format) before the first pair it +// writes. If escapeKeys is true, the keys are assumed to have +// non-JSON-compatible characters in them and must be evaluated for escapes. +// +// This function returns a potentially modified version of kvList, which +// ensures that there is a value for every key (adding a value if needed) and +// that each key is a string (substituting a key if needed). +func (f Formatter) flatten(buf *bytes.Buffer, kvList []interface{}, continuing bool, escapeKeys bool) []interface{} { + // This logic overlaps with sanitize() but saves one type-cast per key, + // which can be measurable. + if len(kvList)%2 != 0 { + kvList = append(kvList, noValue) + } + for i := 0; i < len(kvList); i += 2 { + k, ok := kvList[i].(string) + if !ok { + k = f.nonStringKey(kvList[i]) + kvList[i] = k + } + v := kvList[i+1] + + if i > 0 || continuing { + if f.outputFormat == outputJSON { + buf.WriteByte(',') + } else { + // In theory the format could be something we don't understand. In + // practice, we control it, so it won't be. 
+ buf.WriteByte(' ') + } + } + + if escapeKeys { + buf.WriteString(prettyString(k)) + } else { + // this is faster + buf.WriteByte('"') + buf.WriteString(k) + buf.WriteByte('"') + } + if f.outputFormat == outputJSON { + buf.WriteByte(':') + } else { + buf.WriteByte('=') + } + buf.WriteString(f.pretty(v)) + } + return kvList +} + +func (f Formatter) pretty(value interface{}) string { + return f.prettyWithFlags(value, 0, 0) +} + +const ( + flagRawStruct = 0x1 // do not print braces on structs +) + +// TODO: This is not fast. Most of the overhead goes here. +func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) string { + if depth > f.opts.MaxLogDepth { + return `""` + } + + // Handle types that take full control of logging. + if v, ok := value.(logr.Marshaler); ok { + // Replace the value with what the type wants to get logged. + // That then gets handled below via reflection. + value = invokeMarshaler(v) + } + + // Handle types that want to format themselves. + switch v := value.(type) { + case fmt.Stringer: + value = invokeStringer(v) + case error: + value = invokeError(v) + } + + // Handling the most common types without reflect is a small perf win. + switch v := value.(type) { + case bool: + return strconv.FormatBool(v) + case string: + return prettyString(v) + case int: + return strconv.FormatInt(int64(v), 10) + case int8: + return strconv.FormatInt(int64(v), 10) + case int16: + return strconv.FormatInt(int64(v), 10) + case int32: + return strconv.FormatInt(int64(v), 10) + case int64: + return strconv.FormatInt(int64(v), 10) + case uint: + return strconv.FormatUint(uint64(v), 10) + case uint8: + return strconv.FormatUint(uint64(v), 10) + case uint16: + return strconv.FormatUint(uint64(v), 10) + case uint32: + return strconv.FormatUint(uint64(v), 10) + case uint64: + return strconv.FormatUint(v, 10) + case uintptr: + return strconv.FormatUint(uint64(v), 10) + case float32: + return strconv.FormatFloat(float64(v), 'f', -1, 32) + case float64: + return strconv.FormatFloat(v, 'f', -1, 64) + case complex64: + return `"` + strconv.FormatComplex(complex128(v), 'f', -1, 64) + `"` + case complex128: + return `"` + strconv.FormatComplex(v, 'f', -1, 128) + `"` + case PseudoStruct: + buf := bytes.NewBuffer(make([]byte, 0, 1024)) + v = f.sanitize(v) + if flags&flagRawStruct == 0 { + buf.WriteByte('{') + } + for i := 0; i < len(v); i += 2 { + if i > 0 { + buf.WriteByte(',') + } + k, _ := v[i].(string) // sanitize() above means no need to check success + // arbitrary keys might need escaping + buf.WriteString(prettyString(k)) + buf.WriteByte(':') + buf.WriteString(f.prettyWithFlags(v[i+1], 0, depth+1)) + } + if flags&flagRawStruct == 0 { + buf.WriteByte('}') + } + return buf.String() + } + + buf := bytes.NewBuffer(make([]byte, 0, 256)) + t := reflect.TypeOf(value) + if t == nil { + return "null" + } + v := reflect.ValueOf(value) + switch t.Kind() { + case reflect.Bool: + return strconv.FormatBool(v.Bool()) + case reflect.String: + return prettyString(v.String()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return strconv.FormatInt(int64(v.Int()), 10) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return strconv.FormatUint(uint64(v.Uint()), 10) + case reflect.Float32: + return strconv.FormatFloat(float64(v.Float()), 'f', -1, 32) + case reflect.Float64: + return strconv.FormatFloat(v.Float(), 'f', -1, 64) + case reflect.Complex64: + return `"` + 
strconv.FormatComplex(complex128(v.Complex()), 'f', -1, 64) + `"` + case reflect.Complex128: + return `"` + strconv.FormatComplex(v.Complex(), 'f', -1, 128) + `"` + case reflect.Struct: + if flags&flagRawStruct == 0 { + buf.WriteByte('{') + } + printComma := false // testing i>0 is not enough because of JSON omitted fields + for i := 0; i < t.NumField(); i++ { + fld := t.Field(i) + if fld.PkgPath != "" { + // reflect says this field is only defined for non-exported fields. + continue + } + if !v.Field(i).CanInterface() { + // reflect isn't clear exactly what this means, but we can't use it. + continue + } + name := "" + omitempty := false + if tag, found := fld.Tag.Lookup("json"); found { + if tag == "-" { + continue + } + if comma := strings.Index(tag, ","); comma != -1 { + if n := tag[:comma]; n != "" { + name = n + } + rest := tag[comma:] + if strings.Contains(rest, ",omitempty,") || strings.HasSuffix(rest, ",omitempty") { + omitempty = true + } + } else { + name = tag + } + } + if omitempty && isEmpty(v.Field(i)) { + continue + } + if printComma { + buf.WriteByte(',') + } + printComma = true // if we got here, we are rendering a field + if fld.Anonymous && fld.Type.Kind() == reflect.Struct && name == "" { + buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), flags|flagRawStruct, depth+1)) + continue + } + if name == "" { + name = fld.Name + } + // field names can't contain characters which need escaping + buf.WriteByte('"') + buf.WriteString(name) + buf.WriteByte('"') + buf.WriteByte(':') + buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), 0, depth+1)) + } + if flags&flagRawStruct == 0 { + buf.WriteByte('}') + } + return buf.String() + case reflect.Slice, reflect.Array: + // If this is outputing as JSON make sure this isn't really a json.RawMessage. + // If so just emit "as-is" and don't pretty it as that will just print + // it as [X,Y,Z,...] which isn't terribly useful vs the string form you really want. + if f.outputFormat == outputJSON { + if rm, ok := value.(json.RawMessage); ok { + // If it's empty make sure we emit an empty value as the array style would below. + if len(rm) > 0 { + buf.Write(rm) + } else { + buf.WriteString("null") + } + return buf.String() + } + } + buf.WriteByte('[') + for i := 0; i < v.Len(); i++ { + if i > 0 { + buf.WriteByte(',') + } + e := v.Index(i) + buf.WriteString(f.prettyWithFlags(e.Interface(), 0, depth+1)) + } + buf.WriteByte(']') + return buf.String() + case reflect.Map: + buf.WriteByte('{') + // This does not sort the map keys, for best perf. + it := v.MapRange() + i := 0 + for it.Next() { + if i > 0 { + buf.WriteByte(',') + } + // If a map key supports TextMarshaler, use it. + keystr := "" + if m, ok := it.Key().Interface().(encoding.TextMarshaler); ok { + txt, err := m.MarshalText() + if err != nil { + keystr = fmt.Sprintf("", err.Error()) + } else { + keystr = string(txt) + } + keystr = prettyString(keystr) + } else { + // prettyWithFlags will produce already-escaped values + keystr = f.prettyWithFlags(it.Key().Interface(), 0, depth+1) + if t.Key().Kind() != reflect.String { + // JSON only does string keys. Unlike Go's standard JSON, we'll + // convert just about anything to a string. 
+ keystr = prettyString(keystr) + } + } + buf.WriteString(keystr) + buf.WriteByte(':') + buf.WriteString(f.prettyWithFlags(it.Value().Interface(), 0, depth+1)) + i++ + } + buf.WriteByte('}') + return buf.String() + case reflect.Ptr, reflect.Interface: + if v.IsNil() { + return "null" + } + return f.prettyWithFlags(v.Elem().Interface(), 0, depth) + } + return fmt.Sprintf(`""`, t.Kind().String()) +} + +func prettyString(s string) string { + // Avoid escaping (which does allocations) if we can. + if needsEscape(s) { + return strconv.Quote(s) + } + b := bytes.NewBuffer(make([]byte, 0, 1024)) + b.WriteByte('"') + b.WriteString(s) + b.WriteByte('"') + return b.String() +} + +// needsEscape determines whether the input string needs to be escaped or not, +// without doing any allocations. +func needsEscape(s string) bool { + for _, r := range s { + if !strconv.IsPrint(r) || r == '\\' || r == '"' { + return true + } + } + return false +} + +func isEmpty(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Complex64, reflect.Complex128: + return v.Complex() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} + +func invokeMarshaler(m logr.Marshaler) (ret interface{}) { + defer func() { + if r := recover(); r != nil { + ret = fmt.Sprintf("", r) + } + }() + return m.MarshalLog() +} + +func invokeStringer(s fmt.Stringer) (ret string) { + defer func() { + if r := recover(); r != nil { + ret = fmt.Sprintf("", r) + } + }() + return s.String() +} + +func invokeError(e error) (ret string) { + defer func() { + if r := recover(); r != nil { + ret = fmt.Sprintf("", r) + } + }() + return e.Error() +} + +// Caller represents the original call site for a log line, after considering +// logr.Logger.WithCallDepth and logr.Logger.WithCallStackHelper. The File and +// Line fields will always be provided, while the Func field is optional. +// Users can set the render hook fields in Options to examine logged key-value +// pairs, one of which will be {"caller", Caller} if the Options.LogCaller +// field is enabled for the given MessageClass. +type Caller struct { + // File is the basename of the file for this call site. + File string `json:"file"` + // Line is the line number in the file for this call site. + Line int `json:"line"` + // Func is the function name for this call site, or empty if + // Options.LogCallerFunc is not enabled. + Func string `json:"function,omitempty"` +} + +func (f Formatter) caller() Caller { + // +1 for this frame, +1 for Info/Error. + pc, file, line, ok := runtime.Caller(f.depth + 2) + if !ok { + return Caller{"", 0, ""} + } + fn := "" + if f.opts.LogCallerFunc { + if fp := runtime.FuncForPC(pc); fp != nil { + fn = fp.Name() + } + } + + return Caller{filepath.Base(file), line, fn} +} + +const noValue = "" + +func (f Formatter) nonStringKey(v interface{}) string { + return fmt.Sprintf("", f.snippet(v)) +} + +// snippet produces a short snippet string of an arbitrary value. 
+func (f Formatter) snippet(v interface{}) string { + const snipLen = 16 + + snip := f.pretty(v) + if len(snip) > snipLen { + snip = snip[:snipLen] + } + return snip +} + +// sanitize ensures that a list of key-value pairs has a value for every key +// (adding a value if needed) and that each key is a string (substituting a key +// if needed). +func (f Formatter) sanitize(kvList []interface{}) []interface{} { + if len(kvList)%2 != 0 { + kvList = append(kvList, noValue) + } + for i := 0; i < len(kvList); i += 2 { + _, ok := kvList[i].(string) + if !ok { + kvList[i] = f.nonStringKey(kvList[i]) + } + } + return kvList +} + +// Init configures this Formatter from runtime info, such as the call depth +// imposed by logr itself. +// Note that this receiver is a pointer, so depth can be saved. +func (f *Formatter) Init(info logr.RuntimeInfo) { + f.depth += info.CallDepth +} + +// Enabled checks whether an info message at the given level should be logged. +func (f Formatter) Enabled(level int) bool { + return level <= f.opts.Verbosity +} + +// GetDepth returns the current depth of this Formatter. This is useful for +// implementations which do their own caller attribution. +func (f Formatter) GetDepth() int { + return f.depth +} + +// FormatInfo renders an Info log message into strings. The prefix will be +// empty when no names were set (via AddNames), or when the output is +// configured for JSON. +func (f Formatter) FormatInfo(level int, msg string, kvList []interface{}) (prefix, argsStr string) { + args := make([]interface{}, 0, 64) // using a constant here impacts perf + prefix = f.prefix + if f.outputFormat == outputJSON { + args = append(args, "logger", prefix) + prefix = "" + } + if f.opts.LogTimestamp { + args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat)) + } + if policy := f.opts.LogCaller; policy == All || policy == Info { + args = append(args, "caller", f.caller()) + } + args = append(args, "level", level, "msg", msg) + return prefix, f.render(args, kvList) +} + +// FormatError renders an Error log message into strings. The prefix will be +// empty when no names were set (via AddNames), or when the output is +// configured for JSON. +func (f Formatter) FormatError(err error, msg string, kvList []interface{}) (prefix, argsStr string) { + args := make([]interface{}, 0, 64) // using a constant here impacts perf + prefix = f.prefix + if f.outputFormat == outputJSON { + args = append(args, "logger", prefix) + prefix = "" + } + if f.opts.LogTimestamp { + args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat)) + } + if policy := f.opts.LogCaller; policy == All || policy == Error { + args = append(args, "caller", f.caller()) + } + args = append(args, "msg", msg) + var loggableErr interface{} + if err != nil { + loggableErr = err.Error() + } + args = append(args, "error", loggableErr) + return f.prefix, f.render(args, kvList) +} + +// AddName appends the specified name. funcr uses '/' characters to separate +// name elements. Callers should not pass '/' in the provided name string, but +// this library does not actually enforce that. +func (f *Formatter) AddName(name string) { + if len(f.prefix) > 0 { + f.prefix += "/" + } + f.prefix += name +} + +// AddValues adds key-value pairs to the set of saved values to be logged with +// each log line. +func (f *Formatter) AddValues(kvList []interface{}) { + // Three slice args forces a copy. + n := len(f.values) + f.values = append(f.values[:n:n], kvList...) 
+ + vals := f.values + if hook := f.opts.RenderValuesHook; hook != nil { + vals = hook(f.sanitize(vals)) + } + + // Pre-render values, so we don't have to do it on each Info/Error call. + buf := bytes.NewBuffer(make([]byte, 0, 1024)) + f.flatten(buf, vals, false, true) // escape user-provided keys + f.valuesStr = buf.String() +} + +// AddCallDepth increases the number of stack-frames to skip when attributing +// the log line to a file and line. +func (f *Formatter) AddCallDepth(depth int) { + f.depth += depth +} diff --git a/vendor/github.com/go-logr/logr/logr.go b/vendor/github.com/go-logr/logr/logr.go new file mode 100644 index 00000000..e027aea3 --- /dev/null +++ b/vendor/github.com/go-logr/logr/logr.go @@ -0,0 +1,550 @@ +/* +Copyright 2019 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This design derives from Dave Cheney's blog: +// http://dave.cheney.net/2015/11/05/lets-talk-about-logging + +// Package logr defines a general-purpose logging API and abstract interfaces +// to back that API. Packages in the Go ecosystem can depend on this package, +// while callers can implement logging with whatever backend is appropriate. +// +// # Usage +// +// Logging is done using a Logger instance. Logger is a concrete type with +// methods, which defers the actual logging to a LogSink interface. The main +// methods of Logger are Info() and Error(). Arguments to Info() and Error() +// are key/value pairs rather than printf-style formatted strings, emphasizing +// "structured logging". +// +// With Go's standard log package, we might write: +// +// log.Printf("setting target value %s", targetValue) +// +// With logr's structured logging, we'd write: +// +// logger.Info("setting target", "value", targetValue) +// +// Errors are much the same. Instead of: +// +// log.Printf("failed to open the pod bay door for user %s: %v", user, err) +// +// We'd write: +// +// logger.Error(err, "failed to open the pod bay door", "user", user) +// +// Info() and Error() are very similar, but they are separate methods so that +// LogSink implementations can choose to do things like attach additional +// information (such as stack traces) on calls to Error(). Error() messages are +// always logged, regardless of the current verbosity. If there is no error +// instance available, passing nil is valid. +// +// # Verbosity +// +// Often we want to log information only when the application in "verbose +// mode". To write log lines that are more verbose, Logger has a V() method. +// The higher the V-level of a log line, the less critical it is considered. +// Log-lines with V-levels that are not enabled (as per the LogSink) will not +// be written. Level V(0) is the default, and logger.V(0).Info() has the same +// meaning as logger.Info(). Negative V-levels have the same meaning as V(0). +// Error messages do not have a verbosity level and are always logged. 
+// +// Where we might have written: +// +// if flVerbose >= 2 { +// log.Printf("an unusual thing happened") +// } +// +// We can write: +// +// logger.V(2).Info("an unusual thing happened") +// +// # Logger Names +// +// Logger instances can have name strings so that all messages logged through +// that instance have additional context. For example, you might want to add +// a subsystem name: +// +// logger.WithName("compactor").Info("started", "time", time.Now()) +// +// The WithName() method returns a new Logger, which can be passed to +// constructors or other functions for further use. Repeated use of WithName() +// will accumulate name "segments". These name segments will be joined in some +// way by the LogSink implementation. It is strongly recommended that name +// segments contain simple identifiers (letters, digits, and hyphen), and do +// not contain characters that could muddle the log output or confuse the +// joining operation (e.g. whitespace, commas, periods, slashes, brackets, +// quotes, etc). +// +// # Saved Values +// +// Logger instances can store any number of key/value pairs, which will be +// logged alongside all messages logged through that instance. For example, +// you might want to create a Logger instance per managed object: +// +// With the standard log package, we might write: +// +// log.Printf("decided to set field foo to value %q for object %s/%s", +// targetValue, object.Namespace, object.Name) +// +// With logr we'd write: +// +// // Elsewhere: set up the logger to log the object name. +// obj.logger = mainLogger.WithValues( +// "name", obj.name, "namespace", obj.namespace) +// +// // later on... +// obj.logger.Info("setting foo", "value", targetValue) +// +// # Best Practices +// +// Logger has very few hard rules, with the goal that LogSink implementations +// might have a lot of freedom to differentiate. There are, however, some +// things to consider. +// +// The log message consists of a constant message attached to the log line. +// This should generally be a simple description of what's occurring, and should +// never be a format string. Variable information can then be attached using +// named values. +// +// Keys are arbitrary strings, but should generally be constant values. Values +// may be any Go value, but how the value is formatted is determined by the +// LogSink implementation. +// +// Logger instances are meant to be passed around by value. Code that receives +// such a value can call its methods without having to check whether the +// instance is ready for use. +// +// Calling methods with the null logger (Logger{}) as instance will crash +// because it has no LogSink. Therefore this null logger should never be passed +// around. For cases where passing a logger is optional, a pointer to Logger +// should be used. +// +// # Key Naming Conventions +// +// Keys are not strictly required to conform to any specification or regex, but +// it is recommended that they: +// - be human-readable and meaningful (not auto-generated or simple ordinals) +// - be constant (not dependent on input data) +// - contain only printable characters +// - not contain whitespace or punctuation +// - use lower case for simple keys and lowerCamelCase for more complex ones +// +// These guidelines help ensure that log data is processed properly regardless +// of the log implementation. For example, log implementations will try to +// output JSON data or will store data for later database (e.g. SQL) queries. 
+// +// While users are generally free to use key names of their choice, it's +// generally best to avoid using the following keys, as they're frequently used +// by implementations: +// - "caller": the calling information (file/line) of a particular log line +// - "error": the underlying error value in the `Error` method +// - "level": the log level +// - "logger": the name of the associated logger +// - "msg": the log message +// - "stacktrace": the stack trace associated with a particular log line or +// error (often from the `Error` message) +// - "ts": the timestamp for a log line +// +// Implementations are encouraged to make use of these keys to represent the +// above concepts, when necessary (for example, in a pure-JSON output form, it +// would be necessary to represent at least message and timestamp as ordinary +// named values). +// +// # Break Glass +// +// Implementations may choose to give callers access to the underlying +// logging implementation. The recommended pattern for this is: +// +// // Underlier exposes access to the underlying logging implementation. +// // Since callers only have a logr.Logger, they have to know which +// // implementation is in use, so this interface is less of an abstraction +// // and more of way to test type conversion. +// type Underlier interface { +// GetUnderlying() +// } +// +// Logger grants access to the sink to enable type assertions like this: +// +// func DoSomethingWithImpl(log logr.Logger) { +// if underlier, ok := log.GetSink().(impl.Underlier); ok { +// implLogger := underlier.GetUnderlying() +// ... +// } +// } +// +// Custom `With*` functions can be implemented by copying the complete +// Logger struct and replacing the sink in the copy: +// +// // WithFooBar changes the foobar parameter in the log sink and returns a +// // new logger with that modified sink. It does nothing for loggers where +// // the sink doesn't support that parameter. +// func WithFoobar(log logr.Logger, foobar int) logr.Logger { +// if foobarLogSink, ok := log.GetSink().(FoobarSink); ok { +// log = log.WithSink(foobarLogSink.WithFooBar(foobar)) +// } +// return log +// } +// +// Don't use New to construct a new Logger with a LogSink retrieved from an +// existing Logger. Source code attribution might not work correctly and +// unexported fields in Logger get lost. +// +// Beware that the same LogSink instance may be shared by different logger +// instances. Calling functions that modify the LogSink will affect all of +// those. +package logr + +import ( + "context" +) + +// New returns a new Logger instance. This is primarily used by libraries +// implementing LogSink, rather than end users. Passing a nil sink will create +// a Logger which discards all log lines. +func New(sink LogSink) Logger { + logger := Logger{} + logger.setSink(sink) + if sink != nil { + sink.Init(runtimeInfo) + } + return logger +} + +// setSink stores the sink and updates any related fields. It mutates the +// logger and thus is only safe to use for loggers that are not currently being +// used concurrently. +func (l *Logger) setSink(sink LogSink) { + l.sink = sink +} + +// GetSink returns the stored sink. +func (l Logger) GetSink() LogSink { + return l.sink +} + +// WithSink returns a copy of the logger with the new sink. +func (l Logger) WithSink(sink LogSink) Logger { + l.setSink(sink) + return l +} + +// Logger is an interface to an abstract logging implementation. This is a +// concrete type for performance reasons, but all the real work is passed on to +// a LogSink. 
Implementations of LogSink should provide their own constructors +// that return Logger, not LogSink. +// +// The underlying sink can be accessed through GetSink and be modified through +// WithSink. This enables the implementation of custom extensions (see "Break +// Glass" in the package documentation). Normally the sink should be used only +// indirectly. +type Logger struct { + sink LogSink + level int +} + +// Enabled tests whether this Logger is enabled. For example, commandline +// flags might be used to set the logging verbosity and disable some info logs. +func (l Logger) Enabled() bool { + return l.sink != nil && l.sink.Enabled(l.level) +} + +// Info logs a non-error message with the given key/value pairs as context. +// +// The msg argument should be used to add some constant description to the log +// line. The key/value pairs can then be used to add additional variable +// information. The key/value pairs must alternate string keys and arbitrary +// values. +func (l Logger) Info(msg string, keysAndValues ...interface{}) { + if l.sink == nil { + return + } + if l.Enabled() { + if withHelper, ok := l.sink.(CallStackHelperLogSink); ok { + withHelper.GetCallStackHelper()() + } + l.sink.Info(l.level, msg, keysAndValues...) + } +} + +// Error logs an error, with the given message and key/value pairs as context. +// It functions similarly to Info, but may have unique behavior, and should be +// preferred for logging errors (see the package documentations for more +// information). The log message will always be emitted, regardless of +// verbosity level. +// +// The msg argument should be used to add context to any underlying error, +// while the err argument should be used to attach the actual error that +// triggered this log line, if present. The err parameter is optional +// and nil may be passed instead of an error instance. +func (l Logger) Error(err error, msg string, keysAndValues ...interface{}) { + if l.sink == nil { + return + } + if withHelper, ok := l.sink.(CallStackHelperLogSink); ok { + withHelper.GetCallStackHelper()() + } + l.sink.Error(err, msg, keysAndValues...) +} + +// V returns a new Logger instance for a specific verbosity level, relative to +// this Logger. In other words, V-levels are additive. A higher verbosity +// level means a log message is less important. Negative V-levels are treated +// as 0. +func (l Logger) V(level int) Logger { + if l.sink == nil { + return l + } + if level < 0 { + level = 0 + } + l.level += level + return l +} + +// WithValues returns a new Logger instance with additional key/value pairs. +// See Info for documentation on how key/value pairs work. +func (l Logger) WithValues(keysAndValues ...interface{}) Logger { + if l.sink == nil { + return l + } + l.setSink(l.sink.WithValues(keysAndValues...)) + return l +} + +// WithName returns a new Logger instance with the specified name element added +// to the Logger's name. Successive calls with WithName append additional +// suffixes to the Logger's name. It's strongly recommended that name segments +// contain only letters, digits, and hyphens (see the package documentation for +// more information). +func (l Logger) WithName(name string) Logger { + if l.sink == nil { + return l + } + l.setSink(l.sink.WithName(name)) + return l +} + +// WithCallDepth returns a Logger instance that offsets the call stack by the +// specified number of frames when logging call site information, if possible. 
+// This is useful for users who have helper functions between the "real" call +// site and the actual calls to Logger methods. If depth is 0 the attribution +// should be to the direct caller of this function. If depth is 1 the +// attribution should skip 1 call frame, and so on. Successive calls to this +// are additive. +// +// If the underlying log implementation supports a WithCallDepth(int) method, +// it will be called and the result returned. If the implementation does not +// support CallDepthLogSink, the original Logger will be returned. +// +// To skip one level, WithCallStackHelper() should be used instead of +// WithCallDepth(1) because it works with implementions that support the +// CallDepthLogSink and/or CallStackHelperLogSink interfaces. +func (l Logger) WithCallDepth(depth int) Logger { + if l.sink == nil { + return l + } + if withCallDepth, ok := l.sink.(CallDepthLogSink); ok { + l.setSink(withCallDepth.WithCallDepth(depth)) + } + return l +} + +// WithCallStackHelper returns a new Logger instance that skips the direct +// caller when logging call site information, if possible. This is useful for +// users who have helper functions between the "real" call site and the actual +// calls to Logger methods and want to support loggers which depend on marking +// each individual helper function, like loggers based on testing.T. +// +// In addition to using that new logger instance, callers also must call the +// returned function. +// +// If the underlying log implementation supports a WithCallDepth(int) method, +// WithCallDepth(1) will be called to produce a new logger. If it supports a +// WithCallStackHelper() method, that will be also called. If the +// implementation does not support either of these, the original Logger will be +// returned. +func (l Logger) WithCallStackHelper() (func(), Logger) { + if l.sink == nil { + return func() {}, l + } + var helper func() + if withCallDepth, ok := l.sink.(CallDepthLogSink); ok { + l.setSink(withCallDepth.WithCallDepth(1)) + } + if withHelper, ok := l.sink.(CallStackHelperLogSink); ok { + helper = withHelper.GetCallStackHelper() + } else { + helper = func() {} + } + return helper, l +} + +// IsZero returns true if this logger is an uninitialized zero value +func (l Logger) IsZero() bool { + return l.sink == nil +} + +// contextKey is how we find Loggers in a context.Context. +type contextKey struct{} + +// FromContext returns a Logger from ctx or an error if no Logger is found. +func FromContext(ctx context.Context) (Logger, error) { + if v, ok := ctx.Value(contextKey{}).(Logger); ok { + return v, nil + } + + return Logger{}, notFoundError{} +} + +// notFoundError exists to carry an IsNotFound method. +type notFoundError struct{} + +func (notFoundError) Error() string { + return "no logr.Logger was present" +} + +func (notFoundError) IsNotFound() bool { + return true +} + +// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this +// returns a Logger that discards all log messages. +func FromContextOrDiscard(ctx context.Context) Logger { + if v, ok := ctx.Value(contextKey{}).(Logger); ok { + return v + } + + return Discard() +} + +// NewContext returns a new Context, derived from ctx, which carries the +// provided Logger. +func NewContext(ctx context.Context, logger Logger) context.Context { + return context.WithValue(ctx, contextKey{}, logger) +} + +// RuntimeInfo holds information that the logr "core" library knows which +// LogSinks might want to know. 
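A minimal usage sketch (illustrative only, not part of the vendored logr file) showing how a caller wires the Logger API above to a LogSink. It assumes the stdr adapter vendored later in this patch; every call used here is defined in the vendored sources:

package main

import (
	"context"
	"log"
	"os"

	"github.com/go-logr/logr"
	"github.com/go-logr/stdr"
)

func main() {
	// Build a logr.Logger backed by Go's standard log package.
	logger := stdr.New(log.New(os.Stderr, "", log.LstdFlags))
	logger = logger.WithName("registry").WithValues("component", "redis-cache")

	stdr.SetVerbosity(1) // allow V(0) and V(1) messages through
	logger.Info("starting")
	logger.V(1).Info("dialing", "addr", "localhost:6379")
	logger.Error(nil, "example error line") // err may be nil

	// Carry the logger through a context and recover it later.
	ctx := logr.NewContext(context.Background(), logger)
	logr.FromContextOrDiscard(ctx).Info("hello from context")
}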
+type RuntimeInfo struct { + // CallDepth is the number of call frames the logr library adds between the + // end-user and the LogSink. LogSink implementations which choose to print + // the original logging site (e.g. file & line) should climb this many + // additional frames to find it. + CallDepth int +} + +// runtimeInfo is a static global. It must not be changed at run time. +var runtimeInfo = RuntimeInfo{ + CallDepth: 1, +} + +// LogSink represents a logging implementation. End-users will generally not +// interact with this type. +type LogSink interface { + // Init receives optional information about the logr library for LogSink + // implementations that need it. + Init(info RuntimeInfo) + + // Enabled tests whether this LogSink is enabled at the specified V-level. + // For example, commandline flags might be used to set the logging + // verbosity and disable some info logs. + Enabled(level int) bool + + // Info logs a non-error message with the given key/value pairs as context. + // The level argument is provided for optional logging. This method will + // only be called when Enabled(level) is true. See Logger.Info for more + // details. + Info(level int, msg string, keysAndValues ...interface{}) + + // Error logs an error, with the given message and key/value pairs as + // context. See Logger.Error for more details. + Error(err error, msg string, keysAndValues ...interface{}) + + // WithValues returns a new LogSink with additional key/value pairs. See + // Logger.WithValues for more details. + WithValues(keysAndValues ...interface{}) LogSink + + // WithName returns a new LogSink with the specified name appended. See + // Logger.WithName for more details. + WithName(name string) LogSink +} + +// CallDepthLogSink represents a LogSink that knows how to climb the call stack +// to identify the original call site and can offset the depth by a specified +// number of frames. This is useful for users who have helper functions +// between the "real" call site and the actual calls to Logger methods. +// Implementations that log information about the call site (such as file, +// function, or line) would otherwise log information about the intermediate +// helper functions. +// +// This is an optional interface and implementations are not required to +// support it. +type CallDepthLogSink interface { + // WithCallDepth returns a LogSink that will offset the call + // stack by the specified number of frames when logging call + // site information. + // + // If depth is 0, the LogSink should skip exactly the number + // of call frames defined in RuntimeInfo.CallDepth when Info + // or Error are called, i.e. the attribution should be to the + // direct caller of Logger.Info or Logger.Error. + // + // If depth is 1 the attribution should skip 1 call frame, and so on. + // Successive calls to this are additive. + WithCallDepth(depth int) LogSink +} + +// CallStackHelperLogSink represents a LogSink that knows how to climb +// the call stack to identify the original call site and can skip +// intermediate helper functions if they mark themselves as +// helper. Go's testing package uses that approach. +// +// This is useful for users who have helper functions between the +// "real" call site and the actual calls to Logger methods. +// Implementations that log information about the call site (such as +// file, function, or line) would otherwise log information about the +// intermediate helper functions. +// +// This is an optional interface and implementations are not required +// to support it. 
Implementations that choose to support this must not +// simply implement it as WithCallDepth(1), because +// Logger.WithCallStackHelper will call both methods if they are +// present. This should only be implemented for LogSinks that actually +// need it, as with testing.T. +type CallStackHelperLogSink interface { + // GetCallStackHelper returns a function that must be called + // to mark the direct caller as helper function when logging + // call site information. + GetCallStackHelper() func() +} + +// Marshaler is an optional interface that logged values may choose to +// implement. Loggers with structured output, such as JSON, should +// log the object return by the MarshalLog method instead of the +// original value. +type Marshaler interface { + // MarshalLog can be used to: + // - ensure that structs are not logged as strings when the original + // value has a String method: return a different type without a + // String method + // - select which fields of a complex type should get logged: + // return a simpler struct with fewer fields + // - log unexported fields: return a different struct + // with exported fields + // + // It may return any value of any type. + MarshalLog() interface{} +} diff --git a/vendor/github.com/go-logr/stdr/LICENSE b/vendor/github.com/go-logr/stdr/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/github.com/go-logr/stdr/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/go-logr/stdr/README.md b/vendor/github.com/go-logr/stdr/README.md new file mode 100644 index 00000000..51586678 --- /dev/null +++ b/vendor/github.com/go-logr/stdr/README.md @@ -0,0 +1,6 @@ +# Minimal Go logging using logr and Go's standard library + +[![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/stdr.svg)](https://pkg.go.dev/github.com/go-logr/stdr) + +This package implements the [logr interface](https://github.com/go-logr/logr) +in terms of Go's standard log package(https://pkg.go.dev/log). diff --git a/vendor/github.com/go-logr/stdr/stdr.go b/vendor/github.com/go-logr/stdr/stdr.go new file mode 100644 index 00000000..93a8aab5 --- /dev/null +++ b/vendor/github.com/go-logr/stdr/stdr.go @@ -0,0 +1,170 @@ +/* +Copyright 2019 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package stdr implements github.com/go-logr/logr.Logger in terms of +// Go's standard log package. +package stdr + +import ( + "log" + "os" + + "github.com/go-logr/logr" + "github.com/go-logr/logr/funcr" +) + +// The global verbosity level. See SetVerbosity(). +var globalVerbosity int + +// SetVerbosity sets the global level against which all info logs will be +// compared. If this is greater than or equal to the "V" of the logger, the +// message will be logged. A higher value here means more logs will be written. +// The previous verbosity value is returned. This is not concurrent-safe - +// callers must be sure to call it from only one goroutine. +func SetVerbosity(v int) int { + old := globalVerbosity + globalVerbosity = v + return old +} + +// New returns a logr.Logger which is implemented by Go's standard log package, +// or something like it. If std is nil, this will use a default logger +// instead. +// +// Example: stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile))) +func New(std StdLogger) logr.Logger { + return NewWithOptions(std, Options{}) +} + +// NewWithOptions returns a logr.Logger which is implemented by Go's standard +// log package, or something like it. See New for details. +func NewWithOptions(std StdLogger, opts Options) logr.Logger { + if std == nil { + // Go's log.Default() is only available in 1.16 and higher. + std = log.New(os.Stderr, "", log.LstdFlags) + } + + if opts.Depth < 0 { + opts.Depth = 0 + } + + fopts := funcr.Options{ + LogCaller: funcr.MessageClass(opts.LogCaller), + } + + sl := &logger{ + Formatter: funcr.NewFormatter(fopts), + std: std, + } + + // For skipping our own logger.Info/Error. + sl.Formatter.AddCallDepth(1 + opts.Depth) + + return logr.New(sl) +} + +// Options carries parameters which influence the way logs are generated. +type Options struct { + // Depth biases the assumed number of call frames to the "true" caller. + // This is useful when the calling code calls a function which then calls + // stdr (e.g. a logging shim to another API). Values less than zero will + // be treated as zero. + Depth int + + // LogCaller tells stdr to add a "caller" key to some or all log lines. + // Go's log package has options to log this natively, too. + LogCaller MessageClass + + // TODO: add an option to log the date/time +} + +// MessageClass indicates which category or categories of messages to consider. +type MessageClass int + +const ( + // None ignores all message classes. + None MessageClass = iota + // All considers all message classes. + All + // Info only considers info messages. + Info + // Error only considers error messages. + Error +) + +// StdLogger is the subset of the Go stdlib log.Logger API that is needed for +// this adapter. +type StdLogger interface { + // Output is the same as log.Output and log.Logger.Output. 
+ Output(calldepth int, logline string) error +} + +type logger struct { + funcr.Formatter + std StdLogger +} + +var _ logr.LogSink = &logger{} +var _ logr.CallDepthLogSink = &logger{} + +func (l logger) Enabled(level int) bool { + return globalVerbosity >= level +} + +func (l logger) Info(level int, msg string, kvList ...interface{}) { + prefix, args := l.FormatInfo(level, msg, kvList) + if prefix != "" { + args = prefix + ": " + args + } + _ = l.std.Output(l.Formatter.GetDepth()+1, args) +} + +func (l logger) Error(err error, msg string, kvList ...interface{}) { + prefix, args := l.FormatError(err, msg, kvList) + if prefix != "" { + args = prefix + ": " + args + } + _ = l.std.Output(l.Formatter.GetDepth()+1, args) +} + +func (l logger) WithName(name string) logr.LogSink { + l.Formatter.AddName(name) + return &l +} + +func (l logger) WithValues(kvList ...interface{}) logr.LogSink { + l.Formatter.AddValues(kvList) + return &l +} + +func (l logger) WithCallDepth(depth int) logr.LogSink { + l.Formatter.AddCallDepth(depth) + return &l +} + +// Underlier exposes access to the underlying logging implementation. Since +// callers only have a logr.Logger, they have to know which implementation is +// in use, so this interface is less of an abstraction and more of way to test +// type conversion. +type Underlier interface { + GetUnderlying() StdLogger +} + +// GetUnderlying returns the StdLogger underneath this logger. Since StdLogger +// is itself an interface, the result may or may not be a Go log.Logger. +func (l logger) GetUnderlying() StdLogger { + return l.std +} diff --git a/vendor/github.com/gomodule/redigo/redis/commandinfo.go b/vendor/github.com/gomodule/redigo/redis/commandinfo.go deleted file mode 100644 index b6df6a25..00000000 --- a/vendor/github.com/gomodule/redigo/redis/commandinfo.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2014 Gary Burd -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package redis - -import ( - "strings" -) - -const ( - connectionWatchState = 1 << iota - connectionMultiState - connectionSubscribeState - connectionMonitorState -) - -type commandInfo struct { - // Set or Clear these states on connection. 
- Set, Clear int -} - -var commandInfos = map[string]commandInfo{ - "WATCH": {Set: connectionWatchState}, - "UNWATCH": {Clear: connectionWatchState}, - "MULTI": {Set: connectionMultiState}, - "EXEC": {Clear: connectionWatchState | connectionMultiState}, - "DISCARD": {Clear: connectionWatchState | connectionMultiState}, - "PSUBSCRIBE": {Set: connectionSubscribeState}, - "SUBSCRIBE": {Set: connectionSubscribeState}, - "MONITOR": {Set: connectionMonitorState}, -} - -func init() { - for n, ci := range commandInfos { - commandInfos[strings.ToLower(n)] = ci - } -} - -func lookupCommandInfo(commandName string) commandInfo { - if ci, ok := commandInfos[commandName]; ok { - return ci - } - return commandInfos[strings.ToUpper(commandName)] -} diff --git a/vendor/github.com/gomodule/redigo/redis/conn.go b/vendor/github.com/gomodule/redigo/redis/conn.go deleted file mode 100644 index 33b43be6..00000000 --- a/vendor/github.com/gomodule/redigo/redis/conn.go +++ /dev/null @@ -1,736 +0,0 @@ -// Copyright 2012 Gary Burd -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package redis - -import ( - "bufio" - "bytes" - "context" - "crypto/tls" - "errors" - "fmt" - "io" - "net" - "net/url" - "regexp" - "strconv" - "sync" - "time" -) - -var ( - _ ConnWithTimeout = (*conn)(nil) -) - -// conn is the low-level implementation of Conn -type conn struct { - // Shared - mu sync.Mutex - pending int - err error - conn net.Conn - - // Read - readTimeout time.Duration - br *bufio.Reader - - // Write - writeTimeout time.Duration - bw *bufio.Writer - - // Scratch space for formatting argument length. - // '*' or '$', length, "\r\n" - lenScratch [32]byte - - // Scratch space for formatting integers and floats. - numScratch [40]byte -} - -// DialTimeout acts like Dial but takes timeouts for establishing the -// connection to the server, writing a command and reading a reply. -// -// Deprecated: Use Dial with options instead. -func DialTimeout(network, address string, connectTimeout, readTimeout, writeTimeout time.Duration) (Conn, error) { - return Dial(network, address, - DialConnectTimeout(connectTimeout), - DialReadTimeout(readTimeout), - DialWriteTimeout(writeTimeout)) -} - -// DialOption specifies an option for dialing a Redis server. -type DialOption struct { - f func(*dialOptions) -} - -type dialOptions struct { - readTimeout time.Duration - writeTimeout time.Duration - dialer *net.Dialer - dialContext func(ctx context.Context, network, addr string) (net.Conn, error) - db int - username string - password string - clientName string - useTLS bool - skipVerify bool - tlsConfig *tls.Config -} - -// DialReadTimeout specifies the timeout for reading a single command reply. -func DialReadTimeout(d time.Duration) DialOption { - return DialOption{func(do *dialOptions) { - do.readTimeout = d - }} -} - -// DialWriteTimeout specifies the timeout for writing a single command. 
-func DialWriteTimeout(d time.Duration) DialOption { - return DialOption{func(do *dialOptions) { - do.writeTimeout = d - }} -} - -// DialConnectTimeout specifies the timeout for connecting to the Redis server when -// no DialNetDial option is specified. -func DialConnectTimeout(d time.Duration) DialOption { - return DialOption{func(do *dialOptions) { - do.dialer.Timeout = d - }} -} - -// DialKeepAlive specifies the keep-alive period for TCP connections to the Redis server -// when no DialNetDial option is specified. -// If zero, keep-alives are not enabled. If no DialKeepAlive option is specified then -// the default of 5 minutes is used to ensure that half-closed TCP sessions are detected. -func DialKeepAlive(d time.Duration) DialOption { - return DialOption{func(do *dialOptions) { - do.dialer.KeepAlive = d - }} -} - -// DialNetDial specifies a custom dial function for creating TCP -// connections, otherwise a net.Dialer customized via the other options is used. -// DialNetDial overrides DialConnectTimeout and DialKeepAlive. -func DialNetDial(dial func(network, addr string) (net.Conn, error)) DialOption { - return DialOption{func(do *dialOptions) { - do.dialContext = func(ctx context.Context, network, addr string) (net.Conn, error) { - return dial(network, addr) - } - }} -} - -// DialContextFunc specifies a custom dial function with context for creating TCP -// connections, otherwise a net.Dialer customized via the other options is used. -// DialContextFunc overrides DialConnectTimeout and DialKeepAlive. -func DialContextFunc(f func(ctx context.Context, network, addr string) (net.Conn, error)) DialOption { - return DialOption{func(do *dialOptions) { - do.dialContext = f - }} -} - -// DialDatabase specifies the database to select when dialing a connection. -func DialDatabase(db int) DialOption { - return DialOption{func(do *dialOptions) { - do.db = db - }} -} - -// DialPassword specifies the password to use when connecting to -// the Redis server. -func DialPassword(password string) DialOption { - return DialOption{func(do *dialOptions) { - do.password = password - }} -} - -// DialUsername specifies the username to use when connecting to -// the Redis server when Redis ACLs are used. -func DialUsername(username string) DialOption { - return DialOption{func(do *dialOptions) { - do.username = username - }} -} - -// DialClientName specifies a client name to be used -// by the Redis server connection. -func DialClientName(name string) DialOption { - return DialOption{func(do *dialOptions) { - do.clientName = name - }} -} - -// DialTLSConfig specifies the config to use when a TLS connection is dialed. -// Has no effect when not dialing a TLS connection. -func DialTLSConfig(c *tls.Config) DialOption { - return DialOption{func(do *dialOptions) { - do.tlsConfig = c - }} -} - -// DialTLSSkipVerify disables server name verification when connecting over -// TLS. Has no effect when not dialing a TLS connection. -func DialTLSSkipVerify(skip bool) DialOption { - return DialOption{func(do *dialOptions) { - do.skipVerify = skip - }} -} - -// DialUseTLS specifies whether TLS should be used when connecting to the -// server. This option is ignore by DialURL. -func DialUseTLS(useTLS bool) DialOption { - return DialOption{func(do *dialOptions) { - do.useTLS = useTLS - }} -} - -// Dial connects to the Redis server at the given network and -// address using the specified options. 
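For orientation while reading these deletions: the redigo dial options above have rough counterparts in the Options struct of the go-redis v9 client this patch vendors. A sketch under that assumption (field names taken from the go-redis v9 API, not from this diff):

package main

import (
	"crypto/tls"
	"time"

	"github.com/redis/go-redis/v9"
)

func newClient() *redis.Client {
	// Roughly the go-redis counterparts of DialDatabase, DialUsername,
	// DialPassword, DialConnectTimeout, DialReadTimeout, DialWriteTimeout
	// and DialTLSConfig shown above.
	return redis.NewClient(&redis.Options{
		Addr:         "localhost:6379",
		Username:     "default",
		Password:     "secret",
		DB:           0,
		DialTimeout:  5 * time.Second,
		ReadTimeout:  3 * time.Second,
		WriteTimeout: 3 * time.Second,
		TLSConfig:    &tls.Config{MinVersion: tls.VersionTLS12},
	})
}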
-func Dial(network, address string, options ...DialOption) (Conn, error) { - return DialContext(context.Background(), network, address, options...) -} - -// DialContext connects to the Redis server at the given network and -// address using the specified options and context. -func DialContext(ctx context.Context, network, address string, options ...DialOption) (Conn, error) { - do := dialOptions{ - dialer: &net.Dialer{ - KeepAlive: time.Minute * 5, - }, - } - for _, option := range options { - option.f(&do) - } - if do.dialContext == nil { - do.dialContext = do.dialer.DialContext - } - - netConn, err := do.dialContext(ctx, network, address) - if err != nil { - return nil, err - } - - if do.useTLS { - var tlsConfig *tls.Config - if do.tlsConfig == nil { - tlsConfig = &tls.Config{InsecureSkipVerify: do.skipVerify} - } else { - tlsConfig = cloneTLSConfig(do.tlsConfig) - } - if tlsConfig.ServerName == "" { - host, _, err := net.SplitHostPort(address) - if err != nil { - netConn.Close() - return nil, err - } - tlsConfig.ServerName = host - } - - tlsConn := tls.Client(netConn, tlsConfig) - if err := tlsConn.Handshake(); err != nil { - netConn.Close() - return nil, err - } - netConn = tlsConn - } - - c := &conn{ - conn: netConn, - bw: bufio.NewWriter(netConn), - br: bufio.NewReader(netConn), - readTimeout: do.readTimeout, - writeTimeout: do.writeTimeout, - } - - if do.password != "" { - authArgs := make([]interface{}, 0, 2) - if do.username != "" { - authArgs = append(authArgs, do.username) - } - authArgs = append(authArgs, do.password) - if _, err := c.Do("AUTH", authArgs...); err != nil { - netConn.Close() - return nil, err - } - } - - if do.clientName != "" { - if _, err := c.Do("CLIENT", "SETNAME", do.clientName); err != nil { - netConn.Close() - return nil, err - } - } - - if do.db != 0 { - if _, err := c.Do("SELECT", do.db); err != nil { - netConn.Close() - return nil, err - } - } - - return c, nil -} - -var pathDBRegexp = regexp.MustCompile(`/(\d*)\z`) - -// DialURL connects to a Redis server at the given URL using the Redis -// URI scheme. URLs should follow the draft IANA specification for the -// scheme (https://www.iana.org/assignments/uri-schemes/prov/redis). -func DialURL(rawurl string, options ...DialOption) (Conn, error) { - u, err := url.Parse(rawurl) - if err != nil { - return nil, err - } - - if u.Scheme != "redis" && u.Scheme != "rediss" { - return nil, fmt.Errorf("invalid redis URL scheme: %s", u.Scheme) - } - - if u.Opaque != "" { - return nil, fmt.Errorf("invalid redis URL, url is opaque: %s", rawurl) - } - - // As per the IANA draft spec, the host defaults to localhost and - // the port defaults to 6379. - host, port, err := net.SplitHostPort(u.Host) - if err != nil { - // assume port is missing - host = u.Host - port = "6379" - } - if host == "" { - host = "localhost" - } - address := net.JoinHostPort(host, port) - - if u.User != nil { - password, isSet := u.User.Password() - if isSet { - options = append(options, DialUsername(u.User.Username()), DialPassword(password)) - } - } - - match := pathDBRegexp.FindStringSubmatch(u.Path) - if len(match) == 2 { - db := 0 - if len(match[1]) > 0 { - db, err = strconv.Atoi(match[1]) - if err != nil { - return nil, fmt.Errorf("invalid database: %s", u.Path[1:]) - } - } - if db != 0 { - options = append(options, DialDatabase(db)) - } - } else if u.Path != "" { - return nil, fmt.Errorf("invalid database: %s", u.Path[1:]) - } - - options = append(options, DialUseTLS(u.Scheme == "rediss")) - - return Dial("tcp", address, options...) 
-} - -// NewConn returns a new Redigo connection for the given net connection. -func NewConn(netConn net.Conn, readTimeout, writeTimeout time.Duration) Conn { - return &conn{ - conn: netConn, - bw: bufio.NewWriter(netConn), - br: bufio.NewReader(netConn), - readTimeout: readTimeout, - writeTimeout: writeTimeout, - } -} - -func (c *conn) Close() error { - c.mu.Lock() - err := c.err - if c.err == nil { - c.err = errors.New("redigo: closed") - err = c.conn.Close() - } - c.mu.Unlock() - return err -} - -func (c *conn) fatal(err error) error { - c.mu.Lock() - if c.err == nil { - c.err = err - // Close connection to force errors on subsequent calls and to unblock - // other reader or writer. - c.conn.Close() - } - c.mu.Unlock() - return err -} - -func (c *conn) Err() error { - c.mu.Lock() - err := c.err - c.mu.Unlock() - return err -} - -func (c *conn) writeLen(prefix byte, n int) error { - c.lenScratch[len(c.lenScratch)-1] = '\n' - c.lenScratch[len(c.lenScratch)-2] = '\r' - i := len(c.lenScratch) - 3 - for { - c.lenScratch[i] = byte('0' + n%10) - i -= 1 - n = n / 10 - if n == 0 { - break - } - } - c.lenScratch[i] = prefix - _, err := c.bw.Write(c.lenScratch[i:]) - return err -} - -func (c *conn) writeString(s string) error { - c.writeLen('$', len(s)) - c.bw.WriteString(s) - _, err := c.bw.WriteString("\r\n") - return err -} - -func (c *conn) writeBytes(p []byte) error { - c.writeLen('$', len(p)) - c.bw.Write(p) - _, err := c.bw.WriteString("\r\n") - return err -} - -func (c *conn) writeInt64(n int64) error { - return c.writeBytes(strconv.AppendInt(c.numScratch[:0], n, 10)) -} - -func (c *conn) writeFloat64(n float64) error { - return c.writeBytes(strconv.AppendFloat(c.numScratch[:0], n, 'g', -1, 64)) -} - -func (c *conn) writeCommand(cmd string, args []interface{}) error { - c.writeLen('*', 1+len(args)) - if err := c.writeString(cmd); err != nil { - return err - } - for _, arg := range args { - if err := c.writeArg(arg, true); err != nil { - return err - } - } - return nil -} - -func (c *conn) writeArg(arg interface{}, argumentTypeOK bool) (err error) { - switch arg := arg.(type) { - case string: - return c.writeString(arg) - case []byte: - return c.writeBytes(arg) - case int: - return c.writeInt64(int64(arg)) - case int64: - return c.writeInt64(arg) - case float64: - return c.writeFloat64(arg) - case bool: - if arg { - return c.writeString("1") - } else { - return c.writeString("0") - } - case nil: - return c.writeString("") - case Argument: - if argumentTypeOK { - return c.writeArg(arg.RedisArg(), false) - } - // See comment in default clause below. - var buf bytes.Buffer - fmt.Fprint(&buf, arg) - return c.writeBytes(buf.Bytes()) - default: - // This default clause is intended to handle builtin numeric types. - // The function should return an error for other types, but this is not - // done for compatibility with previous versions of the package. - var buf bytes.Buffer - fmt.Fprint(&buf, arg) - return c.writeBytes(buf.Bytes()) - } -} - -type protocolError string - -func (pe protocolError) Error() string { - return fmt.Sprintf("redigo: %s (possible server error or unsupported concurrent read by application)", string(pe)) -} - -// readLine reads a line of input from the RESP stream. -func (c *conn) readLine() ([]byte, error) { - // To avoid allocations, attempt to read the line using ReadSlice. This - // call typically succeeds. The known case where the call fails is when - // reading the output from the MONITOR command. 
- p, err := c.br.ReadSlice('\n') - if err == bufio.ErrBufferFull { - // The line does not fit in the bufio.Reader's buffer. Fall back to - // allocating a buffer for the line. - buf := append([]byte{}, p...) - for err == bufio.ErrBufferFull { - p, err = c.br.ReadSlice('\n') - buf = append(buf, p...) - } - p = buf - } - if err != nil { - return nil, err - } - i := len(p) - 2 - if i < 0 || p[i] != '\r' { - return nil, protocolError("bad response line terminator") - } - return p[:i], nil -} - -// parseLen parses bulk string and array lengths. -func parseLen(p []byte) (int, error) { - if len(p) == 0 { - return -1, protocolError("malformed length") - } - - if p[0] == '-' && len(p) == 2 && p[1] == '1' { - // handle $-1 and $-1 null replies. - return -1, nil - } - - var n int - for _, b := range p { - n *= 10 - if b < '0' || b > '9' { - return -1, protocolError("illegal bytes in length") - } - n += int(b - '0') - } - - return n, nil -} - -// parseInt parses an integer reply. -func parseInt(p []byte) (interface{}, error) { - if len(p) == 0 { - return 0, protocolError("malformed integer") - } - - var negate bool - if p[0] == '-' { - negate = true - p = p[1:] - if len(p) == 0 { - return 0, protocolError("malformed integer") - } - } - - var n int64 - for _, b := range p { - n *= 10 - if b < '0' || b > '9' { - return 0, protocolError("illegal bytes in length") - } - n += int64(b - '0') - } - - if negate { - n = -n - } - return n, nil -} - -var ( - okReply interface{} = "OK" - pongReply interface{} = "PONG" -) - -func (c *conn) readReply() (interface{}, error) { - line, err := c.readLine() - if err != nil { - return nil, err - } - if len(line) == 0 { - return nil, protocolError("short response line") - } - switch line[0] { - case '+': - switch string(line[1:]) { - case "OK": - // Avoid allocation for frequent "+OK" response. 
- return okReply, nil - case "PONG": - // Avoid allocation in PING command benchmarks :) - return pongReply, nil - default: - return string(line[1:]), nil - } - case '-': - return Error(string(line[1:])), nil - case ':': - return parseInt(line[1:]) - case '$': - n, err := parseLen(line[1:]) - if n < 0 || err != nil { - return nil, err - } - p := make([]byte, n) - _, err = io.ReadFull(c.br, p) - if err != nil { - return nil, err - } - if line, err := c.readLine(); err != nil { - return nil, err - } else if len(line) != 0 { - return nil, protocolError("bad bulk string format") - } - return p, nil - case '*': - n, err := parseLen(line[1:]) - if n < 0 || err != nil { - return nil, err - } - r := make([]interface{}, n) - for i := range r { - r[i], err = c.readReply() - if err != nil { - return nil, err - } - } - return r, nil - } - return nil, protocolError("unexpected response line") -} - -func (c *conn) Send(cmd string, args ...interface{}) error { - c.mu.Lock() - c.pending += 1 - c.mu.Unlock() - if c.writeTimeout != 0 { - c.conn.SetWriteDeadline(time.Now().Add(c.writeTimeout)) - } - if err := c.writeCommand(cmd, args); err != nil { - return c.fatal(err) - } - return nil -} - -func (c *conn) Flush() error { - if c.writeTimeout != 0 { - c.conn.SetWriteDeadline(time.Now().Add(c.writeTimeout)) - } - if err := c.bw.Flush(); err != nil { - return c.fatal(err) - } - return nil -} - -func (c *conn) Receive() (interface{}, error) { - return c.ReceiveWithTimeout(c.readTimeout) -} - -func (c *conn) ReceiveWithTimeout(timeout time.Duration) (reply interface{}, err error) { - var deadline time.Time - if timeout != 0 { - deadline = time.Now().Add(timeout) - } - c.conn.SetReadDeadline(deadline) - - if reply, err = c.readReply(); err != nil { - return nil, c.fatal(err) - } - // When using pub/sub, the number of receives can be greater than the - // number of sends. To enable normal use of the connection after - // unsubscribing from all channels, we do not decrement pending to a - // negative value. - // - // The pending field is decremented after the reply is read to handle the - // case where Receive is called before Send. - c.mu.Lock() - if c.pending > 0 { - c.pending -= 1 - } - c.mu.Unlock() - if err, ok := reply.(Error); ok { - return nil, err - } - return -} - -func (c *conn) Do(cmd string, args ...interface{}) (interface{}, error) { - return c.DoWithTimeout(c.readTimeout, cmd, args...) 
-} - -func (c *conn) DoWithTimeout(readTimeout time.Duration, cmd string, args ...interface{}) (interface{}, error) { - c.mu.Lock() - pending := c.pending - c.pending = 0 - c.mu.Unlock() - - if cmd == "" && pending == 0 { - return nil, nil - } - - if c.writeTimeout != 0 { - c.conn.SetWriteDeadline(time.Now().Add(c.writeTimeout)) - } - - if cmd != "" { - if err := c.writeCommand(cmd, args); err != nil { - return nil, c.fatal(err) - } - } - - if err := c.bw.Flush(); err != nil { - return nil, c.fatal(err) - } - - var deadline time.Time - if readTimeout != 0 { - deadline = time.Now().Add(readTimeout) - } - c.conn.SetReadDeadline(deadline) - - if cmd == "" { - reply := make([]interface{}, pending) - for i := range reply { - r, e := c.readReply() - if e != nil { - return nil, c.fatal(e) - } - reply[i] = r - } - return reply, nil - } - - var err error - var reply interface{} - for i := 0; i <= pending; i++ { - var e error - if reply, e = c.readReply(); e != nil { - return nil, c.fatal(e) - } - if e, ok := reply.(Error); ok && err == nil { - err = e - } - } - return reply, err -} diff --git a/vendor/github.com/gomodule/redigo/redis/doc.go b/vendor/github.com/gomodule/redigo/redis/doc.go deleted file mode 100644 index 69ad506c..00000000 --- a/vendor/github.com/gomodule/redigo/redis/doc.go +++ /dev/null @@ -1,177 +0,0 @@ -// Copyright 2012 Gary Burd -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -// Package redis is a client for the Redis database. -// -// The Redigo FAQ (https://github.com/gomodule/redigo/wiki/FAQ) contains more -// documentation about this package. -// -// Connections -// -// The Conn interface is the primary interface for working with Redis. -// Applications create connections by calling the Dial, DialWithTimeout or -// NewConn functions. In the future, functions will be added for creating -// sharded and other types of connections. -// -// The application must call the connection Close method when the application -// is done with the connection. -// -// Executing Commands -// -// The Conn interface has a generic method for executing Redis commands: -// -// Do(commandName string, args ...interface{}) (reply interface{}, err error) -// -// The Redis command reference (http://redis.io/commands) lists the available -// commands. An example of using the Redis APPEND command is: -// -// n, err := conn.Do("APPEND", "key", "value") -// -// The Do method converts command arguments to bulk strings for transmission -// to the server as follows: -// -// Go Type Conversion -// []byte Sent as is -// string Sent as is -// int, int64 strconv.FormatInt(v) -// float64 strconv.FormatFloat(v, 'g', -1, 64) -// bool true -> "1", false -> "0" -// nil "" -// all other types fmt.Fprint(w, v) -// -// Redis command reply types are represented using the following Go types: -// -// Redis type Go type -// error redis.Error -// integer int64 -// simple string string -// bulk string []byte or nil if value not present. -// array []interface{} or nil if value not present. 
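The conversion table above describes the redigo Do calling convention being removed. For comparison, the go-redis v9 client vendored by this patch returns typed command results, and its TxPipelined callback covers the Send/Flush/Receive pipelining that the rest of this file documents. A brief sketch under the standard go-redis v9 API (illustrative, not taken from this diff):

package main

import (
	"context"
	"fmt"

	"github.com/redis/go-redis/v9"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	// Typed results replace redigo's reply helpers (redis.Bool, redis.Int, ...).
	if err := rdb.Set(ctx, "foo", "bar", 0).Err(); err != nil {
		panic(err)
	}
	val, err := rdb.Get(ctx, "foo").Result()
	fmt.Println(val, err)

	// TxPipelined queues commands and runs them in a MULTI/EXEC block.
	var incr *redis.IntCmd
	_, err = rdb.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
		incr = pipe.Incr(ctx, "foo")
		pipe.Incr(ctx, "bar")
		return nil
	})
	fmt.Println(incr.Val(), err)
}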
-// -// Use type assertions or the reply helper functions to convert from -// interface{} to the specific Go type for the command result. -// -// Pipelining -// -// Connections support pipelining using the Send, Flush and Receive methods. -// -// Send(commandName string, args ...interface{}) error -// Flush() error -// Receive() (reply interface{}, err error) -// -// Send writes the command to the connection's output buffer. Flush flushes the -// connection's output buffer to the server. Receive reads a single reply from -// the server. The following example shows a simple pipeline. -// -// c.Send("SET", "foo", "bar") -// c.Send("GET", "foo") -// c.Flush() -// c.Receive() // reply from SET -// v, err = c.Receive() // reply from GET -// -// The Do method combines the functionality of the Send, Flush and Receive -// methods. The Do method starts by writing the command and flushing the output -// buffer. Next, the Do method receives all pending replies including the reply -// for the command just sent by Do. If any of the received replies is an error, -// then Do returns the error. If there are no errors, then Do returns the last -// reply. If the command argument to the Do method is "", then the Do method -// will flush the output buffer and receive pending replies without sending a -// command. -// -// Use the Send and Do methods to implement pipelined transactions. -// -// c.Send("MULTI") -// c.Send("INCR", "foo") -// c.Send("INCR", "bar") -// r, err := c.Do("EXEC") -// fmt.Println(r) // prints [1, 1] -// -// Concurrency -// -// Connections support one concurrent caller to the Receive method and one -// concurrent caller to the Send and Flush methods. No other concurrency is -// supported including concurrent calls to the Do and Close methods. -// -// For full concurrent access to Redis, use the thread-safe Pool to get, use -// and release a connection from within a goroutine. Connections returned from -// a Pool have the concurrency restrictions described in the previous -// paragraph. -// -// Publish and Subscribe -// -// Use the Send, Flush and Receive methods to implement Pub/Sub subscribers. -// -// c.Send("SUBSCRIBE", "example") -// c.Flush() -// for { -// reply, err := c.Receive() -// if err != nil { -// return err -// } -// // process pushed message -// } -// -// The PubSubConn type wraps a Conn with convenience methods for implementing -// subscribers. The Subscribe, PSubscribe, Unsubscribe and PUnsubscribe methods -// send and flush a subscription management command. The receive method -// converts a pushed message to convenient types for use in a type switch. -// -// psc := redis.PubSubConn{Conn: c} -// psc.Subscribe("example") -// for { -// switch v := psc.Receive().(type) { -// case redis.Message: -// fmt.Printf("%s: message: %s\n", v.Channel, v.Data) -// case redis.Subscription: -// fmt.Printf("%s: %s %d\n", v.Channel, v.Kind, v.Count) -// case error: -// return v -// } -// } -// -// Reply Helpers -// -// The Bool, Int, Bytes, String, Strings and Values functions convert a reply -// to a value of a specific type. To allow convenient wrapping of calls to the -// connection Do and Receive methods, the functions take a second argument of -// type error. If the error is non-nil, then the helper function returns the -// error. If the error is nil, the function converts the reply to the specified -// type: -// -// exists, err := redis.Bool(c.Do("EXISTS", "foo")) -// if err != nil { -// // handle error return from c.Do or type conversion error. 
-// } -// -// The Scan function converts elements of a array reply to Go types: -// -// var value1 int -// var value2 string -// reply, err := redis.Values(c.Do("MGET", "key1", "key2")) -// if err != nil { -// // handle error -// } -// if _, err := redis.Scan(reply, &value1, &value2); err != nil { -// // handle error -// } -// -// Errors -// -// Connection methods return error replies from the server as type redis.Error. -// -// Call the connection Err() method to determine if the connection encountered -// non-recoverable error such as a network error or protocol parsing error. If -// Err() returns a non-nil value, then the connection is not usable and should -// be closed. -package redis diff --git a/vendor/github.com/gomodule/redigo/redis/go17.go b/vendor/github.com/gomodule/redigo/redis/go17.go deleted file mode 100644 index 5f363791..00000000 --- a/vendor/github.com/gomodule/redigo/redis/go17.go +++ /dev/null @@ -1,29 +0,0 @@ -// +build go1.7,!go1.8 - -package redis - -import "crypto/tls" - -func cloneTLSConfig(cfg *tls.Config) *tls.Config { - return &tls.Config{ - Rand: cfg.Rand, - Time: cfg.Time, - Certificates: cfg.Certificates, - NameToCertificate: cfg.NameToCertificate, - GetCertificate: cfg.GetCertificate, - RootCAs: cfg.RootCAs, - NextProtos: cfg.NextProtos, - ServerName: cfg.ServerName, - ClientAuth: cfg.ClientAuth, - ClientCAs: cfg.ClientCAs, - InsecureSkipVerify: cfg.InsecureSkipVerify, - CipherSuites: cfg.CipherSuites, - PreferServerCipherSuites: cfg.PreferServerCipherSuites, - ClientSessionCache: cfg.ClientSessionCache, - MinVersion: cfg.MinVersion, - MaxVersion: cfg.MaxVersion, - CurvePreferences: cfg.CurvePreferences, - DynamicRecordSizingDisabled: cfg.DynamicRecordSizingDisabled, - Renegotiation: cfg.Renegotiation, - } -} diff --git a/vendor/github.com/gomodule/redigo/redis/go18.go b/vendor/github.com/gomodule/redigo/redis/go18.go deleted file mode 100644 index 558363be..00000000 --- a/vendor/github.com/gomodule/redigo/redis/go18.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build go1.8 - -package redis - -import "crypto/tls" - -func cloneTLSConfig(cfg *tls.Config) *tls.Config { - return cfg.Clone() -} diff --git a/vendor/github.com/gomodule/redigo/redis/log.go b/vendor/github.com/gomodule/redigo/redis/log.go deleted file mode 100644 index a06db9d6..00000000 --- a/vendor/github.com/gomodule/redigo/redis/log.go +++ /dev/null @@ -1,146 +0,0 @@ -// Copyright 2012 Gary Burd -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package redis - -import ( - "bytes" - "fmt" - "log" - "time" -) - -var ( - _ ConnWithTimeout = (*loggingConn)(nil) -) - -// NewLoggingConn returns a logging wrapper around a connection. -func NewLoggingConn(conn Conn, logger *log.Logger, prefix string) Conn { - if prefix != "" { - prefix = prefix + "." - } - return &loggingConn{conn, logger, prefix, nil} -} - -//NewLoggingConnFilter returns a logging wrapper around a connection and a filter function. 
-func NewLoggingConnFilter(conn Conn, logger *log.Logger, prefix string, skip func(cmdName string) bool) Conn { - if prefix != "" { - prefix = prefix + "." - } - return &loggingConn{conn, logger, prefix, skip} -} - -type loggingConn struct { - Conn - logger *log.Logger - prefix string - skip func(cmdName string) bool -} - -func (c *loggingConn) Close() error { - err := c.Conn.Close() - var buf bytes.Buffer - fmt.Fprintf(&buf, "%sClose() -> (%v)", c.prefix, err) - c.logger.Output(2, buf.String()) - return err -} - -func (c *loggingConn) printValue(buf *bytes.Buffer, v interface{}) { - const chop = 32 - switch v := v.(type) { - case []byte: - if len(v) > chop { - fmt.Fprintf(buf, "%q...", v[:chop]) - } else { - fmt.Fprintf(buf, "%q", v) - } - case string: - if len(v) > chop { - fmt.Fprintf(buf, "%q...", v[:chop]) - } else { - fmt.Fprintf(buf, "%q", v) - } - case []interface{}: - if len(v) == 0 { - buf.WriteString("[]") - } else { - sep := "[" - fin := "]" - if len(v) > chop { - v = v[:chop] - fin = "...]" - } - for _, vv := range v { - buf.WriteString(sep) - c.printValue(buf, vv) - sep = ", " - } - buf.WriteString(fin) - } - default: - fmt.Fprint(buf, v) - } -} - -func (c *loggingConn) print(method, commandName string, args []interface{}, reply interface{}, err error) { - if c.skip != nil && c.skip(commandName) { - return - } - var buf bytes.Buffer - fmt.Fprintf(&buf, "%s%s(", c.prefix, method) - if method != "Receive" { - buf.WriteString(commandName) - for _, arg := range args { - buf.WriteString(", ") - c.printValue(&buf, arg) - } - } - buf.WriteString(") -> (") - if method != "Send" { - c.printValue(&buf, reply) - buf.WriteString(", ") - } - fmt.Fprintf(&buf, "%v)", err) - c.logger.Output(3, buf.String()) -} - -func (c *loggingConn) Do(commandName string, args ...interface{}) (interface{}, error) { - reply, err := c.Conn.Do(commandName, args...) - c.print("Do", commandName, args, reply, err) - return reply, err -} - -func (c *loggingConn) DoWithTimeout(timeout time.Duration, commandName string, args ...interface{}) (interface{}, error) { - reply, err := DoWithTimeout(c.Conn, timeout, commandName, args...) - c.print("DoWithTimeout", commandName, args, reply, err) - return reply, err -} - -func (c *loggingConn) Send(commandName string, args ...interface{}) error { - err := c.Conn.Send(commandName, args...) - c.print("Send", commandName, args, nil, err) - return err -} - -func (c *loggingConn) Receive() (interface{}, error) { - reply, err := c.Conn.Receive() - c.print("Receive", "", nil, reply, err) - return reply, err -} - -func (c *loggingConn) ReceiveWithTimeout(timeout time.Duration) (interface{}, error) { - reply, err := ReceiveWithTimeout(c.Conn, timeout) - c.print("ReceiveWithTimeout", "", nil, reply, err) - return reply, err -} diff --git a/vendor/github.com/gomodule/redigo/redis/pool.go b/vendor/github.com/gomodule/redigo/redis/pool.go deleted file mode 100644 index fef6bae5..00000000 --- a/vendor/github.com/gomodule/redigo/redis/pool.go +++ /dev/null @@ -1,635 +0,0 @@ -// Copyright 2012 Gary Burd -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -// License for the specific language governing permissions and limitations -// under the License. - -package redis - -import ( - "bytes" - "context" - "crypto/rand" - "crypto/sha1" - "errors" - "io" - "strconv" - "sync" - "sync/atomic" - "time" -) - -var ( - _ ConnWithTimeout = (*activeConn)(nil) - _ ConnWithTimeout = (*errorConn)(nil) -) - -var nowFunc = time.Now // for testing - -// ErrPoolExhausted is returned from a pool connection method (Do, Send, -// Receive, Flush, Err) when the maximum number of database connections in the -// pool has been reached. -var ErrPoolExhausted = errors.New("redigo: connection pool exhausted") - -var ( - errPoolClosed = errors.New("redigo: connection pool closed") - errConnClosed = errors.New("redigo: connection closed") -) - -// Pool maintains a pool of connections. The application calls the Get method -// to get a connection from the pool and the connection's Close method to -// return the connection's resources to the pool. -// -// The following example shows how to use a pool in a web application. The -// application creates a pool at application startup and makes it available to -// request handlers using a package level variable. The pool configuration used -// here is an example, not a recommendation. -// -// func newPool(addr string) *redis.Pool { -// return &redis.Pool{ -// MaxIdle: 3, -// IdleTimeout: 240 * time.Second, -// // Dial or DialContext must be set. When both are set, DialContext takes precedence over Dial. -// Dial: func () (redis.Conn, error) { return redis.Dial("tcp", addr) }, -// } -// } -// -// var ( -// pool *redis.Pool -// redisServer = flag.String("redisServer", ":6379", "") -// ) -// -// func main() { -// flag.Parse() -// pool = newPool(*redisServer) -// ... -// } -// -// A request handler gets a connection from the pool and closes the connection -// when the handler is done: -// -// func serveHome(w http.ResponseWriter, r *http.Request) { -// conn := pool.Get() -// defer conn.Close() -// ... -// } -// -// Use the Dial function to authenticate connections with the AUTH command or -// select a database with the SELECT command: -// -// pool := &redis.Pool{ -// // Other pool configuration not shown in this example. -// Dial: func () (redis.Conn, error) { -// c, err := redis.Dial("tcp", server) -// if err != nil { -// return nil, err -// } -// if _, err := c.Do("AUTH", password); err != nil { -// c.Close() -// return nil, err -// } -// if _, err := c.Do("SELECT", db); err != nil { -// c.Close() -// return nil, err -// } -// return c, nil -// }, -// } -// -// Use the TestOnBorrow function to check the health of an idle connection -// before the connection is returned to the application. This example PINGs -// connections that have been idle more than a minute: -// -// pool := &redis.Pool{ -// // Other pool configuration not shown in this example. -// TestOnBorrow: func(c redis.Conn, t time.Time) error { -// if time.Since(t) < time.Minute { -// return nil -// } -// _, err := c.Do("PING") -// return err -// }, -// } -// -type Pool struct { - // Dial is an application supplied function for creating and configuring a - // connection. - // - // The connection returned from Dial must not be in a special state - // (subscribed to pubsub channel, transaction started, ...). - Dial func() (Conn, error) - - // DialContext is an application supplied function for creating and configuring a - // connection with the given context. 
- // - // The connection returned from Dial must not be in a special state - // (subscribed to pubsub channel, transaction started, ...). - DialContext func(ctx context.Context) (Conn, error) - - // TestOnBorrow is an optional application supplied function for checking - // the health of an idle connection before the connection is used again by - // the application. Argument t is the time that the connection was returned - // to the pool. If the function returns an error, then the connection is - // closed. - TestOnBorrow func(c Conn, t time.Time) error - - // Maximum number of idle connections in the pool. - MaxIdle int - - // Maximum number of connections allocated by the pool at a given time. - // When zero, there is no limit on the number of connections in the pool. - MaxActive int - - // Close connections after remaining idle for this duration. If the value - // is zero, then idle connections are not closed. Applications should set - // the timeout to a value less than the server's timeout. - IdleTimeout time.Duration - - // If Wait is true and the pool is at the MaxActive limit, then Get() waits - // for a connection to be returned to the pool before returning. - Wait bool - - // Close connections older than this duration. If the value is zero, then - // the pool does not close connections based on age. - MaxConnLifetime time.Duration - - chInitialized uint32 // set to 1 when field ch is initialized - - mu sync.Mutex // mu protects the following fields - closed bool // set to true when the pool is closed. - active int // the number of open connections in the pool - ch chan struct{} // limits open connections when p.Wait is true - idle idleList // idle connections - waitCount int64 // total number of connections waited for. - waitDuration time.Duration // total time waited for new connections. -} - -// NewPool creates a new pool. -// -// Deprecated: Initialize the Pool directly as shown in the example. -func NewPool(newFn func() (Conn, error), maxIdle int) *Pool { - return &Pool{Dial: newFn, MaxIdle: maxIdle} -} - -// Get gets a connection. The application must close the returned connection. -// This method always returns a valid connection so that applications can defer -// error handling to the first use of the connection. If there is an error -// getting an underlying connection, then the connection Err, Do, Send, Flush -// and Receive methods return that error. -func (p *Pool) Get() Conn { - // GetContext returns errorConn in the first argument when an error occurs. - c, _ := p.GetContext(context.Background()) - return c -} - -// GetContext gets a connection using the provided context. -// -// The provided Context must be non-nil. If the context expires before the -// connection is complete, an error is returned. Any expiration on the context -// will not affect the returned connection. -// -// If the function completes without error, then the application must close the -// returned connection. -func (p *Pool) GetContext(ctx context.Context) (Conn, error) { - // Wait until there is a vacant connection in the pool. - waited, err := p.waitVacantConn(ctx) - if err != nil { - return nil, err - } - - p.mu.Lock() - - if waited > 0 { - p.waitCount++ - p.waitDuration += waited - } - - // Prune stale connections at the back of the idle list. 
- if p.IdleTimeout > 0 { - n := p.idle.count - for i := 0; i < n && p.idle.back != nil && p.idle.back.t.Add(p.IdleTimeout).Before(nowFunc()); i++ { - pc := p.idle.back - p.idle.popBack() - p.mu.Unlock() - pc.c.Close() - p.mu.Lock() - p.active-- - } - } - - // Get idle connection from the front of idle list. - for p.idle.front != nil { - pc := p.idle.front - p.idle.popFront() - p.mu.Unlock() - if (p.TestOnBorrow == nil || p.TestOnBorrow(pc.c, pc.t) == nil) && - (p.MaxConnLifetime == 0 || nowFunc().Sub(pc.created) < p.MaxConnLifetime) { - return &activeConn{p: p, pc: pc}, nil - } - pc.c.Close() - p.mu.Lock() - p.active-- - } - - // Check for pool closed before dialing a new connection. - if p.closed { - p.mu.Unlock() - err := errors.New("redigo: get on closed pool") - return errorConn{err}, err - } - - // Handle limit for p.Wait == false. - if !p.Wait && p.MaxActive > 0 && p.active >= p.MaxActive { - p.mu.Unlock() - return errorConn{ErrPoolExhausted}, ErrPoolExhausted - } - - p.active++ - p.mu.Unlock() - c, err := p.dial(ctx) - if err != nil { - c = nil - p.mu.Lock() - p.active-- - if p.ch != nil && !p.closed { - p.ch <- struct{}{} - } - p.mu.Unlock() - return errorConn{err}, err - } - return &activeConn{p: p, pc: &poolConn{c: c, created: nowFunc()}}, nil -} - -// PoolStats contains pool statistics. -type PoolStats struct { - // ActiveCount is the number of connections in the pool. The count includes - // idle connections and connections in use. - ActiveCount int - // IdleCount is the number of idle connections in the pool. - IdleCount int - - // WaitCount is the total number of connections waited for. - // This value is currently not guaranteed to be 100% accurate. - WaitCount int64 - - // WaitDuration is the total time blocked waiting for a new connection. - // This value is currently not guaranteed to be 100% accurate. - WaitDuration time.Duration -} - -// Stats returns pool's statistics. -func (p *Pool) Stats() PoolStats { - p.mu.Lock() - stats := PoolStats{ - ActiveCount: p.active, - IdleCount: p.idle.count, - WaitCount: p.waitCount, - WaitDuration: p.waitDuration, - } - p.mu.Unlock() - - return stats -} - -// ActiveCount returns the number of connections in the pool. The count -// includes idle connections and connections in use. -func (p *Pool) ActiveCount() int { - p.mu.Lock() - active := p.active - p.mu.Unlock() - return active -} - -// IdleCount returns the number of idle connections in the pool. -func (p *Pool) IdleCount() int { - p.mu.Lock() - idle := p.idle.count - p.mu.Unlock() - return idle -} - -// Close releases the resources used by the pool. -func (p *Pool) Close() error { - p.mu.Lock() - if p.closed { - p.mu.Unlock() - return nil - } - p.closed = true - p.active -= p.idle.count - pc := p.idle.front - p.idle.count = 0 - p.idle.front, p.idle.back = nil, nil - if p.ch != nil { - close(p.ch) - } - p.mu.Unlock() - for ; pc != nil; pc = pc.next { - pc.c.Close() - } - return nil -} - -func (p *Pool) lazyInit() { - // Fast path. - if atomic.LoadUint32(&p.chInitialized) == 1 { - return - } - // Slow path. - p.mu.Lock() - if p.chInitialized == 0 { - p.ch = make(chan struct{}, p.MaxActive) - if p.closed { - close(p.ch) - } else { - for i := 0; i < p.MaxActive; i++ { - p.ch <- struct{}{} - } - } - atomic.StoreUint32(&p.chInitialized, 1) - } - p.mu.Unlock() -} - -// waitVacantConn waits for a vacant connection in pool if waiting -// is enabled and pool size is limited, otherwise returns instantly. -// If ctx expires before that, an error is returned. 
-// -// If there were no vacant connection in the pool right away it returns the time spent waiting -// for that connection to appear in the pool. -func (p *Pool) waitVacantConn(ctx context.Context) (waited time.Duration, err error) { - if !p.Wait || p.MaxActive <= 0 { - // No wait or no connection limit. - return 0, nil - } - - p.lazyInit() - - // wait indicates if we believe it will block so its not 100% accurate - // however for stats it should be good enough. - wait := len(p.ch) == 0 - var start time.Time - if wait { - start = time.Now() - } - - if ctx == nil { - <-p.ch - } else { - select { - case <-p.ch: - // Additionally check that context hasn't expired while we were waiting, - // because `select` picks a random `case` if several of them are "ready". - select { - case <-ctx.Done(): - return 0, ctx.Err() - default: - } - case <-ctx.Done(): - return 0, ctx.Err() - } - } - - if wait { - return time.Since(start), nil - } - return 0, nil -} - -func (p *Pool) dial(ctx context.Context) (Conn, error) { - if p.DialContext != nil { - return p.DialContext(ctx) - } - if p.Dial != nil { - return p.Dial() - } - return nil, errors.New("redigo: must pass Dial or DialContext to pool") -} - -func (p *Pool) put(pc *poolConn, forceClose bool) error { - p.mu.Lock() - if !p.closed && !forceClose { - pc.t = nowFunc() - p.idle.pushFront(pc) - if p.idle.count > p.MaxIdle { - pc = p.idle.back - p.idle.popBack() - } else { - pc = nil - } - } - - if pc != nil { - p.mu.Unlock() - pc.c.Close() - p.mu.Lock() - p.active-- - } - - if p.ch != nil && !p.closed { - p.ch <- struct{}{} - } - p.mu.Unlock() - return nil -} - -type activeConn struct { - p *Pool - pc *poolConn - state int -} - -var ( - sentinel []byte - sentinelOnce sync.Once -) - -func initSentinel() { - p := make([]byte, 64) - if _, err := rand.Read(p); err == nil { - sentinel = p - } else { - h := sha1.New() - io.WriteString(h, "Oops, rand failed. Use time instead.") - io.WriteString(h, strconv.FormatInt(time.Now().UnixNano(), 10)) - sentinel = h.Sum(nil) - } -} - -func (ac *activeConn) Close() error { - pc := ac.pc - if pc == nil { - return nil - } - ac.pc = nil - - if ac.state&connectionMultiState != 0 { - pc.c.Send("DISCARD") - ac.state &^= (connectionMultiState | connectionWatchState) - } else if ac.state&connectionWatchState != 0 { - pc.c.Send("UNWATCH") - ac.state &^= connectionWatchState - } - if ac.state&connectionSubscribeState != 0 { - pc.c.Send("UNSUBSCRIBE") - pc.c.Send("PUNSUBSCRIBE") - // To detect the end of the message stream, ask the server to echo - // a sentinel value and read until we see that value. - sentinelOnce.Do(initSentinel) - pc.c.Send("ECHO", sentinel) - pc.c.Flush() - for { - p, err := pc.c.Receive() - if err != nil { - break - } - if p, ok := p.([]byte); ok && bytes.Equal(p, sentinel) { - ac.state &^= connectionSubscribeState - break - } - } - } - pc.c.Do("") - ac.p.put(pc, ac.state != 0 || pc.c.Err() != nil) - return nil -} - -func (ac *activeConn) Err() error { - pc := ac.pc - if pc == nil { - return errConnClosed - } - return pc.c.Err() -} - -func (ac *activeConn) Do(commandName string, args ...interface{}) (reply interface{}, err error) { - pc := ac.pc - if pc == nil { - return nil, errConnClosed - } - ci := lookupCommandInfo(commandName) - ac.state = (ac.state | ci.Set) &^ ci.Clear - return pc.c.Do(commandName, args...) 
-} - -func (ac *activeConn) DoWithTimeout(timeout time.Duration, commandName string, args ...interface{}) (reply interface{}, err error) { - pc := ac.pc - if pc == nil { - return nil, errConnClosed - } - cwt, ok := pc.c.(ConnWithTimeout) - if !ok { - return nil, errTimeoutNotSupported - } - ci := lookupCommandInfo(commandName) - ac.state = (ac.state | ci.Set) &^ ci.Clear - return cwt.DoWithTimeout(timeout, commandName, args...) -} - -func (ac *activeConn) Send(commandName string, args ...interface{}) error { - pc := ac.pc - if pc == nil { - return errConnClosed - } - ci := lookupCommandInfo(commandName) - ac.state = (ac.state | ci.Set) &^ ci.Clear - return pc.c.Send(commandName, args...) -} - -func (ac *activeConn) Flush() error { - pc := ac.pc - if pc == nil { - return errConnClosed - } - return pc.c.Flush() -} - -func (ac *activeConn) Receive() (reply interface{}, err error) { - pc := ac.pc - if pc == nil { - return nil, errConnClosed - } - return pc.c.Receive() -} - -func (ac *activeConn) ReceiveWithTimeout(timeout time.Duration) (reply interface{}, err error) { - pc := ac.pc - if pc == nil { - return nil, errConnClosed - } - cwt, ok := pc.c.(ConnWithTimeout) - if !ok { - return nil, errTimeoutNotSupported - } - return cwt.ReceiveWithTimeout(timeout) -} - -type errorConn struct{ err error } - -func (ec errorConn) Do(string, ...interface{}) (interface{}, error) { return nil, ec.err } -func (ec errorConn) DoWithTimeout(time.Duration, string, ...interface{}) (interface{}, error) { - return nil, ec.err -} -func (ec errorConn) Send(string, ...interface{}) error { return ec.err } -func (ec errorConn) Err() error { return ec.err } -func (ec errorConn) Close() error { return nil } -func (ec errorConn) Flush() error { return ec.err } -func (ec errorConn) Receive() (interface{}, error) { return nil, ec.err } -func (ec errorConn) ReceiveWithTimeout(time.Duration) (interface{}, error) { return nil, ec.err } - -type idleList struct { - count int - front, back *poolConn -} - -type poolConn struct { - c Conn - t time.Time - created time.Time - next, prev *poolConn -} - -func (l *idleList) pushFront(pc *poolConn) { - pc.next = l.front - pc.prev = nil - if l.count == 0 { - l.back = pc - } else { - l.front.prev = pc - } - l.front = pc - l.count++ - return -} - -func (l *idleList) popFront() { - pc := l.front - l.count-- - if l.count == 0 { - l.front, l.back = nil, nil - } else { - pc.next.prev = nil - l.front = pc.next - } - pc.next, pc.prev = nil, nil -} - -func (l *idleList) popBack() { - pc := l.back - l.count-- - if l.count == 0 { - l.front, l.back = nil, nil - } else { - pc.prev.next = nil - l.back = pc.prev - } - pc.next, pc.prev = nil, nil -} diff --git a/vendor/github.com/gomodule/redigo/redis/pubsub.go b/vendor/github.com/gomodule/redigo/redis/pubsub.go deleted file mode 100644 index 2da60211..00000000 --- a/vendor/github.com/gomodule/redigo/redis/pubsub.go +++ /dev/null @@ -1,148 +0,0 @@ -// Copyright 2012 Gary Burd -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. 
- -package redis - -import ( - "errors" - "time" -) - -// Subscription represents a subscribe or unsubscribe notification. -type Subscription struct { - // Kind is "subscribe", "unsubscribe", "psubscribe" or "punsubscribe" - Kind string - - // The channel that was changed. - Channel string - - // The current number of subscriptions for connection. - Count int -} - -// Message represents a message notification. -type Message struct { - // The originating channel. - Channel string - - // The matched pattern, if any - Pattern string - - // The message data. - Data []byte -} - -// Pong represents a pubsub pong notification. -type Pong struct { - Data string -} - -// PubSubConn wraps a Conn with convenience methods for subscribers. -type PubSubConn struct { - Conn Conn -} - -// Close closes the connection. -func (c PubSubConn) Close() error { - return c.Conn.Close() -} - -// Subscribe subscribes the connection to the specified channels. -func (c PubSubConn) Subscribe(channel ...interface{}) error { - c.Conn.Send("SUBSCRIBE", channel...) - return c.Conn.Flush() -} - -// PSubscribe subscribes the connection to the given patterns. -func (c PubSubConn) PSubscribe(channel ...interface{}) error { - c.Conn.Send("PSUBSCRIBE", channel...) - return c.Conn.Flush() -} - -// Unsubscribe unsubscribes the connection from the given channels, or from all -// of them if none is given. -func (c PubSubConn) Unsubscribe(channel ...interface{}) error { - c.Conn.Send("UNSUBSCRIBE", channel...) - return c.Conn.Flush() -} - -// PUnsubscribe unsubscribes the connection from the given patterns, or from all -// of them if none is given. -func (c PubSubConn) PUnsubscribe(channel ...interface{}) error { - c.Conn.Send("PUNSUBSCRIBE", channel...) - return c.Conn.Flush() -} - -// Ping sends a PING to the server with the specified data. -// -// The connection must be subscribed to at least one channel or pattern when -// calling this method. -func (c PubSubConn) Ping(data string) error { - c.Conn.Send("PING", data) - return c.Conn.Flush() -} - -// Receive returns a pushed message as a Subscription, Message, Pong or error. -// The return value is intended to be used directly in a type switch as -// illustrated in the PubSubConn example. -func (c PubSubConn) Receive() interface{} { - return c.receiveInternal(c.Conn.Receive()) -} - -// ReceiveWithTimeout is like Receive, but it allows the application to -// override the connection's default timeout. 
-func (c PubSubConn) ReceiveWithTimeout(timeout time.Duration) interface{} { - return c.receiveInternal(ReceiveWithTimeout(c.Conn, timeout)) -} - -func (c PubSubConn) receiveInternal(replyArg interface{}, errArg error) interface{} { - reply, err := Values(replyArg, errArg) - if err != nil { - return err - } - - var kind string - reply, err = Scan(reply, &kind) - if err != nil { - return err - } - - switch kind { - case "message": - var m Message - if _, err := Scan(reply, &m.Channel, &m.Data); err != nil { - return err - } - return m - case "pmessage": - var m Message - if _, err := Scan(reply, &m.Pattern, &m.Channel, &m.Data); err != nil { - return err - } - return m - case "subscribe", "psubscribe", "unsubscribe", "punsubscribe": - s := Subscription{Kind: kind} - if _, err := Scan(reply, &s.Channel, &s.Count); err != nil { - return err - } - return s - case "pong": - var p Pong - if _, err := Scan(reply, &p.Data); err != nil { - return err - } - return p - } - return errors.New("redigo: unknown pubsub notification") -} diff --git a/vendor/github.com/gomodule/redigo/redis/redis.go b/vendor/github.com/gomodule/redigo/redis/redis.go deleted file mode 100644 index e4464874..00000000 --- a/vendor/github.com/gomodule/redigo/redis/redis.go +++ /dev/null @@ -1,138 +0,0 @@ -// Copyright 2012 Gary Burd -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package redis - -import ( - "errors" - "time" -) - -// Error represents an error returned in a command reply. -type Error string - -func (err Error) Error() string { return string(err) } - -// Conn represents a connection to a Redis server. -type Conn interface { - // Close closes the connection. - Close() error - - // Err returns a non-nil value when the connection is not usable. - Err() error - - // Do sends a command to the server and returns the received reply. - Do(commandName string, args ...interface{}) (reply interface{}, err error) - - // Send writes the command to the client's output buffer. - Send(commandName string, args ...interface{}) error - - // Flush flushes the output buffer to the Redis server. - Flush() error - - // Receive receives a single reply from the Redis server - Receive() (reply interface{}, err error) -} - -// Argument is the interface implemented by an object which wants to control how -// the object is converted to Redis bulk strings. -type Argument interface { - // RedisArg returns a value to be encoded as a bulk string per the - // conversions listed in the section 'Executing Commands'. - // Implementations should typically return a []byte or string. - RedisArg() interface{} -} - -// Scanner is implemented by an object which wants to control its value is -// interpreted when read from Redis. -type Scanner interface { - // RedisScan assigns a value from a Redis value. The argument src is one of - // the reply types listed in the section `Executing Commands`. - // - // An error should be returned if the value cannot be stored without - // loss of information. 
- RedisScan(src interface{}) error -} - -// ConnWithTimeout is an optional interface that allows the caller to override -// a connection's default read timeout. This interface is useful for executing -// the BLPOP, BRPOP, BRPOPLPUSH, XREAD and other commands that block at the -// server. -// -// A connection's default read timeout is set with the DialReadTimeout dial -// option. Applications should rely on the default timeout for commands that do -// not block at the server. -// -// All of the Conn implementations in this package satisfy the ConnWithTimeout -// interface. -// -// Use the DoWithTimeout and ReceiveWithTimeout helper functions to simplify -// use of this interface. -type ConnWithTimeout interface { - Conn - - // Do sends a command to the server and returns the received reply. - // The timeout overrides the read timeout set when dialing the - // connection. - DoWithTimeout(timeout time.Duration, commandName string, args ...interface{}) (reply interface{}, err error) - - // Receive receives a single reply from the Redis server. The timeout - // overrides the read timeout set when dialing the connection. - ReceiveWithTimeout(timeout time.Duration) (reply interface{}, err error) -} - -var errTimeoutNotSupported = errors.New("redis: connection does not support ConnWithTimeout") - -// DoWithTimeout executes a Redis command with the specified read timeout. If -// the connection does not satisfy the ConnWithTimeout interface, then an error -// is returned. -func DoWithTimeout(c Conn, timeout time.Duration, cmd string, args ...interface{}) (interface{}, error) { - cwt, ok := c.(ConnWithTimeout) - if !ok { - return nil, errTimeoutNotSupported - } - return cwt.DoWithTimeout(timeout, cmd, args...) -} - -// ReceiveWithTimeout receives a reply with the specified read timeout. If the -// connection does not satisfy the ConnWithTimeout interface, then an error is -// returned. -func ReceiveWithTimeout(c Conn, timeout time.Duration) (interface{}, error) { - cwt, ok := c.(ConnWithTimeout) - if !ok { - return nil, errTimeoutNotSupported - } - return cwt.ReceiveWithTimeout(timeout) -} - -// SlowLog represents a redis SlowLog -type SlowLog struct { - // ID is a unique progressive identifier for every slow log entry. - ID int64 - - // Time is the unix timestamp at which the logged command was processed. - Time time.Time - - // ExecutationTime is the amount of time needed for the command execution. - ExecutionTime time.Duration - - // Args is the command name and arguments - Args []string - - // ClientAddr is the client IP address (4.0 only). - ClientAddr string - - // ClientName is the name set via the CLIENT SETNAME command (4.0 only). - ClientName string -} diff --git a/vendor/github.com/gomodule/redigo/redis/reply.go b/vendor/github.com/gomodule/redigo/redis/reply.go deleted file mode 100644 index 251a7884..00000000 --- a/vendor/github.com/gomodule/redigo/redis/reply.go +++ /dev/null @@ -1,583 +0,0 @@ -// Copyright 2012 Gary Burd -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. 
- -package redis - -import ( - "errors" - "fmt" - "strconv" - "time" -) - -// ErrNil indicates that a reply value is nil. -var ErrNil = errors.New("redigo: nil returned") - -// Int is a helper that converts a command reply to an integer. If err is not -// equal to nil, then Int returns 0, err. Otherwise, Int converts the -// reply to an int as follows: -// -// Reply type Result -// integer int(reply), nil -// bulk string parsed reply, nil -// nil 0, ErrNil -// other 0, error -func Int(reply interface{}, err error) (int, error) { - if err != nil { - return 0, err - } - switch reply := reply.(type) { - case int64: - x := int(reply) - if int64(x) != reply { - return 0, strconv.ErrRange - } - return x, nil - case []byte: - n, err := strconv.ParseInt(string(reply), 10, 0) - return int(n), err - case nil: - return 0, ErrNil - case Error: - return 0, reply - } - return 0, fmt.Errorf("redigo: unexpected type for Int, got type %T", reply) -} - -// Int64 is a helper that converts a command reply to 64 bit integer. If err is -// not equal to nil, then Int64 returns 0, err. Otherwise, Int64 converts the -// reply to an int64 as follows: -// -// Reply type Result -// integer reply, nil -// bulk string parsed reply, nil -// nil 0, ErrNil -// other 0, error -func Int64(reply interface{}, err error) (int64, error) { - if err != nil { - return 0, err - } - switch reply := reply.(type) { - case int64: - return reply, nil - case []byte: - n, err := strconv.ParseInt(string(reply), 10, 64) - return n, err - case nil: - return 0, ErrNil - case Error: - return 0, reply - } - return 0, fmt.Errorf("redigo: unexpected type for Int64, got type %T", reply) -} - -func errNegativeInt(v int64) error { - return fmt.Errorf("redigo: unexpected negative value %v for Uint64", v) -} - -// Uint64 is a helper that converts a command reply to 64 bit unsigned integer. -// If err is not equal to nil, then Uint64 returns 0, err. Otherwise, Uint64 converts the -// reply to an uint64 as follows: -// -// Reply type Result -// +integer reply, nil -// bulk string parsed reply, nil -// nil 0, ErrNil -// other 0, error -func Uint64(reply interface{}, err error) (uint64, error) { - if err != nil { - return 0, err - } - switch reply := reply.(type) { - case int64: - if reply < 0 { - return 0, errNegativeInt(reply) - } - return uint64(reply), nil - case []byte: - n, err := strconv.ParseUint(string(reply), 10, 64) - return n, err - case nil: - return 0, ErrNil - case Error: - return 0, reply - } - return 0, fmt.Errorf("redigo: unexpected type for Uint64, got type %T", reply) -} - -// Float64 is a helper that converts a command reply to 64 bit float. If err is -// not equal to nil, then Float64 returns 0, err. Otherwise, Float64 converts -// the reply to an int as follows: -// -// Reply type Result -// bulk string parsed reply, nil -// nil 0, ErrNil -// other 0, error -func Float64(reply interface{}, err error) (float64, error) { - if err != nil { - return 0, err - } - switch reply := reply.(type) { - case []byte: - n, err := strconv.ParseFloat(string(reply), 64) - return n, err - case nil: - return 0, ErrNil - case Error: - return 0, reply - } - return 0, fmt.Errorf("redigo: unexpected type for Float64, got type %T", reply) -} - -// String is a helper that converts a command reply to a string. If err is not -// equal to nil, then String returns "", err. 
Otherwise String converts the -// reply to a string as follows: -// -// Reply type Result -// bulk string string(reply), nil -// simple string reply, nil -// nil "", ErrNil -// other "", error -func String(reply interface{}, err error) (string, error) { - if err != nil { - return "", err - } - switch reply := reply.(type) { - case []byte: - return string(reply), nil - case string: - return reply, nil - case nil: - return "", ErrNil - case Error: - return "", reply - } - return "", fmt.Errorf("redigo: unexpected type for String, got type %T", reply) -} - -// Bytes is a helper that converts a command reply to a slice of bytes. If err -// is not equal to nil, then Bytes returns nil, err. Otherwise Bytes converts -// the reply to a slice of bytes as follows: -// -// Reply type Result -// bulk string reply, nil -// simple string []byte(reply), nil -// nil nil, ErrNil -// other nil, error -func Bytes(reply interface{}, err error) ([]byte, error) { - if err != nil { - return nil, err - } - switch reply := reply.(type) { - case []byte: - return reply, nil - case string: - return []byte(reply), nil - case nil: - return nil, ErrNil - case Error: - return nil, reply - } - return nil, fmt.Errorf("redigo: unexpected type for Bytes, got type %T", reply) -} - -// Bool is a helper that converts a command reply to a boolean. If err is not -// equal to nil, then Bool returns false, err. Otherwise Bool converts the -// reply to boolean as follows: -// -// Reply type Result -// integer value != 0, nil -// bulk string strconv.ParseBool(reply) -// nil false, ErrNil -// other false, error -func Bool(reply interface{}, err error) (bool, error) { - if err != nil { - return false, err - } - switch reply := reply.(type) { - case int64: - return reply != 0, nil - case []byte: - return strconv.ParseBool(string(reply)) - case nil: - return false, ErrNil - case Error: - return false, reply - } - return false, fmt.Errorf("redigo: unexpected type for Bool, got type %T", reply) -} - -// MultiBulk is a helper that converts an array command reply to a []interface{}. -// -// Deprecated: Use Values instead. -func MultiBulk(reply interface{}, err error) ([]interface{}, error) { return Values(reply, err) } - -// Values is a helper that converts an array command reply to a []interface{}. -// If err is not equal to nil, then Values returns nil, err. Otherwise, Values -// converts the reply as follows: -// -// Reply type Result -// array reply, nil -// nil nil, ErrNil -// other nil, error -func Values(reply interface{}, err error) ([]interface{}, error) { - if err != nil { - return nil, err - } - switch reply := reply.(type) { - case []interface{}: - return reply, nil - case nil: - return nil, ErrNil - case Error: - return nil, reply - } - return nil, fmt.Errorf("redigo: unexpected type for Values, got type %T", reply) -} - -func sliceHelper(reply interface{}, err error, name string, makeSlice func(int), assign func(int, interface{}) error) error { - if err != nil { - return err - } - switch reply := reply.(type) { - case []interface{}: - makeSlice(len(reply)) - for i := range reply { - if reply[i] == nil { - continue - } - if err := assign(i, reply[i]); err != nil { - return err - } - } - return nil - case nil: - return ErrNil - case Error: - return reply - } - return fmt.Errorf("redigo: unexpected type for %s, got type %T", name, reply) -} - -// Float64s is a helper that converts an array command reply to a []float64. If -// err is not equal to nil, then Float64s returns nil, err. 
Nil array items are -// converted to 0 in the output slice. Floats64 returns an error if an array -// item is not a bulk string or nil. -func Float64s(reply interface{}, err error) ([]float64, error) { - var result []float64 - err = sliceHelper(reply, err, "Float64s", func(n int) { result = make([]float64, n) }, func(i int, v interface{}) error { - p, ok := v.([]byte) - if !ok { - return fmt.Errorf("redigo: unexpected element type for Floats64, got type %T", v) - } - f, err := strconv.ParseFloat(string(p), 64) - result[i] = f - return err - }) - return result, err -} - -// Strings is a helper that converts an array command reply to a []string. If -// err is not equal to nil, then Strings returns nil, err. Nil array items are -// converted to "" in the output slice. Strings returns an error if an array -// item is not a bulk string or nil. -func Strings(reply interface{}, err error) ([]string, error) { - var result []string - err = sliceHelper(reply, err, "Strings", func(n int) { result = make([]string, n) }, func(i int, v interface{}) error { - switch v := v.(type) { - case string: - result[i] = v - return nil - case []byte: - result[i] = string(v) - return nil - default: - return fmt.Errorf("redigo: unexpected element type for Strings, got type %T", v) - } - }) - return result, err -} - -// ByteSlices is a helper that converts an array command reply to a [][]byte. -// If err is not equal to nil, then ByteSlices returns nil, err. Nil array -// items are stay nil. ByteSlices returns an error if an array item is not a -// bulk string or nil. -func ByteSlices(reply interface{}, err error) ([][]byte, error) { - var result [][]byte - err = sliceHelper(reply, err, "ByteSlices", func(n int) { result = make([][]byte, n) }, func(i int, v interface{}) error { - p, ok := v.([]byte) - if !ok { - return fmt.Errorf("redigo: unexpected element type for ByteSlices, got type %T", v) - } - result[i] = p - return nil - }) - return result, err -} - -// Int64s is a helper that converts an array command reply to a []int64. -// If err is not equal to nil, then Int64s returns nil, err. Nil array -// items are stay nil. Int64s returns an error if an array item is not a -// bulk string or nil. -func Int64s(reply interface{}, err error) ([]int64, error) { - var result []int64 - err = sliceHelper(reply, err, "Int64s", func(n int) { result = make([]int64, n) }, func(i int, v interface{}) error { - switch v := v.(type) { - case int64: - result[i] = v - return nil - case []byte: - n, err := strconv.ParseInt(string(v), 10, 64) - result[i] = n - return err - default: - return fmt.Errorf("redigo: unexpected element type for Int64s, got type %T", v) - } - }) - return result, err -} - -// Ints is a helper that converts an array command reply to a []in. -// If err is not equal to nil, then Ints returns nil, err. Nil array -// items are stay nil. Ints returns an error if an array item is not a -// bulk string or nil. 
-func Ints(reply interface{}, err error) ([]int, error) { - var result []int - err = sliceHelper(reply, err, "Ints", func(n int) { result = make([]int, n) }, func(i int, v interface{}) error { - switch v := v.(type) { - case int64: - n := int(v) - if int64(n) != v { - return strconv.ErrRange - } - result[i] = n - return nil - case []byte: - n, err := strconv.Atoi(string(v)) - result[i] = n - return err - default: - return fmt.Errorf("redigo: unexpected element type for Ints, got type %T", v) - } - }) - return result, err -} - -// StringMap is a helper that converts an array of strings (alternating key, value) -// into a map[string]string. The HGETALL and CONFIG GET commands return replies in this format. -// Requires an even number of values in result. -func StringMap(result interface{}, err error) (map[string]string, error) { - values, err := Values(result, err) - if err != nil { - return nil, err - } - if len(values)%2 != 0 { - return nil, errors.New("redigo: StringMap expects even number of values result") - } - m := make(map[string]string, len(values)/2) - for i := 0; i < len(values); i += 2 { - key, okKey := values[i].([]byte) - value, okValue := values[i+1].([]byte) - if !okKey || !okValue { - return nil, errors.New("redigo: StringMap key not a bulk string value") - } - m[string(key)] = string(value) - } - return m, nil -} - -// IntMap is a helper that converts an array of strings (alternating key, value) -// into a map[string]int. The HGETALL commands return replies in this format. -// Requires an even number of values in result. -func IntMap(result interface{}, err error) (map[string]int, error) { - values, err := Values(result, err) - if err != nil { - return nil, err - } - if len(values)%2 != 0 { - return nil, errors.New("redigo: IntMap expects even number of values result") - } - m := make(map[string]int, len(values)/2) - for i := 0; i < len(values); i += 2 { - key, ok := values[i].([]byte) - if !ok { - return nil, errors.New("redigo: IntMap key not a bulk string value") - } - value, err := Int(values[i+1], nil) - if err != nil { - return nil, err - } - m[string(key)] = value - } - return m, nil -} - -// Int64Map is a helper that converts an array of strings (alternating key, value) -// into a map[string]int64. The HGETALL commands return replies in this format. -// Requires an even number of values in result. -func Int64Map(result interface{}, err error) (map[string]int64, error) { - values, err := Values(result, err) - if err != nil { - return nil, err - } - if len(values)%2 != 0 { - return nil, errors.New("redigo: Int64Map expects even number of values result") - } - m := make(map[string]int64, len(values)/2) - for i := 0; i < len(values); i += 2 { - key, ok := values[i].([]byte) - if !ok { - return nil, errors.New("redigo: Int64Map key not a bulk string value") - } - value, err := Int64(values[i+1], nil) - if err != nil { - return nil, err - } - m[string(key)] = value - } - return m, nil -} - -// Positions is a helper that converts an array of positions (lat, long) -// into a [][2]float64. The GEOPOS command returns replies in this format. 
-func Positions(result interface{}, err error) ([]*[2]float64, error) { - values, err := Values(result, err) - if err != nil { - return nil, err - } - positions := make([]*[2]float64, len(values)) - for i := range values { - if values[i] == nil { - continue - } - p, ok := values[i].([]interface{}) - if !ok { - return nil, fmt.Errorf("redigo: unexpected element type for interface slice, got type %T", values[i]) - } - if len(p) != 2 { - return nil, fmt.Errorf("redigo: unexpected number of values for a member position, got %d", len(p)) - } - lat, err := Float64(p[0], nil) - if err != nil { - return nil, err - } - long, err := Float64(p[1], nil) - if err != nil { - return nil, err - } - positions[i] = &[2]float64{lat, long} - } - return positions, nil -} - -// Uint64s is a helper that converts an array command reply to a []uint64. -// If err is not equal to nil, then Uint64s returns nil, err. Nil array -// items are stay nil. Uint64s returns an error if an array item is not a -// bulk string or nil. -func Uint64s(reply interface{}, err error) ([]uint64, error) { - var result []uint64 - err = sliceHelper(reply, err, "Uint64s", func(n int) { result = make([]uint64, n) }, func(i int, v interface{}) error { - switch v := v.(type) { - case uint64: - result[i] = v - return nil - case []byte: - n, err := strconv.ParseUint(string(v), 10, 64) - result[i] = n - return err - default: - return fmt.Errorf("redigo: unexpected element type for Uint64s, got type %T", v) - } - }) - return result, err -} - -// Uint64Map is a helper that converts an array of strings (alternating key, value) -// into a map[string]uint64. The HGETALL commands return replies in this format. -// Requires an even number of values in result. -func Uint64Map(result interface{}, err error) (map[string]uint64, error) { - values, err := Values(result, err) - if err != nil { - return nil, err - } - if len(values)%2 != 0 { - return nil, errors.New("redigo: Uint64Map expects even number of values result") - } - m := make(map[string]uint64, len(values)/2) - for i := 0; i < len(values); i += 2 { - key, ok := values[i].([]byte) - if !ok { - return nil, errors.New("redigo: Uint64Map key not a bulk string value") - } - value, err := Uint64(values[i+1], nil) - if err != nil { - return nil, err - } - m[string(key)] = value - } - return m, nil -} - -// SlowLogs is a helper that parse the SLOWLOG GET command output and -// return the array of SlowLog -func SlowLogs(result interface{}, err error) ([]SlowLog, error) { - rawLogs, err := Values(result, err) - if err != nil { - return nil, err - } - logs := make([]SlowLog, len(rawLogs)) - for i, rawLog := range rawLogs { - rawLog, ok := rawLog.([]interface{}) - if !ok { - return nil, errors.New("redigo: slowlog element is not an array") - } - - var log SlowLog - - if len(rawLog) < 4 { - return nil, errors.New("redigo: slowlog element has less than four elements") - } - log.ID, ok = rawLog[0].(int64) - if !ok { - return nil, errors.New("redigo: slowlog element[0] not an int64") - } - timestamp, ok := rawLog[1].(int64) - if !ok { - return nil, errors.New("redigo: slowlog element[1] not an int64") - } - log.Time = time.Unix(timestamp, 0) - duration, ok := rawLog[2].(int64) - if !ok { - return nil, errors.New("redigo: slowlog element[2] not an int64") - } - log.ExecutionTime = time.Duration(duration) * time.Microsecond - - log.Args, err = Strings(rawLog[3], nil) - if err != nil { - return nil, fmt.Errorf("redigo: slowlog element[3] is not array of string. 
actual error is : %s", err.Error()) - } - if len(rawLog) >= 6 { - log.ClientAddr, err = String(rawLog[4], nil) - if err != nil { - return nil, fmt.Errorf("redigo: slowlog element[4] is not a string. actual error is : %s", err.Error()) - } - log.ClientName, err = String(rawLog[5], nil) - if err != nil { - return nil, fmt.Errorf("redigo: slowlog element[5] is not a string. actual error is : %s", err.Error()) - } - } - logs[i] = log - } - return logs, nil -} diff --git a/vendor/github.com/gomodule/redigo/redis/scan.go b/vendor/github.com/gomodule/redigo/redis/scan.go deleted file mode 100644 index 93d0c658..00000000 --- a/vendor/github.com/gomodule/redigo/redis/scan.go +++ /dev/null @@ -1,673 +0,0 @@ -// Copyright 2012 Gary Burd -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package redis - -import ( - "errors" - "fmt" - "reflect" - "strconv" - "strings" - "sync" -) - -var ( - scannerType = reflect.TypeOf((*Scanner)(nil)).Elem() -) - -func ensureLen(d reflect.Value, n int) { - if n > d.Cap() { - d.Set(reflect.MakeSlice(d.Type(), n, n)) - } else { - d.SetLen(n) - } -} - -func cannotConvert(d reflect.Value, s interface{}) error { - var sname string - switch s.(type) { - case string: - sname = "Redis simple string" - case Error: - sname = "Redis error" - case int64: - sname = "Redis integer" - case []byte: - sname = "Redis bulk string" - case []interface{}: - sname = "Redis array" - case nil: - sname = "Redis nil" - default: - sname = reflect.TypeOf(s).String() - } - return fmt.Errorf("cannot convert from %s to %s", sname, d.Type()) -} - -func convertAssignNil(d reflect.Value) (err error) { - switch d.Type().Kind() { - case reflect.Slice, reflect.Interface: - d.Set(reflect.Zero(d.Type())) - default: - err = cannotConvert(d, nil) - } - return err -} - -func convertAssignError(d reflect.Value, s Error) (err error) { - if d.Kind() == reflect.String { - d.SetString(string(s)) - } else if d.Kind() == reflect.Slice && d.Type().Elem().Kind() == reflect.Uint8 { - d.SetBytes([]byte(s)) - } else { - err = cannotConvert(d, s) - } - return -} - -func convertAssignString(d reflect.Value, s string) (err error) { - switch d.Type().Kind() { - case reflect.Float32, reflect.Float64: - var x float64 - x, err = strconv.ParseFloat(s, d.Type().Bits()) - d.SetFloat(x) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - var x int64 - x, err = strconv.ParseInt(s, 10, d.Type().Bits()) - d.SetInt(x) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - var x uint64 - x, err = strconv.ParseUint(s, 10, d.Type().Bits()) - d.SetUint(x) - case reflect.Bool: - var x bool - x, err = strconv.ParseBool(s) - d.SetBool(x) - case reflect.String: - d.SetString(s) - case reflect.Slice: - if d.Type().Elem().Kind() == reflect.Uint8 { - d.SetBytes([]byte(s)) - } else { - err = cannotConvert(d, s) - } - case reflect.Ptr: - err = convertAssignString(d.Elem(), s) - default: - err = cannotConvert(d, s) - } - return -} - -func convertAssignBulkString(d reflect.Value, s 
[]byte) (err error) { - switch d.Type().Kind() { - case reflect.Slice: - // Handle []byte destination here to avoid unnecessary - // []byte -> string -> []byte converion. - if d.Type().Elem().Kind() == reflect.Uint8 { - d.SetBytes(s) - } else { - err = cannotConvert(d, s) - } - case reflect.Ptr: - if d.CanInterface() && d.CanSet() { - if s == nil { - if d.IsNil() { - return nil - } - - d.Set(reflect.Zero(d.Type())) - return nil - } - - if d.IsNil() { - d.Set(reflect.New(d.Type().Elem())) - } - - if sc, ok := d.Interface().(Scanner); ok { - return sc.RedisScan(s) - } - } - err = convertAssignString(d, string(s)) - default: - err = convertAssignString(d, string(s)) - } - return err -} - -func convertAssignInt(d reflect.Value, s int64) (err error) { - switch d.Type().Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - d.SetInt(s) - if d.Int() != s { - err = strconv.ErrRange - d.SetInt(0) - } - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - if s < 0 { - err = strconv.ErrRange - } else { - x := uint64(s) - d.SetUint(x) - if d.Uint() != x { - err = strconv.ErrRange - d.SetUint(0) - } - } - case reflect.Bool: - d.SetBool(s != 0) - default: - err = cannotConvert(d, s) - } - return -} - -func convertAssignValue(d reflect.Value, s interface{}) (err error) { - if d.Kind() != reflect.Ptr { - if d.CanAddr() { - d2 := d.Addr() - if d2.CanInterface() { - if scanner, ok := d2.Interface().(Scanner); ok { - return scanner.RedisScan(s) - } - } - } - } else if d.CanInterface() { - // Already a reflect.Ptr - if d.IsNil() { - d.Set(reflect.New(d.Type().Elem())) - } - if scanner, ok := d.Interface().(Scanner); ok { - return scanner.RedisScan(s) - } - } - - switch s := s.(type) { - case nil: - err = convertAssignNil(d) - case []byte: - err = convertAssignBulkString(d, s) - case int64: - err = convertAssignInt(d, s) - case string: - err = convertAssignString(d, s) - case Error: - err = convertAssignError(d, s) - default: - err = cannotConvert(d, s) - } - return err -} - -func convertAssignArray(d reflect.Value, s []interface{}) error { - if d.Type().Kind() != reflect.Slice { - return cannotConvert(d, s) - } - ensureLen(d, len(s)) - for i := 0; i < len(s); i++ { - if err := convertAssignValue(d.Index(i), s[i]); err != nil { - return err - } - } - return nil -} - -func convertAssign(d interface{}, s interface{}) (err error) { - if scanner, ok := d.(Scanner); ok { - return scanner.RedisScan(s) - } - - // Handle the most common destination types using type switches and - // fall back to reflection for all other types. 
- switch s := s.(type) { - case nil: - // ignore - case []byte: - switch d := d.(type) { - case *string: - *d = string(s) - case *int: - *d, err = strconv.Atoi(string(s)) - case *bool: - *d, err = strconv.ParseBool(string(s)) - case *[]byte: - *d = s - case *interface{}: - *d = s - case nil: - // skip value - default: - if d := reflect.ValueOf(d); d.Type().Kind() != reflect.Ptr { - err = cannotConvert(d, s) - } else { - err = convertAssignBulkString(d.Elem(), s) - } - } - case int64: - switch d := d.(type) { - case *int: - x := int(s) - if int64(x) != s { - err = strconv.ErrRange - x = 0 - } - *d = x - case *bool: - *d = s != 0 - case *interface{}: - *d = s - case nil: - // skip value - default: - if d := reflect.ValueOf(d); d.Type().Kind() != reflect.Ptr { - err = cannotConvert(d, s) - } else { - err = convertAssignInt(d.Elem(), s) - } - } - case string: - switch d := d.(type) { - case *string: - *d = s - case *interface{}: - *d = s - case nil: - // skip value - default: - err = cannotConvert(reflect.ValueOf(d), s) - } - case []interface{}: - switch d := d.(type) { - case *[]interface{}: - *d = s - case *interface{}: - *d = s - case nil: - // skip value - default: - if d := reflect.ValueOf(d); d.Type().Kind() != reflect.Ptr { - err = cannotConvert(d, s) - } else { - err = convertAssignArray(d.Elem(), s) - } - } - case Error: - err = s - default: - err = cannotConvert(reflect.ValueOf(d), s) - } - return -} - -// Scan copies from src to the values pointed at by dest. -// -// Scan uses RedisScan if available otherwise: -// -// The values pointed at by dest must be an integer, float, boolean, string, -// []byte, interface{} or slices of these types. Scan uses the standard strconv -// package to convert bulk strings to numeric and boolean types. -// -// If a dest value is nil, then the corresponding src value is skipped. -// -// If a src element is nil, then the corresponding dest value is not modified. -// -// To enable easy use of Scan in a loop, Scan returns the slice of src -// following the copied values. -func Scan(src []interface{}, dest ...interface{}) ([]interface{}, error) { - if len(src) < len(dest) { - return nil, errors.New("redigo.Scan: array short") - } - var err error - for i, d := range dest { - err = convertAssign(d, src[i]) - if err != nil { - err = fmt.Errorf("redigo.Scan: cannot assign to dest %d: %v", i, err) - break - } - } - return src[len(dest):], err -} - -type fieldSpec struct { - name string - index []int - omitEmpty bool -} - -type structSpec struct { - m map[string]*fieldSpec - l []*fieldSpec -} - -func (ss *structSpec) fieldSpec(name []byte) *fieldSpec { - return ss.m[string(name)] -} - -func compileStructSpec(t reflect.Type, depth map[string]int, index []int, ss *structSpec) { - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - switch { - case f.PkgPath != "" && !f.Anonymous: - // Ignore unexported fields. - case f.Anonymous: - switch f.Type.Kind() { - case reflect.Struct: - compileStructSpec(f.Type, depth, append(index, i), ss) - case reflect.Ptr: - // TODO(steve): Protect against infinite recursion. 
- if f.Type.Elem().Kind() == reflect.Struct { - compileStructSpec(f.Type.Elem(), depth, append(index, i), ss) - } - } - default: - fs := &fieldSpec{name: f.Name} - tag := f.Tag.Get("redis") - p := strings.Split(tag, ",") - if len(p) > 0 { - if p[0] == "-" { - continue - } - if len(p[0]) > 0 { - fs.name = p[0] - } - for _, s := range p[1:] { - switch s { - case "omitempty": - fs.omitEmpty = true - default: - panic(fmt.Errorf("redigo: unknown field tag %s for type %s", s, t.Name())) - } - } - } - d, found := depth[fs.name] - if !found { - d = 1 << 30 - } - switch { - case len(index) == d: - // At same depth, remove from result. - delete(ss.m, fs.name) - j := 0 - for i := 0; i < len(ss.l); i++ { - if fs.name != ss.l[i].name { - ss.l[j] = ss.l[i] - j += 1 - } - } - ss.l = ss.l[:j] - case len(index) < d: - fs.index = make([]int, len(index)+1) - copy(fs.index, index) - fs.index[len(index)] = i - depth[fs.name] = len(index) - ss.m[fs.name] = fs - ss.l = append(ss.l, fs) - } - } - } -} - -var ( - structSpecMutex sync.RWMutex - structSpecCache = make(map[reflect.Type]*structSpec) - defaultFieldSpec = &fieldSpec{} -) - -func structSpecForType(t reflect.Type) *structSpec { - - structSpecMutex.RLock() - ss, found := structSpecCache[t] - structSpecMutex.RUnlock() - if found { - return ss - } - - structSpecMutex.Lock() - defer structSpecMutex.Unlock() - ss, found = structSpecCache[t] - if found { - return ss - } - - ss = &structSpec{m: make(map[string]*fieldSpec)} - compileStructSpec(t, make(map[string]int), nil, ss) - structSpecCache[t] = ss - return ss -} - -var errScanStructValue = errors.New("redigo.ScanStruct: value must be non-nil pointer to a struct") - -// ScanStruct scans alternating names and values from src to a struct. The -// HGETALL and CONFIG GET commands return replies in this format. -// -// ScanStruct uses exported field names to match values in the response. Use -// 'redis' field tag to override the name: -// -// Field int `redis:"myName"` -// -// Fields with the tag redis:"-" are ignored. -// -// Each field uses RedisScan if available otherwise: -// Integer, float, boolean, string and []byte fields are supported. Scan uses the -// standard strconv package to convert bulk string values to numeric and -// boolean types. -// -// If a src element is nil, then the corresponding field is not modified. -func ScanStruct(src []interface{}, dest interface{}) error { - d := reflect.ValueOf(dest) - if d.Kind() != reflect.Ptr || d.IsNil() { - return errScanStructValue - } - d = d.Elem() - if d.Kind() != reflect.Struct { - return errScanStructValue - } - ss := structSpecForType(d.Type()) - - if len(src)%2 != 0 { - return errors.New("redigo.ScanStruct: number of values not a multiple of 2") - } - - for i := 0; i < len(src); i += 2 { - s := src[i+1] - if s == nil { - continue - } - name, ok := src[i].([]byte) - if !ok { - return fmt.Errorf("redigo.ScanStruct: key %d not a bulk string value", i) - } - fs := ss.fieldSpec(name) - if fs == nil { - continue - } - if err := convertAssignValue(d.FieldByIndex(fs.index), s); err != nil { - return fmt.Errorf("redigo.ScanStruct: cannot assign field %s: %v", fs.name, err) - } - } - return nil -} - -var ( - errScanSliceValue = errors.New("redigo.ScanSlice: dest must be non-nil pointer to a struct") -) - -// ScanSlice scans src to the slice pointed to by dest. 
-// -// If the target is a slice of types which implement Scanner then the custom -// RedisScan method is used otherwise the following rules apply: -// -// The elements in the dest slice must be integer, float, boolean, string, struct -// or pointer to struct values. -// -// Struct fields must be integer, float, boolean or string values. All struct -// fields are used unless a subset is specified using fieldNames. -func ScanSlice(src []interface{}, dest interface{}, fieldNames ...string) error { - d := reflect.ValueOf(dest) - if d.Kind() != reflect.Ptr || d.IsNil() { - return errScanSliceValue - } - d = d.Elem() - if d.Kind() != reflect.Slice { - return errScanSliceValue - } - - isPtr := false - t := d.Type().Elem() - st := t - if t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct { - isPtr = true - t = t.Elem() - } - - if t.Kind() != reflect.Struct || st.Implements(scannerType) { - ensureLen(d, len(src)) - for i, s := range src { - if s == nil { - continue - } - if err := convertAssignValue(d.Index(i), s); err != nil { - return fmt.Errorf("redigo.ScanSlice: cannot assign element %d: %v", i, err) - } - } - return nil - } - - ss := structSpecForType(t) - fss := ss.l - if len(fieldNames) > 0 { - fss = make([]*fieldSpec, len(fieldNames)) - for i, name := range fieldNames { - fss[i] = ss.m[name] - if fss[i] == nil { - return fmt.Errorf("redigo.ScanSlice: ScanSlice bad field name %s", name) - } - } - } - - if len(fss) == 0 { - return errors.New("redigo.ScanSlice: no struct fields") - } - - n := len(src) / len(fss) - if n*len(fss) != len(src) { - return errors.New("redigo.ScanSlice: length not a multiple of struct field count") - } - - ensureLen(d, n) - for i := 0; i < n; i++ { - d := d.Index(i) - if isPtr { - if d.IsNil() { - d.Set(reflect.New(t)) - } - d = d.Elem() - } - for j, fs := range fss { - s := src[i*len(fss)+j] - if s == nil { - continue - } - if err := convertAssignValue(d.FieldByIndex(fs.index), s); err != nil { - return fmt.Errorf("redigo.ScanSlice: cannot assign element %d to field %s: %v", i*len(fss)+j, fs.name, err) - } - } - } - return nil -} - -// Args is a helper for constructing command arguments from structured values. -type Args []interface{} - -// Add returns the result of appending value to args. -func (args Args) Add(value ...interface{}) Args { - return append(args, value...) -} - -// AddFlat returns the result of appending the flattened value of v to args. -// -// Maps are flattened by appending the alternating keys and map values to args. -// -// Slices are flattened by appending the slice elements to args. -// -// Structs are flattened by appending the alternating names and values of -// exported fields to args. If v is a nil struct pointer, then nothing is -// appended. The 'redis' field tag overrides struct field names. See ScanStruct -// for more information on the use of the 'redis' field tag. -// -// Other types are appended to args as is. 
-func (args Args) AddFlat(v interface{}) Args { - rv := reflect.ValueOf(v) - switch rv.Kind() { - case reflect.Struct: - args = flattenStruct(args, rv) - case reflect.Slice: - for i := 0; i < rv.Len(); i++ { - args = append(args, rv.Index(i).Interface()) - } - case reflect.Map: - for _, k := range rv.MapKeys() { - args = append(args, k.Interface(), rv.MapIndex(k).Interface()) - } - case reflect.Ptr: - if rv.Type().Elem().Kind() == reflect.Struct { - if !rv.IsNil() { - args = flattenStruct(args, rv.Elem()) - } - } else { - args = append(args, v) - } - default: - args = append(args, v) - } - return args -} - -func flattenStruct(args Args, v reflect.Value) Args { - ss := structSpecForType(v.Type()) - for _, fs := range ss.l { - fv := v.FieldByIndex(fs.index) - if fs.omitEmpty { - var empty = false - switch fv.Kind() { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - empty = fv.Len() == 0 - case reflect.Bool: - empty = !fv.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - empty = fv.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - empty = fv.Uint() == 0 - case reflect.Float32, reflect.Float64: - empty = fv.Float() == 0 - case reflect.Interface, reflect.Ptr: - empty = fv.IsNil() - } - if empty { - continue - } - } - if arg, ok := fv.Interface().(Argument); ok { - args = append(args, fs.name, arg.RedisArg()) - } else if fv.Kind() == reflect.Ptr { - if !fv.IsNil() { - args = append(args, fs.name, fv.Elem().Interface()) - } - } else { - args = append(args, fs.name, fv.Interface()) - } - } - return args -} diff --git a/vendor/github.com/gomodule/redigo/redis/script.go b/vendor/github.com/gomodule/redigo/redis/script.go deleted file mode 100644 index 0ef1c821..00000000 --- a/vendor/github.com/gomodule/redigo/redis/script.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2012 Gary Burd -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package redis - -import ( - "crypto/sha1" - "encoding/hex" - "io" - "strings" -) - -// Script encapsulates the source, hash and key count for a Lua script. See -// http://redis.io/commands/eval for information on scripts in Redis. -type Script struct { - keyCount int - src string - hash string -} - -// NewScript returns a new script object. If keyCount is greater than or equal -// to zero, then the count is automatically inserted in the EVAL command -// argument list. If keyCount is less than zero, then the application supplies -// the count as the first value in the keysAndArgs argument to the Do, Send and -// SendHash methods. 
-func NewScript(keyCount int, src string) *Script { - h := sha1.New() - io.WriteString(h, src) - return &Script{keyCount, src, hex.EncodeToString(h.Sum(nil))} -} - -func (s *Script) args(spec string, keysAndArgs []interface{}) []interface{} { - var args []interface{} - if s.keyCount < 0 { - args = make([]interface{}, 1+len(keysAndArgs)) - args[0] = spec - copy(args[1:], keysAndArgs) - } else { - args = make([]interface{}, 2+len(keysAndArgs)) - args[0] = spec - args[1] = s.keyCount - copy(args[2:], keysAndArgs) - } - return args -} - -// Hash returns the script hash. -func (s *Script) Hash() string { - return s.hash -} - -// Do evaluates the script. Under the covers, Do optimistically evaluates the -// script using the EVALSHA command. If the command fails because the script is -// not loaded, then Do evaluates the script using the EVAL command (thus -// causing the script to load). -func (s *Script) Do(c Conn, keysAndArgs ...interface{}) (interface{}, error) { - v, err := c.Do("EVALSHA", s.args(s.hash, keysAndArgs)...) - if e, ok := err.(Error); ok && strings.HasPrefix(string(e), "NOSCRIPT ") { - v, err = c.Do("EVAL", s.args(s.src, keysAndArgs)...) - } - return v, err -} - -// SendHash evaluates the script without waiting for the reply. The script is -// evaluated with the EVALSHA command. The application must ensure that the -// script is loaded by a previous call to Send, Do or Load methods. -func (s *Script) SendHash(c Conn, keysAndArgs ...interface{}) error { - return c.Send("EVALSHA", s.args(s.hash, keysAndArgs)...) -} - -// Send evaluates the script without waiting for the reply. -func (s *Script) Send(c Conn, keysAndArgs ...interface{}) error { - return c.Send("EVAL", s.args(s.src, keysAndArgs)...) -} - -// Load loads the script without evaluating it. -func (s *Script) Load(c Conn) error { - _, err := c.Do("SCRIPT", "LOAD", s.src) - return err -} diff --git a/vendor/github.com/redis/go-redis/extra/rediscmd/v9/LICENSE b/vendor/github.com/redis/go-redis/extra/rediscmd/v9/LICENSE new file mode 100644 index 00000000..f4967dbc --- /dev/null +++ b/vendor/github.com/redis/go-redis/extra/rediscmd/v9/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2013 The github.com/redis/go-redis Authors. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
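For context on the replacement path: the redigo `Script` helper removed above wraps EVALSHA with an EVAL fallback, and the vendored go-redis v9 package ships an equivalent `Script` type (`redis.NewScript` / `Script.Run`, see `script.go` in the new vendor tree). A minimal sketch of the go-redis form, assuming a local server at `localhost:6379` and an illustrative key name:

```go
package main

import (
	"context"
	"fmt"

	"github.com/redis/go-redis/v9"
)

// incrBy is an illustrative Lua script; the source is hashed once and
// reused, just like the deleted redigo Script helper did.
var incrBy = redis.NewScript(`return redis.call("INCRBY", KEYS[1], ARGV[1])`)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"}) // assumed local instance

	// Run attempts EVALSHA first and falls back to EVAL when the script
	// is not yet loaded, mirroring redigo's Script.Do behaviour.
	n, err := incrBy.Run(ctx, rdb, []string{"counter"}, 5).Int()
	if err != nil {
		panic(err)
	}
	fmt.Println("counter =", n)
}
```

As with redigo's `Script.Do`, the fallback means call sites do not need to preload the script, though `Script.Load` remains available for warm-up.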
diff --git a/vendor/github.com/redis/go-redis/extra/rediscmd/v9/rediscmd.go b/vendor/github.com/redis/go-redis/extra/rediscmd/v9/rediscmd.go new file mode 100644 index 00000000..c97689f9 --- /dev/null +++ b/vendor/github.com/redis/go-redis/extra/rediscmd/v9/rediscmd.go @@ -0,0 +1,149 @@ +package rediscmd + +import ( + "encoding/hex" + "fmt" + "strconv" + "strings" + "time" + + "github.com/redis/go-redis/v9" +) + +func CmdString(cmd redis.Cmder) string { + b := make([]byte, 0, 32) + b = AppendCmd(b, cmd) + return String(b) +} + +func CmdsString(cmds []redis.Cmder) (string, string) { + const numCmdLimit = 100 + const numNameLimit = 10 + + seen := make(map[string]struct{}, numNameLimit) + unqNames := make([]string, 0, numNameLimit) + + b := make([]byte, 0, 32*len(cmds)) + + for i, cmd := range cmds { + if i > numCmdLimit { + break + } + + if i > 0 { + b = append(b, '\n') + } + b = AppendCmd(b, cmd) + + if len(unqNames) >= numNameLimit { + continue + } + + name := cmd.FullName() + if _, ok := seen[name]; !ok { + seen[name] = struct{}{} + unqNames = append(unqNames, name) + } + } + + summary := strings.Join(unqNames, " ") + return summary, String(b) +} + +func AppendCmd(b []byte, cmd redis.Cmder) []byte { + const numArgLimit = 32 + + for i, arg := range cmd.Args() { + if i > numArgLimit { + break + } + if i > 0 { + b = append(b, ' ') + } + b = appendArg(b, arg) + } + + if err := cmd.Err(); err != nil { + b = append(b, ": "...) + b = append(b, err.Error()...) + } + + return b +} + +func appendArg(b []byte, v interface{}) []byte { + const argLenLimit = 64 + + switch v := v.(type) { + case nil: + return append(b, ""...) + case string: + if len(v) > argLenLimit { + v = v[:argLenLimit] + } + return appendUTF8String(b, Bytes(v)) + case []byte: + if len(v) > argLenLimit { + v = v[:argLenLimit] + } + return appendUTF8String(b, v) + case int: + return strconv.AppendInt(b, int64(v), 10) + case int8: + return strconv.AppendInt(b, int64(v), 10) + case int16: + return strconv.AppendInt(b, int64(v), 10) + case int32: + return strconv.AppendInt(b, int64(v), 10) + case int64: + return strconv.AppendInt(b, v, 10) + case uint: + return strconv.AppendUint(b, uint64(v), 10) + case uint8: + return strconv.AppendUint(b, uint64(v), 10) + case uint16: + return strconv.AppendUint(b, uint64(v), 10) + case uint32: + return strconv.AppendUint(b, uint64(v), 10) + case uint64: + return strconv.AppendUint(b, v, 10) + case float32: + return strconv.AppendFloat(b, float64(v), 'f', -1, 64) + case float64: + return strconv.AppendFloat(b, v, 'f', -1, 64) + case bool: + if v { + return append(b, "true"...) + } + return append(b, "false"...) + case time.Time: + return v.AppendFormat(b, time.RFC3339Nano) + default: + return append(b, fmt.Sprint(v)...) + } +} + +func appendUTF8String(dst []byte, src []byte) []byte { + if isSimple(src) { + dst = append(dst, src...) + return dst + } + + s := len(dst) + dst = append(dst, make([]byte, hex.EncodedLen(len(src)))...) 
+ hex.Encode(dst[s:], src) + return dst +} + +func isSimple(b []byte) bool { + for _, c := range b { + if !isSimpleByte(c) { + return false + } + } + return true +} + +func isSimpleByte(c byte) bool { + return c >= 0x21 && c <= 0x7e +} diff --git a/vendor/github.com/redis/go-redis/extra/rediscmd/v9/safe.go b/vendor/github.com/redis/go-redis/extra/rediscmd/v9/safe.go new file mode 100644 index 00000000..6d3a8b7a --- /dev/null +++ b/vendor/github.com/redis/go-redis/extra/rediscmd/v9/safe.go @@ -0,0 +1,12 @@ +//go:build appengine +// +build appengine + +package rediscmd + +func String(b []byte) string { + return string(b) +} + +func Bytes(s string) []byte { + return []byte(s) +} diff --git a/vendor/github.com/redis/go-redis/extra/rediscmd/v9/unsafe.go b/vendor/github.com/redis/go-redis/extra/rediscmd/v9/unsafe.go new file mode 100644 index 00000000..7ccdf2fe --- /dev/null +++ b/vendor/github.com/redis/go-redis/extra/rediscmd/v9/unsafe.go @@ -0,0 +1,21 @@ +//go:build !appengine +// +build !appengine + +package rediscmd + +import "unsafe" + +// String converts byte slice to string. +func String(b []byte) string { + return *(*string)(unsafe.Pointer(&b)) +} + +// Bytes converts string to byte slice. +func Bytes(s string) []byte { + return *(*[]byte)(unsafe.Pointer( + &struct { + string + Cap int + }{s, len(s)}, + )) +} diff --git a/vendor/github.com/redis/go-redis/extra/redisotel/v9/LICENSE b/vendor/github.com/redis/go-redis/extra/redisotel/v9/LICENSE new file mode 100644 index 00000000..f4967dbc --- /dev/null +++ b/vendor/github.com/redis/go-redis/extra/redisotel/v9/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2013 The github.com/redis/go-redis Authors. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
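The `rediscmd` helpers vendored above are what the redisotel hooks added below use to render commands into the `db.statement` span attribute and the pipeline summary. A small sketch of that formatting, using an arbitrary SET command built with `redis.NewStatusCmd` purely for illustration:

```go
package main

import (
	"context"
	"fmt"

	"github.com/redis/go-redis/extra/rediscmd/v9"
	"github.com/redis/go-redis/v9"
)

func main() {
	// Build (but do not execute) a command to show how rediscmd formats it;
	// this is the string the tracing hook records when statement logging is enabled.
	cmd := redis.NewStatusCmd(context.Background(), "set", "greeting", "hello")
	fmt.Println(rediscmd.CmdString(cmd)) // prints: set greeting hello
}
```

Arguments are truncated and hex-escaped by `appendArg`/`appendUTF8String` when they are long or non-printable, which keeps span attributes bounded in size.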
diff --git a/vendor/github.com/redis/go-redis/extra/redisotel/v9/README.md b/vendor/github.com/redis/go-redis/extra/redisotel/v9/README.md new file mode 100644 index 00000000..997c17d1 --- /dev/null +++ b/vendor/github.com/redis/go-redis/extra/redisotel/v9/README.md @@ -0,0 +1,34 @@ +# OpenTelemetry instrumentation for go-redis + +## Installation + +```bash +go get github.com/redis/go-redis/extra/redisotel/v9 +``` + +## Usage + +Tracing is enabled by adding a hook: + +```go +import ( + "github.com/redis/go-redis/v9" + "github.com/redis/go-redis/extra/redisotel/v9" +) + +rdb := rdb.NewClient(&rdb.Options{...}) + +// Enable tracing instrumentation. +if err := redisotel.InstrumentTracing(rdb); err != nil { + panic(err) +} + +// Enable metrics instrumentation. +if err := redisotel.InstrumentMetrics(rdb); err != nil { + panic(err) +} +``` + +See [example](../../example/otel) and +[Monitoring Go Redis Performance and Errors](https://redis.uptrace.dev/guide/go-redis-monitoring.html) +for details. diff --git a/vendor/github.com/redis/go-redis/extra/redisotel/v9/config.go b/vendor/github.com/redis/go-redis/extra/redisotel/v9/config.go new file mode 100644 index 00000000..c24f8967 --- /dev/null +++ b/vendor/github.com/redis/go-redis/extra/redisotel/v9/config.go @@ -0,0 +1,138 @@ +package redisotel + +import ( + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + semconv "go.opentelemetry.io/otel/semconv/v1.12.0" + "go.opentelemetry.io/otel/trace" +) + +type config struct { + // Common options. + + dbSystem string + attrs []attribute.KeyValue + + // Tracing options. + + tp trace.TracerProvider + tracer trace.Tracer + + dbStmtEnabled bool + + // Metrics options. + + mp metric.MeterProvider + meter metric.Meter + + poolName string +} + +type baseOption interface { + apply(conf *config) +} + +type Option interface { + baseOption + tracing() + metrics() +} + +type option func(conf *config) + +func (fn option) apply(conf *config) { + fn(conf) +} + +func (fn option) tracing() {} + +func (fn option) metrics() {} + +func newConfig(opts ...baseOption) *config { + conf := &config{ + dbSystem: "redis", + attrs: []attribute.KeyValue{}, + + tp: otel.GetTracerProvider(), + mp: otel.GetMeterProvider(), + dbStmtEnabled: true, + } + + for _, opt := range opts { + opt.apply(conf) + } + + conf.attrs = append(conf.attrs, semconv.DBSystemKey.String(conf.dbSystem)) + + return conf +} + +func WithDBSystem(dbSystem string) Option { + return option(func(conf *config) { + conf.dbSystem = dbSystem + }) +} + +// WithAttributes specifies additional attributes to be added to the span. +func WithAttributes(attrs ...attribute.KeyValue) Option { + return option(func(conf *config) { + conf.attrs = append(conf.attrs, attrs...) + }) +} + +//------------------------------------------------------------------------------ + +type TracingOption interface { + baseOption + tracing() +} + +type tracingOption func(conf *config) + +var _ TracingOption = (*tracingOption)(nil) + +func (fn tracingOption) apply(conf *config) { + fn(conf) +} + +func (fn tracingOption) tracing() {} + +// WithTracerProvider specifies a tracer provider to use for creating a tracer. +// If none is specified, the global provider is used. +func WithTracerProvider(provider trace.TracerProvider) TracingOption { + return tracingOption(func(conf *config) { + conf.tp = provider + }) +} + +// WithDBStatement tells the tracing hook not to log raw redis commands. 
+func WithDBStatement(on bool) TracingOption { + return tracingOption(func(conf *config) { + conf.dbStmtEnabled = on + }) +} + +//------------------------------------------------------------------------------ + +type MetricsOption interface { + baseOption + metrics() +} + +type metricsOption func(conf *config) + +var _ MetricsOption = (*metricsOption)(nil) + +func (fn metricsOption) apply(conf *config) { + fn(conf) +} + +func (fn metricsOption) metrics() {} + +// WithMeterProvider configures a metric.Meter used to create instruments. +func WithMeterProvider(mp metric.MeterProvider) MetricsOption { + return metricsOption(func(conf *config) { + conf.mp = mp + }) +} diff --git a/vendor/github.com/redis/go-redis/extra/redisotel/v9/metrics.go b/vendor/github.com/redis/go-redis/extra/redisotel/v9/metrics.go new file mode 100644 index 00000000..695c7ee3 --- /dev/null +++ b/vendor/github.com/redis/go-redis/extra/redisotel/v9/metrics.go @@ -0,0 +1,253 @@ +package redisotel + +import ( + "context" + "fmt" + "net" + "time" + + "github.com/redis/go-redis/v9" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" +) + +// InstrumentMetrics starts reporting OpenTelemetry Metrics. +// +// Based on https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/semantic_conventions/database-metrics.md +func InstrumentMetrics(rdb redis.UniversalClient, opts ...MetricsOption) error { + baseOpts := make([]baseOption, len(opts)) + for i, opt := range opts { + baseOpts[i] = opt + } + conf := newConfig(baseOpts...) + + if conf.meter == nil { + conf.meter = conf.mp.Meter( + instrumName, + metric.WithInstrumentationVersion("semver:"+redis.Version()), + ) + } + + switch rdb := rdb.(type) { + case *redis.Client: + if conf.poolName == "" { + opt := rdb.Options() + conf.poolName = opt.Addr + } + conf.attrs = append(conf.attrs, attribute.String("pool.name", conf.poolName)) + + if err := reportPoolStats(rdb, conf); err != nil { + return err + } + if err := addMetricsHook(rdb, conf); err != nil { + return err + } + return nil + case *redis.ClusterClient: + rdb.OnNewNode(func(rdb *redis.Client) { + if conf.poolName == "" { + opt := rdb.Options() + conf.poolName = opt.Addr + } + conf.attrs = append(conf.attrs, attribute.String("pool.name", conf.poolName)) + + if err := reportPoolStats(rdb, conf); err != nil { + otel.Handle(err) + } + if err := addMetricsHook(rdb, conf); err != nil { + otel.Handle(err) + } + }) + return nil + case *redis.Ring: + rdb.OnNewNode(func(rdb *redis.Client) { + if conf.poolName == "" { + opt := rdb.Options() + conf.poolName = opt.Addr + } + conf.attrs = append(conf.attrs, attribute.String("pool.name", conf.poolName)) + + if err := reportPoolStats(rdb, conf); err != nil { + otel.Handle(err) + } + if err := addMetricsHook(rdb, conf); err != nil { + otel.Handle(err) + } + }) + return nil + default: + return fmt.Errorf("redisotel: %T not supported", rdb) + } +} + +func reportPoolStats(rdb *redis.Client, conf *config) error { + labels := conf.attrs + idleAttrs := append(labels, attribute.String("state", "idle")) + usedAttrs := append(labels, attribute.String("state", "used")) + + idleMax, err := conf.meter.Int64ObservableUpDownCounter( + "db.client.connections.idle.max", + metric.WithDescription("The maximum number of idle open connections allowed"), + ) + if err != nil { + return err + } + + idleMin, err := conf.meter.Int64ObservableUpDownCounter( + "db.client.connections.idle.min", + metric.WithDescription("The minimum 
number of idle open connections allowed"), + ) + if err != nil { + return err + } + + connsMax, err := conf.meter.Int64ObservableUpDownCounter( + "db.client.connections.max", + metric.WithDescription("The maximum number of open connections allowed"), + ) + if err != nil { + return err + } + + usage, err := conf.meter.Int64ObservableUpDownCounter( + "db.client.connections.usage", + metric.WithDescription("The number of connections that are currently in state described by the state attribute"), + ) + if err != nil { + return err + } + + timeouts, err := conf.meter.Int64ObservableUpDownCounter( + "db.client.connections.timeouts", + metric.WithDescription("The number of connection timeouts that have occurred trying to obtain a connection from the pool"), + ) + if err != nil { + return err + } + + redisConf := rdb.Options() + _, err = conf.meter.RegisterCallback( + func(ctx context.Context, o metric.Observer) error { + stats := rdb.PoolStats() + + o.ObserveInt64(idleMax, int64(redisConf.MaxIdleConns), metric.WithAttributes(labels...)) + o.ObserveInt64(idleMin, int64(redisConf.MinIdleConns), metric.WithAttributes(labels...)) + o.ObserveInt64(connsMax, int64(redisConf.PoolSize), metric.WithAttributes(labels...)) + + o.ObserveInt64(usage, int64(stats.IdleConns), metric.WithAttributes(idleAttrs...)) + o.ObserveInt64(usage, int64(stats.TotalConns-stats.IdleConns), metric.WithAttributes(usedAttrs...)) + + o.ObserveInt64(timeouts, int64(stats.Timeouts), metric.WithAttributes(labels...)) + return nil + }, + idleMax, + idleMin, + connsMax, + usage, + timeouts, + ) + + return err +} + +func addMetricsHook(rdb *redis.Client, conf *config) error { + createTime, err := conf.meter.Float64Histogram( + "db.client.connections.create_time", + metric.WithDescription("The time it took to create a new connection."), + metric.WithUnit("ms"), + ) + if err != nil { + return err + } + + useTime, err := conf.meter.Float64Histogram( + "db.client.connections.use_time", + metric.WithDescription("The time between borrowing a connection and returning it to the pool."), + metric.WithUnit("ms"), + ) + if err != nil { + return err + } + + rdb.AddHook(&metricsHook{ + createTime: createTime, + useTime: useTime, + attrs: conf.attrs, + }) + return nil +} + +type metricsHook struct { + createTime metric.Float64Histogram + useTime metric.Float64Histogram + attrs []attribute.KeyValue +} + +var _ redis.Hook = (*metricsHook)(nil) + +func (mh *metricsHook) DialHook(hook redis.DialHook) redis.DialHook { + return func(ctx context.Context, network, addr string) (net.Conn, error) { + start := time.Now() + + conn, err := hook(ctx, network, addr) + + attrs := make([]attribute.KeyValue, 0, len(mh.attrs)+1) + attrs = append(attrs, mh.attrs...) + attrs = append(attrs, statusAttr(err)) + + mh.createTime.Record(ctx, milliseconds(time.Since(start)), metric.WithAttributes(attrs...)) + return conn, err + } +} + +func (mh *metricsHook) ProcessHook(hook redis.ProcessHook) redis.ProcessHook { + return func(ctx context.Context, cmd redis.Cmder) error { + start := time.Now() + + err := hook(ctx, cmd) + + dur := time.Since(start) + + attrs := make([]attribute.KeyValue, 0, len(mh.attrs)+2) + attrs = append(attrs, mh.attrs...) 
+ attrs = append(attrs, attribute.String("type", "command")) + attrs = append(attrs, statusAttr(err)) + + mh.useTime.Record(ctx, milliseconds(dur), metric.WithAttributes(attrs...)) + + return err + } +} + +func (mh *metricsHook) ProcessPipelineHook( + hook redis.ProcessPipelineHook, +) redis.ProcessPipelineHook { + return func(ctx context.Context, cmds []redis.Cmder) error { + start := time.Now() + + err := hook(ctx, cmds) + + dur := time.Since(start) + + attrs := make([]attribute.KeyValue, 0, len(mh.attrs)+2) + attrs = append(attrs, mh.attrs...) + attrs = append(attrs, attribute.String("type", "pipeline")) + attrs = append(attrs, statusAttr(err)) + + mh.useTime.Record(ctx, milliseconds(dur), metric.WithAttributes(attrs...)) + + return err + } +} + +func milliseconds(d time.Duration) float64 { + return float64(d) / float64(time.Millisecond) +} + +func statusAttr(err error) attribute.KeyValue { + if err != nil { + return attribute.String("status", "error") + } + return attribute.String("status", "ok") +} diff --git a/vendor/github.com/redis/go-redis/extra/redisotel/v9/tracing.go b/vendor/github.com/redis/go-redis/extra/redisotel/v9/tracing.go new file mode 100644 index 00000000..0bbf692a --- /dev/null +++ b/vendor/github.com/redis/go-redis/extra/redisotel/v9/tracing.go @@ -0,0 +1,215 @@ +package redisotel + +import ( + "context" + "fmt" + "net" + "runtime" + "strings" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + semconv "go.opentelemetry.io/otel/semconv/v1.10.0" + "go.opentelemetry.io/otel/trace" + + "github.com/redis/go-redis/extra/rediscmd/v9" + "github.com/redis/go-redis/v9" +) + +const ( + instrumName = "github.com/redis/go-redis/extra/redisotel" +) + +func InstrumentTracing(rdb redis.UniversalClient, opts ...TracingOption) error { + switch rdb := rdb.(type) { + case *redis.Client: + opt := rdb.Options() + connString := formatDBConnString(opt.Network, opt.Addr) + rdb.AddHook(newTracingHook(connString, opts...)) + return nil + case *redis.ClusterClient: + rdb.AddHook(newTracingHook("", opts...)) + + rdb.OnNewNode(func(rdb *redis.Client) { + opt := rdb.Options() + connString := formatDBConnString(opt.Network, opt.Addr) + rdb.AddHook(newTracingHook(connString, opts...)) + }) + return nil + case *redis.Ring: + rdb.AddHook(newTracingHook("", opts...)) + + rdb.OnNewNode(func(rdb *redis.Client) { + opt := rdb.Options() + connString := formatDBConnString(opt.Network, opt.Addr) + rdb.AddHook(newTracingHook(connString, opts...)) + }) + return nil + default: + return fmt.Errorf("redisotel: %T not supported", rdb) + } +} + +type tracingHook struct { + conf *config + + spanOpts []trace.SpanStartOption +} + +var _ redis.Hook = (*tracingHook)(nil) + +func newTracingHook(connString string, opts ...TracingOption) *tracingHook { + baseOpts := make([]baseOption, len(opts)) + for i, opt := range opts { + baseOpts[i] = opt + } + conf := newConfig(baseOpts...) 
+ + if conf.tracer == nil { + conf.tracer = conf.tp.Tracer( + instrumName, + trace.WithInstrumentationVersion("semver:"+redis.Version()), + ) + } + if connString != "" { + conf.attrs = append(conf.attrs, semconv.DBConnectionStringKey.String(connString)) + } + + return &tracingHook{ + conf: conf, + + spanOpts: []trace.SpanStartOption{ + trace.WithSpanKind(trace.SpanKindClient), + trace.WithAttributes(conf.attrs...), + }, + } +} + +func (th *tracingHook) DialHook(hook redis.DialHook) redis.DialHook { + return func(ctx context.Context, network, addr string) (net.Conn, error) { + if !trace.SpanFromContext(ctx).IsRecording() { + return hook(ctx, network, addr) + } + + ctx, span := th.conf.tracer.Start(ctx, "redis.dial", th.spanOpts...) + defer span.End() + + conn, err := hook(ctx, network, addr) + if err != nil { + recordError(span, err) + return nil, err + } + return conn, nil + } +} + +func (th *tracingHook) ProcessHook(hook redis.ProcessHook) redis.ProcessHook { + return func(ctx context.Context, cmd redis.Cmder) error { + if !trace.SpanFromContext(ctx).IsRecording() { + return hook(ctx, cmd) + } + + fn, file, line := funcFileLine("github.com/redis/go-redis") + + attrs := make([]attribute.KeyValue, 0, 8) + attrs = append(attrs, + semconv.CodeFunctionKey.String(fn), + semconv.CodeFilepathKey.String(file), + semconv.CodeLineNumberKey.Int(line), + ) + + if th.conf.dbStmtEnabled { + cmdString := rediscmd.CmdString(cmd) + attrs = append(attrs, semconv.DBStatementKey.String(cmdString)) + } + + opts := th.spanOpts + opts = append(opts, trace.WithAttributes(attrs...)) + + ctx, span := th.conf.tracer.Start(ctx, cmd.FullName(), opts...) + defer span.End() + + if err := hook(ctx, cmd); err != nil { + recordError(span, err) + return err + } + return nil + } +} + +func (th *tracingHook) ProcessPipelineHook( + hook redis.ProcessPipelineHook, +) redis.ProcessPipelineHook { + return func(ctx context.Context, cmds []redis.Cmder) error { + if !trace.SpanFromContext(ctx).IsRecording() { + return hook(ctx, cmds) + } + + fn, file, line := funcFileLine("github.com/redis/go-redis") + + attrs := make([]attribute.KeyValue, 0, 8) + attrs = append(attrs, + semconv.CodeFunctionKey.String(fn), + semconv.CodeFilepathKey.String(file), + semconv.CodeLineNumberKey.Int(line), + attribute.Int("db.redis.num_cmd", len(cmds)), + ) + + summary, cmdsString := rediscmd.CmdsString(cmds) + if th.conf.dbStmtEnabled { + attrs = append(attrs, semconv.DBStatementKey.String(cmdsString)) + } + + opts := th.spanOpts + opts = append(opts, trace.WithAttributes(attrs...)) + + ctx, span := th.conf.tracer.Start(ctx, "redis.pipeline "+summary, opts...) 
+ defer span.End() + + if err := hook(ctx, cmds); err != nil { + recordError(span, err) + return err + } + return nil + } +} + +func recordError(span trace.Span, err error) { + if err != redis.Nil { + span.RecordError(err) + span.SetStatus(codes.Error, err.Error()) + } +} + +func formatDBConnString(network, addr string) string { + if network == "tcp" { + network = "redis" + } + return fmt.Sprintf("%s://%s", network, addr) +} + +func funcFileLine(pkg string) (string, string, int) { + const depth = 16 + var pcs [depth]uintptr + n := runtime.Callers(3, pcs[:]) + ff := runtime.CallersFrames(pcs[:n]) + + var fn, file string + var line int + for { + f, ok := ff.Next() + if !ok { + break + } + fn, file, line = f.Function, f.File, f.Line + if !strings.Contains(fn, pkg) { + break + } + } + + if ind := strings.LastIndexByte(fn, '/'); ind != -1 { + fn = fn[ind+1:] + } + + return fn, file, line +} diff --git a/vendor/github.com/redis/go-redis/v9/.gitignore b/vendor/github.com/redis/go-redis/v9/.gitignore new file mode 100644 index 00000000..64a7cb51 --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/.gitignore @@ -0,0 +1,4 @@ +*.rdb +testdata/* +.idea/ +.DS_Store diff --git a/vendor/github.com/redis/go-redis/v9/.golangci.yml b/vendor/github.com/redis/go-redis/v9/.golangci.yml new file mode 100644 index 00000000..de514554 --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/.golangci.yml @@ -0,0 +1,4 @@ +run: + concurrency: 8 + deadline: 5m + tests: false diff --git a/vendor/github.com/redis/go-redis/v9/.prettierrc.yml b/vendor/github.com/redis/go-redis/v9/.prettierrc.yml new file mode 100644 index 00000000..8b7f044a --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/.prettierrc.yml @@ -0,0 +1,4 @@ +semi: false +singleQuote: true +proseWrap: always +printWidth: 100 diff --git a/vendor/github.com/redis/go-redis/v9/CHANGELOG.md b/vendor/github.com/redis/go-redis/v9/CHANGELOG.md new file mode 100644 index 00000000..297438a9 --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/CHANGELOG.md @@ -0,0 +1,124 @@ +## [9.0.5](https://github.com/redis/go-redis/compare/v9.0.4...v9.0.5) (2023-05-29) + + +### Features + +* Add ACL LOG ([#2536](https://github.com/redis/go-redis/issues/2536)) ([31ba855](https://github.com/redis/go-redis/commit/31ba855ddebc38fbcc69a75d9d4fb769417cf602)) +* add field protocol to setupClusterQueryParams ([#2600](https://github.com/redis/go-redis/issues/2600)) ([840c25c](https://github.com/redis/go-redis/commit/840c25cb6f320501886a82a5e75f47b491e46fbe)) +* add protocol option ([#2598](https://github.com/redis/go-redis/issues/2598)) ([3917988](https://github.com/redis/go-redis/commit/391798880cfb915c4660f6c3ba63e0c1a459e2af)) + + + +## [9.0.4](https://github.com/redis/go-redis/compare/v9.0.3...v9.0.4) (2023-05-01) + + +### Bug Fixes + +* reader float parser ([#2513](https://github.com/redis/go-redis/issues/2513)) ([46f2450](https://github.com/redis/go-redis/commit/46f245075e6e3a8bd8471f9ca67ea95fd675e241)) + + +### Features + +* add client info command ([#2483](https://github.com/redis/go-redis/issues/2483)) ([b8c7317](https://github.com/redis/go-redis/commit/b8c7317cc6af444603731f7017c602347c0ba61e)) +* no longer verify HELLO error messages ([#2515](https://github.com/redis/go-redis/issues/2515)) ([7b4f217](https://github.com/redis/go-redis/commit/7b4f2179cb5dba3d3c6b0c6f10db52b837c912c8)) +* read the structure to increase the judgment of the omitempty op… ([#2529](https://github.com/redis/go-redis/issues/2529)) 
([37c057b](https://github.com/redis/go-redis/commit/37c057b8e597c5e8a0e372337f6a8ad27f6030af)) + + + +## [9.0.3](https://github.com/redis/go-redis/compare/v9.0.2...v9.0.3) (2023-04-02) + +### New Features + +- feat(scan): scan time.Time sets the default decoding (#2413) +- Add support for CLUSTER LINKS command (#2504) +- Add support for acl dryrun command (#2502) +- Add support for COMMAND GETKEYS & COMMAND GETKEYSANDFLAGS (#2500) +- Add support for LCS Command (#2480) +- Add support for BZMPOP (#2456) +- Adding support for ZMPOP command (#2408) +- Add support for LMPOP (#2440) +- feat: remove pool unused fields (#2438) +- Expiretime and PExpireTime (#2426) +- Implement `FUNCTION` group of commands (#2475) +- feat(zadd): add ZAddLT and ZAddGT (#2429) +- Add: Support for COMMAND LIST command (#2491) +- Add support for BLMPOP (#2442) +- feat: check pipeline.Do to prevent confusion with Exec (#2517) +- Function stats, function kill, fcall and fcall_ro (#2486) +- feat: Add support for CLUSTER SHARDS command (#2507) +- feat(cmd): support for adding byte,bit parameters to the bitpos command (#2498) + +### Fixed + +- fix: eval api cmd.SetFirstKeyPos (#2501) +- fix: limit the number of connections created (#2441) +- fixed #2462 v9 continue support dragonfly, it's Hello command return "NOAUTH Authentication required" error (#2479) +- Fix for internal/hscan/structmap.go:89:23: undefined: reflect.Pointer (#2458) +- fix: group lag can be null (#2448) + +### Maintenance + +- Updating to the latest version of redis (#2508) +- Allowing for running tests on a port other than the fixed 6380 (#2466) +- redis 7.0.8 in tests (#2450) +- docs: Update redisotel example for v9 (#2425) +- chore: update go mod, Upgrade golang.org/x/net version to 0.7.0 (#2476) +- chore: add Chinese translation (#2436) +- chore(deps): bump github.com/bsm/gomega from 1.20.0 to 1.26.0 (#2421) +- chore(deps): bump github.com/bsm/ginkgo/v2 from 2.5.0 to 2.7.0 (#2420) +- chore(deps): bump actions/setup-go from 3 to 4 (#2495) +- docs: add instructions for the HSet api (#2503) +- docs: add reading lag field comment (#2451) +- test: update go mod before testing(go mod tidy) (#2423) +- docs: fix comment typo (#2505) +- test: remove testify (#2463) +- refactor: change ListElementCmd to KeyValuesCmd. (#2443) +- fix(appendArg): appendArg case special type (#2489) + +## [9.0.2](https://github.com/redis/go-redis/compare/v9.0.1...v9.0.2) (2023-02-01) + +### Features + +* upgrade OpenTelemetry, use the new metrics API. ([#2410](https://github.com/redis/go-redis/issues/2410)) ([e29e42c](https://github.com/redis/go-redis/commit/e29e42cde2755ab910d04185025dc43ce6f59c65)) + +## v9 2023-01-30 + +### Breaking + +- Changed Pipelines to not be thread-safe any more. + +### Added + +- Added support for [RESP3](https://github.com/antirez/RESP3/blob/master/spec.md) protocol. It was + contributed by @monkey92t who has done the majority of work in this release. +- Added `ContextTimeoutEnabled` option that controls whether the client respects context timeouts + and deadlines. See + [Redis Timeouts](https://redis.uptrace.dev/guide/go-redis-debugging.html#timeouts) for details. +- Added `ParseClusterURL` to parse URLs into `ClusterOptions`, for example, + `redis://user:password@localhost:6789?dial_timeout=3&read_timeout=6s&addr=localhost:6790&addr=localhost:6791`. +- Added metrics instrumentation using `redisotel.IstrumentMetrics`. 
See + [documentation](https://redis.uptrace.dev/guide/go-redis-monitoring.html) +- Added `redis.HasErrorPrefix` to help working with errors. + +### Changed + +- Removed asynchronous cancellation based on the context timeout. It was racy in v8 and is + completely gone in v9. +- Reworked hook interface and added `DialHook`. +- Replaced `redisotel.NewTracingHook` with `redisotel.InstrumentTracing`. See + [example](example/otel) and + [documentation](https://redis.uptrace.dev/guide/go-redis-monitoring.html). +- Replaced `*redis.Z` with `redis.Z` since it is small enough to be passed as value without making + an allocation. +- Renamed the option `MaxConnAge` to `ConnMaxLifetime`. +- Renamed the option `IdleTimeout` to `ConnMaxIdleTime`. +- Removed connection reaper in favor of `MaxIdleConns`. +- Removed `WithContext` since `context.Context` can be passed directly as an arg. +- Removed `Pipeline.Close` since there is no real need to explicitly manage pipeline resources and + it can be safely reused via `sync.Pool` etc. `Pipeline.Discard` is still available if you want to + reset commands for some reason. + +### Fixed + +- Improved and fixed pipeline retries. +- As usually, added support for more commands and fixed some bugs. diff --git a/vendor/github.com/redis/go-redis/v9/LICENSE b/vendor/github.com/redis/go-redis/v9/LICENSE new file mode 100644 index 00000000..f4967dbc --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2013 The github.com/redis/go-redis Authors. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/redis/go-redis/v9/Makefile b/vendor/github.com/redis/go-redis/v9/Makefile new file mode 100644 index 00000000..b59c3955 --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/Makefile @@ -0,0 +1,41 @@ +GO_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | sort) + +test: testdeps + set -e; for dir in $(GO_MOD_DIRS); do \ + echo "go test in $${dir}"; \ + (cd "$${dir}" && \ + go mod tidy -compat=1.18 && \ + go test && \ + go test ./... -short -race && \ + go test ./... -run=NONE -bench=. -benchmem && \ + env GOOS=linux GOARCH=386 go test && \ + go vet); \ + done + cd internal/customvet && go build . 
+ go vet -vettool ./internal/customvet/customvet + +testdeps: testdata/redis/src/redis-server + +bench: testdeps + go test ./... -test.run=NONE -test.bench=. -test.benchmem + +.PHONY: all test testdeps bench + +testdata/redis: + mkdir -p $@ + wget -qO- https://download.redis.io/releases/redis-7.2-rc3.tar.gz | tar xvz --strip-components=1 -C $@ + +testdata/redis/src/redis-server: testdata/redis + cd $< && make all + +fmt: + gofmt -w -s ./ + goimports -w -local github.com/redis/go-redis ./ + +go_mod_tidy: + set -e; for dir in $(GO_MOD_DIRS); do \ + echo "go mod tidy in $${dir}"; \ + (cd "$${dir}" && \ + go get -u ./... && \ + go mod tidy -compat=1.18); \ + done diff --git a/vendor/github.com/redis/go-redis/v9/README.md b/vendor/github.com/redis/go-redis/v9/README.md new file mode 100644 index 00000000..3486e8e5 --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/README.md @@ -0,0 +1,224 @@ +# Redis client for Go + +[![build workflow](https://github.com/redis/go-redis/actions/workflows/build.yml/badge.svg)](https://github.com/redis/go-redis/actions) +[![PkgGoDev](https://pkg.go.dev/badge/github.com/redis/go-redis/v9)](https://pkg.go.dev/github.com/redis/go-redis/v9?tab=doc) +[![Documentation](https://img.shields.io/badge/redis-documentation-informational)](https://redis.uptrace.dev/) +[![Chat](https://discordapp.com/api/guilds/752070105847955518/widget.png)](https://discord.gg/rWtp5Aj) + +> go-redis is brought to you by :star: [**uptrace/uptrace**](https://github.com/uptrace/uptrace). +> Uptrace is an open-source APM tool that supports distributed tracing, metrics, and logs. You can +> use it to monitor applications and set up automatic alerts to receive notifications via email, +> Slack, Telegram, and others. +> +> See [OpenTelemetry](example/otel) example which demonstrates how you can use Uptrace to monitor +> go-redis. + +## Documentation + +- [English](https://redis.uptrace.dev) +- [简体中文](https://redis.uptrace.dev/zh/) + +## Resources + +- [Discussions](https://github.com/redis/go-redis/discussions) +- [Chat](https://discord.gg/rWtp5Aj) +- [Reference](https://pkg.go.dev/github.com/redis/go-redis/v9) +- [Examples](https://pkg.go.dev/github.com/redis/go-redis/v9#pkg-examples) + +## Ecosystem + +- [Redis Mock](https://github.com/go-redis/redismock) +- [Distributed Locks](https://github.com/bsm/redislock) +- [Redis Cache](https://github.com/go-redis/cache) +- [Rate limiting](https://github.com/go-redis/redis_rate) + +This client also works with [Kvrocks](https://github.com/apache/incubator-kvrocks), a distributed +key value NoSQL database that uses RocksDB as storage engine and is compatible with Redis protocol. + +## Features + +- Redis 3 commands except QUIT, MONITOR, and SYNC. +- Automatic connection pooling with +- [Pub/Sub](https://redis.uptrace.dev/guide/go-redis-pubsub.html). +- [Pipelines and transactions](https://redis.uptrace.dev/guide/go-redis-pipelines.html). +- [Scripting](https://redis.uptrace.dev/guide/lua-scripting.html). +- [Redis Sentinel](https://redis.uptrace.dev/guide/go-redis-sentinel.html). +- [Redis Cluster](https://redis.uptrace.dev/guide/go-redis-cluster.html). +- [Redis Ring](https://redis.uptrace.dev/guide/ring.html). +- [Redis Performance Monitoring](https://redis.uptrace.dev/guide/redis-performance-monitoring.html). +- [Redis Probabilistic [RedisStack]](https://redis.io/docs/data-types/probabilistic/) + +## Installation + +go-redis supports 2 last Go versions and requires a Go version with +[modules](https://github.com/golang/go/wiki/Modules) support. 
So make sure to initialize a Go +module: + +```shell +go mod init github.com/my/repo +``` + +Then install go-redis/**v9**: + +```shell +go get github.com/redis/go-redis/v9 +``` + +## Quickstart + +```go +import ( + "context" + "github.com/redis/go-redis/v9" + "fmt" +) + +var ctx = context.Background() + +func ExampleClient() { + rdb := redis.NewClient(&redis.Options{ + Addr: "localhost:6379", + Password: "", // no password set + DB: 0, // use default DB + }) + + err := rdb.Set(ctx, "key", "value", 0).Err() + if err != nil { + panic(err) + } + + val, err := rdb.Get(ctx, "key").Result() + if err != nil { + panic(err) + } + fmt.Println("key", val) + + val2, err := rdb.Get(ctx, "key2").Result() + if err == redis.Nil { + fmt.Println("key2 does not exist") + } else if err != nil { + panic(err) + } else { + fmt.Println("key2", val2) + } + // Output: key value + // key2 does not exist +} +``` + +The above can be modified to specify the version of the RESP protocol by adding the `protocol` option to the `Options` struct: + +```go + rdb := redis.NewClient(&redis.Options{ + Addr: "localhost:6379", + Password: "", // no password set + DB: 0, // use default DB + Protocol: 3, // specify 2 for RESP 2 or 3 for RESP 3 + }) + +``` + +### Connecting via a redis url + +go-redis also supports connecting via the [redis uri specification](https://github.com/redis/redis-specifications/tree/master/uri/redis.txt). The example below demonstrates how the connection can easily be configured using a string, adhering to this specification. + +```go +import ( + "context" + "github.com/redis/go-redis/v9" + "fmt" +) + +var ctx = context.Background() + +func ExampleClient() { + url := "redis://localhost:6379?password=hello&protocol=3" + opts, err := redis.ParseURL(url) + if err != nil { + panic(err) + } + rdb := redis.NewClient(opts) +``` + +## Look and feel + +Some corner cases: + +```go +// SET key value EX 10 NX +set, err := rdb.SetNX(ctx, "key", "value", 10*time.Second).Result() + +// SET key value keepttl NX +set, err := rdb.SetNX(ctx, "key", "value", redis.KeepTTL).Result() + +// SORT list LIMIT 0 2 ASC +vals, err := rdb.Sort(ctx, "list", &redis.Sort{Offset: 0, Count: 2, Order: "ASC"}).Result() + +// ZRANGEBYSCORE zset -inf +inf WITHSCORES LIMIT 0 2 +vals, err := rdb.ZRangeByScoreWithScores(ctx, "zset", &redis.ZRangeBy{ + Min: "-inf", + Max: "+inf", + Offset: 0, + Count: 2, +}).Result() + +// ZINTERSTORE out 2 zset1 zset2 WEIGHTS 2 3 AGGREGATE SUM +vals, err := rdb.ZInterStore(ctx, "out", &redis.ZStore{ + Keys: []string{"zset1", "zset2"}, + Weights: []int64{2, 3} +}).Result() + +// EVAL "return {KEYS[1],ARGV[1]}" 1 "key" "hello" +vals, err := rdb.Eval(ctx, "return {KEYS[1],ARGV[1]}", []string{"key"}, "hello").Result() + +// custom command +res, err := rdb.Do(ctx, "set", "key", "value").Result() +``` + +## Run the test + +go-redis will start a redis-server and run the test cases. 
+ +The paths of redis-server bin file and redis config file are defined in `main_test.go`: + +```go +var ( + redisServerBin, _ = filepath.Abs(filepath.Join("testdata", "redis", "src", "redis-server")) + redisServerConf, _ = filepath.Abs(filepath.Join("testdata", "redis", "redis.conf")) +) +``` + +For local testing, you can change the variables to refer to your local files, or create a soft link +to the corresponding folder for redis-server and copy the config file to `testdata/redis/`: + +```shell +ln -s /usr/bin/redis-server ./go-redis/testdata/redis/src +cp ./go-redis/testdata/redis.conf ./go-redis/testdata/redis/ +``` + +Lastly, run: + +```shell +go test +``` + +Another option is to run your specific tests with an already running redis. The example below, tests against a redis running on port 9999.: + +```shell +REDIS_PORT=9999 go test +``` + +## See also + +- [Golang ORM](https://bun.uptrace.dev) for PostgreSQL, MySQL, MSSQL, and SQLite +- [Golang PostgreSQL](https://bun.uptrace.dev/postgres/) +- [Golang HTTP router](https://bunrouter.uptrace.dev/) +- [Golang ClickHouse ORM](https://github.com/uptrace/go-clickhouse) + +## Contributors + +Thanks to all the people who already contributed! + + + + diff --git a/vendor/github.com/redis/go-redis/v9/RELEASING.md b/vendor/github.com/redis/go-redis/v9/RELEASING.md new file mode 100644 index 00000000..1115db4e --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/RELEASING.md @@ -0,0 +1,15 @@ +# Releasing + +1. Run `release.sh` script which updates versions in go.mod files and pushes a new branch to GitHub: + +```shell +TAG=v1.0.0 ./scripts/release.sh +``` + +2. Open a pull request and wait for the build to finish. + +3. Merge the pull request and run `tag.sh` to create tags for packages: + +```shell +TAG=v1.0.0 ./scripts/tag.sh +``` diff --git a/vendor/github.com/redis/go-redis/v9/cluster.go b/vendor/github.com/redis/go-redis/v9/cluster.go new file mode 100644 index 00000000..941838dd --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/cluster.go @@ -0,0 +1,1901 @@ +package redis + +import ( + "context" + "crypto/tls" + "fmt" + "math" + "net" + "net/url" + "runtime" + "sort" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/redis/go-redis/v9/internal" + "github.com/redis/go-redis/v9/internal/hashtag" + "github.com/redis/go-redis/v9/internal/pool" + "github.com/redis/go-redis/v9/internal/proto" + "github.com/redis/go-redis/v9/internal/rand" +) + +var errClusterNoNodes = fmt.Errorf("redis: cluster has no nodes") + +// ClusterOptions are used to configure a cluster client and should be +// passed to NewClusterClient. +type ClusterOptions struct { + // A seed list of host:port addresses of cluster nodes. + Addrs []string + + // ClientName will execute the `CLIENT SETNAME ClientName` command for each conn. + ClientName string + + // NewClient creates a cluster node client with provided name and options. + NewClient func(opt *Options) *Client + + // The maximum number of retries before giving up. Command is retried + // on network errors and MOVED/ASK redirects. + // Default is 3 retries. + MaxRedirects int + + // Enables read-only commands on slave nodes. + ReadOnly bool + // Allows routing read-only commands to the closest master or slave node. + // It automatically enables ReadOnly. + RouteByLatency bool + // Allows routing read-only commands to the random master or slave node. + // It automatically enables ReadOnly. + RouteRandomly bool + + // Optional function that returns cluster slots information. 
+ // It is useful to manually create cluster of standalone Redis servers + // and load-balance read/write operations between master and slaves. + // It can use service like ZooKeeper to maintain configuration information + // and Cluster.ReloadState to manually trigger state reloading. + ClusterSlots func(context.Context) ([]ClusterSlot, error) + + // Following options are copied from Options struct. + + Dialer func(ctx context.Context, network, addr string) (net.Conn, error) + + OnConnect func(ctx context.Context, cn *Conn) error + + Protocol int + Username string + Password string + + MaxRetries int + MinRetryBackoff time.Duration + MaxRetryBackoff time.Duration + + DialTimeout time.Duration + ReadTimeout time.Duration + WriteTimeout time.Duration + ContextTimeoutEnabled bool + + PoolFIFO bool + PoolSize int // applies per cluster node and not for the whole cluster + PoolTimeout time.Duration + MinIdleConns int + MaxIdleConns int + ConnMaxIdleTime time.Duration + ConnMaxLifetime time.Duration + + TLSConfig *tls.Config +} + +func (opt *ClusterOptions) init() { + if opt.MaxRedirects == -1 { + opt.MaxRedirects = 0 + } else if opt.MaxRedirects == 0 { + opt.MaxRedirects = 3 + } + + if opt.RouteByLatency || opt.RouteRandomly { + opt.ReadOnly = true + } + + if opt.PoolSize == 0 { + opt.PoolSize = 5 * runtime.GOMAXPROCS(0) + } + + switch opt.ReadTimeout { + case -1: + opt.ReadTimeout = 0 + case 0: + opt.ReadTimeout = 3 * time.Second + } + switch opt.WriteTimeout { + case -1: + opt.WriteTimeout = 0 + case 0: + opt.WriteTimeout = opt.ReadTimeout + } + + if opt.MaxRetries == 0 { + opt.MaxRetries = -1 + } + switch opt.MinRetryBackoff { + case -1: + opt.MinRetryBackoff = 0 + case 0: + opt.MinRetryBackoff = 8 * time.Millisecond + } + switch opt.MaxRetryBackoff { + case -1: + opt.MaxRetryBackoff = 0 + case 0: + opt.MaxRetryBackoff = 512 * time.Millisecond + } + + if opt.NewClient == nil { + opt.NewClient = NewClient + } +} + +// ParseClusterURL parses a URL into ClusterOptions that can be used to connect to Redis. +// The URL must be in the form: +// +// redis://:@: +// or +// rediss://:@: +// +// To add additional addresses, specify the query parameter, "addr" one or more times. e.g: +// +// redis://:@:?addr=:&addr=: +// or +// rediss://:@:?addr=:&addr=: +// +// Most Option fields can be set using query parameters, with the following restrictions: +// - field names are mapped using snake-case conversion: to set MaxRetries, use max_retries +// - only scalar type fields are supported (bool, int, time.Duration) +// - for time.Duration fields, values must be a valid input for time.ParseDuration(); +// additionally a plain integer as value (i.e. 
without unit) is intepreted as seconds +// - to disable a duration field, use value less than or equal to 0; to use the default +// value, leave the value blank or remove the parameter +// - only the last value is interpreted if a parameter is given multiple times +// - fields "network", "addr", "username" and "password" can only be set using other +// URL attributes (scheme, host, userinfo, resp.), query paremeters using these +// names will be treated as unknown parameters +// - unknown parameter names will result in an error +// +// Example: +// +// redis://user:password@localhost:6789?dial_timeout=3&read_timeout=6s&addr=localhost:6790&addr=localhost:6791 +// is equivalent to: +// &ClusterOptions{ +// Addr: ["localhost:6789", "localhost:6790", "localhost:6791"] +// DialTimeout: 3 * time.Second, // no time unit = seconds +// ReadTimeout: 6 * time.Second, +// } +func ParseClusterURL(redisURL string) (*ClusterOptions, error) { + o := &ClusterOptions{} + + u, err := url.Parse(redisURL) + if err != nil { + return nil, err + } + + // add base URL to the array of addresses + // more addresses may be added through the URL params + h, p := getHostPortWithDefaults(u) + o.Addrs = append(o.Addrs, net.JoinHostPort(h, p)) + + // setup username, password, and other configurations + o, err = setupClusterConn(u, h, o) + if err != nil { + return nil, err + } + + return o, nil +} + +// setupClusterConn gets the username and password from the URL and the query parameters. +func setupClusterConn(u *url.URL, host string, o *ClusterOptions) (*ClusterOptions, error) { + switch u.Scheme { + case "rediss": + o.TLSConfig = &tls.Config{ServerName: host} + fallthrough + case "redis": + o.Username, o.Password = getUserPassword(u) + default: + return nil, fmt.Errorf("redis: invalid URL scheme: %s", u.Scheme) + } + + // retrieve the configuration from the query parameters + o, err := setupClusterQueryParams(u, o) + if err != nil { + return nil, err + } + + return o, nil +} + +// setupClusterQueryParams converts query parameters in u to option value in o. +func setupClusterQueryParams(u *url.URL, o *ClusterOptions) (*ClusterOptions, error) { + q := queryOptions{q: u.Query()} + + o.Protocol = q.int("protocol") + o.ClientName = q.string("client_name") + o.MaxRedirects = q.int("max_redirects") + o.ReadOnly = q.bool("read_only") + o.RouteByLatency = q.bool("route_by_latency") + o.RouteRandomly = q.bool("route_randomly") + o.MaxRetries = q.int("max_retries") + o.MinRetryBackoff = q.duration("min_retry_backoff") + o.MaxRetryBackoff = q.duration("max_retry_backoff") + o.DialTimeout = q.duration("dial_timeout") + o.ReadTimeout = q.duration("read_timeout") + o.WriteTimeout = q.duration("write_timeout") + o.PoolFIFO = q.bool("pool_fifo") + o.PoolSize = q.int("pool_size") + o.MinIdleConns = q.int("min_idle_conns") + o.PoolTimeout = q.duration("pool_timeout") + o.ConnMaxLifetime = q.duration("conn_max_lifetime") + o.ConnMaxIdleTime = q.duration("conn_max_idle_time") + + if q.err != nil { + return nil, q.err + } + + // addr can be specified as many times as needed + addrs := q.strings("addr") + for _, addr := range addrs { + h, p, err := net.SplitHostPort(addr) + if err != nil || h == "" || p == "" { + return nil, fmt.Errorf("redis: unable to parse addr param: %s", addr) + } + + o.Addrs = append(o.Addrs, net.JoinHostPort(h, p)) + } + + // any parameters left? 
+ if r := q.remaining(); len(r) > 0 { + return nil, fmt.Errorf("redis: unexpected option: %s", strings.Join(r, ", ")) + } + + return o, nil +} + +func (opt *ClusterOptions) clientOptions() *Options { + return &Options{ + ClientName: opt.ClientName, + Dialer: opt.Dialer, + OnConnect: opt.OnConnect, + + Protocol: opt.Protocol, + Username: opt.Username, + Password: opt.Password, + + MaxRetries: opt.MaxRetries, + MinRetryBackoff: opt.MinRetryBackoff, + MaxRetryBackoff: opt.MaxRetryBackoff, + + DialTimeout: opt.DialTimeout, + ReadTimeout: opt.ReadTimeout, + WriteTimeout: opt.WriteTimeout, + + PoolFIFO: opt.PoolFIFO, + PoolSize: opt.PoolSize, + PoolTimeout: opt.PoolTimeout, + MinIdleConns: opt.MinIdleConns, + MaxIdleConns: opt.MaxIdleConns, + ConnMaxIdleTime: opt.ConnMaxIdleTime, + ConnMaxLifetime: opt.ConnMaxLifetime, + + TLSConfig: opt.TLSConfig, + // If ClusterSlots is populated, then we probably have an artificial + // cluster whose nodes are not in clustering mode (otherwise there isn't + // much use for ClusterSlots config). This means we cannot execute the + // READONLY command against that node -- setting readOnly to false in such + // situations in the options below will prevent that from happening. + readOnly: opt.ReadOnly && opt.ClusterSlots == nil, + } +} + +//------------------------------------------------------------------------------ + +type clusterNode struct { + Client *Client + + latency uint32 // atomic + generation uint32 // atomic + failing uint32 // atomic +} + +func newClusterNode(clOpt *ClusterOptions, addr string) *clusterNode { + opt := clOpt.clientOptions() + opt.Addr = addr + node := clusterNode{ + Client: clOpt.NewClient(opt), + } + + node.latency = math.MaxUint32 + if clOpt.RouteByLatency { + go node.updateLatency() + } + + return &node +} + +func (n *clusterNode) String() string { + return n.Client.String() +} + +func (n *clusterNode) Close() error { + return n.Client.Close() +} + +func (n *clusterNode) updateLatency() { + const numProbe = 10 + var dur uint64 + + successes := 0 + for i := 0; i < numProbe; i++ { + time.Sleep(time.Duration(10+rand.Intn(10)) * time.Millisecond) + + start := time.Now() + err := n.Client.Ping(context.TODO()).Err() + if err == nil { + dur += uint64(time.Since(start) / time.Microsecond) + successes++ + } + } + + var latency float64 + if successes == 0 { + // If none of the pings worked, set latency to some arbitrarily high value so this node gets + // least priority. 
+ latency = float64((1 * time.Minute) / time.Microsecond) + } else { + latency = float64(dur) / float64(successes) + } + atomic.StoreUint32(&n.latency, uint32(latency+0.5)) +} + +func (n *clusterNode) Latency() time.Duration { + latency := atomic.LoadUint32(&n.latency) + return time.Duration(latency) * time.Microsecond +} + +func (n *clusterNode) MarkAsFailing() { + atomic.StoreUint32(&n.failing, uint32(time.Now().Unix())) +} + +func (n *clusterNode) Failing() bool { + const timeout = 15 // 15 seconds + + failing := atomic.LoadUint32(&n.failing) + if failing == 0 { + return false + } + if time.Now().Unix()-int64(failing) < timeout { + return true + } + atomic.StoreUint32(&n.failing, 0) + return false +} + +func (n *clusterNode) Generation() uint32 { + return atomic.LoadUint32(&n.generation) +} + +func (n *clusterNode) SetGeneration(gen uint32) { + for { + v := atomic.LoadUint32(&n.generation) + if gen < v || atomic.CompareAndSwapUint32(&n.generation, v, gen) { + break + } + } +} + +//------------------------------------------------------------------------------ + +type clusterNodes struct { + opt *ClusterOptions + + mu sync.RWMutex + addrs []string + nodes map[string]*clusterNode + activeAddrs []string + closed bool + onNewNode []func(rdb *Client) + + _generation uint32 // atomic +} + +func newClusterNodes(opt *ClusterOptions) *clusterNodes { + return &clusterNodes{ + opt: opt, + + addrs: opt.Addrs, + nodes: make(map[string]*clusterNode), + } +} + +func (c *clusterNodes) Close() error { + c.mu.Lock() + defer c.mu.Unlock() + + if c.closed { + return nil + } + c.closed = true + + var firstErr error + for _, node := range c.nodes { + if err := node.Client.Close(); err != nil && firstErr == nil { + firstErr = err + } + } + + c.nodes = nil + c.activeAddrs = nil + + return firstErr +} + +func (c *clusterNodes) OnNewNode(fn func(rdb *Client)) { + c.mu.Lock() + c.onNewNode = append(c.onNewNode, fn) + c.mu.Unlock() +} + +func (c *clusterNodes) Addrs() ([]string, error) { + var addrs []string + + c.mu.RLock() + closed := c.closed //nolint:ifshort + if !closed { + if len(c.activeAddrs) > 0 { + addrs = c.activeAddrs + } else { + addrs = c.addrs + } + } + c.mu.RUnlock() + + if closed { + return nil, pool.ErrClosed + } + if len(addrs) == 0 { + return nil, errClusterNoNodes + } + return addrs, nil +} + +func (c *clusterNodes) NextGeneration() uint32 { + return atomic.AddUint32(&c._generation, 1) +} + +// GC removes unused nodes. 
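+// A node is kept only if its generation is at least the given generation; older nodes are closed and removed, and the addresses of the kept nodes become the new activeAddrs.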
+func (c *clusterNodes) GC(generation uint32) { + //nolint:prealloc + var collected []*clusterNode + + c.mu.Lock() + + c.activeAddrs = c.activeAddrs[:0] + for addr, node := range c.nodes { + if node.Generation() >= generation { + c.activeAddrs = append(c.activeAddrs, addr) + if c.opt.RouteByLatency { + go node.updateLatency() + } + continue + } + + delete(c.nodes, addr) + collected = append(collected, node) + } + + c.mu.Unlock() + + for _, node := range collected { + _ = node.Client.Close() + } +} + +func (c *clusterNodes) GetOrCreate(addr string) (*clusterNode, error) { + node, err := c.get(addr) + if err != nil { + return nil, err + } + if node != nil { + return node, nil + } + + c.mu.Lock() + defer c.mu.Unlock() + + if c.closed { + return nil, pool.ErrClosed + } + + node, ok := c.nodes[addr] + if ok { + return node, nil + } + + node = newClusterNode(c.opt, addr) + for _, fn := range c.onNewNode { + fn(node.Client) + } + + c.addrs = appendIfNotExists(c.addrs, addr) + c.nodes[addr] = node + + return node, nil +} + +func (c *clusterNodes) get(addr string) (*clusterNode, error) { + var node *clusterNode + var err error + c.mu.RLock() + if c.closed { + err = pool.ErrClosed + } else { + node = c.nodes[addr] + } + c.mu.RUnlock() + return node, err +} + +func (c *clusterNodes) All() ([]*clusterNode, error) { + c.mu.RLock() + defer c.mu.RUnlock() + + if c.closed { + return nil, pool.ErrClosed + } + + cp := make([]*clusterNode, 0, len(c.nodes)) + for _, node := range c.nodes { + cp = append(cp, node) + } + return cp, nil +} + +func (c *clusterNodes) Random() (*clusterNode, error) { + addrs, err := c.Addrs() + if err != nil { + return nil, err + } + + n := rand.Intn(len(addrs)) + return c.GetOrCreate(addrs[n]) +} + +//------------------------------------------------------------------------------ + +type clusterSlot struct { + start, end int + nodes []*clusterNode +} + +type clusterSlotSlice []*clusterSlot + +func (p clusterSlotSlice) Len() int { + return len(p) +} + +func (p clusterSlotSlice) Less(i, j int) bool { + return p[i].start < p[j].start +} + +func (p clusterSlotSlice) Swap(i, j int) { + p[i], p[j] = p[j], p[i] +} + +type clusterState struct { + nodes *clusterNodes + Masters []*clusterNode + Slaves []*clusterNode + + slots []*clusterSlot + + generation uint32 + createdAt time.Time +} + +func newClusterState( + nodes *clusterNodes, slots []ClusterSlot, origin string, +) (*clusterState, error) { + c := clusterState{ + nodes: nodes, + + slots: make([]*clusterSlot, 0, len(slots)), + + generation: nodes.NextGeneration(), + createdAt: time.Now(), + } + + originHost, _, _ := net.SplitHostPort(origin) + isLoopbackOrigin := isLoopback(originHost) + + for _, slot := range slots { + var nodes []*clusterNode + for i, slotNode := range slot.Nodes { + addr := slotNode.Addr + if !isLoopbackOrigin { + addr = replaceLoopbackHost(addr, originHost) + } + + node, err := c.nodes.GetOrCreate(addr) + if err != nil { + return nil, err + } + + node.SetGeneration(c.generation) + nodes = append(nodes, node) + + if i == 0 { + c.Masters = appendUniqueNode(c.Masters, node) + } else { + c.Slaves = appendUniqueNode(c.Slaves, node) + } + } + + c.slots = append(c.slots, &clusterSlot{ + start: slot.Start, + end: slot.End, + nodes: nodes, + }) + } + + sort.Sort(clusterSlotSlice(c.slots)) + + time.AfterFunc(time.Minute, func() { + nodes.GC(c.generation) + }) + + return &c, nil +} + +func replaceLoopbackHost(nodeAddr, originHost string) string { + nodeHost, nodePort, err := net.SplitHostPort(nodeAddr) + if err != nil { + 
return nodeAddr + } + + nodeIP := net.ParseIP(nodeHost) + if nodeIP == nil { + return nodeAddr + } + + if !nodeIP.IsLoopback() { + return nodeAddr + } + + // Use origin host which is not loopback and node port. + return net.JoinHostPort(originHost, nodePort) +} + +func isLoopback(host string) bool { + ip := net.ParseIP(host) + if ip == nil { + return true + } + return ip.IsLoopback() +} + +func (c *clusterState) slotMasterNode(slot int) (*clusterNode, error) { + nodes := c.slotNodes(slot) + if len(nodes) > 0 { + return nodes[0], nil + } + return c.nodes.Random() +} + +func (c *clusterState) slotSlaveNode(slot int) (*clusterNode, error) { + nodes := c.slotNodes(slot) + switch len(nodes) { + case 0: + return c.nodes.Random() + case 1: + return nodes[0], nil + case 2: + if slave := nodes[1]; !slave.Failing() { + return slave, nil + } + return nodes[0], nil + default: + var slave *clusterNode + for i := 0; i < 10; i++ { + n := rand.Intn(len(nodes)-1) + 1 + slave = nodes[n] + if !slave.Failing() { + return slave, nil + } + } + + // All slaves are loading - use master. + return nodes[0], nil + } +} + +func (c *clusterState) slotClosestNode(slot int) (*clusterNode, error) { + nodes := c.slotNodes(slot) + if len(nodes) == 0 { + return c.nodes.Random() + } + + var node *clusterNode + for _, n := range nodes { + if n.Failing() { + continue + } + if node == nil || n.Latency() < node.Latency() { + node = n + } + } + if node != nil { + return node, nil + } + + // If all nodes are failing - return random node + return c.nodes.Random() +} + +func (c *clusterState) slotRandomNode(slot int) (*clusterNode, error) { + nodes := c.slotNodes(slot) + if len(nodes) == 0 { + return c.nodes.Random() + } + if len(nodes) == 1 { + return nodes[0], nil + } + randomNodes := rand.Perm(len(nodes)) + for _, idx := range randomNodes { + if node := nodes[idx]; !node.Failing() { + return node, nil + } + } + return nodes[randomNodes[0]], nil +} + +func (c *clusterState) slotNodes(slot int) []*clusterNode { + i := sort.Search(len(c.slots), func(i int) bool { + return c.slots[i].end >= slot + }) + if i >= len(c.slots) { + return nil + } + x := c.slots[i] + if slot >= x.start && slot <= x.end { + return x.nodes + } + return nil +} + +//------------------------------------------------------------------------------ + +type clusterStateHolder struct { + load func(ctx context.Context) (*clusterState, error) + + state atomic.Value + reloading uint32 // atomic +} + +func newClusterStateHolder(fn func(ctx context.Context) (*clusterState, error)) *clusterStateHolder { + return &clusterStateHolder{ + load: fn, + } +} + +func (c *clusterStateHolder) Reload(ctx context.Context) (*clusterState, error) { + state, err := c.load(ctx) + if err != nil { + return nil, err + } + c.state.Store(state) + return state, nil +} + +func (c *clusterStateHolder) LazyReload() { + if !atomic.CompareAndSwapUint32(&c.reloading, 0, 1) { + return + } + go func() { + defer atomic.StoreUint32(&c.reloading, 0) + + _, err := c.Reload(context.Background()) + if err != nil { + return + } + time.Sleep(200 * time.Millisecond) + }() +} + +func (c *clusterStateHolder) Get(ctx context.Context) (*clusterState, error) { + v := c.state.Load() + if v == nil { + return c.Reload(ctx) + } + + state := v.(*clusterState) + if time.Since(state.createdAt) > 10*time.Second { + c.LazyReload() + } + return state, nil +} + +func (c *clusterStateHolder) ReloadOrGet(ctx context.Context) (*clusterState, error) { + state, err := c.Reload(ctx) + if err == nil { + return state, nil + } + return 
c.Get(ctx) +} + +//------------------------------------------------------------------------------ + +// ClusterClient is a Redis Cluster client representing a pool of zero +// or more underlying connections. It's safe for concurrent use by +// multiple goroutines. +type ClusterClient struct { + opt *ClusterOptions + nodes *clusterNodes + state *clusterStateHolder + cmdsInfoCache *cmdsInfoCache + cmdable + hooksMixin +} + +// NewClusterClient returns a Redis Cluster client as described in +// http://redis.io/topics/cluster-spec. +func NewClusterClient(opt *ClusterOptions) *ClusterClient { + opt.init() + + c := &ClusterClient{ + opt: opt, + nodes: newClusterNodes(opt), + } + + c.state = newClusterStateHolder(c.loadState) + c.cmdsInfoCache = newCmdsInfoCache(c.cmdsInfo) + c.cmdable = c.Process + + c.initHooks(hooks{ + dial: nil, + process: c.process, + pipeline: c.processPipeline, + txPipeline: c.processTxPipeline, + }) + + return c +} + +// Options returns read-only Options that were used to create the client. +func (c *ClusterClient) Options() *ClusterOptions { + return c.opt +} + +// ReloadState reloads cluster state. If available it calls ClusterSlots func +// to get cluster slots information. +func (c *ClusterClient) ReloadState(ctx context.Context) { + c.state.LazyReload() +} + +// Close closes the cluster client, releasing any open resources. +// +// It is rare to Close a ClusterClient, as the ClusterClient is meant +// to be long-lived and shared between many goroutines. +func (c *ClusterClient) Close() error { + return c.nodes.Close() +} + +// Do create a Cmd from the args and processes the cmd. +func (c *ClusterClient) Do(ctx context.Context, args ...interface{}) *Cmd { + cmd := NewCmd(ctx, args...) + _ = c.Process(ctx, cmd) + return cmd +} + +func (c *ClusterClient) Process(ctx context.Context, cmd Cmder) error { + err := c.processHook(ctx, cmd) + cmd.SetErr(err) + return err +} + +func (c *ClusterClient) process(ctx context.Context, cmd Cmder) error { + cmdInfo := c.cmdInfo(ctx, cmd.Name()) + slot := c.cmdSlot(ctx, cmd) + var node *clusterNode + var ask bool + var lastErr error + for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ { + if attempt > 0 { + if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil { + return err + } + } + + if node == nil { + var err error + node, err = c.cmdNode(ctx, cmdInfo, slot) + if err != nil { + return err + } + } + + if ask { + ask = false + + pipe := node.Client.Pipeline() + _ = pipe.Process(ctx, NewCmd(ctx, "asking")) + _ = pipe.Process(ctx, cmd) + _, lastErr = pipe.Exec(ctx) + } else { + lastErr = node.Client.Process(ctx, cmd) + } + + // If there is no error - we are done. + if lastErr == nil { + return nil + } + if isReadOnly := isReadOnlyError(lastErr); isReadOnly || lastErr == pool.ErrClosed { + if isReadOnly { + c.state.LazyReload() + } + node = nil + continue + } + + // If slave is loading - pick another node. + if c.opt.ReadOnly && isLoadingError(lastErr) { + node.MarkAsFailing() + node = nil + continue + } + + var moved bool + var addr string + moved, ask, addr = isMovedError(lastErr) + if moved || ask { + c.state.LazyReload() + + var err error + node, err = c.nodes.GetOrCreate(addr) + if err != nil { + return err + } + continue + } + + if shouldRetry(lastErr, cmd.readTimeout() == nil) { + // First retry the same node. + if attempt == 0 { + continue + } + + // Second try another node. 
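+ // The current node is marked as failing so it is skipped for a while.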
+ node.MarkAsFailing() + node = nil + continue + } + + return lastErr + } + return lastErr +} + +func (c *ClusterClient) OnNewNode(fn func(rdb *Client)) { + c.nodes.OnNewNode(fn) +} + +// ForEachMaster concurrently calls the fn on each master node in the cluster. +// It returns the first error if any. +func (c *ClusterClient) ForEachMaster( + ctx context.Context, + fn func(ctx context.Context, client *Client) error, +) error { + state, err := c.state.ReloadOrGet(ctx) + if err != nil { + return err + } + + var wg sync.WaitGroup + errCh := make(chan error, 1) + + for _, master := range state.Masters { + wg.Add(1) + go func(node *clusterNode) { + defer wg.Done() + err := fn(ctx, node.Client) + if err != nil { + select { + case errCh <- err: + default: + } + } + }(master) + } + + wg.Wait() + + select { + case err := <-errCh: + return err + default: + return nil + } +} + +// ForEachSlave concurrently calls the fn on each slave node in the cluster. +// It returns the first error if any. +func (c *ClusterClient) ForEachSlave( + ctx context.Context, + fn func(ctx context.Context, client *Client) error, +) error { + state, err := c.state.ReloadOrGet(ctx) + if err != nil { + return err + } + + var wg sync.WaitGroup + errCh := make(chan error, 1) + + for _, slave := range state.Slaves { + wg.Add(1) + go func(node *clusterNode) { + defer wg.Done() + err := fn(ctx, node.Client) + if err != nil { + select { + case errCh <- err: + default: + } + } + }(slave) + } + + wg.Wait() + + select { + case err := <-errCh: + return err + default: + return nil + } +} + +// ForEachShard concurrently calls the fn on each known node in the cluster. +// It returns the first error if any. +func (c *ClusterClient) ForEachShard( + ctx context.Context, + fn func(ctx context.Context, client *Client) error, +) error { + state, err := c.state.ReloadOrGet(ctx) + if err != nil { + return err + } + + var wg sync.WaitGroup + errCh := make(chan error, 1) + + worker := func(node *clusterNode) { + defer wg.Done() + err := fn(ctx, node.Client) + if err != nil { + select { + case errCh <- err: + default: + } + } + } + + for _, node := range state.Masters { + wg.Add(1) + go worker(node) + } + for _, node := range state.Slaves { + wg.Add(1) + go worker(node) + } + + wg.Wait() + + select { + case err := <-errCh: + return err + default: + return nil + } +} + +// PoolStats returns accumulated connection pool stats. 
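+// The stats are summed over the connection pools of all known master and slave nodes.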
+func (c *ClusterClient) PoolStats() *PoolStats { + var acc PoolStats + + state, _ := c.state.Get(context.TODO()) + if state == nil { + return &acc + } + + for _, node := range state.Masters { + s := node.Client.connPool.Stats() + acc.Hits += s.Hits + acc.Misses += s.Misses + acc.Timeouts += s.Timeouts + + acc.TotalConns += s.TotalConns + acc.IdleConns += s.IdleConns + acc.StaleConns += s.StaleConns + } + + for _, node := range state.Slaves { + s := node.Client.connPool.Stats() + acc.Hits += s.Hits + acc.Misses += s.Misses + acc.Timeouts += s.Timeouts + + acc.TotalConns += s.TotalConns + acc.IdleConns += s.IdleConns + acc.StaleConns += s.StaleConns + } + + return &acc +} + +func (c *ClusterClient) loadState(ctx context.Context) (*clusterState, error) { + if c.opt.ClusterSlots != nil { + slots, err := c.opt.ClusterSlots(ctx) + if err != nil { + return nil, err + } + return newClusterState(c.nodes, slots, "") + } + + addrs, err := c.nodes.Addrs() + if err != nil { + return nil, err + } + + var firstErr error + + for _, idx := range rand.Perm(len(addrs)) { + addr := addrs[idx] + + node, err := c.nodes.GetOrCreate(addr) + if err != nil { + if firstErr == nil { + firstErr = err + } + continue + } + + slots, err := node.Client.ClusterSlots(ctx).Result() + if err != nil { + if firstErr == nil { + firstErr = err + } + continue + } + + return newClusterState(c.nodes, slots, node.Client.opt.Addr) + } + + /* + * No node is connectable. It's possible that all nodes' IP has changed. + * Clear activeAddrs to let client be able to re-connect using the initial + * setting of the addresses (e.g. [redis-cluster-0:6379, redis-cluster-1:6379]), + * which might have chance to resolve domain name and get updated IP address. + */ + c.nodes.mu.Lock() + c.nodes.activeAddrs = nil + c.nodes.mu.Unlock() + + return nil, firstErr +} + +func (c *ClusterClient) Pipeline() Pipeliner { + pipe := Pipeline{ + exec: pipelineExecer(c.processPipelineHook), + } + pipe.init() + return &pipe +} + +func (c *ClusterClient) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) { + return c.Pipeline().Pipelined(ctx, fn) +} + +func (c *ClusterClient) processPipeline(ctx context.Context, cmds []Cmder) error { + cmdsMap := newCmdsMap() + + if err := c.mapCmdsByNode(ctx, cmdsMap, cmds); err != nil { + setCmdsErr(cmds, err) + return err + } + + for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ { + if attempt > 0 { + if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil { + setCmdsErr(cmds, err) + return err + } + } + + failedCmds := newCmdsMap() + var wg sync.WaitGroup + + for node, cmds := range cmdsMap.m { + wg.Add(1) + go func(node *clusterNode, cmds []Cmder) { + defer wg.Done() + c.processPipelineNode(ctx, node, cmds, failedCmds) + }(node, cmds) + } + + wg.Wait() + if len(failedCmds.m) == 0 { + break + } + cmdsMap = failedCmds + } + + return cmdsFirstErr(cmds) +} + +func (c *ClusterClient) mapCmdsByNode(ctx context.Context, cmdsMap *cmdsMap, cmds []Cmder) error { + state, err := c.state.Get(ctx) + if err != nil { + return err + } + + if c.opt.ReadOnly && c.cmdsAreReadOnly(ctx, cmds) { + for _, cmd := range cmds { + slot := c.cmdSlot(ctx, cmd) + node, err := c.slotReadOnlyNode(state, slot) + if err != nil { + return err + } + cmdsMap.Add(node, cmd) + } + return nil + } + + for _, cmd := range cmds { + slot := c.cmdSlot(ctx, cmd) + node, err := state.slotMasterNode(slot) + if err != nil { + return err + } + cmdsMap.Add(node, cmd) + } + return nil +} + +func (c *ClusterClient) cmdsAreReadOnly(ctx 
context.Context, cmds []Cmder) bool { + for _, cmd := range cmds { + cmdInfo := c.cmdInfo(ctx, cmd.Name()) + if cmdInfo == nil || !cmdInfo.ReadOnly { + return false + } + } + return true +} + +func (c *ClusterClient) processPipelineNode( + ctx context.Context, node *clusterNode, cmds []Cmder, failedCmds *cmdsMap, +) { + _ = node.Client.withProcessPipelineHook(ctx, cmds, func(ctx context.Context, cmds []Cmder) error { + cn, err := node.Client.getConn(ctx) + if err != nil { + _ = c.mapCmdsByNode(ctx, failedCmds, cmds) + setCmdsErr(cmds, err) + return err + } + + var processErr error + defer func() { + node.Client.releaseConn(ctx, cn, processErr) + }() + processErr = c.processPipelineNodeConn(ctx, node, cn, cmds, failedCmds) + + return processErr + }) +} + +func (c *ClusterClient) processPipelineNodeConn( + ctx context.Context, node *clusterNode, cn *pool.Conn, cmds []Cmder, failedCmds *cmdsMap, +) error { + if err := cn.WithWriter(c.context(ctx), c.opt.WriteTimeout, func(wr *proto.Writer) error { + return writeCmds(wr, cmds) + }); err != nil { + if shouldRetry(err, true) { + _ = c.mapCmdsByNode(ctx, failedCmds, cmds) + } + setCmdsErr(cmds, err) + return err + } + + return cn.WithReader(c.context(ctx), c.opt.ReadTimeout, func(rd *proto.Reader) error { + return c.pipelineReadCmds(ctx, node, rd, cmds, failedCmds) + }) +} + +func (c *ClusterClient) pipelineReadCmds( + ctx context.Context, + node *clusterNode, + rd *proto.Reader, + cmds []Cmder, + failedCmds *cmdsMap, +) error { + for i, cmd := range cmds { + err := cmd.readReply(rd) + cmd.SetErr(err) + + if err == nil { + continue + } + + if c.checkMovedErr(ctx, cmd, err, failedCmds) { + continue + } + + if c.opt.ReadOnly { + node.MarkAsFailing() + } + + if !isRedisError(err) { + if shouldRetry(err, true) { + _ = c.mapCmdsByNode(ctx, failedCmds, cmds) + } + setCmdsErr(cmds[i+1:], err) + return err + } + } + + if err := cmds[0].Err(); err != nil && shouldRetry(err, true) { + _ = c.mapCmdsByNode(ctx, failedCmds, cmds) + return err + } + + return nil +} + +func (c *ClusterClient) checkMovedErr( + ctx context.Context, cmd Cmder, err error, failedCmds *cmdsMap, +) bool { + moved, ask, addr := isMovedError(err) + if !moved && !ask { + return false + } + + node, err := c.nodes.GetOrCreate(addr) + if err != nil { + return false + } + + if moved { + c.state.LazyReload() + failedCmds.Add(node, cmd) + return true + } + + if ask { + failedCmds.Add(node, NewCmd(ctx, "asking"), cmd) + return true + } + + panic("not reached") +} + +// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC. +func (c *ClusterClient) TxPipeline() Pipeliner { + pipe := Pipeline{ + exec: func(ctx context.Context, cmds []Cmder) error { + cmds = wrapMultiExec(ctx, cmds) + return c.processTxPipelineHook(ctx, cmds) + }, + } + pipe.init() + return &pipe +} + +func (c *ClusterClient) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) { + return c.TxPipeline().Pipelined(ctx, fn) +} + +func (c *ClusterClient) processTxPipeline(ctx context.Context, cmds []Cmder) error { + // Trim multi .. exec. 
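+ // (the MULTI/EXEC wrappers are re-added per node by processTxPipelineNode via wrapMultiExec)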
+ cmds = cmds[1 : len(cmds)-1] + + state, err := c.state.Get(ctx) + if err != nil { + setCmdsErr(cmds, err) + return err + } + + cmdsMap := c.mapCmdsBySlot(ctx, cmds) + for slot, cmds := range cmdsMap { + node, err := state.slotMasterNode(slot) + if err != nil { + setCmdsErr(cmds, err) + continue + } + + cmdsMap := map[*clusterNode][]Cmder{node: cmds} + for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ { + if attempt > 0 { + if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil { + setCmdsErr(cmds, err) + return err + } + } + + failedCmds := newCmdsMap() + var wg sync.WaitGroup + + for node, cmds := range cmdsMap { + wg.Add(1) + go func(node *clusterNode, cmds []Cmder) { + defer wg.Done() + c.processTxPipelineNode(ctx, node, cmds, failedCmds) + }(node, cmds) + } + + wg.Wait() + if len(failedCmds.m) == 0 { + break + } + cmdsMap = failedCmds.m + } + } + + return cmdsFirstErr(cmds) +} + +func (c *ClusterClient) mapCmdsBySlot(ctx context.Context, cmds []Cmder) map[int][]Cmder { + cmdsMap := make(map[int][]Cmder) + for _, cmd := range cmds { + slot := c.cmdSlot(ctx, cmd) + cmdsMap[slot] = append(cmdsMap[slot], cmd) + } + return cmdsMap +} + +func (c *ClusterClient) processTxPipelineNode( + ctx context.Context, node *clusterNode, cmds []Cmder, failedCmds *cmdsMap, +) { + cmds = wrapMultiExec(ctx, cmds) + _ = node.Client.withProcessPipelineHook(ctx, cmds, func(ctx context.Context, cmds []Cmder) error { + cn, err := node.Client.getConn(ctx) + if err != nil { + _ = c.mapCmdsByNode(ctx, failedCmds, cmds) + setCmdsErr(cmds, err) + return err + } + + var processErr error + defer func() { + node.Client.releaseConn(ctx, cn, processErr) + }() + processErr = c.processTxPipelineNodeConn(ctx, node, cn, cmds, failedCmds) + + return processErr + }) +} + +func (c *ClusterClient) processTxPipelineNodeConn( + ctx context.Context, node *clusterNode, cn *pool.Conn, cmds []Cmder, failedCmds *cmdsMap, +) error { + if err := cn.WithWriter(c.context(ctx), c.opt.WriteTimeout, func(wr *proto.Writer) error { + return writeCmds(wr, cmds) + }); err != nil { + if shouldRetry(err, true) { + _ = c.mapCmdsByNode(ctx, failedCmds, cmds) + } + setCmdsErr(cmds, err) + return err + } + + return cn.WithReader(c.context(ctx), c.opt.ReadTimeout, func(rd *proto.Reader) error { + statusCmd := cmds[0].(*StatusCmd) + // Trim multi and exec. + trimmedCmds := cmds[1 : len(cmds)-1] + + if err := c.txPipelineReadQueued( + ctx, rd, statusCmd, trimmedCmds, failedCmds, + ); err != nil { + setCmdsErr(cmds, err) + + moved, ask, addr := isMovedError(err) + if moved || ask { + return c.cmdsMoved(ctx, trimmedCmds, moved, ask, addr, failedCmds) + } + + return err + } + + return pipelineReadCmds(rd, trimmedCmds) + }) +} + +func (c *ClusterClient) txPipelineReadQueued( + ctx context.Context, + rd *proto.Reader, + statusCmd *StatusCmd, + cmds []Cmder, + failedCmds *cmdsMap, +) error { + // Parse queued replies. + if err := statusCmd.readReply(rd); err != nil { + return err + } + + for _, cmd := range cmds { + err := statusCmd.readReply(rd) + if err == nil || c.checkMovedErr(ctx, cmd, err, failedCmds) || isRedisError(err) { + continue + } + return err + } + + // Parse number of replies. 
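+ // (a Nil reply for EXEC means the transaction was aborted and is reported as TxFailedErr)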
+ line, err := rd.ReadLine() + if err != nil { + if err == Nil { + err = TxFailedErr + } + return err + } + + if line[0] != proto.RespArray { + return fmt.Errorf("redis: expected '*', but got line %q", line) + } + + return nil +} + +func (c *ClusterClient) cmdsMoved( + ctx context.Context, cmds []Cmder, + moved, ask bool, + addr string, + failedCmds *cmdsMap, +) error { + node, err := c.nodes.GetOrCreate(addr) + if err != nil { + return err + } + + if moved { + c.state.LazyReload() + for _, cmd := range cmds { + failedCmds.Add(node, cmd) + } + return nil + } + + if ask { + for _, cmd := range cmds { + failedCmds.Add(node, NewCmd(ctx, "asking"), cmd) + } + return nil + } + + return nil +} + +func (c *ClusterClient) Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error { + if len(keys) == 0 { + return fmt.Errorf("redis: Watch requires at least one key") + } + + slot := hashtag.Slot(keys[0]) + for _, key := range keys[1:] { + if hashtag.Slot(key) != slot { + err := fmt.Errorf("redis: Watch requires all keys to be in the same slot") + return err + } + } + + node, err := c.slotMasterNode(ctx, slot) + if err != nil { + return err + } + + for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ { + if attempt > 0 { + if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil { + return err + } + } + + err = node.Client.Watch(ctx, fn, keys...) + if err == nil { + break + } + + moved, ask, addr := isMovedError(err) + if moved || ask { + node, err = c.nodes.GetOrCreate(addr) + if err != nil { + return err + } + continue + } + + if isReadOnly := isReadOnlyError(err); isReadOnly || err == pool.ErrClosed { + if isReadOnly { + c.state.LazyReload() + } + node, err = c.slotMasterNode(ctx, slot) + if err != nil { + return err + } + continue + } + + if shouldRetry(err, true) { + continue + } + + return err + } + + return err +} + +func (c *ClusterClient) pubSub() *PubSub { + var node *clusterNode + pubsub := &PubSub{ + opt: c.opt.clientOptions(), + + newConn: func(ctx context.Context, channels []string) (*pool.Conn, error) { + if node != nil { + panic("node != nil") + } + + var err error + if len(channels) > 0 { + slot := hashtag.Slot(channels[0]) + node, err = c.slotMasterNode(ctx, slot) + } else { + node, err = c.nodes.Random() + } + if err != nil { + return nil, err + } + + cn, err := node.Client.newConn(context.TODO()) + if err != nil { + node = nil + + return nil, err + } + + return cn, nil + }, + closeConn: func(cn *pool.Conn) error { + err := node.Client.connPool.CloseConn(cn) + node = nil + return err + }, + } + pubsub.init() + + return pubsub +} + +// Subscribe subscribes the client to the specified channels. +// Channels can be omitted to create empty subscription. +func (c *ClusterClient) Subscribe(ctx context.Context, channels ...string) *PubSub { + pubsub := c.pubSub() + if len(channels) > 0 { + _ = pubsub.Subscribe(ctx, channels...) + } + return pubsub +} + +// PSubscribe subscribes the client to the given patterns. +// Patterns can be omitted to create empty subscription. +func (c *ClusterClient) PSubscribe(ctx context.Context, channels ...string) *PubSub { + pubsub := c.pubSub() + if len(channels) > 0 { + _ = pubsub.PSubscribe(ctx, channels...) + } + return pubsub +} + +// SSubscribe Subscribes the client to the specified shard channels. +func (c *ClusterClient) SSubscribe(ctx context.Context, channels ...string) *PubSub { + pubsub := c.pubSub() + if len(channels) > 0 { + _ = pubsub.SSubscribe(ctx, channels...) 
+ } + return pubsub +} + +func (c *ClusterClient) retryBackoff(attempt int) time.Duration { + return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff) +} + +func (c *ClusterClient) cmdsInfo(ctx context.Context) (map[string]*CommandInfo, error) { + // Try 3 random nodes. + const nodeLimit = 3 + + addrs, err := c.nodes.Addrs() + if err != nil { + return nil, err + } + + var firstErr error + + perm := rand.Perm(len(addrs)) + if len(perm) > nodeLimit { + perm = perm[:nodeLimit] + } + + for _, idx := range perm { + addr := addrs[idx] + + node, err := c.nodes.GetOrCreate(addr) + if err != nil { + if firstErr == nil { + firstErr = err + } + continue + } + + info, err := node.Client.Command(ctx).Result() + if err == nil { + return info, nil + } + if firstErr == nil { + firstErr = err + } + } + + if firstErr == nil { + panic("not reached") + } + return nil, firstErr +} + +func (c *ClusterClient) cmdInfo(ctx context.Context, name string) *CommandInfo { + cmdsInfo, err := c.cmdsInfoCache.Get(ctx) + if err != nil { + internal.Logger.Printf(context.TODO(), "getting command info: %s", err) + return nil + } + + info := cmdsInfo[name] + if info == nil { + internal.Logger.Printf(context.TODO(), "info for cmd=%s not found", name) + } + return info +} + +func (c *ClusterClient) cmdSlot(ctx context.Context, cmd Cmder) int { + args := cmd.Args() + if args[0] == "cluster" && args[1] == "getkeysinslot" { + return args[2].(int) + } + + cmdInfo := c.cmdInfo(ctx, cmd.Name()) + return cmdSlot(cmd, cmdFirstKeyPos(cmd, cmdInfo)) +} + +func cmdSlot(cmd Cmder, pos int) int { + if pos == 0 { + return hashtag.RandomSlot() + } + firstKey := cmd.stringArg(pos) + return hashtag.Slot(firstKey) +} + +func (c *ClusterClient) cmdNode( + ctx context.Context, + cmdInfo *CommandInfo, + slot int, +) (*clusterNode, error) { + state, err := c.state.Get(ctx) + if err != nil { + return nil, err + } + + if c.opt.ReadOnly && cmdInfo != nil && cmdInfo.ReadOnly { + return c.slotReadOnlyNode(state, slot) + } + return state.slotMasterNode(slot) +} + +func (c *ClusterClient) slotReadOnlyNode(state *clusterState, slot int) (*clusterNode, error) { + if c.opt.RouteByLatency { + return state.slotClosestNode(slot) + } + if c.opt.RouteRandomly { + return state.slotRandomNode(slot) + } + return state.slotSlaveNode(slot) +} + +func (c *ClusterClient) slotMasterNode(ctx context.Context, slot int) (*clusterNode, error) { + state, err := c.state.Get(ctx) + if err != nil { + return nil, err + } + return state.slotMasterNode(slot) +} + +// SlaveForKey gets a client for a replica node to run any command on it. +// This is especially useful if we want to run a particular lua script which has +// only read only commands on the replica. +// This is because other redis commands generally have a flag that points that +// they are read only and automatically run on the replica nodes +// if ClusterOptions.ReadOnly flag is set to true. +func (c *ClusterClient) SlaveForKey(ctx context.Context, key string) (*Client, error) { + state, err := c.state.Get(ctx) + if err != nil { + return nil, err + } + slot := hashtag.Slot(key) + node, err := c.slotReadOnlyNode(state, slot) + if err != nil { + return nil, err + } + return node.Client, err +} + +// MasterForKey return a client to the master node for a particular key. 
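+//
+// For example (illustrative; clusterClient and the key are placeholders):
+//
+//	master, err := clusterClient.MasterForKey(ctx, "user:1")
+//	if err == nil {
+//		err = master.Set(ctx, "user:1", "value", 0).Err()
+//	}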
+func (c *ClusterClient) MasterForKey(ctx context.Context, key string) (*Client, error) { + slot := hashtag.Slot(key) + node, err := c.slotMasterNode(ctx, slot) + if err != nil { + return nil, err + } + return node.Client, err +} + +func (c *ClusterClient) context(ctx context.Context) context.Context { + if c.opt.ContextTimeoutEnabled { + return ctx + } + return context.Background() +} + +func appendUniqueNode(nodes []*clusterNode, node *clusterNode) []*clusterNode { + for _, n := range nodes { + if n == node { + return nodes + } + } + return append(nodes, node) +} + +func appendIfNotExists(ss []string, es ...string) []string { +loop: + for _, e := range es { + for _, s := range ss { + if s == e { + continue loop + } + } + ss = append(ss, e) + } + return ss +} + +//------------------------------------------------------------------------------ + +type cmdsMap struct { + mu sync.Mutex + m map[*clusterNode][]Cmder +} + +func newCmdsMap() *cmdsMap { + return &cmdsMap{ + m: make(map[*clusterNode][]Cmder), + } +} + +func (m *cmdsMap) Add(node *clusterNode, cmds ...Cmder) { + m.mu.Lock() + m.m[node] = append(m.m[node], cmds...) + m.mu.Unlock() +} diff --git a/vendor/github.com/redis/go-redis/v9/cluster_commands.go b/vendor/github.com/redis/go-redis/v9/cluster_commands.go new file mode 100644 index 00000000..b13f8e7e --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/cluster_commands.go @@ -0,0 +1,109 @@ +package redis + +import ( + "context" + "sync" + "sync/atomic" +) + +func (c *ClusterClient) DBSize(ctx context.Context) *IntCmd { + cmd := NewIntCmd(ctx, "dbsize") + _ = c.withProcessHook(ctx, cmd, func(ctx context.Context, _ Cmder) error { + var size int64 + err := c.ForEachMaster(ctx, func(ctx context.Context, master *Client) error { + n, err := master.DBSize(ctx).Result() + if err != nil { + return err + } + atomic.AddInt64(&size, n) + return nil + }) + if err != nil { + cmd.SetErr(err) + } else { + cmd.val = size + } + return nil + }) + return cmd +} + +func (c *ClusterClient) ScriptLoad(ctx context.Context, script string) *StringCmd { + cmd := NewStringCmd(ctx, "script", "load", script) + _ = c.withProcessHook(ctx, cmd, func(ctx context.Context, _ Cmder) error { + var mu sync.Mutex + err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error { + val, err := shard.ScriptLoad(ctx, script).Result() + if err != nil { + return err + } + + mu.Lock() + if cmd.Val() == "" { + cmd.val = val + } + mu.Unlock() + + return nil + }) + if err != nil { + cmd.SetErr(err) + } + return nil + }) + return cmd +} + +func (c *ClusterClient) ScriptFlush(ctx context.Context) *StatusCmd { + cmd := NewStatusCmd(ctx, "script", "flush") + _ = c.withProcessHook(ctx, cmd, func(ctx context.Context, _ Cmder) error { + err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error { + return shard.ScriptFlush(ctx).Err() + }) + if err != nil { + cmd.SetErr(err) + } + return nil + }) + return cmd +} + +func (c *ClusterClient) ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd { + args := make([]interface{}, 2+len(hashes)) + args[0] = "script" + args[1] = "exists" + for i, hash := range hashes { + args[2+i] = hash + } + cmd := NewBoolSliceCmd(ctx, args...) 
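+ // Assume every hash exists until a shard reports otherwise; per-hash results are ANDed across shards below.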
+ + result := make([]bool, len(hashes)) + for i := range result { + result[i] = true + } + + _ = c.withProcessHook(ctx, cmd, func(ctx context.Context, _ Cmder) error { + var mu sync.Mutex + err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error { + val, err := shard.ScriptExists(ctx, hashes...).Result() + if err != nil { + return err + } + + mu.Lock() + for i, v := range val { + result[i] = result[i] && v + } + mu.Unlock() + + return nil + }) + if err != nil { + cmd.SetErr(err) + } else { + cmd.val = result + } + return nil + }) + return cmd +} diff --git a/vendor/github.com/redis/go-redis/v9/command.go b/vendor/github.com/redis/go-redis/v9/command.go new file mode 100644 index 00000000..1bd4d5db --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/command.go @@ -0,0 +1,5235 @@ +package redis + +import ( + "context" + "fmt" + "net" + "strconv" + "strings" + "time" + + "github.com/redis/go-redis/v9/internal" + "github.com/redis/go-redis/v9/internal/hscan" + "github.com/redis/go-redis/v9/internal/proto" + "github.com/redis/go-redis/v9/internal/util" +) + +type Cmder interface { + Name() string + FullName() string + Args() []interface{} + String() string + stringArg(int) string + firstKeyPos() int8 + SetFirstKeyPos(int8) + + readTimeout() *time.Duration + readReply(rd *proto.Reader) error + + SetErr(error) + Err() error +} + +func setCmdsErr(cmds []Cmder, e error) { + for _, cmd := range cmds { + if cmd.Err() == nil { + cmd.SetErr(e) + } + } +} + +func cmdsFirstErr(cmds []Cmder) error { + for _, cmd := range cmds { + if err := cmd.Err(); err != nil { + return err + } + } + return nil +} + +func writeCmds(wr *proto.Writer, cmds []Cmder) error { + for _, cmd := range cmds { + if err := writeCmd(wr, cmd); err != nil { + return err + } + } + return nil +} + +func writeCmd(wr *proto.Writer, cmd Cmder) error { + return wr.WriteArgs(cmd.Args()) +} + +func cmdFirstKeyPos(cmd Cmder, info *CommandInfo) int { + if pos := cmd.firstKeyPos(); pos != 0 { + return int(pos) + } + + switch cmd.Name() { + case "eval", "evalsha", "eval_ro", "evalsha_ro": + if cmd.stringArg(2) != "0" { + return 3 + } + + return 0 + case "publish": + return 1 + case "memory": + // https://github.com/redis/redis/issues/7493 + if cmd.stringArg(1) == "usage" { + return 2 + } + } + + if info != nil { + return int(info.FirstKeyPos) + } + return 1 +} + +func cmdString(cmd Cmder, val interface{}) string { + b := make([]byte, 0, 64) + + for i, arg := range cmd.Args() { + if i > 0 { + b = append(b, ' ') + } + b = internal.AppendArg(b, arg) + } + + if err := cmd.Err(); err != nil { + b = append(b, ": "...) + b = append(b, err.Error()...) + } else if val != nil { + b = append(b, ": "...) + b = internal.AppendArg(b, val) + } + + return util.BytesToString(b) +} + +//------------------------------------------------------------------------------ + +type baseCmd struct { + ctx context.Context + args []interface{} + err error + keyPos int8 + + _readTimeout *time.Duration +} + +var _ Cmder = (*Cmd)(nil) + +func (cmd *baseCmd) Name() string { + if len(cmd.args) == 0 { + return "" + } + // Cmd name must be lower cased. 
+ return internal.ToLower(cmd.stringArg(0)) +} + +func (cmd *baseCmd) FullName() string { + switch name := cmd.Name(); name { + case "cluster", "command": + if len(cmd.args) == 1 { + return name + } + if s2, ok := cmd.args[1].(string); ok { + return name + " " + s2 + } + return name + default: + return name + } +} + +func (cmd *baseCmd) Args() []interface{} { + return cmd.args +} + +func (cmd *baseCmd) stringArg(pos int) string { + if pos < 0 || pos >= len(cmd.args) { + return "" + } + arg := cmd.args[pos] + switch v := arg.(type) { + case string: + return v + default: + // TODO: consider using appendArg + return fmt.Sprint(v) + } +} + +func (cmd *baseCmd) firstKeyPos() int8 { + return cmd.keyPos +} + +func (cmd *baseCmd) SetFirstKeyPos(keyPos int8) { + cmd.keyPos = keyPos +} + +func (cmd *baseCmd) SetErr(e error) { + cmd.err = e +} + +func (cmd *baseCmd) Err() error { + return cmd.err +} + +func (cmd *baseCmd) readTimeout() *time.Duration { + return cmd._readTimeout +} + +func (cmd *baseCmd) setReadTimeout(d time.Duration) { + cmd._readTimeout = &d +} + +//------------------------------------------------------------------------------ + +type Cmd struct { + baseCmd + + val interface{} +} + +func NewCmd(ctx context.Context, args ...interface{}) *Cmd { + return &Cmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *Cmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *Cmd) SetVal(val interface{}) { + cmd.val = val +} + +func (cmd *Cmd) Val() interface{} { + return cmd.val +} + +func (cmd *Cmd) Result() (interface{}, error) { + return cmd.val, cmd.err +} + +func (cmd *Cmd) Text() (string, error) { + if cmd.err != nil { + return "", cmd.err + } + return toString(cmd.val) +} + +func toString(val interface{}) (string, error) { + switch val := val.(type) { + case string: + return val, nil + default: + err := fmt.Errorf("redis: unexpected type=%T for String", val) + return "", err + } +} + +func (cmd *Cmd) Int() (int, error) { + if cmd.err != nil { + return 0, cmd.err + } + switch val := cmd.val.(type) { + case int64: + return int(val), nil + case string: + return strconv.Atoi(val) + default: + err := fmt.Errorf("redis: unexpected type=%T for Int", val) + return 0, err + } +} + +func (cmd *Cmd) Int64() (int64, error) { + if cmd.err != nil { + return 0, cmd.err + } + return toInt64(cmd.val) +} + +func toInt64(val interface{}) (int64, error) { + switch val := val.(type) { + case int64: + return val, nil + case string: + return strconv.ParseInt(val, 10, 64) + default: + err := fmt.Errorf("redis: unexpected type=%T for Int64", val) + return 0, err + } +} + +func (cmd *Cmd) Uint64() (uint64, error) { + if cmd.err != nil { + return 0, cmd.err + } + return toUint64(cmd.val) +} + +func toUint64(val interface{}) (uint64, error) { + switch val := val.(type) { + case int64: + return uint64(val), nil + case string: + return strconv.ParseUint(val, 10, 64) + default: + err := fmt.Errorf("redis: unexpected type=%T for Uint64", val) + return 0, err + } +} + +func (cmd *Cmd) Float32() (float32, error) { + if cmd.err != nil { + return 0, cmd.err + } + return toFloat32(cmd.val) +} + +func toFloat32(val interface{}) (float32, error) { + switch val := val.(type) { + case int64: + return float32(val), nil + case string: + f, err := strconv.ParseFloat(val, 32) + if err != nil { + return 0, err + } + return float32(f), nil + default: + err := fmt.Errorf("redis: unexpected type=%T for Float32", val) + return 0, err + } +} + +func (cmd *Cmd) Float64() (float64, error) { + if 
cmd.err != nil { + return 0, cmd.err + } + return toFloat64(cmd.val) +} + +func toFloat64(val interface{}) (float64, error) { + switch val := val.(type) { + case int64: + return float64(val), nil + case string: + return strconv.ParseFloat(val, 64) + default: + err := fmt.Errorf("redis: unexpected type=%T for Float64", val) + return 0, err + } +} + +func (cmd *Cmd) Bool() (bool, error) { + if cmd.err != nil { + return false, cmd.err + } + return toBool(cmd.val) +} + +func toBool(val interface{}) (bool, error) { + switch val := val.(type) { + case bool: + return val, nil + case int64: + return val != 0, nil + case string: + return strconv.ParseBool(val) + default: + err := fmt.Errorf("redis: unexpected type=%T for Bool", val) + return false, err + } +} + +func (cmd *Cmd) Slice() ([]interface{}, error) { + if cmd.err != nil { + return nil, cmd.err + } + switch val := cmd.val.(type) { + case []interface{}: + return val, nil + default: + return nil, fmt.Errorf("redis: unexpected type=%T for Slice", val) + } +} + +func (cmd *Cmd) StringSlice() ([]string, error) { + slice, err := cmd.Slice() + if err != nil { + return nil, err + } + + ss := make([]string, len(slice)) + for i, iface := range slice { + val, err := toString(iface) + if err != nil { + return nil, err + } + ss[i] = val + } + return ss, nil +} + +func (cmd *Cmd) Int64Slice() ([]int64, error) { + slice, err := cmd.Slice() + if err != nil { + return nil, err + } + + nums := make([]int64, len(slice)) + for i, iface := range slice { + val, err := toInt64(iface) + if err != nil { + return nil, err + } + nums[i] = val + } + return nums, nil +} + +func (cmd *Cmd) Uint64Slice() ([]uint64, error) { + slice, err := cmd.Slice() + if err != nil { + return nil, err + } + + nums := make([]uint64, len(slice)) + for i, iface := range slice { + val, err := toUint64(iface) + if err != nil { + return nil, err + } + nums[i] = val + } + return nums, nil +} + +func (cmd *Cmd) Float32Slice() ([]float32, error) { + slice, err := cmd.Slice() + if err != nil { + return nil, err + } + + floats := make([]float32, len(slice)) + for i, iface := range slice { + val, err := toFloat32(iface) + if err != nil { + return nil, err + } + floats[i] = val + } + return floats, nil +} + +func (cmd *Cmd) Float64Slice() ([]float64, error) { + slice, err := cmd.Slice() + if err != nil { + return nil, err + } + + floats := make([]float64, len(slice)) + for i, iface := range slice { + val, err := toFloat64(iface) + if err != nil { + return nil, err + } + floats[i] = val + } + return floats, nil +} + +func (cmd *Cmd) BoolSlice() ([]bool, error) { + slice, err := cmd.Slice() + if err != nil { + return nil, err + } + + bools := make([]bool, len(slice)) + for i, iface := range slice { + val, err := toBool(iface) + if err != nil { + return nil, err + } + bools[i] = val + } + return bools, nil +} + +func (cmd *Cmd) readReply(rd *proto.Reader) (err error) { + cmd.val, err = rd.ReadReply() + return err +} + +//------------------------------------------------------------------------------ + +type SliceCmd struct { + baseCmd + + val []interface{} +} + +var _ Cmder = (*SliceCmd)(nil) + +func NewSliceCmd(ctx context.Context, args ...interface{}) *SliceCmd { + return &SliceCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *SliceCmd) SetVal(val []interface{}) { + cmd.val = val +} + +func (cmd *SliceCmd) Val() []interface{} { + return cmd.val +} + +func (cmd *SliceCmd) Result() ([]interface{}, error) { + return cmd.val, cmd.err +} + +func (cmd *SliceCmd) String() string 
{ + return cmdString(cmd, cmd.val) +} + +// Scan scans the results from the map into a destination struct. The map keys +// are matched in the Redis struct fields by the `redis:"field"` tag. +func (cmd *SliceCmd) Scan(dst interface{}) error { + if cmd.err != nil { + return cmd.err + } + + // Pass the list of keys and values. + // Skip the first two args for: HMGET key + var args []interface{} + if cmd.args[0] == "hmget" { + args = cmd.args[2:] + } else { + // Otherwise, it's: MGET field field ... + args = cmd.args[1:] + } + + return hscan.Scan(dst, args, cmd.val) +} + +func (cmd *SliceCmd) readReply(rd *proto.Reader) (err error) { + cmd.val, err = rd.ReadSlice() + return err +} + +//------------------------------------------------------------------------------ + +type StatusCmd struct { + baseCmd + + val string +} + +var _ Cmder = (*StatusCmd)(nil) + +func NewStatusCmd(ctx context.Context, args ...interface{}) *StatusCmd { + return &StatusCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *StatusCmd) SetVal(val string) { + cmd.val = val +} + +func (cmd *StatusCmd) Val() string { + return cmd.val +} + +func (cmd *StatusCmd) Result() (string, error) { + return cmd.val, cmd.err +} + +func (cmd *StatusCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *StatusCmd) readReply(rd *proto.Reader) (err error) { + cmd.val, err = rd.ReadString() + return err +} + +//------------------------------------------------------------------------------ + +type IntCmd struct { + baseCmd + + val int64 +} + +var _ Cmder = (*IntCmd)(nil) + +func NewIntCmd(ctx context.Context, args ...interface{}) *IntCmd { + return &IntCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *IntCmd) SetVal(val int64) { + cmd.val = val +} + +func (cmd *IntCmd) Val() int64 { + return cmd.val +} + +func (cmd *IntCmd) Result() (int64, error) { + return cmd.val, cmd.err +} + +func (cmd *IntCmd) Uint64() (uint64, error) { + return uint64(cmd.val), cmd.err +} + +func (cmd *IntCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *IntCmd) readReply(rd *proto.Reader) (err error) { + cmd.val, err = rd.ReadInt() + return err +} + +//------------------------------------------------------------------------------ + +type IntSliceCmd struct { + baseCmd + + val []int64 +} + +var _ Cmder = (*IntSliceCmd)(nil) + +func NewIntSliceCmd(ctx context.Context, args ...interface{}) *IntSliceCmd { + return &IntSliceCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *IntSliceCmd) SetVal(val []int64) { + cmd.val = val +} + +func (cmd *IntSliceCmd) Val() []int64 { + return cmd.val +} + +func (cmd *IntSliceCmd) Result() ([]int64, error) { + return cmd.val, cmd.err +} + +func (cmd *IntSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *IntSliceCmd) readReply(rd *proto.Reader) error { + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + cmd.val = make([]int64, n) + for i := 0; i < len(cmd.val); i++ { + if cmd.val[i], err = rd.ReadInt(); err != nil { + return err + } + } + return nil +} + +//------------------------------------------------------------------------------ + +type DurationCmd struct { + baseCmd + + val time.Duration + precision time.Duration +} + +var _ Cmder = (*DurationCmd)(nil) + +func NewDurationCmd(ctx context.Context, precision time.Duration, args ...interface{}) *DurationCmd { + return &DurationCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + precision: precision, + } +} + +func (cmd 
*DurationCmd) SetVal(val time.Duration) { + cmd.val = val +} + +func (cmd *DurationCmd) Val() time.Duration { + return cmd.val +} + +func (cmd *DurationCmd) Result() (time.Duration, error) { + return cmd.val, cmd.err +} + +func (cmd *DurationCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *DurationCmd) readReply(rd *proto.Reader) error { + n, err := rd.ReadInt() + if err != nil { + return err + } + switch n { + // -2 if the key does not exist + // -1 if the key exists but has no associated expire + case -2, -1: + cmd.val = time.Duration(n) + default: + cmd.val = time.Duration(n) * cmd.precision + } + return nil +} + +//------------------------------------------------------------------------------ + +type TimeCmd struct { + baseCmd + + val time.Time +} + +var _ Cmder = (*TimeCmd)(nil) + +func NewTimeCmd(ctx context.Context, args ...interface{}) *TimeCmd { + return &TimeCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *TimeCmd) SetVal(val time.Time) { + cmd.val = val +} + +func (cmd *TimeCmd) Val() time.Time { + return cmd.val +} + +func (cmd *TimeCmd) Result() (time.Time, error) { + return cmd.val, cmd.err +} + +func (cmd *TimeCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *TimeCmd) readReply(rd *proto.Reader) error { + if err := rd.ReadFixedArrayLen(2); err != nil { + return err + } + second, err := rd.ReadInt() + if err != nil { + return err + } + microsecond, err := rd.ReadInt() + if err != nil { + return err + } + cmd.val = time.Unix(second, microsecond*1000) + return nil +} + +//------------------------------------------------------------------------------ + +type BoolCmd struct { + baseCmd + + val bool +} + +var _ Cmder = (*BoolCmd)(nil) + +func NewBoolCmd(ctx context.Context, args ...interface{}) *BoolCmd { + return &BoolCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *BoolCmd) SetVal(val bool) { + cmd.val = val +} + +func (cmd *BoolCmd) Val() bool { + return cmd.val +} + +func (cmd *BoolCmd) Result() (bool, error) { + return cmd.val, cmd.err +} + +func (cmd *BoolCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *BoolCmd) readReply(rd *proto.Reader) (err error) { + cmd.val, err = rd.ReadBool() + + // `SET key value NX` returns nil when key already exists. But + // `SETNX key value` returns bool (0/1). So convert nil to bool. 
+ if err == Nil { + cmd.val = false + err = nil + } + return err +} + +//------------------------------------------------------------------------------ + +type StringCmd struct { + baseCmd + + val string +} + +var _ Cmder = (*StringCmd)(nil) + +func NewStringCmd(ctx context.Context, args ...interface{}) *StringCmd { + return &StringCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *StringCmd) SetVal(val string) { + cmd.val = val +} + +func (cmd *StringCmd) Val() string { + return cmd.val +} + +func (cmd *StringCmd) Result() (string, error) { + return cmd.Val(), cmd.err +} + +func (cmd *StringCmd) Bytes() ([]byte, error) { + return util.StringToBytes(cmd.val), cmd.err +} + +func (cmd *StringCmd) Bool() (bool, error) { + if cmd.err != nil { + return false, cmd.err + } + return strconv.ParseBool(cmd.val) +} + +func (cmd *StringCmd) Int() (int, error) { + if cmd.err != nil { + return 0, cmd.err + } + return strconv.Atoi(cmd.Val()) +} + +func (cmd *StringCmd) Int64() (int64, error) { + if cmd.err != nil { + return 0, cmd.err + } + return strconv.ParseInt(cmd.Val(), 10, 64) +} + +func (cmd *StringCmd) Uint64() (uint64, error) { + if cmd.err != nil { + return 0, cmd.err + } + return strconv.ParseUint(cmd.Val(), 10, 64) +} + +func (cmd *StringCmd) Float32() (float32, error) { + if cmd.err != nil { + return 0, cmd.err + } + f, err := strconv.ParseFloat(cmd.Val(), 32) + if err != nil { + return 0, err + } + return float32(f), nil +} + +func (cmd *StringCmd) Float64() (float64, error) { + if cmd.err != nil { + return 0, cmd.err + } + return strconv.ParseFloat(cmd.Val(), 64) +} + +func (cmd *StringCmd) Time() (time.Time, error) { + if cmd.err != nil { + return time.Time{}, cmd.err + } + return time.Parse(time.RFC3339Nano, cmd.Val()) +} + +func (cmd *StringCmd) Scan(val interface{}) error { + if cmd.err != nil { + return cmd.err + } + return proto.Scan([]byte(cmd.val), val) +} + +func (cmd *StringCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *StringCmd) readReply(rd *proto.Reader) (err error) { + cmd.val, err = rd.ReadString() + return err +} + +//------------------------------------------------------------------------------ + +type FloatCmd struct { + baseCmd + + val float64 +} + +var _ Cmder = (*FloatCmd)(nil) + +func NewFloatCmd(ctx context.Context, args ...interface{}) *FloatCmd { + return &FloatCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *FloatCmd) SetVal(val float64) { + cmd.val = val +} + +func (cmd *FloatCmd) Val() float64 { + return cmd.val +} + +func (cmd *FloatCmd) Result() (float64, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *FloatCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *FloatCmd) readReply(rd *proto.Reader) (err error) { + cmd.val, err = rd.ReadFloat() + return err +} + +//------------------------------------------------------------------------------ + +type FloatSliceCmd struct { + baseCmd + + val []float64 +} + +var _ Cmder = (*FloatSliceCmd)(nil) + +func NewFloatSliceCmd(ctx context.Context, args ...interface{}) *FloatSliceCmd { + return &FloatSliceCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *FloatSliceCmd) SetVal(val []float64) { + cmd.val = val +} + +func (cmd *FloatSliceCmd) Val() []float64 { + return cmd.val +} + +func (cmd *FloatSliceCmd) Result() ([]float64, error) { + return cmd.val, cmd.err +} + +func (cmd *FloatSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *FloatSliceCmd) readReply(rd 
*proto.Reader) error { + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + + cmd.val = make([]float64, n) + for i := 0; i < len(cmd.val); i++ { + switch num, err := rd.ReadFloat(); { + case err == Nil: + cmd.val[i] = 0 + case err != nil: + return err + default: + cmd.val[i] = num + } + } + return nil +} + +//------------------------------------------------------------------------------ + +type StringSliceCmd struct { + baseCmd + + val []string +} + +var _ Cmder = (*StringSliceCmd)(nil) + +func NewStringSliceCmd(ctx context.Context, args ...interface{}) *StringSliceCmd { + return &StringSliceCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *StringSliceCmd) SetVal(val []string) { + cmd.val = val +} + +func (cmd *StringSliceCmd) Val() []string { + return cmd.val +} + +func (cmd *StringSliceCmd) Result() ([]string, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *StringSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *StringSliceCmd) ScanSlice(container interface{}) error { + return proto.ScanSlice(cmd.Val(), container) +} + +func (cmd *StringSliceCmd) readReply(rd *proto.Reader) error { + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + cmd.val = make([]string, n) + for i := 0; i < len(cmd.val); i++ { + switch s, err := rd.ReadString(); { + case err == Nil: + cmd.val[i] = "" + case err != nil: + return err + default: + cmd.val[i] = s + } + } + return nil +} + +//------------------------------------------------------------------------------ + +type KeyValue struct { + Key string + Value string +} + +type KeyValueSliceCmd struct { + baseCmd + + val []KeyValue +} + +var _ Cmder = (*KeyValueSliceCmd)(nil) + +func NewKeyValueSliceCmd(ctx context.Context, args ...interface{}) *KeyValueSliceCmd { + return &KeyValueSliceCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *KeyValueSliceCmd) SetVal(val []KeyValue) { + cmd.val = val +} + +func (cmd *KeyValueSliceCmd) Val() []KeyValue { + return cmd.val +} + +func (cmd *KeyValueSliceCmd) Result() ([]KeyValue, error) { + return cmd.val, cmd.err +} + +func (cmd *KeyValueSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +// Many commands will respond to two formats: +// 1. 1) "one" +// 2. (double) 1 +// 2. 1) "two" +// 2. (double) 2 +// +// OR: +// 1. "two" +// 2. (double) 2 +// 3. "one" +// 4. (double) 1 +func (cmd *KeyValueSliceCmd) readReply(rd *proto.Reader) error { // nolint:dupl + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + + // If the n is 0, can't continue reading. 
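+ // (an empty, non-nil slice is returned)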
+ if n == 0 { + cmd.val = make([]KeyValue, 0) + return nil + } + + typ, err := rd.PeekReplyType() + if err != nil { + return err + } + array := typ == proto.RespArray + + if array { + cmd.val = make([]KeyValue, n) + } else { + cmd.val = make([]KeyValue, n/2) + } + + for i := 0; i < len(cmd.val); i++ { + if array { + if err = rd.ReadFixedArrayLen(2); err != nil { + return err + } + } + + if cmd.val[i].Key, err = rd.ReadString(); err != nil { + return err + } + + if cmd.val[i].Value, err = rd.ReadString(); err != nil { + return err + } + } + + return nil +} + +//------------------------------------------------------------------------------ + +type BoolSliceCmd struct { + baseCmd + + val []bool +} + +var _ Cmder = (*BoolSliceCmd)(nil) + +func NewBoolSliceCmd(ctx context.Context, args ...interface{}) *BoolSliceCmd { + return &BoolSliceCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *BoolSliceCmd) SetVal(val []bool) { + cmd.val = val +} + +func (cmd *BoolSliceCmd) Val() []bool { + return cmd.val +} + +func (cmd *BoolSliceCmd) Result() ([]bool, error) { + return cmd.val, cmd.err +} + +func (cmd *BoolSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *BoolSliceCmd) readReply(rd *proto.Reader) error { + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + cmd.val = make([]bool, n) + for i := 0; i < len(cmd.val); i++ { + if cmd.val[i], err = rd.ReadBool(); err != nil { + return err + } + } + return nil +} + +//------------------------------------------------------------------------------ + +type MapStringStringCmd struct { + baseCmd + + val map[string]string +} + +var _ Cmder = (*MapStringStringCmd)(nil) + +func NewMapStringStringCmd(ctx context.Context, args ...interface{}) *MapStringStringCmd { + return &MapStringStringCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *MapStringStringCmd) Val() map[string]string { + return cmd.val +} + +func (cmd *MapStringStringCmd) SetVal(val map[string]string) { + cmd.val = val +} + +func (cmd *MapStringStringCmd) Result() (map[string]string, error) { + return cmd.val, cmd.err +} + +func (cmd *MapStringStringCmd) String() string { + return cmdString(cmd, cmd.val) +} + +// Scan scans the results from the map into a destination struct. The map keys +// are matched in the Redis struct fields by the `redis:"field"` tag. 
+func (cmd *MapStringStringCmd) Scan(dest interface{}) error { + if cmd.err != nil { + return cmd.err + } + + strct, err := hscan.Struct(dest) + if err != nil { + return err + } + + for k, v := range cmd.val { + if err := strct.Scan(k, v); err != nil { + return err + } + } + + return nil +} + +func (cmd *MapStringStringCmd) readReply(rd *proto.Reader) error { + n, err := rd.ReadMapLen() + if err != nil { + return err + } + + cmd.val = make(map[string]string, n) + for i := 0; i < n; i++ { + key, err := rd.ReadString() + if err != nil { + return err + } + + value, err := rd.ReadString() + if err != nil { + return err + } + + cmd.val[key] = value + } + return nil +} + +//------------------------------------------------------------------------------ + +type MapStringIntCmd struct { + baseCmd + + val map[string]int64 +} + +var _ Cmder = (*MapStringIntCmd)(nil) + +func NewMapStringIntCmd(ctx context.Context, args ...interface{}) *MapStringIntCmd { + return &MapStringIntCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *MapStringIntCmd) SetVal(val map[string]int64) { + cmd.val = val +} + +func (cmd *MapStringIntCmd) Val() map[string]int64 { + return cmd.val +} + +func (cmd *MapStringIntCmd) Result() (map[string]int64, error) { + return cmd.val, cmd.err +} + +func (cmd *MapStringIntCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *MapStringIntCmd) readReply(rd *proto.Reader) error { + n, err := rd.ReadMapLen() + if err != nil { + return err + } + + cmd.val = make(map[string]int64, n) + for i := 0; i < n; i++ { + key, err := rd.ReadString() + if err != nil { + return err + } + + nn, err := rd.ReadInt() + if err != nil { + return err + } + cmd.val[key] = nn + } + return nil +} + +//------------------------------------------------------------------------------ + +type StringStructMapCmd struct { + baseCmd + + val map[string]struct{} +} + +var _ Cmder = (*StringStructMapCmd)(nil) + +func NewStringStructMapCmd(ctx context.Context, args ...interface{}) *StringStructMapCmd { + return &StringStructMapCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *StringStructMapCmd) SetVal(val map[string]struct{}) { + cmd.val = val +} + +func (cmd *StringStructMapCmd) Val() map[string]struct{} { + return cmd.val +} + +func (cmd *StringStructMapCmd) Result() (map[string]struct{}, error) { + return cmd.val, cmd.err +} + +func (cmd *StringStructMapCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *StringStructMapCmd) readReply(rd *proto.Reader) error { + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + + cmd.val = make(map[string]struct{}, n) + for i := 0; i < n; i++ { + key, err := rd.ReadString() + if err != nil { + return err + } + cmd.val[key] = struct{}{} + } + return nil +} + +//------------------------------------------------------------------------------ + +type XMessage struct { + ID string + Values map[string]interface{} +} + +type XMessageSliceCmd struct { + baseCmd + + val []XMessage +} + +var _ Cmder = (*XMessageSliceCmd)(nil) + +func NewXMessageSliceCmd(ctx context.Context, args ...interface{}) *XMessageSliceCmd { + return &XMessageSliceCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *XMessageSliceCmd) SetVal(val []XMessage) { + cmd.val = val +} + +func (cmd *XMessageSliceCmd) Val() []XMessage { + return cmd.val +} + +func (cmd *XMessageSliceCmd) Result() ([]XMessage, error) { + return cmd.val, cmd.err +} + +func (cmd *XMessageSliceCmd) String() string { + return 
cmdString(cmd, cmd.val) +} + +func (cmd *XMessageSliceCmd) readReply(rd *proto.Reader) (err error) { + cmd.val, err = readXMessageSlice(rd) + return err +} + +func readXMessageSlice(rd *proto.Reader) ([]XMessage, error) { + n, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + + msgs := make([]XMessage, n) + for i := 0; i < len(msgs); i++ { + if msgs[i], err = readXMessage(rd); err != nil { + return nil, err + } + } + return msgs, nil +} + +func readXMessage(rd *proto.Reader) (XMessage, error) { + if err := rd.ReadFixedArrayLen(2); err != nil { + return XMessage{}, err + } + + id, err := rd.ReadString() + if err != nil { + return XMessage{}, err + } + + v, err := stringInterfaceMapParser(rd) + if err != nil { + if err != proto.Nil { + return XMessage{}, err + } + } + + return XMessage{ + ID: id, + Values: v, + }, nil +} + +func stringInterfaceMapParser(rd *proto.Reader) (map[string]interface{}, error) { + n, err := rd.ReadMapLen() + if err != nil { + return nil, err + } + + m := make(map[string]interface{}, n) + for i := 0; i < n; i++ { + key, err := rd.ReadString() + if err != nil { + return nil, err + } + + value, err := rd.ReadString() + if err != nil { + return nil, err + } + + m[key] = value + } + return m, nil +} + +//------------------------------------------------------------------------------ + +type XStream struct { + Stream string + Messages []XMessage +} + +type XStreamSliceCmd struct { + baseCmd + + val []XStream +} + +var _ Cmder = (*XStreamSliceCmd)(nil) + +func NewXStreamSliceCmd(ctx context.Context, args ...interface{}) *XStreamSliceCmd { + return &XStreamSliceCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *XStreamSliceCmd) SetVal(val []XStream) { + cmd.val = val +} + +func (cmd *XStreamSliceCmd) Val() []XStream { + return cmd.val +} + +func (cmd *XStreamSliceCmd) Result() ([]XStream, error) { + return cmd.val, cmd.err +} + +func (cmd *XStreamSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *XStreamSliceCmd) readReply(rd *proto.Reader) error { + typ, err := rd.PeekReplyType() + if err != nil { + return err + } + + var n int + if typ == proto.RespMap { + n, err = rd.ReadMapLen() + } else { + n, err = rd.ReadArrayLen() + } + if err != nil { + return err + } + cmd.val = make([]XStream, n) + for i := 0; i < len(cmd.val); i++ { + if typ != proto.RespMap { + if err = rd.ReadFixedArrayLen(2); err != nil { + return err + } + } + if cmd.val[i].Stream, err = rd.ReadString(); err != nil { + return err + } + if cmd.val[i].Messages, err = readXMessageSlice(rd); err != nil { + return err + } + } + return nil +} + +//------------------------------------------------------------------------------ + +type XPending struct { + Count int64 + Lower string + Higher string + Consumers map[string]int64 +} + +type XPendingCmd struct { + baseCmd + val *XPending +} + +var _ Cmder = (*XPendingCmd)(nil) + +func NewXPendingCmd(ctx context.Context, args ...interface{}) *XPendingCmd { + return &XPendingCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *XPendingCmd) SetVal(val *XPending) { + cmd.val = val +} + +func (cmd *XPendingCmd) Val() *XPending { + return cmd.val +} + +func (cmd *XPendingCmd) Result() (*XPending, error) { + return cmd.val, cmd.err +} + +func (cmd *XPendingCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *XPendingCmd) readReply(rd *proto.Reader) error { + var err error + if err = rd.ReadFixedArrayLen(4); err != nil { + return err + } + cmd.val = &XPending{} + + if 
cmd.val.Count, err = rd.ReadInt(); err != nil { + return err + } + + if cmd.val.Lower, err = rd.ReadString(); err != nil && err != Nil { + return err + } + + if cmd.val.Higher, err = rd.ReadString(); err != nil && err != Nil { + return err + } + + n, err := rd.ReadArrayLen() + if err != nil && err != Nil { + return err + } + cmd.val.Consumers = make(map[string]int64, n) + for i := 0; i < n; i++ { + if err = rd.ReadFixedArrayLen(2); err != nil { + return err + } + + consumerName, err := rd.ReadString() + if err != nil { + return err + } + consumerPending, err := rd.ReadInt() + if err != nil { + return err + } + cmd.val.Consumers[consumerName] = consumerPending + } + return nil +} + +//------------------------------------------------------------------------------ + +type XPendingExt struct { + ID string + Consumer string + Idle time.Duration + RetryCount int64 +} + +type XPendingExtCmd struct { + baseCmd + val []XPendingExt +} + +var _ Cmder = (*XPendingExtCmd)(nil) + +func NewXPendingExtCmd(ctx context.Context, args ...interface{}) *XPendingExtCmd { + return &XPendingExtCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *XPendingExtCmd) SetVal(val []XPendingExt) { + cmd.val = val +} + +func (cmd *XPendingExtCmd) Val() []XPendingExt { + return cmd.val +} + +func (cmd *XPendingExtCmd) Result() ([]XPendingExt, error) { + return cmd.val, cmd.err +} + +func (cmd *XPendingExtCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *XPendingExtCmd) readReply(rd *proto.Reader) error { + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + cmd.val = make([]XPendingExt, n) + + for i := 0; i < len(cmd.val); i++ { + if err = rd.ReadFixedArrayLen(4); err != nil { + return err + } + + if cmd.val[i].ID, err = rd.ReadString(); err != nil { + return err + } + + if cmd.val[i].Consumer, err = rd.ReadString(); err != nil && err != Nil { + return err + } + + idle, err := rd.ReadInt() + if err != nil && err != Nil { + return err + } + cmd.val[i].Idle = time.Duration(idle) * time.Millisecond + + if cmd.val[i].RetryCount, err = rd.ReadInt(); err != nil && err != Nil { + return err + } + } + + return nil +} + +//------------------------------------------------------------------------------ + +type XAutoClaimCmd struct { + baseCmd + + start string + val []XMessage +} + +var _ Cmder = (*XAutoClaimCmd)(nil) + +func NewXAutoClaimCmd(ctx context.Context, args ...interface{}) *XAutoClaimCmd { + return &XAutoClaimCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *XAutoClaimCmd) SetVal(val []XMessage, start string) { + cmd.val = val + cmd.start = start +} + +func (cmd *XAutoClaimCmd) Val() (messages []XMessage, start string) { + return cmd.val, cmd.start +} + +func (cmd *XAutoClaimCmd) Result() (messages []XMessage, start string, err error) { + return cmd.val, cmd.start, cmd.err +} + +func (cmd *XAutoClaimCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *XAutoClaimCmd) readReply(rd *proto.Reader) error { + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + + switch n { + case 2, // Redis 6 + 3: // Redis 7: + // ok + default: + return fmt.Errorf("redis: got %d elements in XAutoClaim reply, wanted 2/3", n) + } + + cmd.start, err = rd.ReadString() + if err != nil { + return err + } + + cmd.val, err = readXMessageSlice(rd) + if err != nil { + return err + } + + if n >= 3 { + if err := rd.DiscardNext(); err != nil { + return err + } + } + + return nil +} + 
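For context on how the typed command results defined above (StringCmd, FloatCmd, the stream and XAutoClaim variants, and so on) surface to callers, a minimal sketch of consuming them through the go-redis v9 client follows; the server address and key names are assumptions, not part of this patch.

package main

import (
	"context"
	"fmt"

	"github.com/redis/go-redis/v9"
)

func main() {
	ctx := context.Background()

	// Assumes a Redis server listening on the default local address.
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	// SET returns a *StatusCmd; GET returns a *StringCmd whose typed
	// accessors (Result, Int, Bytes, Scan, ...) are implemented above.
	if err := rdb.Set(ctx, "counter", "42", 0).Err(); err != nil {
		panic(err)
	}

	s, err := rdb.Get(ctx, "counter").Result()
	fmt.Println(s, err) // "42" <nil>

	n, err := rdb.Get(ctx, "counter").Int()
	fmt.Println(n, err) // 42 <nil>

	// A missing key is reported as the sentinel redis.Nil, which the
	// readReply implementations above translate from an empty reply.
	_, err = rdb.Get(ctx, "missing").Result()
	fmt.Println(err == redis.Nil) // true
}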
+//------------------------------------------------------------------------------ + +type XAutoClaimJustIDCmd struct { + baseCmd + + start string + val []string +} + +var _ Cmder = (*XAutoClaimJustIDCmd)(nil) + +func NewXAutoClaimJustIDCmd(ctx context.Context, args ...interface{}) *XAutoClaimJustIDCmd { + return &XAutoClaimJustIDCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *XAutoClaimJustIDCmd) SetVal(val []string, start string) { + cmd.val = val + cmd.start = start +} + +func (cmd *XAutoClaimJustIDCmd) Val() (ids []string, start string) { + return cmd.val, cmd.start +} + +func (cmd *XAutoClaimJustIDCmd) Result() (ids []string, start string, err error) { + return cmd.val, cmd.start, cmd.err +} + +func (cmd *XAutoClaimJustIDCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *XAutoClaimJustIDCmd) readReply(rd *proto.Reader) error { + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + + switch n { + case 2, // Redis 6 + 3: // Redis 7: + // ok + default: + return fmt.Errorf("redis: got %d elements in XAutoClaimJustID reply, wanted 2/3", n) + } + + cmd.start, err = rd.ReadString() + if err != nil { + return err + } + + nn, err := rd.ReadArrayLen() + if err != nil { + return err + } + + cmd.val = make([]string, nn) + for i := 0; i < nn; i++ { + cmd.val[i], err = rd.ReadString() + if err != nil { + return err + } + } + + if n >= 3 { + if err := rd.DiscardNext(); err != nil { + return err + } + } + + return nil +} + +//------------------------------------------------------------------------------ + +type XInfoConsumersCmd struct { + baseCmd + val []XInfoConsumer +} + +type XInfoConsumer struct { + Name string + Pending int64 + Idle time.Duration + Inactive time.Duration +} + +var _ Cmder = (*XInfoConsumersCmd)(nil) + +func NewXInfoConsumersCmd(ctx context.Context, stream string, group string) *XInfoConsumersCmd { + return &XInfoConsumersCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: []interface{}{"xinfo", "consumers", stream, group}, + }, + } +} + +func (cmd *XInfoConsumersCmd) SetVal(val []XInfoConsumer) { + cmd.val = val +} + +func (cmd *XInfoConsumersCmd) Val() []XInfoConsumer { + return cmd.val +} + +func (cmd *XInfoConsumersCmd) Result() ([]XInfoConsumer, error) { + return cmd.val, cmd.err +} + +func (cmd *XInfoConsumersCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *XInfoConsumersCmd) readReply(rd *proto.Reader) error { + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + cmd.val = make([]XInfoConsumer, n) + + for i := 0; i < len(cmd.val); i++ { + nn, err := rd.ReadMapLen() + if err != nil { + return err + } + + var key string + for f := 0; f < nn; f++ { + key, err = rd.ReadString() + if err != nil { + return err + } + + switch key { + case "name": + cmd.val[i].Name, err = rd.ReadString() + case "pending": + cmd.val[i].Pending, err = rd.ReadInt() + case "idle": + var idle int64 + idle, err = rd.ReadInt() + cmd.val[i].Idle = time.Duration(idle) * time.Millisecond + case "inactive": + var inactive int64 + inactive, err = rd.ReadInt() + cmd.val[i].Inactive = time.Duration(inactive) * time.Millisecond + default: + return fmt.Errorf("redis: unexpected content %s in XINFO CONSUMERS reply", key) + } + if err != nil { + return err + } + } + } + + return nil +} + +//------------------------------------------------------------------------------ + +type XInfoGroupsCmd struct { + baseCmd + val []XInfoGroup +} + +type XInfoGroup struct { + Name string + Consumers int64 + Pending int64 + LastDeliveredID 
string + EntriesRead int64 + Lag int64 +} + +var _ Cmder = (*XInfoGroupsCmd)(nil) + +func NewXInfoGroupsCmd(ctx context.Context, stream string) *XInfoGroupsCmd { + return &XInfoGroupsCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: []interface{}{"xinfo", "groups", stream}, + }, + } +} + +func (cmd *XInfoGroupsCmd) SetVal(val []XInfoGroup) { + cmd.val = val +} + +func (cmd *XInfoGroupsCmd) Val() []XInfoGroup { + return cmd.val +} + +func (cmd *XInfoGroupsCmd) Result() ([]XInfoGroup, error) { + return cmd.val, cmd.err +} + +func (cmd *XInfoGroupsCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *XInfoGroupsCmd) readReply(rd *proto.Reader) error { + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + cmd.val = make([]XInfoGroup, n) + + for i := 0; i < len(cmd.val); i++ { + group := &cmd.val[i] + + nn, err := rd.ReadMapLen() + if err != nil { + return err + } + + var key string + for j := 0; j < nn; j++ { + key, err = rd.ReadString() + if err != nil { + return err + } + + switch key { + case "name": + group.Name, err = rd.ReadString() + if err != nil { + return err + } + case "consumers": + group.Consumers, err = rd.ReadInt() + if err != nil { + return err + } + case "pending": + group.Pending, err = rd.ReadInt() + if err != nil { + return err + } + case "last-delivered-id": + group.LastDeliveredID, err = rd.ReadString() + if err != nil { + return err + } + case "entries-read": + group.EntriesRead, err = rd.ReadInt() + if err != nil && err != Nil { + return err + } + case "lag": + group.Lag, err = rd.ReadInt() + + // lag: the number of entries in the stream that are still waiting to be delivered + // to the group's consumers, or a NULL(Nil) when that number can't be determined. + if err != nil && err != Nil { + return err + } + default: + return fmt.Errorf("redis: unexpected key %q in XINFO GROUPS reply", key) + } + } + } + + return nil +} + +//------------------------------------------------------------------------------ + +type XInfoStreamCmd struct { + baseCmd + val *XInfoStream +} + +type XInfoStream struct { + Length int64 + RadixTreeKeys int64 + RadixTreeNodes int64 + Groups int64 + LastGeneratedID string + MaxDeletedEntryID string + EntriesAdded int64 + FirstEntry XMessage + LastEntry XMessage + RecordedFirstEntryID string +} + +var _ Cmder = (*XInfoStreamCmd)(nil) + +func NewXInfoStreamCmd(ctx context.Context, stream string) *XInfoStreamCmd { + return &XInfoStreamCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: []interface{}{"xinfo", "stream", stream}, + }, + } +} + +func (cmd *XInfoStreamCmd) SetVal(val *XInfoStream) { + cmd.val = val +} + +func (cmd *XInfoStreamCmd) Val() *XInfoStream { + return cmd.val +} + +func (cmd *XInfoStreamCmd) Result() (*XInfoStream, error) { + return cmd.val, cmd.err +} + +func (cmd *XInfoStreamCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *XInfoStreamCmd) readReply(rd *proto.Reader) error { + n, err := rd.ReadMapLen() + if err != nil { + return err + } + cmd.val = &XInfoStream{} + + for i := 0; i < n; i++ { + key, err := rd.ReadString() + if err != nil { + return err + } + switch key { + case "length": + cmd.val.Length, err = rd.ReadInt() + if err != nil { + return err + } + case "radix-tree-keys": + cmd.val.RadixTreeKeys, err = rd.ReadInt() + if err != nil { + return err + } + case "radix-tree-nodes": + cmd.val.RadixTreeNodes, err = rd.ReadInt() + if err != nil { + return err + } + case "groups": + cmd.val.Groups, err = rd.ReadInt() + if err != nil { + return err + } + case "last-generated-id": + 
cmd.val.LastGeneratedID, err = rd.ReadString() + if err != nil { + return err + } + case "max-deleted-entry-id": + cmd.val.MaxDeletedEntryID, err = rd.ReadString() + if err != nil { + return err + } + case "entries-added": + cmd.val.EntriesAdded, err = rd.ReadInt() + if err != nil { + return err + } + case "first-entry": + cmd.val.FirstEntry, err = readXMessage(rd) + if err != nil && err != Nil { + return err + } + case "last-entry": + cmd.val.LastEntry, err = readXMessage(rd) + if err != nil && err != Nil { + return err + } + case "recorded-first-entry-id": + cmd.val.RecordedFirstEntryID, err = rd.ReadString() + if err != nil { + return err + } + default: + return fmt.Errorf("redis: unexpected key %q in XINFO STREAM reply", key) + } + } + return nil +} + +//------------------------------------------------------------------------------ + +type XInfoStreamFullCmd struct { + baseCmd + val *XInfoStreamFull +} + +type XInfoStreamFull struct { + Length int64 + RadixTreeKeys int64 + RadixTreeNodes int64 + LastGeneratedID string + MaxDeletedEntryID string + EntriesAdded int64 + Entries []XMessage + Groups []XInfoStreamGroup + RecordedFirstEntryID string +} + +type XInfoStreamGroup struct { + Name string + LastDeliveredID string + EntriesRead int64 + Lag int64 + PelCount int64 + Pending []XInfoStreamGroupPending + Consumers []XInfoStreamConsumer +} + +type XInfoStreamGroupPending struct { + ID string + Consumer string + DeliveryTime time.Time + DeliveryCount int64 +} + +type XInfoStreamConsumer struct { + Name string + SeenTime time.Time + ActiveTime time.Time + PelCount int64 + Pending []XInfoStreamConsumerPending +} + +type XInfoStreamConsumerPending struct { + ID string + DeliveryTime time.Time + DeliveryCount int64 +} + +var _ Cmder = (*XInfoStreamFullCmd)(nil) + +func NewXInfoStreamFullCmd(ctx context.Context, args ...interface{}) *XInfoStreamFullCmd { + return &XInfoStreamFullCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *XInfoStreamFullCmd) SetVal(val *XInfoStreamFull) { + cmd.val = val +} + +func (cmd *XInfoStreamFullCmd) Val() *XInfoStreamFull { + return cmd.val +} + +func (cmd *XInfoStreamFullCmd) Result() (*XInfoStreamFull, error) { + return cmd.val, cmd.err +} + +func (cmd *XInfoStreamFullCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *XInfoStreamFullCmd) readReply(rd *proto.Reader) error { + n, err := rd.ReadMapLen() + if err != nil { + return err + } + + cmd.val = &XInfoStreamFull{} + + for i := 0; i < n; i++ { + key, err := rd.ReadString() + if err != nil { + return err + } + + switch key { + case "length": + cmd.val.Length, err = rd.ReadInt() + if err != nil { + return err + } + case "radix-tree-keys": + cmd.val.RadixTreeKeys, err = rd.ReadInt() + if err != nil { + return err + } + case "radix-tree-nodes": + cmd.val.RadixTreeNodes, err = rd.ReadInt() + if err != nil { + return err + } + case "last-generated-id": + cmd.val.LastGeneratedID, err = rd.ReadString() + if err != nil { + return err + } + case "entries-added": + cmd.val.EntriesAdded, err = rd.ReadInt() + if err != nil { + return err + } + case "entries": + cmd.val.Entries, err = readXMessageSlice(rd) + if err != nil { + return err + } + case "groups": + cmd.val.Groups, err = readStreamGroups(rd) + if err != nil { + return err + } + case "max-deleted-entry-id": + cmd.val.MaxDeletedEntryID, err = rd.ReadString() + if err != nil { + return err + } + case "recorded-first-entry-id": + cmd.val.RecordedFirstEntryID, err = rd.ReadString() + if err != nil { + return err + } + 
default: + return fmt.Errorf("redis: unexpected key %q in XINFO STREAM FULL reply", key) + } + } + return nil +} + +func readStreamGroups(rd *proto.Reader) ([]XInfoStreamGroup, error) { + n, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + groups := make([]XInfoStreamGroup, 0, n) + for i := 0; i < n; i++ { + nn, err := rd.ReadMapLen() + if err != nil { + return nil, err + } + + group := XInfoStreamGroup{} + + for j := 0; j < nn; j++ { + key, err := rd.ReadString() + if err != nil { + return nil, err + } + + switch key { + case "name": + group.Name, err = rd.ReadString() + if err != nil { + return nil, err + } + case "last-delivered-id": + group.LastDeliveredID, err = rd.ReadString() + if err != nil { + return nil, err + } + case "entries-read": + group.EntriesRead, err = rd.ReadInt() + if err != nil && err != Nil { + return nil, err + } + case "lag": + // lag: the number of entries in the stream that are still waiting to be delivered + // to the group's consumers, or a NULL(Nil) when that number can't be determined. + group.Lag, err = rd.ReadInt() + if err != nil && err != Nil { + return nil, err + } + case "pel-count": + group.PelCount, err = rd.ReadInt() + if err != nil { + return nil, err + } + case "pending": + group.Pending, err = readXInfoStreamGroupPending(rd) + if err != nil { + return nil, err + } + case "consumers": + group.Consumers, err = readXInfoStreamConsumers(rd) + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("redis: unexpected key %q in XINFO STREAM FULL reply", key) + } + } + + groups = append(groups, group) + } + + return groups, nil +} + +func readXInfoStreamGroupPending(rd *proto.Reader) ([]XInfoStreamGroupPending, error) { + n, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + + pending := make([]XInfoStreamGroupPending, 0, n) + + for i := 0; i < n; i++ { + if err = rd.ReadFixedArrayLen(4); err != nil { + return nil, err + } + + p := XInfoStreamGroupPending{} + + p.ID, err = rd.ReadString() + if err != nil { + return nil, err + } + + p.Consumer, err = rd.ReadString() + if err != nil { + return nil, err + } + + delivery, err := rd.ReadInt() + if err != nil { + return nil, err + } + p.DeliveryTime = time.Unix(delivery/1000, delivery%1000*int64(time.Millisecond)) + + p.DeliveryCount, err = rd.ReadInt() + if err != nil { + return nil, err + } + + pending = append(pending, p) + } + + return pending, nil +} + +func readXInfoStreamConsumers(rd *proto.Reader) ([]XInfoStreamConsumer, error) { + n, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + + consumers := make([]XInfoStreamConsumer, 0, n) + + for i := 0; i < n; i++ { + nn, err := rd.ReadMapLen() + if err != nil { + return nil, err + } + + c := XInfoStreamConsumer{} + + for f := 0; f < nn; f++ { + cKey, err := rd.ReadString() + if err != nil { + return nil, err + } + + switch cKey { + case "name": + c.Name, err = rd.ReadString() + case "seen-time": + seen, err := rd.ReadInt() + if err != nil { + return nil, err + } + c.SeenTime = time.UnixMilli(seen) + case "active-time": + active, err := rd.ReadInt() + if err != nil { + return nil, err + } + c.ActiveTime = time.UnixMilli(active) + case "pel-count": + c.PelCount, err = rd.ReadInt() + case "pending": + pendingNumber, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + + c.Pending = make([]XInfoStreamConsumerPending, 0, pendingNumber) + + for pn := 0; pn < pendingNumber; pn++ { + if err = rd.ReadFixedArrayLen(3); err != nil { + return nil, err + } + + p := 
XInfoStreamConsumerPending{} + + p.ID, err = rd.ReadString() + if err != nil { + return nil, err + } + + delivery, err := rd.ReadInt() + if err != nil { + return nil, err + } + p.DeliveryTime = time.Unix(delivery/1000, delivery%1000*int64(time.Millisecond)) + + p.DeliveryCount, err = rd.ReadInt() + if err != nil { + return nil, err + } + + c.Pending = append(c.Pending, p) + } + default: + return nil, fmt.Errorf("redis: unexpected content %s "+ + "in XINFO STREAM FULL reply", cKey) + } + if err != nil { + return nil, err + } + } + consumers = append(consumers, c) + } + + return consumers, nil +} + +//------------------------------------------------------------------------------ + +type ZSliceCmd struct { + baseCmd + + val []Z +} + +var _ Cmder = (*ZSliceCmd)(nil) + +func NewZSliceCmd(ctx context.Context, args ...interface{}) *ZSliceCmd { + return &ZSliceCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *ZSliceCmd) SetVal(val []Z) { + cmd.val = val +} + +func (cmd *ZSliceCmd) Val() []Z { + return cmd.val +} + +func (cmd *ZSliceCmd) Result() ([]Z, error) { + return cmd.val, cmd.err +} + +func (cmd *ZSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *ZSliceCmd) readReply(rd *proto.Reader) error { // nolint:dupl + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + + // If the n is 0, can't continue reading. + if n == 0 { + cmd.val = make([]Z, 0) + return nil + } + + typ, err := rd.PeekReplyType() + if err != nil { + return err + } + array := typ == proto.RespArray + + if array { + cmd.val = make([]Z, n) + } else { + cmd.val = make([]Z, n/2) + } + + for i := 0; i < len(cmd.val); i++ { + if array { + if err = rd.ReadFixedArrayLen(2); err != nil { + return err + } + } + + if cmd.val[i].Member, err = rd.ReadString(); err != nil { + return err + } + + if cmd.val[i].Score, err = rd.ReadFloat(); err != nil { + return err + } + } + + return nil +} + +//------------------------------------------------------------------------------ + +type ZWithKeyCmd struct { + baseCmd + + val *ZWithKey +} + +var _ Cmder = (*ZWithKeyCmd)(nil) + +func NewZWithKeyCmd(ctx context.Context, args ...interface{}) *ZWithKeyCmd { + return &ZWithKeyCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *ZWithKeyCmd) SetVal(val *ZWithKey) { + cmd.val = val +} + +func (cmd *ZWithKeyCmd) Val() *ZWithKey { + return cmd.val +} + +func (cmd *ZWithKeyCmd) Result() (*ZWithKey, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *ZWithKeyCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *ZWithKeyCmd) readReply(rd *proto.Reader) (err error) { + if err = rd.ReadFixedArrayLen(3); err != nil { + return err + } + cmd.val = &ZWithKey{} + + if cmd.val.Key, err = rd.ReadString(); err != nil { + return err + } + if cmd.val.Member, err = rd.ReadString(); err != nil { + return err + } + if cmd.val.Score, err = rd.ReadFloat(); err != nil { + return err + } + + return nil +} + +//------------------------------------------------------------------------------ + +type ScanCmd struct { + baseCmd + + page []string + cursor uint64 + + process cmdable +} + +var _ Cmder = (*ScanCmd)(nil) + +func NewScanCmd(ctx context.Context, process cmdable, args ...interface{}) *ScanCmd { + return &ScanCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + process: process, + } +} + +func (cmd *ScanCmd) SetVal(page []string, cursor uint64) { + cmd.page = page + cmd.cursor = cursor +} + +func (cmd *ScanCmd) Val() (keys []string, cursor uint64) { + return 
cmd.page, cmd.cursor +} + +func (cmd *ScanCmd) Result() (keys []string, cursor uint64, err error) { + return cmd.page, cmd.cursor, cmd.err +} + +func (cmd *ScanCmd) String() string { + return cmdString(cmd, cmd.page) +} + +func (cmd *ScanCmd) readReply(rd *proto.Reader) error { + if err := rd.ReadFixedArrayLen(2); err != nil { + return err + } + + cursor, err := rd.ReadUint() + if err != nil { + return err + } + cmd.cursor = cursor + + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + cmd.page = make([]string, n) + + for i := 0; i < len(cmd.page); i++ { + if cmd.page[i], err = rd.ReadString(); err != nil { + return err + } + } + return nil +} + +// Iterator creates a new ScanIterator. +func (cmd *ScanCmd) Iterator() *ScanIterator { + return &ScanIterator{ + cmd: cmd, + } +} + +//------------------------------------------------------------------------------ + +type ClusterNode struct { + ID string + Addr string + NetworkingMetadata map[string]string +} + +type ClusterSlot struct { + Start int + End int + Nodes []ClusterNode +} + +type ClusterSlotsCmd struct { + baseCmd + + val []ClusterSlot +} + +var _ Cmder = (*ClusterSlotsCmd)(nil) + +func NewClusterSlotsCmd(ctx context.Context, args ...interface{}) *ClusterSlotsCmd { + return &ClusterSlotsCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *ClusterSlotsCmd) SetVal(val []ClusterSlot) { + cmd.val = val +} + +func (cmd *ClusterSlotsCmd) Val() []ClusterSlot { + return cmd.val +} + +func (cmd *ClusterSlotsCmd) Result() ([]ClusterSlot, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *ClusterSlotsCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *ClusterSlotsCmd) readReply(rd *proto.Reader) error { + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + cmd.val = make([]ClusterSlot, n) + + for i := 0; i < len(cmd.val); i++ { + n, err = rd.ReadArrayLen() + if err != nil { + return err + } + if n < 2 { + return fmt.Errorf("redis: got %d elements in cluster info, expected at least 2", n) + } + + start, err := rd.ReadInt() + if err != nil { + return err + } + + end, err := rd.ReadInt() + if err != nil { + return err + } + + // subtract start and end. + nodes := make([]ClusterNode, n-2) + + for j := 0; j < len(nodes); j++ { + nn, err := rd.ReadArrayLen() + if err != nil { + return err + } + if nn < 2 || nn > 4 { + return fmt.Errorf("got %d elements in cluster info address, expected 2, 3, or 4", n) + } + + ip, err := rd.ReadString() + if err != nil { + return err + } + + port, err := rd.ReadString() + if err != nil { + return err + } + + nodes[j].Addr = net.JoinHostPort(ip, port) + + if nn >= 3 { + id, err := rd.ReadString() + if err != nil { + return err + } + nodes[j].ID = id + } + + if nn >= 4 { + metadataLength, err := rd.ReadMapLen() + if err != nil { + return err + } + + networkingMetadata := make(map[string]string, metadataLength) + + for i := 0; i < metadataLength; i++ { + key, err := rd.ReadString() + if err != nil { + return err + } + value, err := rd.ReadString() + if err != nil { + return err + } + networkingMetadata[key] = value + } + + nodes[j].NetworkingMetadata = networkingMetadata + } + } + + cmd.val[i] = ClusterSlot{ + Start: int(start), + End: int(end), + Nodes: nodes, + } + } + + return nil +} + +//------------------------------------------------------------------------------ + +// GeoLocation is used with GeoAdd to add geospatial location. 
+type GeoLocation struct { + Name string + Longitude, Latitude, Dist float64 + GeoHash int64 +} + +// GeoRadiusQuery is used with GeoRadius to query geospatial index. +type GeoRadiusQuery struct { + Radius float64 + // Can be m, km, ft, or mi. Default is km. + Unit string + WithCoord bool + WithDist bool + WithGeoHash bool + Count int + // Can be ASC or DESC. Default is no sort order. + Sort string + Store string + StoreDist string + + // WithCoord+WithDist+WithGeoHash + withLen int +} + +type GeoLocationCmd struct { + baseCmd + + q *GeoRadiusQuery + locations []GeoLocation +} + +var _ Cmder = (*GeoLocationCmd)(nil) + +func NewGeoLocationCmd(ctx context.Context, q *GeoRadiusQuery, args ...interface{}) *GeoLocationCmd { + return &GeoLocationCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: geoLocationArgs(q, args...), + }, + q: q, + } +} + +func geoLocationArgs(q *GeoRadiusQuery, args ...interface{}) []interface{} { + args = append(args, q.Radius) + if q.Unit != "" { + args = append(args, q.Unit) + } else { + args = append(args, "km") + } + if q.WithCoord { + args = append(args, "withcoord") + q.withLen++ + } + if q.WithDist { + args = append(args, "withdist") + q.withLen++ + } + if q.WithGeoHash { + args = append(args, "withhash") + q.withLen++ + } + if q.Count > 0 { + args = append(args, "count", q.Count) + } + if q.Sort != "" { + args = append(args, q.Sort) + } + if q.Store != "" { + args = append(args, "store") + args = append(args, q.Store) + } + if q.StoreDist != "" { + args = append(args, "storedist") + args = append(args, q.StoreDist) + } + return args +} + +func (cmd *GeoLocationCmd) SetVal(locations []GeoLocation) { + cmd.locations = locations +} + +func (cmd *GeoLocationCmd) Val() []GeoLocation { + return cmd.locations +} + +func (cmd *GeoLocationCmd) Result() ([]GeoLocation, error) { + return cmd.locations, cmd.err +} + +func (cmd *GeoLocationCmd) String() string { + return cmdString(cmd, cmd.locations) +} + +func (cmd *GeoLocationCmd) readReply(rd *proto.Reader) error { + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + cmd.locations = make([]GeoLocation, n) + + for i := 0; i < len(cmd.locations); i++ { + // only name + if cmd.q.withLen == 0 { + if cmd.locations[i].Name, err = rd.ReadString(); err != nil { + return err + } + continue + } + + // +name + if err = rd.ReadFixedArrayLen(cmd.q.withLen + 1); err != nil { + return err + } + + if cmd.locations[i].Name, err = rd.ReadString(); err != nil { + return err + } + if cmd.q.WithDist { + if cmd.locations[i].Dist, err = rd.ReadFloat(); err != nil { + return err + } + } + if cmd.q.WithGeoHash { + if cmd.locations[i].GeoHash, err = rd.ReadInt(); err != nil { + return err + } + } + if cmd.q.WithCoord { + if err = rd.ReadFixedArrayLen(2); err != nil { + return err + } + if cmd.locations[i].Longitude, err = rd.ReadFloat(); err != nil { + return err + } + if cmd.locations[i].Latitude, err = rd.ReadFloat(); err != nil { + return err + } + } + } + + return nil +} + +//------------------------------------------------------------------------------ + +// GeoSearchQuery is used for GEOSearch/GEOSearchStore command query. +type GeoSearchQuery struct { + Member string + + // Latitude and Longitude when using FromLonLat option. + Longitude float64 + Latitude float64 + + // Distance and unit when using ByRadius option. + // Can use m, km, ft, or mi. Default is km. + Radius float64 + RadiusUnit string + + // Height, width and unit when using ByBox option. + // Can be m, km, ft, or mi. Default is km. 
+ BoxWidth float64 + BoxHeight float64 + BoxUnit string + + // Can be ASC or DESC. Default is no sort order. + Sort string + Count int + CountAny bool +} + +type GeoSearchLocationQuery struct { + GeoSearchQuery + + WithCoord bool + WithDist bool + WithHash bool +} + +type GeoSearchStoreQuery struct { + GeoSearchQuery + + // When using the StoreDist option, the command stores the items in a + // sorted set populated with their distance from the center of the circle or box, + // as a floating-point number, in the same unit specified for that shape. + StoreDist bool +} + +func geoSearchLocationArgs(q *GeoSearchLocationQuery, args []interface{}) []interface{} { + args = geoSearchArgs(&q.GeoSearchQuery, args) + + if q.WithCoord { + args = append(args, "withcoord") + } + if q.WithDist { + args = append(args, "withdist") + } + if q.WithHash { + args = append(args, "withhash") + } + + return args +} + +func geoSearchArgs(q *GeoSearchQuery, args []interface{}) []interface{} { + if q.Member != "" { + args = append(args, "frommember", q.Member) + } else { + args = append(args, "fromlonlat", q.Longitude, q.Latitude) + } + + if q.Radius > 0 { + if q.RadiusUnit == "" { + q.RadiusUnit = "km" + } + args = append(args, "byradius", q.Radius, q.RadiusUnit) + } else { + if q.BoxUnit == "" { + q.BoxUnit = "km" + } + args = append(args, "bybox", q.BoxWidth, q.BoxHeight, q.BoxUnit) + } + + if q.Sort != "" { + args = append(args, q.Sort) + } + + if q.Count > 0 { + args = append(args, "count", q.Count) + if q.CountAny { + args = append(args, "any") + } + } + + return args +} + +type GeoSearchLocationCmd struct { + baseCmd + + opt *GeoSearchLocationQuery + val []GeoLocation +} + +var _ Cmder = (*GeoSearchLocationCmd)(nil) + +func NewGeoSearchLocationCmd( + ctx context.Context, opt *GeoSearchLocationQuery, args ...interface{}, +) *GeoSearchLocationCmd { + return &GeoSearchLocationCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + opt: opt, + } +} + +func (cmd *GeoSearchLocationCmd) SetVal(val []GeoLocation) { + cmd.val = val +} + +func (cmd *GeoSearchLocationCmd) Val() []GeoLocation { + return cmd.val +} + +func (cmd *GeoSearchLocationCmd) Result() ([]GeoLocation, error) { + return cmd.val, cmd.err +} + +func (cmd *GeoSearchLocationCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *GeoSearchLocationCmd) readReply(rd *proto.Reader) error { + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + + cmd.val = make([]GeoLocation, n) + for i := 0; i < n; i++ { + _, err = rd.ReadArrayLen() + if err != nil { + return err + } + + var loc GeoLocation + + loc.Name, err = rd.ReadString() + if err != nil { + return err + } + if cmd.opt.WithDist { + loc.Dist, err = rd.ReadFloat() + if err != nil { + return err + } + } + if cmd.opt.WithHash { + loc.GeoHash, err = rd.ReadInt() + if err != nil { + return err + } + } + if cmd.opt.WithCoord { + if err = rd.ReadFixedArrayLen(2); err != nil { + return err + } + loc.Longitude, err = rd.ReadFloat() + if err != nil { + return err + } + loc.Latitude, err = rd.ReadFloat() + if err != nil { + return err + } + } + + cmd.val[i] = loc + } + + return nil +} + +//------------------------------------------------------------------------------ + +type GeoPos struct { + Longitude, Latitude float64 +} + +type GeoPosCmd struct { + baseCmd + + val []*GeoPos +} + +var _ Cmder = (*GeoPosCmd)(nil) + +func NewGeoPosCmd(ctx context.Context, args ...interface{}) *GeoPosCmd { + return &GeoPosCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd 
*GeoPosCmd) SetVal(val []*GeoPos) { + cmd.val = val +} + +func (cmd *GeoPosCmd) Val() []*GeoPos { + return cmd.val +} + +func (cmd *GeoPosCmd) Result() ([]*GeoPos, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *GeoPosCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *GeoPosCmd) readReply(rd *proto.Reader) error { + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + cmd.val = make([]*GeoPos, n) + + for i := 0; i < len(cmd.val); i++ { + err = rd.ReadFixedArrayLen(2) + if err != nil { + if err == Nil { + cmd.val[i] = nil + continue + } + return err + } + + longitude, err := rd.ReadFloat() + if err != nil { + return err + } + latitude, err := rd.ReadFloat() + if err != nil { + return err + } + + cmd.val[i] = &GeoPos{ + Longitude: longitude, + Latitude: latitude, + } + } + + return nil +} + +//------------------------------------------------------------------------------ + +type CommandInfo struct { + Name string + Arity int8 + Flags []string + ACLFlags []string + FirstKeyPos int8 + LastKeyPos int8 + StepCount int8 + ReadOnly bool +} + +type CommandsInfoCmd struct { + baseCmd + + val map[string]*CommandInfo +} + +var _ Cmder = (*CommandsInfoCmd)(nil) + +func NewCommandsInfoCmd(ctx context.Context, args ...interface{}) *CommandsInfoCmd { + return &CommandsInfoCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *CommandsInfoCmd) SetVal(val map[string]*CommandInfo) { + cmd.val = val +} + +func (cmd *CommandsInfoCmd) Val() map[string]*CommandInfo { + return cmd.val +} + +func (cmd *CommandsInfoCmd) Result() (map[string]*CommandInfo, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *CommandsInfoCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *CommandsInfoCmd) readReply(rd *proto.Reader) error { + const numArgRedis5 = 6 + const numArgRedis6 = 7 + const numArgRedis7 = 10 + + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + cmd.val = make(map[string]*CommandInfo, n) + + for i := 0; i < n; i++ { + nn, err := rd.ReadArrayLen() + if err != nil { + return err + } + + switch nn { + case numArgRedis5, numArgRedis6, numArgRedis7: + // ok + default: + return fmt.Errorf("redis: got %d elements in COMMAND reply, wanted 6/7/10", nn) + } + + cmdInfo := &CommandInfo{} + if cmdInfo.Name, err = rd.ReadString(); err != nil { + return err + } + + arity, err := rd.ReadInt() + if err != nil { + return err + } + cmdInfo.Arity = int8(arity) + + flagLen, err := rd.ReadArrayLen() + if err != nil { + return err + } + cmdInfo.Flags = make([]string, flagLen) + for f := 0; f < len(cmdInfo.Flags); f++ { + switch s, err := rd.ReadString(); { + case err == Nil: + cmdInfo.Flags[f] = "" + case err != nil: + return err + default: + if !cmdInfo.ReadOnly && s == "readonly" { + cmdInfo.ReadOnly = true + } + cmdInfo.Flags[f] = s + } + } + + firstKeyPos, err := rd.ReadInt() + if err != nil { + return err + } + cmdInfo.FirstKeyPos = int8(firstKeyPos) + + lastKeyPos, err := rd.ReadInt() + if err != nil { + return err + } + cmdInfo.LastKeyPos = int8(lastKeyPos) + + stepCount, err := rd.ReadInt() + if err != nil { + return err + } + cmdInfo.StepCount = int8(stepCount) + + if nn >= numArgRedis6 { + aclFlagLen, err := rd.ReadArrayLen() + if err != nil { + return err + } + cmdInfo.ACLFlags = make([]string, aclFlagLen) + for f := 0; f < len(cmdInfo.ACLFlags); f++ { + switch s, err := rd.ReadString(); { + case err == Nil: + cmdInfo.ACLFlags[f] = "" + case err != nil: + return err + default: + cmdInfo.ACLFlags[f] = s + } + } + } + + if 
nn >= numArgRedis7 { + if err := rd.DiscardNext(); err != nil { + return err + } + if err := rd.DiscardNext(); err != nil { + return err + } + if err := rd.DiscardNext(); err != nil { + return err + } + } + + cmd.val[cmdInfo.Name] = cmdInfo + } + + return nil +} + +//------------------------------------------------------------------------------ + +type cmdsInfoCache struct { + fn func(ctx context.Context) (map[string]*CommandInfo, error) + + once internal.Once + cmds map[string]*CommandInfo +} + +func newCmdsInfoCache(fn func(ctx context.Context) (map[string]*CommandInfo, error)) *cmdsInfoCache { + return &cmdsInfoCache{ + fn: fn, + } +} + +func (c *cmdsInfoCache) Get(ctx context.Context) (map[string]*CommandInfo, error) { + err := c.once.Do(func() error { + cmds, err := c.fn(ctx) + if err != nil { + return err + } + + // Extensions have cmd names in upper case. Convert them to lower case. + for k, v := range cmds { + lower := internal.ToLower(k) + if lower != k { + cmds[lower] = v + } + } + + c.cmds = cmds + return nil + }) + return c.cmds, err +} + +//------------------------------------------------------------------------------ + +type SlowLog struct { + ID int64 + Time time.Time + Duration time.Duration + Args []string + // These are also optional fields emitted only by Redis 4.0 or greater: + // https://redis.io/commands/slowlog#output-format + ClientAddr string + ClientName string +} + +type SlowLogCmd struct { + baseCmd + + val []SlowLog +} + +var _ Cmder = (*SlowLogCmd)(nil) + +func NewSlowLogCmd(ctx context.Context, args ...interface{}) *SlowLogCmd { + return &SlowLogCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *SlowLogCmd) SetVal(val []SlowLog) { + cmd.val = val +} + +func (cmd *SlowLogCmd) Val() []SlowLog { + return cmd.val +} + +func (cmd *SlowLogCmd) Result() ([]SlowLog, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *SlowLogCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *SlowLogCmd) readReply(rd *proto.Reader) error { + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + cmd.val = make([]SlowLog, n) + + for i := 0; i < len(cmd.val); i++ { + nn, err := rd.ReadArrayLen() + if err != nil { + return err + } + if nn < 4 { + return fmt.Errorf("redis: got %d elements in slowlog get, expected at least 4", nn) + } + + if cmd.val[i].ID, err = rd.ReadInt(); err != nil { + return err + } + + createdAt, err := rd.ReadInt() + if err != nil { + return err + } + cmd.val[i].Time = time.Unix(createdAt, 0) + + costs, err := rd.ReadInt() + if err != nil { + return err + } + cmd.val[i].Duration = time.Duration(costs) * time.Microsecond + + cmdLen, err := rd.ReadArrayLen() + if err != nil { + return err + } + if cmdLen < 1 { + return fmt.Errorf("redis: got %d elements commands reply in slowlog get, expected at least 1", cmdLen) + } + + cmd.val[i].Args = make([]string, cmdLen) + for f := 0; f < len(cmd.val[i].Args); f++ { + cmd.val[i].Args[f], err = rd.ReadString() + if err != nil { + return err + } + } + + if nn >= 5 { + if cmd.val[i].ClientAddr, err = rd.ReadString(); err != nil { + return err + } + } + + if nn >= 6 { + if cmd.val[i].ClientName, err = rd.ReadString(); err != nil { + return err + } + } + } + + return nil +} + +//----------------------------------------------------------------------- + +type MapStringInterfaceCmd struct { + baseCmd + + val map[string]interface{} +} + +var _ Cmder = (*MapStringInterfaceCmd)(nil) + +func NewMapStringInterfaceCmd(ctx context.Context, args ...interface{}) 
*MapStringInterfaceCmd { + return &MapStringInterfaceCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *MapStringInterfaceCmd) SetVal(val map[string]interface{}) { + cmd.val = val +} + +func (cmd *MapStringInterfaceCmd) Val() map[string]interface{} { + return cmd.val +} + +func (cmd *MapStringInterfaceCmd) Result() (map[string]interface{}, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *MapStringInterfaceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *MapStringInterfaceCmd) readReply(rd *proto.Reader) error { + n, err := rd.ReadMapLen() + if err != nil { + return err + } + + cmd.val = make(map[string]interface{}, n) + for i := 0; i < n; i++ { + k, err := rd.ReadString() + if err != nil { + return err + } + v, err := rd.ReadReply() + if err != nil { + if err == Nil { + cmd.val[k] = Nil + continue + } + if err, ok := err.(proto.RedisError); ok { + cmd.val[k] = err + continue + } + return err + } + cmd.val[k] = v + } + return nil +} + +//----------------------------------------------------------------------- + +type MapStringStringSliceCmd struct { + baseCmd + + val []map[string]string +} + +var _ Cmder = (*MapStringStringSliceCmd)(nil) + +func NewMapStringStringSliceCmd(ctx context.Context, args ...interface{}) *MapStringStringSliceCmd { + return &MapStringStringSliceCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *MapStringStringSliceCmd) SetVal(val []map[string]string) { + cmd.val = val +} + +func (cmd *MapStringStringSliceCmd) Val() []map[string]string { + return cmd.val +} + +func (cmd *MapStringStringSliceCmd) Result() ([]map[string]string, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *MapStringStringSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *MapStringStringSliceCmd) readReply(rd *proto.Reader) error { + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + + cmd.val = make([]map[string]string, n) + for i := 0; i < n; i++ { + nn, err := rd.ReadMapLen() + if err != nil { + return err + } + cmd.val[i] = make(map[string]string, nn) + for f := 0; f < nn; f++ { + k, err := rd.ReadString() + if err != nil { + return err + } + + v, err := rd.ReadString() + if err != nil { + return err + } + cmd.val[i][k] = v + } + } + return nil +} + +//----------------------------------------------------------------------- + +type MapStringInterfaceSliceCmd struct { + baseCmd + + val []map[string]interface{} +} + +var _ Cmder = (*MapStringInterfaceSliceCmd)(nil) + +func NewMapStringInterfaceSliceCmd(ctx context.Context, args ...interface{}) *MapStringInterfaceSliceCmd { + return &MapStringInterfaceSliceCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *MapStringInterfaceSliceCmd) SetVal(val []map[string]interface{}) { + cmd.val = val +} + +func (cmd *MapStringInterfaceSliceCmd) Val() []map[string]interface{} { + return cmd.val +} + +func (cmd *MapStringInterfaceSliceCmd) Result() ([]map[string]interface{}, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *MapStringInterfaceSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *MapStringInterfaceSliceCmd) readReply(rd *proto.Reader) error { + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + + cmd.val = make([]map[string]interface{}, n) + for i := 0; i < n; i++ { + nn, err := rd.ReadMapLen() + if err != nil { + return err + } + cmd.val[i] = make(map[string]interface{}, nn) + for f := 0; f < nn; f++ { + k, err := rd.ReadString() + if err != nil { + 
return err + } + v, err := rd.ReadReply() + if err != nil { + if err != Nil { + return err + } + } + cmd.val[i][k] = v + } + } + return nil +} + +//------------------------------------------------------------------------------ + +type KeyValuesCmd struct { + baseCmd + + key string + val []string +} + +var _ Cmder = (*KeyValuesCmd)(nil) + +func NewKeyValuesCmd(ctx context.Context, args ...interface{}) *KeyValuesCmd { + return &KeyValuesCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *KeyValuesCmd) SetVal(key string, val []string) { + cmd.key = key + cmd.val = val +} + +func (cmd *KeyValuesCmd) Val() (string, []string) { + return cmd.key, cmd.val +} + +func (cmd *KeyValuesCmd) Result() (string, []string, error) { + return cmd.key, cmd.val, cmd.err +} + +func (cmd *KeyValuesCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *KeyValuesCmd) readReply(rd *proto.Reader) (err error) { + if err = rd.ReadFixedArrayLen(2); err != nil { + return err + } + + cmd.key, err = rd.ReadString() + if err != nil { + return err + } + + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + cmd.val = make([]string, n) + for i := 0; i < n; i++ { + cmd.val[i], err = rd.ReadString() + if err != nil { + return err + } + } + + return nil +} + +//------------------------------------------------------------------------------ + +type ZSliceWithKeyCmd struct { + baseCmd + + key string + val []Z +} + +var _ Cmder = (*ZSliceWithKeyCmd)(nil) + +func NewZSliceWithKeyCmd(ctx context.Context, args ...interface{}) *ZSliceWithKeyCmd { + return &ZSliceWithKeyCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *ZSliceWithKeyCmd) SetVal(key string, val []Z) { + cmd.key = key + cmd.val = val +} + +func (cmd *ZSliceWithKeyCmd) Val() (string, []Z) { + return cmd.key, cmd.val +} + +func (cmd *ZSliceWithKeyCmd) Result() (string, []Z, error) { + return cmd.key, cmd.val, cmd.err +} + +func (cmd *ZSliceWithKeyCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *ZSliceWithKeyCmd) readReply(rd *proto.Reader) (err error) { + if err = rd.ReadFixedArrayLen(2); err != nil { + return err + } + + cmd.key, err = rd.ReadString() + if err != nil { + return err + } + + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + + typ, err := rd.PeekReplyType() + if err != nil { + return err + } + array := typ == proto.RespArray + + if array { + cmd.val = make([]Z, n) + } else { + cmd.val = make([]Z, n/2) + } + + for i := 0; i < len(cmd.val); i++ { + if array { + if err = rd.ReadFixedArrayLen(2); err != nil { + return err + } + } + + if cmd.val[i].Member, err = rd.ReadString(); err != nil { + return err + } + + if cmd.val[i].Score, err = rd.ReadFloat(); err != nil { + return err + } + } + + return nil +} + +type Function struct { + Name string + Description string + Flags []string +} + +type Library struct { + Name string + Engine string + Functions []Function + Code string +} + +type FunctionListCmd struct { + baseCmd + + val []Library +} + +var _ Cmder = (*FunctionListCmd)(nil) + +func NewFunctionListCmd(ctx context.Context, args ...interface{}) *FunctionListCmd { + return &FunctionListCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *FunctionListCmd) SetVal(val []Library) { + cmd.val = val +} + +func (cmd *FunctionListCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *FunctionListCmd) Val() []Library { + return cmd.val +} + +func (cmd *FunctionListCmd) Result() ([]Library, error) { + return 
cmd.val, cmd.err +} + +func (cmd *FunctionListCmd) First() (*Library, error) { + if cmd.err != nil { + return nil, cmd.err + } + if len(cmd.val) > 0 { + return &cmd.val[0], nil + } + return nil, Nil +} + +func (cmd *FunctionListCmd) readReply(rd *proto.Reader) (err error) { + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + + libraries := make([]Library, n) + for i := 0; i < n; i++ { + nn, err := rd.ReadMapLen() + if err != nil { + return err + } + + library := Library{} + for f := 0; f < nn; f++ { + key, err := rd.ReadString() + if err != nil { + return err + } + + switch key { + case "library_name": + library.Name, err = rd.ReadString() + case "engine": + library.Engine, err = rd.ReadString() + case "functions": + library.Functions, err = cmd.readFunctions(rd) + case "library_code": + library.Code, err = rd.ReadString() + default: + return fmt.Errorf("redis: function list unexpected key %s", key) + } + + if err != nil { + return err + } + } + + libraries[i] = library + } + cmd.val = libraries + return nil +} + +func (cmd *FunctionListCmd) readFunctions(rd *proto.Reader) ([]Function, error) { + n, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + + functions := make([]Function, n) + for i := 0; i < n; i++ { + nn, err := rd.ReadMapLen() + if err != nil { + return nil, err + } + + function := Function{} + for f := 0; f < nn; f++ { + key, err := rd.ReadString() + if err != nil { + return nil, err + } + + switch key { + case "name": + if function.Name, err = rd.ReadString(); err != nil { + return nil, err + } + case "description": + if function.Description, err = rd.ReadString(); err != nil && err != Nil { + return nil, err + } + case "flags": + // resp set + nx, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + + function.Flags = make([]string, nx) + for j := 0; j < nx; j++ { + if function.Flags[j], err = rd.ReadString(); err != nil { + return nil, err + } + } + default: + return nil, fmt.Errorf("redis: function list unexpected key %s", key) + } + } + + functions[i] = function + } + return functions, nil +} + +// FunctionStats contains information about the scripts currently executing on the server, and the available engines +// - Engines: +// Statistics about the engine like number of functions and number of libraries +// - RunningScript: +// The script currently running on the shard we're connecting to. +// For Redis Enterprise and Redis Cloud, this represents the +// function with the longest running time, across all the running functions, on all shards +// - RunningScripts +// All scripts currently running in a Redis Enterprise clustered database. +// Only available on Redis Enterprise +type FunctionStats struct { + Engines []Engine + isRunning bool + rs RunningScript + allrs []RunningScript +} + +func (fs *FunctionStats) Running() bool { + return fs.isRunning +} + +func (fs *FunctionStats) RunningScript() (RunningScript, bool) { + return fs.rs, fs.isRunning +} + +// AllRunningScripts returns all scripts currently running in a Redis Enterprise clustered database. 
+// Only available on Redis Enterprise +func (fs *FunctionStats) AllRunningScripts() []RunningScript { + return fs.allrs +} + +type RunningScript struct { + Name string + Command []string + Duration time.Duration +} + +type Engine struct { + Language string + LibrariesCount int64 + FunctionsCount int64 +} + +type FunctionStatsCmd struct { + baseCmd + val FunctionStats +} + +var _ Cmder = (*FunctionStatsCmd)(nil) + +func NewFunctionStatsCmd(ctx context.Context, args ...interface{}) *FunctionStatsCmd { + return &FunctionStatsCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *FunctionStatsCmd) SetVal(val FunctionStats) { + cmd.val = val +} + +func (cmd *FunctionStatsCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *FunctionStatsCmd) Val() FunctionStats { + return cmd.val +} + +func (cmd *FunctionStatsCmd) Result() (FunctionStats, error) { + return cmd.val, cmd.err +} + +func (cmd *FunctionStatsCmd) readReply(rd *proto.Reader) (err error) { + n, err := rd.ReadMapLen() + if err != nil { + return err + } + + var key string + var result FunctionStats + for f := 0; f < n; f++ { + key, err = rd.ReadString() + if err != nil { + return err + } + + switch key { + case "running_script": + result.rs, result.isRunning, err = cmd.readRunningScript(rd) + case "engines": + result.Engines, err = cmd.readEngines(rd) + case "all_running_scripts": // Redis Enterprise only + result.allrs, result.isRunning, err = cmd.readRunningScripts(rd) + default: + return fmt.Errorf("redis: function stats unexpected key %s", key) + } + + if err != nil { + return err + } + } + + cmd.val = result + return nil +} + +func (cmd *FunctionStatsCmd) readRunningScript(rd *proto.Reader) (RunningScript, bool, error) { + err := rd.ReadFixedMapLen(3) + if err != nil { + if err == Nil { + return RunningScript{}, false, nil + } + return RunningScript{}, false, err + } + + var runningScript RunningScript + for i := 0; i < 3; i++ { + key, err := rd.ReadString() + if err != nil { + return RunningScript{}, false, err + } + + switch key { + case "name": + runningScript.Name, err = rd.ReadString() + case "duration_ms": + runningScript.Duration, err = cmd.readDuration(rd) + case "command": + runningScript.Command, err = cmd.readCommand(rd) + default: + return RunningScript{}, false, fmt.Errorf("redis: function stats unexpected running_script key %s", key) + } + + if err != nil { + return RunningScript{}, false, err + } + } + + return runningScript, true, nil +} + +func (cmd *FunctionStatsCmd) readEngines(rd *proto.Reader) ([]Engine, error) { + n, err := rd.ReadMapLen() + if err != nil { + return nil, err + } + + engines := make([]Engine, 0, n) + for i := 0; i < n; i++ { + engine := Engine{} + engine.Language, err = rd.ReadString() + if err != nil { + return nil, err + } + + err = rd.ReadFixedMapLen(2) + if err != nil { + return nil, fmt.Errorf("redis: function stats unexpected %s engine map length", engine.Language) + } + + for i := 0; i < 2; i++ { + key, err := rd.ReadString() + switch key { + case "libraries_count": + engine.LibrariesCount, err = rd.ReadInt() + case "functions_count": + engine.FunctionsCount, err = rd.ReadInt() + } + if err != nil { + return nil, err + } + } + + engines = append(engines, engine) + } + return engines, nil +} + +func (cmd *FunctionStatsCmd) readDuration(rd *proto.Reader) (time.Duration, error) { + t, err := rd.ReadInt() + if err != nil { + return time.Duration(0), err + } + return time.Duration(t) * time.Millisecond, nil +} + +func (cmd *FunctionStatsCmd) 
readCommand(rd *proto.Reader) ([]string, error) { + + n, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + + command := make([]string, 0, n) + for i := 0; i < n; i++ { + x, err := rd.ReadString() + if err != nil { + return nil, err + } + command = append(command, x) + } + + return command, nil +} +func (cmd *FunctionStatsCmd) readRunningScripts(rd *proto.Reader) ([]RunningScript, bool, error) { + n, err := rd.ReadArrayLen() + if err != nil { + return nil, false, err + } + + runningScripts := make([]RunningScript, 0, n) + for i := 0; i < n; i++ { + rs, _, err := cmd.readRunningScript(rd) + if err != nil { + return nil, false, err + } + runningScripts = append(runningScripts, rs) + } + + return runningScripts, len(runningScripts) > 0, nil +} + +//------------------------------------------------------------------------------ + +// LCSQuery is a parameter used for the LCS command +type LCSQuery struct { + Key1 string + Key2 string + Len bool + Idx bool + MinMatchLen int + WithMatchLen bool +} + +// LCSMatch is the result set of the LCS command. +type LCSMatch struct { + MatchString string + Matches []LCSMatchedPosition + Len int64 +} + +type LCSMatchedPosition struct { + Key1 LCSPosition + Key2 LCSPosition + + // only for withMatchLen is true + MatchLen int64 +} + +type LCSPosition struct { + Start int64 + End int64 +} + +type LCSCmd struct { + baseCmd + + // 1: match string + // 2: match len + // 3: match idx LCSMatch + readType uint8 + val *LCSMatch +} + +func NewLCSCmd(ctx context.Context, q *LCSQuery) *LCSCmd { + args := make([]interface{}, 3, 7) + args[0] = "lcs" + args[1] = q.Key1 + args[2] = q.Key2 + + cmd := &LCSCmd{readType: 1} + if q.Len { + cmd.readType = 2 + args = append(args, "len") + } else if q.Idx { + cmd.readType = 3 + args = append(args, "idx") + if q.MinMatchLen != 0 { + args = append(args, "minmatchlen", q.MinMatchLen) + } + if q.WithMatchLen { + args = append(args, "withmatchlen") + } + } + cmd.baseCmd = baseCmd{ + ctx: ctx, + args: args, + } + + return cmd +} + +func (cmd *LCSCmd) SetVal(val *LCSMatch) { + cmd.val = val +} + +func (cmd *LCSCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *LCSCmd) Val() *LCSMatch { + return cmd.val +} + +func (cmd *LCSCmd) Result() (*LCSMatch, error) { + return cmd.val, cmd.err +} + +func (cmd *LCSCmd) readReply(rd *proto.Reader) (err error) { + lcs := &LCSMatch{} + switch cmd.readType { + case 1: + // match string + if lcs.MatchString, err = rd.ReadString(); err != nil { + return err + } + case 2: + // match len + if lcs.Len, err = rd.ReadInt(); err != nil { + return err + } + case 3: + // read LCSMatch + if err = rd.ReadFixedMapLen(2); err != nil { + return err + } + + // read matches or len field + for i := 0; i < 2; i++ { + key, err := rd.ReadString() + if err != nil { + return err + } + + switch key { + case "matches": + // read array of matched positions + if lcs.Matches, err = cmd.readMatchedPositions(rd); err != nil { + return err + } + case "len": + // read match length + if lcs.Len, err = rd.ReadInt(); err != nil { + return err + } + } + } + } + + cmd.val = lcs + return nil +} + +func (cmd *LCSCmd) readMatchedPositions(rd *proto.Reader) ([]LCSMatchedPosition, error) { + n, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + + positions := make([]LCSMatchedPosition, n) + for i := 0; i < n; i++ { + pn, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + + if positions[i].Key1, err = cmd.readPosition(rd); err != nil { + return nil, err + } + if positions[i].Key2, 
err = cmd.readPosition(rd); err != nil { + return nil, err + } + + // read match length if WithMatchLen is true + if pn > 2 { + if positions[i].MatchLen, err = rd.ReadInt(); err != nil { + return nil, err + } + } + } + + return positions, nil +} + +func (cmd *LCSCmd) readPosition(rd *proto.Reader) (pos LCSPosition, err error) { + if err = rd.ReadFixedArrayLen(2); err != nil { + return pos, err + } + if pos.Start, err = rd.ReadInt(); err != nil { + return pos, err + } + if pos.End, err = rd.ReadInt(); err != nil { + return pos, err + } + + return pos, nil +} + +// ------------------------------------------------------------------------ + +type KeyFlags struct { + Key string + Flags []string +} + +type KeyFlagsCmd struct { + baseCmd + + val []KeyFlags +} + +var _ Cmder = (*KeyFlagsCmd)(nil) + +func NewKeyFlagsCmd(ctx context.Context, args ...interface{}) *KeyFlagsCmd { + return &KeyFlagsCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *KeyFlagsCmd) SetVal(val []KeyFlags) { + cmd.val = val +} + +func (cmd *KeyFlagsCmd) Val() []KeyFlags { + return cmd.val +} + +func (cmd *KeyFlagsCmd) Result() ([]KeyFlags, error) { + return cmd.val, cmd.err +} + +func (cmd *KeyFlagsCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *KeyFlagsCmd) readReply(rd *proto.Reader) error { + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + + if n == 0 { + cmd.val = make([]KeyFlags, 0) + return nil + } + + cmd.val = make([]KeyFlags, n) + + for i := 0; i < len(cmd.val); i++ { + + if err = rd.ReadFixedArrayLen(2); err != nil { + return err + } + + if cmd.val[i].Key, err = rd.ReadString(); err != nil { + return err + } + flagsLen, err := rd.ReadArrayLen() + if err != nil { + return err + } + cmd.val[i].Flags = make([]string, flagsLen) + + for j := 0; j < flagsLen; j++ { + if cmd.val[i].Flags[j], err = rd.ReadString(); err != nil { + return err + } + } + } + + return nil +} + +// --------------------------------------------------------------------------------------------------- + +type ClusterLink struct { + Direction string + Node string + CreateTime int64 + Events string + SendBufferAllocated int64 + SendBufferUsed int64 +} + +type ClusterLinksCmd struct { + baseCmd + + val []ClusterLink +} + +var _ Cmder = (*ClusterLinksCmd)(nil) + +func NewClusterLinksCmd(ctx context.Context, args ...interface{}) *ClusterLinksCmd { + return &ClusterLinksCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *ClusterLinksCmd) SetVal(val []ClusterLink) { + cmd.val = val +} + +func (cmd *ClusterLinksCmd) Val() []ClusterLink { + return cmd.val +} + +func (cmd *ClusterLinksCmd) Result() ([]ClusterLink, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *ClusterLinksCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *ClusterLinksCmd) readReply(rd *proto.Reader) error { + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + cmd.val = make([]ClusterLink, n) + + for i := 0; i < len(cmd.val); i++ { + m, err := rd.ReadMapLen() + if err != nil { + return err + } + + for j := 0; j < m; j++ { + key, err := rd.ReadString() + if err != nil { + return err + } + + switch key { + case "direction": + cmd.val[i].Direction, err = rd.ReadString() + case "node": + cmd.val[i].Node, err = rd.ReadString() + case "create-time": + cmd.val[i].CreateTime, err = rd.ReadInt() + case "events": + cmd.val[i].Events, err = rd.ReadString() + case "send-buffer-allocated": + cmd.val[i].SendBufferAllocated, err = rd.ReadInt() + case "send-buffer-used": + 
cmd.val[i].SendBufferUsed, err = rd.ReadInt() + default: + return fmt.Errorf("redis: unexpected key %q in CLUSTER LINKS reply", key) + } + + if err != nil { + return err + } + } + } + + return nil +} + +// ------------------------------------------------------------------------------------------------------------------ + +type SlotRange struct { + Start int64 + End int64 +} + +type Node struct { + ID string + Endpoint string + IP string + Hostname string + Port int64 + TLSPort int64 + Role string + ReplicationOffset int64 + Health string +} + +type ClusterShard struct { + Slots []SlotRange + Nodes []Node +} + +type ClusterShardsCmd struct { + baseCmd + + val []ClusterShard +} + +var _ Cmder = (*ClusterShardsCmd)(nil) + +func NewClusterShardsCmd(ctx context.Context, args ...interface{}) *ClusterShardsCmd { + return &ClusterShardsCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *ClusterShardsCmd) SetVal(val []ClusterShard) { + cmd.val = val +} + +func (cmd *ClusterShardsCmd) Val() []ClusterShard { + return cmd.val +} + +func (cmd *ClusterShardsCmd) Result() ([]ClusterShard, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *ClusterShardsCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *ClusterShardsCmd) readReply(rd *proto.Reader) error { + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + cmd.val = make([]ClusterShard, n) + + for i := 0; i < n; i++ { + m, err := rd.ReadMapLen() + if err != nil { + return err + } + + for j := 0; j < m; j++ { + key, err := rd.ReadString() + if err != nil { + return err + } + + switch key { + case "slots": + l, err := rd.ReadArrayLen() + if err != nil { + return err + } + for k := 0; k < l; k += 2 { + start, err := rd.ReadInt() + if err != nil { + return err + } + + end, err := rd.ReadInt() + if err != nil { + return err + } + + cmd.val[i].Slots = append(cmd.val[i].Slots, SlotRange{Start: start, End: end}) + } + case "nodes": + nodesLen, err := rd.ReadArrayLen() + if err != nil { + return err + } + cmd.val[i].Nodes = make([]Node, nodesLen) + for k := 0; k < nodesLen; k++ { + nodeMapLen, err := rd.ReadMapLen() + if err != nil { + return err + } + + for l := 0; l < nodeMapLen; l++ { + nodeKey, err := rd.ReadString() + if err != nil { + return err + } + + switch nodeKey { + case "id": + cmd.val[i].Nodes[k].ID, err = rd.ReadString() + case "endpoint": + cmd.val[i].Nodes[k].Endpoint, err = rd.ReadString() + case "ip": + cmd.val[i].Nodes[k].IP, err = rd.ReadString() + case "hostname": + cmd.val[i].Nodes[k].Hostname, err = rd.ReadString() + case "port": + cmd.val[i].Nodes[k].Port, err = rd.ReadInt() + case "tls-port": + cmd.val[i].Nodes[k].TLSPort, err = rd.ReadInt() + case "role": + cmd.val[i].Nodes[k].Role, err = rd.ReadString() + case "replication-offset": + cmd.val[i].Nodes[k].ReplicationOffset, err = rd.ReadInt() + case "health": + cmd.val[i].Nodes[k].Health, err = rd.ReadString() + default: + return fmt.Errorf("redis: unexpected key %q in CLUSTER SHARDS node reply", nodeKey) + } + + if err != nil { + return err + } + } + } + default: + return fmt.Errorf("redis: unexpected key %q in CLUSTER SHARDS reply", key) + } + } + } + + return nil +} + +// ----------------------------------------- + +type RankScore struct { + Rank int64 + Score float64 +} + +type RankWithScoreCmd struct { + baseCmd + + val RankScore +} + +var _ Cmder = (*RankWithScoreCmd)(nil) + +func NewRankWithScoreCmd(ctx context.Context, args ...interface{}) *RankWithScoreCmd { + return &RankWithScoreCmd{ + baseCmd: baseCmd{ + ctx: 
ctx, + args: args, + }, + } +} + +func (cmd *RankWithScoreCmd) SetVal(val RankScore) { + cmd.val = val +} + +func (cmd *RankWithScoreCmd) Val() RankScore { + return cmd.val +} + +func (cmd *RankWithScoreCmd) Result() (RankScore, error) { + return cmd.val, cmd.err +} + +func (cmd *RankWithScoreCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *RankWithScoreCmd) readReply(rd *proto.Reader) error { + if err := rd.ReadFixedArrayLen(2); err != nil { + return err + } + + rank, err := rd.ReadInt() + if err != nil { + return err + } + + score, err := rd.ReadFloat() + if err != nil { + return err + } + + cmd.val = RankScore{Rank: rank, Score: score} + + return nil +} + +// -------------------------------------------------------------------------------------------------- + +// ClientFlags is redis-server client flags, copy from redis/src/server.h (redis 7.0) +type ClientFlags uint64 + +const ( + ClientSlave ClientFlags = 1 << 0 /* This client is a replica */ + ClientMaster ClientFlags = 1 << 1 /* This client is a master */ + ClientMonitor ClientFlags = 1 << 2 /* This client is a slave monitor, see MONITOR */ + ClientMulti ClientFlags = 1 << 3 /* This client is in a MULTI context */ + ClientBlocked ClientFlags = 1 << 4 /* The client is waiting in a blocking operation */ + ClientDirtyCAS ClientFlags = 1 << 5 /* Watched keys modified. EXEC will fail. */ + ClientCloseAfterReply ClientFlags = 1 << 6 /* Close after writing entire reply. */ + ClientUnBlocked ClientFlags = 1 << 7 /* This client was unblocked and is stored in server.unblocked_clients */ + ClientScript ClientFlags = 1 << 8 /* This is a non-connected client used by Lua */ + ClientAsking ClientFlags = 1 << 9 /* Client issued the ASKING command */ + ClientCloseASAP ClientFlags = 1 << 10 /* Close this client ASAP */ + ClientUnixSocket ClientFlags = 1 << 11 /* Client connected via Unix domain socket */ + ClientDirtyExec ClientFlags = 1 << 12 /* EXEC will fail for errors while queueing */ + ClientMasterForceReply ClientFlags = 1 << 13 /* Queue replies even if is master */ + ClientForceAOF ClientFlags = 1 << 14 /* Force AOF propagation of current cmd. */ + ClientForceRepl ClientFlags = 1 << 15 /* Force replication of current cmd. */ + ClientPrePSync ClientFlags = 1 << 16 /* Instance don't understand PSYNC. */ + ClientReadOnly ClientFlags = 1 << 17 /* Cluster client is in read-only state. */ + ClientPubSub ClientFlags = 1 << 18 /* Client is in Pub/Sub mode. */ + ClientPreventAOFProp ClientFlags = 1 << 19 /* Don't propagate to AOF. */ + ClientPreventReplProp ClientFlags = 1 << 20 /* Don't propagate to slaves. */ + ClientPreventProp ClientFlags = ClientPreventAOFProp | ClientPreventReplProp + ClientPendingWrite ClientFlags = 1 << 21 /* Client has output to send but a-write handler is yet not installed. */ + ClientReplyOff ClientFlags = 1 << 22 /* Don't send replies to client. */ + ClientReplySkipNext ClientFlags = 1 << 23 /* Set ClientREPLY_SKIP for next cmd */ + ClientReplySkip ClientFlags = 1 << 24 /* Don't send just this reply. */ + ClientLuaDebug ClientFlags = 1 << 25 /* Run EVAL in debug mode. */ + ClientLuaDebugSync ClientFlags = 1 << 26 /* EVAL debugging without fork() */ + ClientModule ClientFlags = 1 << 27 /* Non connected client used by some module. */ + ClientProtected ClientFlags = 1 << 28 /* Client should not be freed for now. */ + ClientExecutingCommand ClientFlags = 1 << 29 /* Indicates that the client is currently in the process of handling + a command. 
usually this will be marked only during call() + however, blocked clients might have this flag kept until they + will try to reprocess the command. */ + ClientPendingCommand ClientFlags = 1 << 30 /* Indicates the client has a fully * parsed command ready for execution. */ + ClientTracking ClientFlags = 1 << 31 /* Client enabled keys tracking in order to perform client side caching. */ + ClientTrackingBrokenRedir ClientFlags = 1 << 32 /* Target client is invalid. */ + ClientTrackingBCAST ClientFlags = 1 << 33 /* Tracking in BCAST mode. */ + ClientTrackingOptIn ClientFlags = 1 << 34 /* Tracking in opt-in mode. */ + ClientTrackingOptOut ClientFlags = 1 << 35 /* Tracking in opt-out mode. */ + ClientTrackingCaching ClientFlags = 1 << 36 /* CACHING yes/no was given, depending on optin/optout mode. */ + ClientTrackingNoLoop ClientFlags = 1 << 37 /* Don't send invalidation messages about writes performed by myself.*/ + ClientInTimeoutTable ClientFlags = 1 << 38 /* This client is in the timeout table. */ + ClientProtocolError ClientFlags = 1 << 39 /* Protocol error chatting with it. */ + ClientCloseAfterCommand ClientFlags = 1 << 40 /* Close after executing commands * and writing entire reply. */ + ClientDenyBlocking ClientFlags = 1 << 41 /* Indicate that the client should not be blocked. currently, turned on inside MULTI, Lua, RM_Call, and AOF client */ + ClientReplRDBOnly ClientFlags = 1 << 42 /* This client is a replica that only wants RDB without replication buffer. */ + ClientNoEvict ClientFlags = 1 << 43 /* This client is protected against client memory eviction. */ + ClientAllowOOM ClientFlags = 1 << 44 /* Client used by RM_Call is allowed to fully execute scripts even when in OOM */ + ClientNoTouch ClientFlags = 1 << 45 /* This client will not touch LFU/LRU stats. */ + ClientPushing ClientFlags = 1 << 46 /* This client is pushing notifications. 
*/ +) + +// ClientInfo is redis-server ClientInfo, not go-redis *Client +type ClientInfo struct { + ID int64 // redis version 2.8.12, a unique 64-bit client ID + Addr string // address/port of the client + LAddr string // address/port of local address client connected to (bind address) + FD int64 // file descriptor corresponding to the socket + Name string // the name set by the client with CLIENT SETNAME + Age time.Duration // total duration of the connection in seconds + Idle time.Duration // idle time of the connection in seconds + Flags ClientFlags // client flags (see below) + DB int // current database ID + Sub int // number of channel subscriptions + PSub int // number of pattern matching subscriptions + SSub int // redis version 7.0.3, number of shard channel subscriptions + Multi int // number of commands in a MULTI/EXEC context + QueryBuf int // qbuf, query buffer length (0 means no query pending) + QueryBufFree int // qbuf-free, free space of the query buffer (0 means the buffer is full) + ArgvMem int // incomplete arguments for the next command (already extracted from query buffer) + MultiMem int // redis version 7.0, memory is used up by buffered multi commands + BufferSize int // rbs, usable size of buffer + BufferPeak int // rbp, peak used size of buffer in last 5 sec interval + OutputBufferLength int // obl, output buffer length + OutputListLength int // oll, output list length (replies are queued in this list when the buffer is full) + OutputMemory int // omem, output buffer memory usage + TotalMemory int // tot-mem, total memory consumed by this client in its various buffers + Events string // file descriptor events (see below) + LastCmd string // cmd, last command played + User string // the authenticated username of the client + Redir int64 // client id of current client tracking redirection + Resp int // redis version 7.0, client RESP protocol version + LibName string // redis version 7.2, client library name + LibVer string // redis version 7.2, client library version +} + +type ClientInfoCmd struct { + baseCmd + + val *ClientInfo +} + +var _ Cmder = (*ClientInfoCmd)(nil) + +func NewClientInfoCmd(ctx context.Context, args ...interface{}) *ClientInfoCmd { + return &ClientInfoCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *ClientInfoCmd) SetVal(val *ClientInfo) { + cmd.val = val +} + +func (cmd *ClientInfoCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *ClientInfoCmd) Val() *ClientInfo { + return cmd.val +} + +func (cmd *ClientInfoCmd) Result() (*ClientInfo, error) { + return cmd.val, cmd.err +} + +func (cmd *ClientInfoCmd) readReply(rd *proto.Reader) (err error) { + txt, err := rd.ReadString() + if err != nil { + return err + } + + // sds o = catClientInfoString(sdsempty(), c); + // o = sdscatlen(o,"\n",1); + // addReplyVerbatim(c,o,sdslen(o),"txt"); + // sdsfree(o); + cmd.val, err = parseClientInfo(strings.TrimSpace(txt)) + return err +} + +// fmt.Sscanf() cannot handle null values +func parseClientInfo(txt string) (info *ClientInfo, err error) { + info = &ClientInfo{} + for _, s := range strings.Split(txt, " ") { + kv := strings.Split(s, "=") + if len(kv) != 2 { + return nil, fmt.Errorf("redis: unexpected client info data (%s)", s) + } + key, val := kv[0], kv[1] + + switch key { + case "id": + info.ID, err = strconv.ParseInt(val, 10, 64) + case "addr": + info.Addr = val + case "laddr": + info.LAddr = val + case "fd": + info.FD, err = strconv.ParseInt(val, 10, 64) + case "name": + info.Name = val + case "age": + var 
age int + if age, err = strconv.Atoi(val); err == nil { + info.Age = time.Duration(age) * time.Second + } + case "idle": + var idle int + if idle, err = strconv.Atoi(val); err == nil { + info.Idle = time.Duration(idle) * time.Second + } + case "flags": + if val == "N" { + break + } + + for i := 0; i < len(val); i++ { + switch val[i] { + case 'S': + info.Flags |= ClientSlave + case 'O': + info.Flags |= ClientSlave | ClientMonitor + case 'M': + info.Flags |= ClientMaster + case 'P': + info.Flags |= ClientPubSub + case 'x': + info.Flags |= ClientMulti + case 'b': + info.Flags |= ClientBlocked + case 't': + info.Flags |= ClientTracking + case 'R': + info.Flags |= ClientTrackingBrokenRedir + case 'B': + info.Flags |= ClientTrackingBCAST + case 'd': + info.Flags |= ClientDirtyCAS + case 'c': + info.Flags |= ClientCloseAfterCommand + case 'u': + info.Flags |= ClientUnBlocked + case 'A': + info.Flags |= ClientCloseASAP + case 'U': + info.Flags |= ClientUnixSocket + case 'r': + info.Flags |= ClientReadOnly + case 'e': + info.Flags |= ClientNoEvict + case 'T': + info.Flags |= ClientNoTouch + default: + return nil, fmt.Errorf("redis: unexpected client info flags(%s)", string(val[i])) + } + } + case "db": + info.DB, err = strconv.Atoi(val) + case "sub": + info.Sub, err = strconv.Atoi(val) + case "psub": + info.PSub, err = strconv.Atoi(val) + case "ssub": + info.SSub, err = strconv.Atoi(val) + case "multi": + info.Multi, err = strconv.Atoi(val) + case "qbuf": + info.QueryBuf, err = strconv.Atoi(val) + case "qbuf-free": + info.QueryBufFree, err = strconv.Atoi(val) + case "argv-mem": + info.ArgvMem, err = strconv.Atoi(val) + case "multi-mem": + info.MultiMem, err = strconv.Atoi(val) + case "rbs": + info.BufferSize, err = strconv.Atoi(val) + case "rbp": + info.BufferPeak, err = strconv.Atoi(val) + case "obl": + info.OutputBufferLength, err = strconv.Atoi(val) + case "oll": + info.OutputListLength, err = strconv.Atoi(val) + case "omem": + info.OutputMemory, err = strconv.Atoi(val) + case "tot-mem": + info.TotalMemory, err = strconv.Atoi(val) + case "events": + info.Events = val + case "cmd": + info.LastCmd = val + case "user": + info.User = val + case "redir": + info.Redir, err = strconv.ParseInt(val, 10, 64) + case "resp": + info.Resp, err = strconv.Atoi(val) + case "lib-name": + info.LibName = val + case "lib-ver": + info.LibVer = val + default: + return nil, fmt.Errorf("redis: unexpected client info key(%s)", key) + } + + if err != nil { + return nil, err + } + } + + return info, nil +} + +// ------------------------------------------- + +type ACLLogEntry struct { + Count int64 + Reason string + Context string + Object string + Username string + AgeSeconds float64 + ClientInfo *ClientInfo + EntryID int64 + TimestampCreated int64 + TimestampLastUpdated int64 +} + +type ACLLogCmd struct { + baseCmd + + val []*ACLLogEntry +} + +var _ Cmder = (*ACLLogCmd)(nil) + +func NewACLLogCmd(ctx context.Context, args ...interface{}) *ACLLogCmd { + return &ACLLogCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *ACLLogCmd) SetVal(val []*ACLLogEntry) { + cmd.val = val +} + +func (cmd *ACLLogCmd) Val() []*ACLLogEntry { + return cmd.val +} + +func (cmd *ACLLogCmd) Result() ([]*ACLLogEntry, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *ACLLogCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *ACLLogCmd) readReply(rd *proto.Reader) error { + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + + cmd.val = make([]*ACLLogEntry, n) + for i := 0; i < n; i++ { 
+ cmd.val[i] = &ACLLogEntry{} + entry := cmd.val[i] + respLen, err := rd.ReadMapLen() + if err != nil { + return err + } + for j := 0; j < respLen; j++ { + key, err := rd.ReadString() + if err != nil { + return err + } + + switch key { + case "count": + entry.Count, err = rd.ReadInt() + case "reason": + entry.Reason, err = rd.ReadString() + case "context": + entry.Context, err = rd.ReadString() + case "object": + entry.Object, err = rd.ReadString() + case "username": + entry.Username, err = rd.ReadString() + case "age-seconds": + entry.AgeSeconds, err = rd.ReadFloat() + case "client-info": + txt, err := rd.ReadString() + if err != nil { + return err + } + entry.ClientInfo, err = parseClientInfo(strings.TrimSpace(txt)) + if err != nil { + return err + } + case "entry-id": + entry.EntryID, err = rd.ReadInt() + case "timestamp-created": + entry.TimestampCreated, err = rd.ReadInt() + case "timestamp-last-updated": + entry.TimestampLastUpdated, err = rd.ReadInt() + default: + return fmt.Errorf("redis: unexpected key %q in ACL LOG reply", key) + } + + if err != nil { + return err + } + } + } + + return nil +} diff --git a/vendor/github.com/redis/go-redis/v9/commands.go b/vendor/github.com/redis/go-redis/v9/commands.go new file mode 100644 index 00000000..07c8e2c8 --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/commands.go @@ -0,0 +1,3970 @@ +package redis + +import ( + "context" + "encoding" + "errors" + "io" + "net" + "reflect" + "strings" + "time" + + "github.com/redis/go-redis/v9/internal" +) + +// KeepTTL is a Redis KEEPTTL option to keep existing TTL, it requires your redis-server version >= 6.0, +// otherwise you will receive an error: (error) ERR syntax error. +// For example: +// +// rdb.Set(ctx, key, value, redis.KeepTTL) +const KeepTTL = -1 + +func usePrecise(dur time.Duration) bool { + return dur < time.Second || dur%time.Second != 0 +} + +func formatMs(ctx context.Context, dur time.Duration) int64 { + if dur > 0 && dur < time.Millisecond { + internal.Logger.Printf( + ctx, + "specified duration is %s, but minimal supported value is %s - truncating to 1ms", + dur, time.Millisecond, + ) + return 1 + } + return int64(dur / time.Millisecond) +} + +func formatSec(ctx context.Context, dur time.Duration) int64 { + if dur > 0 && dur < time.Second { + internal.Logger.Printf( + ctx, + "specified duration is %s, but minimal supported value is %s - truncating to 1s", + dur, time.Second, + ) + return 1 + } + return int64(dur / time.Second) +} + +func appendArgs(dst, src []interface{}) []interface{} { + if len(src) == 1 { + return appendArg(dst, src[0]) + } + + dst = append(dst, src...) + return dst +} + +func appendArg(dst []interface{}, arg interface{}) []interface{} { + switch arg := arg.(type) { + case []string: + for _, s := range arg { + dst = append(dst, s) + } + return dst + case []interface{}: + dst = append(dst, arg...) 
+ return dst + case map[string]interface{}: + for k, v := range arg { + dst = append(dst, k, v) + } + return dst + case map[string]string: + for k, v := range arg { + dst = append(dst, k, v) + } + return dst + case time.Time, time.Duration, encoding.BinaryMarshaler, net.IP: + return append(dst, arg) + default: + // scan struct field + v := reflect.ValueOf(arg) + if v.Type().Kind() == reflect.Ptr { + if v.IsNil() { + // error: arg is not a valid object + return dst + } + v = v.Elem() + } + + if v.Type().Kind() == reflect.Struct { + return appendStructField(dst, v) + } + + return append(dst, arg) + } +} + +// appendStructField appends the field and value held by the structure v to dst, and returns the appended dst. +func appendStructField(dst []interface{}, v reflect.Value) []interface{} { + typ := v.Type() + for i := 0; i < typ.NumField(); i++ { + tag := typ.Field(i).Tag.Get("redis") + if tag == "" || tag == "-" { + continue + } + name, opt, _ := strings.Cut(tag, ",") + if name == "" { + continue + } + + field := v.Field(i) + + // miss field + if omitEmpty(opt) && isEmptyValue(field) { + continue + } + + if field.CanInterface() { + dst = append(dst, name, field.Interface()) + } + } + + return dst +} + +func omitEmpty(opt string) bool { + for opt != "" { + var name string + name, opt, _ = strings.Cut(opt, ",") + if name == "omitempty" { + return true + } + } + return false +} + +func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Pointer: + return v.IsNil() + } + return false +} + +type Cmdable interface { + Pipeline() Pipeliner + Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) + + TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) + TxPipeline() Pipeliner + + Command(ctx context.Context) *CommandsInfoCmd + CommandList(ctx context.Context, filter *FilterBy) *StringSliceCmd + CommandGetKeys(ctx context.Context, commands ...interface{}) *StringSliceCmd + CommandGetKeysAndFlags(ctx context.Context, commands ...interface{}) *KeyFlagsCmd + ClientGetName(ctx context.Context) *StringCmd + Echo(ctx context.Context, message interface{}) *StringCmd + Ping(ctx context.Context) *StatusCmd + Quit(ctx context.Context) *StatusCmd + Del(ctx context.Context, keys ...string) *IntCmd + Unlink(ctx context.Context, keys ...string) *IntCmd + Dump(ctx context.Context, key string) *StringCmd + Exists(ctx context.Context, keys ...string) *IntCmd + Expire(ctx context.Context, key string, expiration time.Duration) *BoolCmd + ExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd + ExpireTime(ctx context.Context, key string) *DurationCmd + ExpireNX(ctx context.Context, key string, expiration time.Duration) *BoolCmd + ExpireXX(ctx context.Context, key string, expiration time.Duration) *BoolCmd + ExpireGT(ctx context.Context, key string, expiration time.Duration) *BoolCmd + ExpireLT(ctx context.Context, key string, expiration time.Duration) *BoolCmd + Keys(ctx context.Context, pattern string) *StringSliceCmd + Migrate(ctx context.Context, host, port, key string, db int, timeout time.Duration) *StatusCmd + 
Move(ctx context.Context, key string, db int) *BoolCmd + ObjectRefCount(ctx context.Context, key string) *IntCmd + ObjectEncoding(ctx context.Context, key string) *StringCmd + ObjectIdleTime(ctx context.Context, key string) *DurationCmd + Persist(ctx context.Context, key string) *BoolCmd + PExpire(ctx context.Context, key string, expiration time.Duration) *BoolCmd + PExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd + PExpireTime(ctx context.Context, key string) *DurationCmd + PTTL(ctx context.Context, key string) *DurationCmd + RandomKey(ctx context.Context) *StringCmd + Rename(ctx context.Context, key, newkey string) *StatusCmd + RenameNX(ctx context.Context, key, newkey string) *BoolCmd + Restore(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd + RestoreReplace(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd + Sort(ctx context.Context, key string, sort *Sort) *StringSliceCmd + SortRO(ctx context.Context, key string, sort *Sort) *StringSliceCmd + SortStore(ctx context.Context, key, store string, sort *Sort) *IntCmd + SortInterfaces(ctx context.Context, key string, sort *Sort) *SliceCmd + Touch(ctx context.Context, keys ...string) *IntCmd + TTL(ctx context.Context, key string) *DurationCmd + Type(ctx context.Context, key string) *StatusCmd + Append(ctx context.Context, key, value string) *IntCmd + Decr(ctx context.Context, key string) *IntCmd + DecrBy(ctx context.Context, key string, decrement int64) *IntCmd + Get(ctx context.Context, key string) *StringCmd + GetRange(ctx context.Context, key string, start, end int64) *StringCmd + GetSet(ctx context.Context, key string, value interface{}) *StringCmd + GetEx(ctx context.Context, key string, expiration time.Duration) *StringCmd + GetDel(ctx context.Context, key string) *StringCmd + Incr(ctx context.Context, key string) *IntCmd + IncrBy(ctx context.Context, key string, value int64) *IntCmd + IncrByFloat(ctx context.Context, key string, value float64) *FloatCmd + MGet(ctx context.Context, keys ...string) *SliceCmd + MSet(ctx context.Context, values ...interface{}) *StatusCmd + MSetNX(ctx context.Context, values ...interface{}) *BoolCmd + Set(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd + SetArgs(ctx context.Context, key string, value interface{}, a SetArgs) *StatusCmd + SetEx(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd + SetNX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd + SetXX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd + SetRange(ctx context.Context, key string, offset int64, value string) *IntCmd + StrLen(ctx context.Context, key string) *IntCmd + Copy(ctx context.Context, sourceKey string, destKey string, db int, replace bool) *IntCmd + + GetBit(ctx context.Context, key string, offset int64) *IntCmd + SetBit(ctx context.Context, key string, offset int64, value int) *IntCmd + BitCount(ctx context.Context, key string, bitCount *BitCount) *IntCmd + BitOpAnd(ctx context.Context, destKey string, keys ...string) *IntCmd + BitOpOr(ctx context.Context, destKey string, keys ...string) *IntCmd + BitOpXor(ctx context.Context, destKey string, keys ...string) *IntCmd + BitOpNot(ctx context.Context, destKey string, key string) *IntCmd + BitPos(ctx context.Context, key string, bit int64, pos ...int64) *IntCmd + BitPosSpan(ctx context.Context, key string, bit int8, start, end int64, span string) *IntCmd + 
BitField(ctx context.Context, key string, args ...interface{}) *IntSliceCmd + + Scan(ctx context.Context, cursor uint64, match string, count int64) *ScanCmd + ScanType(ctx context.Context, cursor uint64, match string, count int64, keyType string) *ScanCmd + SScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd + HScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd + ZScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd + + HDel(ctx context.Context, key string, fields ...string) *IntCmd + HExists(ctx context.Context, key, field string) *BoolCmd + HGet(ctx context.Context, key, field string) *StringCmd + HGetAll(ctx context.Context, key string) *MapStringStringCmd + HIncrBy(ctx context.Context, key, field string, incr int64) *IntCmd + HIncrByFloat(ctx context.Context, key, field string, incr float64) *FloatCmd + HKeys(ctx context.Context, key string) *StringSliceCmd + HLen(ctx context.Context, key string) *IntCmd + HMGet(ctx context.Context, key string, fields ...string) *SliceCmd + HSet(ctx context.Context, key string, values ...interface{}) *IntCmd + HMSet(ctx context.Context, key string, values ...interface{}) *BoolCmd + HSetNX(ctx context.Context, key, field string, value interface{}) *BoolCmd + HVals(ctx context.Context, key string) *StringSliceCmd + HRandField(ctx context.Context, key string, count int) *StringSliceCmd + HRandFieldWithValues(ctx context.Context, key string, count int) *KeyValueSliceCmd + + BLPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd + BLMPop(ctx context.Context, timeout time.Duration, direction string, count int64, keys ...string) *KeyValuesCmd + BRPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd + BRPopLPush(ctx context.Context, source, destination string, timeout time.Duration) *StringCmd + LCS(ctx context.Context, q *LCSQuery) *LCSCmd + LIndex(ctx context.Context, key string, index int64) *StringCmd + LInsert(ctx context.Context, key, op string, pivot, value interface{}) *IntCmd + LInsertBefore(ctx context.Context, key string, pivot, value interface{}) *IntCmd + LInsertAfter(ctx context.Context, key string, pivot, value interface{}) *IntCmd + LLen(ctx context.Context, key string) *IntCmd + LMPop(ctx context.Context, direction string, count int64, keys ...string) *KeyValuesCmd + LPop(ctx context.Context, key string) *StringCmd + LPopCount(ctx context.Context, key string, count int) *StringSliceCmd + LPos(ctx context.Context, key string, value string, args LPosArgs) *IntCmd + LPosCount(ctx context.Context, key string, value string, count int64, args LPosArgs) *IntSliceCmd + LPush(ctx context.Context, key string, values ...interface{}) *IntCmd + LPushX(ctx context.Context, key string, values ...interface{}) *IntCmd + LRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd + LRem(ctx context.Context, key string, count int64, value interface{}) *IntCmd + LSet(ctx context.Context, key string, index int64, value interface{}) *StatusCmd + LTrim(ctx context.Context, key string, start, stop int64) *StatusCmd + RPop(ctx context.Context, key string) *StringCmd + RPopCount(ctx context.Context, key string, count int) *StringSliceCmd + RPopLPush(ctx context.Context, source, destination string) *StringCmd + RPush(ctx context.Context, key string, values ...interface{}) *IntCmd + RPushX(ctx context.Context, key string, values ...interface{}) *IntCmd + LMove(ctx context.Context, source, 
destination, srcpos, destpos string) *StringCmd + BLMove(ctx context.Context, source, destination, srcpos, destpos string, timeout time.Duration) *StringCmd + + SAdd(ctx context.Context, key string, members ...interface{}) *IntCmd + SCard(ctx context.Context, key string) *IntCmd + SDiff(ctx context.Context, keys ...string) *StringSliceCmd + SDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd + SInter(ctx context.Context, keys ...string) *StringSliceCmd + SInterCard(ctx context.Context, limit int64, keys ...string) *IntCmd + SInterStore(ctx context.Context, destination string, keys ...string) *IntCmd + SIsMember(ctx context.Context, key string, member interface{}) *BoolCmd + SMIsMember(ctx context.Context, key string, members ...interface{}) *BoolSliceCmd + SMembers(ctx context.Context, key string) *StringSliceCmd + SMembersMap(ctx context.Context, key string) *StringStructMapCmd + SMove(ctx context.Context, source, destination string, member interface{}) *BoolCmd + SPop(ctx context.Context, key string) *StringCmd + SPopN(ctx context.Context, key string, count int64) *StringSliceCmd + SRandMember(ctx context.Context, key string) *StringCmd + SRandMemberN(ctx context.Context, key string, count int64) *StringSliceCmd + SRem(ctx context.Context, key string, members ...interface{}) *IntCmd + SUnion(ctx context.Context, keys ...string) *StringSliceCmd + SUnionStore(ctx context.Context, destination string, keys ...string) *IntCmd + + XAdd(ctx context.Context, a *XAddArgs) *StringCmd + XDel(ctx context.Context, stream string, ids ...string) *IntCmd + XLen(ctx context.Context, stream string) *IntCmd + XRange(ctx context.Context, stream, start, stop string) *XMessageSliceCmd + XRangeN(ctx context.Context, stream, start, stop string, count int64) *XMessageSliceCmd + XRevRange(ctx context.Context, stream string, start, stop string) *XMessageSliceCmd + XRevRangeN(ctx context.Context, stream string, start, stop string, count int64) *XMessageSliceCmd + XRead(ctx context.Context, a *XReadArgs) *XStreamSliceCmd + XReadStreams(ctx context.Context, streams ...string) *XStreamSliceCmd + XGroupCreate(ctx context.Context, stream, group, start string) *StatusCmd + XGroupCreateMkStream(ctx context.Context, stream, group, start string) *StatusCmd + XGroupSetID(ctx context.Context, stream, group, start string) *StatusCmd + XGroupDestroy(ctx context.Context, stream, group string) *IntCmd + XGroupCreateConsumer(ctx context.Context, stream, group, consumer string) *IntCmd + XGroupDelConsumer(ctx context.Context, stream, group, consumer string) *IntCmd + XReadGroup(ctx context.Context, a *XReadGroupArgs) *XStreamSliceCmd + XAck(ctx context.Context, stream, group string, ids ...string) *IntCmd + XPending(ctx context.Context, stream, group string) *XPendingCmd + XPendingExt(ctx context.Context, a *XPendingExtArgs) *XPendingExtCmd + XClaim(ctx context.Context, a *XClaimArgs) *XMessageSliceCmd + XClaimJustID(ctx context.Context, a *XClaimArgs) *StringSliceCmd + XAutoClaim(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimCmd + XAutoClaimJustID(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimJustIDCmd + XTrimMaxLen(ctx context.Context, key string, maxLen int64) *IntCmd + XTrimMaxLenApprox(ctx context.Context, key string, maxLen, limit int64) *IntCmd + XTrimMinID(ctx context.Context, key string, minID string) *IntCmd + XTrimMinIDApprox(ctx context.Context, key string, minID string, limit int64) *IntCmd + XInfoGroups(ctx context.Context, key string) *XInfoGroupsCmd + XInfoStream(ctx 
context.Context, key string) *XInfoStreamCmd + XInfoStreamFull(ctx context.Context, key string, count int) *XInfoStreamFullCmd + XInfoConsumers(ctx context.Context, key string, group string) *XInfoConsumersCmd + + BZPopMax(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd + BZPopMin(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd + BZMPop(ctx context.Context, timeout time.Duration, order string, count int64, keys ...string) *ZSliceWithKeyCmd + + ZAdd(ctx context.Context, key string, members ...Z) *IntCmd + ZAddLT(ctx context.Context, key string, members ...Z) *IntCmd + ZAddGT(ctx context.Context, key string, members ...Z) *IntCmd + ZAddNX(ctx context.Context, key string, members ...Z) *IntCmd + ZAddXX(ctx context.Context, key string, members ...Z) *IntCmd + ZAddArgs(ctx context.Context, key string, args ZAddArgs) *IntCmd + ZAddArgsIncr(ctx context.Context, key string, args ZAddArgs) *FloatCmd + ZCard(ctx context.Context, key string) *IntCmd + ZCount(ctx context.Context, key, min, max string) *IntCmd + ZLexCount(ctx context.Context, key, min, max string) *IntCmd + ZIncrBy(ctx context.Context, key string, increment float64, member string) *FloatCmd + ZInter(ctx context.Context, store *ZStore) *StringSliceCmd + ZInterWithScores(ctx context.Context, store *ZStore) *ZSliceCmd + ZInterCard(ctx context.Context, limit int64, keys ...string) *IntCmd + ZInterStore(ctx context.Context, destination string, store *ZStore) *IntCmd + ZMPop(ctx context.Context, order string, count int64, keys ...string) *ZSliceWithKeyCmd + ZMScore(ctx context.Context, key string, members ...string) *FloatSliceCmd + ZPopMax(ctx context.Context, key string, count ...int64) *ZSliceCmd + ZPopMin(ctx context.Context, key string, count ...int64) *ZSliceCmd + ZRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd + ZRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd + ZRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd + ZRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd + ZRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd + ZRangeArgs(ctx context.Context, z ZRangeArgs) *StringSliceCmd + ZRangeArgsWithScores(ctx context.Context, z ZRangeArgs) *ZSliceCmd + ZRangeStore(ctx context.Context, dst string, z ZRangeArgs) *IntCmd + ZRank(ctx context.Context, key, member string) *IntCmd + ZRankWithScore(ctx context.Context, key, member string) *RankWithScoreCmd + ZRem(ctx context.Context, key string, members ...interface{}) *IntCmd + ZRemRangeByRank(ctx context.Context, key string, start, stop int64) *IntCmd + ZRemRangeByScore(ctx context.Context, key, min, max string) *IntCmd + ZRemRangeByLex(ctx context.Context, key, min, max string) *IntCmd + ZRevRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd + ZRevRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd + ZRevRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd + ZRevRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd + ZRevRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd + ZRevRank(ctx context.Context, key, member string) *IntCmd + ZRevRankWithScore(ctx context.Context, key, member string) *RankWithScoreCmd + ZScore(ctx context.Context, key, member string) *FloatCmd + ZUnionStore(ctx context.Context, dest string, store *ZStore) *IntCmd + ZRandMember(ctx context.Context, key string, 
count int) *StringSliceCmd + ZRandMemberWithScores(ctx context.Context, key string, count int) *ZSliceCmd + ZUnion(ctx context.Context, store ZStore) *StringSliceCmd + ZUnionWithScores(ctx context.Context, store ZStore) *ZSliceCmd + ZDiff(ctx context.Context, keys ...string) *StringSliceCmd + ZDiffWithScores(ctx context.Context, keys ...string) *ZSliceCmd + ZDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd + + PFAdd(ctx context.Context, key string, els ...interface{}) *IntCmd + PFCount(ctx context.Context, keys ...string) *IntCmd + PFMerge(ctx context.Context, dest string, keys ...string) *StatusCmd + + BgRewriteAOF(ctx context.Context) *StatusCmd + BgSave(ctx context.Context) *StatusCmd + ClientKill(ctx context.Context, ipPort string) *StatusCmd + ClientKillByFilter(ctx context.Context, keys ...string) *IntCmd + ClientList(ctx context.Context) *StringCmd + ClientInfo(ctx context.Context) *ClientInfoCmd + ClientPause(ctx context.Context, dur time.Duration) *BoolCmd + ClientUnpause(ctx context.Context) *BoolCmd + ClientID(ctx context.Context) *IntCmd + ClientUnblock(ctx context.Context, id int64) *IntCmd + ClientUnblockWithError(ctx context.Context, id int64) *IntCmd + ConfigGet(ctx context.Context, parameter string) *MapStringStringCmd + ConfigResetStat(ctx context.Context) *StatusCmd + ConfigSet(ctx context.Context, parameter, value string) *StatusCmd + ConfigRewrite(ctx context.Context) *StatusCmd + DBSize(ctx context.Context) *IntCmd + FlushAll(ctx context.Context) *StatusCmd + FlushAllAsync(ctx context.Context) *StatusCmd + FlushDB(ctx context.Context) *StatusCmd + FlushDBAsync(ctx context.Context) *StatusCmd + Info(ctx context.Context, section ...string) *StringCmd + LastSave(ctx context.Context) *IntCmd + Save(ctx context.Context) *StatusCmd + Shutdown(ctx context.Context) *StatusCmd + ShutdownSave(ctx context.Context) *StatusCmd + ShutdownNoSave(ctx context.Context) *StatusCmd + SlaveOf(ctx context.Context, host, port string) *StatusCmd + SlowLogGet(ctx context.Context, num int64) *SlowLogCmd + Time(ctx context.Context) *TimeCmd + DebugObject(ctx context.Context, key string) *StringCmd + ReadOnly(ctx context.Context) *StatusCmd + ReadWrite(ctx context.Context) *StatusCmd + MemoryUsage(ctx context.Context, key string, samples ...int) *IntCmd + + Eval(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd + EvalSha(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd + EvalRO(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd + EvalShaRO(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd + ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd + ScriptFlush(ctx context.Context) *StatusCmd + ScriptKill(ctx context.Context) *StatusCmd + ScriptLoad(ctx context.Context, script string) *StringCmd + + FunctionLoad(ctx context.Context, code string) *StringCmd + FunctionLoadReplace(ctx context.Context, code string) *StringCmd + FunctionDelete(ctx context.Context, libName string) *StringCmd + FunctionFlush(ctx context.Context) *StringCmd + FunctionKill(ctx context.Context) *StringCmd + FunctionFlushAsync(ctx context.Context) *StringCmd + FunctionList(ctx context.Context, q FunctionListQuery) *FunctionListCmd + FunctionDump(ctx context.Context) *StringCmd + FunctionRestore(ctx context.Context, libDump string) *StringCmd + FunctionStats(ctx context.Context) *FunctionStatsCmd + FCall(ctx context.Context, function string, keys []string, args ...interface{}) *Cmd + 
FCallRo(ctx context.Context, function string, keys []string, args ...interface{}) *Cmd + FCallRO(ctx context.Context, function string, keys []string, args ...interface{}) *Cmd + + Publish(ctx context.Context, channel string, message interface{}) *IntCmd + SPublish(ctx context.Context, channel string, message interface{}) *IntCmd + PubSubChannels(ctx context.Context, pattern string) *StringSliceCmd + PubSubNumSub(ctx context.Context, channels ...string) *MapStringIntCmd + PubSubNumPat(ctx context.Context) *IntCmd + PubSubShardChannels(ctx context.Context, pattern string) *StringSliceCmd + PubSubShardNumSub(ctx context.Context, channels ...string) *MapStringIntCmd + + ClusterMyShardID(ctx context.Context) *StringCmd + ClusterSlots(ctx context.Context) *ClusterSlotsCmd + ClusterShards(ctx context.Context) *ClusterShardsCmd + ClusterLinks(ctx context.Context) *ClusterLinksCmd + ClusterNodes(ctx context.Context) *StringCmd + ClusterMeet(ctx context.Context, host, port string) *StatusCmd + ClusterForget(ctx context.Context, nodeID string) *StatusCmd + ClusterReplicate(ctx context.Context, nodeID string) *StatusCmd + ClusterResetSoft(ctx context.Context) *StatusCmd + ClusterResetHard(ctx context.Context) *StatusCmd + ClusterInfo(ctx context.Context) *StringCmd + ClusterKeySlot(ctx context.Context, key string) *IntCmd + ClusterGetKeysInSlot(ctx context.Context, slot int, count int) *StringSliceCmd + ClusterCountFailureReports(ctx context.Context, nodeID string) *IntCmd + ClusterCountKeysInSlot(ctx context.Context, slot int) *IntCmd + ClusterDelSlots(ctx context.Context, slots ...int) *StatusCmd + ClusterDelSlotsRange(ctx context.Context, min, max int) *StatusCmd + ClusterSaveConfig(ctx context.Context) *StatusCmd + ClusterSlaves(ctx context.Context, nodeID string) *StringSliceCmd + ClusterFailover(ctx context.Context) *StatusCmd + ClusterAddSlots(ctx context.Context, slots ...int) *StatusCmd + ClusterAddSlotsRange(ctx context.Context, min, max int) *StatusCmd + + GeoAdd(ctx context.Context, key string, geoLocation ...*GeoLocation) *IntCmd + GeoPos(ctx context.Context, key string, members ...string) *GeoPosCmd + GeoRadius(ctx context.Context, key string, longitude, latitude float64, query *GeoRadiusQuery) *GeoLocationCmd + GeoRadiusStore(ctx context.Context, key string, longitude, latitude float64, query *GeoRadiusQuery) *IntCmd + GeoRadiusByMember(ctx context.Context, key, member string, query *GeoRadiusQuery) *GeoLocationCmd + GeoRadiusByMemberStore(ctx context.Context, key, member string, query *GeoRadiusQuery) *IntCmd + GeoSearch(ctx context.Context, key string, q *GeoSearchQuery) *StringSliceCmd + GeoSearchLocation(ctx context.Context, key string, q *GeoSearchLocationQuery) *GeoSearchLocationCmd + GeoSearchStore(ctx context.Context, key, store string, q *GeoSearchStoreQuery) *IntCmd + GeoDist(ctx context.Context, key string, member1, member2, unit string) *FloatCmd + GeoHash(ctx context.Context, key string, members ...string) *StringSliceCmd + + ACLDryRun(ctx context.Context, username string, command ...interface{}) *StringCmd + ACLLog(ctx context.Context, count int64) *ACLLogCmd + ACLLogReset(ctx context.Context) *StatusCmd + + ModuleLoadex(ctx context.Context, conf *ModuleLoadexConfig) *StringCmd + + gearsCmdable + probabilisticCmdable +} + +type StatefulCmdable interface { + Cmdable + Auth(ctx context.Context, password string) *StatusCmd + AuthACL(ctx context.Context, username, password string) *StatusCmd + Select(ctx context.Context, index int) *StatusCmd + SwapDB(ctx context.Context, 
index1, index2 int) *StatusCmd + ClientSetName(ctx context.Context, name string) *BoolCmd + Hello(ctx context.Context, ver int, username, password, clientName string) *MapStringInterfaceCmd +} + +var ( + _ Cmdable = (*Client)(nil) + _ Cmdable = (*Tx)(nil) + _ Cmdable = (*Ring)(nil) + _ Cmdable = (*ClusterClient)(nil) +) + +type cmdable func(ctx context.Context, cmd Cmder) error + +type statefulCmdable func(ctx context.Context, cmd Cmder) error + +//------------------------------------------------------------------------------ + +func (c statefulCmdable) Auth(ctx context.Context, password string) *StatusCmd { + cmd := NewStatusCmd(ctx, "auth", password) + _ = c(ctx, cmd) + return cmd +} + +// AuthACL Perform an AUTH command, using the given user and pass. +// Should be used to authenticate the current connection with one of the connections defined in the ACL list +// when connecting to a Redis 6.0 instance, or greater, that is using the Redis ACL system. +func (c statefulCmdable) AuthACL(ctx context.Context, username, password string) *StatusCmd { + cmd := NewStatusCmd(ctx, "auth", username, password) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Wait(ctx context.Context, numSlaves int, timeout time.Duration) *IntCmd { + cmd := NewIntCmd(ctx, "wait", numSlaves, int(timeout/time.Millisecond)) + cmd.setReadTimeout(timeout) + _ = c(ctx, cmd) + return cmd +} + +func (c statefulCmdable) Select(ctx context.Context, index int) *StatusCmd { + cmd := NewStatusCmd(ctx, "select", index) + _ = c(ctx, cmd) + return cmd +} + +func (c statefulCmdable) SwapDB(ctx context.Context, index1, index2 int) *StatusCmd { + cmd := NewStatusCmd(ctx, "swapdb", index1, index2) + _ = c(ctx, cmd) + return cmd +} + +// ClientSetName assigns a name to the connection. +func (c statefulCmdable) ClientSetName(ctx context.Context, name string) *BoolCmd { + cmd := NewBoolCmd(ctx, "client", "setname", name) + _ = c(ctx, cmd) + return cmd +} + +// Hello Set the resp protocol used. +func (c statefulCmdable) Hello(ctx context.Context, + ver int, username, password, clientName string) *MapStringInterfaceCmd { + args := make([]interface{}, 0, 7) + args = append(args, "hello", ver) + if password != "" { + if username != "" { + args = append(args, "auth", username, password) + } else { + args = append(args, "auth", "default", password) + } + } + if clientName != "" { + args = append(args, "setname", clientName) + } + cmd := NewMapStringInterfaceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c cmdable) Command(ctx context.Context) *CommandsInfoCmd { + cmd := NewCommandsInfoCmd(ctx, "command") + _ = c(ctx, cmd) + return cmd +} + +// FilterBy is used for the `CommandList` command parameter. +type FilterBy struct { + Module string + ACLCat string + Pattern string +} + +func (c cmdable) CommandList(ctx context.Context, filter *FilterBy) *StringSliceCmd { + args := make([]interface{}, 0, 5) + args = append(args, "command", "list") + if filter != nil { + if filter.Module != "" { + args = append(args, "filterby", "module", filter.Module) + } else if filter.ACLCat != "" { + args = append(args, "filterby", "aclcat", filter.ACLCat) + } else if filter.Pattern != "" { + args = append(args, "filterby", "pattern", filter.Pattern) + } + } + cmd := NewStringSliceCmd(ctx, args...) 
+ _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) CommandGetKeys(ctx context.Context, commands ...interface{}) *StringSliceCmd { + args := make([]interface{}, 2+len(commands)) + args[0] = "command" + args[1] = "getkeys" + copy(args[2:], commands) + cmd := NewStringSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) CommandGetKeysAndFlags(ctx context.Context, commands ...interface{}) *KeyFlagsCmd { + args := make([]interface{}, 2+len(commands)) + args[0] = "command" + args[1] = "getkeysandflags" + copy(args[2:], commands) + cmd := NewKeyFlagsCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// ClientGetName returns the name of the connection. +func (c cmdable) ClientGetName(ctx context.Context) *StringCmd { + cmd := NewStringCmd(ctx, "client", "getname") + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Echo(ctx context.Context, message interface{}) *StringCmd { + cmd := NewStringCmd(ctx, "echo", message) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Ping(ctx context.Context) *StatusCmd { + cmd := NewStatusCmd(ctx, "ping") + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Quit(_ context.Context) *StatusCmd { + panic("not implemented") +} + +func (c cmdable) Del(ctx context.Context, keys ...string) *IntCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "del" + for i, key := range keys { + args[1+i] = key + } + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Unlink(ctx context.Context, keys ...string) *IntCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "unlink" + for i, key := range keys { + args[1+i] = key + } + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Dump(ctx context.Context, key string) *StringCmd { + cmd := NewStringCmd(ctx, "dump", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Exists(ctx context.Context, keys ...string) *IntCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "exists" + for i, key := range keys { + args[1+i] = key + } + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Expire(ctx context.Context, key string, expiration time.Duration) *BoolCmd { + return c.expire(ctx, key, expiration, "") +} + +func (c cmdable) ExpireNX(ctx context.Context, key string, expiration time.Duration) *BoolCmd { + return c.expire(ctx, key, expiration, "NX") +} + +func (c cmdable) ExpireXX(ctx context.Context, key string, expiration time.Duration) *BoolCmd { + return c.expire(ctx, key, expiration, "XX") +} + +func (c cmdable) ExpireGT(ctx context.Context, key string, expiration time.Duration) *BoolCmd { + return c.expire(ctx, key, expiration, "GT") +} + +func (c cmdable) ExpireLT(ctx context.Context, key string, expiration time.Duration) *BoolCmd { + return c.expire(ctx, key, expiration, "LT") +} + +func (c cmdable) expire( + ctx context.Context, key string, expiration time.Duration, mode string, +) *BoolCmd { + args := make([]interface{}, 3, 4) + args[0] = "expire" + args[1] = key + args[2] = formatSec(ctx, expiration) + if mode != "" { + args = append(args, mode) + } + + cmd := NewBoolCmd(ctx, args...) 
+ _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd { + cmd := NewBoolCmd(ctx, "expireat", key, tm.Unix()) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ExpireTime(ctx context.Context, key string) *DurationCmd { + cmd := NewDurationCmd(ctx, time.Second, "expiretime", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Keys(ctx context.Context, pattern string) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, "keys", pattern) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Migrate(ctx context.Context, host, port, key string, db int, timeout time.Duration) *StatusCmd { + cmd := NewStatusCmd( + ctx, + "migrate", + host, + port, + key, + db, + formatMs(ctx, timeout), + ) + cmd.setReadTimeout(timeout) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Move(ctx context.Context, key string, db int) *BoolCmd { + cmd := NewBoolCmd(ctx, "move", key, db) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ObjectRefCount(ctx context.Context, key string) *IntCmd { + cmd := NewIntCmd(ctx, "object", "refcount", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ObjectEncoding(ctx context.Context, key string) *StringCmd { + cmd := NewStringCmd(ctx, "object", "encoding", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ObjectIdleTime(ctx context.Context, key string) *DurationCmd { + cmd := NewDurationCmd(ctx, time.Second, "object", "idletime", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Persist(ctx context.Context, key string) *BoolCmd { + cmd := NewBoolCmd(ctx, "persist", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) PExpire(ctx context.Context, key string, expiration time.Duration) *BoolCmd { + cmd := NewBoolCmd(ctx, "pexpire", key, formatMs(ctx, expiration)) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) PExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd { + cmd := NewBoolCmd( + ctx, + "pexpireat", + key, + tm.UnixNano()/int64(time.Millisecond), + ) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) PExpireTime(ctx context.Context, key string) *DurationCmd { + cmd := NewDurationCmd(ctx, time.Millisecond, "pexpiretime", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) PTTL(ctx context.Context, key string) *DurationCmd { + cmd := NewDurationCmd(ctx, time.Millisecond, "pttl", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) RandomKey(ctx context.Context) *StringCmd { + cmd := NewStringCmd(ctx, "randomkey") + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Rename(ctx context.Context, key, newkey string) *StatusCmd { + cmd := NewStatusCmd(ctx, "rename", key, newkey) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) RenameNX(ctx context.Context, key, newkey string) *BoolCmd { + cmd := NewBoolCmd(ctx, "renamenx", key, newkey) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Restore(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd { + cmd := NewStatusCmd( + ctx, + "restore", + key, + formatMs(ctx, ttl), + value, + ) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) RestoreReplace(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd { + cmd := NewStatusCmd( + ctx, + "restore", + key, + formatMs(ctx, ttl), + value, + "replace", + ) + _ = c(ctx, cmd) + return cmd +} + +type Sort struct { + By string + Offset, Count int64 + Get []string + Order string + Alpha bool +} + +func (sort *Sort) args(command, key string) []interface{} { + args := []interface{}{command, key} 
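The ExpireNX/ExpireXX/ExpireGT/ExpireLT wrappers above all funnel into the same expire helper and differ only in the modifier appended to the EXPIRE command (the modifiers themselves require redis-server >= 7.0). A minimal usage sketch, assuming a caller-supplied *redis.Client; the package name, rdb parameter, key and TTL below are illustrative and not taken from this patch:

package redisexample

import (
	"context"
	"time"

	"github.com/redis/go-redis/v9"
)

// RefreshTTL is a hypothetical helper: it only ever lengthens the TTL of a
// key, never shortens it, by using the GT modifier of EXPIRE.
func RefreshTTL(ctx context.Context, rdb *redis.Client, key string) error {
	return rdb.ExpireGT(ctx, key, 30*time.Minute).Err()
}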
+ + if sort.By != "" { + args = append(args, "by", sort.By) + } + if sort.Offset != 0 || sort.Count != 0 { + args = append(args, "limit", sort.Offset, sort.Count) + } + for _, get := range sort.Get { + args = append(args, "get", get) + } + if sort.Order != "" { + args = append(args, sort.Order) + } + if sort.Alpha { + args = append(args, "alpha") + } + return args +} + +func (c cmdable) SortRO(ctx context.Context, key string, sort *Sort) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, sort.args("sort_ro", key)...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Sort(ctx context.Context, key string, sort *Sort) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, sort.args("sort", key)...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) SortStore(ctx context.Context, key, store string, sort *Sort) *IntCmd { + args := sort.args("sort", key) + if store != "" { + args = append(args, "store", store) + } + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) SortInterfaces(ctx context.Context, key string, sort *Sort) *SliceCmd { + cmd := NewSliceCmd(ctx, sort.args("sort", key)...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Touch(ctx context.Context, keys ...string) *IntCmd { + args := make([]interface{}, len(keys)+1) + args[0] = "touch" + for i, key := range keys { + args[i+1] = key + } + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) TTL(ctx context.Context, key string) *DurationCmd { + cmd := NewDurationCmd(ctx, time.Second, "ttl", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Type(ctx context.Context, key string) *StatusCmd { + cmd := NewStatusCmd(ctx, "type", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Append(ctx context.Context, key, value string) *IntCmd { + cmd := NewIntCmd(ctx, "append", key, value) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Decr(ctx context.Context, key string) *IntCmd { + cmd := NewIntCmd(ctx, "decr", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) DecrBy(ctx context.Context, key string, decrement int64) *IntCmd { + cmd := NewIntCmd(ctx, "decrby", key, decrement) + _ = c(ctx, cmd) + return cmd +} + +// Get Redis `GET key` command. It returns redis.Nil error when key does not exist. +func (c cmdable) Get(ctx context.Context, key string) *StringCmd { + cmd := NewStringCmd(ctx, "get", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) GetRange(ctx context.Context, key string, start, end int64) *StringCmd { + cmd := NewStringCmd(ctx, "getrange", key, start, end) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) GetSet(ctx context.Context, key string, value interface{}) *StringCmd { + cmd := NewStringCmd(ctx, "getset", key, value) + _ = c(ctx, cmd) + return cmd +} + +// GetEx An expiration of zero removes the TTL associated with the key (i.e. GETEX key persist). +// Requires Redis >= 6.2.0. +func (c cmdable) GetEx(ctx context.Context, key string, expiration time.Duration) *StringCmd { + args := make([]interface{}, 0, 4) + args = append(args, "getex", key) + if expiration > 0 { + if usePrecise(expiration) { + args = append(args, "px", formatMs(ctx, expiration)) + } else { + args = append(args, "ex", formatSec(ctx, expiration)) + } + } else if expiration == 0 { + args = append(args, "persist") + } + + cmd := NewStringCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// GetDel redis-server version >= 6.2.0. 
+func (c cmdable) GetDel(ctx context.Context, key string) *StringCmd { + cmd := NewStringCmd(ctx, "getdel", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Incr(ctx context.Context, key string) *IntCmd { + cmd := NewIntCmd(ctx, "incr", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) IncrBy(ctx context.Context, key string, value int64) *IntCmd { + cmd := NewIntCmd(ctx, "incrby", key, value) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) IncrByFloat(ctx context.Context, key string, value float64) *FloatCmd { + cmd := NewFloatCmd(ctx, "incrbyfloat", key, value) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) MGet(ctx context.Context, keys ...string) *SliceCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "mget" + for i, key := range keys { + args[1+i] = key + } + cmd := NewSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// MSet is like Set but accepts multiple values: +// - MSet("key1", "value1", "key2", "value2") +// - MSet([]string{"key1", "value1", "key2", "value2"}) +// - MSet(map[string]interface{}{"key1": "value1", "key2": "value2"}) +// - MSet(struct), For struct types, see HSet description. +func (c cmdable) MSet(ctx context.Context, values ...interface{}) *StatusCmd { + args := make([]interface{}, 1, 1+len(values)) + args[0] = "mset" + args = appendArgs(args, values) + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// MSetNX is like SetNX but accepts multiple values: +// - MSetNX("key1", "value1", "key2", "value2") +// - MSetNX([]string{"key1", "value1", "key2", "value2"}) +// - MSetNX(map[string]interface{}{"key1": "value1", "key2": "value2"}) +// - MSetNX(struct), For struct types, see HSet description. +func (c cmdable) MSetNX(ctx context.Context, values ...interface{}) *BoolCmd { + args := make([]interface{}, 1, 1+len(values)) + args[0] = "msetnx" + args = appendArgs(args, values) + cmd := NewBoolCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// Set Redis `SET key value [expiration]` command. +// Use expiration for `SETEx`-like behavior. +// +// Zero expiration means the key has no expiration time. +// KeepTTL is a Redis KEEPTTL option to keep existing TTL, it requires your redis-server version >= 6.0, +// otherwise you will receive an error: (error) ERR syntax error. +func (c cmdable) Set(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd { + args := make([]interface{}, 3, 5) + args[0] = "set" + args[1] = key + args[2] = value + if expiration > 0 { + if usePrecise(expiration) { + args = append(args, "px", formatMs(ctx, expiration)) + } else { + args = append(args, "ex", formatSec(ctx, expiration)) + } + } else if expiration == KeepTTL { + args = append(args, "keepttl") + } + + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// SetArgs provides arguments for the SetArgs function. +type SetArgs struct { + // Mode can be `NX` or `XX` or empty. + Mode string + + // Zero `TTL` or `Expiration` means that the key has no expiration time. + TTL time.Duration + ExpireAt time.Time + + // When Get is true, the command returns the old value stored at key, or nil when key did not exist. + Get bool + + // KeepTTL is a Redis KEEPTTL option to keep existing TTL, it requires your redis-server version >= 6.0, + // otherwise you will receive an error: (error) ERR syntax error. + KeepTTL bool +} + +// SetArgs supports all the options that the SET command supports. 
+// It is the alternative to the Set function when you want +// to have more control over the options. +func (c cmdable) SetArgs(ctx context.Context, key string, value interface{}, a SetArgs) *StatusCmd { + args := []interface{}{"set", key, value} + + if a.KeepTTL { + args = append(args, "keepttl") + } + + if !a.ExpireAt.IsZero() { + args = append(args, "exat", a.ExpireAt.Unix()) + } + if a.TTL > 0 { + if usePrecise(a.TTL) { + args = append(args, "px", formatMs(ctx, a.TTL)) + } else { + args = append(args, "ex", formatSec(ctx, a.TTL)) + } + } + + if a.Mode != "" { + args = append(args, a.Mode) + } + + if a.Get { + args = append(args, "get") + } + + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// SetEx Redis `SETEx key expiration value` command. +func (c cmdable) SetEx(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd { + cmd := NewStatusCmd(ctx, "setex", key, formatSec(ctx, expiration), value) + _ = c(ctx, cmd) + return cmd +} + +// SetNX Redis `SET key value [expiration] NX` command. +// +// Zero expiration means the key has no expiration time. +// KeepTTL is a Redis KEEPTTL option to keep existing TTL, it requires your redis-server version >= 6.0, +// otherwise you will receive an error: (error) ERR syntax error. +func (c cmdable) SetNX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd { + var cmd *BoolCmd + switch expiration { + case 0: + // Use old `SETNX` to support old Redis versions. + cmd = NewBoolCmd(ctx, "setnx", key, value) + case KeepTTL: + cmd = NewBoolCmd(ctx, "set", key, value, "keepttl", "nx") + default: + if usePrecise(expiration) { + cmd = NewBoolCmd(ctx, "set", key, value, "px", formatMs(ctx, expiration), "nx") + } else { + cmd = NewBoolCmd(ctx, "set", key, value, "ex", formatSec(ctx, expiration), "nx") + } + } + + _ = c(ctx, cmd) + return cmd +} + +// SetXX Redis `SET key value [expiration] XX` command. +// +// Zero expiration means the key has no expiration time. +// KeepTTL is a Redis KEEPTTL option to keep existing TTL, it requires your redis-server version >= 6.0, +// otherwise you will receive an error: (error) ERR syntax error. +func (c cmdable) SetXX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd { + var cmd *BoolCmd + switch expiration { + case 0: + cmd = NewBoolCmd(ctx, "set", key, value, "xx") + case KeepTTL: + cmd = NewBoolCmd(ctx, "set", key, value, "keepttl", "xx") + default: + if usePrecise(expiration) { + cmd = NewBoolCmd(ctx, "set", key, value, "px", formatMs(ctx, expiration), "xx") + } else { + cmd = NewBoolCmd(ctx, "set", key, value, "ex", formatSec(ctx, expiration), "xx") + } + } + + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) SetRange(ctx context.Context, key string, offset int64, value string) *IntCmd { + cmd := NewIntCmd(ctx, "setrange", key, offset, value) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) StrLen(ctx context.Context, key string) *IntCmd { + cmd := NewIntCmd(ctx, "strlen", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Copy(ctx context.Context, sourceKey, destKey string, db int, replace bool) *IntCmd { + args := []interface{}{"copy", sourceKey, destKey, "DB", db} + if replace { + args = append(args, "REPLACE") + } + cmd := NewIntCmd(ctx, args...) 
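The Set/SetNX/SetXX/SetArgs variants above map the SET options (EX/PX, NX/XX, KEEPTTL, GET) onto typed arguments instead of a free-form argument list. A sketch of typical calls, assuming a caller-supplied client; the key names, values and TTLs are illustrative only:

package redisexample

import (
	"context"
	"time"

	"github.com/redis/go-redis/v9"
)

// CacheSession is a hypothetical example of the SET variants above.
func CacheSession(ctx context.Context, rdb *redis.Client) error {
	// Plain SET with a TTL; go-redis emits EX or PX depending on precision.
	if err := rdb.Set(ctx, "session:42", "payload", 10*time.Minute).Err(); err != nil {
		return err
	}

	// SET ... NX: a false result means the key already existed and nothing was written.
	if _, err := rdb.SetNX(ctx, "lock:gc", "owner-1", 30*time.Second).Result(); err != nil {
		return err
	}

	// SetArgs bundles the optional flags; Get requires redis-server >= 6.2.
	return rdb.SetArgs(ctx, "session:42", "new-payload", redis.SetArgs{
		Mode: "XX",         // only overwrite an existing key
		TTL:  time.Minute,  // EX/PX chosen from precision
		Get:  true,         // return the previous value
	}).Err()
}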
+ _ = c(ctx, cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c cmdable) GetBit(ctx context.Context, key string, offset int64) *IntCmd { + cmd := NewIntCmd(ctx, "getbit", key, offset) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) SetBit(ctx context.Context, key string, offset int64, value int) *IntCmd { + cmd := NewIntCmd( + ctx, + "setbit", + key, + offset, + value, + ) + _ = c(ctx, cmd) + return cmd +} + +type BitCount struct { + Start, End int64 +} + +func (c cmdable) BitCount(ctx context.Context, key string, bitCount *BitCount) *IntCmd { + args := []interface{}{"bitcount", key} + if bitCount != nil { + args = append( + args, + bitCount.Start, + bitCount.End, + ) + } + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) bitOp(ctx context.Context, op, destKey string, keys ...string) *IntCmd { + args := make([]interface{}, 3+len(keys)) + args[0] = "bitop" + args[1] = op + args[2] = destKey + for i, key := range keys { + args[3+i] = key + } + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) BitOpAnd(ctx context.Context, destKey string, keys ...string) *IntCmd { + return c.bitOp(ctx, "and", destKey, keys...) +} + +func (c cmdable) BitOpOr(ctx context.Context, destKey string, keys ...string) *IntCmd { + return c.bitOp(ctx, "or", destKey, keys...) +} + +func (c cmdable) BitOpXor(ctx context.Context, destKey string, keys ...string) *IntCmd { + return c.bitOp(ctx, "xor", destKey, keys...) +} + +func (c cmdable) BitOpNot(ctx context.Context, destKey string, key string) *IntCmd { + return c.bitOp(ctx, "not", destKey, key) +} + +// BitPos is an API before Redis version 7.0, cmd: bitpos key bit start end +// if you need the `byte | bit` parameter, please use `BitPosSpan`. +func (c cmdable) BitPos(ctx context.Context, key string, bit int64, pos ...int64) *IntCmd { + args := make([]interface{}, 3+len(pos)) + args[0] = "bitpos" + args[1] = key + args[2] = bit + switch len(pos) { + case 0: + case 1: + args[3] = pos[0] + case 2: + args[3] = pos[0] + args[4] = pos[1] + default: + panic("too many arguments") + } + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// BitPosSpan supports the `byte | bit` parameters in redis version 7.0, +// the bitpos command defaults to using byte type for the `start-end` range, +// which means it counts in bytes from start to end. you can set the value +// of "span" to determine the type of `start-end`. +// span = "bit", cmd: bitpos key bit start end bit +// span = "byte", cmd: bitpos key bit start end byte +func (c cmdable) BitPosSpan(ctx context.Context, key string, bit int8, start, end int64, span string) *IntCmd { + cmd := NewIntCmd(ctx, "bitpos", key, bit, start, end, span) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) BitField(ctx context.Context, key string, args ...interface{}) *IntSliceCmd { + a := make([]interface{}, 0, 2+len(args)) + a = append(a, "bitfield") + a = append(a, key) + a = append(a, args...) + cmd := NewIntSliceCmd(ctx, a...) + _ = c(ctx, cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c cmdable) Scan(ctx context.Context, cursor uint64, match string, count int64) *ScanCmd { + args := []interface{}{"scan", cursor} + if match != "" { + args = append(args, "match", match) + } + if count > 0 { + args = append(args, "count", count) + } + cmd := NewScanCmd(ctx, c, args...) 
+ _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ScanType(ctx context.Context, cursor uint64, match string, count int64, keyType string) *ScanCmd { + args := []interface{}{"scan", cursor} + if match != "" { + args = append(args, "match", match) + } + if count > 0 { + args = append(args, "count", count) + } + if keyType != "" { + args = append(args, "type", keyType) + } + cmd := NewScanCmd(ctx, c, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) SScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd { + args := []interface{}{"sscan", key, cursor} + if match != "" { + args = append(args, "match", match) + } + if count > 0 { + args = append(args, "count", count) + } + cmd := NewScanCmd(ctx, c, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) HScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd { + args := []interface{}{"hscan", key, cursor} + if match != "" { + args = append(args, "match", match) + } + if count > 0 { + args = append(args, "count", count) + } + cmd := NewScanCmd(ctx, c, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd { + args := []interface{}{"zscan", key, cursor} + if match != "" { + args = append(args, "match", match) + } + if count > 0 { + args = append(args, "count", count) + } + cmd := NewScanCmd(ctx, c, args...) + _ = c(ctx, cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c cmdable) HDel(ctx context.Context, key string, fields ...string) *IntCmd { + args := make([]interface{}, 2+len(fields)) + args[0] = "hdel" + args[1] = key + for i, field := range fields { + args[2+i] = field + } + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) HExists(ctx context.Context, key, field string) *BoolCmd { + cmd := NewBoolCmd(ctx, "hexists", key, field) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) HGet(ctx context.Context, key, field string) *StringCmd { + cmd := NewStringCmd(ctx, "hget", key, field) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) HGetAll(ctx context.Context, key string) *MapStringStringCmd { + cmd := NewMapStringStringCmd(ctx, "hgetall", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) HIncrBy(ctx context.Context, key, field string, incr int64) *IntCmd { + cmd := NewIntCmd(ctx, "hincrby", key, field, incr) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) HIncrByFloat(ctx context.Context, key, field string, incr float64) *FloatCmd { + cmd := NewFloatCmd(ctx, "hincrbyfloat", key, field, incr) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) HKeys(ctx context.Context, key string) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, "hkeys", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) HLen(ctx context.Context, key string) *IntCmd { + cmd := NewIntCmd(ctx, "hlen", key) + _ = c(ctx, cmd) + return cmd +} + +// HMGet returns the values for the specified fields in the hash stored at key. +// It returns an interface{} to distinguish between empty string and nil value. +func (c cmdable) HMGet(ctx context.Context, key string, fields ...string) *SliceCmd { + args := make([]interface{}, 2+len(fields)) + args[0] = "hmget" + args[1] = key + for i, field := range fields { + args[2+i] = field + } + cmd := NewSliceCmd(ctx, args...) 
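The SCAN/SSCAN/HSCAN/ZSCAN wrappers above return a *ScanCmd that carries the cursor, and its Iterator() helper hides the cursor bookkeeping by issuing the follow-up SCAN calls. A sketch, where the match pattern and the rdb parameter are illustrative assumptions:

package redisexample

import (
	"context"
	"fmt"

	"github.com/redis/go-redis/v9"
)

// ListKeys is a hypothetical example of walking a SCAN cursor to completion.
func ListKeys(ctx context.Context, rdb *redis.Client) error {
	// The count (100) is only a hint for how much work each SCAN call does.
	iter := rdb.Scan(ctx, 0, "repository:*", 100).Iterator()
	for iter.Next(ctx) {
		fmt.Println("key:", iter.Val())
	}
	return iter.Err()
}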
+ _ = c(ctx, cmd) + return cmd +} + +// HSet accepts values in following formats: +// +// - HSet("myhash", "key1", "value1", "key2", "value2") +// +// - HSet("myhash", []string{"key1", "value1", "key2", "value2"}) +// +// - HSet("myhash", map[string]interface{}{"key1": "value1", "key2": "value2"}) +// +// Playing struct With "redis" tag. +// type MyHash struct { Key1 string `redis:"key1"`; Key2 int `redis:"key2"` } +// +// - HSet("myhash", MyHash{"value1", "value2"}) Warn: redis-server >= 4.0 +// +// For struct, can be a structure pointer type, we only parse the field whose tag is redis. +// if you don't want the field to be read, you can use the `redis:"-"` flag to ignore it, +// or you don't need to set the redis tag. +// For the type of structure field, we only support simple data types: +// string, int/uint(8,16,32,64), float(32,64), time.Time(to RFC3339Nano), time.Duration(to Nanoseconds ), +// if you are other more complex or custom data types, please implement the encoding.BinaryMarshaler interface. +// +// Note that in older versions of Redis server(redis-server < 4.0), HSet only supports a single key-value pair. +// redis-docs: https://redis.io/commands/hset (Starting with Redis version 4.0.0: Accepts multiple field and value arguments.) +// If you are using a Struct type and the number of fields is greater than one, +// you will receive an error similar to "ERR wrong number of arguments", you can use HMSet as a substitute. +func (c cmdable) HSet(ctx context.Context, key string, values ...interface{}) *IntCmd { + args := make([]interface{}, 2, 2+len(values)) + args[0] = "hset" + args[1] = key + args = appendArgs(args, values) + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// HMSet is a deprecated version of HSet left for compatibility with Redis 3. +func (c cmdable) HMSet(ctx context.Context, key string, values ...interface{}) *BoolCmd { + args := make([]interface{}, 2, 2+len(values)) + args[0] = "hmset" + args[1] = key + args = appendArgs(args, values) + cmd := NewBoolCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) HSetNX(ctx context.Context, key, field string, value interface{}) *BoolCmd { + cmd := NewBoolCmd(ctx, "hsetnx", key, field, value) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) HVals(ctx context.Context, key string) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, "hvals", key) + _ = c(ctx, cmd) + return cmd +} + +// HRandField redis-server version >= 6.2.0. +func (c cmdable) HRandField(ctx context.Context, key string, count int) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, "hrandfield", key, count) + _ = c(ctx, cmd) + return cmd +} + +// HRandFieldWithValues redis-server version >= 6.2.0. +func (c cmdable) HRandFieldWithValues(ctx context.Context, key string, count int) *KeyValueSliceCmd { + cmd := NewKeyValueSliceCmd(ctx, "hrandfield", key, count, "withvalues") + _ = c(ctx, cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c cmdable) BLPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd { + args := make([]interface{}, 1+len(keys)+1) + args[0] = "blpop" + for i, key := range keys { + args[1+i] = key + } + args[len(args)-1] = formatSec(ctx, timeout) + cmd := NewStringSliceCmd(ctx, args...) 
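The HSet doc comment above lists the accepted value shapes (flat field/value pairs, a slice, a map, or a struct with redis tags). A short sketch of two of those forms, with the hash key and field names being illustrative:

package redisexample

import (
	"context"
	"fmt"

	"github.com/redis/go-redis/v9"
)

// WriteHash is a hypothetical example of the HSet value shapes described above.
func WriteHash(ctx context.Context, rdb *redis.Client) error {
	// Flat field/value pairs (multiple pairs per call need redis-server >= 4.0).
	if err := rdb.HSet(ctx, "myhash", "field1", "value1", "field2", 2).Err(); err != nil {
		return err
	}
	// A map works as well; the ordering of the pairs is not preserved.
	if err := rdb.HSet(ctx, "myhash", map[string]interface{}{"field3": "value3"}).Err(); err != nil {
		return err
	}
	// HGetAll returns every field as map[string]string.
	fields, err := rdb.HGetAll(ctx, "myhash").Result()
	fmt.Println(fields)
	return err
}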
+ cmd.setReadTimeout(timeout) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) BLMPop(ctx context.Context, timeout time.Duration, direction string, count int64, keys ...string) *KeyValuesCmd { + args := make([]interface{}, 3+len(keys), 6+len(keys)) + args[0] = "blmpop" + args[1] = formatSec(ctx, timeout) + args[2] = len(keys) + for i, key := range keys { + args[3+i] = key + } + args = append(args, strings.ToLower(direction), "count", count) + cmd := NewKeyValuesCmd(ctx, args...) + cmd.setReadTimeout(timeout) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) BRPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd { + args := make([]interface{}, 1+len(keys)+1) + args[0] = "brpop" + for i, key := range keys { + args[1+i] = key + } + args[len(keys)+1] = formatSec(ctx, timeout) + cmd := NewStringSliceCmd(ctx, args...) + cmd.setReadTimeout(timeout) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) BRPopLPush(ctx context.Context, source, destination string, timeout time.Duration) *StringCmd { + cmd := NewStringCmd( + ctx, + "brpoplpush", + source, + destination, + formatSec(ctx, timeout), + ) + cmd.setReadTimeout(timeout) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) LCS(ctx context.Context, q *LCSQuery) *LCSCmd { + cmd := NewLCSCmd(ctx, q) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) LIndex(ctx context.Context, key string, index int64) *StringCmd { + cmd := NewStringCmd(ctx, "lindex", key, index) + _ = c(ctx, cmd) + return cmd +} + +// LMPop Pops one or more elements from the first non-empty list key from the list of provided key names. +// direction: left or right, count: > 0 +// example: client.LMPop(ctx, "left", 3, "key1", "key2") +func (c cmdable) LMPop(ctx context.Context, direction string, count int64, keys ...string) *KeyValuesCmd { + args := make([]interface{}, 2+len(keys), 5+len(keys)) + args[0] = "lmpop" + args[1] = len(keys) + for i, key := range keys { + args[2+i] = key + } + args = append(args, strings.ToLower(direction), "count", count) + cmd := NewKeyValuesCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) LInsert(ctx context.Context, key, op string, pivot, value interface{}) *IntCmd { + cmd := NewIntCmd(ctx, "linsert", key, op, pivot, value) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) LInsertBefore(ctx context.Context, key string, pivot, value interface{}) *IntCmd { + cmd := NewIntCmd(ctx, "linsert", key, "before", pivot, value) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) LInsertAfter(ctx context.Context, key string, pivot, value interface{}) *IntCmd { + cmd := NewIntCmd(ctx, "linsert", key, "after", pivot, value) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) LLen(ctx context.Context, key string) *IntCmd { + cmd := NewIntCmd(ctx, "llen", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) LPop(ctx context.Context, key string) *StringCmd { + cmd := NewStringCmd(ctx, "lpop", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) LPopCount(ctx context.Context, key string, count int) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, "lpop", key, count) + _ = c(ctx, cmd) + return cmd +} + +type LPosArgs struct { + Rank, MaxLen int64 +} + +func (c cmdable) LPos(ctx context.Context, key string, value string, a LPosArgs) *IntCmd { + args := []interface{}{"lpos", key, value} + if a.Rank != 0 { + args = append(args, "rank", a.Rank) + } + if a.MaxLen != 0 { + args = append(args, "maxlen", a.MaxLen) + } + + cmd := NewIntCmd(ctx, args...) 
+ _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) LPosCount(ctx context.Context, key string, value string, count int64, a LPosArgs) *IntSliceCmd { + args := []interface{}{"lpos", key, value, "count", count} + if a.Rank != 0 { + args = append(args, "rank", a.Rank) + } + if a.MaxLen != 0 { + args = append(args, "maxlen", a.MaxLen) + } + cmd := NewIntSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) LPush(ctx context.Context, key string, values ...interface{}) *IntCmd { + args := make([]interface{}, 2, 2+len(values)) + args[0] = "lpush" + args[1] = key + args = appendArgs(args, values) + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) LPushX(ctx context.Context, key string, values ...interface{}) *IntCmd { + args := make([]interface{}, 2, 2+len(values)) + args[0] = "lpushx" + args[1] = key + args = appendArgs(args, values) + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) LRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd { + cmd := NewStringSliceCmd( + ctx, + "lrange", + key, + start, + stop, + ) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) LRem(ctx context.Context, key string, count int64, value interface{}) *IntCmd { + cmd := NewIntCmd(ctx, "lrem", key, count, value) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) LSet(ctx context.Context, key string, index int64, value interface{}) *StatusCmd { + cmd := NewStatusCmd(ctx, "lset", key, index, value) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) LTrim(ctx context.Context, key string, start, stop int64) *StatusCmd { + cmd := NewStatusCmd( + ctx, + "ltrim", + key, + start, + stop, + ) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) RPop(ctx context.Context, key string) *StringCmd { + cmd := NewStringCmd(ctx, "rpop", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) RPopCount(ctx context.Context, key string, count int) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, "rpop", key, count) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) RPopLPush(ctx context.Context, source, destination string) *StringCmd { + cmd := NewStringCmd(ctx, "rpoplpush", source, destination) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) RPush(ctx context.Context, key string, values ...interface{}) *IntCmd { + args := make([]interface{}, 2, 2+len(values)) + args[0] = "rpush" + args[1] = key + args = appendArgs(args, values) + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) RPushX(ctx context.Context, key string, values ...interface{}) *IntCmd { + args := make([]interface{}, 2, 2+len(values)) + args[0] = "rpushx" + args[1] = key + args = appendArgs(args, values) + cmd := NewIntCmd(ctx, args...) 
+ _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) LMove(ctx context.Context, source, destination, srcpos, destpos string) *StringCmd { + cmd := NewStringCmd(ctx, "lmove", source, destination, srcpos, destpos) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) BLMove( + ctx context.Context, source, destination, srcpos, destpos string, timeout time.Duration, +) *StringCmd { + cmd := NewStringCmd(ctx, "blmove", source, destination, srcpos, destpos, formatSec(ctx, timeout)) + cmd.setReadTimeout(timeout) + _ = c(ctx, cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c cmdable) SAdd(ctx context.Context, key string, members ...interface{}) *IntCmd { + args := make([]interface{}, 2, 2+len(members)) + args[0] = "sadd" + args[1] = key + args = appendArgs(args, members) + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) SCard(ctx context.Context, key string) *IntCmd { + cmd := NewIntCmd(ctx, "scard", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) SDiff(ctx context.Context, keys ...string) *StringSliceCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "sdiff" + for i, key := range keys { + args[1+i] = key + } + cmd := NewStringSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) SDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd { + args := make([]interface{}, 2+len(keys)) + args[0] = "sdiffstore" + args[1] = destination + for i, key := range keys { + args[2+i] = key + } + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) SInter(ctx context.Context, keys ...string) *StringSliceCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "sinter" + for i, key := range keys { + args[1+i] = key + } + cmd := NewStringSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) SInterCard(ctx context.Context, limit int64, keys ...string) *IntCmd { + args := make([]interface{}, 4+len(keys)) + args[0] = "sintercard" + numkeys := int64(0) + for i, key := range keys { + args[2+i] = key + numkeys++ + } + args[1] = numkeys + args[2+numkeys] = "limit" + args[3+numkeys] = limit + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) SInterStore(ctx context.Context, destination string, keys ...string) *IntCmd { + args := make([]interface{}, 2+len(keys)) + args[0] = "sinterstore" + args[1] = destination + for i, key := range keys { + args[2+i] = key + } + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) SIsMember(ctx context.Context, key string, member interface{}) *BoolCmd { + cmd := NewBoolCmd(ctx, "sismember", key, member) + _ = c(ctx, cmd) + return cmd +} + +// SMIsMember Redis `SMISMEMBER key member [member ...]` command. +func (c cmdable) SMIsMember(ctx context.Context, key string, members ...interface{}) *BoolSliceCmd { + args := make([]interface{}, 2, 2+len(members)) + args[0] = "smismember" + args[1] = key + args = appendArgs(args, members) + cmd := NewBoolSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// SMembers Redis `SMEMBERS key` command output as a slice. +func (c cmdable) SMembers(ctx context.Context, key string) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, "smembers", key) + _ = c(ctx, cmd) + return cmd +} + +// SMembersMap Redis `SMEMBERS key` command output as a map. 
+func (c cmdable) SMembersMap(ctx context.Context, key string) *StringStructMapCmd { + cmd := NewStringStructMapCmd(ctx, "smembers", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) SMove(ctx context.Context, source, destination string, member interface{}) *BoolCmd { + cmd := NewBoolCmd(ctx, "smove", source, destination, member) + _ = c(ctx, cmd) + return cmd +} + +// SPop Redis `SPOP key` command. +func (c cmdable) SPop(ctx context.Context, key string) *StringCmd { + cmd := NewStringCmd(ctx, "spop", key) + _ = c(ctx, cmd) + return cmd +} + +// SPopN Redis `SPOP key count` command. +func (c cmdable) SPopN(ctx context.Context, key string, count int64) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, "spop", key, count) + _ = c(ctx, cmd) + return cmd +} + +// SRandMember Redis `SRANDMEMBER key` command. +func (c cmdable) SRandMember(ctx context.Context, key string) *StringCmd { + cmd := NewStringCmd(ctx, "srandmember", key) + _ = c(ctx, cmd) + return cmd +} + +// SRandMemberN Redis `SRANDMEMBER key count` command. +func (c cmdable) SRandMemberN(ctx context.Context, key string, count int64) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, "srandmember", key, count) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) SRem(ctx context.Context, key string, members ...interface{}) *IntCmd { + args := make([]interface{}, 2, 2+len(members)) + args[0] = "srem" + args[1] = key + args = appendArgs(args, members) + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) SUnion(ctx context.Context, keys ...string) *StringSliceCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "sunion" + for i, key := range keys { + args[1+i] = key + } + cmd := NewStringSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) SUnionStore(ctx context.Context, destination string, keys ...string) *IntCmd { + args := make([]interface{}, 2+len(keys)) + args[0] = "sunionstore" + args[1] = destination + for i, key := range keys { + args[2+i] = key + } + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +// XAddArgs accepts values in the following formats: +// - XAddArgs.Values = []interface{}{"key1", "value1", "key2", "value2"} +// - XAddArgs.Values = []string("key1", "value1", "key2", "value2") +// - XAddArgs.Values = map[string]interface{}{"key1": "value1", "key2": "value2"} +// +// Note that map will not preserve the order of key-value pairs. +// MaxLen/MaxLenApprox and MinID are in conflict, only one of them can be used. +type XAddArgs struct { + Stream string + NoMkStream bool + MaxLen int64 // MAXLEN N + MinID string + // Approx causes MaxLen and MinID to use "~" matcher (instead of "="). 
+ Approx bool + Limit int64 + ID string + Values interface{} +} + +func (c cmdable) XAdd(ctx context.Context, a *XAddArgs) *StringCmd { + args := make([]interface{}, 0, 11) + args = append(args, "xadd", a.Stream) + if a.NoMkStream { + args = append(args, "nomkstream") + } + switch { + case a.MaxLen > 0: + if a.Approx { + args = append(args, "maxlen", "~", a.MaxLen) + } else { + args = append(args, "maxlen", a.MaxLen) + } + case a.MinID != "": + if a.Approx { + args = append(args, "minid", "~", a.MinID) + } else { + args = append(args, "minid", a.MinID) + } + } + if a.Limit > 0 { + args = append(args, "limit", a.Limit) + } + if a.ID != "" { + args = append(args, a.ID) + } else { + args = append(args, "*") + } + args = appendArg(args, a.Values) + + cmd := NewStringCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) XDel(ctx context.Context, stream string, ids ...string) *IntCmd { + args := []interface{}{"xdel", stream} + for _, id := range ids { + args = append(args, id) + } + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) XLen(ctx context.Context, stream string) *IntCmd { + cmd := NewIntCmd(ctx, "xlen", stream) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) XRange(ctx context.Context, stream, start, stop string) *XMessageSliceCmd { + cmd := NewXMessageSliceCmd(ctx, "xrange", stream, start, stop) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) XRangeN(ctx context.Context, stream, start, stop string, count int64) *XMessageSliceCmd { + cmd := NewXMessageSliceCmd(ctx, "xrange", stream, start, stop, "count", count) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) XRevRange(ctx context.Context, stream, start, stop string) *XMessageSliceCmd { + cmd := NewXMessageSliceCmd(ctx, "xrevrange", stream, start, stop) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) XRevRangeN(ctx context.Context, stream, start, stop string, count int64) *XMessageSliceCmd { + cmd := NewXMessageSliceCmd(ctx, "xrevrange", stream, start, stop, "count", count) + _ = c(ctx, cmd) + return cmd +} + +type XReadArgs struct { + Streams []string // list of streams and ids, e.g. stream1 stream2 id1 id2 + Count int64 + Block time.Duration +} + +func (c cmdable) XRead(ctx context.Context, a *XReadArgs) *XStreamSliceCmd { + args := make([]interface{}, 0, 6+len(a.Streams)) + args = append(args, "xread") + + keyPos := int8(1) + if a.Count > 0 { + args = append(args, "count") + args = append(args, a.Count) + keyPos += 2 + } + if a.Block >= 0 { + args = append(args, "block") + args = append(args, int64(a.Block/time.Millisecond)) + keyPos += 2 + } + args = append(args, "streams") + keyPos++ + for _, s := range a.Streams { + args = append(args, s) + } + + cmd := NewXStreamSliceCmd(ctx, args...) 
+ if a.Block >= 0 { + cmd.setReadTimeout(a.Block) + } + cmd.SetFirstKeyPos(keyPos) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) XReadStreams(ctx context.Context, streams ...string) *XStreamSliceCmd { + return c.XRead(ctx, &XReadArgs{ + Streams: streams, + Block: -1, + }) +} + +func (c cmdable) XGroupCreate(ctx context.Context, stream, group, start string) *StatusCmd { + cmd := NewStatusCmd(ctx, "xgroup", "create", stream, group, start) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) XGroupCreateMkStream(ctx context.Context, stream, group, start string) *StatusCmd { + cmd := NewStatusCmd(ctx, "xgroup", "create", stream, group, start, "mkstream") + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) XGroupSetID(ctx context.Context, stream, group, start string) *StatusCmd { + cmd := NewStatusCmd(ctx, "xgroup", "setid", stream, group, start) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) XGroupDestroy(ctx context.Context, stream, group string) *IntCmd { + cmd := NewIntCmd(ctx, "xgroup", "destroy", stream, group) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) XGroupCreateConsumer(ctx context.Context, stream, group, consumer string) *IntCmd { + cmd := NewIntCmd(ctx, "xgroup", "createconsumer", stream, group, consumer) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) XGroupDelConsumer(ctx context.Context, stream, group, consumer string) *IntCmd { + cmd := NewIntCmd(ctx, "xgroup", "delconsumer", stream, group, consumer) + _ = c(ctx, cmd) + return cmd +} + +type XReadGroupArgs struct { + Group string + Consumer string + Streams []string // list of streams and ids, e.g. stream1 stream2 id1 id2 + Count int64 + Block time.Duration + NoAck bool +} + +func (c cmdable) XReadGroup(ctx context.Context, a *XReadGroupArgs) *XStreamSliceCmd { + args := make([]interface{}, 0, 10+len(a.Streams)) + args = append(args, "xreadgroup", "group", a.Group, a.Consumer) + + keyPos := int8(4) + if a.Count > 0 { + args = append(args, "count", a.Count) + keyPos += 2 + } + if a.Block >= 0 { + args = append(args, "block", int64(a.Block/time.Millisecond)) + keyPos += 2 + } + if a.NoAck { + args = append(args, "noack") + keyPos++ + } + args = append(args, "streams") + keyPos++ + for _, s := range a.Streams { + args = append(args, s) + } + + cmd := NewXStreamSliceCmd(ctx, args...) + if a.Block >= 0 { + cmd.setReadTimeout(a.Block) + } + cmd.SetFirstKeyPos(keyPos) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) XAck(ctx context.Context, stream, group string, ids ...string) *IntCmd { + args := []interface{}{"xack", stream, group} + for _, id := range ids { + args = append(args, id) + } + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) XPending(ctx context.Context, stream, group string) *XPendingCmd { + cmd := NewXPendingCmd(ctx, "xpending", stream, group) + _ = c(ctx, cmd) + return cmd +} + +type XPendingExtArgs struct { + Stream string + Group string + Idle time.Duration + Start string + End string + Count int64 + Consumer string +} + +func (c cmdable) XPendingExt(ctx context.Context, a *XPendingExtArgs) *XPendingExtCmd { + args := make([]interface{}, 0, 9) + args = append(args, "xpending", a.Stream, a.Group) + if a.Idle != 0 { + args = append(args, "idle", formatMs(ctx, a.Idle)) + } + args = append(args, a.Start, a.End, a.Count) + if a.Consumer != "" { + args = append(args, a.Consumer) + } + cmd := NewXPendingExtCmd(ctx, args...) 
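XAdd and XReadGroup above are driven by the XAddArgs/XReadGroupArgs structs rather than positional arguments; note that any Block >= 0 adds the BLOCK option (and the matching read timeout), so a negative Block is the non-blocking form. A sketch, where the stream, group and consumer names are illustrative:

package redisexample

import (
	"context"
	"time"

	"github.com/redis/go-redis/v9"
)

// ProduceAndConsume is a hypothetical example of the stream helpers above; it
// assumes the consumer group "workers" was created beforehand
// (e.g. with XGroupCreateMkStream).
func ProduceAndConsume(ctx context.Context, rdb *redis.Client) error {
	// XADD with an auto-generated ID ("*") and approximate MAXLEN trimming.
	if err := rdb.XAdd(ctx, &redis.XAddArgs{
		Stream: "events",
		MaxLen: 10000,
		Approx: true,
		Values: map[string]interface{}{"type": "manifest-push"},
	}).Err(); err != nil {
		return err
	}

	// XREADGROUP for new entries (">"); Block >= 0 sends BLOCK, so this call
	// waits at most five seconds. Use a negative Block for a non-blocking read.
	_, err := rdb.XReadGroup(ctx, &redis.XReadGroupArgs{
		Group:    "workers",
		Consumer: "worker-1",
		Streams:  []string{"events", ">"},
		Count:    10,
		Block:    5 * time.Second,
	}).Result()
	return err
}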
+ _ = c(ctx, cmd) + return cmd +} + +type XAutoClaimArgs struct { + Stream string + Group string + MinIdle time.Duration + Start string + Count int64 + Consumer string +} + +func (c cmdable) XAutoClaim(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimCmd { + args := xAutoClaimArgs(ctx, a) + cmd := NewXAutoClaimCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) XAutoClaimJustID(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimJustIDCmd { + args := xAutoClaimArgs(ctx, a) + args = append(args, "justid") + cmd := NewXAutoClaimJustIDCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func xAutoClaimArgs(ctx context.Context, a *XAutoClaimArgs) []interface{} { + args := make([]interface{}, 0, 8) + args = append(args, "xautoclaim", a.Stream, a.Group, a.Consumer, formatMs(ctx, a.MinIdle), a.Start) + if a.Count > 0 { + args = append(args, "count", a.Count) + } + return args +} + +type XClaimArgs struct { + Stream string + Group string + Consumer string + MinIdle time.Duration + Messages []string +} + +func (c cmdable) XClaim(ctx context.Context, a *XClaimArgs) *XMessageSliceCmd { + args := xClaimArgs(a) + cmd := NewXMessageSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) XClaimJustID(ctx context.Context, a *XClaimArgs) *StringSliceCmd { + args := xClaimArgs(a) + args = append(args, "justid") + cmd := NewStringSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func xClaimArgs(a *XClaimArgs) []interface{} { + args := make([]interface{}, 0, 5+len(a.Messages)) + args = append(args, + "xclaim", + a.Stream, + a.Group, a.Consumer, + int64(a.MinIdle/time.Millisecond)) + for _, id := range a.Messages { + args = append(args, id) + } + return args +} + +// xTrim If approx is true, add the "~" parameter, otherwise it is the default "=" (redis default). +// example: +// +// XTRIM key MAXLEN/MINID threshold LIMIT limit. +// XTRIM key MAXLEN/MINID ~ threshold LIMIT limit. +// +// The redis-server version is lower than 6.2, please set limit to 0. +func (c cmdable) xTrim( + ctx context.Context, key, strategy string, + approx bool, threshold interface{}, limit int64, +) *IntCmd { + args := make([]interface{}, 0, 7) + args = append(args, "xtrim", key, strategy) + if approx { + args = append(args, "~") + } + args = append(args, threshold) + if limit > 0 { + args = append(args, "limit", limit) + } + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// XTrimMaxLen No `~` rules are used, `limit` cannot be used. 
+// cmd: XTRIM key MAXLEN maxLen +func (c cmdable) XTrimMaxLen(ctx context.Context, key string, maxLen int64) *IntCmd { + return c.xTrim(ctx, key, "maxlen", false, maxLen, 0) +} + +func (c cmdable) XTrimMaxLenApprox(ctx context.Context, key string, maxLen, limit int64) *IntCmd { + return c.xTrim(ctx, key, "maxlen", true, maxLen, limit) +} + +func (c cmdable) XTrimMinID(ctx context.Context, key string, minID string) *IntCmd { + return c.xTrim(ctx, key, "minid", false, minID, 0) +} + +func (c cmdable) XTrimMinIDApprox(ctx context.Context, key string, minID string, limit int64) *IntCmd { + return c.xTrim(ctx, key, "minid", true, minID, limit) +} + +func (c cmdable) XInfoConsumers(ctx context.Context, key string, group string) *XInfoConsumersCmd { + cmd := NewXInfoConsumersCmd(ctx, key, group) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) XInfoGroups(ctx context.Context, key string) *XInfoGroupsCmd { + cmd := NewXInfoGroupsCmd(ctx, key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) XInfoStream(ctx context.Context, key string) *XInfoStreamCmd { + cmd := NewXInfoStreamCmd(ctx, key) + _ = c(ctx, cmd) + return cmd +} + +// XInfoStreamFull XINFO STREAM FULL [COUNT count] +// redis-server >= 6.0. +func (c cmdable) XInfoStreamFull(ctx context.Context, key string, count int) *XInfoStreamFullCmd { + args := make([]interface{}, 0, 6) + args = append(args, "xinfo", "stream", key, "full") + if count > 0 { + args = append(args, "count", count) + } + cmd := NewXInfoStreamFullCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +// Z represents sorted set member. +type Z struct { + Score float64 + Member interface{} +} + +// ZWithKey represents sorted set member including the name of the key where it was popped. +type ZWithKey struct { + Z + Key string +} + +// ZStore is used as an arg to ZInter/ZInterStore and ZUnion/ZUnionStore. +type ZStore struct { + Keys []string + Weights []float64 + // Can be SUM, MIN or MAX. + Aggregate string +} + +func (z ZStore) len() (n int) { + n = len(z.Keys) + if len(z.Weights) > 0 { + n += 1 + len(z.Weights) + } + if z.Aggregate != "" { + n += 2 + } + return n +} + +func (z ZStore) appendArgs(args []interface{}) []interface{} { + for _, key := range z.Keys { + args = append(args, key) + } + if len(z.Weights) > 0 { + args = append(args, "weights") + for _, weights := range z.Weights { + args = append(args, weights) + } + } + if z.Aggregate != "" { + args = append(args, "aggregate", z.Aggregate) + } + return args +} + +// BZPopMax Redis `BZPOPMAX key [key ...] timeout` command. +func (c cmdable) BZPopMax(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd { + args := make([]interface{}, 1+len(keys)+1) + args[0] = "bzpopmax" + for i, key := range keys { + args[1+i] = key + } + args[len(args)-1] = formatSec(ctx, timeout) + cmd := NewZWithKeyCmd(ctx, args...) + cmd.setReadTimeout(timeout) + _ = c(ctx, cmd) + return cmd +} + +// BZPopMin Redis `BZPOPMIN key [key ...] timeout` command. +func (c cmdable) BZPopMin(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd { + args := make([]interface{}, 1+len(keys)+1) + args[0] = "bzpopmin" + for i, key := range keys { + args[1+i] = key + } + args[len(args)-1] = formatSec(ctx, timeout) + cmd := NewZWithKeyCmd(ctx, args...) + cmd.setReadTimeout(timeout) + _ = c(ctx, cmd) + return cmd +} + +// BZMPop is the blocking variant of ZMPOP. 
+// When any of the sorted sets contains elements, this command behaves exactly like ZMPOP. +// When all sorted sets are empty, Redis will block the connection until another client adds members to one of the keys or until the timeout elapses. +// A timeout of zero can be used to block indefinitely. +// example: client.BZMPop(ctx, 0,"max", 1, "set") +func (c cmdable) BZMPop(ctx context.Context, timeout time.Duration, order string, count int64, keys ...string) *ZSliceWithKeyCmd { + args := make([]interface{}, 3+len(keys), 6+len(keys)) + args[0] = "bzmpop" + args[1] = formatSec(ctx, timeout) + args[2] = len(keys) + for i, key := range keys { + args[3+i] = key + } + args = append(args, strings.ToLower(order), "count", count) + cmd := NewZSliceWithKeyCmd(ctx, args...) + cmd.setReadTimeout(timeout) + _ = c(ctx, cmd) + return cmd +} + +// ZAddArgs WARN: The GT, LT and NX options are mutually exclusive. +type ZAddArgs struct { + NX bool + XX bool + LT bool + GT bool + Ch bool + Members []Z +} + +func (c cmdable) zAddArgs(key string, args ZAddArgs, incr bool) []interface{} { + a := make([]interface{}, 0, 6+2*len(args.Members)) + a = append(a, "zadd", key) + + // The GT, LT and NX options are mutually exclusive. + if args.NX { + a = append(a, "nx") + } else { + if args.XX { + a = append(a, "xx") + } + if args.GT { + a = append(a, "gt") + } else if args.LT { + a = append(a, "lt") + } + } + if args.Ch { + a = append(a, "ch") + } + if incr { + a = append(a, "incr") + } + for _, m := range args.Members { + a = append(a, m.Score) + a = append(a, m.Member) + } + return a +} + +func (c cmdable) ZAddArgs(ctx context.Context, key string, args ZAddArgs) *IntCmd { + cmd := NewIntCmd(ctx, c.zAddArgs(key, args, false)...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZAddArgsIncr(ctx context.Context, key string, args ZAddArgs) *FloatCmd { + cmd := NewFloatCmd(ctx, c.zAddArgs(key, args, true)...) + _ = c(ctx, cmd) + return cmd +} + +// ZAdd Redis `ZADD key score member [score member ...]` command. +func (c cmdable) ZAdd(ctx context.Context, key string, members ...Z) *IntCmd { + return c.ZAddArgs(ctx, key, ZAddArgs{ + Members: members, + }) +} + +// ZAddLT Redis `ZADD key LT score member [score member ...]` command. +func (c cmdable) ZAddLT(ctx context.Context, key string, members ...Z) *IntCmd { + return c.ZAddArgs(ctx, key, ZAddArgs{ + LT: true, + Members: members, + }) +} + +// ZAddGT Redis `ZADD key GT score member [score member ...]` command. +func (c cmdable) ZAddGT(ctx context.Context, key string, members ...Z) *IntCmd { + return c.ZAddArgs(ctx, key, ZAddArgs{ + GT: true, + Members: members, + }) +} + +// ZAddNX Redis `ZADD key NX score member [score member ...]` command. +func (c cmdable) ZAddNX(ctx context.Context, key string, members ...Z) *IntCmd { + return c.ZAddArgs(ctx, key, ZAddArgs{ + NX: true, + Members: members, + }) +} + +// ZAddXX Redis `ZADD key XX score member [score member ...]` command. 
+func (c cmdable) ZAddXX(ctx context.Context, key string, members ...Z) *IntCmd { + return c.ZAddArgs(ctx, key, ZAddArgs{ + XX: true, + Members: members, + }) +} + +func (c cmdable) ZCard(ctx context.Context, key string) *IntCmd { + cmd := NewIntCmd(ctx, "zcard", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZCount(ctx context.Context, key, min, max string) *IntCmd { + cmd := NewIntCmd(ctx, "zcount", key, min, max) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZLexCount(ctx context.Context, key, min, max string) *IntCmd { + cmd := NewIntCmd(ctx, "zlexcount", key, min, max) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZIncrBy(ctx context.Context, key string, increment float64, member string) *FloatCmd { + cmd := NewFloatCmd(ctx, "zincrby", key, increment, member) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZInterStore(ctx context.Context, destination string, store *ZStore) *IntCmd { + args := make([]interface{}, 0, 3+store.len()) + args = append(args, "zinterstore", destination, len(store.Keys)) + args = store.appendArgs(args) + cmd := NewIntCmd(ctx, args...) + cmd.SetFirstKeyPos(3) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZInter(ctx context.Context, store *ZStore) *StringSliceCmd { + args := make([]interface{}, 0, 2+store.len()) + args = append(args, "zinter", len(store.Keys)) + args = store.appendArgs(args) + cmd := NewStringSliceCmd(ctx, args...) + cmd.SetFirstKeyPos(2) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZInterWithScores(ctx context.Context, store *ZStore) *ZSliceCmd { + args := make([]interface{}, 0, 3+store.len()) + args = append(args, "zinter", len(store.Keys)) + args = store.appendArgs(args) + args = append(args, "withscores") + cmd := NewZSliceCmd(ctx, args...) + cmd.SetFirstKeyPos(2) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZInterCard(ctx context.Context, limit int64, keys ...string) *IntCmd { + args := make([]interface{}, 4+len(keys)) + args[0] = "zintercard" + numkeys := int64(0) + for i, key := range keys { + args[2+i] = key + numkeys++ + } + args[1] = numkeys + args[2+numkeys] = "limit" + args[3+numkeys] = limit + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// ZMPop Pops one or more elements with the highest or lowest score from the first non-empty sorted set key from the list of provided key names. +// direction: "max" (highest score) or "min" (lowest score), count: > 0 +// example: client.ZMPop(ctx, "max", 5, "set1", "set2") +func (c cmdable) ZMPop(ctx context.Context, order string, count int64, keys ...string) *ZSliceWithKeyCmd { + args := make([]interface{}, 2+len(keys), 5+len(keys)) + args[0] = "zmpop" + args[1] = len(keys) + for i, key := range keys { + args[2+i] = key + } + args = append(args, strings.ToLower(order), "count", count) + cmd := NewZSliceWithKeyCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZMScore(ctx context.Context, key string, members ...string) *FloatSliceCmd { + args := make([]interface{}, 2+len(members)) + args[0] = "zmscore" + args[1] = key + for i, member := range members { + args[2+i] = member + } + cmd := NewFloatSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZPopMax(ctx context.Context, key string, count ...int64) *ZSliceCmd { + args := []interface{}{ + "zpopmax", + key, + } + + switch len(count) { + case 0: + break + case 1: + args = append(args, count[0]) + default: + panic("too many arguments") + } + + cmd := NewZSliceCmd(ctx, args...) 
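ZAddArgs above makes the mutually exclusive NX/GT/LT flags explicit instead of taking a free-form option list; GT and LT need redis-server >= 6.2. A sketch with illustrative key and member names:

package redisexample

import (
	"context"

	"github.com/redis/go-redis/v9"
)

// BumpScore is a hypothetical example of ZAdd and the ZAddArgs flags above.
func BumpScore(ctx context.Context, rdb *redis.Client) (int64, error) {
	// Plain ZADD of two members.
	if err := rdb.ZAdd(ctx, "scores",
		redis.Z{Score: 10, Member: "alice"},
		redis.Z{Score: 20, Member: "bob"},
	).Err(); err != nil {
		return 0, err
	}

	// GT only updates a member when the new score is greater than the stored
	// one; CH makes the reply count changed (not just added) members.
	return rdb.ZAddArgs(ctx, "scores", redis.ZAddArgs{
		GT:      true,
		Ch:      true,
		Members: []redis.Z{{Score: 15, Member: "alice"}},
	}).Result()
}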
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZPopMin(ctx context.Context, key string, count ...int64) *ZSliceCmd {
+	args := []interface{}{
+		"zpopmin",
+		key,
+	}
+
+	switch len(count) {
+	case 0:
+		break
+	case 1:
+		args = append(args, count[0])
+	default:
+		panic("too many arguments")
+	}
+
+	cmd := NewZSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// ZRangeArgs is all the options of the ZRange command.
+// In version 6.2.0 and higher, it can replace the following commands:
+//
+//	ZREVRANGE,
+//	ZRANGEBYSCORE,
+//	ZREVRANGEBYSCORE,
+//	ZRANGEBYLEX,
+//	ZREVRANGEBYLEX.
+//
+// Please pay attention to your redis-server version.
+//
+// Rev, ByScore, ByLex and Offset+Count options require redis-server 6.2.0 and higher.
+type ZRangeArgs struct {
+	Key string
+
+	// When the ByScore option is provided, an open (exclusive) interval can be set.
+	// By default, the score intervals specified by Start and Stop are closed (inclusive).
+	// It is similar to the deprecated (6.2.0+) ZRangeByScore command.
+	// For example:
+	//	ZRangeArgs{
+	//		Key: "example-key",
+	//		Start: "(3",
+	//		Stop: 8,
+	//		ByScore: true,
+	//	}
+	//	cmd: "ZRange example-key (3 8 ByScore" (3 < score <= 8).
+	//
+	// For the ByLex option, it is similar to the deprecated (6.2.0+) ZRangeByLex command.
+	// You can set the Start and Stop options as follows:
+	//	ZRangeArgs{
+	//		Key: "example-key",
+	//		Start: "[abc",
+	//		Stop: "(def",
+	//		ByLex: true,
+	//	}
+	//	cmd: "ZRange example-key [abc (def ByLex"
+	//
+	// For normal cases (ByScore==false && ByLex==false), Start and Stop should be set to the index range (int).
+	// You can read the documentation for more information: https://redis.io/commands/zrange
+	Start interface{}
+	Stop  interface{}
+
+	// The ByScore and ByLex options are mutually exclusive.
+	ByScore bool
+	ByLex   bool
+
+	Rev bool
+
+	// limit offset count.
+	Offset int64
+	Count  int64
+}
+
+func (z ZRangeArgs) appendArgs(args []interface{}) []interface{} {
+	// For Rev+ByScore/ByLex, we need to adjust the position of Start and Stop.
+	if z.Rev && (z.ByScore || z.ByLex) {
+		args = append(args, z.Key, z.Stop, z.Start)
+	} else {
+		args = append(args, z.Key, z.Start, z.Stop)
+	}
+
+	if z.ByScore {
+		args = append(args, "byscore")
+	} else if z.ByLex {
+		args = append(args, "bylex")
+	}
+	if z.Rev {
+		args = append(args, "rev")
+	}
+	if z.Offset != 0 || z.Count != 0 {
+		args = append(args, "limit", z.Offset, z.Count)
+	}
+	return args
+}
+
+func (c cmdable) ZRangeArgs(ctx context.Context, z ZRangeArgs) *StringSliceCmd {
+	args := make([]interface{}, 0, 9)
+	args = append(args, "zrange")
+	args = z.appendArgs(args)
+	cmd := NewStringSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZRangeArgsWithScores(ctx context.Context, z ZRangeArgs) *ZSliceCmd {
+	args := make([]interface{}, 0, 10)
+	args = append(args, "zrange")
+	args = z.appendArgs(args)
+	args = append(args, "withscores")
+	cmd := NewZSliceCmd(ctx, args...)
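// Illustrative usage sketch for ZRangeArgs as documented above, assuming a connected
// *redis.Client named rdb; the key name is a placeholder. Because Rev is combined
// with ByScore, appendArgs swaps Start and Stop, so this sends
// "ZRANGE example-key 8 (3 BYSCORE REV LIMIT 0 5" (3 < score <= 8, highest score
// first, at most 5 members):
//
//	members, err := rdb.ZRangeArgs(ctx, redis.ZRangeArgs{
//		Key:     "example-key",
//		Start:   "(3",
//		Stop:    8,
//		ByScore: true,
//		Rev:     true,
//		Count:   5,
//	}).Result()
//	if err != nil {
//		panic(err)
//	}
//	fmt.Println(members)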
+ _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd { + return c.ZRangeArgs(ctx, ZRangeArgs{ + Key: key, + Start: start, + Stop: stop, + }) +} + +func (c cmdable) ZRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd { + return c.ZRangeArgsWithScores(ctx, ZRangeArgs{ + Key: key, + Start: start, + Stop: stop, + }) +} + +type ZRangeBy struct { + Min, Max string + Offset, Count int64 +} + +func (c cmdable) zRangeBy(ctx context.Context, zcmd, key string, opt *ZRangeBy, withScores bool) *StringSliceCmd { + args := []interface{}{zcmd, key, opt.Min, opt.Max} + if withScores { + args = append(args, "withscores") + } + if opt.Offset != 0 || opt.Count != 0 { + args = append( + args, + "limit", + opt.Offset, + opt.Count, + ) + } + cmd := NewStringSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd { + return c.zRangeBy(ctx, "zrangebyscore", key, opt, false) +} + +func (c cmdable) ZRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd { + return c.zRangeBy(ctx, "zrangebylex", key, opt, false) +} + +func (c cmdable) ZRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd { + args := []interface{}{"zrangebyscore", key, opt.Min, opt.Max, "withscores"} + if opt.Offset != 0 || opt.Count != 0 { + args = append( + args, + "limit", + opt.Offset, + opt.Count, + ) + } + cmd := NewZSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZRangeStore(ctx context.Context, dst string, z ZRangeArgs) *IntCmd { + args := make([]interface{}, 0, 10) + args = append(args, "zrangestore", dst) + args = z.appendArgs(args) + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZRank(ctx context.Context, key, member string) *IntCmd { + cmd := NewIntCmd(ctx, "zrank", key, member) + _ = c(ctx, cmd) + return cmd +} + +// ZRankWithScore according to the Redis documentation, if member does not exist +// in the sorted set or key does not exist, it will return a redis.Nil error. +func (c cmdable) ZRankWithScore(ctx context.Context, key, member string) *RankWithScoreCmd { + cmd := NewRankWithScoreCmd(ctx, "zrank", key, member, "withscore") + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZRem(ctx context.Context, key string, members ...interface{}) *IntCmd { + args := make([]interface{}, 2, 2+len(members)) + args[0] = "zrem" + args[1] = key + args = appendArgs(args, members) + cmd := NewIntCmd(ctx, args...) 
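// Illustrative usage sketch for the ZRangeBy helpers above, assuming a connected
// *redis.Client named rdb; the key name is a placeholder. Min and Max are strings,
// so exclusive bounds ("(5") and infinities are expressible, and Offset/Count map to
// LIMIT. This sends "ZRANGEBYSCORE scores -inf +inf WITHSCORES LIMIT 0 10":
//
//	zs, err := rdb.ZRangeByScoreWithScores(ctx, "scores", &redis.ZRangeBy{
//		Min:   "-inf",
//		Max:   "+inf",
//		Count: 10,
//	}).Result()
//	if err != nil {
//		panic(err)
//	}
//	for _, z := range zs {
//		fmt.Println(z.Member, z.Score)
//	}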
+ _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZRemRangeByRank(ctx context.Context, key string, start, stop int64) *IntCmd { + cmd := NewIntCmd( + ctx, + "zremrangebyrank", + key, + start, + stop, + ) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZRemRangeByScore(ctx context.Context, key, min, max string) *IntCmd { + cmd := NewIntCmd(ctx, "zremrangebyscore", key, min, max) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZRemRangeByLex(ctx context.Context, key, min, max string) *IntCmd { + cmd := NewIntCmd(ctx, "zremrangebylex", key, min, max) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZRevRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, "zrevrange", key, start, stop) + _ = c(ctx, cmd) + return cmd +} + +// ZRevRangeWithScores according to the Redis documentation, if member does not exist +// in the sorted set or key does not exist, it will return a redis.Nil error. +func (c cmdable) ZRevRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd { + cmd := NewZSliceCmd(ctx, "zrevrange", key, start, stop, "withscores") + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) zRevRangeBy(ctx context.Context, zcmd, key string, opt *ZRangeBy) *StringSliceCmd { + args := []interface{}{zcmd, key, opt.Max, opt.Min} + if opt.Offset != 0 || opt.Count != 0 { + args = append( + args, + "limit", + opt.Offset, + opt.Count, + ) + } + cmd := NewStringSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZRevRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd { + return c.zRevRangeBy(ctx, "zrevrangebyscore", key, opt) +} + +func (c cmdable) ZRevRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd { + return c.zRevRangeBy(ctx, "zrevrangebylex", key, opt) +} + +func (c cmdable) ZRevRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd { + args := []interface{}{"zrevrangebyscore", key, opt.Max, opt.Min, "withscores"} + if opt.Offset != 0 || opt.Count != 0 { + args = append( + args, + "limit", + opt.Offset, + opt.Count, + ) + } + cmd := NewZSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZRevRank(ctx context.Context, key, member string) *IntCmd { + cmd := NewIntCmd(ctx, "zrevrank", key, member) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZRevRankWithScore(ctx context.Context, key, member string) *RankWithScoreCmd { + cmd := NewRankWithScoreCmd(ctx, "zrevrank", key, member, "withscore") + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZScore(ctx context.Context, key, member string) *FloatCmd { + cmd := NewFloatCmd(ctx, "zscore", key, member) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZUnion(ctx context.Context, store ZStore) *StringSliceCmd { + args := make([]interface{}, 0, 2+store.len()) + args = append(args, "zunion", len(store.Keys)) + args = store.appendArgs(args) + cmd := NewStringSliceCmd(ctx, args...) + cmd.SetFirstKeyPos(2) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZUnionWithScores(ctx context.Context, store ZStore) *ZSliceCmd { + args := make([]interface{}, 0, 3+store.len()) + args = append(args, "zunion", len(store.Keys)) + args = store.appendArgs(args) + args = append(args, "withscores") + cmd := NewZSliceCmd(ctx, args...) 
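// Illustrative usage sketch for the ZUNION wrapper above, assuming a connected
// *redis.Client named rdb; the key names and weights are placeholders. ZStore
// (defined elsewhere in this file) carries the keys plus optional WEIGHTS and
// AGGREGATE options, so this sends "ZUNION 2 daily weekly WEIGHTS 1 2 WITHSCORES":
//
//	zs, err := rdb.ZUnionWithScores(ctx, redis.ZStore{
//		Keys:    []string{"daily", "weekly"},
//		Weights: []float64{1, 2},
//	}).Result()
//	if err != nil {
//		panic(err)
//	}
//	for _, z := range zs {
//		fmt.Println(z.Member, z.Score)
//	}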
+ cmd.SetFirstKeyPos(2) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZUnionStore(ctx context.Context, dest string, store *ZStore) *IntCmd { + args := make([]interface{}, 0, 3+store.len()) + args = append(args, "zunionstore", dest, len(store.Keys)) + args = store.appendArgs(args) + cmd := NewIntCmd(ctx, args...) + cmd.SetFirstKeyPos(3) + _ = c(ctx, cmd) + return cmd +} + +// ZRandMember redis-server version >= 6.2.0. +func (c cmdable) ZRandMember(ctx context.Context, key string, count int) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, "zrandmember", key, count) + _ = c(ctx, cmd) + return cmd +} + +// ZRandMemberWithScores redis-server version >= 6.2.0. +func (c cmdable) ZRandMemberWithScores(ctx context.Context, key string, count int) *ZSliceCmd { + cmd := NewZSliceCmd(ctx, "zrandmember", key, count, "withscores") + _ = c(ctx, cmd) + return cmd +} + +// ZDiff redis-server version >= 6.2.0. +func (c cmdable) ZDiff(ctx context.Context, keys ...string) *StringSliceCmd { + args := make([]interface{}, 2+len(keys)) + args[0] = "zdiff" + args[1] = len(keys) + for i, key := range keys { + args[i+2] = key + } + + cmd := NewStringSliceCmd(ctx, args...) + cmd.SetFirstKeyPos(2) + _ = c(ctx, cmd) + return cmd +} + +// ZDiffWithScores redis-server version >= 6.2.0. +func (c cmdable) ZDiffWithScores(ctx context.Context, keys ...string) *ZSliceCmd { + args := make([]interface{}, 3+len(keys)) + args[0] = "zdiff" + args[1] = len(keys) + for i, key := range keys { + args[i+2] = key + } + args[len(keys)+2] = "withscores" + + cmd := NewZSliceCmd(ctx, args...) + cmd.SetFirstKeyPos(2) + _ = c(ctx, cmd) + return cmd +} + +// ZDiffStore redis-server version >=6.2.0. +func (c cmdable) ZDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd { + args := make([]interface{}, 0, 3+len(keys)) + args = append(args, "zdiffstore", destination, len(keys)) + for _, key := range keys { + args = append(args, key) + } + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c cmdable) PFAdd(ctx context.Context, key string, els ...interface{}) *IntCmd { + args := make([]interface{}, 2, 2+len(els)) + args[0] = "pfadd" + args[1] = key + args = appendArgs(args, els) + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) PFCount(ctx context.Context, keys ...string) *IntCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "pfcount" + for i, key := range keys { + args[1+i] = key + } + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) PFMerge(ctx context.Context, dest string, keys ...string) *StatusCmd { + args := make([]interface{}, 2+len(keys)) + args[0] = "pfmerge" + args[1] = dest + for i, key := range keys { + args[2+i] = key + } + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c cmdable) BgRewriteAOF(ctx context.Context) *StatusCmd { + cmd := NewStatusCmd(ctx, "bgrewriteaof") + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) BgSave(ctx context.Context) *StatusCmd { + cmd := NewStatusCmd(ctx, "bgsave") + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ClientKill(ctx context.Context, ipPort string) *StatusCmd { + cmd := NewStatusCmd(ctx, "client", "kill", ipPort) + _ = c(ctx, cmd) + return cmd +} + +// ClientKillByFilter is new style syntax, while the ClientKill is old +// +// CLIENT KILL