diff --git a/.formatter.exs b/.formatter.exs index 6634694..2d3b1c3 100644 --- a/.formatter.exs +++ b/.formatter.exs @@ -5,6 +5,7 @@ locals_without_parens = [ ] [ + import_deps: [:nebulex], inputs: ["{mix,.formatter}.exs", "{config,lib,test}/**/*.{ex,exs}"], line_length: 100, locals_without_parens: locals_without_parens, diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index aa96f5b..6d43461 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -11,7 +11,7 @@ on: jobs: nebulex_test: name: >- - NebulexRedisAdapter Test (Elixir ${{ matrix.elixir }} / OTP ${{ matrix.otp }} / + Nebulex.Adapters.Redis Test (Elixir ${{ matrix.elixir }} / OTP ${{ matrix.otp }} / OS ${{ matrix.os }}) runs-on: ${{ matrix.os }} @@ -33,9 +33,6 @@ jobs: - elixir: 1.15.x otp: 25.x os: 'ubuntu-latest' - - elixir: 1.14.x - otp: 23.x - os: 'ubuntu-20.04' env: GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}' @@ -94,13 +91,13 @@ jobs: - name: Run tests run: | epmd -daemon - mix test --trace + mix test --exclude nebulex_test if: ${{ !matrix.coverage }} - name: Run tests with coverage run: | epmd -daemon - mix coveralls.github + mix coveralls.github --exclude nebulex_test if: ${{ matrix.coverage }} - name: Restore PLT Cache diff --git a/CHANGELOG.md b/CHANGELOG.md index 6ad077f..caf0dae 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,16 +4,6 @@ All notable changes to this project will be documented in this file. This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). -## [v2.4.2](https://github.com/cabol/nebulex_redis_adapter/tree/v2.4.2) (2024-11-01) - -[Full Changelog](https://github.com/cabol/nebulex_redis_adapter/compare/v2.4.1...v2.4.2) - -**Closed issues:** - -- `NebulexRedisAdapter.RedisCluster.Keyslot` incorrectly computes slot for - hash tags. 
- [#64](https://github.com/cabol/nebulex_redis_adapter/issues/64) - ## [v2.4.1](https://github.com/cabol/nebulex_redis_adapter/tree/v2.4.1) (2024-09-01) [Full Changelog](https://github.com/cabol/nebulex_redis_adapter/compare/v2.4.0...v2.4.1) diff --git a/README.md b/README.md index f5b3a82..c576b11 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -# NebulexRedisAdapter +# Nebulex.Adapters.Redis > Nebulex adapter for Redis (including [Redis Cluster][redis_cluster] support). ![CI](https://github.com/cabol/nebulex_redis_adapter/workflows/CI/badge.svg) @@ -15,7 +15,7 @@ next sections. See also [online documentation][nbx_redis_adapter] and [Redis cache example][nbx_redis_example]. -[nbx_redis_adapter]: http://hexdocs.pm/nebulex_redis_adapter/NebulexRedisAdapter.html +[nbx_redis_adapter]: http://hexdocs.pm/nebulex_redis_adapter/Nebulex.Adapters.Redis.html [nbx_redis_example]: https://github.com/cabol/nebulex_examples/tree/master/redis_cache [redis_cluster]: https://redis.io/topics/cluster-tutorial @@ -26,9 +26,9 @@ Add `:nebulex_redis_adapter` to your list of dependencies in `mix.exs`: ```elixir defp deps do [ - {:nebulex_redis_adapter, "~> 2.3"}, - {:crc, "~> 0.10"}, #=> Needed when using Redis Cluster - {:jchash, "~> 0.1.4"} #=> Needed when using consistent-hashing + {:nebulex_redis_adapter, "~> 3.0"}, + {:crc, "~> 0.10"}, #=> Needed when using `:redis_cluster` mode + {:ex_hash_ring, "~> 6.0"} #=> Needed when using `:client_side_cluster` mode ] end ``` @@ -38,8 +38,8 @@ needed ones. For example: * `:crc` - Required when using the adapter in mode `:redis_cluster`. See [Redis Cluster][redis_cluster]. - * `:jchash` - Required if you want to use consistent-hashing when using the - adapter in mode `:client_side_cluster`. + * `:ex_hash_ring` - Required when using the adapter in mode + `:client_side_cluster`. Then run `mix deps.get` to fetch the dependencies. 
@@ -51,7 +51,7 @@ After installing, we can define our cache to use Redis adapter as follows: defmodule MyApp.RedisCache do use Nebulex.Cache, otp_app: :my_app, - adapter: NebulexRedisAdapter + adapter: Nebulex.Adapters.Redis end ``` @@ -69,7 +69,7 @@ config :my_app, MyApp.RedisCache, Since this adapter is implemented by means of `Redix`, it inherits the same options, including regular Redis options and connection options as well. For -more information about the options, please check out `NebulexRedisAdapter` +more information about the options, please check out `Nebulex.Adapters.Redis` module and also [Redix](https://github.com/whatyouhide/redix). See also [Redis cache example][nbx_redis_example]. @@ -77,7 +77,7 @@ See also [Redis cache example][nbx_redis_example]. ## Distributed Caching There are different ways to support distributed caching when using -**NebulexRedisAdapter**. +**Nebulex.Adapters.Redis**. ### Redis Cluster @@ -95,7 +95,7 @@ Then we can define our cache which will use **Redis Cluster**: defmodule MyApp.RedisClusterCache do use Nebulex.Cache, otp_app: :my_app, - adapter: NebulexRedisAdapter + adapter: Nebulex.Adapters.Redis end ``` @@ -127,12 +127,12 @@ The pool of connections to the different master nodes is automatically configured by the adapter once it gets the cluster slots info. > This one could be the easiest and recommended way for distributed caching - using Redis and **NebulexRedisAdapter**. + using Redis and **Nebulex.Adapters.Redis**. -### Client-side Cluster based on Sharding +### Client-side Cluster -**NebulexRedisAdapter** also brings with a simple client-side cluster -implementation based on Sharding distribution model. +**Nebulex.Adapters.Redis** also brings with a simple client-side cluster +implementation based on sharding distribution model. 
We define our cache normally: @@ -140,7 +140,7 @@ We define our cache normally: defmodule MyApp.ClusteredCache do use Nebulex.Cache, otp_app: :my_app, - adapter: NebulexRedisAdapter + adapter: Nebulex.Adapters.Redis end ``` @@ -182,70 +182,11 @@ config :my_app, MyApp.ClusteredCache, ] ``` -By default, the adapter uses `NebulexRedisAdapter.ClientCluster.Keyslot` for the -keyslot. Besides, if `:jchash` is defined as dependency, the adapter will use -consistent-hashing automatically. - -> **NOTE:** It is highly recommended to define the `:jchash` dependency - when using the adapter in `:client_side_cluster` mode. - -However, you can also provide your own implementation by implementing the -`Nebulex.Adapter.Keyslot` and set it into the `:keyslot` option. For example: - -```elixir -defmodule MyApp.ClusteredCache.Keyslot do - use Nebulex.Adapter.Keyslot - - @impl true - def hash_slot(key, range) do - # your implementation goes here - end -end -``` - -And the config: - -```elixir -config :my_app, MyApp.ClusteredCache, - # Enable client-side cluster mode - mode: :client_side_cluster, - - client_side_cluster: [ - # Provided Keyslot implementation - keyslot: MyApp.ClusteredCache.Keyslot, - - # Nodes config (each node has its own options) - nodes: [ - ... - ] - ] -``` - -### Using `Nebulex.Adapters.Partitioned` - -Another simple option is to use the `Nebulex.Adapters.Partitioned` and set as -local cache the `NebulexRedisAdapter`. The idea here is each Elixir node running -the distributed cache (`Nebulex.Adapters.Partitioned`) will have as local -backend or cache a Redis instance (handled by `NebulexRedisAdapter`). 
- - -This example shows how the setup a distributed cache using -`Nebulex.Adapters.Partitioned` and `NebulexRedisAdapter`: - -```elixir -defmodule MyApp.DistributedCache do - use Nebulex.Cache, - otp_app: :my_app, - adapter: Nebulex.Adapters.Partitioned, - primary_storage_adapter: NebulexRedisAdapter -end -``` - ### Using a Redis Proxy The other option is to use a proxy, like [Envoy proxy][envoy] or [Twemproxy][twemproxy] on top of Redis. In this case, the proxy does the -distribution work, and from the adparter's side (**NebulexRedisAdapter**), +distribution work, and from the adparter's side (**Nebulex.Adapters.Redis**), it would be only configuration. Instead of connect the adapter against the Redis nodes, we connect it against the proxy nodes, this means, in the config, we setup the pool with the host and port pointing to the proxy. @@ -253,30 +194,29 @@ we setup the pool with the host and port pointing to the proxy. [envoy]: https://www.envoyproxy.io/ [twemproxy]: https://github.com/twitter/twemproxy -## Running Redis commands and/or pipelines +## Using the adapter as a Redis client -Since `NebulexRedisAdapter` works on top of `Redix` and provides features like -connection pools and "Redis Cluster" support, it may be seen also as a sort of -Redis client, but it is meant to be used mainly with the Nebulex cache API. -However, Redis API is quite extensive and there are a lot of useful commands -we may want to run taking advantage of the `NebulexRedisAdapter` features. -Therefore, the adapter injects two additional/extended functions to the -defined cache: `command!/2` and `pipeline!/2`. +Since the Redis adapter works on top of `Redix` and provides features like +connection pools, "Redis Cluster", etc., it may also work as a Redis client. +The Redis API is quite extensive, and there are many useful commands we may +want to run, leveraging the Redis adapter features. Therefore, the adapter +provides additional functions to do so. 
```elixir -iex> MyCache.command!(["LPUSH", "mylist", "world"], key: "mylist") +iex> conn = MyCache.fetch_conn!() +iex> Redix.command!(conn, ["LPUSH", "mylist", "world"]) 1 -iex> MyCache.command!(["LPUSH", "mylist", "hello"], key: "mylist") +iex> Redix.command!(conn, ["LPUSH", "mylist", "hello"]) 2 -iex> MyCache.command!(["LRANGE", "mylist", "0", "-1"], key: "mylist") +iex> Redix.command!(conn, ["LRANGE", "mylist", "0", "-1"]) ["hello", "world"] -iex> [ +iex> conn = MyCache.fetch_conn!(key: "mylist") +iex> Redix.pipeline!(conn, [ ...> ["LPUSH", "mylist", "world"], ...> ["LPUSH", "mylist", "hello"], ...> ["LRANGE", "mylist", "0", "-1"] -...> ] -...> |> cache.pipeline!(key: "mylist") +...> ]) [1, 2, ["hello", "world"]] ``` @@ -287,8 +227,8 @@ you have to pass the cache name explicitly. ## Testing -To run the **NebulexRedisAdapter** tests you will have to have Redis running -locally. **NebulexRedisAdapter** requires a complex setup for running tests +To run the **Nebulex.Adapters.Redis** tests you will have to have Redis running +locally. **Nebulex.Adapters.Redis** requires a complex setup for running tests (since it needs a few instances running, for standalone, cluster and Redis Cluster). For this reason, there is a [docker-compose.yml](docker-compose.yml) file in the repo so that you can use [Docker][docker] and @@ -302,7 +242,7 @@ $ docker-compose up [docker]: https://www.docker.com/ [docker_compose]: https://docs.docker.com/compose/ -Since `NebulexRedisAdapter` uses the support modules and shared tests +Since `Nebulex.Adapters.Redis` uses the support modules and shared tests from `Nebulex` and by default its test folder is not included in the Hex dependency, the following steps are required for running the tests. @@ -373,4 +313,4 @@ all checks run successfully. Copyright (c) 2018, Carlos BolaƱos. -NebulexRedisAdapter source code is licensed under the [MIT License](LICENSE). +Nebulex.Adapters.Redis source code is licensed under the [MIT License](LICENSE). 
diff --git a/config/test.exs b/config/test.exs index 6be93c4..79db341 100644 --- a/config/test.exs +++ b/config/test.exs @@ -1,14 +1,14 @@ import Config # Standalone mode -config :nebulex_redis_adapter, NebulexRedisAdapter.TestCache.Standalone, +config :nebulex_redis_adapter, Nebulex.Adapters.Redis.TestCache.Standalone, conn_opts: [ host: "127.0.0.1", port: 6379 ] -# Cluster mode -config :nebulex_redis_adapter, NebulexRedisAdapter.TestCache.ClientCluster, +# Client-side cluster mode +config :nebulex_redis_adapter, Nebulex.Adapters.Redis.TestCache.ClientSideCluster, mode: :client_side_cluster, client_side_cluster: [ nodes: [ @@ -34,7 +34,7 @@ config :nebulex_redis_adapter, NebulexRedisAdapter.TestCache.ClientCluster, ] # Redis Cluster mode (with Redis >= 7) -config :nebulex_redis_adapter, NebulexRedisAdapter.TestCache.RedisCluster, +config :nebulex_redis_adapter, Nebulex.Adapters.Redis.TestCache.RedisCluster, mode: :redis_cluster, redis_cluster: [ # Configuration endpoints @@ -51,7 +51,7 @@ config :nebulex_redis_adapter, NebulexRedisAdapter.TestCache.RedisCluster, ] # Redis Cluster mode with errors -config :nebulex_redis_adapter, NebulexRedisAdapter.TestCache.RedisClusterConnError, +config :nebulex_redis_adapter, Nebulex.Adapters.Redis.TestCache.RedisClusterConnError, mode: :redis_cluster, pool_size: 2, redis_cluster: [ diff --git a/coveralls.json b/coveralls.json index f4f0f80..cc9bdd9 100644 --- a/coveralls.json +++ b/coveralls.json @@ -1,8 +1,10 @@ { "skip_files": [ - "lib/nebulex_redis_adapter/exceptions.ex", - "lib/nebulex_redis_adapter/helpers.ex", - "lib/nebulex_redis_adapter/serializer.ex", + "lib/nebulex/adapters/redis/helpers.ex", + "lib/nebulex/adapters/redis/serializer.ex", + "lib/nebulex/adapters/redis/serializer/serializable.ex", + "lib/nebulex/adapters/redis/options.ex", + "lib/nebulex/adapters/redis/error_formatter.ex", "test/*" ], "coverage_options": { diff --git a/lib/nebulex/adapters/redis.ex b/lib/nebulex/adapters/redis.ex new file mode 100644 
index 0000000..c5037ff --- /dev/null +++ b/lib/nebulex/adapters/redis.ex @@ -0,0 +1,1087 @@ +defmodule Nebulex.Adapters.Redis do + @moduledoc """ + Nebulex adapter for Redis. This adapter is implemented using `Redix` + (a Redis driver for Elixir). + + The adapter provides three setup alternatives: + + * **Standalone** - The adapter establishes a pool of connections + with a single Redis node. The `:standalone` is the default mode. + + * **Redis Cluster** - [Redis Cluster](https://redis.io/topics/cluster-tutorial) + is a built-in feature in Redis since version 3, and it may be the most + convenient and recommendable way to set up Redis in a cluster and have + a distributed cache storage out-of-box. This adapter provides the + `:redis_cluster` mode to set up **Redis Cluster** from the client-side + automatically and be able to use it transparently. + + * **Built-in client-side cluster** - The `:client_side_cluster` mode + provides a simple client-side cluster implementation based on + sharding distribution model. + + ## Standalone + + A cache that uses Redis is defined as follows: + + defmodule MyApp.RedisCache do + use Nebulex.Cache, + otp_app: :nebulex, + adapter: Nebulex.Adapters.Redis + end + + The configuration for the cache must be in your application environment, + usually defined in your `config/config.exs`: + + config :my_app, MyApp.RedisCache, + conn_opts: [ + host: "127.0.0.1", + port: 6379 + ] + + ## Redis Cluster + + A cache that uses Redis Cluster can be defined as follows: + + defmodule MyApp.RedisClusterCache do + use Nebulex.Cache, + otp_app: :nebulex, + adapter: Nebulex.Adapters.Redis + end + + As you may notices, nothing has changed, it is defined the same as the + standalone mode. 
The change is in the configuration: + + config :my_app, MyApp.RedisClusterCache, + mode: :redis_cluster, + redis_cluster: [ + configuration_endpoints: [ + endpoint1_conn_opts: [ + host: "127.0.0.1", + port: 6379, + # Add the password if 'requirepass' is on + password: "password" + ], + ... + ] + ] + + ## Client-side Cluster + + Same as the previous modes, a cache is defined as: + + defmodule MyApp.ClusteredCache do + use Nebulex.Cache, + otp_app: :nebulex, + adapter: Nebulex.Adapters.Redis + end + + The config: + + config :my_app, MyApp.ClusteredCache, + mode: :client_side_cluster, + client_side_cluster: [ + nodes: [ + node1: [ + pool_size: 10, + conn_opts: [ + host: "127.0.0.1", + port: 9001 + ] + ], + node2: [ + pool_size: 4, + conn_opts: [ + url: "redis://127.0.0.1:9002" + ] + ], + node3: [ + conn_opts: [ + host: "127.0.0.1", + port: 9003 + ] + ], + ... + ] + ] + + > #### Redis Proxy Alternative {: .warning} + > + > Consider using a proxy instead, since it may provide more and better + > features. See the "Redis Proxy" section below for more information. + + ## Redis Proxy + + Another option for "Redis Cluster" or the built-in "Client-side cluster" is + using a proxy such as [Envoy proxy][envoy] or [Twemproxy][twemproxy] on top + of Redis. In this case, the proxy does the distribution work, and from the + adparter's side (**Nebulex.Adapters.Redis**), it would be only configuration. + Instead of connect the adapter against the Redis nodes, we connect it against + the proxy nodes, this means, in the config, we setup the pool with the host + and port pointing to the proxy. 
+ + [envoy]: https://www.envoyproxy.io/ + [twemproxy]: https://github.com/twitter/twemproxy + + ## Configuration options + + In addition to `Nebulex.Cache` config options, the adapter supports the + following options: + + #{Nebulex.Adapters.Redis.Options.start_options_docs()} + + ## Shared runtime options + + Since the adapter runs on top of `Redix`, all commands accept their options + (e.g.: `:timeout`, and `:telemetry_metadata`). See `Redix` docs for more + information. + + ### Redis Cluster runtime options + + The following options are only for the `:redis_cluster` mode and apply to all + commands: + + * `:lock_retries` - When the config manager is running and setting up + the hash slot map, all Redis commands get blocked until the cluster + is properly configured and the hash slot map is ready to use. This + option defines the max retry attempts to acquire the lock before + executing the command. Defaults to `:infinity`. + + ## Queryable API + + Since the queryable API is implemented by using `KEYS` command, + keep in mind the following caveats: + + * Only keys can be queried. + * Only strings and predefined queries are allowed as query values. + + See ["KEYS" command](https://redis.io/docs/latest/commands/keys/). 
+ + ### Examples + + iex> MyApp.RedisCache.put_all(%{ + ...> "firstname" => "Albert", + ...> "lastname" => "Einstein", + ...> "age" => 76 + ...> }) + :ok + + # returns key/value pairs by default + iex> MyApp.RedisCache.get_all!("**name**") |> Map.new() + %{"firstname" => "Albert", "lastname" => "Einstein"} + + iex> MyApp.RedisCache.get_all!("**name**", select: :key) + ["firstname", "lastname"] + + iex> MyApp.RedisCache.get_all!("a??", select: :key) + ["age"] + + iex> MyApp.RedisCache.get_all!(select: :key) + ["age", "firstname", "lastname"] + + iex> MyApp.RedisCache.stream!("**name**", select: :key) |> Enum.to_list() + ["firstname", "lastname"] + + ### Deleting/counting keys + + iex> MyApp.RedisCache.delete_all!({:in, ["foo", "bar"]}) + 2 + iex> MyApp.RedisCache.count_all!({:in, ["foo", "bar"]}) + 2 + + ## Transactions + + This adapter doesn't provide support for transactions. However, in the future, + it is planned support [Redis Transactions][redis_transactions] by using the + commands `MULTI`, `EXEC`, `DISCARD` and `WATCH`. + + [redis_transactions]: https://redis.io/docs/manual/transactions/ + + ## Using the adapter as a Redis client + + Since the Redis adapter works on top of `Redix` and provides features like + connection pools, "Redis Cluster", etc., it may also work as a Redis client. + The Redis API is quite extensive, and there are many useful commands we may + want to run, leveraging the Redis adapter features. Therefore, the adapter + provides additional functions to do so. + + ### `fetch_conn(opts \\\\ [])` + + The function accepts the following options: + + * `:name` - The name of the cache in case you are using dynamic caches, + otherwise it is not required. + * `:key` - The key is used to compute the node against which to perform + the command. It is only required for `:redis_cluster` and + `:client_side_cluster` modes. 
+ + Let's see some examples: + + iex> MyCache.fetch_conn!() + ...> |> Redix.command!(["LPUSH", "mylist", "hello"]) + 1 + iex> MyCache.fetch_conn!() + ...> |> Redix.command!(["LPUSH", "mylist", "world"]) + 2 + iex> MyCache.fetch_conn!() + ...> |> Redix.command!(["LRANGE", "mylist", "0", "-1"]) + ["hello", "world"] + + When working with `:redis_cluster` or `:client_side_cluster` modes the option + `:key` is required: + + iex> {:ok, conn} = MyCache.fetch_conn(key: "mylist") + iex> Redix.pipeline!([ + ...> ["LPUSH", "mylist", "hello"], + ...> ["LPUSH", "mylist", "world"], + ...> ["LRANGE", "mylist", "0", "-1"] + ...> ]) + [1, 2, ["hello", "world"]] + + Since these functions run on top of `Redix`, they also accept their options + (e.g.: `:timeout`, and `:telemetry_metadata`). See `Redix` docs for more + information. + + ### Encoding/decoding functions + + The following functions are available to encode/decode Elixir terms. It is + useful whenever you want to work with Elixir terms in addition to strings + or other specific Redis data types. + + * `encode_key(name \\\\ __MODULE__, key)` - Encodes an Elixir term into a + string. The argument `name` is optional and should be used in case of + dynamic caches (Defaults to the defined cache module). + * `encode_value(name \\\\ __MODULE__, value)` - Same as `encode_key` but + it is specific for encoding values, in case the encoding for keys and + values are different. + * `decode_key(name \\\\ __MODULE__, key)` - Decodes binary into an Elixir + term. The argument `name` is optional and should be used in case of + dynamic caches (Defaults to the defined cache module). + * `decode_value(name \\\\ __MODULE__, value)` - Same as `decode_key` but + it is specific for decoding values, in case the decoding for keys and + values are different. 
+ + Let's see some examples: + + iex> conn = MyCache.fetch_conn!() + iex> key = MyCache.encode_key({:key, "key"}) + iex> value = MyCache.encode_value({:value, "value"}) + iex> Redix.command!(conn, ["SET", key, value], timeout: 5000) + "OK" + iex> Redix.command!(conn, ["GET", key]) |> MyCache.decode_value() + {:value, "value"} + + ## Adapter-specific telemetry events for the `:redis_cluster` mode + + Aside from the recommended Telemetry events by `Nebulex.Cache`, this adapter + exposes the following Telemetry events for the `:redis_cluster` mode: + + * `telemetry_prefix ++ [:redis_cluster, :setup, :start]` - This event is + specific to the `:redis_cluster` mode. Before the configuration manager + calls Redis to set up the cluster shards, this event should be invoked. + + The `:measurements` map will include the following: + + * `:system_time` - The current system time in native units from calling: + `System.system_time()`. + + A Telemetry `:metadata` map including the following fields: + + * `:adapter_meta` - The adapter metadata. + * `:pid` - The configuration manager PID. + + * `telemetry_prefix ++ [:redis_cluster, :setup, :stop]` - This event is + specific to the `:redis_cluster` mode. After the configuration manager + set up the cluster shards, this event should be invoked. + + The `:measurements` map will include the following: + + * `:duration` - The time spent configuring the cluster. The measurement + is given in the `:native` time unit. You can read more about it in the + docs for `System.convert_time_unit/3`. + + A Telemetry `:metadata` map including the following fields: + + * `:adapter_meta` - The adapter metadata. + * `:pid` - The configuration manager PID. + * `:status` - The cluster setup status. If the cluster was configured + successfully, the status will be set to `:ok`, otherwise, will be + set to `:error`. + * `:reason` - The status reason. When the status is `:ok`, the reason is + `:succeeded`, otherwise, it is the error reason. 
+ + * `telemetry_prefix ++ [:redis_cluster, :setup, :exception]` - This event + is specific to the `:redis_cluster` mode. When an exception is raised + while configuring the cluster, this event should be invoked. + + The `:measurements` map will include the following: + + * `:duration` - The time spent configuring the cluster. The measurement + is given in the `:native` time unit. You can read more about it in the + docs for `System.convert_time_unit/3`. + + A Telemetry `:metadata` map including the following fields: + + * `:adapter_meta` - The adapter metadata. + * `:pid` - The configuration manager PID. + * `:kind` - The type of the error: `:error`, `:exit`, or `:throw`. + * `:reason` - The reason of the error. + * `:stacktrace` - The stacktrace. + + """ + + # Provide Cache Implementation + @behaviour Nebulex.Adapter + @behaviour Nebulex.Adapter.KV + @behaviour Nebulex.Adapter.Queryable + @behaviour Nebulex.Adapter.Info + + # Inherit default serializer implementation + use Nebulex.Adapters.Redis.Serializer + + import Nebulex.Utils + + alias Nebulex.Adapter + + alias __MODULE__.{ + Client, + ClientSideCluster, + Cluster, + Connection, + Options + } + + ## Nebulex.Adapter + + @impl true + defmacro __before_compile__(_env) do + quote do + @doc """ + A convenience to fetch a Redis connection. + """ + def fetch_conn(opts \\ []) do + {name, opts} = Keyword.pop(opts, :name, __MODULE__) + {key, opts} = Keyword.pop(opts, :key) + + name + |> Adapter.lookup_meta() + |> Client.fetch_conn(key, opts) + end + + @doc """ + Same as `fetch_conn` but raises an exception in case of error. + """ + def fetch_conn!(opts \\ []) do + case fetch_conn(opts) do + {:ok, conn} -> conn + {:error, e} -> raise e + end + end + + @doc """ + A convenience to encode the given `key`. + """ + def encode_key(name \\ __MODULE__, key) do + %{serializer: sz, encode_key_opts: enc_opts} = Adapter.lookup_meta(name) + + sz.encode_key(key, enc_opts) + end + + @doc """ + A convenience to decode the given `key`. 
+ """ + def decode_key(name \\ __MODULE__, key) do + %{serializer: sz, decode_key_opts: dec_opts} = Adapter.lookup_meta(name) + + sz.decode_key(key, dec_opts) + end + + @doc """ + A convenience to encode the given `value`. + """ + def encode_value(name \\ __MODULE__, value) do + %{serializer: sz, encode_value_opts: enc_opts} = Adapter.lookup_meta(name) + + sz.encode_value(value, enc_opts) + end + + @doc """ + A convenience to decode the given `value`. + """ + def decode_value(name \\ __MODULE__, value) do + %{serializer: sz, decode_value_opts: dec_opts} = Adapter.lookup_meta(name) + + sz.decode_value(value, dec_opts) + end + end + end + + @impl true + def init(opts) do + # Common options + {telemetry_prefix, opts} = Keyword.pop!(opts, :telemetry_prefix) + {telemetry, opts} = Keyword.pop!(opts, :telemetry) + {cache, opts} = Keyword.pop!(opts, :cache) + + # Validate options + opts = Options.validate_start_opts!(opts) + + # Get the cache name (required) + name = opts[:name] || cache + + # Adapter mode + mode = Keyword.fetch!(opts, :mode) + + # Local registry + registry = camelize_and_concat([name, Registry]) + + # Redis serializer for encoding/decoding keys and values + serializer_meta = assert_serializer!(opts) + + # Resolve the pool size + pool_size = Keyword.get_lazy(opts, :pool_size, fn -> System.schedulers_online() end) + + # Init adapter metadata + adapter_meta = + %{ + telemetry_prefix: telemetry_prefix, + telemetry: telemetry, + cache_pid: self(), + name: opts[:name] || cache, + mode: mode, + pool_size: pool_size, + registry: registry, + started_at: DateTime.utc_now() + } + |> Map.merge(serializer_meta) + + # Init the connections child spec according to the adapter mode + {conn_child_spec, adapter_meta} = do_init(adapter_meta, opts) + + # Supervisorr name + sup_name = camelize_and_concat([name, Supervisor]) + + # Prepare child spec + child_spec = + Supervisor.child_spec( + {Nebulex.Adapters.Redis.Supervisor, {sup_name, conn_child_spec, adapter_meta}}, + id: 
{__MODULE__, sup_name} + ) + + {:ok, child_spec, adapter_meta} + end + + defp assert_serializer!(opts) do + serializer = Keyword.get(opts, :serializer, __MODULE__) + serializer_opts = Keyword.fetch!(opts, :serializer_opts) + + %{ + serializer: serializer, + encode_key_opts: Keyword.fetch!(serializer_opts, :encode_key), + encode_value_opts: Keyword.fetch!(serializer_opts, :encode_value), + decode_key_opts: Keyword.fetch!(serializer_opts, :decode_key), + decode_value_opts: Keyword.fetch!(serializer_opts, :decode_value) + } + end + + defp do_init(%{mode: :standalone} = adapter_meta, opts) do + Connection.init(adapter_meta, opts) + end + + defp do_init(%{mode: :redis_cluster} = adapter_meta, opts) do + Cluster.init(adapter_meta, opts) + end + + defp do_init(%{mode: :client_side_cluster} = adapter_meta, opts) do + ClientSideCluster.init(adapter_meta, opts) + end + + ## Nebulex.Adapter.KV + + @impl true + def fetch( + %{ + serializer: serializer, + encode_key_opts: enc_key_opts, + decode_value_opts: dec_value_opts + } = adapter_meta, + key, + opts + ) do + redis_k = serializer.encode_key(key, enc_key_opts) + + case Client.command(adapter_meta, ["GET", redis_k], [key: redis_k] ++ opts) do + {:ok, nil} -> + wrap_error Nebulex.KeyError, key: key, reason: :not_found + + {:ok, value} -> + {:ok, serializer.decode_value(value, dec_value_opts)} + + {:error, _} = e -> + e + end + end + + @impl true + def put( + %{ + serializer: serializer, + encode_key_opts: enc_key_opts, + encode_value_opts: enc_value_opts + } = adapter_meta, + key, + value, + on_write, + ttl, + keep_ttl?, + opts + ) do + redis_k = serializer.encode_key(key, enc_key_opts) + redis_v = serializer.encode_value(value, enc_value_opts) + cmd_opts = cmd_opts(keep_ttl: keep_ttl?, ttl: ttl, action: on_write) + + case Client.command(adapter_meta, ["SET", redis_k, redis_v | cmd_opts], [key: redis_k] ++ opts) do + {:ok, "OK"} -> {:ok, true} + {:ok, nil} -> {:ok, false} + {:error, _} = e -> e + end + end + + @impl true + def 
put_all( + %{ + mode: mode, + serializer: serializer, + encode_key_opts: enc_key_opts, + encode_value_opts: enc_value_opts + } = adapter_meta, + entries, + on_write, + ttl, + opts + ) do + entries = + Enum.map(entries, fn {key, val} -> + {serializer.encode_key(key, enc_key_opts), serializer.encode_value(val, enc_value_opts)} + end) + + case mode do + :standalone -> + do_put_all(adapter_meta, nil, entries, on_write, ttl, opts) + + _else -> + cluster_put_all(adapter_meta, entries, on_write, ttl, opts) + end + end + + defp cluster_put_all(adapter_meta, entries, on_write, ttl, opts) do + keys = Enum.map(entries, &elem(&1, 0)) + + case execute(adapter_meta, %{op: :count_all, query: {:in, keys}}, []) do + {:ok, 0} -> + entries + |> group_keys_by_hash_slot(adapter_meta, :tuples) + |> Enum.reduce_while({:ok, true}, fn {hash_slot, group}, acc -> + adapter_meta + |> do_put_all(hash_slot, group, on_write, ttl, opts) + |> handle_put_all_response(acc) + end) + + {:ok, _} -> + {:ok, false} + + error -> + error + end + end + + defp do_put_all(adapter_meta, hash_slot, entries, on_write, ttl, opts) do + cmd = + case on_write do + :put -> "MSET" + :put_new -> "MSETNX" + end + + {mset, expire} = + Enum.reduce(entries, {[cmd], []}, fn {key, val}, {acc1, acc2} -> + acc2 = + if is_integer(ttl), + do: [["PEXPIRE", key, ttl] | acc2], + else: acc2 + + {[val, key | acc1], acc2} + end) + + with {:ok, [result | _]} <- + Client.transaction_pipeline( + adapter_meta, + [Enum.reverse(mset) | expire], + [key: hash_slot] ++ opts + ) do + case result do + "OK" -> {:ok, true} + 1 -> {:ok, true} + 0 -> {:ok, false} + end + end + end + + defp handle_put_all_response({:ok, true}, acc) do + {:cont, acc} + end + + defp handle_put_all_response(other, _acc) do + {:halt, other} + end + + @impl true + def delete(adapter_meta, key, opts) do + redis_k = enc_key(adapter_meta, key) + + with {:ok, _} <- + Client.command(adapter_meta, ["DEL", redis_k], [key: redis_k] ++ opts) do + :ok + end + end + + @impl true + 
def take(%{serializer: serializer, decode_value_opts: dec_val_opts} = adapter_meta, key, opts) do + redis_k = enc_key(adapter_meta, key) + + case Client.transaction_pipeline( + adapter_meta, + [["GET", redis_k], ["DEL", redis_k]], + [key: redis_k] ++ opts + ) do + {:ok, [nil | _]} -> + wrap_error Nebulex.KeyError, key: key, reason: :not_found + + {:ok, [result | _]} -> + {:ok, serializer.decode_value(result, dec_val_opts)} + + {:error, _} = e -> + e + end + end + + @impl true + def has_key?(adapter_meta, key, opts) do + redis_k = enc_key(adapter_meta, key) + + case Client.command(adapter_meta, ["EXISTS", redis_k], [key: redis_k] ++ opts) do + {:ok, 1} -> {:ok, true} + {:ok, 0} -> {:ok, false} + {:error, _} = e -> e + end + end + + @impl true + def ttl(adapter_meta, key, opts) do + redis_k = enc_key(adapter_meta, key) + + case Client.command(adapter_meta, ["PTTL", redis_k], [key: redis_k] ++ opts) do + {:ok, -1} -> + {:ok, :infinity} + + {:ok, -2} -> + wrap_error Nebulex.KeyError, key: key, reason: :not_found + + {:ok, ttl} -> + {:ok, ttl * 1000} + + {:error, _} = e -> + e + end + end + + @impl true + def expire(adapter_meta, key, ttl, opts) do + do_expire(adapter_meta, enc_key(adapter_meta, key), ttl, opts) + end + + defp do_expire(adapter_meta, redis_k, :infinity, opts) do + commands = [["PTTL", redis_k], ["PERSIST", redis_k]] + + case Client.transaction_pipeline(adapter_meta, commands, [key: redis_k] ++ opts) do + {:ok, [-2, 0]} -> {:ok, false} + {:ok, [_, _]} -> {:ok, true} + {:error, _} = e -> e + end + end + + defp do_expire(adapter_meta, redis_k, ttl, opts) do + case Client.command(adapter_meta, ["PEXPIRE", redis_k, ttl], [key: redis_k] ++ opts) do + {:ok, 1} -> {:ok, true} + {:ok, 0} -> {:ok, false} + {:error, _} = e -> e + end + end + + @impl true + def touch(adapter_meta, key, opts) do + redis_k = enc_key(adapter_meta, key) + + case Client.command(adapter_meta, ["TOUCH", redis_k], [key: redis_k] ++ opts) do + {:ok, 1} -> {:ok, true} + {:ok, 0} -> {:ok, 
false} + {:error, _} = e -> e + end + end + + @impl true + def update_counter(adapter_meta, key, incr, default, ttl, opts) do + do_update_counter(adapter_meta, enc_key(adapter_meta, key), incr, ttl, default, opts) + end + + defp do_update_counter(adapter_meta, redis_k, incr, :infinity, default, opts) do + with {:ok, ^incr} when default > 0 <- + Client.command(adapter_meta, ["INCRBY", redis_k, incr], [key: redis_k] ++ opts) do + # The key didn't exist, increment the default value + Client.command(adapter_meta, ["INCRBY", redis_k, default], [key: redis_k] ++ opts) + end + end + + defp do_update_counter(adapter_meta, redis_k, incr, ttl, default, opts) do + with {:ok, default_incr} <- default_incr(adapter_meta, redis_k, default, opts), + {:ok, [result | _]} <- + Client.transaction_pipeline( + adapter_meta, + [["INCRBY", redis_k, incr + default_incr], ["PEXPIRE", redis_k, ttl]], + [key: redis_k] ++ opts + ) do + {:ok, result} + end + end + + defp default_incr(adapter_meta, redis_k, default, opts) do + case Client.command(adapter_meta, ["EXISTS", redis_k], [key: redis_k] ++ opts) do + {:ok, 1} -> {:ok, 0} + {:ok, 0} -> {:ok, default} + {:error, _} = e -> e + end + end + + ## Nebulex.Adapter.Queryable + + @impl true + def execute(adapter_meta, query_meta, opts) + + def execute(_adapter_meta, %{op: :get_all, query: {:in, []}}, _opts) do + {:ok, []} + end + + def execute(_adapter_meta, %{op: op, query: {:in, []}}, _opts) + when op in [:count_all, :delete_all] do + {:ok, 0} + end + + def execute(%{mode: mode} = adapter_meta, %{op: :count_all, query: {:q, nil}}, opts) do + exec(mode, [adapter_meta, ["DBSIZE"], opts], [0, &Kernel.+(&2, &1)]) + end + + def execute(%{mode: mode} = adapter_meta, %{op: :delete_all, query: {:q, nil}}, opts) do + with {:ok, _} = ok <- exec(mode, [adapter_meta, ["DBSIZE"], opts], [0, &Kernel.+(&2, &1)]), + {:ok, _} <- exec(mode, [adapter_meta, ["FLUSHDB"], opts], []) do + ok + end + end + + def execute(%{mode: :standalone} = adapter_meta, %{op: 
:count_all, query: {:in, keys}}, opts) + when is_list(keys) do + command = ["EXISTS" | Enum.map(keys, &enc_key(adapter_meta, &1))] + + Client.command(adapter_meta, command, opts) + end + + def execute(%{mode: :standalone} = adapter_meta, %{op: :delete_all, query: {:in, keys}}, opts) + when is_list(keys) do + command = ["DEL" | Enum.map(keys, &enc_key(adapter_meta, &1))] + + Client.command(adapter_meta, command, opts) + end + + def execute( + %{mode: :standalone} = adapter_meta, + %{op: :get_all, query: {:in, keys}, select: select}, + opts + ) + when is_list(keys) do + mget(adapter_meta, enc_keys(keys, adapter_meta), select, opts) + end + + def execute(adapter_meta, %{op: :get_all, query: {:in, keys}, select: select}, opts) + when is_list(keys) do + keys + |> enc_keys(adapter_meta) + |> group_keys_by_hash_slot(adapter_meta, :keys) + |> Enum.reduce_while({:ok, []}, fn {hash_slot, keys}, {:ok, acc} -> + case mget(adapter_meta, keys, select, opts, hash_slot) do + {:ok, results} -> {:cont, {:ok, results ++ acc}} + error -> {:halt, error} + end + end) + end + + def execute(adapter_meta, %{op: op, query: {:in, keys}}, opts) + when is_list(keys) do + redis_cmd = + case op do + :count_all -> "EXISTS" + :delete_all -> "DEL" + end + + keys + |> enc_keys(adapter_meta) + |> group_keys_by_hash_slot(adapter_meta, :keys) + |> Enum.reduce_while({:ok, 0}, fn {hash_slot, keys_group}, {:ok, acc} -> + Client.command( + adapter_meta, + [redis_cmd | Enum.map(keys_group, &enc_key(adapter_meta, &1))], + [key: hash_slot] ++ opts + ) + |> case do + {:ok, count} -> + {:cont, {:ok, acc + count}} + + {:error, _} = error -> + {:halt, error} + end + end) + end + + def execute( + %{mode: :standalone} = adapter_meta, + %{op: :get_all, query: {:q, query}, select: select}, + opts + ) do + with {:ok, encoded_keys} <- execute_query(query, adapter_meta, opts) do + mget(adapter_meta, encoded_keys, select, opts) + end + end + + def execute(adapter_meta, %{op: :get_all, query: {:q, query}, select: select}, 
opts) do + with {:ok, encoded_keys} <- execute_query(query, adapter_meta, opts) do + encoded_keys + |> group_keys_by_hash_slot(adapter_meta, :keys) + |> Enum.reduce_while({:ok, []}, fn {hash_slot, keys}, {:ok, acc} -> + case mget(adapter_meta, keys, select, opts, hash_slot) do + {:ok, ls} -> {:cont, {:ok, ls ++ acc}} + {:error, _} = e -> {:halt, e} + end + end) + end + end + + def execute(adapter_meta, query, opts) do + with {:ok, keys} <- execute(adapter_meta, %{query | op: :get_all, select: :key}, opts) do + execute(adapter_meta, %{query | query: {:in, keys}}, opts) + end + end + + @impl true + def stream(adapter_meta, query, opts) do + _ = assert_query(query) + opts = Options.validate_stream_opts!(opts) + + Stream.resource( + fn -> + {on_error, opts} = Keyword.pop!(opts, :on_error) + {_max_entries, opts} = Keyword.pop!(opts, :max_entries) + + {execute(adapter_meta, %{query | op: :get_all}, opts), on_error} + end, + fn + {{:ok, []}, _on_error} -> + {:halt, []} + + {{:ok, elems}, on_error} -> + {[elems], {{:ok, []}, on_error}} + + {{:error, _}, :nothing} -> + {:halt, []} + + {{:error, reason}, :raise} -> + stacktrace = + Process.info(self(), :current_stacktrace) + |> elem(1) + |> tl() + + raise Nebulex.Error, reason: reason, stacktrace: stacktrace + end, + & &1 + ) + |> wrap_ok() + end + + ## Nebulex.Adapter.Info + + @impl true + def info(adapter_meta, spec, opts) + + def info(adapter_meta, section, opts) when section in [:all, :default, :everything] do + with {:ok, info_str} <- Client.command(adapter_meta, ["INFO", section], opts) do + {:ok, parse_info(info_str)} + end + end + + def info(adapter_meta, section, opts) when is_atom(section) do + with {:ok, info} <- info(adapter_meta, [section], opts) do + {:ok, Map.get(info, section, %{})} + end + end + + def info(adapter_meta, sections, opts) when is_list(sections) do + sections = Enum.map(sections, &to_string/1) + + with {:ok, info_str} <- Client.command(adapter_meta, ["INFO" | sections], opts) do + {:ok, 
parse_info(info_str)} + end + end + + defp parse_info(info_str) do + info_str + |> String.split("#", trim: true) + |> Enum.map(&String.split(&1, ["\r\n", "\n"], trim: true)) + |> Map.new(&parse_info_items/1) + end + + defp parse_info_items([name | items]) do + key = atomify_key(name) + + info_map = + Map.new(items, fn item -> + [k, v] = String.split(item, ":", parts: 2) + + {atomify_key(k), v} + end) + + {key, info_map} + end + + ## Private Functions + + defp cmd_opts(keys) do + Enum.reduce(keys, [], fn + {:action, :put}, acc -> acc + {:action, :put_new}, acc -> ["NX" | acc] + {:action, :replace}, acc -> ["XX" | acc] + {:ttl, :infinity}, acc -> acc + {:ttl, ttl}, acc -> ["PX", "#{ttl}" | acc] + {:keep_ttl, true}, acc -> ["KEEPTTL" | acc] + {:keep_ttl, false}, acc -> acc + end) + end + + defp enc_key(%{serializer: serializer, encode_key_opts: enc_key_opts}, key) do + serializer.encode_key(key, enc_key_opts) + end + + defp enc_keys(keys, %{serializer: serializer, encode_key_opts: enc_key_opts}) do + Enum.map(keys, &serializer.encode_key(&1, enc_key_opts)) + end + + defp assert_query(%{query: {:q, q}} = query) do + %{query | query: {:q, assert_query(q)}} + end + + defp assert_query(%{query: {:in, _}} = query) do + query + end + + defp assert_query(query) when is_nil(query) or is_binary(query) do + query + end + + defp assert_query(query) do + raise Nebulex.QueryError, message: "invalid pattern", query: query + end + + defp execute_query(nil, adapter_meta, opts) do + execute_query("*", adapter_meta, opts) + end + + defp execute_query(query, %{mode: mode} = adapter_meta, opts) do + query = assert_query(query) + + exec(mode, [adapter_meta, ["KEYS", query], opts], [[], &Kernel.++(&1, &2)]) + end + + defp exec(:standalone, args, _extra_args) do + apply(Client, :command, args) + end + + defp exec(:redis_cluster, args, extra_args) do + apply(Cluster, :command, args ++ extra_args) + end + + defp exec(:client_side_cluster, args, extra_args) do + apply(ClientSideCluster, 
:command, args ++ extra_args) + end + + defp group_keys_by_hash_slot(enum, %{mode: :redis_cluster, keyslot: keyslot}, enum_type) do + Cluster.group_keys_by_hash_slot(enum, keyslot, enum_type) + end + + defp group_keys_by_hash_slot(enum, %{mode: :client_side_cluster, ring: ring}, enum_type) do + ClientSideCluster.group_keys_by_hash_slot(enum, ring, enum_type) + end + + defp select(key, _value, :key, _serializer, _dec_value_opts) do + key + end + + defp select(_key, value, :value, serializer, dec_value_opts) do + serializer.decode_value(value, dec_value_opts) + end + + defp select(key, value, {:key, :value}, serializer, dec_value_opts) do + {key, serializer.decode_value(value, dec_value_opts)} + end + + defp mget(adapter_meta, keys, select, opts, hash_slot_key \\ nil) + + defp mget(_adapter_meta, [], _select, _opts, _hash_slot_key) do + {:ok, []} + end + + defp mget( + %{ + serializer: serializer, + decode_key_opts: dec_key_opts, + decode_value_opts: dec_value_opts + } = adapter_meta, + enc_keys, + select, + opts, + hash_slot_key + ) do + with {:ok, result} <- + Client.command(adapter_meta, ["MGET" | enc_keys], [key: hash_slot_key] ++ opts) do + results = + enc_keys + |> Enum.map(&serializer.decode_key(&1, dec_key_opts)) + |> Enum.zip(result) + |> Enum.reduce([], fn + {_key, nil}, acc -> + acc + + {key, value}, acc -> + [select(key, value, select, serializer, dec_value_opts) | acc] + end) + + {:ok, results} + end + end + + defp atomify_key(str) when is_binary(str) do + str + |> String.downcase() + |> String.trim() + |> to_atom() + end + + defp to_atom(str) when is_binary(str) do + String.to_existing_atom(str) + rescue + ArgumentError -> String.to_atom(str) + end +end diff --git a/lib/nebulex/adapters/redis/client.ex b/lib/nebulex/adapters/redis/client.ex new file mode 100644 index 0000000..5b2f89b --- /dev/null +++ b/lib/nebulex/adapters/redis/client.ex @@ -0,0 +1,161 @@ +defmodule Nebulex.Adapters.Redis.Client do + # Redix wrapper + @moduledoc false + + import 
Nebulex.Adapters.Redis.Helpers + + alias Nebulex.Adapters.Redis.{ + ClientSideCluster, + Cluster, + Cluster.ConfigManager, + Pool + } + + ## API + + @doc """ + Executes a Redis command. + """ + @spec command( + Nebulex.Adapter.adapter_meta(), + Redix.command(), + keyword() + ) :: {:ok, any()} | {:error, any()} + def command(adapter_meta, command, opts \\ []) + + def command(%{mode: :redis_cluster, name: name} = adapter_meta, command, opts) do + on_moved = fn -> + # Re-configure the cluster + :ok = ConfigManager.setup_shards(name) + + # Retry once more + do_command(adapter_meta, command, Keyword.put(opts, :on_moved, nil)) + end + + do_command(adapter_meta, command, Keyword.put(opts, :on_moved, on_moved)) + end + + def command(adapter_meta, command, opts) do + do_command(adapter_meta, command, Keyword.put(opts, :on_moved, nil)) + end + + defp do_command(adapter_meta, command, opts) do + {key, opts} = Keyword.pop(opts, :key) + {on_moved, opts} = Keyword.pop(opts, :on_moved) + + with {:ok, conn} <- fetch_conn(adapter_meta, key, opts) do + conn + |> Redix.command(command, redis_command_opts(opts)) + |> handle_command_response(on_moved) + end + end + + @doc """ + Executes a Redis `MULTI`/`EXEC` transaction. 
+ """ + @spec transaction_pipeline( + Nebulex.Adapter.adapter_meta(), + [Redix.command()], + keyword() + ) :: {:ok, [any()]} | {:error, any()} + def transaction_pipeline(adapter_meta, commands, opts \\ []) + + def transaction_pipeline(%{mode: :redis_cluster, name: name} = adapter_meta, commands, opts) do + on_moved = fn -> + # Re-configure the cluster + :ok = ConfigManager.setup_shards(name) + + # Retry once more + do_transaction_pipeline(adapter_meta, commands, Keyword.put(opts, :on_moved, nil)) + end + + do_transaction_pipeline(adapter_meta, commands, Keyword.put(opts, :on_moved, on_moved)) + end + + def transaction_pipeline(adapter_meta, commands, opts) do + do_transaction_pipeline(adapter_meta, commands, Keyword.put(opts, :on_moved, nil)) + end + + defp do_transaction_pipeline(%{mode: mode} = adapter_meta, commands, opts) do + {key, opts} = Keyword.pop(opts, :key) + {on_moved, opts} = Keyword.pop(opts, :on_moved) + + with {:ok, conn} <- fetch_conn(adapter_meta, key, opts) do + conn + |> redix_transaction_pipeline(commands, redis_command_opts(opts), mode) + |> handle_tx_pipeline_response(on_moved) + end + end + + @spec fetch_conn(Nebulex.Adapter.adapter_meta(), any(), keyword()) :: + {:ok, pid()} | {:error, Nebulex.Error.t()} + def fetch_conn(adapter_meta, key, opts) + + def fetch_conn( + %{mode: :standalone, name: name, registry: registry, pool_size: pool_size}, + _key, + _opts + ) do + Pool.fetch_conn(registry, name, pool_size) + end + + def fetch_conn(%{mode: :redis_cluster, name: name} = meta, key, opts) do + with {:error, %Nebulex.Error{reason: :redis_connection_error}} <- + Cluster.fetch_conn(meta, key, opts) do + # Perhars the cluster should be re-configured + :ok = ConfigManager.setup_shards(name) + + # Retry once more + Cluster.fetch_conn(meta, key, opts) + end + end + + def fetch_conn(%{mode: :client_side_cluster} = meta, key, opts) do + ClientSideCluster.fetch_conn(meta, key, opts) + end + + ## Private Functions + + defp 
handle_command_response({:error, %Redix.Error{message: "MOVED" <> _}}, on_moved) + when is_function(on_moved) do + on_moved.() + end + + defp handle_command_response(other, _on_moved) do + other + end + + defp handle_tx_pipeline_response({:error, {error, prev_responses}}, on_moved) do + Enum.reduce_while(prev_responses, {:error, error}, fn + %Redix.Error{message: "MOVED" <> _}, _acc when is_function(on_moved) -> + {:halt, on_moved.()} + + %Redix.Error{message: "MOVED" <> _} = error, _acc -> + {:halt, {:error, error}} + + _other, acc -> + {:cont, acc} + end) + end + + defp handle_tx_pipeline_response(other, _on_moved) do + other + end + + # A tweaked version of `Redix.transaction_pipeline/3` for handling + # Redis Cluster errors + defp redix_transaction_pipeline(conn, [_ | _] = commands, options, :redis_cluster) + when is_list(options) do + with {:ok, responses} <- Redix.pipeline(conn, [["MULTI"]] ++ commands ++ [["EXEC"]], options) do + case Enum.split(responses, -1) do + {prev_responses, [%Redix.Error{} = error]} -> {:error, {error, prev_responses}} + {_prev_responses, [other]} -> {:ok, other} + end + end + end + + # Forward to `Redix.transaction_pipeline/3` + defp redix_transaction_pipeline(conn, commands, options, _mode) do + Redix.transaction_pipeline(conn, commands, options) + end +end diff --git a/lib/nebulex/adapters/redis/client_side_cluster.ex b/lib/nebulex/adapters/redis/client_side_cluster.ex new file mode 100644 index 0000000..59f198a --- /dev/null +++ b/lib/nebulex/adapters/redis/client_side_cluster.ex @@ -0,0 +1,143 @@ +defmodule Nebulex.Adapters.Redis.ClientSideCluster do + # Client-side Cluster + @moduledoc false + + import Nebulex.Adapters.Redis.Helpers + import Nebulex.Utils, only: [camelize_and_concat: 1] + + alias ExHashRing.Ring + alias Nebulex.Adapters.Redis.{Options, Pool} + + @typedoc "Proxy type to the adapter meta" + @type adapter_meta() :: Nebulex.Adapter.adapter_meta() + + @type node_entry() :: {node_name :: atom(), pool_size :: 
pos_integer()} + @type nodes_config() :: [node_entry()] + + ## API + + @spec init(adapter_meta(), keyword()) :: {Supervisor.child_spec(), adapter_meta()} + def init(%{name: name, pool_size: pool_size} = adapter_meta, opts) do + cluster_opts = Keyword.get(opts, :client_side_cluster) + + # Ensure :client_side_cluster is provided + if is_nil(cluster_opts) do + raise NimbleOptions.ValidationError, + Options.invalid_cluster_config_error( + "invalid value for :client_side_cluster option: ", + nil, + :client_side_cluster + ) + end + + nodes = + cluster_opts + |> Keyword.fetch!(:nodes) + |> Map.new(&{to_string(elem(&1, 0)), Keyword.get(elem(&1, 1), :pool_size, pool_size)}) + + ring_name = camelize_and_concat([name, Ring]) + + adapter_meta = Map.merge(adapter_meta, %{nodes: nodes, ring: ring_name}) + + children = [ + {ExHashRing.Ring, name: ring_name}, + {__MODULE__.NodeSupervisor, {adapter_meta, cluster_opts}} + ] + + child_spec = %{ + id: {name, ClientSideClusterSupervisor}, + start: {Supervisor, :start_link, [children, [strategy: :rest_for_one]]}, + type: :supervisor + } + + {child_spec, adapter_meta} + end + + @spec command( + Nebulex.Adapter.adapter_meta(), + Redix.command(), + keyword(), + init_acc :: any(), + reducer :: (any(), any() -> any()) + ) :: any() + def command( + %{name: name, registry: registry, nodes: nodes}, + command, + opts, + init_acc \\ nil, + reducer \\ fn res, _ -> res end + ) do + Enum.reduce_while(nodes, {:ok, init_acc}, fn {node_name, pool_size}, {:ok, acc} -> + registry + |> do_command(name, node_name, pool_size, command, opts) + |> handle_reduce_while(acc, reducer) + end) + end + + defp do_command(registry, name, node_name, pool_size, command, opts) do + with {:ok, conn} <- Pool.fetch_conn(registry, {name, node_name}, pool_size) do + Redix.command(conn, command, redis_command_opts(opts)) + end + end + + defp handle_reduce_while({:ok, result}, acc, reducer) do + {:cont, {:ok, reducer.(result, acc)}} + end + + defp handle_reduce_while({:error, 
_} = e, _acc, _reducer) do + {:halt, e} + end + + @spec fetch_conn(adapter_meta(), {:"$hash_slot", any()} | any(), keyword()) :: + {:ok, pid()} | {:error, Nebulex.Error.t()} + def fetch_conn(adapter_meta, key, opts) + + def fetch_conn( + %{name: name, registry: registry, nodes: nodes}, + {:"$hash_slot", node_name}, + _opts + ) do + pool_size = Map.fetch!(nodes, node_name) + + Pool.fetch_conn(registry, {name, node_name}, pool_size) + end + + def fetch_conn( + %{ + name: name, + registry: registry, + ring: ring, + nodes: nodes + }, + key, + _opts + ) do + node = get_node(ring, key) + pool_size = Map.fetch!(nodes, node) + + Pool.fetch_conn(registry, {name, node}, pool_size) + end + + @spec group_keys_by_hash_slot(Enum.t(), atom(), atom()) :: map() + def group_keys_by_hash_slot(enum, ring, type) + + def group_keys_by_hash_slot(enum, ring, :keys) do + Enum.group_by(enum, &hash_slot(ring, &1)) + end + + def group_keys_by_hash_slot(enum, ring, :tuples) do + Enum.group_by(enum, &hash_slot(ring, elem(&1, 0))) + end + + ## Private Functions + + defp get_node(ring, key) do + {:ok, node} = Ring.find_node(ring, key) + + node + end + + defp hash_slot(ring, key) do + {:"$hash_slot", get_node(ring, key)} + end +end diff --git a/lib/nebulex/adapters/redis/client_side_cluster/node_supervisor.ex b/lib/nebulex/adapters/redis/client_side_cluster/node_supervisor.ex new file mode 100644 index 0000000..338818b --- /dev/null +++ b/lib/nebulex/adapters/redis/client_side_cluster/node_supervisor.ex @@ -0,0 +1,46 @@ +defmodule Nebulex.Adapters.Redis.ClientSideCluster.NodeSupervisor do + @moduledoc false + use Supervisor + + alias ExHashRing.{Configuration, Ring} + alias Nebulex.Adapters.Redis.ClientSideCluster.PoolSupervisor + + ## API + + @doc false + def start_link({adapter_meta, opts}) do + Supervisor.start_link(__MODULE__, {adapter_meta, opts}) + end + + ## Supervisor Callbacks + + @impl true + def init({adapter_meta, opts}) do + %{name: name, registry: registry, pool_size: pool_size, 
ring: ring} = adapter_meta + + children = + opts + |> Keyword.fetch!(:nodes) + |> Enum.map(fn {node_name, node_opts} -> + node_name = to_string(node_name) + + {replicas, node_opts} = + Keyword.pop_lazy(node_opts, :ch_ring_replicas, fn -> + Configuration.get_replicas() + end) + + {:ok, _} = Ring.add_node(ring, node_name, replicas) + + node_opts = + node_opts + |> Keyword.put(:name, name) + |> Keyword.put(:registry, registry) + |> Keyword.put(:node, node_name) + |> Keyword.put_new(:pool_size, pool_size) + + Supervisor.child_spec({PoolSupervisor, node_opts}, id: {name, node_name}, type: :supervisor) + end) + + Supervisor.init(children, strategy: :one_for_one) + end +end diff --git a/lib/nebulex_redis_adapter/client_cluster/supervisor.ex b/lib/nebulex/adapters/redis/client_side_cluster/pool_supervisor.ex similarity index 74% rename from lib/nebulex_redis_adapter/client_cluster/supervisor.ex rename to lib/nebulex/adapters/redis/client_side_cluster/pool_supervisor.ex index ccc85e3..0675848 100644 --- a/lib/nebulex_redis_adapter/client_cluster/supervisor.ex +++ b/lib/nebulex/adapters/redis/client_side_cluster/pool_supervisor.ex @@ -1,8 +1,8 @@ -defmodule NebulexRedisAdapter.ClientCluster.Supervisor do +defmodule Nebulex.Adapters.Redis.ClientSideCluster.PoolSupervisor do @moduledoc false use Supervisor - alias NebulexRedisAdapter.Pool + alias Nebulex.Adapters.Redis.Pool ## API @@ -22,7 +22,7 @@ defmodule NebulexRedisAdapter.ClientCluster.Supervisor do children = Pool.register_names(registry, {name, node}, pool_size, fn conn_name -> - {NebulexRedisAdapter.Connection, Keyword.put(opts, :name, conn_name)} + {Nebulex.Adapters.Redis.Connection, Keyword.put(opts, :name, conn_name)} end) Supervisor.init(children, strategy: :one_for_one) diff --git a/lib/nebulex/adapters/redis/cluster.ex b/lib/nebulex/adapters/redis/cluster.ex new file mode 100644 index 0000000..bd9f3a6 --- /dev/null +++ b/lib/nebulex/adapters/redis/cluster.ex @@ -0,0 +1,241 @@ +defmodule 
Nebulex.Adapters.Redis.Cluster do + # Redis Cluster Manager + @moduledoc false + + import Nebulex.Adapters.Redis.Helpers + import Nebulex.Utils, only: [wrap_error: 2] + + alias __MODULE__.Keyslot + alias Nebulex.Adapters.Redis.{ErrorFormatter, Options, Pool} + + @typedoc "Proxy type to the adapter meta" + @type adapter_meta() :: Nebulex.Adapter.adapter_meta() + + # Redis cluster hash slots size + @redis_cluster_hash_slots 16_384 + + ## API + + @spec init(adapter_meta(), keyword()) :: {Supervisor.child_spec(), adapter_meta()} + def init(%{name: name} = adapter_meta, opts) do + # Ensure :redis_cluster is provided + if is_nil(Keyword.get(opts, :redis_cluster)) do + raise NimbleOptions.ValidationError, + Options.invalid_cluster_config_error( + "invalid value for :redis_cluster option: ", + nil, + :redis_cluster + ) + end + + # Init ETS table to store the hash slot map + cluster_shards_tab = init_hash_slot_map_table(name) + + # Update adapter meta + adapter_meta = + Map.merge(adapter_meta, %{ + cluster_shards_tab: cluster_shards_tab, + keyslot: get_keyslot(opts) + }) + + children = [ + {__MODULE__.DynamicSupervisor, {adapter_meta, opts}}, + {__MODULE__.ConfigManager, {adapter_meta, opts}} + ] + + child_spec = %{ + id: {name, RedisClusterSupervisor}, + start: {Supervisor, :start_link, [children, [strategy: :rest_for_one]]}, + type: :supervisor + } + + {child_spec, adapter_meta} + end + + @spec command( + adapter_meta(), + Redix.command(), + keyword(), + init_acc :: any(), + (any(), any() -> any()) + ) :: any() + def command( + %{ + name: name, + cluster_shards_tab: cluster_shards_tab, + registry: registry, + pool_size: pool_size + }, + command, + opts, + init_acc \\ nil, + reducer \\ fn res, _ -> res end + ) do + with_retry(name, Keyword.get(opts, :lock_retries, :infinity), fn -> + reduce_while(cluster_shards_tab, {:ok, init_acc}, fn slot_id, {:ok, acc} -> + registry + |> do_command(slot_id, pool_size, command, opts) + |> handle_reduce_while(acc, reducer) + end) + end) + 
end + + defp do_command(registry, slot_id, pool_size, command, opts) do + with {:ok, conn} <- Pool.fetch_conn(registry, slot_id, pool_size) do + Redix.command(conn, command, redis_command_opts(opts)) + end + end + + defp handle_reduce_while({:ok, result}, acc, reducer) do + {:cont, {:ok, reducer.(result, acc)}} + end + + defp handle_reduce_while({:error, _} = e, _acc, _reducer) do + {:halt, e} + end + + @spec fetch_conn(adapter_meta(), {:"$hash_slot", any()} | any(), keyword()) :: + {:ok, pid()} | {:error, Nebulex.Error.t()} + def fetch_conn( + %{ + name: name, + keyslot: keyslot, + cluster_shards_tab: cluster_shards_tab, + registry: registry, + pool_size: pool_size + }, + key, + opts + ) do + with_retry(name, Keyword.get(opts, :lock_retries, :infinity), fn -> + init_acc = wrap_error Nebulex.Error, reason: :redis_connection_error, module: ErrorFormatter + + {:"$hash_slot", hash_slot} = + case key do + {:"$hash_slot", _} -> key + _else -> hash_slot(key, keyslot) + end + + reduce_while(cluster_shards_tab, init_acc, fn + {start, stop} = slot_id, _acc when hash_slot >= start and hash_slot <= stop -> + {:halt, Pool.fetch_conn(registry, slot_id, pool_size)} + + _, acc -> + {:cont, acc} + end) + end) + end + + @spec group_keys_by_hash_slot(Enum.t(), Keyslot.t(), atom()) :: map() + def group_keys_by_hash_slot(enum, keyslot, type) + + def group_keys_by_hash_slot(enum, keyslot, :keys) do + Enum.group_by(enum, &hash_slot(&1, keyslot)) + end + + def group_keys_by_hash_slot(enum, keyslot, :tuples) do + Enum.group_by(enum, &hash_slot(elem(&1, 0), keyslot)) + end + + @spec hash_slot(any(), Keyslot.t()) :: {:"$hash_slot", non_neg_integer()} + def hash_slot(key, keyslot \\ &Keyslot.hash_slot/2) do + {:"$hash_slot", keyslot.(key, @redis_cluster_hash_slots)} + end + + @spec get_status(atom(), atom()) :: atom() + def get_status(name, default \\ nil) when is_atom(name) and is_atom(default) do + name + |> status_key() + |> :persistent_term.get(default) + end + + @spec put_status(atom(), 
atom()) :: :ok + def put_status(name, status) when is_atom(name) and is_atom(status) do + # An atom is a single word so this does not trigger a global GC + name + |> status_key() + |> :persistent_term.put(status) + end + + @spec del_status_key(atom()) :: :ok + def del_status_key(name) when is_atom(name) do + # An atom is a single word so this does not trigger a global GC + _ignore = + name + |> status_key() + |> :persistent_term.erase() + + :ok + end + + @spec with_retry(atom(), pos_integer() | :infinity, (-> any())) :: any() + def with_retry(name, retries, fun) + when (is_integer(retries) and retries > 0) or retries == :infinity do + with_retry(name, fun, retries, 1, :ok) + end + + defp with_retry(name, fun, max_retries, retries, _last_status) when retries <= max_retries do + case get_status(name) do + :ok -> + fun.() + + :locked -> + :ok = random_sleep(retries) + + with_retry(name, fun, max_retries, retries + 1, :locked) + + :error -> + wrap_error Nebulex.Error, + module: ErrorFormatter, + reason: {:redis_cluster_status_error, :error}, + cache: name + + nil -> + :ok = random_sleep(retries) + + with_retry(name, fun, max_retries, retries + 1, :shutdown) + end + end + + defp with_retry(name, _fun, _max_retries, _retries, last_status) do + wrap_error Nebulex.Error, + module: ErrorFormatter, + reason: {:redis_cluster_status_error, last_status}, + cache: name + end + + ## Private Functions + + # Inline common instructions + @compile {:inline, status_key: 1} + + defp status_key(name), do: {name, :redis_cluster_status} + + defp init_hash_slot_map_table(name) do + :ets.new(name, [ + :ordered_set, + :public, + :named_table, + read_concurrency: true + ]) + end + + defp reduce_while(table, acc, reducer) do + fun = fn elem, acc -> + case reducer.(elem, acc) do + {:cont, acc} -> acc + {:halt, _} = halt -> throw(halt) + end + end + + :ets.foldl(fun, acc, table) + catch + {:halt, result} -> result + end + + defp get_keyslot(opts) do + opts + |> Keyword.fetch!(:redis_cluster) + 
|> Keyword.fetch!(:keyslot) + end +end diff --git a/lib/nebulex_redis_adapter/redis_cluster/config_manager.ex b/lib/nebulex/adapters/redis/cluster/config_manager.ex similarity index 88% rename from lib/nebulex_redis_adapter/redis_cluster/config_manager.ex rename to lib/nebulex/adapters/redis/cluster/config_manager.ex index e7003f3..1c7628e 100644 --- a/lib/nebulex_redis_adapter/redis_cluster/config_manager.ex +++ b/lib/nebulex/adapters/redis/cluster/config_manager.ex @@ -1,14 +1,14 @@ -defmodule NebulexRedisAdapter.RedisCluster.ConfigManager do +defmodule Nebulex.Adapters.Redis.Cluster.ConfigManager do @moduledoc false use GenServer - import Nebulex.Helpers, only: [normalize_module_name: 1] - import NebulexRedisAdapter.Helpers + import Nebulex.Adapters.Redis.Helpers + import Nebulex.Utils + alias Nebulex.Adapters.Redis.{Cluster, ErrorFormatter} + alias Nebulex.Adapters.Redis.Cluster.PoolSupervisor alias Nebulex.Telemetry - alias NebulexRedisAdapter.RedisCluster - alias NebulexRedisAdapter.RedisCluster.PoolSupervisor require Logger @@ -23,17 +23,17 @@ defmodule NebulexRedisAdapter.RedisCluster.ConfigManager do ## API - @spec start_link({Nebulex.Adapter.adapter_meta(), keyword}) :: GenServer.on_start() + @spec start_link({Nebulex.Adapter.adapter_meta(), keyword()}) :: GenServer.on_start() def start_link({adapter_meta, opts}) do - name = normalize_module_name([adapter_meta.name, ConfigManager]) + name = camelize_and_concat([adapter_meta.name, ConfigManager]) GenServer.start(__MODULE__, {adapter_meta, opts}, name: name) end - @spec setup_shards(name :: atom) :: :ok + @spec setup_shards(name :: atom()) :: :ok def setup_shards(name) do [name, ConfigManager] - |> normalize_module_name() + |> camelize_and_concat() |> GenServer.call(:setup_shards) end @@ -44,7 +44,7 @@ defmodule NebulexRedisAdapter.RedisCluster.ConfigManager do state = %__MODULE__{ adapter_meta: adapter_meta, opts: opts, - dynamic_sup: normalize_module_name([adapter_meta.name, DynamicSupervisor]) + 
dynamic_sup: camelize_and_concat([adapter_meta.name, DynamicSupervisor]) } {:ok, state, {:continue, :setup_shards}} @@ -82,7 +82,7 @@ defmodule NebulexRedisAdapter.RedisCluster.ConfigManager do @impl true def terminate(_reason, %__MODULE__{adapter_meta: meta, dynamic_sup: sup, running_shards: lst}) do # Set cluster status to error - :ok = RedisCluster.put_status(meta.name, :error) + :ok = Cluster.del_status_key(meta.name) # Stop running shards/pools (cleanup) :ok = stop_running_shards(meta.cluster_shards_tab, sup, lst) @@ -95,16 +95,16 @@ defmodule NebulexRedisAdapter.RedisCluster.ConfigManager do on_locked \\ fn -> :noop end ) do # Lock the cluster - :ok = RedisCluster.put_status(name, :locked) + :ok = Cluster.put_status(name, :locked) # Invoke the on_locked callback - _ = on_locked.() + _ignore = on_locked.() # Configure the cluster shards/pools case configure_shards(state) do {:ok, running_shards} -> # Unlock the cluster (status set to ok) - :ok = RedisCluster.put_status(name, :ok) + :ok = Cluster.put_status(name, :ok) {:noreply, %{state | running_shards: running_shards, setup_retries: 1}} @@ -113,7 +113,7 @@ defmodule NebulexRedisAdapter.RedisCluster.ConfigManager do :ok = Logger.error(fn -> "Error configuring cluster shards: #{inspect(reason)}" end) # Set cluster status to error - :ok = RedisCluster.put_status(name, :error) + :ok = Cluster.put_status(name, :error) {:noreply, %{state | running_shards: [], setup_retries: n + 1}, random_timeout(n)} end @@ -132,15 +132,16 @@ defmodule NebulexRedisAdapter.RedisCluster.ConfigManager do reason: nil } - Telemetry.span(adapter_meta.telemetry_prefix ++ [:config_manager, :setup], metadata, fn -> + Telemetry.span(adapter_meta.telemetry_prefix ++ [:redis_cluster, :setup], metadata, fn -> case configure_shards(adapter_meta, dynamic_sup, running_shards, opts) do {:ok, _} = ok -> {ok, %{metadata | status: :ok, reason: :succeeded}} {:error, reason} -> - # Wrap up the error + # Wrap up the error with the Telemetry metadata 
error = - wrap_error NebulexRedisAdapter.Error, + wrap_error Nebulex.Error, + module: ErrorFormatter, reason: {:redis_cluster_setup_error, reason}, cache: adapter_meta.name @@ -167,7 +168,7 @@ defmodule NebulexRedisAdapter.RedisCluster.ConfigManager do running_shards = Enum.map(specs, fn {start, stop, m_host, m_port} -> # Define slot id - slot_id = {:cluster_shards, start, stop} + slot_id = {start, stop} # Define options opts = @@ -209,7 +210,7 @@ defmodule NebulexRedisAdapter.RedisCluster.ConfigManager do defp stop_running_shards(cluster_shards_tab, dynamic_sup, running_shards) do # Flush the hash slot map - true = :ets.delete(cluster_shards_tab, :cluster_shards) + true = :ets.delete_all_objects(cluster_shards_tab) # Stop the running shards/pools Enum.each(running_shards, &DynamicSupervisor.terminate_child(dynamic_sup, elem(&1, 0))) diff --git a/lib/nebulex_redis_adapter/redis_cluster/dynamic_supervisor.ex b/lib/nebulex/adapters/redis/cluster/dynamic_supervisor.ex similarity index 56% rename from lib/nebulex_redis_adapter/redis_cluster/dynamic_supervisor.ex rename to lib/nebulex/adapters/redis/cluster/dynamic_supervisor.ex index 377eb04..7b7a326 100644 --- a/lib/nebulex_redis_adapter/redis_cluster/dynamic_supervisor.ex +++ b/lib/nebulex/adapters/redis/cluster/dynamic_supervisor.ex @@ -1,15 +1,15 @@ -defmodule NebulexRedisAdapter.RedisCluster.DynamicSupervisor do +defmodule Nebulex.Adapters.Redis.Cluster.DynamicSupervisor do @moduledoc false use DynamicSupervisor - import Nebulex.Helpers, only: [normalize_module_name: 1] + import Nebulex.Utils, only: [camelize_and_concat: 1] ## API - @spec start_link({Nebulex.Adapter.adapter_meta(), keyword}) :: Supervisor.on_start() + @spec start_link({Nebulex.Adapter.adapter_meta(), keyword()}) :: Supervisor.on_start() def start_link({adapter_meta, opts}) do - name = normalize_module_name([adapter_meta.name, DynamicSupervisor]) + name = camelize_and_concat([adapter_meta.name, DynamicSupervisor]) 
DynamicSupervisor.start_link(__MODULE__, opts, name: name) end diff --git a/lib/nebulex_redis_adapter/redis_cluster/keyslot.ex b/lib/nebulex/adapters/redis/cluster/keyslot.ex similarity index 73% rename from lib/nebulex_redis_adapter/redis_cluster/keyslot.ex rename to lib/nebulex/adapters/redis/cluster/keyslot.ex index cc12c98..f090edf 100644 --- a/lib/nebulex_redis_adapter/redis_cluster/keyslot.ex +++ b/lib/nebulex/adapters/redis/cluster/keyslot.ex @@ -1,29 +1,29 @@ -defmodule NebulexRedisAdapter.RedisCluster.Keyslot do +defmodule Nebulex.Adapters.Redis.Cluster.Keyslot do @moduledoc """ Default `Nebulex.Adapter.Keyslot` implementation. """ - use Nebulex.Adapter.Keyslot + @typedoc "Keyslot funtion type" + @type t() :: (binary(), any() -> non_neg_integer()) if Code.ensure_loaded?(CRC) do - alias NebulexRedisAdapter.Serializer.Serializable - - @impl true + @doc false def hash_slot(key, range) def hash_slot(key, range) when is_binary(key) do key |> compute_key() - |> do_hash_slot(range) + |> compute_hash_slot(range) end def hash_slot(key, range) do key - |> Serializable.encode() - |> do_hash_slot(range) + |> :erlang.phash2() + |> to_string() + |> compute_hash_slot(range) end - defp do_hash_slot(key, range) do + defp compute_hash_slot(key, range) do :crc_16_xmodem |> CRC.crc(key) |> rem(range) diff --git a/lib/nebulex_redis_adapter/redis_cluster/pool_supervisor.ex b/lib/nebulex/adapters/redis/cluster/pool_supervisor.ex similarity index 87% rename from lib/nebulex_redis_adapter/redis_cluster/pool_supervisor.ex rename to lib/nebulex/adapters/redis/cluster/pool_supervisor.ex index 79738b4..a3d9407 100644 --- a/lib/nebulex_redis_adapter/redis_cluster/pool_supervisor.ex +++ b/lib/nebulex/adapters/redis/cluster/pool_supervisor.ex @@ -1,11 +1,11 @@ -defmodule NebulexRedisAdapter.RedisCluster.PoolSupervisor do +defmodule Nebulex.Adapters.Redis.Cluster.PoolSupervisor do @moduledoc """ - Redis Cluster Node/Slot Supervisor. + Redis Cluster shard supervisor. 
""" use Supervisor - alias NebulexRedisAdapter.Pool + alias Nebulex.Adapters.Redis.Pool ## API diff --git a/lib/nebulex_redis_adapter/connection.ex b/lib/nebulex/adapters/redis/connection.ex similarity index 57% rename from lib/nebulex_redis_adapter/connection.ex rename to lib/nebulex/adapters/redis/connection.ex index fba26a3..726aaa7 100644 --- a/lib/nebulex_redis_adapter/connection.ex +++ b/lib/nebulex/adapters/redis/connection.ex @@ -1,16 +1,16 @@ -defmodule NebulexRedisAdapter.Connection do +defmodule Nebulex.Adapters.Redis.Connection do @moduledoc false - alias NebulexRedisAdapter.Pool + alias Nebulex.Adapters.Redis.Pool @typedoc "Proxy type to the adapter meta" - @type adapter_meta :: Nebulex.Adapter.metadata() + @type adapter_meta() :: Nebulex.Adapter.adapter_meta() ## API - @spec init(adapter_meta, Keyword.t()) :: {Supervisor.child_spec(), adapter_meta} + @spec init(adapter_meta(), keyword()) :: {Supervisor.child_spec(), adapter_meta()} def init(%{name: name, registry: registry, pool_size: pool_size} = adapter_meta, opts) do - connections_specs = + conn_specs = Pool.register_names(registry, name, pool_size, fn conn_name -> opts |> Keyword.put(:name, conn_name) @@ -20,24 +20,20 @@ defmodule NebulexRedisAdapter.Connection do connections_supervisor_spec = %{ id: :connections_supervisor, type: :supervisor, - start: {Supervisor, :start_link, [connections_specs, [strategy: :one_for_one]]} + start: {Supervisor, :start_link, [conn_specs, [strategy: :one_for_one]]} } {connections_supervisor_spec, adapter_meta} end - @spec child_spec(Keyword.t()) :: Supervisor.child_spec() + @spec child_spec(keyword()) :: Supervisor.child_spec() def child_spec(opts) do name = Keyword.fetch!(opts, :name) - %{ - id: {Redix, name}, - type: :worker, - start: {Redix, :start_link, redix_args(name, opts)} - } + Supervisor.child_spec({Redix, redix_args(name, opts)}, id: {Redix, name}) end - @spec conn_opts(Keyword.t()) :: Keyword.t() + @spec conn_opts(keyword()) :: keyword() def 
conn_opts(opts) do Keyword.get(opts, :conn_opts, host: "127.0.0.1", port: 6379) end @@ -51,8 +47,8 @@ defmodule NebulexRedisAdapter.Connection do |> Keyword.put(:name, name) case Keyword.pop(conn_opts, :url) do - {nil, conn_opts} -> [conn_opts] - {url, conn_opts} -> [url, conn_opts] + {nil, conn_opts} -> conn_opts + {url, conn_opts} -> {url, conn_opts} end end end diff --git a/lib/nebulex/adapters/redis/error_formatter.ex b/lib/nebulex/adapters/redis/error_formatter.ex new file mode 100644 index 0000000..7e70bdf --- /dev/null +++ b/lib/nebulex/adapters/redis/error_formatter.ex @@ -0,0 +1,42 @@ +defmodule Nebulex.Adapters.Redis.ErrorFormatter do + @moduledoc """ + Adapter error formatter. + """ + + import Nebulex.Error, only: [maybe_format_metadata: 2] + + @doc false + def format_error(reason, metadata) do + reason + |> msg(metadata) + |> format_metadata() + end + + defp msg(:redis_connection_error, metadata) do + {"connection not available; maybe the cache was not started or it does not exist", metadata} + end + + defp msg({:redis_cluster_status_error, status}, metadata) do + {"could not run the command because Redis Cluster is in #{status} status", metadata} + end + + defp msg({:redis_cluster_setup_error, exception}, metadata) when is_exception(exception) do + {stacktrace, metadata} = Keyword.pop(metadata, :stacktrace, []) + + msg = """ + could not setup Redis Cluster. 
+ + #{Exception.format(:error, exception, stacktrace) |> String.replace("\n", "\n ")} + """ + + {msg, metadata} + end + + defp msg({:redis_cluster_setup_error, reason}, metadata) do + {"could not setup Redis Cluster, failed with reason: #{inspect(reason)}.", metadata} + end + + defp format_metadata({msg, metadata}) do + maybe_format_metadata(msg, metadata) + end +end diff --git a/lib/nebulex_redis_adapter/helpers.ex b/lib/nebulex/adapters/redis/helpers.ex similarity index 66% rename from lib/nebulex_redis_adapter/helpers.ex rename to lib/nebulex/adapters/redis/helpers.ex index 8927b9e..731836a 100644 --- a/lib/nebulex_redis_adapter/helpers.ex +++ b/lib/nebulex/adapters/redis/helpers.ex @@ -1,4 +1,4 @@ -defmodule NebulexRedisAdapter.Helpers do +defmodule Nebulex.Adapters.Redis.Helpers do @moduledoc false import Bitwise, only: [<<<: 2] @@ -8,12 +8,12 @@ defmodule NebulexRedisAdapter.Helpers do ## API - @spec redis_command_opts(Keyword.t()) :: Keyword.t() + @spec redis_command_opts(keyword()) :: keyword() def redis_command_opts(opts) do Keyword.take(opts, [:timeout, :telemetry_metadata]) end - @spec random_sleep(pos_integer) :: :ok + @spec random_sleep(pos_integer()) :: :ok def random_sleep(times) do t = random_timeout(times) @@ -23,7 +23,7 @@ defmodule NebulexRedisAdapter.Helpers do end end - @spec random_timeout(pos_integer) :: pos_integer + @spec random_timeout(pos_integer()) :: pos_integer() def random_timeout(times) do _ = if rem(times, 10) == 0, do: :rand.seed(:exsplus) @@ -37,11 +37,4 @@ defmodule NebulexRedisAdapter.Helpers do :rand.uniform(tmax) end - - @doc false - defmacro wrap_error(exception, opts) do - quote do - {:error, unquote(exception).exception(unquote(opts))} - end - end end diff --git a/lib/nebulex_redis_adapter/options.ex b/lib/nebulex/adapters/redis/options.ex similarity index 69% rename from lib/nebulex_redis_adapter/options.ex rename to lib/nebulex/adapters/redis/options.ex index 9c5ea02..4bc338f 100644 --- 
a/lib/nebulex_redis_adapter/options.ex +++ b/lib/nebulex/adapters/redis/options.ex @@ -1,10 +1,12 @@ -defmodule NebulexRedisAdapter.Options do +defmodule Nebulex.Adapters.Redis.Options do @moduledoc false - import Nebulex.Helpers + import Nebulex.Utils - # Start option definitions (runtime) - start_opts_defs = [ + alias Nebulex.Adapters.Redis.Cluster.Keyslot, as: RedisClusterKeyslot + + # Start option definitions + start_opts = [ mode: [ type: {:in, [:standalone, :redis_cluster, :client_side_cluster]}, required: false, @@ -24,6 +26,14 @@ defmodule NebulexRedisAdapter.Options do """ ], + stats: [ + type: :boolean, + required: false, + default: true, + doc: """ + A flag to determine whether to collect cache stats. + """ + ], pool_size: [ type: :pos_integer, required: false, @@ -33,10 +43,10 @@ defmodule NebulexRedisAdapter.Options do """ ], serializer: [ - type: {:custom, __MODULE__, :validate_behaviour, [NebulexRedisAdapter.Serializer]}, + type: {:custom, __MODULE__, :validate_behaviour, [Nebulex.Adapters.Redis.Serializer]}, required: false, doc: """ - Custom serializer module implementing the `NebulexRedisAdapter.Serializer` + Custom serializer module implementing the `Nebulex.Adapters.Redis.Serializer` behaviour. """ ], @@ -142,13 +152,11 @@ defmodule NebulexRedisAdapter.Options do """ ], keyslot: [ - type: {:custom, __MODULE__, :validate_behaviour, [Nebulex.Adapter.Keyslot]}, + type: {:fun, 2}, required: false, - default: NebulexRedisAdapter.RedisCluster.Keyslot, + default: &RedisClusterKeyslot.hash_slot/2, doc: """ - The module implementing the `Nebulex.Adapter.Keyslot` - behaviour, which is used to compute the node where the command - will be applied to. + A function to compute the hash slot for a given key and range. """ ] ] @@ -185,28 +193,73 @@ defmodule NebulexRedisAdapter.Options do type: :keyword_list, doc: """ Same as `:conn_opts`. 
+ + Additionally, option `:ch_ring_replicas` is also allowed to + indicate the number of replicas for the consistent hash ring. """ ] ] - ], - keyslot: [ - type: {:custom, __MODULE__, :validate_behaviour, [Nebulex.Adapter.Keyslot]}, - required: false, - default: NebulexRedisAdapter.ClientCluster.Keyslot, - doc: """ - The module implementing the `Nebulex.Adapter.Keyslot` - behaviour, which is used to compute the node where the command - will be applied to. - """ ] ] ] ] + # Command/Pipilene options + command_opts = [ + timeout: [ + type: :timeout, + required: false, + default: :timer.seconds(5), + doc: """ + Same as `Redix.pipeline/3`. + """ + ], + telemetry_metadata: [ + type: {:map, :any, :any}, + default: %{}, + doc: """ + Same as `Redix.pipeline/3`. + """ + ] + ] + + # Stream options + stream_opts = [ + on_error: [ + type: {:in, [:nothing, :raise]}, + type_doc: "`:raise` | `:nothing`", + required: false, + default: :raise, + doc: """ + Indicates whether to raise an exception when an error occurs or do nothing + (skip errors). + + When the stream is evaluated, the adapter attempts to execute the `stream` + command on the different nodes. Still, the execution could fail due to an + RPC error or the command explicitly returns an error. If the option is set + to `:raise`, the command will raise an exception when an error occurs on + the stream evaluation. On the other hand, if it is set to `:nothing`, the + error is skipped. 
+ """ + ] + ] + # Start options schema - @start_opts_schema NimbleOptions.new!(start_opts_defs) + @start_opts_schema NimbleOptions.new!(start_opts) + + # Nebulex common option keys + @nbx_start_opts Nebulex.Cache.Options.__compile_opts__() ++ Nebulex.Cache.Options.__start_opts__() + + # Command/Pipilene options schema + @command_opts_schema NimbleOptions.new!(command_opts) + + # Stream options schema + @stream_opts_schema NimbleOptions.new!(stream_opts ++ command_opts) + + # Stream options docs schema + @stream_opts_docs_schema NimbleOptions.new!(stream_opts) - ## API + ## Docs API # coveralls-ignore-start @@ -215,45 +268,55 @@ defmodule NebulexRedisAdapter.Options do NimbleOptions.docs(@start_opts_schema) end + @spec command_options_docs() :: binary() + def command_options_docs do + NimbleOptions.docs(@command_opts_schema) + end + + @spec stream_options_docs() :: binary() + def stream_options_docs do + NimbleOptions.docs(@stream_opts_docs_schema) + end + # coveralls-ignore-stop + ## Validation API + @spec validate_start_opts!(keyword()) :: keyword() def validate_start_opts!(opts) do start_opts = opts - # Skip validating base Nebulex.Cache options - |> Keyword.drop([ - :otp_app, - :adapter, - :cache, - :name, - :telemetry_prefix, - :telemetry, - :stats - ]) - |> validate!(@start_opts_schema) + |> Keyword.drop(@nbx_start_opts) + |> NimbleOptions.validate!(@start_opts_schema) Keyword.merge(opts, start_opts) end - @spec validate!(keyword(), NimbleOptions.t()) :: keyword() - def validate!(opts, schema) do - opts - |> NimbleOptions.validate(schema) - |> format_error() - end + @spec validate_command_opts!(keyword()) :: keyword() + def validate_command_opts!(opts) do + adapter_opts = + opts + |> Keyword.take([:timeout]) + |> NimbleOptions.validate!(@command_opts_schema) - defp format_error({:ok, opts}) do - opts + Keyword.merge(opts, adapter_opts) end - defp format_error({:error, %NimbleOptions.ValidationError{message: message}}) do - raise ArgumentError, message + @spec 
validate_stream_opts!(keyword()) :: keyword() + def validate_stream_opts!(opts) do + adapter_opts = + opts + |> Keyword.take([:timeout, :on_error]) + |> NimbleOptions.validate!(@stream_opts_schema) + + Keyword.merge(opts, adapter_opts) end + ## Helpers + @doc false def validate_behaviour(module, behaviour) when is_atom(module) and is_atom(behaviour) do - if behaviour in module_behaviours(module, "module") do + if behaviour in module_behaviours(module) do {:ok, module} else {:error, "expected #{inspect(module)} to implement the behaviour #{inspect(behaviour)}"} diff --git a/lib/nebulex/adapters/redis/pool.ex b/lib/nebulex/adapters/redis/pool.ex new file mode 100644 index 0000000..7700cc9 --- /dev/null +++ b/lib/nebulex/adapters/redis/pool.ex @@ -0,0 +1,27 @@ +defmodule Nebulex.Adapters.Redis.Pool do + @moduledoc false + + import Nebulex.Utils, only: [wrap_error: 2] + + alias Nebulex.Adapters.Redis.ErrorFormatter + + ## API + + @spec register_names(atom(), any(), pos_integer(), ({:via, module(), any()} -> any())) :: [any()] + def register_names(registry, key, pool_size, fun) do + for index <- 0..(pool_size - 1) do + fun.({:via, Registry, {registry, {key, index}}}) + end + end + + @spec fetch_conn(atom(), any(), pos_integer()) :: {:ok, pid()} | {:error, Nebulex.Error.t()} + def fetch_conn(registry, key, pool_size) do + # Ensure selecting the connection based on the caller PID + index = :erlang.phash2(self(), pool_size) + + case Registry.lookup(registry, {key, index}) do + [{pid, _}] -> {:ok, pid} + [] -> wrap_error Nebulex.Error, reason: :redis_connection_error, module: ErrorFormatter + end + end +end diff --git a/lib/nebulex_redis_adapter/serializer.ex b/lib/nebulex/adapters/redis/serializer.ex similarity index 70% rename from lib/nebulex_redis_adapter/serializer.ex rename to lib/nebulex/adapters/redis/serializer.ex index ff615f1..01dfc3a 100644 --- a/lib/nebulex_redis_adapter/serializer.ex +++ b/lib/nebulex/adapters/redis/serializer.ex @@ -1,4 +1,4 @@ -defmodule 
NebulexRedisAdapter.Serializer do +defmodule Nebulex.Adapters.Redis.Serializer do @moduledoc """ A **Serializer** encodes keys and values sent to Redis, and decodes keys and values in the command output. @@ -9,29 +9,29 @@ defmodule NebulexRedisAdapter.Serializer do @doc """ Encodes `key` with the given `opts`. """ - @callback encode_key(key :: term, opts :: [term]) :: iodata + @callback encode_key(key :: any(), opts :: [any()]) :: iodata() @doc """ Encodes `value` with the given `opts`. """ - @callback encode_value(value :: term, opts :: [term]) :: iodata + @callback encode_value(value :: any(), opts :: [any()]) :: iodata() @doc """ Decodes `key` with the given `opts`. """ - @callback decode_key(key :: binary, opts :: [term]) :: term + @callback decode_key(key :: binary(), opts :: [any()]) :: any() @doc """ Decodes `value` with the given `opts`. """ - @callback decode_value(value :: binary, opts :: [term]) :: term + @callback decode_value(value :: binary(), opts :: [any()]) :: any() @doc false defmacro __using__(_opts) do quote do - @behaviour NebulexRedisAdapter.Serializer + @behaviour Nebulex.Adapters.Redis.Serializer - alias NebulexRedisAdapter.Serializer.Serializable + alias Nebulex.Adapters.Redis.Serializer.Serializable @impl true defdelegate encode_key(key, opts \\ []), to: Serializable, as: :encode diff --git a/lib/nebulex_redis_adapter/serializer/serializable.ex b/lib/nebulex/adapters/redis/serializer/serializable.ex similarity index 73% rename from lib/nebulex_redis_adapter/serializer/serializable.ex rename to lib/nebulex/adapters/redis/serializer/serializable.ex index de9e0a7..5240418 100644 --- a/lib/nebulex_redis_adapter/serializer/serializable.ex +++ b/lib/nebulex/adapters/redis/serializer/serializable.ex @@ -1,7 +1,7 @@ -defprotocol NebulexRedisAdapter.Serializer.Serializable do +defprotocol Nebulex.Adapters.Redis.Serializer.Serializable do @moduledoc """ Protocol controlling how a key/value is encoded to a string - and how a string is decoded into an 
Elixir term. + and how a string is decoded into an Elixir any(). See [Redis Strings](https://redis.io/docs/data-types/strings/). """ @@ -11,17 +11,17 @@ defprotocol NebulexRedisAdapter.Serializer.Serializable do @doc """ Encodes `data` with the given `opts`. """ - @spec encode(term, [term]) :: binary + @spec encode(any(), [any()]) :: binary() def encode(data, opts \\ []) @doc """ Decodes `data` with the given `opts`. """ - @spec decode(binary, [term]) :: term + @spec decode(binary(), [any()]) :: any() def decode(data, opts \\ []) end -defimpl NebulexRedisAdapter.Serializer.Serializable, for: BitString do +defimpl Nebulex.Adapters.Redis.Serializer.Serializable, for: BitString do def encode(binary, _opts) when is_binary(binary) do binary end @@ -42,7 +42,7 @@ defimpl NebulexRedisAdapter.Serializer.Serializable, for: BitString do end end -defimpl NebulexRedisAdapter.Serializer.Serializable, for: Any do +defimpl Nebulex.Adapters.Redis.Serializer.Serializable, for: Any do def encode(data, opts) do opts = Keyword.take(opts, [:compressed, :minor_version]) diff --git a/lib/nebulex/adapters/redis/supervisor.ex b/lib/nebulex/adapters/redis/supervisor.ex new file mode 100644 index 0000000..c6a4079 --- /dev/null +++ b/lib/nebulex/adapters/redis/supervisor.ex @@ -0,0 +1,24 @@ +defmodule Nebulex.Adapters.Redis.Supervisor do + @moduledoc false + + use Supervisor + + ## API + + @doc false + def start_link({sup_name, conn_child_spec, adapter_meta}) do + Supervisor.start_link(__MODULE__, {conn_child_spec, adapter_meta}, name: sup_name) + end + + ## Supervisor callback + + @impl true + def init({conn_child_spec, %{registry: registry}}) do + children = [ + {Registry, name: registry, keys: :unique}, + conn_child_spec + ] + + Supervisor.init(children, strategy: :rest_for_one) + end +end diff --git a/lib/nebulex_redis_adapter.ex b/lib/nebulex_redis_adapter.ex deleted file mode 100644 index 5d9703f..0000000 --- a/lib/nebulex_redis_adapter.ex +++ /dev/null @@ -1,898 +0,0 @@ -defmodule 
NebulexRedisAdapter do - @moduledoc """ - Nebulex adapter for Redis. This adapter is implemented using `Redix`, - a Redis driver for Elixir. - - **NebulexRedisAdapter** provides three setup alternatives: - - * **Standalone** - The adapter establishes a pool of connections - with a single Redis node. The `:standalone` is the default mode. - - * **Redis Cluster** - [Redis Cluster](https://redis.io/topics/cluster-tutorial) - is a built-in feature in Redis since version 3, and it may be the most - convenient and recommendable way to set up Redis in a cluster and have - a distributed cache storage out-of-box. This adapter provides the - `:redis_cluster` mode to set up **Redis Cluster** from the client-side - automatically and be able to use it transparently. - - * **Built-in client-side cluster based on sharding** - This adapter - provides a simple client-side cluster implementation based on - Sharding distribution model via `:client_side_cluster` mode. - - ## Standalone - - We can define a cache to use Redis as follows: - - defmodule MyApp.RedisCache do - use Nebulex.Cache, - otp_app: :nebulex, - adapter: NebulexRedisAdapter - end - - The configuration for the cache must be in your application environment, - usually defined in your `config/config.exs`: - - config :my_app, MyApp.RedisCache, - conn_opts: [ - host: "127.0.0.1", - port: 6379 - ] - - ## Redis Cluster - - We can define a cache to use Redis Cluster as follows: - - defmodule MyApp.RedisClusterCache do - use Nebulex.Cache, - otp_app: :nebulex, - adapter: NebulexRedisAdapter - end - - The config: - - config :my_app, MyApp.RedisClusterCache, - mode: :redis_cluster, - redis_cluster: [ - configuration_endpoints: [ - endpoint1_conn_opts: [ - host: "127.0.0.1", - port: 6379, - # Add the password if 'requirepass' is on - password: "password" - ], - ... 
- ] - ] - - ## Client-side Cluster - - We can define a cache with "client-side cluster mode" as follows: - - defmodule MyApp.ClusteredCache do - use Nebulex.Cache, - otp_app: :nebulex, - adapter: NebulexRedisAdapter - end - - The config: - - config :my_app, MyApp.ClusteredCache, - mode: :client_side_cluster, - client_side_cluster: [ - nodes: [ - node1: [ - pool_size: 10, - conn_opts: [ - host: "127.0.0.1", - port: 9001 - ] - ], - node2: [ - pool_size: 4, - conn_opts: [ - url: "redis://127.0.0.1:9002" - ] - ], - node3: [ - conn_opts: [ - host: "127.0.0.1", - port: 9003 - ] - ], - ... - ] - ] - - ## Configuration options - - In addition to `Nebulex.Cache` config options, the adapter supports the - following options: - - #{NebulexRedisAdapter.Options.start_options_docs()} - - ## Shared runtime options - - Since the adapter runs on top of `Redix`, all commands accept their options - (e.g.: `:timeout`, and `:telemetry_metadata`). See `Redix` docs for more - information. - - ### Redis Cluster runtime options - - The following options apply to all commands: - - * `:lock_retries` - This option is specific to the `:redis_cluster` mode. - When the config manager is running and setting up the hash slot map, - all Redis commands get blocked until the cluster is properly configured - and the hash slot map is ready to use. This option defines the max retry - attempts to acquire the lock and execute the command in case the - configuration manager is running and all commands are locked. - Defaults to `:infinity`. - - ## Custom Keyslot - - As it is mentioned in the configuration options above, the `:redis_cluster` - and `:client_side_cluster` modes have a default value for the `:keyslot` - option. However, you can also provide your own implementation by implementing - the `Nebulex.Adapter.Keyslot` and configuring the `:keyslot` option. 
- For example: - - defmodule MyApp.ClusteredCache.Keyslot do - use Nebulex.Adapter.Keyslot - - @impl true - def hash_slot(key, range) do - # your implementation goes here - end - end - - And the config: - - config :my_app, MyApp.ClusteredCache, - mode: :client_side_cluster, - client_side_cluster: [ - keyslot: MyApp.ClusteredCache.Keyslot, - nodes: [ - ... - ] - ] - - > **NOTE:** For `:redis_cluster` mode, the`:keyslot` implementation must - follow the [Redis cluster specification][redis_cluster_spec]. - - [redis_cluster_spec]: https://redis.io/docs/reference/cluster-spec/ - - ## TTL or Expiration Time - - As is explained in `Nebulex.Cache`, most of the write-like functions support - the `:ttl` option to define the expiration time, and it is defined in - **milliseconds**. Despite Redis work with **seconds**, the conversion logic - is handled by the adapter transparently, so when using a cache even with the - Redis adapter, be sure you pass the `:ttl` option in **milliseconds**. - - ## Data Types - - Currently. the adapter only works with strings, which means a given Elixir - term is encoded to a binary/string before executing a command. Similarly, - a returned binary from Redis after executing a command is decoded into an - Elixir term. The encoding/decoding process is performed by the adapter - under-the-hood. However, it is possible to provide a custom serializer via the - option `:serializer`. The value must be module implementing the - `NebulexRedisAdapter.Serializer` behaviour. - - **NOTE:** Support for other Redis Data Types is in the roadmap. - - ## Queryable API - - Since the queryable API is implemented by using `KEYS` command, - keep in mind the following caveats: - - * Only keys can be queried. - * Only strings and predefined queries are allowed as query values. - - ### Predefined queries - - * `nil` - All keys are returned. - - * `{:in, [term]}` - Only the keys in the given key list (`[term]`) - are returned. 
This predefined query is only supported for - `c:Nebulex.Cache.delete_all/2`. This is the recommended - way of doing bulk delete of keys. - - ### Examples - - iex> MyApp.RedisCache.put_all(%{ - ...> "firstname" => "Albert", - ...> "lastname" => "Einstein", - ...> "age" => 76 - ...> }) - :ok - - iex> MyApp.RedisCache.all("**name**") - ["firstname", "lastname"] - - iex> MyApp.RedisCache.all("a??") - ["age"] - - iex> MyApp.RedisCache.all() - ["age", "firstname", "lastname"] - - iex> stream = MyApp.RedisCache.stream("**name**") - iex> stream |> Enum.to_list() - ["firstname", "lastname"] - - # get the values for the returned queried keys - iex> "**name**" |> MyApp.RedisCache.all() |> MyApp.RedisCache.get_all() - %{"firstname" => "Albert", "lastname" => "Einstein"} - - ### Deleting multiple keys at once (bulk delete) - - iex> MyApp.RedisCache.delete_all({:in, ["foo", "bar"]}) - 2 - - ## Transactions - - This adapter doesn't provide support for transactions. However, in the future, - it is planned support [Redis Transactions][redis_transactions] by using the - commands `MULTI`, `EXEC`, `DISCARD` and `WATCH`. - - [redis_transactions]: https://redis.io/docs/manual/transactions/ - - ## Running Redis commands and/or pipelines - - Since `NebulexRedisAdapter` works on top of `Redix` and provides features like - connection pools and "Redis Cluster" support, it may be seen also as a sort of - Redis client, but it is meant to be used mainly with the Nebulex cache API. - However, Redis API is quite extensive and there are a lot of useful commands - we may want to run taking advantage of the `NebulexRedisAdapter` features. - Therefore, the adapter provides two additional/extended functions to the - defined cache: `command!/2` and `pipeline!/2`. 
- - ### `command!(command, opts \\\\ [])` - - iex> MyCache.command!(["LPUSH", "mylist", "world"], key: "mylist") - 1 - iex> MyCache.command!(["LPUSH", "mylist", "hello"], key: "mylist") - 2 - iex> MyCache.command!(["LRANGE", "mylist", "0", "-1"], key: "mylist") - ["hello", "world"] - - ### `pipeline!(commands, opts \\\\ [])` - - iex> [ - ...> ["LPUSH", "mylist", "world"], - ...> ["LPUSH", "mylist", "hello"], - ...> ["LRANGE", "mylist", "0", "-1"] - ...> ] - ...> |> cache.pipeline!(key: "mylist") - [1, 2, ["hello", "world"]] - - ### Options for `command!/2` and `pipeline!/2`: - - * `:key` - It is required when used the adapter in mode `:redis_cluster` - or `:client_side_cluster` so that the node where the commands will - take place can be selected properly. For `:standalone` mode is not - required (optional). - * `:name` - The name of the cache in case you are using dynamic caches, - otherwise it is not required. - - Since these functions run on top of `Redix`, they also accept their options - (e.g.: `:timeout`, and `:telemetry_metadata`). See `Redix` docs for more - information. - - ## Telemetry - - This adapter emits the recommended Telemetry events. - See the "Telemetry events" section in `Nebulex.Cache` - for more information. - - ### Adapter-specific telemetry events for the `:redis_cluster` mode - - Aside from the recommended Telemetry events by `Nebulex.Cache`, this adapter - exposes the following Telemetry events for the `:redis_cluster` mode: - - * `telemetry_prefix ++ [:config_manager, :setup, :start]` - This event is - specific to the `:redis_cluster` mode. Before the configuration manager - calls Redis to set up the cluster shards, this event should be invoked. - - The `:measurements` map will include the following: - - * `:system_time` - The current system time in native units from calling: - `System.system_time()`. - - A Telemetry `:metadata` map including the following fields: - - * `:adapter_meta` - The adapter metadata. 
- * `:pid` - The configuration manager PID. - - * `telemetry_prefix ++ [:config_manager, :setup, :stop]` - This event is - specific to the `:redis_cluster` mode. After the configuration manager - set up the cluster shards, this event should be invoked. - - The `:measurements` map will include the following: - - * `:duration` - The time spent configuring the cluster. The measurement - is given in the `:native` time unit. You can read more about it in the - docs for `System.convert_time_unit/3`. - - A Telemetry `:metadata` map including the following fields: - - * `:adapter_meta` - The adapter metadata. - * `:pid` - The configuration manager PID. - * `:status` - The cluster setup status. If the cluster was configured - successfully, the status will be set to `:ok`, otherwise, will be - set to `:error`. - * `:error` - The status reason. When the status is `:ok`, the reason is - `:succeeded`, otherwise, it is the error reason. - - * `telemetry_prefix ++ [:config_manager, :setup, :exception]` - This event - is specific to the `:redis_cluster` mode. When an exception is raised - while configuring the cluster, this event should be invoked. - - The `:measurements` map will include the following: - - * `:duration` - The time spent configuring the cluster. The measurement - is given in the `:native` time unit. You can read more about it in the - docs for `System.convert_time_unit/3`. - - A Telemetry `:metadata` map including the following fields: - - * `:adapter_meta` - The adapter metadata. - * `:pid` - The configuration manager PID. - * `:kind` - The type of the error: `:error`, `:exit`, or `:throw`. - * `:reason` - The reason of the error. - * `:stacktrace` - The stacktrace. 
- - """ - - # Provide Cache Implementation - @behaviour Nebulex.Adapter - @behaviour Nebulex.Adapter.Entry - @behaviour Nebulex.Adapter.Queryable - - # Inherit default stats implementation - use Nebulex.Adapter.Stats - - # Inherit default serializer implementation - use NebulexRedisAdapter.Serializer - - import Nebulex.Adapter - import Nebulex.Helpers - - alias Nebulex.Adapter - alias Nebulex.Adapter.Stats - - alias NebulexRedisAdapter.{ - ClientCluster, - Command, - Connection, - Options, - RedisCluster - } - - ## Nebulex.Adapter - - @impl true - defmacro __before_compile__(_env) do - quote do - @doc """ - A convenience function for executing a Redis command. - """ - def command(command, opts \\ []) do - {name, key, opts} = pop_cache_name_and_key(opts) - - Adapter.with_meta(name, fn _, meta -> - Command.exec(meta, command, key, opts) - end) - end - - @doc """ - A convenience function for executing a Redis command, - but raises an exception if an error occurs. - """ - def command!(command, opts \\ []) do - {name, key, opts} = pop_cache_name_and_key(opts) - - Adapter.with_meta(name, fn _, meta -> - Command.exec!(meta, command, key, opts) - end) - end - - @doc """ - A convenience function for executing a Redis pipeline. - """ - def pipeline(commands, opts \\ []) do - {name, key, opts} = pop_cache_name_and_key(opts) - - Adapter.with_meta(name, fn _, meta -> - Command.pipeline(meta, commands, key, opts) - end) - end - - @doc """ - A convenience function for executing a Redis pipeline, - but raises an exception if an error occurs. 
- """ - def pipeline!(commands, opts \\ []) do - {name, key, opts} = pop_cache_name_and_key(opts) - - Adapter.with_meta(name, fn _, meta -> - Command.pipeline!(meta, commands, key, opts) - end) - end - - defp pop_cache_name_and_key(opts) do - {name, opts} = Keyword.pop(opts, :name, __MODULE__) - {key, opts} = Keyword.pop(opts, :key) - - {name, key, opts} - end - end - end - - @impl true - def init(opts) do - # Required cache name - name = opts[:name] || Keyword.fetch!(opts, :cache) - - # Init stats - stats_counter = Stats.init(opts) - - # Validate options - opts = Options.validate_start_opts!(opts) - - # Adapter mode - mode = Keyword.fetch!(opts, :mode) - - # Local registry - registry = normalize_module_name([name, Registry]) - - # Redis serializer for encoding/decoding keys and values - serializer_meta = assert_serializer!(opts) - - # Resolve the pool size - pool_size = Keyword.get_lazy(opts, :pool_size, fn -> System.schedulers_online() end) - - # Init adapter metadata - adapter_meta = - %{ - cache_pid: self(), - name: name, - mode: mode, - pool_size: pool_size, - stats_counter: stats_counter, - registry: registry, - started_at: DateTime.utc_now(), - default_dt: Keyword.get(opts, :default_data_type, :object), - telemetry: Keyword.fetch!(opts, :telemetry), - telemetry_prefix: Keyword.fetch!(opts, :telemetry_prefix) - } - |> Map.merge(serializer_meta) - - # Init the connections child spec according to the adapter mode - {conn_child_spec, adapter_meta} = do_init(adapter_meta, opts) - - # Build the child spec - child_spec = - Nebulex.Adapters.Supervisor.child_spec( - name: normalize_module_name([name, Supervisor]), - strategy: :one_for_all, - children: [ - {NebulexRedisAdapter.BootstrapServer, adapter_meta}, - {Registry, name: registry, keys: :unique}, - conn_child_spec - ] - ) - - {:ok, child_spec, adapter_meta} - end - - defp assert_serializer!(opts) do - serializer = Keyword.get(opts, :serializer, __MODULE__) - serializer_opts = Keyword.fetch!(opts, 
:serializer_opts) - - %{ - serializer: serializer, - encode_key_opts: Keyword.fetch!(serializer_opts, :encode_key), - encode_value_opts: Keyword.fetch!(serializer_opts, :encode_value), - decode_key_opts: Keyword.fetch!(serializer_opts, :decode_key), - decode_value_opts: Keyword.fetch!(serializer_opts, :decode_value) - } - end - - defp do_init(%{mode: :standalone} = adapter_meta, opts) do - Connection.init(adapter_meta, opts) - end - - defp do_init(%{mode: :redis_cluster} = adapter_meta, opts) do - RedisCluster.init(adapter_meta, opts) - end - - defp do_init(%{mode: :client_side_cluster} = adapter_meta, opts) do - ClientCluster.init(adapter_meta, opts) - end - - ## Nebulex.Adapter.Entry - - @impl true - defspan get(adapter_meta, key, opts) do - %{ - serializer: serializer, - encode_key_opts: enc_key_opts, - decode_value_opts: dec_value_opts - } = adapter_meta - - adapter_meta - |> Command.exec!(["GET", serializer.encode_key(key, enc_key_opts)], key, opts) - |> serializer.decode_value(dec_value_opts) - end - - @impl true - defspan get_all(adapter_meta, keys, opts) do - do_get_all(adapter_meta, keys, opts) - end - - defp do_get_all(%{mode: :standalone} = adapter_meta, keys, opts) do - mget(nil, adapter_meta, keys, opts) - end - - defp do_get_all(adapter_meta, keys, opts) do - keys - |> group_keys_by_hash_slot(adapter_meta, :keys) - |> Enum.reduce(%{}, fn {hash_slot, keys}, acc -> - return = mget(hash_slot, adapter_meta, keys, opts) - - Map.merge(acc, return) - end) - end - - defp mget( - hash_slot_key, - %{ - serializer: serializer, - encode_key_opts: enc_key_opts, - decode_value_opts: dec_value_opts - } = adapter_meta, - keys, - opts - ) do - adapter_meta - |> Command.exec!( - ["MGET" | Enum.map(keys, &serializer.encode_key(&1, enc_key_opts))], - hash_slot_key, - opts - ) - |> Enum.reduce({keys, %{}}, fn - nil, {[_key | keys], acc} -> - {keys, acc} - - value, {[key | keys], acc} -> - {keys, Map.put(acc, key, serializer.decode_value(value, dec_value_opts))} - end) - 
|> elem(1) - end - - @impl true - defspan put(adapter_meta, key, value, ttl, on_write, opts) do - %{ - serializer: serializer, - encode_key_opts: enc_key_opts, - encode_value_opts: enc_value_opts - } = adapter_meta - - redis_k = serializer.encode_key(key, enc_key_opts) - redis_v = serializer.encode_value(value, enc_value_opts) - cmd_opts = cmd_opts(action: on_write, ttl: fix_ttl(ttl)) - - case Command.exec!(adapter_meta, ["SET", redis_k, redis_v | cmd_opts], key, opts) do - "OK" -> true - nil -> false - end - end - - @impl true - defspan put_all(adapter_meta, entries, ttl, on_write, opts) do - ttl = fix_ttl(ttl) - - case adapter_meta.mode do - :standalone -> - do_put_all(adapter_meta, nil, entries, ttl, on_write, opts) - - _ -> - entries - |> group_keys_by_hash_slot(adapter_meta, :tuples) - |> Enum.reduce(:ok, fn {hash_slot, group}, acc -> - acc && do_put_all(adapter_meta, hash_slot, group, ttl, on_write, opts) - end) - end - end - - defp do_put_all( - %{ - serializer: serializer, - encode_key_opts: enc_key_opts, - encode_value_opts: enc_value_opts - } = adapter_meta, - hash_slot, - entries, - ttl, - on_write, - opts - ) do - cmd = - case on_write do - :put -> "MSET" - :put_new -> "MSETNX" - end - - {mset, expire} = - Enum.reduce(entries, {[cmd], []}, fn {key, val}, {acc1, acc2} -> - redis_k = serializer.encode_key(key, enc_key_opts) - - acc2 = - if is_integer(ttl), - do: [["EXPIRE", redis_k, ttl] | acc2], - else: acc2 - - {[serializer.encode_value(val, enc_value_opts), redis_k | acc1], acc2} - end) - - adapter_meta - |> Command.pipeline!([Enum.reverse(mset) | expire], hash_slot, opts) - |> hd() - |> case do - "OK" -> :ok - 1 -> true - 0 -> false - end - end - - @impl true - defspan delete(adapter_meta, key, opts) do - _ = Command.exec!(adapter_meta, ["DEL", enc_key(adapter_meta, key)], key, opts) - - :ok - end - - @impl true - defspan take(adapter_meta, key, opts) do - redis_k = enc_key(adapter_meta, key) - - with_pipeline(adapter_meta, key, [["GET", redis_k], 
["DEL", redis_k]], opts) - end - - @impl true - defspan has_key?(adapter_meta, key) do - case Command.exec!(adapter_meta, ["EXISTS", enc_key(adapter_meta, key)], key) do - 1 -> true - 0 -> false - end - end - - @impl true - defspan ttl(adapter_meta, key) do - case Command.exec!(adapter_meta, ["TTL", enc_key(adapter_meta, key)], key) do - -1 -> :infinity - -2 -> nil - ttl -> ttl * 1000 - end - end - - @impl true - defspan expire(adapter_meta, key, ttl) do - do_expire(adapter_meta, key, ttl) - end - - defp do_expire(adapter_meta, key, :infinity) do - redis_k = enc_key(adapter_meta, key) - - case Command.pipeline!(adapter_meta, [["TTL", redis_k], ["PERSIST", redis_k]], key) do - [-2, 0] -> false - [_, _] -> true - end - end - - defp do_expire(adapter_meta, key, ttl) do - redis_k = enc_key(adapter_meta, key) - - case Command.exec!(adapter_meta, ["EXPIRE", redis_k, fix_ttl(ttl)], key) do - 1 -> true - 0 -> false - end - end - - @impl true - defspan touch(adapter_meta, key) do - redis_k = enc_key(adapter_meta, key) - - case Command.exec!(adapter_meta, ["TOUCH", redis_k], key) do - 1 -> true - 0 -> false - end - end - - @impl true - defspan update_counter(adapter_meta, key, incr, ttl, default, opts) do - do_update_counter(adapter_meta, key, incr, ttl, default, opts) - end - - defp do_update_counter(adapter_meta, key, incr, :infinity, default, opts) do - redis_k = enc_key(adapter_meta, key) - - adapter_meta - |> maybe_incr_default(key, redis_k, default, opts) - |> Command.exec!(["INCRBY", redis_k, incr], key, opts) - end - - defp do_update_counter(adapter_meta, key, incr, ttl, default, opts) do - redis_k = enc_key(adapter_meta, key) - - adapter_meta - |> maybe_incr_default(key, redis_k, default, opts) - |> Command.pipeline!( - [["INCRBY", redis_k, incr], ["EXPIRE", redis_k, fix_ttl(ttl)]], - key, - opts - ) - |> hd() - end - - defp maybe_incr_default(adapter_meta, key, redis_k, default, opts) - when is_integer(default) and default > 0 do - case Command.exec!(adapter_meta, 
["EXISTS", redis_k], key, opts) do - 1 -> - adapter_meta - - 0 -> - _ = Command.exec!(adapter_meta, ["INCRBY", redis_k, default], key, opts) - - adapter_meta - end - end - - defp maybe_incr_default(adapter_meta, _, _, _, _) do - adapter_meta - end - - ## Nebulex.Adapter.Queryable - - @impl true - defspan execute(adapter_meta, operation, query, opts) do - do_execute(adapter_meta, operation, query, opts) - end - - defp do_execute(%{mode: mode} = adapter_meta, :count_all, nil, opts) do - exec!(mode, [adapter_meta, ["DBSIZE"], opts], [0, &Kernel.+(&2, &1)]) - end - - defp do_execute(%{mode: mode} = adapter_meta, :delete_all, nil, opts) do - size = exec!(mode, [adapter_meta, ["DBSIZE"], opts], [0, &Kernel.+(&2, &1)]) - _ = exec!(mode, [adapter_meta, ["FLUSHDB"], opts], []) - - size - end - - defp do_execute(%{mode: :standalone} = adapter_meta, :delete_all, {:in, keys}, opts) - when is_list(keys) do - _ = Command.exec!(adapter_meta, ["DEL" | Enum.map(keys, &enc_key(adapter_meta, &1))], opts) - - length(keys) - end - - defp do_execute(adapter_meta, :delete_all, {:in, keys}, opts) - when is_list(keys) do - :ok = - keys - |> group_keys_by_hash_slot(adapter_meta, :keys) - |> Enum.each(fn {hash_slot, keys_group} -> - Command.exec!( - adapter_meta, - ["DEL" | Enum.map(keys_group, &enc_key(adapter_meta, &1))], - hash_slot, - opts - ) - end) - - length(keys) - end - - defp do_execute(adapter_meta, :all, query, opts) do - execute_query(query, adapter_meta, opts) - end - - @impl true - defspan stream(adapter_meta, query, opts) do - Stream.resource( - fn -> - execute_query(query, adapter_meta, opts) - end, - fn - [] -> {:halt, []} - elems -> {elems, []} - end, - & &1 - ) - end - - @impl Nebulex.Adapter.Stats - def stats(%{started_at: started_at} = adapter_meta) do - if stats = super(adapter_meta) do - %{stats | metadata: Map.put(stats.metadata, :started_at, started_at)} - end - end - - ## Private Functions - - defp with_pipeline( - %{serializer: serializer, decode_value_opts: 
dec_val_opts} = adapter_meta, - key, - pipeline, - opts - ) do - adapter_meta - |> Command.pipeline!(pipeline, key, opts) - |> hd() - |> serializer.decode_value(dec_val_opts) - end - - defp cmd_opts(keys), do: Enum.reduce(keys, [], &cmd_opts/2) - - defp cmd_opts({:action, :put}, acc), do: acc - defp cmd_opts({:action, :put_new}, acc), do: ["NX" | acc] - defp cmd_opts({:action, :replace}, acc), do: ["XX" | acc] - defp cmd_opts({:ttl, :infinity}, acc), do: acc - defp cmd_opts({:ttl, ttl}, acc), do: ["EX", "#{ttl}" | acc] - - defp fix_ttl(:infinity), do: :infinity - defp fix_ttl(ttl) when is_integer(ttl) and ttl >= 1000, do: div(ttl, 1000) - - defp fix_ttl(ttl) do - raise ArgumentError, - "expected ttl: to be an integer >= 1000 or :infinity, got: #{inspect(ttl)}" - end - - defp execute_query(nil, %{serializer: serializer} = adapter_meta, opts) do - "*" - |> execute_query(adapter_meta, opts) - |> Enum.map(&serializer.decode_key/1) - end - - defp execute_query(pattern, %{mode: mode} = adapter_meta, opts) when is_binary(pattern) do - exec!(mode, [adapter_meta, ["KEYS", pattern], opts], [[], &Kernel.++(&1, &2)]) - end - - defp execute_query(pattern, _adapter_meta, _opts) do - raise Nebulex.QueryError, message: "invalid pattern", query: pattern - end - - defp exec!(:standalone, args, _extra_args) do - apply(Command, :exec!, args) - end - - defp exec!(:client_side_cluster, args, extra_args) do - apply(ClientCluster, :exec!, args ++ extra_args) - end - - defp exec!(:redis_cluster, args, extra_args) do - apply(RedisCluster, :exec!, args ++ extra_args) - end - - defp group_keys_by_hash_slot( - enum, - %{ - mode: :client_side_cluster, - nodes: nodes, - keyslot: keyslot - }, - enum_type - ) do - ClientCluster.group_keys_by_hash_slot(enum, nodes, keyslot, enum_type) - end - - defp group_keys_by_hash_slot(enum, %{mode: :redis_cluster, keyslot: keyslot}, enum_type) do - RedisCluster.group_keys_by_hash_slot(enum, keyslot, enum_type) - end - - defp enc_key(%{serializer: serializer, 
encode_key_opts: enc_key_opts}, key) do - serializer.encode_key(key, enc_key_opts) - end -end diff --git a/lib/nebulex_redis_adapter/bootstrap_server.ex b/lib/nebulex_redis_adapter/bootstrap_server.ex deleted file mode 100644 index f5ee7dd..0000000 --- a/lib/nebulex_redis_adapter/bootstrap_server.ex +++ /dev/null @@ -1,61 +0,0 @@ -defmodule NebulexRedisAdapter.BootstrapServer do - @moduledoc """ - This server takes care of initialization and cleanup jobs. For example, - attaching the stats handler when the cache starts and detaching it when - it terminates. - """ - use GenServer - - import Nebulex.Helpers - - alias Nebulex.Telemetry - alias Nebulex.Telemetry.StatsHandler - alias NebulexRedisAdapter.RedisCluster - - ## API - - @spec start_link(Nebulex.Adapter.adapter_meta()) :: GenServer.on_start() - def start_link(adapter_meta) do - name = normalize_module_name([Map.fetch!(adapter_meta, :name), BootstrapServer]) - - GenServer.start_link(__MODULE__, adapter_meta, name: name) - end - - ## GenServer Callbacks - - @impl true - def init(adapter_meta) do - _ = Process.flag(:trap_exit, true) - - {:ok, adapter_meta, {:continue, :attach_stats_handler}} - end - - @impl true - def handle_continue(:attach_stats_handler, adapter_meta) do - _ = maybe_attach_stats_handler(adapter_meta) - - {:noreply, adapter_meta} - end - - @impl true - def terminate(_reason, adapter_meta) do - _ = if ref = adapter_meta.stats_counter, do: Telemetry.detach(ref) - - if adapter_meta.mode == :redis_cluster do - RedisCluster.del_status_key(adapter_meta.name) - end - end - - ## Private Functions - - defp maybe_attach_stats_handler(adapter_meta) do - if ref = adapter_meta.stats_counter do - Telemetry.attach_many( - ref, - [adapter_meta.telemetry_prefix ++ [:command, :stop]], - &StatsHandler.handle_event/4, - ref - ) - end - end -end diff --git a/lib/nebulex_redis_adapter/client_cluster.ex b/lib/nebulex_redis_adapter/client_cluster.ex deleted file mode 100644 index df331ca..0000000 --- 
a/lib/nebulex_redis_adapter/client_cluster.ex +++ /dev/null @@ -1,130 +0,0 @@ -defmodule NebulexRedisAdapter.ClientCluster do - # Client-side Cluster - @moduledoc false - - import NebulexRedisAdapter.Helpers - - alias NebulexRedisAdapter.ClientCluster.Supervisor, as: ClientClusterSupervisor - alias NebulexRedisAdapter.{Options, Pool} - - @typedoc "Proxy type to the adapter meta" - @type adapter_meta :: Nebulex.Adapter.metadata() - - @type hash_slot :: {:"$hash_slot", term} - @type node_entry :: {node_name :: atom, pool_size :: pos_integer} - @type nodes_config :: [node_entry] - - ## API - - @spec init(adapter_meta, Keyword.t()) :: {Supervisor.child_spec(), adapter_meta} - def init(%{name: name, registry: registry, pool_size: pool_size} = adapter_meta, opts) do - cluster_opts = Keyword.get(opts, :client_side_cluster) - - # Ensure :client_side_cluster is provided - if is_nil(cluster_opts) do - raise ArgumentError, - Options.invalid_cluster_config_error( - "invalid value for :client_side_cluster option: ", - nil, - :client_side_cluster - ) - end - - {node_connections_specs, nodes} = - cluster_opts - |> Keyword.fetch!(:nodes) - |> Enum.reduce({[], []}, fn {node_name, node_opts}, {acc1, acc2} -> - node_opts = - node_opts - |> Keyword.put(:name, name) - |> Keyword.put(:registry, registry) - |> Keyword.put(:node, node_name) - |> Keyword.put_new(:pool_size, pool_size) - - child_spec = - Supervisor.child_spec({ClientClusterSupervisor, node_opts}, - type: :supervisor, - id: {name, node_name} - ) - - {[child_spec | acc1], [{node_name, Keyword.fetch!(node_opts, :pool_size)} | acc2]} - end) - - node_connections_supervisor_spec = %{ - id: :node_connections_supervisor, - type: :supervisor, - start: {Supervisor, :start_link, [node_connections_specs, [strategy: :one_for_one]]} - } - - # Update adapter meta - adapter_meta = - Map.merge(adapter_meta, %{ - nodes: nodes, - keyslot: Keyword.fetch!(cluster_opts, :keyslot) - }) - - {node_connections_supervisor_spec, adapter_meta} - end - 
- @spec exec!( - Nebulex.Adapter.adapter_meta(), - Redix.command(), - Keyword.t(), - init_acc :: any, - reducer :: (any, any -> any) - ) :: any | no_return - def exec!( - %{name: name, registry: registry, nodes: nodes}, - command, - opts, - init_acc \\ nil, - reducer \\ fn res, _ -> res end - ) do - Enum.reduce(nodes, init_acc, fn {node_name, pool_size}, acc -> - registry - |> Pool.get_conn({name, node_name}, pool_size) - |> Redix.command!(command, redis_command_opts(opts)) - |> reducer.(acc) - end) - end - - @spec get_conn(atom, atom, nodes_config, atom) :: pid - def get_conn(registry, name, nodes, node_name) do - pool_size = Keyword.fetch!(nodes, node_name) - - Pool.get_conn(registry, {name, node_name}, pool_size) - end - - @spec get_conn(atom, atom, nodes_config, term, module) :: pid - def get_conn(registry, name, nodes, key, module) do - {node_name, pool_size} = get_node(module, nodes, key) - - Pool.get_conn(registry, {name, node_name}, pool_size) - end - - @spec group_keys_by_hash_slot(Enum.t(), nodes_config, module, atom()) :: map - def group_keys_by_hash_slot(enum, nodes, module, :keys) do - Enum.group_by(enum, &hash_slot(module, &1, nodes)) - end - - def group_keys_by_hash_slot(enum, nodes, module, :tuples) do - Enum.group_by(enum, fn {key, _} -> hash_slot(module, key, nodes) end) - end - - ## Private Functions - - defp get_node(module, nodes, key) do - index = module.hash_slot(key, length(nodes)) - - Enum.at(nodes, index) - end - - defp hash_slot(module, key, nodes) do - node = - module - |> get_node(nodes, key) - |> elem(0) - - {:"$hash_slot", node} - end -end diff --git a/lib/nebulex_redis_adapter/client_cluster/keyslot.ex b/lib/nebulex_redis_adapter/client_cluster/keyslot.ex deleted file mode 100644 index 03630fa..0000000 --- a/lib/nebulex_redis_adapter/client_cluster/keyslot.ex +++ /dev/null @@ -1,15 +0,0 @@ -defmodule NebulexRedisAdapter.ClientCluster.Keyslot do - @moduledoc """ - Default Keyslot implementation when `:jchash` module is loaded. 
- """ - use Nebulex.Adapter.Keyslot - - if Code.ensure_loaded?(:jchash) do - @impl true - def hash_slot(key, range) do - key - |> :erlang.phash2() - |> :jchash.compute(range) - end - end -end diff --git a/lib/nebulex_redis_adapter/command.ex b/lib/nebulex_redis_adapter/command.ex deleted file mode 100644 index f507c11..0000000 --- a/lib/nebulex_redis_adapter/command.ex +++ /dev/null @@ -1,181 +0,0 @@ -defmodule NebulexRedisAdapter.Command do - # Redix command executor - @moduledoc false - - import NebulexRedisAdapter.Helpers - - alias NebulexRedisAdapter.{ - ClientCluster, - Pool, - RedisCluster, - RedisCluster.ConfigManager - } - - ## API - - @doc """ - Executes a Redis command. - """ - @spec exec( - Nebulex.Adapter.adapter_meta(), - Redix.command(), - Nebulex.Cache.key(), - Keyword.t() - ) :: {:ok, term} | {:error, term} - def exec(adapter_meta, command, key \\ nil, opts \\ []) do - # TODO: Handle errors; especially for :redis_cluster mode - adapter_meta - |> conn(key, opts) - |> Redix.command(command, redis_command_opts(opts)) - end - - @doc """ - Executes a Redis command, but raises an exception if an error occurs. - """ - @spec exec!( - Nebulex.Adapter.adapter_meta(), - Redix.command(), - Nebulex.Cache.key(), - Keyword.t() - ) :: term - def exec!(adapter_meta, command, key \\ nil, opts \\ []) - - def exec!(%{mode: :redis_cluster, name: name} = adapter_meta, command, key, opts) do - on_moved = fn -> - # Re-configure the cluster - :ok = ConfigManager.setup_shards(name) - - # Retry once more - do_exec!(adapter_meta, command, key, opts, nil) - end - - do_exec!(adapter_meta, command, key, opts, on_moved) - end - - def exec!(adapter_meta, command, key, opts) do - do_exec!(adapter_meta, command, key, opts, nil) - end - - defp do_exec!(adapter_meta, command, key, opts, on_moved) do - adapter_meta - |> conn(key, opts) - |> Redix.command(command, redis_command_opts(opts)) - |> handle_command_response(on_moved) - end - - @doc """ - Executes a Redis pipeline. 
- """ - @spec pipeline( - Nebulex.Adapter.adapter_meta(), - [Redix.command()], - Nebulex.Cache.key(), - Keyword.t() - ) :: {:ok, [term]} | {:error, term} - def pipeline(adapter_meta, commands, key \\ nil, opts \\ []) do - # TODO: Handle errors; especially for :redis_cluster mode - adapter_meta - |> conn(key, opts) - |> Redix.pipeline(commands, redis_command_opts(opts)) - end - - @doc """ - Executes a Redis pipeline, but raises an exception if an error occurs. - """ - @spec pipeline!( - Nebulex.Adapter.adapter_meta(), - [Redix.command()], - Nebulex.Cache.key(), - Keyword.t() - ) :: [term] - def pipeline!(adapter_meta, commands, key \\ nil, opts \\ []) - - def pipeline!(%{mode: :redis_cluster, name: name} = adapter_meta, commands, key, opts) do - on_moved = fn -> - # Re-configure the cluster - :ok = ConfigManager.setup_shards(name) - - # Retry once more - do_pipeline!(adapter_meta, commands, key, opts, nil) - end - - do_pipeline!(adapter_meta, commands, key, opts, on_moved) - end - - def pipeline!(adapter_meta, commands, key, opts) do - do_pipeline!(adapter_meta, commands, key, opts, nil) - end - - defp do_pipeline!(adapter_meta, commands, key, opts, on_moved) do - adapter_meta - |> conn(key, opts) - |> Redix.pipeline(commands, redis_command_opts(opts)) - |> handle_command_response(on_moved) - |> check_pipeline_errors(on_moved) - end - - ## Private Functions - - defp conn( - %{mode: :standalone, name: name, registry: registry, pool_size: pool_size}, - _key, - _opts - ) do - Pool.get_conn(registry, name, pool_size) - end - - defp conn(%{mode: :redis_cluster, name: name} = meta, key, opts) do - with nil <- RedisCluster.get_conn(meta, key, opts) do - # Perhars the cluster has to be re-configured again - :ok = ConfigManager.setup_shards(name) - - # Retry once more - RedisCluster.get_conn(meta, key, opts) - end - end - - defp conn( - %{mode: :client_side_cluster, name: name, registry: registry, nodes: nodes}, - {:"$hash_slot", node_name}, - _opts - ) do - 
ClientCluster.get_conn(registry, name, nodes, node_name) - end - - defp conn( - %{ - mode: :client_side_cluster, - name: name, - registry: registry, - nodes: nodes, - keyslot: keyslot - }, - key, - _opts - ) do - ClientCluster.get_conn(registry, name, nodes, key, keyslot) - end - - defp handle_command_response({:ok, response}, _on_moved) do - response - end - - defp handle_command_response({:error, %Redix.Error{message: "MOVED" <> _}}, on_moved) - when is_function(on_moved) do - on_moved.() - end - - defp handle_command_response({:error, reason}, _on_moved) do - raise reason - end - - defp check_pipeline_errors(results, on_moved) do - Enum.map(results, fn - %Redix.Error{} = error -> - handle_command_response({:error, error}, on_moved) - - result -> - result - end) - end -end diff --git a/lib/nebulex_redis_adapter/exceptions.ex b/lib/nebulex_redis_adapter/exceptions.ex deleted file mode 100644 index 580fbbe..0000000 --- a/lib/nebulex_redis_adapter/exceptions.ex +++ /dev/null @@ -1,45 +0,0 @@ -defmodule NebulexRedisAdapter.Error do - @moduledoc """ - NebulexRedisAdapter error. - """ - - @typedoc "Error reason type" - @type reason :: :atom | {:atom, term} - - @typedoc "Error type" - @type t :: %__MODULE__{reason: reason, cache: atom} - - # Exception struct - defexception reason: nil, cache: nil - - ## API - - @doc false - def exception(opts) do - reason = Keyword.fetch!(opts, :reason) - cache = Keyword.fetch!(opts, :cache) - - %__MODULE__{reason: reason, cache: cache} - end - - @doc false - def message(%__MODULE__{reason: reason, cache: cache}) do - format_error(reason, cache) - end - - ## Helpers - - def format_error(:redis_cluster_status_error, cache) do - "Could not run the command because Redis Cluster is in error status " <> - "for cache #{inspect(cache)}." - end - - def format_error({:redis_cluster_setup_error, reason}, cache) when is_exception(reason) do - "Could not setup Redis Cluster for cache #{inspect(cache)}. 
" <> Exception.message(reason) - end - - def format_error({:redis_cluster_setup_error, reason}, cache) do - "Could not setup Redis Cluster for cache #{inspect(cache)}. " <> - "Failed with error #{inspect(reason)}." - end -end diff --git a/lib/nebulex_redis_adapter/pool.ex b/lib/nebulex_redis_adapter/pool.ex deleted file mode 100644 index 195e41f..0000000 --- a/lib/nebulex_redis_adapter/pool.ex +++ /dev/null @@ -1,23 +0,0 @@ -defmodule NebulexRedisAdapter.Pool do - @moduledoc false - - ## API - - @spec register_names(atom, term, pos_integer, ({:via, module, term} -> term)) :: [term] - def register_names(registry, key, pool_size, fun) do - for index <- 0..(pool_size - 1) do - fun.({:via, Registry, {registry, {key, index}}}) - end - end - - @spec get_conn(atom, term, pos_integer) :: pid - def get_conn(registry, key, pool_size) do - # Ensure selecting the same connection based on the caller PID - index = :erlang.phash2(self(), pool_size) - - registry - |> Registry.lookup({key, index}) - |> hd() - |> elem(0) - end -end diff --git a/lib/nebulex_redis_adapter/redis_cluster.ex b/lib/nebulex_redis_adapter/redis_cluster.ex deleted file mode 100644 index 949f899..0000000 --- a/lib/nebulex_redis_adapter/redis_cluster.ex +++ /dev/null @@ -1,199 +0,0 @@ -defmodule NebulexRedisAdapter.RedisCluster do - # Redis Cluster Manager - @moduledoc false - - import NebulexRedisAdapter.Helpers - - alias NebulexRedisAdapter.{Options, Pool} - alias NebulexRedisAdapter.RedisCluster.Keyslot, as: RedisClusterKeyslot - - @typedoc "Proxy type to the adapter meta" - @type adapter_meta :: Nebulex.Adapter.adapter_meta() - - # Redis cluster hash slots size - @redis_cluster_hash_slots 16_384 - - ## API - - @spec init(adapter_meta, Keyword.t()) :: {Supervisor.child_spec(), adapter_meta} - def init(%{name: name} = adapter_meta, opts) do - # Ensure :redis_cluster is provided - if is_nil(Keyword.get(opts, :redis_cluster)) do - raise ArgumentError, - Options.invalid_cluster_config_error( - "invalid value 
for :redis_cluster option: ", - nil, - :redis_cluster - ) - end - - # Init ETS table to store the hash slot map - cluster_shards_tab = init_hash_slot_map_table(name) - - # Update adapter meta - adapter_meta = - Map.merge(adapter_meta, %{ - cluster_shards_tab: cluster_shards_tab, - keyslot: get_keyslot(opts) - }) - - children = [ - {NebulexRedisAdapter.RedisCluster.DynamicSupervisor, {adapter_meta, opts}}, - {NebulexRedisAdapter.RedisCluster.ConfigManager, {adapter_meta, opts}} - ] - - cluster_shards_supervisor_spec = %{ - id: {name, RedisClusterSupervisor}, - type: :supervisor, - start: {Supervisor, :start_link, [children, [strategy: :rest_for_one]]} - } - - {cluster_shards_supervisor_spec, adapter_meta} - end - - @spec exec!(adapter_meta, Redix.command(), Keyword.t(), init_acc :: any, (any, any -> any)) :: - any - def exec!( - %{ - name: name, - cluster_shards_tab: cluster_shards_tab, - registry: registry, - pool_size: pool_size - }, - command, - opts, - init_acc \\ nil, - reducer \\ fn res, _ -> res end - ) do - with_retry(name, Keyword.get(opts, :lock_retries, :infinity), fn -> - cluster_shards_tab - |> :ets.lookup(:cluster_shards) - |> Enum.reduce(init_acc, fn slot_id, acc -> - registry - |> Pool.get_conn(slot_id, pool_size) - |> Redix.command!(command, redis_command_opts(opts)) - |> reducer.(acc) - end) - end) - end - - @spec get_conn(adapter_meta, {:"$hash_slot", any} | any, Keyword.t()) :: pid | nil - def get_conn( - %{ - name: name, - keyslot: keyslot, - cluster_shards_tab: cluster_shards_tab, - registry: registry, - pool_size: pool_size - }, - key, - opts - ) do - with_retry(name, Keyword.get(opts, :lock_retries, :infinity), fn -> - {:"$hash_slot", hash_slot} = - case key do - {:"$hash_slot", _} -> - key - - _ -> - hash_slot(key, keyslot) - end - - cluster_shards_tab - |> :ets.lookup(:cluster_shards) - |> Enum.reduce_while(nil, fn - {_, start, stop} = slot_id, _acc when hash_slot >= start and hash_slot <= stop -> - {:halt, Pool.get_conn(registry, slot_id, 
pool_size)} - - _, acc -> - {:cont, acc} - end) - end) - end - - @spec group_keys_by_hash_slot(Enum.t(), module, atom()) :: map - def group_keys_by_hash_slot(enum, keyslot, :keys) do - Enum.group_by(enum, &hash_slot(&1, keyslot)) - end - - def group_keys_by_hash_slot(enum, keyslot, :tuples) do - Enum.group_by(enum, fn {key, _} -> hash_slot(key, keyslot) end) - end - - @spec hash_slot(any, module) :: {:"$hash_slot", pos_integer} - def hash_slot(key, keyslot \\ RedisClusterKeyslot) do - {:"$hash_slot", keyslot.hash_slot(key, @redis_cluster_hash_slots)} - end - - @spec get_status(atom, atom) :: atom - def get_status(name, default \\ :locked) when is_atom(name) and is_atom(default) do - name - |> status_key() - |> :persistent_term.get(default) - end - - @spec put_status(atom, atom) :: :ok - def put_status(name, status) when is_atom(name) and is_atom(status) do - # An atom is a single word so this does not trigger a global GC - name - |> status_key() - |> :persistent_term.put(status) - end - - @spec del_status_key(atom) :: boolean - def del_status_key(name) when is_atom(name) do - # An atom is a single word so this does not trigger a global GC - name - |> status_key() - |> :persistent_term.erase() - end - - @spec with_retry(atom, pos_integer, (-> term)) :: term - def with_retry(name, retries, fun) do - with_retry(name, fun, retries, 1) - end - - # coveralls-ignore-start - - defp with_retry(_name, fun, max_retries, retries) when retries >= max_retries do - fun.() - end - - # coveralls-ignore-stop - - defp with_retry(name, fun, max_retries, retries) do - case get_status(name) do - :ok -> - fun.() - - :locked -> - :ok = random_sleep(retries) - - with_retry(name, fun, max_retries, retries + 1) - - :error -> - raise NebulexRedisAdapter.Error, reason: :redis_cluster_status_error, cache: name - end - end - - ## Private Functions - - # Inline common instructions - @compile {:inline, status_key: 1} - - defp status_key(name), do: {name, :redis_cluster_status} - - defp 
init_hash_slot_map_table(name) do - :ets.new(name, [ - :public, - :duplicate_bag, - read_concurrency: true - ]) - end - - defp get_keyslot(opts) do - opts - |> Keyword.fetch!(:redis_cluster) - |> Keyword.fetch!(:keyslot) - end -end diff --git a/mix.exs b/mix.exs index dde15d1..f9359b3 100644 --- a/mix.exs +++ b/mix.exs @@ -1,22 +1,22 @@ -defmodule NebulexRedisAdapter.MixProject do +defmodule Nebulex.Adapters.Redis.MixProject do use Mix.Project @source_url "https://github.com/cabol/nebulex_redis_adapter" - @version "2.4.2" - @nbx_tag "2.6.4" - @nbx_vsn "2.6" + @version "3.0.0-dev" + # @nbx_tag "2.6.3" + # @nbx_vsn "2.6" def project do [ app: :nebulex_redis_adapter, version: @version, - elixir: "~> 1.12", + elixir: "~> 1.15", elixirc_paths: elixirc_paths(Mix.env()), aliases: aliases(), deps: deps(), # Docs - name: "NebulexRedisAdapter", + name: "Nebulex.Adapters.Redis", docs: docs(), # Testing @@ -52,30 +52,32 @@ defmodule NebulexRedisAdapter.MixProject do nebulex_dep(), {:redix, "~> 1.5"}, {:nimble_options, "~> 0.5 or ~> 1.0"}, + {:telemetry, "~> 0.4 or ~> 1.0"}, {:crc, "~> 0.10", optional: true}, {:jchash, "~> 0.1", optional: true}, - {:telemetry, "~> 0.4 or ~> 1.0", optional: true}, + {:ex_hash_ring, "~> 6.0", optional: true}, # Test & Code Analysis {:excoveralls, "~> 0.18", only: :test}, {:credo, "~> 1.7", only: [:dev, :test], runtime: false}, {:dialyxir, "~> 1.4", only: [:dev, :test], runtime: false}, - {:mimic, "~> 1.9", only: :test}, + {:mimic, "~> 1.10", only: :test}, + {:stream_data, "~> 1.1", only: [:dev, :test]}, # Benchmark Test {:benchee, "~> 1.3", only: :test}, {:benchee_html, "~> 1.0", only: :test}, # Docs - {:ex_doc, "~> 0.34", only: [:dev, :test], runtime: false} + {:ex_doc, "~> 0.35", only: [:dev, :test], runtime: false} ] end defp nebulex_dep do if path = System.get_env("NEBULEX_PATH") do - {:nebulex, "~> #{@nbx_tag}", path: path} + {:nebulex, path: path} else - {:nebulex, "~> #{@nbx_vsn}"} + {:nebulex, github: "cabol/nebulex", branch: 
"v3.0.0-dev"} end end @@ -83,7 +85,7 @@ defmodule NebulexRedisAdapter.MixProject do [ "nbx.setup": [ "cmd rm -rf nebulex", - "cmd git clone --depth 1 --branch v#{@nbx_tag} https://github.com/cabol/nebulex" + "cmd git clone --depth 1 --branch v3.0.0-dev https://github.com/cabol/nebulex" ], check: [ "compile --warnings-as-errors", @@ -106,7 +108,7 @@ defmodule NebulexRedisAdapter.MixProject do defp docs do [ - main: "NebulexRedisAdapter", + main: "Nebulex.Adapters.Redis", source_ref: "v#{@version}", canonical: "http://hexdocs.pm/nebulex_redis_adapter", source_url: @source_url diff --git a/mix.lock b/mix.lock index b9bf065..488fd9c 100644 --- a/mix.lock +++ b/mix.lock @@ -4,26 +4,28 @@ "benchee_json": {:hex, :benchee_json, "1.0.0", "cc661f4454d5995c08fe10dd1f2f72f229c8f0fb1c96f6b327a8c8fc96a91fe5", [:mix], [{:benchee, ">= 0.99.0 and < 2.0.0", [hex: :benchee, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "da05d813f9123505f870344d68fb7c86a4f0f9074df7d7b7e2bb011a63ec231c"}, "bunt": {:hex, :bunt, "1.0.0", "081c2c665f086849e6d57900292b3a161727ab40431219529f13c4ddcf3e7a44", [:mix], [], "hexpm", "dc5f86aa08a5f6fa6b8096f0735c4e76d54ae5c9fa2c143e5a1fc7c1cd9bb6b5"}, "crc": {:hex, :crc, "0.10.5", "ee12a7c056ac498ef2ea985ecdc9fa53c1bfb4e53a484d9f17ff94803707dfd8", [:mix, :rebar3], [{:elixir_make, "~> 0.6", [hex: :elixir_make, repo: "hexpm", optional: false]}], "hexpm", "3e673b6495a9525c5c641585af1accba59a1eb33de697bedf341e247012c2c7f"}, - "credo": {:hex, :credo, "1.7.8", "9722ba1681e973025908d542ec3d95db5f9c549251ba5b028e251ad8c24ab8c5", [:mix], [{:bunt, "~> 0.2.1 or ~> 1.0", [hex: :bunt, repo: "hexpm", optional: false]}, {:file_system, "~> 0.2 or ~> 1.0", [hex: :file_system, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "cb9e87cc64f152f3ed1c6e325e7b894dea8f5ef2e41123bd864e3cd5ceb44968"}, + "credo": {:hex, :credo, "1.7.10", 
"6e64fe59be8da5e30a1b96273b247b5cf1cc9e336b5fd66302a64b25749ad44d", [:mix], [{:bunt, "~> 0.2.1 or ~> 1.0", [hex: :bunt, repo: "hexpm", optional: false]}, {:file_system, "~> 0.2 or ~> 1.0", [hex: :file_system, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "71fbc9a6b8be21d993deca85bf151df023a3097b01e09a2809d460348561d8cd"}, "deep_merge": {:hex, :deep_merge, "1.0.0", "b4aa1a0d1acac393bdf38b2291af38cb1d4a52806cf7a4906f718e1feb5ee961", [:mix], [], "hexpm", "ce708e5f094b9cd4e8f2be4f00d2f4250c4095be93f8cd6d018c753894885430"}, - "dialyxir": {:hex, :dialyxir, "1.4.4", "fb3ce8741edeaea59c9ae84d5cec75da00fa89fe401c72d6e047d11a61f65f70", [:mix], [{:erlex, ">= 0.2.7", [hex: :erlex, repo: "hexpm", optional: false]}], "hexpm", "cd6111e8017ccd563e65621a4d9a4a1c5cd333df30cebc7face8029cacb4eff6"}, + "dialyxir": {:hex, :dialyxir, "1.4.5", "ca1571ac18e0f88d4ab245f0b60fa31ff1b12cbae2b11bd25d207f865e8ae78a", [:mix], [{:erlex, ">= 0.2.7", [hex: :erlex, repo: "hexpm", optional: false]}], "hexpm", "b0fb08bb8107c750db5c0b324fa2df5ceaa0f9307690ee3c1f6ba5b9eb5d35c3"}, "earmark_parser": {:hex, :earmark_parser, "1.4.41", "ab34711c9dc6212dda44fcd20ecb87ac3f3fce6f0ca2f28d4a00e4154f8cd599", [:mix], [], "hexpm", "a81a04c7e34b6617c2792e291b5a2e57ab316365c2644ddc553bb9ed863ebefa"}, - "elixir_make": {:hex, :elixir_make, "0.8.4", "4960a03ce79081dee8fe119d80ad372c4e7badb84c493cc75983f9d3bc8bde0f", [:mix], [{:castore, "~> 0.1 or ~> 1.0", [hex: :castore, repo: "hexpm", optional: true]}, {:certifi, "~> 2.0", [hex: :certifi, repo: "hexpm", optional: true]}], "hexpm", "6e7f1d619b5f61dfabd0a20aa268e575572b542ac31723293a4c1a567d5ef040"}, + "elixir_make": {:hex, :elixir_make, "0.9.0", "6484b3cd8c0cee58f09f05ecaf1a140a8c97670671a6a0e7ab4dc326c3109726", [:mix], [], "hexpm", "db23d4fd8b757462ad02f8aa73431a426fe6671c80b200d9710caf3d1dd0ffdb"}, "erlex": {:hex, :erlex, "0.2.7", "810e8725f96ab74d17aac676e748627a07bc87eb950d2b83acd29dc047a30595", [:mix], 
[], "hexpm", "3ed95f79d1a844c3f6bf0cea61e0d5612a42ce56da9c03f01df538685365efb0"}, - "ex_doc": {:hex, :ex_doc, "0.34.2", "13eedf3844ccdce25cfd837b99bea9ad92c4e511233199440488d217c92571e8", [:mix], [{:earmark_parser, "~> 1.4.39", [hex: :earmark_parser, repo: "hexpm", optional: false]}, {:makeup_c, ">= 0.1.0", [hex: :makeup_c, repo: "hexpm", optional: true]}, {:makeup_elixir, "~> 0.14 or ~> 1.0", [hex: :makeup_elixir, repo: "hexpm", optional: false]}, {:makeup_erlang, "~> 0.1 or ~> 1.0", [hex: :makeup_erlang, repo: "hexpm", optional: false]}, {:makeup_html, ">= 0.1.0", [hex: :makeup_html, repo: "hexpm", optional: true]}], "hexpm", "5ce5f16b41208a50106afed3de6a2ed34f4acfd65715b82a0b84b49d995f95c1"}, + "ex_doc": {:hex, :ex_doc, "0.35.1", "de804c590d3df2d9d5b8aec77d758b00c814b356119b3d4455e4b8a8687aecaf", [:mix], [{:earmark_parser, "~> 1.4.39", [hex: :earmark_parser, repo: "hexpm", optional: false]}, {:makeup_c, ">= 0.1.0", [hex: :makeup_c, repo: "hexpm", optional: true]}, {:makeup_elixir, "~> 0.14 or ~> 1.0", [hex: :makeup_elixir, repo: "hexpm", optional: false]}, {:makeup_erlang, "~> 0.1 or ~> 1.0", [hex: :makeup_erlang, repo: "hexpm", optional: false]}, {:makeup_html, ">= 0.1.0", [hex: :makeup_html, repo: "hexpm", optional: true]}], "hexpm", "2121c6402c8d44b05622677b761371a759143b958c6c19f6558ff64d0aed40df"}, + "ex_hash_ring": {:hex, :ex_hash_ring, "6.0.4", "bef9d2d796afbbe25ab5b5a7ed746e06b99c76604f558113c273466d52fa6d6b", [:mix], [], "hexpm", "89adabf31f7d3dfaa36802ce598ce918e9b5b33bae8909ac1a4d052e1e567d18"}, "excoveralls": {:hex, :excoveralls, "0.18.3", "bca47a24d69a3179951f51f1db6d3ed63bca9017f476fe520eb78602d45f7756", [:mix], [{:castore, "~> 1.0", [hex: :castore, repo: "hexpm", optional: true]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "746f404fcd09d5029f1b211739afb8fb8575d775b21f6a3908e7ce3e640724c6"}, "file_system": {:hex, :file_system, "1.0.1", "79e8ceaddb0416f8b8cd02a0127bdbababe7bf4a23d2a395b983c1f8b3f73edd", [:mix], [], 
"hexpm", "4414d1f38863ddf9120720cd976fce5bdde8e91d8283353f0e31850fa89feb9e"}, "ham": {:hex, :ham, "0.3.0", "7cd031b4a55fba219c11553e7b13ba73bd86eab4034518445eff1e038cb9a44d", [:mix], [], "hexpm", "7d6c6b73d7a6a83233876cc1b06a4d9b5de05562b228effda4532f9a49852bf6"}, "jason": {:hex, :jason, "1.4.4", "b9226785a9aa77b6857ca22832cffa5d5011a667207eb2a0ad56adb5db443b8a", [:mix], [{:decimal, "~> 1.0 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: true]}], "hexpm", "c5eb0cab91f094599f94d55bc63409236a8ec69a21a67814529e8d5f6cc90b3b"}, "jchash": {:hex, :jchash, "0.1.4", "996eaef8217764c5edb6c75bea87ec4e48534b5ba8ed233b5da1726583bbe348", [:rebar3], [], "hexpm", "f0d739cd75b2b9ff44b242b1912afb9fa44361102e76509098a0922bf4a511ed"}, - "makeup": {:hex, :makeup, "1.1.2", "9ba8837913bdf757787e71c1581c21f9d2455f4dd04cfca785c70bbfff1a76a3", [:mix], [{:nimble_parsec, "~> 1.2.2 or ~> 1.3", [hex: :nimble_parsec, repo: "hexpm", optional: false]}], "hexpm", "cce1566b81fbcbd21eca8ffe808f33b221f9eee2cbc7a1706fc3da9ff18e6cac"}, - "makeup_elixir": {:hex, :makeup_elixir, "0.16.2", "627e84b8e8bf22e60a2579dad15067c755531fea049ae26ef1020cad58fe9578", [:mix], [{:makeup, "~> 1.0", [hex: :makeup, repo: "hexpm", optional: false]}, {:nimble_parsec, "~> 1.2.3 or ~> 1.3", [hex: :nimble_parsec, repo: "hexpm", optional: false]}], "hexpm", "41193978704763f6bbe6cc2758b84909e62984c7752b3784bd3c218bb341706b"}, + "makeup": {:hex, :makeup, "1.2.1", "e90ac1c65589ef354378def3ba19d401e739ee7ee06fb47f94c687016e3713d1", [:mix], [{:nimble_parsec, "~> 1.4", [hex: :nimble_parsec, repo: "hexpm", optional: false]}], "hexpm", "d36484867b0bae0fea568d10131197a4c2e47056a6fbe84922bf6ba71c8d17ce"}, + "makeup_elixir": {:hex, :makeup_elixir, "1.0.1", "e928a4f984e795e41e3abd27bfc09f51db16ab8ba1aebdba2b3a575437efafc2", [:mix], [{:makeup, "~> 1.0", [hex: :makeup, repo: "hexpm", optional: false]}, {:nimble_parsec, "~> 1.2.3 or ~> 1.3", [hex: :nimble_parsec, repo: "hexpm", optional: false]}], "hexpm", 
"7284900d412a3e5cfd97fdaed4f5ed389b8f2b4cb49efc0eb3bd10e2febf9507"}, "makeup_erlang": {:hex, :makeup_erlang, "1.0.1", "c7f58c120b2b5aa5fd80d540a89fdf866ed42f1f3994e4fe189abebeab610839", [:mix], [{:makeup, "~> 1.0", [hex: :makeup, repo: "hexpm", optional: false]}], "hexpm", "8a89a1eeccc2d798d6ea15496a6e4870b75e014d1af514b1b71fa33134f57814"}, "mimic": {:hex, :mimic, "1.10.2", "0d7e67ba09b1e8fe21a61a91f4cb2b876151c2d7e1c9bf6fc325195dd33075dd", [:mix], [{:ham, "~> 0.2", [hex: :ham, repo: "hexpm", optional: false]}], "hexpm", "21a50eddbdee1e9bad93cb8738bd4e224913d0d25a06692d34fb19881dba7292"}, - "nebulex": {:hex, :nebulex, "2.6.4", "4b00706e0e676474783d988962abf74614480e13c0a32645acb89bb32b660e09", [:mix], [{:decorator, "~> 1.4", [hex: :decorator, repo: "hexpm", optional: true]}, {:shards, "~> 1.1", [hex: :shards, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: true]}], "hexpm", "25bdabf3fb86035c8151bba60bda20f80f96ae0261db7bd4090878ff63b03581"}, + "nebulex": {:git, "https://github.com/cabol/nebulex.git", "c213a4599f69e491d2122b0dd8b0ff7ca1b6316e", [branch: "v3.0.0-dev"]}, "nimble_options": {:hex, :nimble_options, "1.1.1", "e3a492d54d85fc3fd7c5baf411d9d2852922f66e69476317787a7b2bb000a61b", [:mix], [], "hexpm", "821b2470ca9442c4b6984882fe9bb0389371b8ddec4d45a9504f00a66f650b44"}, "nimble_parsec": {:hex, :nimble_parsec, "1.4.0", "51f9b613ea62cfa97b25ccc2c1b4216e81df970acd8e16e8d1bdc58fef21370d", [:mix], [], "hexpm", "9c565862810fb383e9838c1dd2d7d2c437b3d13b267414ba6af33e50d2d1cf28"}, "redix": {:hex, :redix, "1.5.2", "ab854435a663f01ce7b7847f42f5da067eea7a3a10c0a9d560fa52038fd7ab48", [:mix], [{:castore, "~> 0.1.0 or ~> 1.0", [hex: :castore, repo: "hexpm", optional: true]}, {:nimble_options, "~> 0.5.0 or ~> 1.0", [hex: :nimble_options, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4.0 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", 
"78538d184231a5d6912f20567d76a49d1be7d3fca0e1aaaa20f4df8e1142dcb8"}, "statistex": {:hex, :statistex, "1.0.0", "f3dc93f3c0c6c92e5f291704cf62b99b553253d7969e9a5fa713e5481cd858a5", [:mix], [], "hexpm", "ff9d8bee7035028ab4742ff52fc80a2aa35cece833cf5319009b52f1b5a86c27"}, + "stream_data": {:hex, :stream_data, "1.1.2", "05499eaec0443349ff877aaabc6e194e82bda6799b9ce6aaa1aadac15a9fdb4d", [:mix], [], "hexpm", "129558d2c77cbc1eb2f4747acbbea79e181a5da51108457000020a906813a1a9"}, "telemetry": {:hex, :telemetry, "1.3.0", "fedebbae410d715cf8e7062c96a1ef32ec22e764197f70cda73d82778d61e7a2", [:rebar3], [], "hexpm", "7015fc8919dbe63764f4b4b87a95b7c0996bd539e0d499be6ec9d7f3875b79e6"}, } diff --git a/test/nebulex_redis_adapter/client_cluster_test.exs b/test/nebulex/adapters/redis/client_side_cluster_test.exs similarity index 55% rename from test/nebulex_redis_adapter/client_cluster_test.exs rename to test/nebulex/adapters/redis/client_side_cluster_test.exs index 97fa789..ebaf0b9 100644 --- a/test/nebulex_redis_adapter/client_cluster_test.exs +++ b/test/nebulex/adapters/redis/client_side_cluster_test.exs @@ -1,18 +1,20 @@ -defmodule NebulexRedisAdapter.ClientClusterTest do +defmodule Nebulex.Adapters.Redis.ClientSideClusterTest do use ExUnit.Case, async: true - use NebulexRedisAdapter.CacheTest + @moduletag capture_log: true - alias NebulexRedisAdapter.TestCache.ClientCluster, as: Cache + # Inherited tests + use Nebulex.Adapters.Redis.CacheTest + use Nebulex.CacheTestCase, except: [Nebulex.Cache.KVPropTest, Nebulex.Cache.TransactionTest] + + import Nebulex.CacheCase + + alias Nebulex.Adapters.Redis.TestCache.ClientSideCluster, as: Cache setup do {:ok, pid} = Cache.start_link() _ = Cache.delete_all() - :ok - on_exit(fn -> - :ok = Process.sleep(100) - if Process.alive?(pid), do: Cache.stop(pid) - end) + on_exit(fn -> safe_stop(pid) end) {:ok, cache: Cache, name: Cache} end @@ -23,12 +25,12 @@ defmodule NebulexRedisAdapter.ClientClusterTest do @moduledoc false use Nebulex.Cache, otp_app: 
:nebulex_redis_adapter, - adapter: NebulexRedisAdapter + adapter: Nebulex.Adapters.Redis end _ = Process.flag(:trap_exit, true) - assert {:error, {%ArgumentError{message: msg}, _}} = + assert {:error, {%NimbleOptions.ValidationError{message: msg}, _}} = ClientClusterWithInvalidOpts.start_link(mode: :client_side_cluster) assert Regex.match?(~r/invalid value for :client_side_cluster option: expected/, msg) diff --git a/test/nebulex/adapters/redis/client_test.exs b/test/nebulex/adapters/redis/client_test.exs new file mode 100644 index 0000000..5e99691 --- /dev/null +++ b/test/nebulex/adapters/redis/client_test.exs @@ -0,0 +1,55 @@ +defmodule Nebulex.Adapters.Redis.ClientTest do + use ExUnit.Case, async: true + use Mimic + + import Nebulex.CacheCase, only: [safe_stop: 1] + + alias Nebulex.Adapters.Redis.Client + + @adapter_meta %{mode: :standalone, name: :test, registry: :test, pool_size: 1} + + describe "command/3" do + test "error: raises an exception" do + Nebulex.Adapters.Redis.Pool + |> expect(:fetch_conn, fn _, _, _ -> {:ok, self()} end) + + Redix + |> expect(:command, fn _, _, _ -> {:error, %Redix.Error{}} end) + + assert {:error, %Redix.Error{}} = Client.command(@adapter_meta, [["PING"]]) + end + end + + describe "transaction_pipeline/3" do + test "error: raises an exception" do + Nebulex.Adapters.Redis.Pool + |> expect(:fetch_conn, fn _, _, _ -> {:ok, self()} end) + + Redix + |> expect(:pipeline, fn _, _, _ -> {:ok, [%Redix.Error{}]} end) + + assert {:error, %Redix.Error{}} = Client.transaction_pipeline(@adapter_meta, [["PING"]]) + end + end + + describe "fetch_conn/3" do + setup do + {:ok, pid} = Registry.start_link(keys: :unique, name: __MODULE__.Registry) + + on_exit(fn -> safe_stop(pid) end) + + {:ok, pid: pid} + end + + test "error: no connections available" do + assert Client.fetch_conn(%{@adapter_meta | registry: __MODULE__.Registry}, :key, 1) == + {:error, + %Nebulex.Error{ + __exception__: true, + metadata: [], + module: 
Nebulex.Adapters.Redis.ErrorFormatter, + reason: :redis_connection_error + }} + end + end +end diff --git a/test/nebulex/adapters/redis/cluster_cache_test.exs b/test/nebulex/adapters/redis/cluster_cache_test.exs new file mode 100644 index 0000000..e527153 --- /dev/null +++ b/test/nebulex/adapters/redis/cluster_cache_test.exs @@ -0,0 +1,25 @@ +defmodule Nebulex.Adapters.Redis.ClusterCacheTest do + use ExUnit.Case, async: false + @moduletag :nebulex_test + @moduletag capture_log: true + + # Inherited tests from Nebulex + use Nebulex.CacheTestCase, + only: [ + Nebulex.Cache.QueryableTest, + Nebulex.Cache.QueryableExpirationTest + ] + + import Nebulex.CacheCase + + alias Nebulex.Adapters.Redis.TestCache.RedisCluster, as: Cache + + setup do + {:ok, pid} = Cache.start_link() + _ = Cache.delete_all!() + + on_exit(fn -> safe_stop(pid) end) + + {:ok, cache: Cache, name: Cache} + end +end diff --git a/test/nebulex/adapters/redis/cluster_test.exs b/test/nebulex/adapters/redis/cluster_test.exs new file mode 100644 index 0000000..3dcd21e --- /dev/null +++ b/test/nebulex/adapters/redis/cluster_test.exs @@ -0,0 +1,295 @@ +defmodule Nebulex.Adapters.Redis.ClusterTest do + use ExUnit.Case, async: false + @moduletag capture_log: true + + use Mimic + + # Inherited tests + use Nebulex.Adapters.Redis.CacheTest + + # Inherited tests from Nebulex + use Nebulex.CacheTestCase, + except: [ + Nebulex.Cache.KVExpirationTest, + Nebulex.Cache.KVPropTest, + Nebulex.Cache.TransactionTest, + Nebulex.Cache.QueryableTest, + Nebulex.Cache.QueryableExpirationTest + ] + + import Nebulex.CacheCase + import Nebulex.Utils, only: [wrap_error: 2] + + alias Nebulex.Adapters.Redis.Cluster + alias Nebulex.Adapters.Redis.Cluster.Keyslot + alias Nebulex.Adapters.Redis.TestCache.RedisCluster, as: Cache + alias Nebulex.Adapters.Redis.TestCache.RedisClusterConnError + + # Telemetry events + @redis_cluster_prefix ~w(nebulex cache redis_cluster setup)a + @redis_cluster_start @redis_cluster_prefix ++ [:start] + 
@redis_cluster_stop @redis_cluster_prefix ++ [:stop] + @redis_cluster_events [@redis_cluster_start, @redis_cluster_stop] + + setup do + {:ok, pid} = Cache.start_link() + _ = Cache.delete_all!() + + on_exit(fn -> safe_stop(pid) end) + + {:ok, cache: Cache, name: Cache} + end + + describe "cluster setup" do + test "error: missing :redis_cluster option" do + defmodule RedisClusterWithInvalidOpts do + @moduledoc false + use Nebulex.Cache, + otp_app: :nebulex_redis_adapter, + adapter: Nebulex.Adapters.Redis + end + + _ = Process.flag(:trap_exit, true) + + assert {:error, {%NimbleOptions.ValidationError{message: msg}, _}} = + RedisClusterWithInvalidOpts.start_link(mode: :redis_cluster) + + assert Regex.match?(~r/invalid value for :redis_cluster option: expected non-empty/, msg) + end + + test "error: invalid :redis_cluster options", %{cache: cache} do + _ = Process.flag(:trap_exit, true) + + assert {:error, {%NimbleOptions.ValidationError{message: msg}, _}} = + cache.start_link(name: :redis_cluster_invalid_opts1, redis_cluster: []) + + assert Regex.match?(~r/invalid value for :redis_cluster option: expected non-empty/, msg) + end + + test "error: invalid :keyslot option", %{cache: cache} do + _ = Process.flag(:trap_exit, true) + + assert {:error, {%NimbleOptions.ValidationError{message: msg}, _}} = + cache.start_link( + name: :redis_cluster_invalid_opts2, + redis_cluster: [configuration_endpoints: [x: []], keyslot: RedisClusterConnError] + ) + + assert msg =~ "invalid value for :keyslot option: expected" + end + + test "error: connection with config endpoint cannot be established" do + with_telemetry_handler(__MODULE__, [@redis_cluster_stop], fn -> + {:ok, _pid} = RedisClusterConnError.start_link() + + # 1st failed attempt + assert_receive {@redis_cluster_stop, %{duration: _}, %{status: :error}}, 5000 + + # Command fails because the cluster is in error status + assert_raise Nebulex.Error, + ~r/could not run the command because Redis Cluster is in error status/, + fn -> + 
RedisClusterConnError.get!("foo") + end + + # 2nd failed attempt + assert_receive {@redis_cluster_stop, %{duration: _}, %{status: :error}}, 5000 + end) + end + + test "error: redis cluster is shutdown" do + _ = Process.flag(:trap_exit, true) + + with_telemetry_handler(__MODULE__, @redis_cluster_events, fn -> + {:ok, _} = + RedisClusterConnError.start_link( + redis_cluster: [ + configuration_endpoints: [ + endpoint1_conn_opts: [ + host: "127.0.0.1", + port: 6380, + password: "password" + ] + ], + override_master_host: true + ] + ) + + assert_receive {@redis_cluster_start, _, %{pid: pid}}, 5000 + assert_receive {@redis_cluster_stop, %{duration: _}, %{status: :ok}}, 5000 + + :ok = GenServer.stop(pid) + + assert_raise Nebulex.Error, ~r"Redis Cluster is in shutdown status", fn -> + RedisClusterConnError.fetch!("foo", lock_retries: 2) + end + end) + end + + test "error: command failed after reconfiguring cluster" do + with_telemetry_handler(__MODULE__, @redis_cluster_events, fn -> + {:ok, _} = + RedisClusterConnError.start_link( + redis_cluster: [ + configuration_endpoints: [ + endpoint1_conn_opts: [ + url: "redis://127.0.0.1:6380", + password: "password" + ] + ], + override_master_host: true + ] + ) + + assert_receive {@redis_cluster_start, _, %{pid: pid}}, 5000 + + # Setup mocks - testing Redis version < 7 (["CLUSTER", "SLOTS"]) + Redix + |> expect(:command, fn _, _ -> {:error, %Redix.Error{}} end) + |> expect(:command, fn _, _ -> {:ok, [[0, 16_384, ["127.0.0.1", 6380]]]} end) + |> allow(self(), pid) + + assert_receive {@redis_cluster_stop, %{duration: _}, %{status: :ok}}, 5000 + + # Setup mocks + Nebulex.Adapters.Redis.Cluster + |> expect(:fetch_conn, fn _, _, _ -> + wrap_error Nebulex.Error, reason: :redis_connection_error + end) + + refute RedisClusterConnError.get!("foo") + + assert_receive {@redis_cluster_stop, %{duration: _}, %{status: :ok}}, 5000 + end) + end + end + + describe "hash_slot (default CRC16)" do + test "ok: returns the expected slot" do + assert 
Cluster.hash_slot("123456789") == {:"$hash_slot", 12_739} + end + end + + describe "keys with hash tags" do + test "compute_key/1" do + assert Keyslot.compute_key("{foo}.bar") == "foo" + assert Keyslot.compute_key("foo{bar}foo") == "bar" + assert Keyslot.compute_key("foo.{bar}") == "bar" + assert Keyslot.compute_key("foo.{bar}foo") == "bar" + assert Keyslot.compute_key("foo.{}bar") == "foo.{}bar" + assert Keyslot.compute_key("foo.{bar") == "foo.{bar" + assert Keyslot.compute_key("foo.}bar") == "foo.}bar" + assert Keyslot.compute_key("foo.{hello}bar{world}!") == "hello" + assert Keyslot.compute_key("foo.bar") == "foo.bar" + end + + test "hash_slot/2" do + for i <- 0..10 do + assert Cluster.hash_slot("{foo}.#{i}") == + Cluster.hash_slot("{foo}.#{i + 1}") + + assert Cluster.hash_slot("foo.{bar}.#{i}") == + Cluster.hash_slot("foo.{bar}.#{i + 1}") + end + + assert Cluster.hash_slot("foo.{bar.1") != Cluster.hash_slot("foo.{bar.2") + end + + test "put and get operations", %{cache: cache} do + assert cache.put_all!(%{"foo{bar}.1" => "bar1", "foo{bar}.2" => "bar2"}) == :ok + + assert cache.get_all!(in: ["foo{bar}.1", "foo{bar}.2"]) |> Map.new() == %{ + "foo{bar}.1" => "bar1", + "foo{bar}.2" => "bar2" + } + end + + test "put and get operations with tupled keys", %{cache: cache} do + assert cache.put_all!(%{ + {__MODULE__, "key1"} => "bar1", + {__MODULE__, "key2"} => "bar2" + }) == :ok + + assert cache.get_all!(in: [{__MODULE__, "key1"}, {__MODULE__, "key2"}]) + |> Map.new() == %{ + {__MODULE__, "key1"} => "bar1", + {__MODULE__, "key2"} => "bar2" + } + + assert cache.put_all!(%{ + {__MODULE__, {Nested, "key1"}} => "bar1", + {__MODULE__, {Nested, "key2"}} => "bar2" + }) == :ok + + assert cache.get_all!( + in: [ + {__MODULE__, {Nested, "key1"}}, + {__MODULE__, {Nested, "key2"}} + ] + ) + |> Map.new() == %{ + {__MODULE__, {Nested, "key1"}} => "bar1", + {__MODULE__, {Nested, "key2"}} => "bar2" + } + end + end + + describe "MOVED" do + test "error: raises an exception in the 2nd 
attempt after reconfiguring the cluster", %{ + cache: cache + } do + _ = Process.flag(:trap_exit, true) + + # Setup mocks + Nebulex.Adapters.Redis.Cluster.Keyslot + |> stub(:hash_slot, &:erlang.phash2/2) + + # put is executed with a Redis command + assert_raise Nebulex.Error, ~r/\*\* \(Redix.Error\) MOVED/, fn -> + cache.put!("1234567890", "hello") + end + + # take is executed with a Redis transaction pipeline + assert_raise Nebulex.Error, ~r/\*\* \(Redix.Error\) MOVED/, fn -> + cache.take!("1234567890") + end + end + + test "ok: command is successful after configuring the cluster", %{cache: cache} do + with_telemetry_handler(__MODULE__, @redis_cluster_events, fn -> + # Setup mocks + Nebulex.Adapters.Redis.Cluster.Keyslot + |> expect(:hash_slot, fn _, _ -> 0 end) + + # Triggers MOVED error the first time, then the command succeeds + :ok = cache.put!("MOVED", "MOVED") + + # Cluster is re-configured + assert_receive {@redis_cluster_stop, %{duration: _}, %{status: :ok}}, 5000 + + # Command was executed successfully + assert cache.get!("MOVED") == "MOVED" + end) + end + end + + describe "put_new_all" do + test "error: key already exists", %{cache: cache} do + assert cache.put_new_all!(v1: 1, v2: 2) == true + + Redix + |> stub(:command, fn _, _, _ -> {:ok, 0} end) + + assert cache.put_new_all!(v1: 1, v3: 3) == false + end + + test "error: command failed", %{cache: cache} do + Redix + |> expect(:command, fn _, _, _ -> {:error, %Redix.ConnectionError{reason: :timeout}} end) + + assert_raise Nebulex.Error, ~r/timeout/, fn -> + cache.put_new_all!(v1: 1, v3: 3) + end + end + end +end diff --git a/test/nebulex_redis_adapter/codec/string_proto_test.exs b/test/nebulex/adapters/redis/codec/string_proto_test.exs similarity index 79% rename from test/nebulex_redis_adapter/codec/string_proto_test.exs rename to test/nebulex/adapters/redis/codec/string_proto_test.exs index d2e4679..14c0a63 100644 --- a/test/nebulex_redis_adapter/codec/string_proto_test.exs +++ 
b/test/nebulex/adapters/redis/codec/string_proto_test.exs @@ -1,12 +1,12 @@ -defmodule NebulexRedisAdapter.Serializer.SerializableTest do +defmodule Nebulex.Adapters.Redis.Serializer.SerializableTest do use ExUnit.Case, async: true - alias NebulexRedisAdapter.Serializer.Serializable + alias Nebulex.Adapters.Redis.Serializer.Serializable defmodule Cache do use Nebulex.Cache, otp_app: :nebulex_redis_adapter, - adapter: NebulexRedisAdapter + adapter: Nebulex.Adapters.Redis end describe "encode/2" do @@ -37,8 +37,8 @@ defmodule NebulexRedisAdapter.Serializer.SerializableTest do {:cont, []} end - assert Cache.put("fun", fun) == :ok - assert value = Cache.get("fun") + assert Cache.put!("fun", fun) == :ok + assert value = Cache.get!("fun") assert is_function(value, 2) assert value == fun assert Enum.to_list(value) == [] diff --git a/test/nebulex/adapters/redis/standalone_test.exs b/test/nebulex/adapters/redis/standalone_test.exs new file mode 100644 index 0000000..0b197a9 --- /dev/null +++ b/test/nebulex/adapters/redis/standalone_test.exs @@ -0,0 +1,22 @@ +defmodule Nebulex.Adapters.Redis.StandaloneTest do + use ExUnit.Case, async: true + @moduletag capture_log: true + + # Inherited tests + use Nebulex.Adapters.Redis.CacheTest + use Nebulex.CacheTestCase, except: [Nebulex.Cache.TransactionTest] + + import Nebulex.CacheCase, only: [safe_stop: 1] + + alias Nebulex.Adapters.Redis.TestCache.Standalone, as: Cache + + setup do + {:ok, pid} = Cache.start_link() + + _ = Cache.delete_all() + + on_exit(fn -> safe_stop(pid) end) + + {:ok, cache: Cache, name: Cache} + end +end diff --git a/test/nebulex_redis_adapter/command_test.exs b/test/nebulex_redis_adapter/command_test.exs deleted file mode 100644 index 7027f80..0000000 --- a/test/nebulex_redis_adapter/command_test.exs +++ /dev/null @@ -1,23 +0,0 @@ -defmodule NebulexRedisAdapter.CommandTest do - use ExUnit.Case, async: true - use Mimic - - alias NebulexRedisAdapter.Command - - describe "pipeline!/3" do - test "error: raises an 
exception" do - NebulexRedisAdapter.Pool - |> expect(:get_conn, fn _, _, _ -> self() end) - - Redix - |> expect(:pipeline, fn _, _, _ -> {:ok, [%Redix.Error{}]} end) - - assert_raise Redix.Error, fn -> - Command.pipeline!( - %{mode: :standalone, name: :test, registry: :test, pool_size: 1}, - [["PING"]] - ) - end - end - end -end diff --git a/test/nebulex_redis_adapter/redis_cluster_test.exs b/test/nebulex_redis_adapter/redis_cluster_test.exs deleted file mode 100644 index 6cd0cc2..0000000 --- a/test/nebulex_redis_adapter/redis_cluster_test.exs +++ /dev/null @@ -1,269 +0,0 @@ -defmodule NebulexRedisAdapter.RedisClusterTest do - use ExUnit.Case, async: true - use NebulexRedisAdapter.CacheTest - use Mimic - - import Nebulex.CacheCase, only: [with_telemetry_handler: 3] - - alias NebulexRedisAdapter.RedisCluster - alias NebulexRedisAdapter.RedisCluster.Keyslot, as: RedisClusterKeyslot - alias NebulexRedisAdapter.TestCache.RedisCluster, as: Cache - alias NebulexRedisAdapter.TestCache.RedisClusterConnError - - @moduletag capture_log: true - - setup do - {:ok, pid} = Cache.start_link() - _ = Cache.delete_all() - - on_exit(fn -> - :ok = Process.sleep(100) - - if Process.alive?(pid), do: Cache.stop(pid) - end) - - {:ok, cache: Cache, name: Cache} - end - - describe "cluster setup" do - setup do - start_event = telemetry_event(:redis_cluster_conn_error, :start) - stop_event = telemetry_event(:redis_cluster_conn_error, :stop) - - {:ok, events: [start_event, stop_event]} - end - - test "error: missing :redis_cluster option" do - defmodule RedisClusterWithInvalidOpts do - @moduledoc false - use Nebulex.Cache, - otp_app: :nebulex_redis_adapter, - adapter: NebulexRedisAdapter - end - - _ = Process.flag(:trap_exit, true) - - assert {:error, {%ArgumentError{message: msg}, _}} = - RedisClusterWithInvalidOpts.start_link(mode: :redis_cluster) - - assert Regex.match?(~r/invalid value for :redis_cluster option: expected non-empty/, msg) - end - - test "error: invalid :redis_cluster 
options" do - _ = Process.flag(:trap_exit, true) - - assert {:error, {%ArgumentError{message: msg}, _}} = - Cache.start_link(name: :redis_cluster_invalid_opts1, redis_cluster: []) - - assert Regex.match?(~r/invalid value for :redis_cluster option: expected non-empty/, msg) - end - - test "error: invalid :keyslot option" do - _ = Process.flag(:trap_exit, true) - - assert {:error, {%ArgumentError{message: msg}, _}} = - Cache.start_link( - name: :redis_cluster_invalid_opts2, - redis_cluster: [configuration_endpoints: [x: []], keyslot: RedisClusterConnError] - ) - - assert msg == - "invalid value for :keyslot option: expected " <> - "NebulexRedisAdapter.TestCache.RedisClusterConnError " <> - "to implement the behaviour Nebulex.Adapter.Keyslot" - end - - test "error: connection with config endpoint cannot be established", %{events: [_, stop]} do - with_telemetry_handler(__MODULE__, [stop], fn -> - {:ok, _pid} = RedisClusterConnError.start_link() - - # 1st failed attempt - assert_receive {^stop, %{duration: _}, %{status: :error}}, 5000 - - # 2dn failed attempt - assert_receive {^stop, %{duration: _}, %{status: :error}}, 5000 - end) - end - - test "error: redis cluster status is set to error", %{events: [start, stop] = events} do - with_telemetry_handler(__MODULE__, events, fn -> - {:ok, _} = - RedisClusterConnError.start_link( - redis_cluster: [ - configuration_endpoints: [ - endpoint1_conn_opts: [ - host: "127.0.0.1", - port: 6380, - password: "password" - ] - ], - override_master_host: true - ] - ) - - assert_receive {^start, _, %{pid: pid}}, 5000 - assert_receive {^stop, %{duration: _}, %{status: :ok}}, 5000 - - :ok = GenServer.stop(pid) - - assert_raise NebulexRedisAdapter.Error, ~r"Redis Cluster is in error status", fn -> - RedisClusterConnError.get("foo") - end - end) - end - - test "error: command failed after reconfiguring cluster", %{events: [start, stop] = events} do - with_telemetry_handler(__MODULE__, events, fn -> - {:ok, _} = - 
RedisClusterConnError.start_link( - redis_cluster: [ - configuration_endpoints: [ - endpoint1_conn_opts: [ - url: "redis://127.0.0.1:6380", - password: "password" - ] - ], - override_master_host: true - ] - ) - - assert_receive {^start, _, %{pid: pid}}, 5000 - - # Setup mocks - testing Redis version < 7 (["CLUSTER", "SLOTS"]) - Redix - |> expect(:command, fn _, _ -> {:error, %Redix.Error{}} end) - |> expect(:command, fn _, _ -> {:ok, [[0, 16_384, ["127.0.0.1", 6380]]]} end) - |> allow(self(), pid) - - assert_receive {^stop, %{duration: _}, %{status: :ok}}, 5000 - - # Setup mocks - NebulexRedisAdapter.RedisCluster - |> expect(:get_conn, fn _, _, _ -> nil end) - - refute RedisClusterConnError.get("foo") - - assert_receive {^stop, %{duration: _}, %{status: :ok}}, 5000 - end) - end - end - - describe "CRC16" do - test "ok: returns the expected hash_slot" do - assert RedisCluster.hash_slot("123456789") == {:"$hash_slot", 12_739} - end - end - - describe "keys with hash tags" do - test "compute_key/1" do - assert RedisClusterKeyslot.compute_key("{foo}.bar") == "foo" - assert RedisClusterKeyslot.compute_key("foo{bar}foo") == "bar" - assert RedisClusterKeyslot.compute_key("foo.{bar}") == "bar" - assert RedisClusterKeyslot.compute_key("foo.{bar}foo") == "bar" - assert RedisClusterKeyslot.compute_key("foo.{}bar") == "foo.{}bar" - assert RedisClusterKeyslot.compute_key("foo.{bar") == "foo.{bar" - assert RedisClusterKeyslot.compute_key("foo.}bar") == "foo.}bar" - assert RedisClusterKeyslot.compute_key("foo.{hello}bar{world}!") == "hello" - assert RedisClusterKeyslot.compute_key("foo.bar") == "foo.bar" - end - - test "hash_slot/2" do - for i <- 0..10 do - assert RedisCluster.hash_slot("{foo}.#{i}") == - RedisCluster.hash_slot("{foo}.#{i + 1}") - - assert RedisCluster.hash_slot("foo.{bar}.#{i}") == - RedisCluster.hash_slot("foo.{bar}.#{i + 1}") - end - - assert RedisCluster.hash_slot("{foo.1") != RedisCluster.hash_slot("{foo.2") - end - - test "put and get operations" do - 
assert Cache.put_all(%{"{foo}.1" => "bar1", "{foo}.2" => "bar2"}) == :ok - - assert Cache.get_all(["{foo}.1", "{foo}.2"]) == %{"{foo}.1" => "bar1", "{foo}.2" => "bar2"} - end - - test "put and get operations with tupled keys" do - assert Cache.put_all(%{ - {RedisCache.Testing, "key1"} => "bar1", - {RedisCache.Testing, "key2"} => "bar2" - }) == :ok - - assert Cache.get_all([{RedisCache.Testing, "key1"}, {RedisCache.Testing, "key2"}]) == %{ - {RedisCache.Testing, "key1"} => "bar1", - {RedisCache.Testing, "key2"} => "bar2" - } - - assert Cache.put_all(%{ - {RedisCache.Testing, {Nested, "key1"}} => "bar1", - {RedisCache.Testing, {Nested, "key2"}} => "bar2" - }) == :ok - - assert Cache.get_all([ - {RedisCache.Testing, {Nested, "key1"}}, - {RedisCache.Testing, {Nested, "key2"}} - ]) == %{ - {RedisCache.Testing, {Nested, "key1"}} => "bar1", - {RedisCache.Testing, {Nested, "key2"}} => "bar2" - } - end - end - - describe "MOVED" do - setup do - stop_event = telemetry_event(:redis_cluster, :stop) - - {:ok, events: [stop_event]} - end - - test "error: raises an exception in the 2nd attempt after reconfiguring the cluster" do - _ = Process.flag(:trap_exit, true) - - # Setup mocks - NebulexRedisAdapter.RedisCluster.Keyslot - |> stub(:hash_slot, &:erlang.phash2/2) - - # put is executed with a Redis command - assert_raise Redix.Error, ~r"MOVED", fn -> - Cache.put("1234567890", "hello") - end - - # put_all is executed with a Redis pipeline - assert_raise Redix.Error, ~r"MOVED", fn -> - Cache.put_all(foo: "bar", bar: "foo") - end - end - - test "ok: command is successful after configuring the cluster", %{events: [stop] = events} do - with_telemetry_handler(__MODULE__, events, fn -> - # Setup mocks - NebulexRedisAdapter.RedisCluster.Keyslot - |> expect(:hash_slot, fn _, _ -> 0 end) - - # Triggers MOVED error the first time, then the command succeeds - :ok = Cache.put("foo", "bar") - - # Cluster is re-configured - assert_receive {^stop, %{duration: _}, %{status: :ok}}, 5000 - - # 
Command was executed successfully - assert Cache.get("foo") == "bar" - end) - end - end - - ## Private functions - - defp telemetry_event(cache, event) do - [ - :nebulex_redis_adapter, - :test_cache, - cache, - :config_manager, - :setup, - event - ] - end -end diff --git a/test/nebulex_redis_adapter/standalone_test.exs b/test/nebulex_redis_adapter/standalone_test.exs deleted file mode 100644 index 6f60b51..0000000 --- a/test/nebulex_redis_adapter/standalone_test.exs +++ /dev/null @@ -1,20 +0,0 @@ -defmodule NebulexRedisAdapter.StandaloneTest do - use ExUnit.Case, async: true - use NebulexRedisAdapter.CacheTest - - alias NebulexRedisAdapter.TestCache.Standalone, as: Cache - - setup do - {:ok, pid} = Cache.start_link() - - _ = Cache.delete_all() - - on_exit(fn -> - :ok = Process.sleep(100) - - if Process.alive?(pid), do: Cache.stop(pid) - end) - - {:ok, cache: Cache, name: Cache} - end -end diff --git a/test/nebulex_redis_adapter/stats_test.exs b/test/nebulex_redis_adapter/stats_test.exs deleted file mode 100644 index 6c4107d..0000000 --- a/test/nebulex_redis_adapter/stats_test.exs +++ /dev/null @@ -1,43 +0,0 @@ -defmodule NebulexRedisAdapter.StatsTest do - use ExUnit.Case, async: true - - defmodule Cache do - use Nebulex.Cache, - otp_app: :nebulex, - adapter: NebulexRedisAdapter - end - - setup do - {:ok, pid} = Cache.start_link(stats: true, conn_opts: [database: 1]) - - on_exit(fn -> - :ok = Process.sleep(100) - if Process.alive?(pid), do: Cache.stop(pid) - end) - - {:ok, cache: Cache, name: Cache} - end - - describe "c:NebulexRedisAdapter.stats/1" do - test "returns valid %Stats{}" do - size = Cache.delete_all() - - refute Cache.get("stats") - assert Cache.put("stats", "stats") == :ok - assert Cache.get("stats") == "stats" - assert Cache.put("stats", "stats") == :ok - assert Cache.take("stats") == "stats" - refute Cache.get("stats") - assert Cache.put_new("stats", "stats") - assert Cache.replace("stats", "stats stats") - assert Cache.delete_all() == 1 - - assert 
stats = Cache.stats() - assert stats.measurements.evictions == size + 2 - assert stats.measurements.hits == 2 - assert stats.measurements.misses == 2 - assert stats.measurements.writes == 3 - assert stats.measurements.updates == 1 - end - end -end diff --git a/test/shared/cache/command_error_test.exs b/test/shared/cache/command_error_test.exs new file mode 100644 index 0000000..f16cbe3 --- /dev/null +++ b/test/shared/cache/command_error_test.exs @@ -0,0 +1,115 @@ +defmodule Nebulex.Adapters.Redis.CommandErrorTest do + import Nebulex.CacheCase + + deftests "command" do + use Mimic + + alias Nebulex.Adapter + + test "take", %{cache: cache, name: name} do + _ = mock_redix_transaction_pipeline(name) + + assert_raise Nebulex.Error, ~r/\*\* \(Redix.ConnectionError\)/, fn -> + cache.take!("conn_error") + end + end + + test "has_key?", %{cache: cache} do + _ = mock_redix_command() + + assert {:error, %Nebulex.Error{}} = cache.has_key?("conn_error") + end + + test "ttl", %{cache: cache} do + _ = mock_redix_command() + + assert_raise Nebulex.Error, ~r/\*\* \(Redix.ConnectionError\)/, fn -> + cache.ttl!("conn_error") + end + end + + test "touch", %{cache: cache} do + _ = mock_redix_command() + + assert_raise Nebulex.Error, ~r/\*\* \(Redix.ConnectionError\)/, fn -> + cache.touch!("conn_error") + end + end + + test "expire", %{cache: cache} do + _ = mock_redix_command() + + assert_raise Nebulex.Error, ~r/\*\* \(Redix.ConnectionError\)/, fn -> + cache.expire!("conn_error", 1000) + end + end + + test "expire (:infinity)", %{cache: cache, name: name} do + _ = mock_redix_transaction_pipeline(name) + + assert_raise Nebulex.Error, ~r/\*\* \(Redix.ConnectionError\)/, fn -> + cache.expire!("conn_error", :infinity) + end + end + + test "incr", %{cache: cache} do + _ = mock_redix_command() + + assert_raise Nebulex.Error, ~r/\*\* \(Redix.ConnectionError\)/, fn -> + cache.incr!("conn_error", 1, ttl: 1000) + end + end + + test "get_all", %{cache: cache} do + _ = mock_redix_command() + + 
assert_raise Nebulex.Error, ~r/\*\* \(Redix.ConnectionError\)/, fn -> + cache.get_all!(in: [:foo, :bar]) + end + end + + test "delete_all", %{cache: cache} do + _ = mock_redix_command() + + assert_raise Nebulex.Error, ~r/\*\* \(Redix.ConnectionError\)/, fn -> + cache.delete_all!(in: [:foo, :bar]) + end + end + + test "get_all (failed fetching values)", %{cache: cache, name: name} do + if Adapter.lookup_meta(name).mode == :client_side_cluster do + Nebulex.Adapters.Redis.Pool + |> stub(:fetch_conn, fn _, _, _ -> {:ok, self()} end) + + Redix + |> expect(:command, 3, fn _, _, _ -> {:ok, ["foo", "bar"]} end) + |> expect(:command, fn _, _, _ -> {:error, %Redix.ConnectionError{}} end) + + assert_raise Nebulex.Error, ~r/\*\* \(Redix.ConnectionError\)/, fn -> + cache.get_all!() + end + end + end + + defp mock_redix_command do + Nebulex.Adapters.Redis.Pool + |> expect(:fetch_conn, fn _, _, _ -> {:ok, self()} end) + + Redix + |> stub(:command, fn _, _, _ -> {:error, %Redix.ConnectionError{}} end) + end + + defp mock_redix_transaction_pipeline(name) do + Nebulex.Adapters.Redis.Pool + |> expect(:fetch_conn, fn _, _, _ -> {:ok, self()} end) + + if Adapter.lookup_meta(name).mode == :redis_cluster do + Redix + |> stub(:pipeline, fn _, _, _ -> {:error, %Redix.ConnectionError{}} end) + else + Redix + |> stub(:transaction_pipeline, fn _, _, _ -> {:error, %Redix.ConnectionError{}} end) + end + end + end +end diff --git a/test/shared/cache/command_test.exs b/test/shared/cache/command_test.exs deleted file mode 100644 index c307be5..0000000 --- a/test/shared/cache/command_test.exs +++ /dev/null @@ -1,69 +0,0 @@ -defmodule NebulexRedisAdapter.Cache.CommandTest do - import Nebulex.CacheCase - - deftests "Redis" do - alias Nebulex.Adapter - - test "command/3 executes a command", %{cache: cache, name: name} do - mode = Adapter.with_meta(name, fn _, %{mode: mode} -> mode end) - - if mode != :redis_cluster do - assert cache.command(["SET", "foo", "bar"], timeout: 5000) == {:ok, "OK"} - assert 
cache.command(["GET", "foo"]) == {:ok, "bar"} - end - end - - test "command/3 returns an error", %{cache: cache} do - assert {:error, %Redix.Error{}} = cache.command(["INCRBY", "counter", "invalid"]) - end - - test "command!/3 raises an error", %{cache: cache} do - assert_raise Redix.Error, fn -> - cache.command!(["INCRBY", "counter", "invalid"]) - end - end - - test "command!/3 with LIST", %{cache: cache} do - assert cache.command!(["LPUSH", "mylist", "world"], key: "mylist") == 1 - assert cache.command!(["LPUSH", "mylist", "hello"], key: "mylist") == 2 - assert cache.command!(["LRANGE", "mylist", "0", "-1"], key: "mylist") == ["hello", "world"] - end - - test "pipeline/3 runs the piped commands", %{cache: cache, name: name} do - mode = Adapter.with_meta(name, fn _, %{mode: mode} -> mode end) - - if mode != :redis_cluster do - assert cache.pipeline( - [ - ["LPUSH", "mylist", "world"], - ["LPUSH", "mylist", "hello"], - ["LRANGE", "mylist", "0", "-1"] - ], - key: "mylist", - timeout: 5000 - ) == {:ok, [1, 2, ["hello", "world"]]} - end - end - - test "pipeline/3 returns an error", %{cache: cache} do - assert {:ok, [%Redix.Error{}]} = cache.pipeline([["INCRBY", "counter", "invalid"]]) - end - - test "pipeline!/3 runs the piped commands", %{cache: cache} do - assert cache.pipeline!( - [ - ["LPUSH", "mylist", "world"], - ["LPUSH", "mylist", "hello"], - ["LRANGE", "mylist", "0", "-1"] - ], - key: "mylist" - ) == [1, 2, ["hello", "world"]] - end - - test "pipeline!/3 returns an error", %{cache: cache} do - assert_raise Redix.Error, fn -> - cache.command!([["INCRBY", "counter", "invalid"]]) - end - end - end -end diff --git a/test/shared/cache/entry_exp_test.exs b/test/shared/cache/entry_exp_test.exs deleted file mode 100644 index bd117b3..0000000 --- a/test/shared/cache/entry_exp_test.exs +++ /dev/null @@ -1,185 +0,0 @@ -defmodule NebulexRedisAdapter.Cache.EntryExpTest do - import Nebulex.CacheCase - - deftests "cache expiration" do - test "put_all", %{cache: cache} do - 
entries = [{0, nil} | for(x <- 1..3, do: {x, x})] - assert cache.put_all(entries, ttl: 1000) - - refute cache.get(0) - for x <- 1..3, do: assert(x == cache.get(x)) - :ok = Process.sleep(1500) - for x <- 1..3, do: refute(cache.get(x)) - end - - test "put_new_all", %{cache: cache} do - assert cache.put_new_all(%{"apples" => 1, "bananas" => 3}, ttl: 1000) - assert cache.get("apples") == 1 - assert cache.get("bananas") == 3 - - refute cache.put_new_all(%{"apples" => 3, "oranges" => 1}) - assert cache.get("apples") == 1 - assert cache.get("bananas") == 3 - refute cache.get("oranges") - - :ok = Process.sleep(1500) - refute cache.get("apples") - refute cache.get("bananas") - end - - test "take", %{cache: cache} do - :ok = cache.put("foo", "bar", ttl: 1000) - :ok = Process.sleep(1500) - refute cache.take(1) - end - - test "take!", %{cache: cache} do - :ok = cache.put(1, 1, ttl: 1000) - :ok = Process.sleep(1500) - - assert_raise KeyError, fn -> - cache.take!(1) - end - end - - test "has_key?", %{cache: cache} do - assert cache.put("foo", "bar", ttl: 1000) == :ok - assert cache.has_key?("foo") - - Process.sleep(1500) - refute cache.has_key?("foo") - end - - test "ttl", %{cache: cache} do - assert cache.put(:a, 1, ttl: 1000) == :ok - assert cache.ttl(:a) > 0 - assert cache.put(:b, 2) == :ok - - :ok = Process.sleep(200) - assert cache.ttl(:a) > 0 - assert cache.ttl(:b) == :infinity - refute cache.ttl(:c) - - :ok = Process.sleep(1500) - refute cache.ttl(:a) - end - - test "expire", %{cache: cache} do - assert cache.put(:a, 1, ttl: 1000) == :ok - assert cache.ttl(:a) > 0 - - assert cache.expire(:a, 2000) - assert cache.ttl(:a) > 1000 - - assert cache.expire(:a, :infinity) - assert cache.ttl(:a) == :infinity - - refute cache.expire(:b, 5000) - refute cache.expire(:c, :infinity) - - assert_raise ArgumentError, ~r"expected ttl to be a valid timeout", fn -> - cache.expire(:a, "hello") - end - end - - test "touch", %{cache: cache} do - assert cache.put(:touch, 1, ttl: 1000) == :ok - 
- :ok = Process.sleep(100) - assert cache.touch(:touch) - - :ok = Process.sleep(200) - assert cache.touch(:touch) - assert cache.get(:touch) == 1 - - :ok = Process.sleep(1500) - refute cache.get(:touch) - - refute cache.touch(:non_existent) - end - - test "key expiration with ttl", %{cache: cache} do - assert cache.put(1, 11, ttl: 1000) == :ok - assert cache.get!(1) == 11 - - :ok = Process.sleep(10) - assert cache.get(1) == 11 - :ok = Process.sleep(1500) - refute cache.get(1) - - ops = [ - put: ["foo", "bar", [ttl: 1000]], - put_all: [[{"foo", "bar"}], [ttl: 1000]] - ] - - for {action, args} <- ops do - assert apply(cache, action, args) == :ok - :ok = Process.sleep(10) - assert cache.get("foo") == "bar" - :ok = Process.sleep(1200) - refute cache.get("foo") - - assert apply(cache, action, args) == :ok - :ok = Process.sleep(10) - assert cache.get("foo") == "bar" - :ok = Process.sleep(1200) - refute cache.get("foo") - end - end - - test "entry ttl", %{cache: cache} do - assert cache.put(1, 11, ttl: 1000) == :ok - assert cache.get!(1) == 11 - - for _ <- 1..3 do - assert cache.ttl(1) > 0 - - Process.sleep(200) - end - - :ok = Process.sleep(500) - - refute cache.ttl(1) - assert cache.put(1, 11, ttl: 1000) == :ok - assert cache.ttl(1) > 0 - end - - test "update existing entry with ttl", %{cache: cache} do - assert cache.put(1, 1, ttl: 1000) == :ok - assert cache.ttl(1) > 0 - - :ok = Process.sleep(10) - - assert cache.update(1, 10, &Integer.to_string/1) == "1" - assert cache.ttl(1) == :infinity - - :ok = Process.sleep(1200) - assert cache.get(1) == "1" - end - - test "incr with ttl", %{cache: cache} do - assert cache.incr(:counter, 1, ttl: 1000) == 1 - assert cache.ttl(1) > 0 - - :ok = Process.sleep(1500) - refute cache.get(:counter) - end - - test "incr and then set ttl", %{cache: cache} do - assert cache.incr(:counter, 1) == 1 - assert cache.ttl(:counter) == :infinity - - assert cache.expire(:counter, 1000) - :ok = Process.sleep(1500) - refute cache.get(:counter) - end - 
- test "invalid ttl", %{cache: cache} do - msg = "expected ttl: to be an integer >= 1000 or :infinity, got: 100" - - assert_raise ArgumentError, msg, fn -> - cache.put(1, 1, ttl: 100) - end - end - end -end diff --git a/test/shared/cache/info_test.exs b/test/shared/cache/info_test.exs new file mode 100644 index 0000000..cb78785 --- /dev/null +++ b/test/shared/cache/info_test.exs @@ -0,0 +1,43 @@ +defmodule Nebulex.Adapters.Redis.InfoTest do + import Nebulex.CacheCase + + deftests "info" do + @redis_info_sections ~w( + server clients memory persistence stats replication cpu commandstats + latencystats cluster modules keyspace errorstats + )a + + test "returns all", %{cache: cache} do + # equivalent to cache.info(:all) + assert {:ok, info} = cache.info() + + for section <- @redis_info_sections do + assert Map.fetch!(info, section) |> is_map() + end + end + + test "returns everything", %{cache: cache} do + assert {:ok, info} = cache.info(:everything) + + for section <- @redis_info_sections do + assert Map.fetch!(info, section) |> is_map() + end + end + + test "returns default", %{cache: cache} do + assert {:ok, info} = cache.info(:default) + + for section <- @redis_info_sections -- ~w(commandstats latencystats)a do + assert Map.fetch!(info, section) |> is_map() + end + end + + test "returns multiple sections", %{cache: cache} do + assert %{server: %{}, memory: %{}, stats: %{}} = cache.info!([:server, :memory, :stats]) + end + + test "returns a single section", %{cache: cache} do + assert %{evicted_keys: _} = cache.info!(:stats) + end + end +end diff --git a/test/shared/cache/queryable_test.exs b/test/shared/cache/queryable_test.exs index f21a2aa..72eb5b5 100644 --- a/test/shared/cache/queryable_test.exs +++ b/test/shared/cache/queryable_test.exs @@ -1,92 +1,145 @@ -defmodule NebulexRedisAdapter.Cache.QueryableTest do +defmodule Nebulex.Adapters.Redis.QueryableTest do import Nebulex.CacheCase deftests "queryable" do + use Mimic import Nebulex.CacheCase - test "all/2", 
%{cache: cache} do + test "get_all!/1 returns all cached keys", %{cache: cache} do set1 = cache_put(cache, 1..50) set2 = cache_put(cache, 51..100) - for x <- 1..100, do: assert(cache.get(x) == x) - expected = set1 ++ set2 + for x <- 1..100 do + assert cache.get!(x) == x + end - assert :lists.usort(cache.all()) == expected + expected = set1 ++ set2 + assert expected -- cache.get_all!(select: :key) == [] set3 = Enum.to_list(20..60) - :ok = Enum.each(set3, &cache.delete(&1)) - expected = :lists.usort(expected -- set3) + :ok = Enum.each(set3, &cache.delete!/1) + expected = Enum.sort(expected -- set3) - assert :lists.usort(cache.all()) == expected + assert expected -- cache.get_all!(select: :key) == [] end - test "stream/2", %{cache: cache} do + test "stream!/1 returns a stream", %{cache: cache} do entries = for x <- 1..10, into: %{}, do: {x, x * 2} assert cache.put_all(entries) == :ok expected = Map.keys(entries) - assert nil |> cache.stream() |> Enum.to_list() |> :lists.usort() == expected - assert nil - |> cache.stream(page_size: 3) - |> Enum.to_list() - |> :lists.usort() == expected + assert expected -- (cache.stream!(select: :key) |> Enum.to_list()) == [] + + result = + [query: nil, select: :key] + |> cache.stream!(max_entries: 3) + |> Enum.to_list() + + assert expected -- result == [] assert_raise Nebulex.QueryError, fn -> - :invalid_query - |> cache.stream() + cache.stream!(query: :invalid_query) |> Enum.to_list() end end - test "all/2 and stream/2 with key pattern", %{cache: cache} do + test "get_all!/2 and stream!/2 with key pattern", %{cache: cache} do + keys = ["age", "firstname", "lastname"] + cache.put_all(%{ "firstname" => "Albert", "lastname" => "Einstein", "age" => 76 }) - assert ["firstname", "lastname"] == "**name**" |> cache.all() |> :lists.sort() - assert ["age"] == "a??" 
|> cache.all() - assert ["age", "firstname", "lastname"] == :lists.sort(cache.all()) + assert cache.get_all!(query: "**name**", select: :key) |> Enum.sort() == [ + "firstname", + "lastname" + ] - stream = cache.stream("**name**") - assert ["firstname", "lastname"] == stream |> Enum.to_list() |> :lists.sort() + assert cache.get_all!(query: "a??", select: :key) == ["age"] + assert keys -- cache.get_all!(select: :key) == [] + + assert ["firstname", "lastname"] -- + (cache.stream!(query: "**name**", select: :key) |> Enum.to_list()) == [] + + assert cache.stream!(query: "a??", select: :key) |> Enum.to_list() == ["age"] + assert keys -- (cache.stream!(select: :key) |> Enum.to_list()) == [] + + assert cache.get_all!(query: "**name**") |> Map.new() == %{ + "firstname" => "Albert", + "lastname" => "Einstein" + } + end + + test "count_all!/1 returns the cached entries count", %{cache: cache} do + keys = ["age", "firstname", "lastname"] + + cache.put_all(%{ + "firstname" => "Albert", + "lastname" => "Einstein", + "age" => 76 + }) - stream = cache.stream("a??") - assert ["age"] == stream |> Enum.to_list() + assert cache.count_all!() >= 3 + assert cache.count_all!(query: "**name**") == 2 + assert cache.count_all!(query: "a??") == 1 - stream = cache.stream() - assert ["age", "firstname", "lastname"] == stream |> Enum.to_list() |> :lists.sort() + assert cache.delete_all!(query: "**name**") == 2 + assert cache.delete_all!(query: "a??") == 1 - assert %{"firstname" => "Albert", "lastname" => "Einstein"} == - "**name**" |> cache.all() |> cache.get_all() + assert cache.delete_all!(in: keys) == 0 + assert cache.count_all!(in: keys) == 0 end - test "delete_all/2", %{cache: cache} do + test "delete_all!/0 deletes all cached entries", %{cache: cache} do :ok = cache.put_all(a: 1, b: 2, c: 3) - assert cache.count_all() == 3 - assert cache.delete_all() == 3 - assert cache.count_all() == 0 + assert cache.count_all!() >= 3 + assert cache.delete_all!() >= 3 + assert cache.count_all!() == 0 end 
- test "delete_all/2 (list of keys)", %{cache: cache} do + test "delete_all!/1 deletes the given keys [in: keys]", %{cache: cache} do kv = for i <- 1..10, into: %{}, do: {:erlang.phash2(i), i} :ok = cache.put_all(kv) for {k, v} <- kv do - assert cache.get(k) == v + assert cache.get!(k) == v end - assert cache.count_all() == 10 - assert cache.delete_all({:in, Map.keys(kv)}) == 10 - assert cache.count_all() == 0 + keys = Map.keys(kv) + + assert cache.count_all!(in: keys) == 10 + assert cache.delete_all!(in: keys) == 10 + assert cache.count_all!(in: keys) == 0 for {k, _} <- kv do - refute cache.get(k) + refute cache.get!(k) + end + end + + test "stream!/2 [on_error: :raise] raises an exception", %{cache: cache} do + Redix + |> stub(:command, fn _, _, _ -> {:error, %Redix.ConnectionError{reason: :closed}} end) + + assert_raise Nebulex.Error, ~r"\*\* \(Redix.ConnectionError\)", fn -> + cache.stream!() |> Enum.to_list() + end + + assert_raise Nebulex.Error, ~r"\*\* \(Redix.ConnectionError\)", fn -> + cache.stream!(in: [1, 2, 3]) |> Enum.to_list() end end + + test "stream!/2 [on_error: :nothing] skips command errors", %{cache: cache} do + Redix + |> stub(:command, fn _, _, _ -> {:error, %Redix.ConnectionError{reason: :closed}} end) + + assert cache.stream!([], on_error: :nothing) |> Enum.to_list() == [] + assert cache.stream!([in: [1, 2, 3]], on_error: :nothing) |> Enum.to_list() == [] + end end end diff --git a/test/shared/cache/redix_conn_test.exs b/test/shared/cache/redix_conn_test.exs new file mode 100644 index 0000000..4c7897e --- /dev/null +++ b/test/shared/cache/redix_conn_test.exs @@ -0,0 +1,85 @@ +defmodule Nebulex.Adapters.Redis.RedixConnTest do + import Nebulex.CacheCase + + deftests "Redix" do + test "command/3 ok", %{cache: cache, name: name} do + assert {:ok, conn} = cache.fetch_conn(name: name, key: "foo") + + assert Redix.command(conn, ["SET", "foo", "bar"], timeout: 5000) == {:ok, "OK"} + assert Redix.command(conn, ["GET", "foo"]) == {:ok, "bar"} + end + 
+ test "command/3 encode/decode ok", %{cache: cache, name: name} do + key = cache.encode_key({:key, "key"}) + value = cache.encode_value({:value, "value"}) + + assert {:ok, conn} = cache.fetch_conn(name: name, key: key) + + assert Redix.command!(conn, ["SET", key, value], timeout: 5000) == "OK" + assert Redix.command!(conn, ["GET", key]) |> cache.decode_value() == {:value, "value"} + end + + test "command/3 returns an error", %{cache: cache, name: name} do + assert {:ok, conn} = cache.fetch_conn(name: name, key: "counter") + + assert {:error, %Redix.Error{}} = Redix.command(conn, ["INCRBY", "counter", "invalid"]) + end + + test "command!/3 raises an error", %{cache: cache, name: name} do + assert {:ok, conn} = cache.fetch_conn(name: name, key: "counter") + + assert_raise Redix.Error, fn -> + Redix.command!(conn, ["INCRBY", "counter", "invalid"]) + end + end + + test "command!/3 with LIST", %{cache: cache, name: name} do + conn = cache.fetch_conn!(name: name, key: "mylist") + + assert Redix.command!(conn, ["LPUSH", "mylist", "world"]) == 1 + assert Redix.command!(conn, ["LPUSH", "mylist", "hello"]) == 2 + assert Redix.command!(conn, ["LRANGE", "mylist", "0", "-1"]) == ["hello", "world"] + end + + test "pipeline/3 runs the piped commands", %{cache: cache, name: name} do + assert {:ok, conn} = cache.fetch_conn(name: name, key: "mylist") + + assert Redix.pipeline( + conn, + [ + ["LPUSH", "mylist", "world"], + ["LPUSH", "mylist", "hello"], + ["LRANGE", "mylist", "0", "-1"] + ], + timeout: 5000 + ) == {:ok, [1, 2, ["hello", "world"]]} + end + + test "pipeline/3 returns an error", %{cache: cache, name: name} do + assert {:ok, conn} = cache.fetch_conn(name: name, key: "counter") + + assert {:ok, [%Redix.Error{}]} = Redix.pipeline(conn, [["INCRBY", "counter", "invalid"]]) + end + + test "pipeline!/3 runs the piped commands", %{cache: cache, name: name} do + assert {:ok, conn} = cache.fetch_conn(name: name, key: "mylist") + + assert Redix.pipeline!( + conn, + [ + ["LPUSH", 
"mylist", "world"], + ["LPUSH", "mylist", "hello"], + ["LRANGE", "mylist", "0", "-1"] + ] + ) == [1, 2, ["hello", "world"]] + end + + test "pipeline!/3 returns an error", %{cache: cache, name: name} do + assert {:ok, conn} = cache.fetch_conn(name: name, key: "counter") + + assert_raise Redix.Error, fn -> + Redix.command!(conn, [["INCRBY", "counter", "invalid"]]) + end + end + end +end diff --git a/test/shared/cache_test.exs b/test/shared/cache_test.exs index c9446c0..8844389 100644 --- a/test/shared/cache_test.exs +++ b/test/shared/cache_test.exs @@ -1,14 +1,14 @@ -defmodule NebulexRedisAdapter.CacheTest do +defmodule Nebulex.Adapters.Redis.CacheTest do @moduledoc """ Shared Tests """ defmacro __using__(_opts) do quote do - use Nebulex.Cache.EntryTest - use NebulexRedisAdapter.Cache.EntryExpTest - use NebulexRedisAdapter.Cache.QueryableTest - use NebulexRedisAdapter.Cache.CommandTest + use Nebulex.Adapters.Redis.QueryableTest + use Nebulex.Adapters.Redis.InfoTest + use Nebulex.Adapters.Redis.CommandErrorTest + use Nebulex.Adapters.Redis.RedixConnTest end end end diff --git a/test/support/test_cache.ex b/test/support/test_cache.ex index 6e2cadf..5435215 100644 --- a/test/support/test_cache.ex +++ b/test/support/test_cache.ex @@ -1,50 +1,57 @@ -defmodule NebulexRedisAdapter.TestCache do +defmodule Nebulex.Adapters.Redis.TestCache do @moduledoc false + defmodule Common do + @moduledoc false + + defmacro __using__(_opts) do + quote do + def get_and_update_fun(nil), do: {nil, 1} + def get_and_update_fun(current) when is_integer(current), do: {current, current * 2} + + def get_and_update_bad_fun(_), do: :other + end + end + end + defmodule Standalone do @moduledoc false use Nebulex.Cache, otp_app: :nebulex_redis_adapter, - adapter: NebulexRedisAdapter + adapter: Nebulex.Adapters.Redis + + use Nebulex.Adapters.Redis.TestCache.Common end - defmodule ClientCluster do + defmodule RedisCluster do @moduledoc false use Nebulex.Cache, otp_app: :nebulex_redis_adapter, - adapter: 
NebulexRedisAdapter + adapter: Nebulex.Adapters.Redis + + use Nebulex.Adapters.Redis.TestCache.Common end - defmodule RedisCluster do + defmodule ClientSideCluster do @moduledoc false use Nebulex.Cache, otp_app: :nebulex_redis_adapter, - adapter: NebulexRedisAdapter + adapter: Nebulex.Adapters.Redis + + use Nebulex.Adapters.Redis.TestCache.Common end defmodule RedisClusterConnError do @moduledoc false use Nebulex.Cache, otp_app: :nebulex_redis_adapter, - adapter: NebulexRedisAdapter + adapter: Nebulex.Adapters.Redis end defmodule RedisClusterWithKeyslot do @moduledoc false use Nebulex.Cache, otp_app: :nebulex_redis_adapter, - adapter: NebulexRedisAdapter - end - - defmodule Keyslot do - @moduledoc false - use Nebulex.Adapter.Keyslot - - @impl true - def hash_slot(key, range \\ 16_384) do - key - |> :erlang.phash2() - |> :jchash.compute(range) - end + adapter: Nebulex.Adapters.Redis end end diff --git a/test/test_helper.exs b/test/test_helper.exs index 0464b22..1a360d6 100644 --- a/test/test_helper.exs +++ b/test/test_helper.exs @@ -28,9 +28,9 @@ end # Mocks [ Redix, - NebulexRedisAdapter.Pool, - NebulexRedisAdapter.RedisCluster, - NebulexRedisAdapter.RedisCluster.Keyslot + Nebulex.Adapters.Redis.Cluster, + Nebulex.Adapters.Redis.Cluster.Keyslot, + Nebulex.Adapters.Redis.Pool ] |> Enum.each(&Mimic.copy/1)