diff --git a/docs/.vitepress/config.ts b/docs/.vitepress/config.ts index 24333d56..cea18636 100644 --- a/docs/.vitepress/config.ts +++ b/docs/.vitepress/config.ts @@ -43,28 +43,18 @@ export default defineConfig({ ] }, { - text: 'function', + text: 'Function', items: [ { text: '什么是 function', link: '/zh/function/introduction' }, { text: '最佳实践', link: '/zh/function/best_practices' } ] }, { - text: 'others', + text: 'Others', items: [ - { text: 'redis modle 支持', link: '/zh/others/module-supported' }, + { text: 'Redis Modules', link: '/zh/others/modules' }, ] }, - // { - // text: '进阶用法', - // items: [ - // { text: '监控', link: '/zh/function/best_practices' }, - // { text: '双向同步', link: '/zh/function/best_practices' }, - // { text: '容器部署', link: '/zh/function/best_practices' }, - // { text: '主从实例向集群实例迁移', link: '/zh/function/best_practices' }, - // { text: '大 key 重写', link: '/zh/function/best_practices' }, - // ] - // } ], footer: { message: 'Released under the MIT License.', @@ -74,8 +64,59 @@ export default defineConfig({ }, en: { label: 'English', - lang: 'en', + lang: 'en', // optional, will be added as `lang` attribute on `html` tag + themeConfig: { + // https://vitepress.dev/reference/default-theme-config + nav: [ + { text: 'Home', link: '/en/' }, + { text: 'User Guide', link: '/en/guide/getting-started' }, + { text: 'Tair', link: 'https://www.alibabacloud.com/product/tair' } + ], + sidebar: [ + { + text: 'Introduction', + items: [ + { text: 'What is RedisShake', link: '/en/guide/introduction' }, + { text: 'Getting Started', link: '/en/guide/getting-started' }, + { text: 'Configuration', link: '/en/guide/config' }, + { text: 'Migration Mode Selection', link: '/en/guide/mode' }, + ] + }, + { + text: 'Reader', + items: [ + { text: 'Sync Reader', link: '/en/reader/sync_reader' }, + { text: 'Scan Reader', link: '/en/reader/scan_reader' }, + { text: 'RDB Reader', link: '/en/reader/rdb_reader' }, + ] + }, + { + text: 'Writer', + items: [ + { text: 'Redis Writer', link: '/en/writer/redis_writer' }, + ] + }, + { + text: 'Function', + items: [ + { text: 'What is function', link: '/en/function/introduction' }, + { text: 'Best Practices', link: '/en/function/best_practices' } + ] + }, + { + text: 'Others', + items: [ + { text: 'Redis Modules', link: '/en/others/modules' }, + ] + }, + ], + footer: { + message: 'Released under the MIT License.', + copyright: 'Copyright © 2019-present Tair' + } + } }, + }, themeConfig: { socialLinks: [ diff --git a/docs/src/en/api-examples.md b/docs/src/en/api-examples.md deleted file mode 100644 index 6bd8bb5c..00000000 --- a/docs/src/en/api-examples.md +++ /dev/null @@ -1,49 +0,0 @@ ---- -outline: deep ---- - -# Runtime API Examples - -This page demonstrates usage of some of the runtime APIs provided by VitePress. - -The main `useData()` API can be used to access site, theme, and page data for the current page. It works in both `.md` and `.vue` files: - -```md - - -## Results - -### Theme Data -
{{ theme }}- -### Page Data -
{{ page }}- -### Page Frontmatter -
{{ frontmatter }}-``` - - - -## Results - -### Theme Data -
{{ theme }}- -### Page Data -
{{ page }}- -### Page Frontmatter -
{{ frontmatter }}- -## More - -Check out the documentation for the [full list of runtime APIs](https://vitepress.dev/reference/runtime-api#usedata). diff --git a/docs/src/en/function/best_practices.md b/docs/src/en/function/best_practices.md new file mode 100644 index 00000000..f663956a --- /dev/null +++ b/docs/src/en/function/best_practices.md @@ -0,0 +1,99 @@ +--- +outline: deep +--- + +# 最佳实践 + +## 过滤 + +### 过滤 Key + +```lua +local prefix = "user:" +local prefix_len = #prefix + +if string.sub(KEYS[1], 1, prefix_len) ~= prefix then + return +end + +shake.call(DB, ARGV) +``` + +效果是只将 key 以 `user:` 开头的源数据写入到目标端。没有考虑 `mset` 等多 key 命令的情况。 + +### 过滤 DB + +```lua +shake.log(DB) +if DB == 0 +then + return +end +shake.call(DB, ARGV) +``` + +效果是丢弃源端 `db` 0 的数据,将其他 `db` 的数据写入到目标端。 + + +### 过滤某类数据结构 + +可以通过 `GROUP` 变量来判断数据结构类型,支持的数据结构类型有:`STRING`、`LIST`、`SET`、`ZSET`、`HASH`、`SCRIPTING` 等。 + +#### 过滤 Hash 类型数据 +```lua +if GROUP == "HASH" then + return +end +shake.call(DB, ARGV) +``` + +效果是丢弃源端的 `hash` 类型数据,将其他数据写入到目标端。 + +#### 过滤 [LUA 脚本](https://redis.io/docs/interact/programmability/eval-intro/) + +```lua +if GROUP == "SCRIPTING" then + return +end +shake.call(DB, ARGV) +``` + +效果是丢弃源端的 `lua` 脚本,将其他数据写入到目标端。常见于主从同步至集群时,存在集群不支持的 LUA 脚本。 + +## 修改 + +### 修改 Key 的前缀 + +```lua +local prefix_old = "prefix_old_" +local prefix_new = "prefix_new_" + +shake.log("old=" .. table.concat(ARGV, " ")) + +for i, index in ipairs(KEY_INDEXES) do + local key = ARGV[index] + if string.sub(key, 1, #prefix_old) == prefix_old then + ARGV[index] = prefix_new .. string.sub(key, #prefix_old + 1) + end +end + +shake.log("new=" .. table.concat(ARGV, " ")) +shake.call(DB, ARGV) +``` +效果是将源端的 key `prefix_old_key` 写入到目标端的 key `prefix_new_key`。 + +### 交换 DB + +```lua +local db1 = 1 +local db2 = 2 + +if DB == db1 then + DB = db2 +elseif DB == db2 then + DB = db1 +end +shake.call(DB, ARGV) +``` + +效果是将源端的 `db 1` 写入到目标端的 `db 2`,将源端的 `db 2` 写入到目标端的 `db 1`, 其他 `db` 不变。 \ No newline at end of file diff --git a/docs/src/en/function/introduction.md b/docs/src/en/function/introduction.md new file mode 100644 index 00000000..579d0eec --- /dev/null +++ b/docs/src/en/function/introduction.md @@ -0,0 +1,56 @@ +--- +outline: deep +--- + +# 什么是 function + +RedisShake 通过提供 function 功能,实现了的 [ETL(提取-转换-加载)](https://en.wikipedia.org/wiki/Extract,_transform,_load) 中的 `transform` 能力。通过利用 function 可以实现类似功能: +* 更改数据所属的 `db`,比如将源端的 `db 0` 写入到目的端的 `db 1`。 +* 对数据进行筛选,例如,只将 key 以 `user:` 开头的源数据写入到目标端。 +* 改变 Key 的前缀,例如,将源端的 key `prefix_old_key` 写入到目标端的 key `prefix_new_key`。 +* ... 
+ +要使用 function 功能,只需编写一份 lua 脚本。RedisShake 在从源端获取数据后,会将数据转换为 Redis 命令。然后,它会处理这些命令,从中解析出 `KEYS`、`ARGV`、`SLOTS`、`GROUP` 等信息,并将这些信息传递给 lua 脚本。lua 脚本会处理这些数据,并返回处理后的命令。最后,RedisShake 会将处理后的数据写入到目标端。 + +以下是一个具体的例子: +```toml +function = """ +shake.log(DB) +if DB == 0 +then + return +end +shake.call(DB, ARGV) +""" + +[sync_reader] +address = "127.0.0.1:6379" + +[redis_writer] +address = "127.0.0.1:6380" +``` +`DB` 是 RedisShake 提供的信息,表示当前数据所属的 db。`shake.log` 用于打印日志,`shake.call` 用于调用 Redis 命令。上述脚本的目的是丢弃源端 `db` 0 的数据,将其他 `db` 的数据写入到目标端。 + +除了 `DB`,还有其他信息如 `KEYS`、`ARGV`、`SLOTS`、`GROUP` 等,可供调用的函数有 `shake.log` 和 `shake.call`,具体请参考 [function API](#function-api)。 + +关于更多的示例,可以参考 [最佳实践](./best_practices.md)。 + +## function API + +### 变量 + +因为有些命令中含有多个 key,比如 `mset` 等命令。所以,`KEYS`、`KEY_INDEXES`、`SLOTS` 这三个变量都是数组类型。如果确认命令只有一个 key,可以直接使用 `KEYS[1]`、`KEY_INDEXES[1]`、`SLOTS[1]`。 + +| 变量 | 类型 | 示例 | 描述 | +|-|-|-|-----| +| DB | number | 1 | 命令所属的 `db` | +| GROUP | string | "LIST" | 命令所属的 `group`,符合 [Command key specifications](https://redis.io/docs/reference/key-specs/),可以在 [commands](https://github.com/tair-opensource/RedisShake/tree/v4/scripts/commands) 中查询每个命令的 `group` 字段 | +| CMD | string | "XGROUP-DELCONSUMER" | 命令的名称 | +| KEYS | table | \{"key1", "key2"\} | 命令的所有 Key | +| KEY_INDEXES | table | \{2, 4\} | 命令的所有 Key 在 `ARGV` 中的索引 | +| SLOTS | table | \{9189, 4998\} | 当前命令的所有 Key 所属的 [slot](https://redis.io/docs/reference/cluster-spec/#key-distribution-model) | +| ARGV | table | \{"mset", "key1", "value1", "key2", "value2"\} | 命令的所有参数 | + +### 函数 +* `shake.call(DB, ARGV)`:返回一个 Redis 命令,RedisShake 会将该命令写入目标端。 +* `shake.log(msg)`:打印日志。 diff --git a/docs/src/en/guide/config.md b/docs/src/en/guide/config.md new file mode 100644 index 00000000..ffa19e1f --- /dev/null +++ b/docs/src/en/guide/config.md @@ -0,0 +1,82 @@ +--- +outline: deep +--- + +# Configuration File + +RedisShake uses the [TOML](https://toml.io/cn/) language for writing, and all configuration parameters are explained in all.toml. + +The configuration file is composed as follows: + +```toml +function = "..." + +[xxx_reader] +... + +[xxx_writer] +... + +[advanced] +... +``` + +Under normal usage, you only need to write the `xxx_reader` and `xxx_writer` parts. The `function` and `advanced` parts are for advanced usage, and users can configure them according to their needs. + +## function Configuration + +Refer to [What is function](../function/introduction.md). + +## reader Configuration + +RedisShake provides different Readers to interface with different sources, see the Reader section for configuration details: + +* [Sync Reader](../reader/sync_reader.md) +* [Scan Reader](../reader/scan_reader.md) +* [RDB Reader](../reader/rdb_reader.md) + +## writer Configuration + +RedisShake provides different Writers to interface with different targets, see the Writer section for configuration details: + +* [Redis Writer](../writer/redis_writer.md) + +## advanced Configuration + +```toml +[advanced] +dir = "data" +ncpu = 3 # runtime.GOMAXPROCS, 0 means use runtime.NumCPU() cpu cores + +pprof_port = 0 # pprof port, 0 means disable +status_port = 0 # status port, 0 means disable + +# log +log_file = "shake.log" +log_level = "info" # debug, info or warn +log_interval = 5 # in seconds + +# redis-shake gets key and value from rdb file, and uses RESTORE command to +# create the key in target redis. Redis RESTORE will return a "Target key name +# is busy" error when key already exists. 
You can use this configuration item
+# to change the default behavior of restore:
+# panic: redis-shake will stop when it meets a "Target key name is busy" error.
+# rewrite: redis-shake will replace the key with the new value.
+# skip: redis-shake will skip the key when it meets a "Target key name is busy" error.
+rdb_restore_command_behavior = "rewrite" # panic, rewrite or skip
+
+# redis-shake uses pipeline to improve sending performance.
+# This item limits the maximum number of commands in a pipeline.
+pipeline_count_limit = 1024
+
+# Client query buffers accumulate new commands. They are limited to a fixed
+# amount by default. This amount is normally 1gb.
+target_redis_client_max_querybuf_len = 1024_000_000
+
+# In the Redis protocol, bulk requests, that is, elements representing single
+# strings, are normally limited to 512 mb.
+target_redis_proto_max_bulk_len = 512_000_000
+
+# If the source is Elasticache or MemoryDB, you can set this item.
+aws_psync = ""
+```
\ No newline at end of file
diff --git a/docs/src/en/guide/getting-started.md b/docs/src/en/guide/getting-started.md
new file mode 100644
index 00000000..26e95291
--- /dev/null
+++ b/docs/src/en/guide/getting-started.md
@@ -0,0 +1,45 @@
+# Quick Start
+
+## Installation
+
+### Download Binary Package
+
+Directly download the binary package from [Release](https://github.com/tair-opensource/RedisShake/releases).
+
+### Compile from Source Code
+
+To compile from the source code, make sure you have set up the Golang environment on your local machine:
+
+```shell
+git clone https://github.com/alibaba/RedisShake
+cd RedisShake
+sh build.sh
+```
+
+## Usage
+
+Assume you have two Redis instances:
+
+* Instance A: 127.0.0.1:6379
+* Instance B: 127.0.0.1:6380
+
+Create a new configuration file `shake.toml`:
+
+```toml
+[sync_reader]
+address = "127.0.0.1:6379"
+
+[redis_writer]
+address = "127.0.0.1:6380"
+```
+
+To start RedisShake, run the following command:
+
+```shell
+./redis-shake shake.toml
+```
+
+## Precautions
+
+1. Do not run two RedisShake processes in the same directory, as the temporary files generated during runtime may be overwritten, leading to abnormal behavior.
+2. Do not downgrade the Redis version, such as from 6.0 to 5.0, because each major version of Redis introduces some new commands and encoding methods. If the version is lowered, it may lead to incompatibility.
\ No newline at end of file
diff --git a/docs/src/en/guide/image.png b/docs/src/en/guide/image.png
new file mode 100644
index 00000000..725251fa
Binary files /dev/null and b/docs/src/en/guide/image.png differ
diff --git a/docs/src/en/guide/introduction.md b/docs/src/en/guide/introduction.md
new file mode 100644
index 00000000..40e13675
--- /dev/null
+++ b/docs/src/en/guide/introduction.md
@@ -0,0 +1,39 @@
+---
+outline: deep
+---
+
+# What is RedisShake
+
+RedisShake is a tool for processing and migrating Redis data, offering the following features:
+
+1. **Redis Compatibility**: RedisShake is compatible with Redis versions from 2.8 to 7.2 and supports various deployment methods, including standalone, master-slave, sentinel, and cluster.
+2. 
**Cloud Service Compatibility**: RedisShake seamlessly works with popular Redis-like databases provided by mainstream cloud service providers, including but not limited to: + - [Alibaba Cloud - ApsaraDB for Redis](https://www.alibabacloud.com/product/apsaradb-for-redis) + - [Alibaba Cloud - Tair](https://www.alibabacloud.com/product/tair) + - [AWS - ElastiCache](https://aws.amazon.com/elasticache/) + - [AWS - MemoryDB](https://aws.amazon.com/memorydb/) +3. **Module Compatibility**: RedisShake is compatible with [TairString](https://github.com/tair-opensource/TairString), [TairZSet](https://github.com/tair-opensource/TairZset), and [TairHash](https://github.com/tair-opensource/TairHash) modules. +4. **Various Export Modes**: RedisShake supports PSync, RDB, and Scan export modes. +5. **Data Processing**: RedisShake implements data filtering and transformation through custom scripts. + +## Contributions + +We welcome contributions from the community. For significant changes, please open an issue first to discuss what you would like to change. We are particularly interested in: + +1. Adding support for more modules +2. Enhancing the support for Readers and Writers +3. Sharing your Lua scripts and best practices + +## History + +RedisShake is a project actively maintained by the Alibaba Cloud [Tair Team](https://github.com/tair-opensource). Its evolution can be traced back to its initial version, which was branched out from [redis-port](https://github.com/CodisLabs/redis-port). + +Versions (configurations are not interchangeable between different versions): + +- The [RedisShake 2.x](https://github.com/tair-opensource/RedisShake/tree/v2) version brought a series of improvements and updates, enhancing its overall stability and performance. +- The [RedisShake 3.x](https://github.com/tair-opensource/RedisShake/tree/v3) version was a significant milestone, with the entire codebase being completely rewritten and optimized for better efficiency and availability. +- The [RedisShake 4.x](https://github.com/tair-opensource/RedisShake/tree/v4) version further enhanced features related to the [Reader](../reader/scan_reader.md), configuration, observability, and [function](../function/introduction.md). + +## License + +RedisShake is open-source under the [MIT License](https://github.com/tair-opensource/RedisShake/blob/v2/license.txt). \ No newline at end of file diff --git a/docs/src/en/guide/mode.md b/docs/src/en/guide/mode.md new file mode 100644 index 00000000..a404e422 --- /dev/null +++ b/docs/src/en/guide/mode.md @@ -0,0 +1,53 @@ +--- +outline: deep +--- + +# Migration Mode Selection + +## Overview + +Currently, RedisShake has three migration modes: `PSync`, `RDB`, and `SCAN`, corresponding to [`sync_reader`](../reader/sync_reader.md), [`rdb_reader`](../reader/rdb_reader.md), and [`scan_reader`](../reader/scan_reader.md) respectively. + +* For scenarios of recovering data from backups, you can use `rdb_reader`. +* For data migration scenarios, `sync_reader` should be the preferred choice. Some cloud vendors do not provide support for the PSync protocol, in which case `scan_reader` can be chosen. +* For long-term data synchronization scenarios, RedisShake currently cannot handle them because the PSync protocol is not reliable. When the replication connection is disconnected, RedisShake will not be able to reconnect to the source database. If the demand for availability is not high, you can use `scan_reader`. 
If the write volume is not large and there are no large keys, `scan_reader` can also be considered. + +Different modes have their pros and cons, and you need to check each Reader section for more information. + +## Redis Cluster Architecture + +When the source Redis is deployed in a cluster architecture, you can use `sync_reader` or `scan_reader`. Both have switches in their configuration items to enable cluster mode, which will automatically obtain all nodes in the cluster through the `cluster nodes` command and establish connections. + +## Redis Sentinel Architecture + +When the source Redis is deployed in a sentinel architecture and RedisShake uses `sync_reader` to connect to the master, it will be treated as a slave by the master and may be elected as the new master by the sentinel. + +To avoid this, you should choose a replica as the source. + +## Cloud Redis Service + +Mainstream cloud vendors all provide Redis services, but there are several reasons that make using RedisShake on these services more complex: +1. Engine restrictions. Some self-developed Redis-like databases do not support the PSync protocol. +2. Architecture restrictions. Many cloud vendors support proxy mode, i.e., adding a Proxy component between the user and the Redis service. Because of the existence of the Proxy component, the PSync protocol cannot be supported. +3. Security restrictions. In native Redis, the PSync protocol will basically trigger fork(2), leading to memory bloat and increased user request latency. In worse cases, it may even lead to out of memory. Although there are solutions to alleviate these issues, not all cloud vendors have invested in this area. +4. Business strategies. Many users use RedisShake to migrate off the cloud or switch clouds, so some cloud vendors do not want users to use RedisShake, thus blocking the PSync protocol. + +The following will introduce some RedisShake usage schemes in special scenarios based on practical experience. + +### Alibaba Cloud Redis & Tair + +Alibaba Cloud Redis and Tair both support the PSync protocol, and `sync_reader` is recommended. Users need to create an account with replication permissions. RedisShake can use this account for data synchronization. The specific creation steps can be found in [Create and manage database accounts](https://help.aliyun.com/zh/redis/user-guide/create-and-manage-database-accounts). + +Exceptions: +1. Version 2.8 Redis instances do not support the creation of accounts with replication permissions. You need to [upgrade to a major version](https://help.aliyun.com/zh/redis/user-guide/upgrade-the-major-version-1). +2. Cluster architecture Redis and Tair instances do not support the PSync protocol under [proxy mode](https://help.aliyun.com/zh/redis/product-overview/cluster-master-replica-instances#section-h69-izd-531). +3. Read-write separation architecture does not support the PSync protocol. + +In scenarios where the PSync protocol is not supported, `scan_reader` can be used. It should be noted that `scan_reader` will put significant pressure on the source database. + +### AWS ElastiCache and MemoryDB + +`sync_reader` is preferred. AWS ElastiCache and MemoryDB do not enable the PSync protocol by default, but you can request to enable the PSync protocol by submitting a ticket. AWS will provide a renamed PSync command in the ticket, such as `xhma21yfkssync` and `nmfu2bl5osync`. This command has the same effect as the `psync` command, just with a different name. 
Users only need to modify the `aws_psync` configuration item in the RedisShake configuration file. For a single instance, write one pair of `ip:port@cmd`. For cluster instances, write all `ip:port@cmd`, separated by commas. + +When it is inconvenient to submit a ticket, you can use `scan_reader`. It should be noted that `scan_reader` will put significant pressure on the source database. diff --git a/docs/src/en/index.md b/docs/src/en/index.md index 9f731cf8..cff1513b 100644 --- a/docs/src/en/index.md +++ b/docs/src/en/index.md @@ -4,22 +4,21 @@ layout: home hero: name: "RedisShake" - text: "Data Transform And Data Migration For Redis-like Database" - tagline: RedisShake is a tool for transform and migrating Redis data. + # text: "Data Migration and Processing for Redis-like Databases" + tagline: Data migration and processing service for Redis-like databases actions: - theme: brand - text: Markdown Examples - link: /markdown-examples + text: Get Started + link: /en/guide/getting-started - theme: alt - text: API Examples - link: /api-examples - + text: What is RedisShake + link: /en/guide/introduction features: - - title: Feature A - details: Lorem ipsum dolor sit amet, consectetur adipiscing elit - - title: Feature B - details: Lorem ipsum dolor sit amet, consectetur adipiscing elit - - title: Feature C - details: Lorem ipsum dolor sit amet, consectetur adipiscing elit + - title: Data Migration + details: Supports sync, scan, and rdb modes for data migration + - title: Data Processing + details: Supports data filtering and modification using Lua scripts + - title: Compatibility + details: Compatible with various Redis deployment methods and mainstream cloud vendors' Redis-like databases --- diff --git a/docs/src/zh/others/module-supported.md b/docs/src/en/others/modules.md similarity index 52% rename from docs/src/zh/others/module-supported.md rename to docs/src/en/others/modules.md index 3ca89bbb..6309515c 100644 --- a/docs/src/zh/others/module-supported.md +++ b/docs/src/en/others/modules.md @@ -2,14 +2,23 @@ outline: deep --- +# Redis Modules -# 介绍 -可以为 RedisShake 贡献代码支持其它自定义 module 类型 +Redis Modules 是 Redis 4.0 版本引入的一个新特性,它允许开发者扩展 Redis 的功能。通过创建模块,开发者可以定义新的命令,数据类型,甚至改变 Redis 的行为。因此,Redis Modules 可以极大地增强 Redis 的灵活性和可扩展性。 +由于 Redis Modules 可以定义新的数据类型和命令,RedisShake 需要对这些新的数据类型和命令进行专门的处理,才能正确地迁移或同步这些数据。否则,如果 RedisShake 不理解这些新的数据类型和命令,它可能会无法正确地处理这些数据,或者在处理过程中出错。因此,对于使用了 Redis Modules 的 Redis 实例,一般需要 RedisShake 为其使用的 Module 提供相应的适配器,以便正确地处理这些自定义的数据类型和命令。 -# 核心流程 -相关代码在`internal\rdb`目录下,如需要支持其它 redis module 类型,可分解为以下三个步骤—— +## 已支持的 Redis Modules 列表 +- [TairHash](https://github.com/tair-opensource/TairHash):支持 field 级别设置过期和版本的 Hash 数据结构。 +- [TairString](https://github.com/tair-opensource/TairString):支持版本的 String 结构,可以实现分布式锁/乐观锁。 +- [TairZset](https://github.com/tair-opensource/TairZset):支持最多 256 维的 double 排序,可以实现多维排行榜。 + +## 如何支持新的 Redis Modules + +### 核心流程 + +相关代码在`internal\rdb`目录下,如需要支持其它 Redis Modules 类型,可分解为以下三个步骤: - 从rdb文件中正确读入 - RedisShake 中已经 对 redis module 自定的几种类型进行了封装,从 rdb 文件进行读取时,可直接借助于已经封装好的函数进行读取(`internal\rdb\structure\module2_struct.go`) - 构建一个合适的中间数据结构类型,用于存储相应数据(key + value) - 大小key 的处理 - 小key - 在实际工作中,执行`LoadFromBuffer`函数从rdb读入数据时,其对应的 value 值会流动到两个地方,一个是直接存储在缓存区中一份,用于小 key 发送时直接读取(与`restore`命令有关),一个流动到上述的中间数据结构中,被下述的 `rewrite`函数使用 - 大key - 借助于` rewrite` 函数,从上述的中间数据结构中读取,并拆分为对应的命令进行发送 ![module-supported.jpg](/public/module-supported.jpg) - -# 其它 -## 补充命令测试 +### 补充命令测试 为了确保正常,需要在` tests\helpers\commands` 里面添加对应 module 的命令,来测试相关命令可以在 rdb、sync、scan 三个模式下工作正常。测试框架具体见[pybbt](https://pypi.org/project/pybbt/),具体思想——借助于redis-py 包,对其进行封装,模拟客户端发送命令,然后比对实际的返回值与已有的返回值。 -## 补充命令列表 +RedisShake 在针对大 key 进行传输时,会查命令表格`RedisShake\internal\commands\table.go`,检查命令的合规性,因此在添加新 module 
时,需要将对应的命令加入表格,具体可参照`RedisShake\scripts`部分代码 -## 补充 ci -在 ci 测试中,需要添加对自定义 module 的编译,具体可见` ci.yml` 内容 +### 补充 ci +在 ci 测试中,需要添加对自定义 module 的编译,具体可见` ci.yml` 内容 -# 已支持的 redis module 列表 -- [TairHash](https://github.com/tair-opensource/TairHash) -- [TairString](https://github.com/tair-opensource/TairString) -- [TairZset](https://github.com/tair-opensource/TairZset) diff --git a/docs/src/en/reader/rdb_reader.md b/docs/src/en/reader/rdb_reader.md new file mode 100644 index 00000000..e2db7402 --- /dev/null +++ b/docs/src/en/reader/rdb_reader.md @@ -0,0 +1,14 @@ +# rdb_reader + +## 介绍 + +可以使用 `rdb_reader` 来从 RDB 文件中读取数据,然后写入目标端。常见于从备份文件中恢复数据。 + +## 配置 + +```toml +[rdb_reader] +filepath = "/tmp/dump.rdb" +``` + +* 应传入绝对路径。 diff --git a/docs/src/en/reader/scan_reader.md b/docs/src/en/reader/scan_reader.md new file mode 100644 index 00000000..ea61718e --- /dev/null +++ b/docs/src/en/reader/scan_reader.md @@ -0,0 +1,41 @@ +# Scan Reader + +## 介绍 + +::: tip +本方案为次选方案,当可以使用 [`sync_reader`](sync_reader.md) 时,请优选 [`sync_reader`](sync_reader.md)。 +::: + +`scan_reader` 通过 `SCAN` 命令遍历源端数据库中的所有 Key,并使用 `DUMP` 与 `RESTORE` 命令来读取与写入 Key 的内容。 + +注意: +1. Redis 的 `SCAN` 命令只保证 `SCAN` 的开始与结束之前均存在的 Key 一定会被返回,但是新写入的 Key 有可能会被遗漏,期间删除的 Key 也可能已经被写入目的端。可以通过 `ksn` 配置解决 +2. `SCAN` 命令与 `DUMP` 命令会占用源端数据库较多的 CPU 资源。 + + + +## 配置 + +```toml +[scan_reader] +cluster = false # set to true if source is a redis cluster +address = "127.0.0.1:6379" # when cluster is true, set address to one of the cluster node +username = "" # keep empty if not using ACL +password = "" # keep empty if no authentication is required +tls = false +ksn = false # set to true to enabled Redis keyspace notifications (KSN) subscription +``` + +* `cluster`:源端是否为集群 +* `address`:源端地址, 当源端为集群时,`address` 为集群中的任意一个节点即可 +* 鉴权: + * 当源端使用 ACL 账号时,配置 `username` 和 `password` + * 当源端使用传统账号时,仅配置 `password` + * 当源端无鉴权时,不配置 `username` 和 `password` +* `tls`:源端是否开启 TLS/SSL,不需要配置证书因为 RedisShake 没有校验服务器证书 +* `ksn`:开启 `ksn` 参数后 RedisShake 会在 `SCAN` 之前使用 [Redis keyspace notifications](https://redis.io/docs/manual/keyspace-notifications/) +能力来订阅 Key 的变化。当 Key 发生变化时,RedisShake 会使用 `DUMP` 与 `RESTORE` 命令来从源端读取 Key 的内容,并写入目标端。 + +::: warning +Redis keyspace notifications 不会感知到 `FLUSHALL` 与 `FLUSHDB` 命令,因此在使用 `ksn` 参数时,需要确保源端数据库不会执行这两个命令。 +::: diff --git a/docs/src/en/reader/sync_reader.md b/docs/src/en/reader/sync_reader.md new file mode 100644 index 00000000..7aaf82e2 --- /dev/null +++ b/docs/src/en/reader/sync_reader.md @@ -0,0 +1,37 @@ +# Sync Reader + +## Introduction + +When the source database is compatible with the PSync protocol, `sync_reader` is recommended. Databases compatible with the PSync protocol include: + +* Redis +* Tair +* ElastiCache (partially compatible) +* MemoryDB (partially compatible) + +Advantages: Best data consistency, minimal impact on the source database, and allows for seamless switching. + +Principle: RedisShake simulates a Slave connecting to the Master node, and the Master will send data to RedisShake, which includes both full and incremental parts. The full data is an RDB file, and the incremental data is an AOF data stream. RedisShake will accept both full and incremental data and temporarily store them on the hard disk. During the full synchronization phase, RedisShake first parses the RDB file into individual Redis commands, then sends these commands to the destination. During the incremental synchronization phase, RedisShake continues to synchronize the AOF data stream to the destination. 
+ +## Configuration + +```toml +[sync_reader] +cluster = false # set to true if source is a redis cluster +address = "127.0.0.1:6379" # when cluster is true, set address to one of the cluster node +username = "" # keep empty if not using ACL +password = "" # keep empty if no authentication is required +tls = false +sync_rdb = true # set to false if you don't want to sync rdb +sync_aof = true # set to false if you don't want to sync aof +``` + +* `cluster`: Whether the source is a cluster +* `address`: Source address, when the source is a cluster, `address` can be set to any node in the cluster +* Authentication: + * When the source uses ACL accounts, configure `username` and `password` + * When the source uses traditional accounts, only configure `password` + * When the source does not require authentication, do not configure `username` and `password` +* `tls`: Whether the source has enabled TLS/SSL, no need to configure a certificate because RedisShake does not verify the server certificate +* `sync_rdb`: Whether to synchronize RDB, when set to false, RedisShake will skip the full synchronization phase +* `sync_aof`: Whether to synchronize AOF, when set to false, RedisShake will skip the incremental synchronization phase, at which point RedisShake will exit after the full synchronization phase is complete. \ No newline at end of file diff --git a/docs/src/en/writer/redis_writer.md b/docs/src/en/writer/redis_writer.md new file mode 100644 index 00000000..80e870e4 --- /dev/null +++ b/docs/src/en/writer/redis_writer.md @@ -0,0 +1,28 @@ +# Redis Writer + +## 介绍 + +`redis_writer` 用于将数据写入 Redis-like 数据库。 + +## 配置 + +```toml +[redis_writer] +cluster = false +address = "127.0.0.1:6379" # when cluster is true, address is one of the cluster node +username = "" # keep empty if not using ACL +password = "" # keep empty if no authentication is required +tls = false +``` + +* `cluster`:是否为集群。 +* `address`:连接地址。当目的端为集群时,`address` 填写集群中的任意一个节点即可 +* 鉴权: + * 当使用 ACL 账号体系时,配置 `username` 和 `password` + * 当使用传统账号体系时,仅配置 `password` + * 当无鉴权时,不配置 `username` 和 `password` +* `tls`:是否开启 TLS/SSL,不需要配置证书因为 RedisShake 没有校验服务器证书 + +注意事项: +1. 当目的端为集群时,应保证源端发过来的命令满足 [Key 的哈希值属于同一个 slot](https://redis.io/docs/reference/cluster-spec/#implemented-subset)。 +2. 
应尽量保证目的端版本大于等于源端版本,否则可能会出现不支持的命令。如确实需要降低版本,可以设置 `target_redis_proto_max_bulk_len` 为 0,来避免使用 `restore` 命令恢复数据。 diff --git a/docs/src/zh/function/best_practices.md b/docs/src/zh/function/best_practices.md index 9be902f2..f663956a 100644 --- a/docs/src/zh/function/best_practices.md +++ b/docs/src/zh/function/best_practices.md @@ -4,8 +4,96 @@ outline: deep # 最佳实践 +## 过滤 + +### 过滤 Key + +```lua +local prefix = "user:" +local prefix_len = #prefix + +if string.sub(KEYS[1], 1, prefix_len) ~= prefix then + return +end + +shake.call(DB, ARGV) +``` + +效果是只将 key 以 `user:` 开头的源数据写入到目标端。没有考虑 `mset` 等多 key 命令的情况。 + +### 过滤 DB + +```lua +shake.log(DB) +if DB == 0 +then + return +end +shake.call(DB, ARGV) +``` + +效果是丢弃源端 `db` 0 的数据,将其他 `db` 的数据写入到目标端。 + + +### 过滤某类数据结构 + +可以通过 `GROUP` 变量来判断数据结构类型,支持的数据结构类型有:`STRING`、`LIST`、`SET`、`ZSET`、`HASH`、`SCRIPTING` 等。 + +#### 过滤 Hash 类型数据 +```lua +if GROUP == "HASH" then + return +end +shake.call(DB, ARGV) +``` + +效果是丢弃源端的 `hash` 类型数据,将其他数据写入到目标端。 + +#### 过滤 [LUA 脚本](https://redis.io/docs/interact/programmability/eval-intro/) + +```lua +if GROUP == "SCRIPTING" then + return +end +shake.call(DB, ARGV) +``` + +效果是丢弃源端的 `lua` 脚本,将其他数据写入到目标端。常见于主从同步至集群时,存在集群不支持的 LUA 脚本。 + ## 修改 ### 修改 Key 的前缀 -TODO +```lua +local prefix_old = "prefix_old_" +local prefix_new = "prefix_new_" + +shake.log("old=" .. table.concat(ARGV, " ")) + +for i, index in ipairs(KEY_INDEXES) do + local key = ARGV[index] + if string.sub(key, 1, #prefix_old) == prefix_old then + ARGV[index] = prefix_new .. string.sub(key, #prefix_old + 1) + end +end + +shake.log("new=" .. table.concat(ARGV, " ")) +shake.call(DB, ARGV) +``` +效果是将源端的 key `prefix_old_key` 写入到目标端的 key `prefix_new_key`。 + +### 交换 DB + +```lua +local db1 = 1 +local db2 = 2 + +if DB == db1 then + DB = db2 +elseif DB == db2 then + DB = db1 +end +shake.call(DB, ARGV) +``` + +效果是将源端的 `db 1` 写入到目标端的 `db 2`,将源端的 `db 2` 写入到目标端的 `db 1`, 其他 `db` 不变。 \ No newline at end of file diff --git a/docs/src/zh/function/introduction.md b/docs/src/zh/function/introduction.md index be055b76..579d0eec 100644 --- a/docs/src/zh/function/introduction.md +++ b/docs/src/zh/function/introduction.md @@ -38,6 +38,9 @@ address = "127.0.0.1:6380" ## function API ### 变量 + +因为有些命令中含有多个 key,比如 `mset` 等命令。所以,`KEYS`、`KEY_INDEXES`、`SLOTS` 这三个变量都是数组类型。如果确认命令只有一个 key,可以直接使用 `KEYS[1]`、`KEY_INDEXES[1]`、`SLOTS[1]`。 + | 变量 | 类型 | 示例 | 描述 | |-|-|-|-----| | DB | number | 1 | 命令所属的 `db` | diff --git a/docs/src/zh/guide/getting-started.md b/docs/src/zh/guide/getting-started.md index 1df9f343..d640771f 100644 --- a/docs/src/zh/guide/getting-started.md +++ b/docs/src/zh/guide/getting-started.md @@ -38,3 +38,8 @@ address = "127.0.0.1:6380" ```shell ./redis-shake shake.toml ``` + +## 注意事项 + +1. 不要在同一个目录运行两个 RedisShake 进程,因为运行时产生的临时文件可能会被覆盖,导致异常行为。 +2. 不要降低 Redis 版本,比如从 6.0 降到 5.0,因为 RedisShake 每个大版本都会引入一些新的命令和新的编码方式,如果降低版本,可能会导致不兼容。 diff --git a/docs/src/zh/markdown-examples.md b/docs/src/zh/markdown-examples.md deleted file mode 100644 index 8e55eb8a..00000000 --- a/docs/src/zh/markdown-examples.md +++ /dev/null @@ -1,85 +0,0 @@ -# Markdown Extension Examples - -This page demonstrates some of the built-in markdown extensions provided by VitePress. - -## Syntax Highlighting - -VitePress provides Syntax Highlighting powered by [Shiki](https://github.com/shikijs/shiki), with additional features like line-highlighting: - -**Input** - -```` -```js{4} -export default { - data () { - return { - msg: 'Highlighted!' 
- } - } -} -``` -```` - -**Output** - -```js{4} -export default { - data () { - return { - msg: 'Highlighted!' - } - } -} -``` - -## Custom Containers - -**Input** - -```md -::: info -This is an info box. -::: - -::: tip -This is a tip. -::: - -::: warning -This is a warning. -::: - -::: danger -This is a dangerous warning. -::: - -::: details -This is a details block. -::: -``` - -**Output** - -::: info -This is an info box. -::: - -::: tip -This is a tip. -::: - -::: warning -This is a warning. -::: - -::: danger -This is a dangerous warning. -::: - -::: details -This is a details block. -::: - -## More - -Check out the documentation for the [full list of markdown extensions](https://vitepress.dev/guide/markdown). diff --git a/docs/src/zh/others/modules.md b/docs/src/zh/others/modules.md new file mode 100644 index 00000000..6309515c --- /dev/null +++ b/docs/src/zh/others/modules.md @@ -0,0 +1,43 @@ +--- +outline: deep +--- + +# Redis Modules + +Redis Modules 是 Redis 4.0 版本引入的一个新特性,它允许开发者扩展 Redis 的功能。通过创建模块,开发者可以定义新的命令,数据类型,甚至改变 Redis 的行为。因此,Redis Modules 可以极大地增强 Redis 的灵活性和可扩展性。 + +由于 Redis Modules 可以定义新的数据类型和命令,RedisShake 需要对这些新的数据类型和命令进行专门的处理,才能正确地迁移或同步这些数据。否则,如果 RedisShake 不理解这些新的数据类型和命令,它可能会无法正确地处理这些数据,或者在处理过程中出错。因此,对于使用了 Redis Modules 的 Redis 实例,一般需要 RedisShake 为其使用的 Module 提供相应的适配器,以便正确地处理这些自定义的数据类型和命令。 + +## 已支持的 Redis Modules 列表 + +- [TairHash](https://github.com/tair-opensource/TairHash):支持 field 级别设置过期和版本的 Hash 数据结构。 +- [TairString](https://github.com/tair-opensource/TairString):支持版本的 String 结构,可以实现分布式锁/乐观锁。 +- [TairZset](https://github.com/tair-opensource/TairZset):支持最多 256 维的 double 排序,可以实现多维排行榜。 + +## 如何支持新的 Redis Modules + +### 核心流程 + +相关代码在`internal\rdb`目录下,如需要支持其它 Redis Modules 类型,可分解为以下三个步骤: +- 从rdb文件中正确读入 + - RedisShake 中已经 对 redis module 自定的几种类型进行了封装,从 rdb 文件进行读取时,可直接借助于已经封装好的函数进行读取(`internal\rdb\structure\module2_struct.go`) +- 构建一个合适的中间数据结构类型,用于存储相应数据(key + value) +- 大小key 的处理 + - 小key + - 在实际工作中,执行`LoadFromBuffer`函数从rdb读入数据时,其对应的 value 值会流动到两个地方,一个是直接存储在缓存区中一份,用于小 key 发送时直接读取(与`restore`命令有关),一个流动到上述的中间数据结构中,被下述的 `rewrite`函数使用 + - 大key + - 借助于` rewrite` 函数,从上述的中间数据结构中读取,并拆分为对应的命令进行发送 + +![module-supported.jpg](/public/module-supported.jpg) + +### 补充命令测试 +为了确保正常,需要在` tests\helpers\commands` 里面添加对应 module 的命令,来测试相关命令可以在 rdb、sync、scan 三个模式下工作正常。测试框架具体见[pybbt](https://pypi.org/project/pybbt/),具体思想——借助于redis-py 包,对其进行封装,模拟客户端发送命令,然后比对实际的返回值与已有的返回值。 + +### 补充命令列表 +RedisShake 在针对大 key 进行传输时,会查命令表格`RedisShake\internal\commands\table.go`,检查命令的合规性,因此在添加新 module 时,需要将对应的命令加入表格,具体可参照`RedisShake\scripts`部分代码 + +### 补充 ci +在 ci 测试中,需要添加对自定义 module 的编译,具体可见` ci.yml` 内容 + + + diff --git a/docs/src/zh/reader/sync_reader.md b/docs/src/zh/reader/sync_reader.md index a60001ff..5b624a3a 100644 --- a/docs/src/zh/reader/sync_reader.md +++ b/docs/src/zh/reader/sync_reader.md @@ -11,6 +11,8 @@ 优势:数据一致性最佳,对源库影响小,可以实现不停机的切换 +原理:RedisShake 模拟 Slave 连接到 Master 节点,Master 会向 RedisShake 发送数据,数据包含全量与增量两部分。全量是一个 RDB 文件,增量是 AOF 数据流,RedisShake 会接受全量与增量将其暂存到硬盘上。全量同步阶段:RedisShake 首先会将 RDB 文件解析为一条条的 Redis 命令,然后将这些命令发送至目的端。增量同步阶段:RedisShake 会持续将 AOF 数据流同步至目的端。 + ## 配置 ```toml @@ -20,6 +22,8 @@ address = "127.0.0.1:6379" # when cluster is true, set address to one of the clu username = "" # keep empty if not using ACL password = "" # keep empty if no authentication is required tls = false +sync_rdb = true # set to false if you don't want to sync rdb +sync_aof = true # set to false if you don't want to sync aof ``` * `cluster`:源端是否为集群 @@ -28,4 +32,6 @@ tls = false * 当源端使用 ACL 账号时,配置 `username` 和 `password` * 
当源端使用传统账号时,仅配置 `password` * 当源端无鉴权时,不配置 `username` 和 `password` -* `tls`:源端是否开启 TLS/SSL,不需要配置证书因为 RedisShake 没有校验服务器证书 \ No newline at end of file +* `tls`:源端是否开启 TLS/SSL,不需要配置证书因为 RedisShake 没有校验服务器证书 +* `sync_rdb`:是否同步 RDB,设置为 false 时,RedisShake 会跳过全量同步阶段 +* `sync_aof`:是否同步 AOF,设置为 false 时,RedisShake 会跳过增量同步阶段,此时 RedisShake 会在全量同步阶段结束后退出 \ No newline at end of file diff --git a/docs/src/zh/writer/redis_writer.md b/docs/src/zh/writer/redis_writer.md index 72063c80..80e870e4 100644 --- a/docs/src/zh/writer/redis_writer.md +++ b/docs/src/zh/writer/redis_writer.md @@ -15,13 +15,13 @@ password = "" # keep empty if no authentication is required tls = false ``` -* `cluster`:源端是否为集群 -* `address`:源端地址, 当源端为集群时,`address` 为集群中的任意一个节点即可 +* `cluster`:是否为集群。 +* `address`:连接地址。当目的端为集群时,`address` 填写集群中的任意一个节点即可 * 鉴权: - * 当源端使用 ACL 账号时,配置 `username` 和 `password` - * 当源端使用传统账号时,仅配置 `password` - * 当源端无鉴权时,不配置 `username` 和 `password` -* `tls`:源端是否开启 TLS/SSL,不需要配置证书因为 RedisShake 没有校验服务器证书 + * 当使用 ACL 账号体系时,配置 `username` 和 `password` + * 当使用传统账号体系时,仅配置 `password` + * 当无鉴权时,不配置 `username` 和 `password` +* `tls`:是否开启 TLS/SSL,不需要配置证书因为 RedisShake 没有校验服务器证书 注意事项: 1. 当目的端为集群时,应保证源端发过来的命令满足 [Key 的哈希值属于同一个 slot](https://redis.io/docs/reference/cluster-spec/#implemented-subset)。 diff --git a/go.mod b/go.mod index 6c90ddca..f8098742 100644 --- a/go.mod +++ b/go.mod @@ -26,7 +26,7 @@ require ( github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/subosito/gotenv v1.4.2 // indirect - golang.org/x/sys v0.5.0 // indirect + golang.org/x/sys v0.12.0 // indirect golang.org/x/text v0.7.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/go.sum b/go.sum index 23056550..4727f8bb 100644 --- a/go.sum +++ b/go.sum @@ -327,8 +327,8 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/internal/reader/sync_standalone_reader.go b/internal/reader/sync_standalone_reader.go index bde9a7be..8f5f8404 100644 --- a/internal/reader/sync_standalone_reader.go +++ b/internal/reader/sync_standalone_reader.go @@ -25,6 +25,8 @@ type SyncReaderOptions struct { Username string `mapstructure:"username" default:""` Password string `mapstructure:"password" default:""` Tls bool `mapstructure:"tls" default:"false"` + SyncRdb bool `mapstructure:"sync_rdb" default:"true"` + SyncAof bool `mapstructure:"sync_aof" default:"true"` } type State string @@ -38,6 +40,7 @@ const ( ) type syncStandaloneReader struct { + opts *SyncReaderOptions client *client.Redis ch chan *entry.Entry @@ -72,6 +75,7 @@ type 
syncStandaloneReader struct { func NewSyncStandaloneReader(opts *SyncReaderOptions) Reader { r := new(syncStandaloneReader) + r.opts = opts r.client = client.NewRedisClient(opts.Address, opts.Username, opts.Password, opts.Tls) r.rd = r.client.BufioReader() r.stat.Name = "reader_" + strings.Replace(opts.Address, ":", "_", -1) @@ -91,9 +95,14 @@ func (r *syncStandaloneReader) StartRead() chan *entry.Entry { r.receiveRDB() startOffset := r.stat.AofReceivedOffset go r.receiveAOF(r.rd) - r.sendRDB() - r.stat.Status = kSyncAof - r.sendAOF(startOffset) + if r.opts.SyncRdb { + r.sendRDB() + } + if r.opts.SyncAof { + r.stat.Status = kSyncAof + r.sendAOF(startOffset) + } + close(r.ch) }() return r.ch @@ -103,12 +112,9 @@ func (r *syncStandaloneReader) sendReplconfListenPort() { // use status_port as redis-shake port argv := []string{"replconf", "listening-port", strconv.Itoa(config.Opt.Advanced.StatusPort)} r.client.Send(argv...) - reply, err := r.client.Receive() + _, err := r.client.Receive() if err != nil { - log.Warnf("[%s] send replconf command to redis server failed. reply=[%s], error=[%v]", r.stat.Name, reply, err) - } - if reply != "OK" { - log.Warnf("[%s] send replconf command to redis server failed. reply=[%s]", r.stat.Name, reply) + log.Warnf("[%s] send replconf command to redis server failed. error=[%v]", r.stat.Name, err) } } diff --git a/shake.toml b/shake.toml index 6d3c5cd4..64f40c02 100644 --- a/shake.toml +++ b/shake.toml @@ -7,6 +7,8 @@ address = "127.0.0.1:6379" # when cluster is true, set address to one of the clu username = "" # keep empty if not using ACL password = "" # keep empty if no authentication is required tls = false +sync_rdb = true # set to false if you don't want to sync rdb +sync_aof = true # set to false if you don't want to sync aof # [scan_reader] # cluster = false # set to true if source is a redis cluster diff --git a/tests/cases/sync.py b/tests/cases/sync.py index dfd4f41f..6d5df062 100644 --- a/tests/cases/sync.py +++ b/tests/cases/sync.py @@ -1,3 +1,5 @@ +import time + import pybbt as p import helpers as h @@ -23,6 +25,7 @@ def test(src, dst): # wait sync done p.ASSERT_TRUE_TIMEOUT(lambda: shake.is_consistent()) p.log(shake.get_status()) + time.sleep(5) # check data inserter.check_data(src, cross_slots_cmd=cross_slots_cmd)
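Putting the new options together: below is a minimal, illustrative `shake.toml` that combines the `sync_rdb` / `sync_aof` toggles introduced by this change with a `function` filter. It is a sketch only — the addresses are placeholders and the Lua snippet is the `user:` prefix filter example from `best_practices.md`. With `sync_aof = false`, RedisShake sends only the full (RDB) phase for keys prefixed with `user:` and exits once that phase completes, as described for the new reader options.

```toml
# Illustrative sketch: one-shot full sync of keys prefixed with "user:".
# Single-key commands only; multi-key commands such as MSET are not handled here.
function = """
local prefix = "user:"
if string.sub(KEYS[1], 1, #prefix) ~= prefix then
    return
end
shake.call(DB, ARGV)
"""

[sync_reader]
address = "127.0.0.1:6379"  # placeholder source address
sync_rdb = true             # send the full RDB phase
sync_aof = false            # skip the incremental phase; RedisShake exits after the full phase

[redis_writer]
address = "127.0.0.1:6380"  # placeholder target address
```

Leaving `sync_aof = true` (the default) instead keeps the incremental AOF stream flowing after the full phase, which is the long-running synchronization behavior documented for `sync_reader`.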