
在分布式系统架构中,Redis作为高性能的键值对数据库,其部署方式直接决定了系统的可用性、可靠性和扩展性。不同的业务场景(如单机测试、高并发读写、海量数据存储)需要匹配不同的Redis部署方案。本文将从底层逻辑出发,全面拆解Redis的4种核心部署方式(单机版、主从复制、哨兵模式、Redis Cluster集群),结合实战配置与Java代码示例,帮你理清每种部署方式的适用场景、优缺点及落地要点,兼顾基础夯实与实际问题解决。
单机版是Redis最基础的部署方式,即单个Redis进程独立运行,所有的读写操作都在同一个节点上完成。其架构简单,无额外依赖,本质是“单进程+单线程(IO多路复用)”处理请求,适合对可用性要求不高的场景。
yum install -y gcc gcc-c++ make
# 下载并解压Redis
wget https://download.redis.io/releases/redis-7.2.5.tar.gz
tar -zxvf redis-7.2.5.tar.gz -C /usr/local/
cd /usr/local/redis-7.2.5/
# 编译安装
make && make install
# 修改配置文件(redis.conf)
vim /usr/local/redis-7.2.5/redis.conf
# 核心配置项
bind 0.0.0.0 # 允许所有IP访问(生产环境需指定具体IP)
protected-mode no # 关闭保护模式
port 6379 # 端口
daemonize yes # 后台运行
requirepass 123456 # 密码(生产环境必须设置)
appendonly yes # 开启AOF持久化(默认RDB,结合使用更安全)
appendfsync everysec # AOF同步策略:每秒同步
# 启动Redis
redis-server /usr/local/redis-7.2.5/redis.conf
# 验证启动
redis-cli -h 127.0.0.1 -p 6379 -a 123456 ping
# 输出PONG则启动成功
<dependencies>
<!-- Spring Boot核心依赖 -->
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-web</artifactId>
<version>3.2.5</version>
</dependency>
<!-- Redis依赖 -->
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-data-redis</artifactId>
<version>3.2.5</version>
</dependency>
<!-- Lombok -->
<dependency>
<groupId>org.projectlombok</groupId>
<artifactId>lombok</artifactId>
<version>1.18.30</version>
<scope>provided</scope>
</dependency>
<!-- FastJSON2 -->
<dependency>
<groupId>com.alibaba.fastjson2</groupId>
<artifactId>fastjson2</artifactId>
<version>2.0.45</version>
</dependency>
<!-- Spring Util工具类 -->
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-core</artifactId>
<version>6.1.6</version>
</dependency>
<!-- Google Collections -->
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
<version>33.2.1-jre</version>
</dependency>
<!-- Swagger3 -->
<dependency>
<groupId>org.springdoc</groupId>
<artifactId>springdoc-openapi-starter-webmvc-ui</artifactId>
<version>2.2.0</version>
</dependency>
</dependencies>
package com.jam.demo.config;
import com.alibaba.fastjson2.support.spring.data.redis.FastJson2RedisSerializer;
import lombok.extern.slf4j.Slf4j;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.data.redis.connection.RedisConnectionFactory;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.data.redis.serializer.StringRedisSerializer;
import org.springframework.util.ObjectUtils;
/**
* Redis配置类
* @author ken
*/
@Configuration
@Slf4j
publicclass RedisConfig {
/**
* 自定义RedisTemplate,使用FastJSON2序列化
* @param connectionFactory Redis连接工厂
* @return RedisTemplate<String, Object>
*/
@Bean
public RedisTemplate<String, Object> redisTemplate(RedisConnectionFactory connectionFactory) {
if (ObjectUtils.isEmpty(connectionFactory)) {
log.error("RedisConnectionFactory is null");
thrownew IllegalArgumentException("RedisConnectionFactory cannot be null");
}
RedisTemplate<String, Object> redisTemplate = new RedisTemplate<>();
redisTemplate.setConnectionFactory(connectionFactory);
// 字符串序列化器(key)
StringRedisSerializer stringRedisSerializer = new StringRedisSerializer();
// FastJSON2序列化器(value)
FastJson2RedisSerializer<Object> fastJson2RedisSerializer = new FastJson2RedisSerializer<>(Object.class);
// 设置key序列化方式
redisTemplate.setKeySerializer(stringRedisSerializer);
redisTemplate.setHashKeySerializer(stringRedisSerializer);
// 设置value序列化方式
redisTemplate.setValueSerializer(fastJson2RedisSerializer);
redisTemplate.setHashValueSerializer(fastJson2RedisSerializer);
redisTemplate.afterPropertiesSet();
log.info("RedisTemplate initialized successfully");
return redisTemplate;
}
}
package com.jam.demo.service;

import com.google.common.collect.Maps;
import com.jam.demo.entity.User;
import jakarta.annotation.Resource;
import lombok.extern.slf4j.Slf4j;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
import org.springframework.util.CollectionUtils;
import org.springframework.util.ObjectUtils;
import org.springframework.util.StringUtils;

import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;

/**
 * Business service backed by a standalone Redis instance.
 *
 * <p>Caches {@link User} entities under keys of the form {@code user:<id>}
 * with a 30-minute TTL.
 *
 * <p>NOTE: the POM targets Spring Boot 3.x, so {@code @Resource} must come
 * from {@code jakarta.annotation}, not {@code javax.annotation}.
 *
 * @author ken
 */
@Service
@Slf4j
public class RedisSingleService {

    @Resource
    private RedisTemplate<String, Object> redisTemplate;

    // Key namespace for cached users.
    private static final String USER_KEY_PREFIX = "user:";
    // Time-to-live for cached users, in minutes.
    private static final long USER_EXPIRE_TIME = 30L;

    /**
     * Stores a user in Redis with a 30-minute expiry.
     *
     * @param user user entity; must be non-null with a non-blank id
     * @return true on success, false on invalid input or Redis failure
     */
    public boolean saveUser(User user) {
        // hasText also rejects whitespace-only ids; StringUtils.isEmpty is
        // deprecated as of Spring Framework 5.3/6.
        if (ObjectUtils.isEmpty(user) || !StringUtils.hasText(user.getId())) {
            log.error("保存用户失败:用户信息为空或ID为空");
            return false;
        }
        try {
            String key = USER_KEY_PREFIX + user.getId();
            redisTemplate.opsForValue().set(key, user, USER_EXPIRE_TIME, TimeUnit.MINUTES);
            log.info("保存用户成功,key:{}", key);
            return true;
        } catch (Exception e) {
            log.error("保存用户失败", e);
            return false;
        }
    }

    /**
     * Looks up a user by id.
     *
     * @param userId user id
     * @return the cached user, or null if missing or the id is blank
     */
    public User getUserById(String userId) {
        if (!StringUtils.hasText(userId)) {
            log.error("查询用户失败:用户ID为空");
            return null;
        }
        String key = USER_KEY_PREFIX + userId;
        return (User) redisTemplate.opsForValue().get(key);
    }

    /**
     * Batch-fetches users by id.
     *
     * <p>Uses a single MGET round trip instead of one GET per id, which
     * matters once the id list grows.
     *
     * @param userIds user ids (blank entries are skipped)
     * @return map of id to user for the ids that were found; never null
     */
    public Map<String, User> batchGetUsers(String... userIds) {
        if (ObjectUtils.isEmpty(userIds)) {
            log.error("批量查询用户失败:用户ID数组为空");
            return Maps.newHashMap();
        }
        Map<String, User> userMap = Maps.newHashMap();
        // Filter out blank ids up front so keys and ids stay index-aligned.
        List<String> validIds = Arrays.stream(userIds)
                .filter(StringUtils::hasText)
                .toList();
        if (validIds.isEmpty()) {
            return userMap;
        }
        List<String> keys = validIds.stream().map(id -> USER_KEY_PREFIX + id).toList();
        List<Object> values = redisTemplate.opsForValue().multiGet(keys);
        if (CollectionUtils.isEmpty(values)) {
            return userMap;
        }
        // multiGet preserves request order: values.get(i) belongs to validIds.get(i).
        for (int i = 0; i < validIds.size(); i++) {
            Object value = values.get(i);
            if (!ObjectUtils.isEmpty(value)) {
                userMap.put(validIds.get(i), (User) value);
            }
        }
        return userMap;
    }

    /**
     * Deletes a cached user.
     *
     * @param userId user id
     * @return true if the key existed and was removed
     */
    public boolean deleteUser(String userId) {
        if (!StringUtils.hasText(userId)) {
            log.error("删除用户失败:用户ID为空");
            return false;
        }
        String key = USER_KEY_PREFIX + userId;
        Boolean delete = redisTemplate.delete(key);
        // delete may be null on some template configurations; treat null as failure.
        return Boolean.TRUE.equals(delete);
    }
}
package com.jam.demo.controller;

import com.jam.demo.entity.User;
import com.jam.demo.service.RedisSingleService;
import io.swagger.v3.oas.annotations.Operation;
import io.swagger.v3.oas.annotations.Parameter;
import io.swagger.v3.oas.annotations.tags.Tag;
import jakarta.annotation.Resource;
import lombok.extern.slf4j.Slf4j;
import org.springframework.http.HttpStatus;
import org.springframework.http.ResponseEntity;
import org.springframework.util.ObjectUtils;
import org.springframework.util.StringUtils;
import org.springframework.web.bind.annotation.*;

import java.util.Map;

/**
 * REST controller exposing the standalone-Redis user operations.
 *
 * <p>NOTE: Spring Boot 3.x requires {@code jakarta.annotation.Resource}
 * rather than {@code javax.annotation.Resource}.
 *
 * @author ken
 */
@RestController
@RequestMapping("/redis/single")
@Slf4j
@Tag(name = "Redis单机版接口", description = "基于Redis单机版的用户信息操作接口")
public class RedisSingleController {

    @Resource
    private RedisSingleService redisSingleService;

    /** Saves a user into Redis with a 30-minute TTL. */
    @PostMapping("/user")
    @Operation(summary = "保存用户信息", description = "将用户信息存入Redis,设置30分钟过期")
    public ResponseEntity<Boolean> saveUser(@RequestBody User user) {
        boolean result = redisSingleService.saveUser(user);
        return new ResponseEntity<>(result, HttpStatus.OK);
    }

    /** Fetches a user by id; body is null when the user is not cached. */
    @GetMapping("/user/{userId}")
    @Operation(summary = "查询用户信息", description = "根据用户ID从Redis查询用户信息")
    public ResponseEntity<User> getUserById(
            @Parameter(description = "用户ID", required = true) @PathVariable String userId) {
        User user = redisSingleService.getUserById(userId);
        return new ResponseEntity<>(user, HttpStatus.OK);
    }

    /** Batch-fetches users; {@code userIds} is a comma-separated id list. */
    @GetMapping("/users")
    @Operation(summary = "批量查询用户信息", description = "根据多个用户ID批量查询Redis中的用户信息")
    public ResponseEntity<Map<String, User>> batchGetUsers(
            @Parameter(description = "用户ID列表(多个用逗号分隔)", required = true) @RequestParam String userIds) {
        // hasText also rejects a whitespace-only parameter, not just null/"".
        if (!StringUtils.hasText(userIds)) {
            return new ResponseEntity<>(HttpStatus.BAD_REQUEST);
        }
        String[] userIdArray = userIds.split(",");
        Map<String, User> userMap = redisSingleService.batchGetUsers(userIdArray);
        return new ResponseEntity<>(userMap, HttpStatus.OK);
    }

    /** Deletes a cached user; body is true when a key was actually removed. */
    @DeleteMapping("/user/{userId}")
    @Operation(summary = "删除用户信息", description = "根据用户ID删除Redis中的用户信息")
    public ResponseEntity<Boolean> deleteUser(
            @Parameter(description = "用户ID", required = true) @PathVariable String userId) {
        boolean result = redisSingleService.deleteUser(userId);
        return new ResponseEntity<>(result, HttpStatus.OK);
    }
}
package com.jam.demo.entity;

import io.swagger.v3.oas.annotations.media.Schema;
import lombok.Data;

import java.io.Serial;
import java.io.Serializable;

/**
 * User entity cached in Redis.
 *
 * <p>Implements {@link Serializable} so it can also be stored with
 * JDK-serializing templates; with FastJSON2 it is written as JSON.
 *
 * @author ken
 */
@Data
@Schema(description = "用户实体")
public class User implements Serializable {

    @Serial
    private static final long serialVersionUID = 1L;

    // Primary identifier; also used to build the Redis key "user:<id>".
    @Schema(description = "用户ID")
    private String id;

    @Schema(description = "用户名")
    private String username;

    @Schema(description = "用户年龄")
    private Integer age;

    @Schema(description = "用户邮箱")
    private String email;
}
主从复制是Redis高可用的基础方案,通过“一主多从”的架构实现:
核心价值:
底层同步逻辑:

# 1. 复制3份配置文件
cd /usr/local/redis-7.2.5/
cp redis.conf redis-6379.conf
cp redis.conf redis-6380.conf
cp redis.conf redis-6381.conf
# 2. 配置主节点(redis-6379.conf)
vim redis-6379.conf
# 核心配置(同单机版,确保以下配置)
bind 0.0.0.0
protected-mode no
port 6379
daemonize yes
requirepass 123456
appendonly yes
# 3. 配置从节点1(redis-6380.conf)
vim redis-6380.conf
# 核心配置
bind 0.0.0.0
protected-mode no
port 6380
daemonize yes
requirepass 123456
appendonly yes
# 关键:指定主节点信息
replicaof 127.0.0.1 6379 # 主节点IP:端口
masterauth 123456 # 主节点密码(主节点有密码时必须配置)
replica-read-only yes # 从节点只读(默认开启)
# 4. 配置从节点2(redis-6381.conf)
vim redis-6381.conf
# 核心配置(同从节点1,仅端口不同)
port 6381
replicaof 127.0.0.1 6379
masterauth 123456
# 5. 启动所有节点
redis-server redis-6379.conf
redis-server redis-6380.conf
redis-server redis-6381.conf
# 6. 验证主从关系
# 连接主节点
redis-cli -h 127.0.0.1 -p 6379 -a 123456 info replication
# 输出结果中应包含:role:master,connected_slaves:2,及两个从节点信息
# 连接从节点验证
redis-cli -h 127.0.0.1 -p 6380 -a 123456 info replication
# 输出结果中应包含:role:slave,master_host:127.0.0.1,master_port:6379
package com.jam.demo.config;

import com.alibaba.fastjson2.support.spring.data.redis.FastJson2RedisSerializer;
import lombok.extern.slf4j.Slf4j;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.data.redis.connection.RedisConnectionFactory;
import org.springframework.data.redis.connection.RedisStandaloneConfiguration;
import org.springframework.data.redis.connection.jedis.JedisClientConfiguration;
import org.springframework.data.redis.connection.jedis.JedisConnectionFactory;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.data.redis.serializer.StringRedisSerializer;
import org.springframework.util.ObjectUtils;
import redis.clients.jedis.JedisPoolConfig;

import java.time.Duration;

/**
 * Redis master/replica configuration (read/write splitting).
 *
 * <p>Exposes a master-backed template for writes and a replica-backed
 * template for reads. Routing is the caller's responsibility
 * (see {@code RedisMasterSlaveService}).
 *
 * @author ken
 */
@Configuration
@Slf4j
public class RedisMasterSlaveConfig {

    // Master node.
    private static final String MASTER_HOST = "127.0.0.1";
    private static final int MASTER_PORT = 6379;
    private static final String MASTER_PASSWORD = "123456";

    // Replica 1 — the only replica currently wired into slaveRedisTemplate.
    private static final String SLAVE1_HOST = "127.0.0.1";
    private static final int SLAVE1_PORT = 6380;
    private static final String SLAVE1_PASSWORD = "123456";

    // Replica 2 — NOTE(review): declared but not used anywhere below; wire in
    // a second connection factory (and a load-balancing strategy) or remove.
    private static final String SLAVE2_HOST = "127.0.0.1";
    private static final int SLAVE2_PORT = 6381;
    private static final String SLAVE2_PASSWORD = "123456";

    /**
     * Shared Jedis connection-pool settings.
     *
     * @return JedisPoolConfig pool configuration
     */
    @Bean
    public JedisPoolConfig jedisPoolConfig() {
        JedisPoolConfig poolConfig = new JedisPoolConfig();
        poolConfig.setMaxTotal(100);                       // max total connections
        poolConfig.setMaxIdle(20);                         // max idle connections
        poolConfig.setMinIdle(5);                          // min idle connections
        poolConfig.setMaxWait(Duration.ofMillis(3000));    // max borrow wait
        poolConfig.setTestOnBorrow(true);                  // validate on borrow
        return poolConfig;
    }

    /**
     * Connection factory pointing at the master node (writes).
     *
     * @param poolConfig shared pool configuration
     * @return JedisConnectionFactory for the master
     */
    @Bean(name = "masterRedisConnectionFactory")
    public JedisConnectionFactory masterRedisConnectionFactory(JedisPoolConfig poolConfig) {
        RedisStandaloneConfiguration config = new RedisStandaloneConfiguration();
        config.setHostName(MASTER_HOST);
        config.setPort(MASTER_PORT);
        config.setPassword(MASTER_PASSWORD);
        JedisClientConfiguration clientConfig = JedisClientConfiguration.builder()
                .usePooling()
                .poolConfig(poolConfig)
                .build();
        return new JedisConnectionFactory(config, clientConfig);
    }

    /**
     * Connection factory pointing at replica 1 (reads).
     *
     * @param poolConfig shared pool configuration
     * @return JedisConnectionFactory for replica 1
     */
    @Bean(name = "slave1RedisConnectionFactory")
    public JedisConnectionFactory slave1RedisConnectionFactory(JedisPoolConfig poolConfig) {
        RedisStandaloneConfiguration config = new RedisStandaloneConfiguration();
        config.setHostName(SLAVE1_HOST);
        config.setPort(SLAVE1_PORT);
        config.setPassword(SLAVE1_PASSWORD);
        JedisClientConfiguration clientConfig = JedisClientConfiguration.builder()
                .usePooling()
                .poolConfig(poolConfig)
                .build();
        return new JedisConnectionFactory(config, clientConfig);
    }

    /**
     * Master-backed RedisTemplate — use for write operations.
     *
     * <p>Intra-class {@code @Bean} calls are safe because proxyBeanMethods
     * defaults to true, so Spring returns the singleton, not a new instance.
     *
     * @return RedisTemplate<String, Object> bound to the master
     */
    @Bean(name = "masterRedisTemplate")
    public RedisTemplate<String, Object> masterRedisTemplate() {
        RedisTemplate<String, Object> redisTemplate = new RedisTemplate<>();
        redisTemplate.setConnectionFactory(masterRedisConnectionFactory(jedisPoolConfig()));
        setSerializer(redisTemplate);
        redisTemplate.afterPropertiesSet();
        return redisTemplate;
    }

    /**
     * Replica-backed RedisTemplate — use for read operations.
     *
     * <p>Currently pinned to replica 1 only; a production setup would
     * round-robin (or otherwise balance) across all replicas.
     *
     * @return RedisTemplate<String, Object> bound to replica 1
     */
    @Bean(name = "slaveRedisTemplate")
    public RedisTemplate<String, Object> slaveRedisTemplate() {
        RedisTemplate<String, Object> redisTemplate = new RedisTemplate<>();
        redisTemplate.setConnectionFactory(slave1RedisConnectionFactory(jedisPoolConfig()));
        setSerializer(redisTemplate);
        redisTemplate.afterPropertiesSet();
        return redisTemplate;
    }

    /**
     * Applies the shared serializer setup: string keys, FastJSON2 values.
     *
     * @param redisTemplate template to configure
     */
    private void setSerializer(RedisTemplate<String, Object> redisTemplate) {
        StringRedisSerializer stringSerializer = new StringRedisSerializer();
        FastJson2RedisSerializer<Object> fastJsonSerializer = new FastJson2RedisSerializer<>(Object.class);
        redisTemplate.setKeySerializer(stringSerializer);
        redisTemplate.setHashKeySerializer(stringSerializer);
        redisTemplate.setValueSerializer(fastJsonSerializer);
        redisTemplate.setHashValueSerializer(fastJsonSerializer);
    }
}
package com.jam.demo.service;

import com.google.common.collect.Maps;
import com.jam.demo.entity.User;
import jakarta.annotation.Resource;
import lombok.extern.slf4j.Slf4j;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.stereotype.Service;
import org.springframework.util.CollectionUtils;
import org.springframework.util.ObjectUtils;
import org.springframework.util.StringUtils;

import java.util.Map;
import java.util.concurrent.TimeUnit;

/**
 * User service on top of Redis master/replica replication.
 *
 * <p>Writes go to the master template; reads go to the replica template and
 * fall back to the master when the replica is unavailable.
 *
 * @author ken
 */
@Service
@Slf4j
public class RedisMasterSlaveService {

    // Master-backed template: all writes.
    @Resource(name = "masterRedisTemplate")
    private RedisTemplate<String, Object> masterRedisTemplate;

    // Replica-backed template: all reads.
    @Resource(name = "slaveRedisTemplate")
    private RedisTemplate<String, Object> slaveRedisTemplate;

    private static final String USER_KEY_PREFIX = "user:";
    // TTL in minutes.
    private static final long USER_EXPIRE_TIME = 30L;

    /**
     * Saves a user (write — routed to the master).
     *
     * @param user user entity; must be non-null with a non-blank id
     * @return true on success
     */
    public boolean saveUser(User user) {
        if (ObjectUtils.isEmpty(user) || !StringUtils.hasText(user.getId())) {
            log.error("保存用户失败:用户信息无效");
            return false;
        }
        try {
            String key = USER_KEY_PREFIX + user.getId();
            masterRedisTemplate.opsForValue().set(key, user, USER_EXPIRE_TIME, TimeUnit.MINUTES);
            log.info("主节点保存用户成功,key:{}", key);
            return true;
        } catch (Exception e) {
            log.error("主节点保存用户失败", e);
            return false;
        }
    }

    /**
     * Looks up a user (read — routed to a replica, master on failure).
     *
     * <p>NOTE: with asynchronous replication a just-written user may not be
     * visible on the replica yet.
     *
     * @param userId user id
     * @return the user, or null if missing / id blank
     */
    public User getUserById(String userId) {
        if (!StringUtils.hasText(userId)) {
            log.error("查询用户失败:用户ID为空");
            return null;
        }
        // Compute once; also used by the degradation path below.
        String key = USER_KEY_PREFIX + userId;
        try {
            User user = (User) slaveRedisTemplate.opsForValue().get(key);
            log.info("从节点查询用户,key:{},结果:{}", key, ObjectUtils.isEmpty(user) ? "空" : "存在");
            return user;
        } catch (Exception e) {
            log.error("从节点查询用户失败", e);
            // Replica failure: degrade to the master for this read.
            return (User) masterRedisTemplate.opsForValue().get(key);
        }
    }

    /**
     * Batch-fetches users (read — replica first, master on failure).
     *
     * @param userIds user ids; blank entries are skipped
     * @return id-to-user map for the found entries; never null
     */
    public Map<String, User> batchGetUsers(String... userIds) {
        if (ObjectUtils.isEmpty(userIds)) {
            log.error("批量查询用户失败:用户ID数组为空");
            return Maps.newHashMap();
        }
        Map<String, User> userMap = Maps.newHashMap();
        try {
            for (String userId : userIds) {
                if (StringUtils.hasText(userId)) {
                    String key = USER_KEY_PREFIX + userId;
                    User user = (User) slaveRedisTemplate.opsForValue().get(key);
                    if (!ObjectUtils.isEmpty(user)) {
                        userMap.put(userId, user);
                    }
                }
            }
            log.info("从节点批量查询用户完成,查询数量:{},成功数量:{}", userIds.length, userMap.size());
        } catch (Exception e) {
            log.error("从节点批量查询用户失败,降级到主节点", e);
            // Degrade the whole batch to the master.
            for (String userId : userIds) {
                if (StringUtils.hasText(userId)) {
                    String key = USER_KEY_PREFIX + userId;
                    User user = (User) masterRedisTemplate.opsForValue().get(key);
                    if (!ObjectUtils.isEmpty(user)) {
                        userMap.put(userId, user);
                    }
                }
            }
        }
        return userMap;
    }

    /**
     * Deletes a user (write — routed to the master).
     *
     * @param userId user id
     * @return true if a key was removed
     */
    public boolean deleteUser(String userId) {
        if (!StringUtils.hasText(userId)) {
            log.error("删除用户失败:用户ID为空");
            return false;
        }
        try {
            String key = USER_KEY_PREFIX + userId;
            Boolean delete = masterRedisTemplate.delete(key);
            log.info("主节点删除用户,key:{},结果:{}", key, delete);
            return Boolean.TRUE.equals(delete);
        } catch (Exception e) {
            log.error("主节点删除用户失败", e);
            return false;
        }
    }
}
主从复制调优要点:主节点可开启 repl-diskless-sync yes(无盘同步),减少RDB文件写入磁盘的耗时;开启 appendonly yes、appendfsync everysec,确保写操作持久化到磁盘;配置 min-replicas-to-write 1(至少1个从节点同步完成才确认写成功)与 min-replicas-max-lag 10(同步延迟不超过10秒),牺牲部分性能换数据一致性。

哨兵模式(Sentinel)是在主从复制基础上实现的“自动故障转移”方案,通过引入“哨兵节点”监控主从节点状态:
故障转移流程:

哨兵集群原理:
至少需要 quorum(配置的阈值)个哨兵认为主节点不可用,才确认主节点故障。

# 1. 复制3份哨兵配置文件
cd /usr/local/redis-7.2.5/
cp sentinel.conf sentinel-26379.conf
cp sentinel.conf sentinel-26380.conf
cp sentinel.conf sentinel-26381.conf
# 2. 配置哨兵节点1(sentinel-26379.conf)
vim sentinel-26379.conf
# 核心配置
bind 0.0.0.0
protected-mode no
port 26379
daemonize yes
logfile "/var/log/redis/sentinel-26379.log"  # 日志文件
# 监控主节点:sentinel monitor <主节点名称> <主节点IP> <主节点端口> <quorum阈值>
sentinel monitor mymaster 127.0.0.1 6379 2 # 2个哨兵确认即客观下线
# 主节点密码
sentinel auth-pass mymaster 123456
# 主节点无响应超时时间(默认30秒,单位毫秒)
sentinel down-after-milliseconds mymaster 30000
# 故障转移超时时间(默认180秒)
sentinel failover-timeout mymaster 180000
# 故障转移时,最多有多少个从节点同时同步新主节点(默认1,避免占用过多带宽)
sentinel parallel-syncs mymaster 1
# 3. 配置哨兵节点2(sentinel-26380.conf)
vim sentinel-26380.conf
# 核心配置(仅端口、日志文件不同)
port 26380
logfile "/var/log/redis/sentinel-26380.log"
sentinel monitor mymaster 127.0.0.1 6379 2
sentinel auth-pass mymaster 123456
# 4. 配置哨兵节点3(sentinel-26381.conf)
vim sentinel-26381.conf
# 核心配置(仅端口、日志文件不同)
port 26381
logfile "/var/log/redis/sentinel-26381.log"
sentinel monitor mymaster 127.0.0.1 6379 2
sentinel auth-pass mymaster 123456
# 5. 创建日志目录
mkdir -p /var/log/redis/
# 6. 启动哨兵节点
redis-sentinel sentinel-26379.conf
redis-sentinel sentinel-26380.conf
redis-sentinel sentinel-26381.conf
# 7. 验证哨兵状态
redis-cli -h 127.0.0.1 -p 26379 info sentinel
# 输出结果中应包含:sentinel_masters:1,sentinel_monitored_slaves:2,sentinel_running_sentinels:3
package com.jam.demo.config;

import com.alibaba.fastjson2.support.spring.data.redis.FastJson2RedisSerializer;
import lombok.extern.slf4j.Slf4j;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.data.redis.connection.RedisNode;
import org.springframework.data.redis.connection.RedisSentinelConfiguration;
import org.springframework.data.redis.connection.jedis.JedisClientConfiguration;
import org.springframework.data.redis.connection.jedis.JedisConnectionFactory;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.data.redis.serializer.StringRedisSerializer;
import org.springframework.util.ObjectUtils;
import redis.clients.jedis.JedisPoolConfig;

import java.time.Duration;
import java.util.HashSet;
import java.util.Set;

/**
 * Redis Sentinel configuration.
 *
 * <p>The connection factory asks the sentinels for the current master, so
 * writes keep working transparently across failovers.
 *
 * @author ken
 */
@Configuration
@Slf4j
public class RedisSentinelConfig {

    // Master name — must match `sentinel monitor <name> ...` in sentinel.conf.
    private static final String MASTER_NAME = "mymaster";
    // Sentinel endpoints as "host:port" pairs, comma separated.
    private static final String SENTINEL_NODES = "127.0.0.1:26379,127.0.0.1:26380,127.0.0.1:26381";
    // Password of the DATA nodes (master/replicas).
    private static final String REDIS_PASSWORD = "123456";

    /**
     * Jedis connection-pool settings.
     *
     * @return JedisPoolConfig pool configuration
     */
    @Bean
    public JedisPoolConfig jedisPoolConfig() {
        JedisPoolConfig poolConfig = new JedisPoolConfig();
        poolConfig.setMaxTotal(100);
        poolConfig.setMaxIdle(20);
        poolConfig.setMinIdle(5);
        poolConfig.setMaxWait(Duration.ofMillis(3000));
        poolConfig.setTestOnBorrow(true);
        return poolConfig;
    }

    /**
     * Builds the sentinel topology description.
     *
     * <p>NOTE(review): setPassword here is the data-node password; if the
     * sentinel processes themselves require auth, a separate sentinel
     * password must be configured — confirm against the deployment.
     *
     * @return RedisSentinelConfiguration with master name and sentinel nodes
     */
    @Bean
    public RedisSentinelConfiguration redisSentinelConfiguration() {
        RedisSentinelConfiguration sentinelConfig = new RedisSentinelConfiguration();
        sentinelConfig.setMasterName(MASTER_NAME);
        sentinelConfig.setPassword(REDIS_PASSWORD);
        // Parse "host:port,host:port,..." into RedisNode instances,
        // skipping blank or malformed entries.
        Set<RedisNode> sentinelNodeSet = new HashSet<>();
        String[] sentinelNodeArray = SENTINEL_NODES.split(",");
        for (String node : sentinelNodeArray) {
            if (ObjectUtils.isEmpty(node)) {
                continue;
            }
            String[] hostPort = node.split(":");
            if (hostPort.length != 2) {
                log.error("哨兵节点格式错误:{}", node);
                continue;
            }
            sentinelNodeSet.add(new RedisNode(hostPort[0], Integer.parseInt(hostPort[1])));
        }
        sentinelConfig.setSentinels(sentinelNodeSet);
        return sentinelConfig;
    }

    /**
     * Sentinel-aware connection factory.
     *
     * @return JedisConnectionFactory that resolves the master via sentinels
     */
    @Bean
    public JedisConnectionFactory jedisConnectionFactory() {
        RedisSentinelConfiguration sentinelConfig = redisSentinelConfiguration();
        JedisClientConfiguration clientConfig = JedisClientConfiguration.builder()
                .usePooling()
                .poolConfig(jedisPoolConfig())
                .build();
        JedisConnectionFactory connectionFactory = new JedisConnectionFactory(sentinelConfig, clientConfig);
        connectionFactory.afterPropertiesSet();
        return connectionFactory;
    }

    /**
     * RedisTemplate for sentinel mode: string keys, FastJSON2 values.
     *
     * @return RedisTemplate<String, Object>
     */
    @Bean
    public RedisTemplate<String, Object> sentinelRedisTemplate() {
        RedisTemplate<String, Object> redisTemplate = new RedisTemplate<>();
        redisTemplate.setConnectionFactory(jedisConnectionFactory());
        StringRedisSerializer stringSerializer = new StringRedisSerializer();
        FastJson2RedisSerializer<Object> fastJsonSerializer = new FastJson2RedisSerializer<>(Object.class);
        redisTemplate.setKeySerializer(stringSerializer);
        redisTemplate.setHashKeySerializer(stringSerializer);
        redisTemplate.setValueSerializer(fastJsonSerializer);
        redisTemplate.setHashValueSerializer(fastJsonSerializer);
        redisTemplate.afterPropertiesSet();
        log.info("Redis哨兵模式Template初始化成功");
        return redisTemplate;
    }
}
package com.jam.demo.service;

import com.google.common.collect.Maps;
import com.jam.demo.entity.User;
import jakarta.annotation.Resource;
import lombok.extern.slf4j.Slf4j;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.stereotype.Service;
import org.springframework.util.ObjectUtils;
import org.springframework.util.StringUtils;

import java.util.Map;
import java.util.concurrent.TimeUnit;

/**
 * User service on top of Redis Sentinel.
 *
 * <p>The sentinel-aware template always talks to whichever node the
 * sentinels currently report as master, so no manual failover handling
 * is needed here.
 *
 * @author ken
 */
@Service
@Slf4j
public class RedisSentinelService {

    @Resource
    private RedisTemplate<String, Object> sentinelRedisTemplate;

    private static final String USER_KEY_PREFIX = "user:";
    // TTL in minutes.
    private static final long USER_EXPIRE_TIME = 30L;

    /**
     * Saves a user (routed to the current master).
     *
     * @param user user entity; must be non-null with a non-blank id
     * @return true on success
     */
    public boolean saveUser(User user) {
        if (ObjectUtils.isEmpty(user) || !StringUtils.hasText(user.getId())) {
            log.error("保存用户失败:用户信息无效");
            return false;
        }
        try {
            String key = USER_KEY_PREFIX + user.getId();
            sentinelRedisTemplate.opsForValue().set(key, user, USER_EXPIRE_TIME, TimeUnit.MINUTES);
            log.info("保存用户成功,key:{}", key);
            return true;
        } catch (Exception e) {
            log.error("保存用户失败", e);
            return false;
        }
    }

    /**
     * Looks up a user by id.
     *
     * @param userId user id
     * @return the user, or null when missing / id blank / Redis failure
     */
    public User getUserById(String userId) {
        if (!StringUtils.hasText(userId)) {
            log.error("查询用户失败:用户ID为空");
            return null;
        }
        try {
            String key = USER_KEY_PREFIX + userId;
            User user = (User) sentinelRedisTemplate.opsForValue().get(key);
            log.info("查询用户,key:{},结果:{}", key, ObjectUtils.isEmpty(user) ? "空" : "存在");
            return user;
        } catch (Exception e) {
            log.error("查询用户失败", e);
            return null;
        }
    }

    /**
     * Batch-fetches users by id.
     *
     * @param userIds user ids; blank entries are skipped
     * @return id-to-user map for the found entries; never null
     */
    public Map<String, User> batchGetUsers(String... userIds) {
        if (ObjectUtils.isEmpty(userIds)) {
            log.error("批量查询用户失败:用户ID数组为空");
            return Maps.newHashMap();
        }
        Map<String, User> userMap = Maps.newHashMap();
        try {
            for (String userId : userIds) {
                if (StringUtils.hasText(userId)) {
                    String key = USER_KEY_PREFIX + userId;
                    User user = (User) sentinelRedisTemplate.opsForValue().get(key);
                    if (!ObjectUtils.isEmpty(user)) {
                        userMap.put(userId, user);
                    }
                }
            }
            log.info("批量查询用户完成,查询数量:{},成功数量:{}", userIds.length, userMap.size());
        } catch (Exception e) {
            // Best-effort: return whatever was fetched before the failure.
            log.error("批量查询用户失败", e);
        }
        return userMap;
    }

    /**
     * Deletes a user (routed to the current master).
     *
     * @param userId user id
     * @return true if a key was removed
     */
    public boolean deleteUser(String userId) {
        if (!StringUtils.hasText(userId)) {
            log.error("删除用户失败:用户ID为空");
            return false;
        }
        try {
            String key = USER_KEY_PREFIX + userId;
            Boolean delete = sentinelRedisTemplate.delete(key);
            log.info("删除用户,key:{},结果:{}", key, delete);
            return Boolean.TRUE.equals(delete);
        } catch (Exception e) {
            log.error("删除用户失败", e);
            return false;
        }
    }
}
哨兵核心配置回顾:sentinel monitor mymaster 127.0.0.1 6379 2;down-after-milliseconds:主节点无响应超时时间,默认30秒;failover-timeout:故障转移超时时间,默认180秒;parallel-syncs:故障转移时同时同步新主节点的从节点数量,默认1。

Redis Cluster 是 Redis 官方提供的分布式解决方案,核心解决「海量数据存储」和「高并发读写」两大痛点,通过「分片存储」+「自动高可用」的设计,实现横向扩展和容错能力。其核心逻辑基于「哈希槽(Hash Slot)」和「主从分片」:
Redis Cluster 将整个键空间划分为 16384 个哈希槽(编号 0-16383),数据存储的核心规则:
通过 CRC16(key) % 16384 计算键对应的哈希槽,将数据存储到负责该槽的主节点。

为保证高可用,Redis Cluster 要求每个主节点必须配置至少 1 个从节点,形成「主从分片」:
集群节点间通过「Gossip 协议」实时交换节点状态信息(如节点存活、哈希槽分配、故障状态),无需中心化协调节点,保证集群去中心化特性。


# 1. 创建节点配置目录
mkdir -p /usr/local/redis-cluster/{6379,6380,6381,6382,6383,6384}
# 2. 复制 Redis 核心文件到各节点目录
cd /usr/local/redis-7.2.5/
cp redis.conf /usr/local/redis-cluster/6379/
cp redis.conf /usr/local/redis-cluster/6380/
cp redis.conf /usr/local/redis-cluster/6381/
cp redis.conf /usr/local/redis-cluster/6382/
cp redis.conf /usr/local/redis-cluster/6383/
cp redis.conf /usr/local/redis-cluster/6384/
# 3. 统一修改所有节点配置(以 6379 为例,其他节点仅端口不同)
vim /usr/local/redis-cluster/6379/redis.conf
# 核心配置项
bind 0.0.0.0
protected-mode no
port 6379
daemonize yes
requirepass 123456 # 节点密码
masterauth 123456 # 主从同步密码(与 requirepass 一致)
appendonly yes # 开启 AOF 持久化
cluster-enabled yes # 开启集群模式
cluster-config-file nodes-6379.conf # 集群配置文件(自动生成)
cluster-node-timeout 15000 # 节点超时时间(15 秒,故障转移判断依据)
cluster-require-full-coverage no # 非全槽覆盖时集群仍可用(避免部分槽故障导致集群不可用)
# 4. 批量修改其他节点配置(替换端口)
for port in 6380 6381 6382 6383 6384; do
sed "s/6379/$port/g" /usr/local/redis-cluster/6379/redis.conf > /usr/local/redis-cluster/$port/redis.conf
done
# 5. 启动所有节点
for port in 6379 6380 6381 6382 6383 6384; do
redis-server /usr/local/redis-cluster/$port/redis.conf
done
# 6. 验证节点启动状态
ps -ef | grep redis-server | grep -v grep
# 应显示 6 个 redis-server 进程,分别对应 6379-6384 端口
# 7. 创建 Redis Cluster 集群
redis-cli --cluster create \
127.0.0.1:6379 127.0.0.1:6380 127.0.0.1:6381 \
127.0.0.1:6382 127.0.0.1:6383 127.0.0.1:6384 \
--cluster-replicas 1 \
-a 123456
# 参数说明:
# --cluster-replicas 1:每个主节点对应 1 个从节点
# -a 123456:节点密码(所有节点密码必须一致)
# 执行后会提示哈希槽分配方案,输入 yes 确认
# 示例输出:
# [OK] All 16384 slots covered. 表示集群创建成功
# 8. 验证集群状态
redis-cli -h 127.0.0.1 -p 6379 -a 123456 cluster info
# 关键输出:cluster_state:ok(集群正常)、cluster_slots_assigned:16384(所有槽已分配)
# 9. 查看节点信息
redis-cli -h 127.0.0.1 -p 6379 -a 123456 cluster nodes
# 输出包含各节点角色(master/slave)、哈希槽分配、主从对应关系
package com.jam.demo.config;

import com.alibaba.fastjson2.support.spring.data.redis.FastJson2RedisSerializer;
import lombok.extern.slf4j.Slf4j;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.data.redis.connection.RedisClusterConfiguration;
import org.springframework.data.redis.connection.RedisNode;
import org.springframework.data.redis.connection.jedis.JedisClientConfiguration;
import org.springframework.data.redis.connection.jedis.JedisConnectionFactory;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.data.redis.serializer.StringRedisSerializer;
import org.springframework.util.ObjectUtils;
import redis.clients.jedis.JedisPoolConfig;

import java.time.Duration;
import java.util.HashSet;
import java.util.Set;

/**
 * Redis Cluster configuration.
 *
 * <p>The client discovers the full slot map from the seed nodes and routes
 * each key to the node owning its hash slot; MOVED/ASK redirects are
 * followed up to {@link #MAX_REDIRECTS} times.
 *
 * @author ken
 */
@Configuration
@Slf4j
public class RedisClusterConfig {

    // Seed nodes, "ip:port" comma separated (client discovers the rest).
    private static final String CLUSTER_NODES = "127.0.0.1:6379,127.0.0.1:6380,127.0.0.1:6381,127.0.0.1:6382,127.0.0.1:6383,127.0.0.1:6384";
    // Password shared by all cluster nodes.
    private static final String REDIS_PASSWORD = "123456";
    // Max MOVED/ASK redirects to follow per command (driver default is 5).
    private static final int MAX_REDIRECTS = 3;

    /**
     * Jedis connection-pool settings tuned for cluster traffic.
     *
     * @return JedisPoolConfig pool configuration
     */
    @Bean
    public JedisPoolConfig jedisPoolConfig() {
        JedisPoolConfig poolConfig = new JedisPoolConfig();
        poolConfig.setMaxTotal(200);                       // max total connections (tune per workload)
        poolConfig.setMaxIdle(50);                         // max idle connections
        poolConfig.setMinIdle(10);                         // min idle connections
        poolConfig.setMaxWait(Duration.ofMillis(5000));    // max borrow wait (5s)
        poolConfig.setTestOnBorrow(true);                  // validate on borrow
        poolConfig.setTestOnReturn(true);                  // validate on return
        poolConfig.setTestWhileIdle(true);                 // evict dead idle connections
        return poolConfig;
    }

    /**
     * Builds the cluster topology description from the seed-node list.
     *
     * @return RedisClusterConfiguration with password, redirects and nodes
     * @throws IllegalArgumentException if a node entry is not "ip:port"
     */
    @Bean
    public RedisClusterConfiguration redisClusterConfiguration() {
        RedisClusterConfiguration clusterConfig = new RedisClusterConfiguration();
        clusterConfig.setPassword(REDIS_PASSWORD);
        clusterConfig.setMaxRedirects(MAX_REDIRECTS);
        Set<RedisNode> nodeSet = new HashSet<>();
        String[] nodes = CLUSTER_NODES.split(",");
        for (String node : nodes) {
            if (ObjectUtils.isEmpty(node)) {
                continue;
            }
            String[] hostPort = node.split(":");
            if (hostPort.length != 2) {
                log.error("Redis Cluster 节点格式错误:{}", node);
                // Fail fast: a malformed seed list is a configuration error.
                throw new IllegalArgumentException("Redis Cluster 节点格式错误,正确格式:ip:port");
            }
            nodeSet.add(new RedisNode(hostPort[0], Integer.parseInt(hostPort[1])));
        }
        clusterConfig.setClusterNodes(nodeSet);
        return clusterConfig;
    }

    /**
     * Cluster-aware connection factory.
     *
     * @return JedisConnectionFactory for the cluster
     */
    @Bean
    public JedisConnectionFactory jedisConnectionFactory() {
        RedisClusterConfiguration clusterConfig = redisClusterConfiguration();
        JedisClientConfiguration clientConfig = JedisClientConfiguration.builder()
                .usePooling()
                .poolConfig(jedisPoolConfig())
                .connectTimeout(Duration.ofMillis(3000))   // connect timeout
                .readTimeout(Duration.ofMillis(3000))      // read timeout
                .build();
        JedisConnectionFactory connectionFactory = new JedisConnectionFactory(clusterConfig, clientConfig);
        connectionFactory.afterPropertiesSet();
        log.info("Redis Cluster 连接工厂初始化成功");
        return connectionFactory;
    }

    /**
     * RedisTemplate for cluster mode — serializers match the single/replica/
     * sentinel setups so data written by any mode stays readable by all.
     *
     * @return RedisTemplate<String, Object>
     */
    @Bean
    public RedisTemplate<String, Object> clusterRedisTemplate() {
        RedisTemplate<String, Object> redisTemplate = new RedisTemplate<>();
        redisTemplate.setConnectionFactory(jedisConnectionFactory());
        StringRedisSerializer stringSerializer = new StringRedisSerializer();
        FastJson2RedisSerializer<Object> fastJsonSerializer = new FastJson2RedisSerializer<>(Object.class);
        redisTemplate.setKeySerializer(stringSerializer);
        redisTemplate.setHashKeySerializer(stringSerializer);
        redisTemplate.setValueSerializer(fastJsonSerializer);
        redisTemplate.setHashValueSerializer(fastJsonSerializer);
        redisTemplate.afterPropertiesSet();
        log.info("Redis Cluster RedisTemplate 初始化成功");
        return redisTemplate;
    }
}
package com.jam.demo.service;
import com.google.common.collect.Maps;
import com.jam.demo.entity.User;
import lombok.extern.slf4j.Slf4j;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.stereotype.Service;
import org.springframework.util.ObjectUtils;
import org.springframework.util.StringUtils;
import javax.annotation.Resource;
import java.util.Map;
import java.util.concurrent.TimeUnit;
/**
* Redis Cluster 集群业务服务
* @author ken
*/
@Service
@Slf4j
publicclass RedisClusterService {
@Resource
private RedisTemplate<String, Object> clusterRedisTemplate;
privatestaticfinal String USER_KEY_PREFIX = "user:";
privatestaticfinallong USER_EXPIRE_TIME = 30L; // 30 分钟过期
/**
* 保存用户(自动路由到对应哈希槽的主节点)
* @param user 用户实体
* @return boolean 保存结果
*/
public boolean saveUser(User user) {
if (ObjectUtils.isEmpty(user) || StringUtils.isEmpty(user.getId())) {
log.error("保存用户失败:用户信息为空或 ID 无效");
returnfalse;
}
try {
String key = USER_KEY_PREFIX + user.getId();
// 自动路由:根据 key 计算哈希槽,定位到对应主节点
clusterRedisTemplate.opsForValue().set(key, user, USER_EXPIRE_TIME, TimeUnit.MINUTES);
log.info("Cluster 模式保存用户成功,key:{}", key);
returntrue;
} catch (Exception e) {
log.error("Cluster 模式保存用户失败", e);
returnfalse;
}
}
/**
* 根据 ID 查询用户(自动路由到对应哈希槽的节点)
* @param userId 用户 ID
* @return User 用户实体
*/
public User getUserById(String userId) {
if (StringUtils.isEmpty(userId)) {
log.error("查询用户失败:用户 ID 为空");
returnnull;
}
try {
String key = USER_KEY_PREFIX + userId;
User user = (User) clusterRedisTemplate.opsForValue().get(key);
log.info("Cluster 模式查询用户,key:{},结果:{}", key, ObjectUtils.isEmpty(user) ? "空" : "存在");
return user;
} catch (Exception e) {
log.error("Cluster 模式查询用户失败", e);
returnnull;
}
}
/**
* 批量查询用户(注意:若 keys 分布在不同槽,会触发多节点查询)
* @param userIds 用户 ID 列表
* @return Map<String, User> 用户 ID-实体映射
*/
public Map<String, User> batchGetUsers(String... userIds) {
if (ObjectUtils.isEmpty(userIds)) {
log.error("批量查询用户失败:用户 ID 数组为空");
return Maps.newHashMap();
}
Map<String, User> userMap = Maps.newHashMap();
try {
for (String userId : userIds) {
if (StringUtils.hasText(userId)) {
String key = USER_KEY_PREFIX + userId;
User user = (User) clusterRedisTemplate.opsForValue().get(key);
if (!ObjectUtils.isEmpty(user)) {
userMap.put(userId, user);
}
}
}
log.info("Cluster 模式批量查询用户完成,查询数量:{},成功数量:{}", userIds.length, userMap.size());
} catch (Exception e) {
log.error("Cluster 模式批量查询用户失败", e);
}
return userMap;
}
/**
* 删除用户(自动路由到对应哈希槽的主节点)
* @param userId 用户 ID
* @return boolean 删除结果
*/
public boolean deleteUser(String userId) {
if (StringUtils.isEmpty(userId)) {
log.error("删除用户失败:用户 ID 为空");
returnfalse;
}
try {
String key = USER_KEY_PREFIX + userId;
Boolean delete = clusterRedisTemplate.delete(key);
log.info("Cluster 模式删除用户,key:{},结果:{}", key, delete);
return Boolean.TRUE.equals(delete);
} catch (Exception e) {
log.error("Cluster 模式删除用户失败", e);
returnfalse;
}
}
/**
* 验证集群容错性(模拟主节点故障后的数据一致性)
* @param userId 用户 ID
* @return boolean 故障后是否能正常查询
*/
public boolean testClusterFaultTolerance(String userId) {
if (StringUtils.isEmpty(userId)) {
log.error("验证集群容错性失败:用户 ID 为空");
returnfalse;
}
String key = USER_KEY_PREFIX + userId;
// 1. 先查询数据(确认存在)
User user = (User) clusterRedisTemplate.opsForValue().get(key);
if (ObjectUtils.isEmpty(user)) {
log.error("验证失败:用户数据不存在,key:{}", key);
returnfalse;
}
// 2. 模拟主节点故障(实际生产环境无需手动执行,集群自动检测)
log.warn("模拟主节点故障:可通过命令停止对应主节点,如 redis-cli -p 6379 -a 127.0.0.1 shutdown");
// 3. 故障转移后再次查询(验证数据一致性)
try {
// 等待故障转移完成(15-30 秒)
Thread.sleep(20000);
User faultUser = (User) clusterRedisTemplate.opsForValue().get(key);
boolean result = !ObjectUtils.isEmpty(faultUser);
log.info("集群容错性验证结果:{},故障后查询用户:{}", result, faultUser);
return result;
} catch (InterruptedException e) {
log.error("验证集群容错性失败", e);
Thread.currentThread().interrupt();
returnfalse;
}
}
}
package com.jam.demo.controller;
import com.jam.demo.entity.User;
import com.jam.demo.service.RedisClusterService;
import io.swagger.v3.oas.annotations.Operation;
import io.swagger.v3.oas.annotations.Parameter;
import io.swagger.v3.oas.annotations.tags.Tag;
import lombok.extern.slf4j.Slf4j;
import org.springframework.http.HttpStatus;
import org.springframework.http.ResponseEntity;
import org.springframework.web.bind.annotation.*;
import org.springframework.util.ObjectUtils;
import javax.annotation.Resource;
import java.util.Map;
/**
* Redis Cluster 集群接口
* @author ken
*/
@RestController
@RequestMapping("/redis/cluster")
@Slf4j
@Tag(name = "Redis Cluster 集群接口", description = "基于 Redis Cluster 集群的用户信息操作接口(支持分片存储和自动高可用)")
publicclass RedisClusterController {
@Resource
private RedisClusterService redisClusterService;
@PostMapping("/user")
@Operation(summary = "保存用户信息", description = "自动路由到对应哈希槽的主节点,设置 30 分钟过期")
public ResponseEntity<Boolean> saveUser(@RequestBody User user) {
boolean result = redisClusterService.saveUser(user);
returnnew ResponseEntity<>(result, HttpStatus.OK);
}
@GetMapping("/user/{userId}")
@Operation(summary = "查询用户信息", description = "自动路由到对应哈希槽的节点(主/从)")
public ResponseEntity<User> getUserById(
@Parameter(description = "用户 ID", required = true) @PathVariable String userId) {
User user = redisClusterService.getUserById(userId);
returnnew ResponseEntity<>(user, HttpStatus.OK);
}
@GetMapping("/users")
@Operation(summary = "批量查询用户信息", description = "多 ID 可能分布在不同槽,触发多节点并行查询")
public ResponseEntity<Map<String, User>> batchGetUsers(
@Parameter(description = "用户 ID 列表(多个用逗号分隔)", required = true) @RequestParam String userIds) {
if (ObjectUtils.isEmpty(userIds)) {
returnnew ResponseEntity<>(HttpStatus.BAD_REQUEST);
}
String[] userIdArray = userIds.split(",");
Map<String, User> userMap = redisClusterService.batchGetUsers(userIdArray);
returnnew ResponseEntity<>(userMap, HttpStatus.OK);
}
@DeleteMapping("/user/{userId}")
@Operation(summary = "删除用户信息", description = "自动路由到对应哈希槽的主节点执行删除")
public ResponseEntity<Boolean> deleteUser(
@Parameter(description = "用户 ID", required = true) @PathVariable String userId) {
boolean result = redisClusterService.deleteUser(userId);
returnnew ResponseEntity<>(result, HttpStatus.OK);
}
@GetMapping("/fault-tolerance/{userId}")
@Operation(summary = "验证集群容错性", description = "模拟主节点故障后,验证是否能正常查询数据(需等待 20 秒故障转移)")
public ResponseEntity<Boolean> testFaultTolerance(
@Parameter(description = "用户 ID(需提前保存)", required = true) @PathVariable String userId) {
boolean result = redisClusterService.testClusterFaultTolerance(userId);
returnnew ResponseEntity<>(result, HttpStatus.OK);
}
}
当一次操作涉及的多个 key 分布在不同哈希槽时(如 MSET 的多个 key 分布在不同槽、KEYS 通配符匹配到多槽 key),集群会抛出 CROSSSLOT Keys in request don't hash to the same slot 异常;可使用 {hashTag} 让多个 key 映射到同一槽,如 user:{1001}:name、user:{1001}:age(CRC16 仅基于 {} 内的内容计算槽位)。

# 1. 启动新增节点(6385 主、6386 从)
redis-server /usr/local/redis-cluster/6385/redis.conf
redis-server /usr/local/redis-cluster/6386/redis.conf
# 2. 将新增主节点加入集群
redis-cli --cluster add-node 127.0.0.1:6385 127.0.0.1:6379 -a 123456
# 3. 将新增从节点加入集群,并指定主节点(6385)
# 先获取 6385 节点的 ID(通过 cluster nodes 命令)
redis-cli --cluster add-node 127.0.0.1:6386 127.0.0.1:6379 -a 123456 --cluster-slave --cluster-master-id 6385节点ID
# 4. 迁移哈希槽到新增主节点(按需分配槽数量)
redis-cli --cluster reshard 127.0.0.1:6379 -a 123456
# 执行后按提示输入:
# 1. 需要迁移的槽数量(如 2000)
# 2. 接收槽的主节点 ID(6385 节点 ID)
# 3. 迁移来源(输入 all 从所有主节点迁移,或输入具体节点 ID 定向迁移)
# 4. 输入 yes 确认迁移
# 1. 将 6385 主节点的哈希槽迁移到其他主节点
redis-cli --cluster reshard 127.0.0.1:6379 -a 123456
# 提示输入:
# 1. 迁移槽数量(6385 节点的所有槽,如 2000)
# 2. 接收槽的主节点 ID(如 6379)
# 3. 迁移来源(6385 节点 ID)
# 4. 输入 yes 确认迁移
# 2. 移除从节点 6386
redis-cli --cluster del-node 127.0.0.1:6386 6386节点ID -a 123456
# 3. 移除主节点 6385(需确保无哈希槽)
redis-cli --cluster del-node 127.0.0.1:6385 6385节点ID -a 123456
# 4. 停止节点
redis-cli -h 127.0.0.1 -p 6385 -a 123456 shutdown
redis-cli -h 127.0.0.1 -p 6386 -a 123456 shutdown
调优与使用建议:开启 repl-diskless-sync yes(无盘同步),减少 RDB 写入耗时;对一致性要求高的写操作可使用 WAIT 1 5000 命令(等待至少 1 个从节点同步完成,超时 5 秒),牺牲部分性能换取一致性;持久化采用 appendfsync everysec,确保数据及时落盘;尽量避免跨槽的多 key 命令(MSET、KEYS、SINTER 等)。

部署方式 | 核心优势 | 核心劣势 | 可用性 | 扩展性 | 适用场景 |
|---|---|---|---|---|---|
单机版 | 部署简单、运维成本低 | 单点故障、无扩展性 | 低 | 无 | 开发/测试环境、小型工具 |
主从复制 | 读写分离、数据备份 | 手动故障转移、无扩展性 | 中 | 读扩展 | 读多写少、中小型应用 |
哨兵模式 | 自动故障转移、读写分离 | 无存储扩展性、运维较复杂 | 中高 | 读扩展 | 中小型到中大型应用、需自动容错 |
Redis Cluster | 分片存储、自动高可用、横向扩展 | 部署复杂、不支持跨槽操作 | 高 | 全扩展 | 大型分布式系统、海量数据、高并发 |

Redis 的四种部署方式各有侧重,从简单到复杂,从单节点到分布式,覆盖了从开发测试到大型生产环境的全场景需求:
选型的核心是匹配业务需求:无需过度设计(如小型应用无需 Cluster),也不可忽视风险(如生产环境不可用单机版)。同时,无论选择哪种部署方式,都需关注持久化配置、主从同步、故障转移等核心要点,确保数据安全和服务稳定。