Redis 主从集群：主机下线后使用 cluster failover 修复主从关系，恢复原有的主从结构
[root@localhost ~]# redis-server /myredis/cluster/redisCluster6382.conf [root@localhost ~]# ifconfig ens33: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500 inet 192.168.92.129 netmask 255.255.255.0 broadcast 192.168.92.255 inet6 fe80::5d0e:f975:2a1e:b6d2 prefixlen 64 scopeid 0x20<link> ether 00:0c:29:8f:1b:2b txqueuelen 1000 (Ethernet) RX packets 305 bytes 54134 (52.8 KiB) RX errors 0 dropped 0 overruns 0 frame 0 TX packets 166 bytes 17055 (16.6 KiB) TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0 lo: flags=73<UP,LOOPBACK,RUNNING> mtu 65536 inet 127.0.0.1 netmask 255.0.0.0 inet6 ::1 prefixlen 128 scopeid 0x10<host> loop txqueuelen 1000 (Local Loopback) RX packets 64 bytes 5568 (5.4 KiB) RX errors 0 dropped 0 overruns 0 frame 0 TX packets 64 bytes 5568 (5.4 KiB) TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0 virbr0: flags=4099<UP,BROADCAST,MULTICAST> mtu 1500 inet 192.168.122.1 netmask 255.255.255.0 broadcast 192.168.122.255 ether 52:54:00:55:d3:7b txqueuelen 1000 (Ethernet) RX packets 0 bytes 0 (0.0 B) RX errors 0 dropped 0 overruns 0 frame 0 TX packets 0 bytes 0 (0.0 B) TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0 [root@localhost ~]# systemctl status firewalld ● firewalld.service - firewalld - dynamic firewall daemon Loaded: loaded (/usr/lib/systemd/system/firewalld.service; enabled; vendor preset: enabled) Active: active (running) since 二 2024-01-23 19:45:32 CST; 7min ago Docs: man:firewalld(1) Main PID: 818 (firewalld) Tasks: 2 CGroup: /system.slice/firewalld.service └─818 /usr/bin/python2 -Es /usr/sbin/firewalld --nofork --nopid 1月 23 19:45:32 localhost.localdomain systemd[1]: Starting firewalld - dynam... 1月 23 19:45:32 localhost.localdomain systemd[1]: Started firewalld - dynami... 1月 23 19:45:33 localhost.localdomain firewalld[818]: WARNING: AllowZoneDrif... Hint: Some lines were ellipsized, use -l to show in full. 
[root@localhost ~]# systemctl disable firewalld Removed symlink /etc/systemd/system/multi-user.target.wants/firewalld.service. Removed symlink /etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service. [root@localhost ~]# systemctl status firewalld ● firewalld.service - firewalld - dynamic firewall daemon Loaded: loaded (/usr/lib/systemd/system/firewalld.service; disabled; vendor preset: enabled) Active: active (running) since 二 2024-01-23 19:45:32 CST; 8min ago Docs: man:firewalld(1) Main PID: 818 (firewalld) CGroup: /system.slice/firewalld.service └─818 /usr/bin/python2 -Es /usr/sbin/firewalld --nofork --nopid 1月 23 19:45:32 localhost.localdomain systemd[1]: Starting firewalld - dynam... 1月 23 19:45:32 localhost.localdomain systemd[1]: Started firewalld - dynami... 1月 23 19:45:33 localhost.localdomain firewalld[818]: WARNING: AllowZoneDrif... Hint: Some lines were ellipsized, use -l to show in full. [root@localhost ~]# systemctl disable firewalld [root@localhost ~]# systemctl status firewalld ● firewalld.service - firewalld - dynamic firewall daemon Loaded: loaded (/usr/lib/systemd/system/firewalld.service; disabled; vendor preset: enabled) Active: active (running) since 二 2024-01-23 19:45:32 CST; 8min ago Docs: man:firewalld(1) Main PID: 818 (firewalld) CGroup: /system.slice/firewalld.service └─818 /usr/bin/python2 -Es /usr/sbin/firewalld --nofork --nopid 1月 23 19:45:32 localhost.localdomain systemd[1]: Starting firewalld - dynam... 1月 23 19:45:32 localhost.localdomain systemd[1]: Started firewalld - dynami... 1月 23 19:45:33 localhost.localdomain firewalld[818]: WARNING: AllowZoneDrif... Hint: Some lines were ellipsized, use -l to show in full. 
[root@localhost ~]# systemctl status firewalld ● firewalld.service - firewalld - dynamic firewall daemon Loaded: loaded (/usr/lib/systemd/system/firewalld.service; disabled; vendor preset: enabled) Active: active (running) since 二 2024-01-23 19:45:32 CST; 8min ago Docs: man:firewalld(1) Main PID: 818 (firewalld) CGroup: /system.slice/firewalld.service └─818 /usr/bin/python2 -Es /usr/sbin/firewalld --nofork --nopid 1月 23 19:45:32 localhost.localdomain systemd[1]: Starting firewalld - dynam... 1月 23 19:45:32 localhost.localdomain systemd[1]: Started firewalld - dynami... 1月 23 19:45:33 localhost.localdomain firewalld[818]: WARNING: AllowZoneDrif... Hint: Some lines were ellipsized, use -l to show in full. [root@localhost ~]# systemctl disable firewalld [root@localhost ~]# systemctl status firewalld ● firewalld.service - firewalld - dynamic firewall daemon Loaded: loaded (/usr/lib/systemd/system/firewalld.service; disabled; vendor preset: enabled) Active: active (running) since 二 2024-01-23 19:45:32 CST; 8min ago Docs: man:firewalld(1) Main PID: 818 (firewalld) CGroup: /system.slice/firewalld.service └─818 /usr/bin/python2 -Es /usr/sbin/firewalld --nofork --nopid 1月 23 19:45:32 localhost.localdomain systemd[1]: Starting firewalld - dynam... 1月 23 19:45:32 localhost.localdomain systemd[1]: Started firewalld - dynami... 1月 23 19:45:33 localhost.localdomain firewalld[818]: WARNING: AllowZoneDrif... Hint: Some lines were ellipsized, use -l to show in full. [root@localhost ~]# systemctl stop firewalld [root@localhost ~]# systemctl status firewalld ● firewalld.service - firewalld - dynamic firewall daemon Loaded: loaded (/usr/lib/systemd/system/firewalld.service; disabled; vendor preset: enabled) Active: inactive (dead) Docs: man:firewalld(1) 1月 23 19:45:32 localhost.localdomain systemd[1]: Starting firewalld - dynam... 1月 23 19:45:32 localhost.localdomain systemd[1]: Started firewalld - dynami... 
1月 23 19:45:33 localhost.localdomain firewalld[818]: WARNING: AllowZoneDrif... 1月 23 19:55:01 localhost.localdomain systemd[1]: Stopping firewalld - dynam... 1月 23 19:55:03 localhost.localdomain systemd[1]: Stopped firewalld - dynami... Hint: Some lines were ellipsized, use -l to show in full. [root@localhost ~]# redis-cli -a abc123 --cluster create --cluster-replicas 1 192.168.92.129:6381 Warning: Using a password with '-a' or '-u' option on the command line interface may not be safe. [ERR] Wrong number of arguments for specified --cluster sub command [root@localhost ~]# 192.168.92.129:6382 192.168.92.130:6383 192.168.92.130:6384 192.168.92.131:6385 bash: 192.168.92.129:6382: 未找到命令... [root@localhost ~]# redis-cli -a abc123 --cluster create --cluster-replicas 1 192.168.92.129:6381 192.168.92.129:6382 192.168.92.130:6383 192.168.92.130:6384 192.168.92.131:6385 192.168.92.131:6386 Warning: Using a password with '-a' or '-u' option on the command line interface may not be safe. Could not connect to Redis at 192.168.92.129:6382: Connection refused [root@localhost ~]# ps -ef|grep redis root 2612 1 0 19:47 ? 00:00:01 redis-server 0.0.0.0:6381 [cluster] root 3005 2544 0 20:00 pts/0 00:00:00 grep --color=auto redis [root@localhost ~]# redis-server /myredis/cluster/redisCluster6382.conf [root@localhost ~]# ps -ef|grep redis root 2612 1 0 19:47 ? 00:00:01 redis-server 0.0.0.0:6381 [cluster] root 3034 2544 0 20:01 pts/0 00:00:00 grep --color=auto redis [root@localhost ~]# cd /myredis/cluster/ [root@localhost cluster]# vim redisCluster6382.conf [root@localhost cluster]# vim redisCluster6382.conf [root@localhost cluster]# redis-cli -a abc123 -p 6381 Warning: Using a password with '-a' or '-u' option on the command line interface may not be safe. 
127.0.0.1:6381> shutdown not connected> quit [root@localhost cluster]# redis-server /myredis/cluster/redisCluster6382.conf [root@localhost cluster]# redis-server /myredis/cluster/redisCluster6381.conf [root@localhost cluster]# ps -ef|grep redis root 3142 1 0 20:07 ? 00:00:00 redis-server 0.0.0.0:6382 [cluster] root 3154 1 0 20:07 ? 00:00:00 redis-server 0.0.0.0:6381 [cluster] root 3175 2544 0 20:07 pts/0 00:00:00 grep --color=auto redis [root@localhost cluster]# redis-cli -a abc123 --cluster create --cluster-replicas 1 192.168.92.129:6381 192.168.92.129:6382 192.168.92.130:6383 192.168.92.130:6384 192.168.92.131:6385 192.168.92.131:6386 Warning: Using a password with '-a' or '-u' option on the command line interface may not be safe. >>> Performing hash slots allocation on 6 nodes... Master[0] -> Slots 0 - 5460 Master[1] -> Slots 5461 - 10922 Master[2] -> Slots 10923 - 16383 Adding replica 192.168.92.130:6384 to 192.168.92.129:6381 Adding replica 192.168.92.131:6386 to 192.168.92.130:6383 Adding replica 192.168.92.129:6382 to 192.168.92.131:6385 M: ee718fe879729ec1f961b08c4342c78537997ab4 192.168.92.129:6381 slots:[0-5460] (5461 slots) master S: 2eabe702c6bba38aee780477fab74bc4969d9bf3 192.168.92.129:6382 replicates 30b1f7bfccd93049117e1dab1cd6385eaf800015 M: 1fca4878cd2a2a4fe055003f53d53c84cb589859 192.168.92.130:6383 slots:[5461-10922] (5462 slots) master S: 69d920127eec13adc0d9fcaf0e9448a80d1a4803 192.168.92.130:6384 replicates ee718fe879729ec1f961b08c4342c78537997ab4 M: 30b1f7bfccd93049117e1dab1cd6385eaf800015 192.168.92.131:6385 slots:[10923-16383] (5461 slots) master S: a5757758fd0bdd8106e10905bbd1674c574f10c1 192.168.92.131:6386 replicates 1fca4878cd2a2a4fe055003f53d53c84cb589859 Can I set the above configuration? (type 'yes' to accept): yes >>> Nodes configuration updated >>> Assign a different config epoch to each node >>> Sending CLUSTER MEET messages to join the cluster Waiting for the cluster to join . 
>>> Performing Cluster Check (using node 192.168.92.129:6381) M: ee718fe879729ec1f961b08c4342c78537997ab4 192.168.92.129:6381 slots:[0-5460] (5461 slots) master 1 additional replica(s) M: 1fca4878cd2a2a4fe055003f53d53c84cb589859 192.168.92.130:6383 slots:[5461-10922] (5462 slots) master 1 additional replica(s) S: a5757758fd0bdd8106e10905bbd1674c574f10c1 192.168.92.131:6386 slots: (0 slots) slave replicates 1fca4878cd2a2a4fe055003f53d53c84cb589859 S: 2eabe702c6bba38aee780477fab74bc4969d9bf3 192.168.92.129:6382 slots: (0 slots) slave replicates 30b1f7bfccd93049117e1dab1cd6385eaf800015 M: 30b1f7bfccd93049117e1dab1cd6385eaf800015 192.168.92.131:6385 slots:[10923-16383] (5461 slots) master 1 additional replica(s) S: 69d920127eec13adc0d9fcaf0e9448a80d1a4803 192.168.92.130:6384 slots: (0 slots) slave replicates ee718fe879729ec1f961b08c4342c78537997ab4 [OK] All nodes agree about slots configuration. >>> Check for open slots... >>> Check slots coverage... [OK] All 16384 slots covered. [root@localhost cluster]# cd /myredis/cluster/ [root@localhost cluster]# ll 总用量 36 drwxr-xr-x. 2 root root 224 1月 23 20:12 appendonlydir -rw-r--r--. 1 root root 7189 1月 23 20:12 cluster6381.log -rw-r--r--. 1 root root 4044 1月 23 20:12 cluster6382.log -rw-r--r--. 1 root root 88 1月 23 20:07 dump6381.rdb -rw-r--r--. 1 root root 171 1月 23 20:12 dump6382.rdb -rw-r--r--. 1 root root 1183 1月 23 20:12 nodes-6381.conf -rw-r--r--. 1 root root 1183 1月 23 20:12 nodes-6382.conf -rw-r--r--. 1 root root 347 1月 23 19:36 redisCluster6381.conf -rw-r--r--. 1 root root 348 1月 23 20:04 redisCluster6382.conf [root@localhost cluster]# ps -ef|grep redis root 3142 1 0 20:07 ? 00:00:00 redis-server 0.0.0.0:6382 [cluster] root 3154 1 0 20:07 ? 00:00:00 redis-server 0.0.0.0:6381 [cluster] root 3302 2544 0 20:15 pts/0 00:00:00 grep --color=auto redis [root@localhost cluster]# redis-cli -a abc123 -p 6381 Warning: Using a password with '-a' or '-u' option on the command line interface may not be safe. 
127.0.0.1:6381> info replication # Replication role:master connected_slaves:1 slave0:ip=192.168.92.130,port=6384,state=online,offset=308,lag=1 master_failover_state:no-failover master_replid:e33ee80b0ca4bd8c4030f194d3b1b262f20f42b0 master_replid2:0000000000000000000000000000000000000000 master_repl_offset:308 second_repl_offset:-1 repl_backlog_active:1 repl_backlog_size:1048576 repl_backlog_first_byte_offset:1 repl_backlog_histlen:308 127.0.0.1:6381> cluster nodes 1fca4878cd2a2a4fe055003f53d53c84cb589859 192.168.92.130:6383@16383 master - 0 1706012179829 3 connected 5461-10922 a5757758fd0bdd8106e10905bbd1674c574f10c1 192.168.92.131:6386@16386 slave 1fca4878cd2a2a4fe055003f53d53c84cb589859 0 1706012180000 3 connected 2eabe702c6bba38aee780477fab74bc4969d9bf3 192.168.92.129:6382@16382 slave 30b1f7bfccd93049117e1dab1cd6385eaf800015 0 1706012178621 5 connected 30b1f7bfccd93049117e1dab1cd6385eaf800015 192.168.92.131:6385@16385 master - 0 1706012180000 5 connected 10923-16383 ee718fe879729ec1f961b08c4342c78537997ab4 192.168.92.129:6381@16381 myself,master - 0 1706012180000 1 connected 0-5460 69d920127eec13adc0d9fcaf0e9448a80d1a4803 192.168.92.130:6384@16384 slave ee718fe879729ec1f961b08c4342c78537997ab4 0 1706012180837 1 connected 127.0.0.1:6381> cluster info cluster_state:ok cluster_slots_assigned:16384 cluster_slots_ok:16384 cluster_slots_pfail:0 cluster_slots_fail:0 cluster_known_nodes:6 cluster_size:3 cluster_current_epoch:6 cluster_my_epoch:1 cluster_stats_messages_ping_sent:642 cluster_stats_messages_pong_sent:657 cluster_stats_messages_sent:1299 cluster_stats_messages_ping_received:652 cluster_stats_messages_pong_received:642 cluster_stats_messages_meet_received:5 cluster_stats_messages_received:1299 total_cluster_links_buffer_limit_exceeded:0 127.0.0.1:6381> keys * (empty array) 127.0.0.1:6381> set k1 v1 (error) MOVED 12706 192.168.92.131:6385 127.0.0.1:6381> set k2 v2 OK 127.0.0.1:6381> keys * 1) "k2" 127.0.0.1:6381> flushall OK 127.0.0.1:6381> keys * (empty 
array) 127.0.0.1:6381> quit [root@localhost cluster]# redis-cli -a abc123 -p 6381 -c Warning: Using a password with '-a' or '-u' option on the command line interface may not be safe. 127.0.0.1:6381> keys * (empty array) 127.0.0.1:6381> set k1 v1 -> Redirected to slot [12706] located at 192.168.92.131:6385 OK 192.168.92.131:6385> get k1 "v1" 192.168.92.131:6385> set k2 v2 -> Redirected to slot [449] located at 192.168.92.129:6381 OK 192.168.92.129:6381> set k3 v3 OK 192.168.92.129:6381> set k4 v4 -> Redirected to slot [8455] located at 192.168.92.130:6383 OK 192.168.92.130:6383> cluster keyslot k1 (integer) 12706 192.168.92.130:6383> cluster keyslot k2 (integer) 449 192.168.92.130:6383> cluster nodes a5757758fd0bdd8106e10905bbd1674c574f10c1 192.168.92.131:6386@16386 slave 1fca4878cd2a2a4fe055003f53d53c84cb589859 0 1706013258974 3 connected 69d920127eec13adc0d9fcaf0e9448a80d1a4803 192.168.92.130:6384@16384 slave ee718fe879729ec1f961b08c4342c78537997ab4 0 1706013258572 1 connected 30b1f7bfccd93049117e1dab1cd6385eaf800015 192.168.92.131:6385@16385 master - 0 1706013259477 5 connected 10923-16383 2eabe702c6bba38aee780477fab74bc4969d9bf3 192.168.92.129:6382@16382 slave 30b1f7bfccd93049117e1dab1cd6385eaf800015 0 1706013259000 5 connected 1fca4878cd2a2a4fe055003f53d53c84cb589859 192.168.92.130:6383@16383 myself,master - 0 1706013259000 3 connected 5461-10922 ee718fe879729ec1f961b08c4342c78537997ab4 192.168.92.129:6381@16381 master - 0 1706013260082 1 connected 0-5460 192.168.92.130:6383> quit [root@localhost cluster]# redis-cli -a abc123 -p 6381 -c Warning: Using a password with '-a' or '-u' option on the command line interface may not be safe. 127.0.0.1:6381> shutdown not connected> quit [root@localhost cluster]# redis-cli -a abc123 -p 6382 -c Warning: Using a password with '-a' or '-u' option on the command line interface may not be safe. 
127.0.0.1:6382> cluster nodes a5757758fd0bdd8106e10905bbd1674c574f10c1 192.168.92.131:6386@16386 slave 1fca4878cd2a2a4fe055003f53d53c84cb589859 0 1706013398731 3 connected ee718fe879729ec1f961b08c4342c78537997ab4 192.168.92.129:6381@16381 master,fail - 1706013346893 1706013345000 1 disconnected 69d920127eec13adc0d9fcaf0e9448a80d1a4803 192.168.92.130:6384@16384 master - 0 1706013398000 7 connected 0-5460 2eabe702c6bba38aee780477fab74bc4969d9bf3 192.168.92.129:6382@16382 myself,slave 30b1f7bfccd93049117e1dab1cd6385eaf800015 0 1706013398000 5 connected 30b1f7bfccd93049117e1dab1cd6385eaf800015 192.168.92.131:6385@16385 master - 0 1706013399538 5 connected 10923-16383 1fca4878cd2a2a4fe055003f53d53c84cb589859 192.168.92.130:6383@16383 master - 0 1706013399538 3 connected 5461-10922 127.0.0.1:6382> set k1 v11 -> Redirected to slot [12706] located at 192.168.92.131:6385 OK 192.168.92.131:6385> set k2 v2 -> Redirected to slot [449] located at 192.168.92.130:6384 OK 192.168.92.130:6384> 192.168.92.130:6384> set k2 v22 OK 192.168.92.130:6384> set k3 v33 OK 192.168.92.130:6384> cluster nodes 30b1f7bfccd93049117e1dab1cd6385eaf800015 192.168.92.131:6385@16385 master - 0 1706013534549 5 connected 10923-16383 a5757758fd0bdd8106e10905bbd1674c574f10c1 192.168.92.131:6386@16386 slave 1fca4878cd2a2a4fe055003f53d53c84cb589859 0 1706013534041 3 connected 69d920127eec13adc0d9fcaf0e9448a80d1a4803 192.168.92.130:6384@16384 myself,master - 0 1706013535000 7 connected 0-5460 ee718fe879729ec1f961b08c4342c78537997ab4 192.168.92.129:6381@16381 master,fail - 1706013348018 1706013345453 1 disconnected 1fca4878cd2a2a4fe055003f53d53c84cb589859 192.168.92.130:6383@16383 master - 0 1706013533535 3 connected 5461-10922 2eabe702c6bba38aee780477fab74bc4969d9bf3 192.168.92.129:6382@16382 slave 30b1f7bfccd93049117e1dab1cd6385eaf800015 0 1706013535560 5 connected 192.168.92.130:6384> quit [root@localhost cluster]# redis-server /myredis/cluster/redisCluster6381.conf [root@localhost cluster]# redis-cli -a 
abc123 -p 6381 -c Warning: Using a password with '-a' or '-u' option on the command line interface may not be safe. 127.0.0.1:6381> cluster nodes 69d920127eec13adc0d9fcaf0e9448a80d1a4803 192.168.92.130:6384@16384 master - 0 1706013588124 7 connected 0-5460 a5757758fd0bdd8106e10905bbd1674c574f10c1 192.168.92.131:6386@16386 slave 1fca4878cd2a2a4fe055003f53d53c84cb589859 0 1706013588000 3 connected 2eabe702c6bba38aee780477fab74bc4969d9bf3 192.168.92.129:6382@16382 slave 30b1f7bfccd93049117e1dab1cd6385eaf800015 0 1706013586513 5 connected 1fca4878cd2a2a4fe055003f53d53c84cb589859 192.168.92.130:6383@16383 master - 0 1706013587116 3 connected 5461-10922 30b1f7bfccd93049117e1dab1cd6385eaf800015 192.168.92.131:6385@16385 master - 0 1706013587000 5 connected 10923-16383 ee718fe879729ec1f961b08c4342c78537997ab4 192.168.92.129:6381@16381 myself,slave 69d920127eec13adc0d9fcaf0e9448a80d1a4803 0 1706013587000 7 connected 127.0.0.1:6381> info replication # Replication role:slave master_host:192.168.92.130 master_port:6384 master_link_status:up master_last_io_seconds_ago:6 master_sync_in_progress:0 slave_read_repl_offset:2293 slave_repl_offset:2293 slave_priority:100 slave_read_only:1 replica_announced:1 connected_slaves:0 master_failover_state:no-failover master_replid:d2d35f3efda780fa7d82c678cb0b888439db221c master_replid2:0000000000000000000000000000000000000000 master_repl_offset:2293 second_repl_offset:-1 repl_backlog_active:1 repl_backlog_size:1048576 repl_backlog_first_byte_offset:2252 repl_backlog_histlen:42 127.0.0.1:6381> cluster nodes 69d920127eec13adc0d9fcaf0e9448a80d1a4803 192.168.92.130:6384@16384 master - 0 1706014276000 7 connected 0-5460 a5757758fd0bdd8106e10905bbd1674c574f10c1 192.168.92.131:6386@16386 slave 1fca4878cd2a2a4fe055003f53d53c84cb589859 0 1706014277063 3 connected 2eabe702c6bba38aee780477fab74bc4969d9bf3 192.168.92.129:6382@16382 slave 30b1f7bfccd93049117e1dab1cd6385eaf800015 0 1706014276000 5 connected 1fca4878cd2a2a4fe055003f53d53c84cb589859 
192.168.92.130:6383@16383 master - 0 1706014277566 3 connected 5461-10922 30b1f7bfccd93049117e1dab1cd6385eaf800015 192.168.92.131:6385@16385 master - 0 1706014276560 5 connected 10923-16383 ee718fe879729ec1f961b08c4342c78537997ab4 192.168.92.129:6381@16381 myself,slave 69d920127eec13adc0d9fcaf0e9448a80d1a4803 0 1706014275000 7 connected 127.0.0.1:6381> cluster failover OK 127.0.0.1:6381> cluster nodes 69d920127eec13adc0d9fcaf0e9448a80d1a4803 192.168.92.130:6384@16384 slave ee718fe879729ec1f961b08c4342c78537997ab4 0 1706014301000 8 connected a5757758fd0bdd8106e10905bbd1674c574f10c1 192.168.92.131:6386@16386 slave 1fca4878cd2a2a4fe055003f53d53c84cb589859 0 1706014300075 3 connected 2eabe702c6bba38aee780477fab74bc4969d9bf3 192.168.92.129:6382@16382 slave 30b1f7bfccd93049117e1dab1cd6385eaf800015 0 1706014300075 5 connected 1fca4878cd2a2a4fe055003f53d53c84cb589859 192.168.92.130:6383@16383 master - 0 1706014301785 3 connected 5461-10922 30b1f7bfccd93049117e1dab1cd6385eaf800015 192.168.92.131:6385@16385 master - 0 1706014301000 5 connected 10923-16383 ee718fe879729ec1f961b08c4342c78537997ab4 192.168.92.129:6381@16381 myself,master - 0 1706014300000 8 connected 0-5460
[root@localhost ~]# redis-server /myredis/cluster/redisCluster6382.conf
[root@localhost ~]# ifconfig
ens33: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet 192.168.92.129 netmask 255.255.255.0 broadcast 192.168.92.255
inet6 fe80::5d0e:f975:2a1e:b6d2 prefixlen 64 scopeid 0x20<link>
ether 00:0c:29:8f:1b:2b txqueuelen 1000 (Ethernet)
RX packets 305 bytes 54134 (52.8 KiB)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 166 bytes 17055 (16.6 KiB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0lo: flags=73<UP,LOOPBACK,RUNNING> mtu 65536
inet 127.0.0.1 netmask 255.0.0.0
inet6 ::1 prefixlen 128 scopeid 0x10<host>
loop txqueuelen 1000 (Local Loopback)
RX packets 64 bytes 5568 (5.4 KiB)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 64 bytes 5568 (5.4 KiB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0virbr0: flags=4099<UP,BROADCAST,MULTICAST> mtu 1500
inet 192.168.122.1 netmask 255.255.255.0 broadcast 192.168.122.255
ether 52:54:00:55:d3:7b txqueuelen 1000 (Ethernet)
RX packets 0 bytes 0 (0.0 B)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 0 bytes 0 (0.0 B)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0[root@localhost ~]# systemctl status firewalld
● firewalld.service - firewalld - dynamic firewall daemon
Loaded: loaded (/usr/lib/systemd/system/firewalld.service; enabled; vendor preset: enabled)
Active: active (running) since 二 2024-01-23 19:45:32 CST; 7min ago
Docs: man:firewalld(1)
Main PID: 818 (firewalld)
Tasks: 2
CGroup: /system.slice/firewalld.service
└─818 /usr/bin/python2 -Es /usr/sbin/firewalld --nofork --nopid1月 23 19:45:32 localhost.localdomain systemd[1]: Starting firewalld - dynam...
1月 23 19:45:32 localhost.localdomain systemd[1]: Started firewalld - dynami...
1月 23 19:45:33 localhost.localdomain firewalld[818]: WARNING: AllowZoneDrif...
Hint: Some lines were ellipsized, use -l to show in full.
[root@localhost ~]# systemctl disable firewalld
Removed symlink /etc/systemd/system/multi-user.target.wants/firewalld.service.
Removed symlink /etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service.
[root@localhost ~]# systemctl status firewalld
● firewalld.service - firewalld - dynamic firewall daemon
Loaded: loaded (/usr/lib/systemd/system/firewalld.service; disabled; vendor preset: enabled)
Active: active (running) since 二 2024-01-23 19:45:32 CST; 8min ago
Docs: man:firewalld(1)
Main PID: 818 (firewalld)
CGroup: /system.slice/firewalld.service
└─818 /usr/bin/python2 -Es /usr/sbin/firewalld --nofork --nopid1月 23 19:45:32 localhost.localdomain systemd[1]: Starting firewalld - dynam...
1月 23 19:45:32 localhost.localdomain systemd[1]: Started firewalld - dynami...
1月 23 19:45:33 localhost.localdomain firewalld[818]: WARNING: AllowZoneDrif...
Hint: Some lines were ellipsized, use -l to show in full.
[root@localhost ~]# systemctl disable firewalld
[root@localhost ~]# systemctl status firewalld
● firewalld.service - firewalld - dynamic firewall daemon
Loaded: loaded (/usr/lib/systemd/system/firewalld.service; disabled; vendor preset: enabled)
Active: active (running) since 二 2024-01-23 19:45:32 CST; 8min ago
Docs: man:firewalld(1)
Main PID: 818 (firewalld)
CGroup: /system.slice/firewalld.service
└─818 /usr/bin/python2 -Es /usr/sbin/firewalld --nofork --nopid1月 23 19:45:32 localhost.localdomain systemd[1]: Starting firewalld - dynam...
1月 23 19:45:32 localhost.localdomain systemd[1]: Started firewalld - dynami...
1月 23 19:45:33 localhost.localdomain firewalld[818]: WARNING: AllowZoneDrif...
Hint: Some lines were ellipsized, use -l to show in full.
[root@localhost ~]# systemctl status firewalld
● firewalld.service - firewalld - dynamic firewall daemon
Loaded: loaded (/usr/lib/systemd/system/firewalld.service; disabled; vendor preset: enabled)
Active: active (running) since 二 2024-01-23 19:45:32 CST; 8min ago
Docs: man:firewalld(1)
Main PID: 818 (firewalld)
CGroup: /system.slice/firewalld.service
└─818 /usr/bin/python2 -Es /usr/sbin/firewalld --nofork --nopid1月 23 19:45:32 localhost.localdomain systemd[1]: Starting firewalld - dynam...
1月 23 19:45:32 localhost.localdomain systemd[1]: Started firewalld - dynami...
1月 23 19:45:33 localhost.localdomain firewalld[818]: WARNING: AllowZoneDrif...
Hint: Some lines were ellipsized, use -l to show in full.
[root@localhost ~]# systemctl disable firewalld
[root@localhost ~]# systemctl status firewalld
● firewalld.service - firewalld - dynamic firewall daemon
Loaded: loaded (/usr/lib/systemd/system/firewalld.service; disabled; vendor preset: enabled)
Active: active (running) since 二 2024-01-23 19:45:32 CST; 8min ago
Docs: man:firewalld(1)
Main PID: 818 (firewalld)
CGroup: /system.slice/firewalld.service
└─818 /usr/bin/python2 -Es /usr/sbin/firewalld --nofork --nopid1月 23 19:45:32 localhost.localdomain systemd[1]: Starting firewalld - dynam...
1月 23 19:45:32 localhost.localdomain systemd[1]: Started firewalld - dynami...
1月 23 19:45:33 localhost.localdomain firewalld[818]: WARNING: AllowZoneDrif...
Hint: Some lines were ellipsized, use -l to show in full.
[root@localhost ~]# systemctl stop firewalld
[root@localhost ~]# systemctl status firewalld
● firewalld.service - firewalld - dynamic firewall daemon
Loaded: loaded (/usr/lib/systemd/system/firewalld.service; disabled; vendor preset: enabled)
Active: inactive (dead)
Docs: man:firewalld(1)1月 23 19:45:32 localhost.localdomain systemd[1]: Starting firewalld - dynam...
1月 23 19:45:32 localhost.localdomain systemd[1]: Started firewalld - dynami...
1月 23 19:45:33 localhost.localdomain firewalld[818]: WARNING: AllowZoneDrif...
1月 23 19:55:01 localhost.localdomain systemd[1]: Stopping firewalld - dynam...
1月 23 19:55:03 localhost.localdomain systemd[1]: Stopped firewalld - dynami...
Hint: Some lines were ellipsized, use -l to show in full.
[root@localhost ~]# redis-cli -a abc123 --cluster create --cluster-replicas 1 192.168.92.129:6381
Warning: Using a password with '-a' or '-u' option on the command line interface may not be safe.
[ERR] Wrong number of arguments for specified --cluster sub command
[root@localhost ~]# 192.168.92.129:6382 192.168.92.130:6383 192.168.92.130:6384 192.168.92.131:6385
bash: 192.168.92.129:6382: 未找到命令...
[root@localhost ~]# redis-cli -a abc123 --cluster create --cluster-replicas 1 192.168.92.129:6381 192.168.92.129:6382 192.168.92.130:6383 192.168.92.130:6384 192.168.92.131:6385 192.168.92.131:6386
Warning: Using a password with '-a' or '-u' option on the command line interface may not be safe.
Could not connect to Redis at 192.168.92.129:6382: Connection refused
[root@localhost ~]# ps -ef|grep redis
root 2612 1 0 19:47 ? 00:00:01 redis-server 0.0.0.0:6381 [cluster]
root 3005 2544 0 20:00 pts/0 00:00:00 grep --color=auto redis
[root@localhost ~]# redis-server /myredis/cluster/redisCluster6382.conf
[root@localhost ~]# ps -ef|grep redis
root 2612 1 0 19:47 ? 00:00:01 redis-server 0.0.0.0:6381 [cluster]
root 3034 2544 0 20:01 pts/0 00:00:00 grep --color=auto redis
[root@localhost ~]# cd /myredis/cluster/
[root@localhost cluster]# vim redisCluster6382.conf
[root@localhost cluster]# vim redisCluster6382.conf
[root@localhost cluster]# redis-cli -a abc123 -p 6381
Warning: Using a password with '-a' or '-u' option on the command line interface may not be safe.
127.0.0.1:6381> shutdown
not connected> quit
[root@localhost cluster]# redis-server /myredis/cluster/redisCluster6382.conf
[root@localhost cluster]# redis-server /myredis/cluster/redisCluster6381.conf
[root@localhost cluster]# ps -ef|grep redis
root 3142 1 0 20:07 ? 00:00:00 redis-server 0.0.0.0:6382 [cluster]
root 3154 1 0 20:07 ? 00:00:00 redis-server 0.0.0.0:6381 [cluster]
root 3175 2544 0 20:07 pts/0 00:00:00 grep --color=auto redis
[root@localhost cluster]# redis-cli -a abc123 --cluster create --cluster-replicas 1 192.168.92.129:6381 192.168.92.129:6382 192.168.92.130:6383 192.168.92.130:6384 192.168.92.131:6385 192.168.92.131:6386
Warning: Using a password with '-a' or '-u' option on the command line interface may not be safe.
>>> Performing hash slots allocation on 6 nodes...
Master[0] -> Slots 0 - 5460
Master[1] -> Slots 5461 - 10922
Master[2] -> Slots 10923 - 16383
Adding replica 192.168.92.130:6384 to 192.168.92.129:6381
Adding replica 192.168.92.131:6386 to 192.168.92.130:6383
Adding replica 192.168.92.129:6382 to 192.168.92.131:6385
M: ee718fe879729ec1f961b08c4342c78537997ab4 192.168.92.129:6381
slots:[0-5460] (5461 slots) master
S: 2eabe702c6bba38aee780477fab74bc4969d9bf3 192.168.92.129:6382
replicates 30b1f7bfccd93049117e1dab1cd6385eaf800015
M: 1fca4878cd2a2a4fe055003f53d53c84cb589859 192.168.92.130:6383
slots:[5461-10922] (5462 slots) master
S: 69d920127eec13adc0d9fcaf0e9448a80d1a4803 192.168.92.130:6384
replicates ee718fe879729ec1f961b08c4342c78537997ab4
M: 30b1f7bfccd93049117e1dab1cd6385eaf800015 192.168.92.131:6385
slots:[10923-16383] (5461 slots) master
S: a5757758fd0bdd8106e10905bbd1674c574f10c1 192.168.92.131:6386
replicates 1fca4878cd2a2a4fe055003f53d53c84cb589859
Can I set the above configuration? (type 'yes' to accept): yes
>>> Nodes configuration updated
>>> Assign a different config epoch to each node
>>> Sending CLUSTER MEET messages to join the cluster
Waiting for the cluster to join
.
>>> Performing Cluster Check (using node 192.168.92.129:6381)
M: ee718fe879729ec1f961b08c4342c78537997ab4 192.168.92.129:6381
slots:[0-5460] (5461 slots) master
1 additional replica(s)
M: 1fca4878cd2a2a4fe055003f53d53c84cb589859 192.168.92.130:6383
slots:[5461-10922] (5462 slots) master
1 additional replica(s)
S: a5757758fd0bdd8106e10905bbd1674c574f10c1 192.168.92.131:6386
slots: (0 slots) slave
replicates 1fca4878cd2a2a4fe055003f53d53c84cb589859
S: 2eabe702c6bba38aee780477fab74bc4969d9bf3 192.168.92.129:6382
slots: (0 slots) slave
replicates 30b1f7bfccd93049117e1dab1cd6385eaf800015
M: 30b1f7bfccd93049117e1dab1cd6385eaf800015 192.168.92.131:6385
slots:[10923-16383] (5461 slots) master
1 additional replica(s)
S: 69d920127eec13adc0d9fcaf0e9448a80d1a4803 192.168.92.130:6384
slots: (0 slots) slave
replicates ee718fe879729ec1f961b08c4342c78537997ab4
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
[root@localhost cluster]# cd /myredis/cluster/
[root@localhost cluster]# ll
总用量 36
drwxr-xr-x. 2 root root 224 1月 23 20:12 appendonlydir
-rw-r--r--. 1 root root 7189 1月 23 20:12 cluster6381.log
-rw-r--r--. 1 root root 4044 1月 23 20:12 cluster6382.log
-rw-r--r--. 1 root root 88 1月 23 20:07 dump6381.rdb
-rw-r--r--. 1 root root 171 1月 23 20:12 dump6382.rdb
-rw-r--r--. 1 root root 1183 1月 23 20:12 nodes-6381.conf
-rw-r--r--. 1 root root 1183 1月 23 20:12 nodes-6382.conf
-rw-r--r--. 1 root root 347 1月 23 19:36 redisCluster6381.conf
-rw-r--r--. 1 root root 348 1月 23 20:04 redisCluster6382.conf
[root@localhost cluster]# ps -ef|grep redis
root 3142 1 0 20:07 ? 00:00:00 redis-server 0.0.0.0:6382 [cluster]
root 3154 1 0 20:07 ? 00:00:00 redis-server 0.0.0.0:6381 [cluster]
root 3302 2544 0 20:15 pts/0 00:00:00 grep --color=auto redis
[root@localhost cluster]# redis-cli -a abc123 -p 6381
Warning: Using a password with '-a' or '-u' option on the command line interface may not be safe.
127.0.0.1:6381> info replication
# Replication
role:master
connected_slaves:1
slave0:ip=192.168.92.130,port=6384,state=online,offset=308,lag=1
master_failover_state:no-failover
master_replid:e33ee80b0ca4bd8c4030f194d3b1b262f20f42b0
master_replid2:0000000000000000000000000000000000000000
master_repl_offset:308
second_repl_offset:-1
repl_backlog_active:1
repl_backlog_size:1048576
repl_backlog_first_byte_offset:1
repl_backlog_histlen:308
127.0.0.1:6381> cluster nodes
1fca4878cd2a2a4fe055003f53d53c84cb589859 192.168.92.130:6383@16383 master - 0 1706012179829 3 connected 5461-10922
a5757758fd0bdd8106e10905bbd1674c574f10c1 192.168.92.131:6386@16386 slave 1fca4878cd2a2a4fe055003f53d53c84cb589859 0 1706012180000 3 connected
2eabe702c6bba38aee780477fab74bc4969d9bf3 192.168.92.129:6382@16382 slave 30b1f7bfccd93049117e1dab1cd6385eaf800015 0 1706012178621 5 connected
30b1f7bfccd93049117e1dab1cd6385eaf800015 192.168.92.131:6385@16385 master - 0 1706012180000 5 connected 10923-16383
ee718fe879729ec1f961b08c4342c78537997ab4 192.168.92.129:6381@16381 myself,master - 0 1706012180000 1 connected 0-5460
69d920127eec13adc0d9fcaf0e9448a80d1a4803 192.168.92.130:6384@16384 slave ee718fe879729ec1f961b08c4342c78537997ab4 0 1706012180837 1 connected
127.0.0.1:6381> cluster info
cluster_state:ok
cluster_slots_assigned:16384
cluster_slots_ok:16384
cluster_slots_pfail:0
cluster_slots_fail:0
cluster_known_nodes:6
cluster_size:3
cluster_current_epoch:6
cluster_my_epoch:1
cluster_stats_messages_ping_sent:642
cluster_stats_messages_pong_sent:657
cluster_stats_messages_sent:1299
cluster_stats_messages_ping_received:652
cluster_stats_messages_pong_received:642
cluster_stats_messages_meet_received:5
cluster_stats_messages_received:1299
total_cluster_links_buffer_limit_exceeded:0
127.0.0.1:6381> keys *
(empty array)
127.0.0.1:6381> set k1 v1
(error) MOVED 12706 192.168.92.131:6385
127.0.0.1:6381> set k2 v2
OK
127.0.0.1:6381> keys *
1) "k2"
127.0.0.1:6381> flushall
OK
127.0.0.1:6381> keys *
(empty array)
127.0.0.1:6381> quit
[root@localhost cluster]# redis-cli -a abc123 -p 6381 -c
Warning: Using a password with '-a' or '-u' option on the command line interface may not be safe.
127.0.0.1:6381> keys *
(empty array)
127.0.0.1:6381> set k1 v1
-> Redirected to slot [12706] located at 192.168.92.131:6385
OK
192.168.92.131:6385> get k1
"v1"
192.168.92.131:6385> set k2 v2
-> Redirected to slot [449] located at 192.168.92.129:6381
OK
192.168.92.129:6381> set k3 v3
OK
192.168.92.129:6381> set k4 v4
-> Redirected to slot [8455] located at 192.168.92.130:6383
OK
192.168.92.130:6383> cluster keyslot k1
(integer) 12706
192.168.92.130:6383> cluster keyslot k2
(integer) 449
192.168.92.130:6383> cluster nodes
a5757758fd0bdd8106e10905bbd1674c574f10c1 192.168.92.131:6386@16386 slave 1fca4878cd2a2a4fe055003f53d53c84cb589859 0 1706013258974 3 connected
69d920127eec13adc0d9fcaf0e9448a80d1a4803 192.168.92.130:6384@16384 slave ee718fe879729ec1f961b08c4342c78537997ab4 0 1706013258572 1 connected
30b1f7bfccd93049117e1dab1cd6385eaf800015 192.168.92.131:6385@16385 master - 0 1706013259477 5 connected 10923-16383
2eabe702c6bba38aee780477fab74bc4969d9bf3 192.168.92.129:6382@16382 slave 30b1f7bfccd93049117e1dab1cd6385eaf800015 0 1706013259000 5 connected
1fca4878cd2a2a4fe055003f53d53c84cb589859 192.168.92.130:6383@16383 myself,master - 0 1706013259000 3 connected 5461-10922
ee718fe879729ec1f961b08c4342c78537997ab4 192.168.92.129:6381@16381 master - 0 1706013260082 1 connected 0-5460
192.168.92.130:6383> quit
[root@localhost cluster]# redis-cli -a abc123 -p 6381 -c
Warning: Using a password with '-a' or '-u' option on the command line interface may not be safe.
127.0.0.1:6381> shutdown
not connected> quit
[root@localhost cluster]# redis-cli -a abc123 -p 6382 -c
Warning: Using a password with '-a' or '-u' option on the command line interface may not be safe.
127.0.0.1:6382> cluster nodes
a5757758fd0bdd8106e10905bbd1674c574f10c1 192.168.92.131:6386@16386 slave 1fca4878cd2a2a4fe055003f53d53c84cb589859 0 1706013398731 3 connected
ee718fe879729ec1f961b08c4342c78537997ab4 192.168.92.129:6381@16381 master,fail - 1706013346893 1706013345000 1 disconnected
69d920127eec13adc0d9fcaf0e9448a80d1a4803 192.168.92.130:6384@16384 master - 0 1706013398000 7 connected 0-5460
2eabe702c6bba38aee780477fab74bc4969d9bf3 192.168.92.129:6382@16382 myself,slave 30b1f7bfccd93049117e1dab1cd6385eaf800015 0 1706013398000 5 connected
30b1f7bfccd93049117e1dab1cd6385eaf800015 192.168.92.131:6385@16385 master - 0 1706013399538 5 connected 10923-16383
1fca4878cd2a2a4fe055003f53d53c84cb589859 192.168.92.130:6383@16383 master - 0 1706013399538 3 connected 5461-10922
127.0.0.1:6382> set k1 v11
-> Redirected to slot [12706] located at 192.168.92.131:6385
OK
192.168.92.131:6385> set k2 v2
-> Redirected to slot [449] located at 192.168.92.130:6384
OK
192.168.92.130:6384>
192.168.92.130:6384> set k2 v22
OK
192.168.92.130:6384> set k3 v33
OK
192.168.92.130:6384> cluster nodes
30b1f7bfccd93049117e1dab1cd6385eaf800015 192.168.92.131:6385@16385 master - 0 1706013534549 5 connected 10923-16383
a5757758fd0bdd8106e10905bbd1674c574f10c1 192.168.92.131:6386@16386 slave 1fca4878cd2a2a4fe055003f53d53c84cb589859 0 1706013534041 3 connected
69d920127eec13adc0d9fcaf0e9448a80d1a4803 192.168.92.130:6384@16384 myself,master - 0 1706013535000 7 connected 0-5460
ee718fe879729ec1f961b08c4342c78537997ab4 192.168.92.129:6381@16381 master,fail - 1706013348018 1706013345453 1 disconnected
1fca4878cd2a2a4fe055003f53d53c84cb589859 192.168.92.130:6383@16383 master - 0 1706013533535 3 connected 5461-10922
2eabe702c6bba38aee780477fab74bc4969d9bf3 192.168.92.129:6382@16382 slave 30b1f7bfccd93049117e1dab1cd6385eaf800015 0 1706013535560 5 connected
192.168.92.130:6384> quit
[root@localhost cluster]# redis-server /myredis/cluster/redisCluster6381.conf
[root@localhost cluster]# redis-cli -a abc123 -p 6381 -c
Warning: Using a password with '-a' or '-u' option on the command line interface may not be safe.
127.0.0.1:6381> cluster nodes
69d920127eec13adc0d9fcaf0e9448a80d1a4803 192.168.92.130:6384@16384 master - 0 1706013588124 7 connected 0-5460
a5757758fd0bdd8106e10905bbd1674c574f10c1 192.168.92.131:6386@16386 slave 1fca4878cd2a2a4fe055003f53d53c84cb589859 0 1706013588000 3 connected
2eabe702c6bba38aee780477fab74bc4969d9bf3 192.168.92.129:6382@16382 slave 30b1f7bfccd93049117e1dab1cd6385eaf800015 0 1706013586513 5 connected
1fca4878cd2a2a4fe055003f53d53c84cb589859 192.168.92.130:6383@16383 master - 0 1706013587116 3 connected 5461-10922
30b1f7bfccd93049117e1dab1cd6385eaf800015 192.168.92.131:6385@16385 master - 0 1706013587000 5 connected 10923-16383
ee718fe879729ec1f961b08c4342c78537997ab4 192.168.92.129:6381@16381 myself,slave 69d920127eec13adc0d9fcaf0e9448a80d1a4803 0 1706013587000 7 connected
127.0.0.1:6381> info replication
# Replication
role:slave
master_host:192.168.92.130
master_port:6384
master_link_status:up
master_last_io_seconds_ago:6
master_sync_in_progress:0
slave_read_repl_offset:2293
slave_repl_offset:2293
slave_priority:100
slave_read_only:1
replica_announced:1
connected_slaves:0
master_failover_state:no-failover
master_replid:d2d35f3efda780fa7d82c678cb0b888439db221c
master_replid2:0000000000000000000000000000000000000000
master_repl_offset:2293
second_repl_offset:-1
repl_backlog_active:1
repl_backlog_size:1048576
repl_backlog_first_byte_offset:2252
repl_backlog_histlen:42
127.0.0.1:6381> cluster nodes
69d920127eec13adc0d9fcaf0e9448a80d1a4803 192.168.92.130:6384@16384 master - 0 1706014276000 7 connected 0-5460
a5757758fd0bdd8106e10905bbd1674c574f10c1 192.168.92.131:6386@16386 slave 1fca4878cd2a2a4fe055003f53d53c84cb589859 0 1706014277063 3 connected
2eabe702c6bba38aee780477fab74bc4969d9bf3 192.168.92.129:6382@16382 slave 30b1f7bfccd93049117e1dab1cd6385eaf800015 0 1706014276000 5 connected
1fca4878cd2a2a4fe055003f53d53c84cb589859 192.168.92.130:6383@16383 master - 0 1706014277566 3 connected 5461-10922
30b1f7bfccd93049117e1dab1cd6385eaf800015 192.168.92.131:6385@16385 master - 0 1706014276560 5 connected 10923-16383
ee718fe879729ec1f961b08c4342c78537997ab4 192.168.92.129:6381@16381 myself,slave 69d920127eec13adc0d9fcaf0e9448a80d1a4803 0 1706014275000 7 connected
127.0.0.1:6381> cluster failover
OK
127.0.0.1:6381> cluster nodes
69d920127eec13adc0d9fcaf0e9448a80d1a4803 192.168.92.130:6384@16384 slave ee718fe879729ec1f961b08c4342c78537997ab4 0 1706014301000 8 connected
a5757758fd0bdd8106e10905bbd1674c574f10c1 192.168.92.131:6386@16386 slave 1fca4878cd2a2a4fe055003f53d53c84cb589859 0 1706014300075 3 connected
2eabe702c6bba38aee780477fab74bc4969d9bf3 192.168.92.129:6382@16382 slave 30b1f7bfccd93049117e1dab1cd6385eaf800015 0 1706014300075 5 connected
1fca4878cd2a2a4fe055003f53d53c84cb589859 192.168.92.130:6383@16383 master - 0 1706014301785 3 connected 5461-10922
30b1f7bfccd93049117e1dab1cd6385eaf800015 192.168.92.131:6385@16385 master - 0 1706014301000 5 connected 10923-16383
ee718fe879729ec1f961b08c4342c78537997ab4 192.168.92.129:6381@16381 myself,master - 0 1706014300000 8 connected 0-5460
总结：6381 宕机后，其从机 6384 通过故障转移晋升为主节点（epoch 7）；6381 重启后自动成为 6384 的从机。此时在 6381 上执行 cluster failover 命令，6381 重新晋升为主节点（epoch 8），6384 回归从机角色，集群恢复了宕机前原有的主从关系。