[root@n001 home]# kubectl get node -o wide
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
n001 Ready master 51d v1.19.7 10.1.1.101 <none> CentOS Linux 7 (Core) 5.4.162-1.el7.elrepo.x86_64 docker://19.3.14
n002 Ready master 51d v1.19.7 10.1.1.102 <none> CentOS Linux 7 (Core) 5.4.162-1.el7.elrepo.x86_64 docker://19.3.14
n003 Ready <none> 51d v1.19.7 10.1.1.103 <none> CentOS Linux 7 (Core) 5.4.162-1.el7.elrepo.x86_64 docker://19.3.14
n004 Ready <none> 51d v1.19.7 10.1.1.104 <none> CentOS Linux 7 (Core) 5.4.162-1.el7.elrepo.x86_64 docker://19.3.14
n005 Ready <none> 51d v1.19.7 10.1.1.105 <none> CentOS Linux 7 (Core) 5.4.162-1.el7.elrepo.x86_64 docker://19.3.14
n006 Ready <none> 51d v1.19.7 10.1.1.106 <none> CentOS Linux 7 (Core) 5.4.162-1.el7.elrepo.x86_64 docker://19.3.14
n007 Ready <none> 51d v1.19.7 10.1.1.107 <none> CentOS Linux 7 (Core) 5.4.162-1.el7.elrepo.x86_64 docker://19.3.14
[root@centos yum.repos.d]# redis-trib create --replicas 1 \
> `dig +short redis-statefulset-0.redis-headless-service.redis.svc.cluster.local`:6379 \
> `dig +short redis-statefulset-1.redis-headless-service.redis.svc.cluster.local`:6379 \
> `dig +short redis-statefulset-2.redis-headless-service.redis.svc.cluster.local`:6379 \
> `dig +short redis-statefulset-3.redis-headless-service.redis.svc.cluster.local`:6379 \
> `dig +short redis-statefulset-4.redis-headless-service.redis.svc.cluster.local`:6379 \
> `dig +short redis-statefulset-5.redis-headless-service.redis.svc.cluster.local`:6379
>>> Creating cluster
/usr/share/gems/gems/redis-3.2.1/lib/redis/client.rb:443: warning: constant ::Fixnum is deprecated
>>> Performing hash slots allocation on 6 nodes...
Using 3 masters:
10.233.74.14:6379
10.233.66.9:6379
10.233.100.12:6379
Adding replica 10.233.101.14:6379 to 10.233.74.14:6379
Adding replica 10.233.85.16:6379 to 10.233.66.9:6379
Adding replica 10.233.74.15:6379 to 10.233.100.12:6379
M: a3ac346763a2be76e9efc90b7188ba909e16e20b 10.233.74.14:6379
slots:0-5460 (5461 slots) master
M: 9c87726e47c43c604ea65bfbbec6d595bbb5562a 10.233.66.9:6379
slots:5461-10922 (5462 slots) master
M: 61f81bcdce8df837117838ee063f7f2b6bd93b04 10.233.100.12:6379
slots:10923-16383 (5461 slots) master
S: f8f561de7dd3a1c20376dd3fb63e41046bb57c3e 10.233.101.14:6379
replicates a3ac346763a2be76e9efc90b7188ba909e16e20b
S: 9fae2bf46721ae6b3f5fc5cedf1ae69d50cb6980 10.233.85.16:6379
replicates 9c87726e47c43c604ea65bfbbec6d595bbb5562a
S: a4e88c39dbb33c6b2c574860442a73d45ffd0861 10.233.74.15:6379
replicates 61f81bcdce8df837117838ee063f7f2b6bd93b04
Can I set the above configuration? (type 'yes' to accept): yes
>>> Nodes configuration updated
>>> Assign a different config epoch to each node
>>> Sending CLUSTER MEET messages to join the cluster
Waiting for the cluster to join...............................................................................
Why does cluster creation hang forever in this waiting state?
[root@n002 slaves]# kubectl get pods -n redis -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
redis-statefulset-0 1/1 Running 0 33m 10.233.74.14 n006 <none> <none>
redis-statefulset-1 1/1 Running 0 32m 10.233.66.9 n003 <none> <none>
redis-statefulset-2 1/1 Running 0 31m 10.233.100.12 n005 <none> <none>
redis-statefulset-3 1/1 Running 0 31m 10.233.101.14 n004 <none> <none>
redis-statefulset-4 1/1 Running 0 30m 10.233.85.16 n007 <none> <none>
redis-statefulset-5 1/1 Running 0 30m 10.233.74.15 n006 <none> <none>
[root@n002 slaves]# kubectl exec -it redis-statefulset-0 -n redis -- redis-cli -c
127.0.0.1:6379> cluster meet 10.233.66.9 6379
OK
127.0.0.1:6379> cluster meet 10.233.100.9 6379
OK
127.0.0.1:6379> cluster meet 10.233.101.14 6379
OK
127.0.0.1:6379> cluster meet 10.233.85.16 6379
OK
127.0.0.1:6379> cluster meet 10.233.74.15 6379
OK
127.0.0.1:6379> cluster info
cluster_state:fail
cluster_slots_assigned:10923
cluster_slots_ok:10923
cluster_slots_pfail:0
cluster_slots_fail:0
cluster_known_nodes:5
cluster_size:2
cluster_current_epoch:6
cluster_my_epoch:1
cluster_stats_messages_ping_sent:18
cluster_stats_messages_pong_sent:2508
cluster_stats_messages_meet_sent:4
cluster_stats_messages_sent:2530
cluster_stats_messages_ping_received:2503
cluster_stats_messages_pong_received:19
cluster_stats_messages_meet_received:5
cluster_stats_messages_fail_received:3
cluster_stats_messages_received:2530
127.0.0.1:6379> cluster nodes
a3ac346763a2be76e9efc90b7188ba909e16e20b 10.233.74.14:6379@16379 myself,master - 0 1640780068000 1 connected 0-5460
a4e88c39dbb33c6b2c574860442a73d45ffd0861 10.1.1.106:6379@16379 master - 1640779982055 1640779980000 6 connected
f8f561de7dd3a1c20376dd3fb63e41046bb57c3e 10.233.101.14:6379@16379 master - 0 1640780070789 4 connected
9c87726e47c43c604ea65bfbbec6d595bbb5562a 10.1.1.106:6379@16379 master - 1640779952686 1640779952189 2 connected 5461-10922
9fae2bf46721ae6b3f5fc5cedf1ae69d50cb6980 10.1.1.106:6379@16379 master - 1640779971945 1640779971342 5 connected
127.0.0.1:6379>
Why is the Kubernetes host IP 10.1.1.106 shown for these cluster members instead of their pod IPs?
..........................................................................................................................................................................................................................................................................................................................................
[ERR] Sorry, can't connect to node 10.1.1.106:6379@16379
[ERR] Sorry, can't connect to node 10.1.1.106:6379@16379
[ERR] Sorry, can't connect to node 10.1.1.106:6379@16379
>>> Performing Cluster Check (using node 10.233.74.14:6379)
M: a3ac346763a2be76e9efc90b7188ba909e16e20b 10.233.74.14:6379
slots:0-5460 (5461 slots) master
1 additional replica(s)
S: f8f561de7dd3a1c20376dd3fb63e41046bb57c3e 10.233.101.14:6379@16379
slots: (0 slots) slave
replicates a3ac346763a2be76e9efc90b7188ba909e16e20b
M: 9c87726e47c43c604ea65bfbbec6d595bbb5562a 10.233.66.9:6379@16379
slots:5461-10922 (5462 slots)
I deployed this following the "在k8s中部署redis cluster实战" (deploying a Redis cluster on Kubernetes) guide. The firewall has not been disabled — do I need to open certain ports (for example 6379 and the cluster bus port 16379)?
Please help me — thank you very much. I have reinstalled repeatedly and always get the same result. If any key information is missing, please point it out and I will provide it as soon as possible.