
K8S-8 -- Case studies: zookeeper / nginx+tomcat / redis / mysql / java / LNMP / Dubbo microservices / Ingress


1. zookeeper: running a ZooKeeper cluster backed by PVs and PVCs

https://zookeeper.apache.org/ # official site
https://archive.apache.org/dist/zookeeper/ # official download archive
https://zookeeper.apache.org/doc/current/zookeeperStarted.html # getting-started guide
https://zookeeper.apache.org/doc/current/zookeeperOver.html # cluster architecture overview
A three-member ensemble is efficient; a five-member ensemble is less so, because synchronization takes longer.
https://zookeeper.apache.org/doc/current/zookeeperAdmin.html # admin guide; note that ZooKeeper depends on a JDK
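A three-member ensemble is declared in zoo.cfg with one server.N line per member. A minimal sketch, assuming the zookeeper1-3 service names used later in this article and a /zookeeper/data data directory (the article's real file is conf/zoo.cfg in the build context below):

# hedged sketch of a 3-member ensemble zoo.cfg; hostnames match the
# zookeeper1-3 services created later, dataDir is an assumption
tickTime=2000
initLimit=10
syncLimit=5
dataDir=/zookeeper/data
clientPort=2181
server.1=zookeeper1:2888:3888
server.2=zookeeper2:2888:3888
server.3=zookeeper3:2888:3888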

(1) Pull the JDK base image
[root@k8s-master1 ~]#docker pull elevy/slim_java:8
[root@k8s-master1 ~]#docker tag elevy/slim_java:8 harbor.magedu.net/baseimages/slim_java:8
[root@k8s-master1 ~]#docker push harbor.magedu.net/baseimages/slim_java:8
[root@k8s-master1 ~]#docker images
REPOSITORY                                 TAG     IMAGE ID            CREATED             SIZE
elevy/slim_java                            8       0776147f4957        4 years ago         85.3MB
harbor.magedu.net/baseimages/slim_java     8       0776147f4957        4 years ago         85.3MB

(2) Prepare the ZooKeeper image
[root@k8s-master1 ~]#cd /opt/k8s-data/
[root@k8s-master1 k8s-data]#cd dockerfile/web/magedu/zookeeper
[root@k8s-master1 zookeeper]#ll
total 36884
drwxr-xr-x 4 root root      230 Oct 10 11:57 ./
drwxr-xr-x 9 root root      113 Oct 10 11:37 ../
-rw-r--r-- 1 root root     1748 Oct 10 11:39 Dockerfile
-rw-r--r-- 1 root root    63587 Jun 22  2021 KEYS
drwxr-xr-x 2 root root       24 Oct 10 11:37 bin/
-rwxr-xr-x 1 root root      142 Jun 22  2021 build-command.sh*
drwxr-xr-x 2 root root       45 Oct 10 12:03 conf/
-rwxr-xr-x 1 root root      278 Jun 22  2021 entrypoint.sh*
-rw-r--r-- 1 root root       91 Jun 22  2021 repositories
-rw-r--r-- 1 root root     2270 Jun 22  2021 zookeeper-3.12-Dockerfile.tar.gz
-rw-r--r-- 1 root root 37676320 Jun 22  2021 zookeeper-3.4.14.tar.gz
-rw-r--r-- 1 root root      836 Jun 22  2021 zookeeper-3.4.14.tar.gz.asc
[root@k8s-master1 zookeeper]#tree
.
├── Dockerfile
├── KEYS
├── bin
│   └── zkReady.sh
├── build-command.sh
├── conf
│   ├── log4j.properties
│   └── zoo.cfg
├── entrypoint.sh
├── repositories
├── zookeeper-3.12-Dockerfile.tar.gz
├── zookeeper-3.4.14.tar.gz
└── zookeeper-3.4.14.tar.gz.asc
[root@k8s-master1 zookeeper]#vim Dockerfile 
[root@k8s-master1 zookeeper]#vim conf/zoo.cfg 
[root@k8s-master1 zookeeper]#vim entrypoint.sh 
[root@k8s-master1 zookeeper]#vim build-command.sh  # create the magedu project in Harbor first
[root@k8s-master1 zookeeper]#chmod a+x *.sh
# build the zookeeper image
[root@k8s-master1 zookeeper]#bash build-command.sh 1mj8iugs-20211010_114312
# verify the image
[root@k8s-master1 zookeeper]#docker run -it --rm harbor.magedu.net/magedu/zookeeper:1mj8iugs-20211010_114312
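build-command.sh itself is not reproduced in this article. Judging by the tag argument above and the resulting image name, a plausible reconstruction is:

#!/bin/bash
# hypothetical sketch of build-command.sh (the actual script is not shown)
TAG=$1
docker build -t harbor.magedu.net/magedu/zookeeper:${TAG} .
docker push harbor.magedu.net/magedu/zookeeper:${TAG}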

(3) Run the ZooKeeper service on Kubernetes:
# prepare the YAML files:
[root@k8s-master1 zookeeper]#cd ../../../../yaml/magedu/zookeeper/
[root@k8s-master1 zookeeper]#tree
.
├── pv
│   ├── zookeeper-persistentvolume.yaml
│   └── zookeeper-persistentvolumeclaim.yaml
└── zookeeper.yaml

# create the PVs
[root@k8s-master1 zookeeper]#cd pv/
[root@k8s-master1 pv]#vim zookeeper-persistentvolume.yaml 
:%s/172.31.7.109/192.168.150.159/g  # in vim: replace the example NFS server IP with this environment's
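The PV manifest is not reproduced here. A hedged sketch of the first of the three PVs, with the name, size, access mode, and reclaim policy taken from the kubectl get pv output below and the NFS details from the export prepared next:

# hypothetical sketch of one entry in zookeeper-persistentvolume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: zookeeper-datadir-pv-1
spec:
  capacity:
    storage: 20Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  nfs:
    server: 192.168.150.159
    path: /data/k8sdata/magedu/zookeeper-datadir-1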
# prepare the NFS server
[root@k8s-ha1 ~]#vim /etc/exports 
/data/k8sdata *(rw,no_root_squash)
[root@k8s-ha1 ~]#mkdir -p /data/k8sdata/magedu/zookeeper-datadir-1
[root@k8s-ha1 ~]#mkdir -p /data/k8sdata/magedu/zookeeper-datadir-2
[root@k8s-ha1 ~]#mkdir -p /data/k8sdata/magedu/zookeeper-datadir-3
[root@k8s-ha1 ~]#systemctl restart nfs-server.service 
# verify the NFS export
[root@k8s-master1 ~]#showmount -e 192.168.150.159
Export list for 192.168.150.159:
/data/k8sdata *
[root@k8s-master1 pv]#kubectl apply -f zookeeper-persistentvolume.yaml 
persistentvolume/zookeeper-datadir-pv-1 created
persistentvolume/zookeeper-datadir-pv-2 created
persistentvolume/zookeeper-datadir-pv-3 created
[root@k8s-master1 pv]#kubectl get pv  # no -A needed: PVs are cluster-scoped resources, not bound to a namespace
NAME                     CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM   STORAGECLASS   REASON   AGE
zookeeper-datadir-pv-1   20Gi       RWO            Retain           Available                                   23s
zookeeper-datadir-pv-2   20Gi       RWO            Retain           Available                                   23s
zookeeper-datadir-pv-3   20Gi       RWO            Retain           Available                                   23s

# create the PVCs
[root@k8s-master1 pv]#vim zookeeper-persistentvolumeclaim.yaml
[root@k8s-master1 pv]#kubectl create ns magedu
namespace/magedu created
[root@k8s-master1 pv]#kubectl get ns
NAME                   STATUS   AGE
default                Active   22d
magedu                 Active   4s
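The PVC manifest is likewise not reproduced. A hedged sketch of the first claim, matching the kubectl get pvc output below (binding via volumeName is an assumption; a label selector would work as well):

# hypothetical sketch of one entry in zookeeper-persistentvolumeclaim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: zookeeper-datadir-pvc-1
  namespace: magedu
spec:
  accessModes:
    - ReadWriteOnce
  volumeName: zookeeper-datadir-pv-1
  resources:
    requests:
      storage: 20Gi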
[root@k8s-master1 pv]#kubectl apply -f zookeeper-persistentvolumeclaim.yaml 
persistentvolumeclaim/zookeeper-datadir-pvc-1 created
persistentvolumeclaim/zookeeper-datadir-pvc-2 created
persistentvolumeclaim/zookeeper-datadir-pvc-3 created
[root@k8s-master1 pv]#kubectl get pvc -A  # -A (or an explicit namespace) is required: PVCs are namespaced
NAMESPACE   NAME                      STATUS   VOLUME                   CAPACITY   ACCESS MODES   STORAGECLASS   AGE
magedu      zookeeper-datadir-pvc-1   Bound    zookeeper-datadir-pv-1   20Gi       RWO                           11s
magedu      zookeeper-datadir-pvc-2   Bound    zookeeper-datadir-pv-2   20Gi       RWO                           11s
magedu      zookeeper-datadir-pvc-3   Bound    zookeeper-datadir-pv-3   20Gi       RWO                           11s
[root@k8s-master1 pv]#kubectl get pvc -n magedu
NAME                      STATUS   VOLUME                   CAPACITY   ACCESS MODES   STORAGECLASS   AGE
zookeeper-datadir-pvc-1   Bound    zookeeper-datadir-pv-1   20Gi       RWO                           77s
zookeeper-datadir-pvc-2   Bound    zookeeper-datadir-pv-2   20Gi       RWO                           77s
zookeeper-datadir-pvc-3   Bound    zookeeper-datadir-pv-3   20Gi       RWO                           77s

# run the ZooKeeper cluster
[root@k8s-master1 pv]#cd ..
[root@k8s-master1 zookeeper]#vim zookeeper.yaml 
# zookeeper.yaml passes a servers parameter into each pod; it is consumed by /opt/k8s-data/dockerfile/web/magedu/zookeeper/entrypoint.sh
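The entrypoint script itself is not shown in this article. A minimal sketch of what such an entrypoint typically does, assuming hypothetical MYID and SERVERS variables and the /zookeeper paths seen later inside the pod:

#!/bin/bash
# hypothetical sketch of entrypoint.sh: expand a comma-separated server list
# into server.N lines in zoo.cfg and record this member's id in myid
MYID=${MYID:-1}
SERVERS=${SERVERS:-zookeeper1,zookeeper2,zookeeper3}
idx=1
for s in ${SERVERS//,/ }; do
  echo "server.${idx}=${s}:2888:3888" >> /zookeeper/conf/zoo.cfg
  idx=$((idx+1))
done
echo "${MYID}" > /zookeeper/data/myid
exec /zookeeper/bin/zkServer.sh start-foreground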
[root@k8s-master1 zookeeper]#kubectl apply -f zookeeper.yaml 
service/zookeeper created
service/zookeeper1 created
service/zookeeper2 created
service/zookeeper3 created
deployment.apps/zookeeper1 created
deployment.apps/zookeeper2 created
deployment.apps/zookeeper3 created
[root@k8s-master1 zookeeper]#kubectl get pod -n magedu 
NAME                          READY   STATUS    RESTARTS   AGE
zookeeper1-5fb7f77757-5xm5w   1/1     Running   0          42s
zookeeper2-5cc7977b68-gmx8b   1/1     Running   0          42s
zookeeper3-5dbdbc8bc-b4nhq    1/1     Running   0          42s
[root@k8s-master1 zookeeper]#kubectl  get svc -n magedu
NAME         TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)                                        AGE
zookeeper    ClusterIP   10.100.180.159   <none>        2181/TCP                                       11m
zookeeper1   NodePort    10.100.173.181   <none>        2181:42181/TCP,2888:60108/TCP,3888:57600/TCP   11m
zookeeper2   NodePort    10.100.76.207    <none>        2181:42182/TCP,2888:45941/TCP,3888:60048/TCP   11m
zookeeper3   NodePort    10.100.235.146   <none>        2181:42183/TCP,2888:58673/TCP,3888:31671/TCP   11m
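For reference, a hedged sketch of what the zookeeper1 service in zookeeper.yaml likely looks like; the port numbers and the fixed client NodePort 42181 match the output above, while the selector label is an assumption:

# hypothetical sketch of the zookeeper1 service
apiVersion: v1
kind: Service
metadata:
  name: zookeeper1
  namespace: magedu
spec:
  type: NodePort
  selector:
    app: zookeeper1   # label is an assumption
  ports:
  - name: client
    port: 2181
    nodePort: 42181
  - name: followers
    port: 2888
  - name: election
    port: 3888

Note that NodePorts like 42181 lie outside the default 30000-32767 range, so this cluster's apiserver must have an extended service-node-port-range.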

# verify that ZooKeeper data has been written to the NFS exports
[root@k8s-ha1 ~]#ls /data/k8sdata/magedu/zookeeper-datadir-1
myid  version-2
[root@k8s-ha1 ~]#ls /data/k8sdata/magedu/zookeeper-datadir-2
myid  version-2
[root@k8s-ha1 ~]#ls /data/k8sdata/magedu/zookeeper-datadir-3
myid  version-2
[root@k8s-ha1 ~]#cat /data/k8sdata/magedu/zookeeper-datadir-3/myid 
3

# verify from the dashboard (open a shell inside a zookeeper pod)
bash-4.3# /zookeeper/bin/zkServer.sh status
ZooKeeper JMX enabled by default
ZooKeeper remote JMX Port set to 9010
ZooKeeper remote JMX authenticate set to false
ZooKeeper remote JMX ssl set to false
ZooKeeper remote JMX log4j set to true
Using config: /zookeeper/bin/../conf/zoo.cfg
Error contacting service. It is probably not running.
!! Abnormal status, cause unknown !!
Suspicion: the ensemble is sized for 3 members but only 2 started on the nodes; also, zoo.cfg does not show the concrete cluster IPs.
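A few commands that could help narrow this down (the pod name is copied from the listing above; the nc-based four-letter-word check assumes nc exists in the image):

kubectl -n magedu logs zookeeper1-5fb7f77757-5xm5w
kubectl -n magedu exec zookeeper1-5fb7f77757-5xm5w -- cat /zookeeper/conf/zoo.cfg
kubectl -n magedu exec zookeeper1-5fb7f77757-5xm5w -- sh -c 'echo stat | nc 127.0.0.1 2181'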



# Follow-up:
Verify that after deleting the ZooKeeper leader pod, one of the follower pods is automatically elected as the new leader, as in the sketch below.
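A hedged sketch of that check (deployment and pod names as above; kubectl exec on a deployment picks one of its pods):

for d in zookeeper1 zookeeper2 zookeeper3; do
  kubectl -n magedu exec deploy/${d} -- /zookeeper/bin/zkServer.sh status
done
# suppose zookeeper2 reports "Mode: leader": delete its pod and re-run the loop;
# one of the former followers should then report "Mode: leader"
kubectl -n magedu delete pod zookeeper2-5cc7977b68-gmx8b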

# Verify ZooKeeper from a client:
Client tool: ZooInspector
# first install a JDK on Windows
# cd C:\Users\nantian\Desktop\zk\ZooInspector\build
# java -jar zookeeper-dev-ZooInspector.jar
# log in with the client and verify
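When ZooInspector runs outside the cluster, the connect string is a node IP plus one of the NodePorts shown earlier, e.g. <node-ip>:42181 for the zookeeper1 service (the exact node IP depends on the environment).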

2. Web service: custom images running Nginx and Tomcat with dynamic/static request separation

# 1. Run Nginx
# (1) Build the CentOS base image:
[root@k8s-master1 k8s-data]#cd dockerfile/system/centos/
[root@k8s-master1 centos]#ll
total 24120
drwxr-xr-x 2 root root       81 Jan 11 15:28 ./
drwxr-xr-x 3 root root       20 Oct 10 11:37 ../
-rw-r--r-- 1 root root      465 Jan 11 15:24 Dockerfile
-rw-r--r-- 1 root root      158 Jun 22  2021 build-command.sh
-rw-r--r-- 1 root root 24688647 Jun 22  2021 filebeat-7.6.2-x86_64.rpm
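The Dockerfile is not reproduced in this article. A minimal sketch consistent with the listing above; only the filebeat rpm name is taken from the directory listing, while the base-image tag and package list are assumptions:

# hypothetical sketch of the CentOS base-image Dockerfile
FROM centos:7.9.2009
ADD filebeat-7.6.2-x86_64.rpm /tmp/
RUN yum install -y /tmp/filebeat-7.6.2-x86_64.rpm vim wget net-tools && \
    yum clean all && rm -f /tmp/filebeat-7.6.2-x86_64.rpm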
[root@k8s-master1 centos]#sh build-command.sh 
[root@k8s-master1 centos]
