Building microservice log collection on k8s with log-pilot + Kafka + Logstash + ES + Kibana
This article uses log-pilot, an open-source collector from Alibaba Cloud built on top of filebeat. log-pilot has major advantages for collecting logs from microservices; for a more detailed write-up on log-pilot, see 《》.
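To give a feel for why log-pilot suits microservices: instead of maintaining a central list of file paths, each Pod declares its own log sources through container environment variables, and log-pilot (running as a DaemonSet) discovers them automatically; the declared name becomes the Elasticsearch index or Kafka topic. A minimal sketch of an app Deployment fragment; the container name, image, and log names here are illustrative, not from this post:

# Fragment of an application Deployment; log-pilot picks up these env vars.
containers:
- name: demo-app
  image: demo-app:latest              # placeholder image
  env:
  - name: aliyun_logs_app-stdout      # value "stdout" collects the container's stdout
    value: "stdout"
  - name: aliyun_logs_app-access      # a path glob collects log files inside the container
    value: "/var/log/app/*.log"

Note that for file-based collection, log-pilot expects the log directory to be exposed as a volume on the container.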
A quick sketch of the pipeline: [figure: log-pilot → Kafka → Logstash → Elasticsearch → Kibana]
Component versions
log-pilot: 0.9.7
kafka: 2.2.0
zookeeper: 3.4.10
logstash: 6.8.0
es: 6.8.0
kibana: 6.8.0
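All of the manifests below live in a dedicated kafka namespace; if your cluster does not have it yet, create it before applying anything:

kubectl create namespace kafka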
1. Deploy Kafka + ZooKeeper (zk)
# Adjust the storage size and related settings to fit your environment;
# if you have no specific requirements, the defaults below are fine to start with.
cat kafka.yaml
---
apiVersion: v1
kind: Service
metadata:
  name: kafka-svc
  namespace: kafka
  labels:
    app: kafka
spec:
  ports:
  - port: 9092
    name: server
  clusterIP: None
  selector:
    app: kafka
---
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
  name: kafka-pdb
  namespace: kafka
spec:
  selector:
    matchLabels:
      app: kafka
  minAvailable: 2
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: kafka
  namespace: kafka
spec:
  selector:
    matchLabels:
      app: kafka
  serviceName: kafka-svc
  replicas: 3
  template:
    metadata:
      labels:
        app: kafka
    spec:
      tolerations:
      - key: "travis.io/schedule-only"
        operator: "Equal"
        value: "kafka"
        effect: "NoSchedule"
      - key: "travis.io/schedule-only"
        operator: "Equal"
        value: "kafka"
        effect: "NoExecute"
        tolerationSeconds: 3600
      - key: "travis.io/schedule-only"
        operator: "Equal"
        value: "kafka"
        effect: "PreferNoSchedule"
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: "app"
                operator: In
                values:
                - kafka
            topologyKey: "kubernetes.io/hostname"
        podAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 1
            podAffinityTerm:
              labelSelector:
                matchExpressions:
                - key: "app"
                  operator: In
                  values:
                  - zk
              topologyKey: "kubernetes.io/hostname"
      terminationGracePeriodSeconds: 300
      containers:
      - name: k8s-kafka
        imagePullPolicy: Always
        image: harbor.suzhuchang/devops/kafka:2.2.0
        resources:
          requests:
            memory: "600Mi"
            cpu: 500m
        ports:
        - containerPort: 9092
          name: server
        command:
        - sh
        - -c
        - "exec kafka-server-start.sh /opt/kafka/config/server.properties --override broker.id=${HOSTNAME##*-} \
          --override listeners=PLAINTEXT://:9092 \
          --override zookeeper.connect=zk-0.zk-hs.kafka.svc.cluster.local:2181,zk-1.zk-hs.kafka.svc.cluster.local:2181,zk-2.zk-hs.kafka.svc.cluster.local:2181 \
          --override log.dir=/var/lib/kafka \
          --override auto.create.topics.enable=true \
          --override auto.leader.rebalance.enable=true \
          --override background.threads=10 \
          --override compression.type=producer \
          --override delete.topic.enable=false \
          --override leader.imbalance.check.interval.seconds=300 \
          --override leader.imbalance.per.broker.percentage=10 \
          --override log.flush.interval.messages=9223372036854775807 \
          --override log.flush.offset.checkpoint.interval.ms=60000 \
          --override log.flush.scheduler.interval.ms=9223372036854775807 \
          --override log.retention.bytes=-1 \
          --override log.retention.hours=1 \
          --override log.roll.hours=168 \
          --override log.roll.jitter.hours=0 \
          --override log.segment.bytes=1073741824 \
          --override log.segment.delete.delay.ms=60000 \
          --override message.max.bytes=1000012 \
          --override min.insync.replicas=1 \
          --override num.io.threads=8 \
          --override num.network.threads=3 \
          --override num.recovery.threads.per.data.dir=1 \
          --override num.replica.fetchers=1 \
          --override offset.metadata.max.bytes=4096 \
          --override offsets.commit.required.acks=-1 \
          --override offsets.commit.timeout.ms=5000 \
          --override offsets.load.buffer.size=5242880 \
          --override offsets.retention.check.interval.ms=600000 \
          --override offsets.retention.minutes=1440 \
          --override offsets.topic.compression.codec=0 \
          --override offsets.topic.num.partitions=50 \
          --override offsets.topic.replication.factor=3 \
          --override offsets.topic.segment.bytes=104857600 \
          --override queued.max.requests=500 \
          --override quota.consumer.default=9223372036854775807 \
          --override quota.producer.default=9223372036854775807 \
          --override replica.fetch.min.bytes=1 \
          --override replica.fetch.wait.max.ms=500 \
          --override replica.high.watermark.checkpoint.interval.ms=5000 \
          --override replica.lag.time.max.ms=10000 \
          --override replica.socket.receive.buffer.bytes=65536 \
          --override replica.socket.timeout.ms=30000 \
          --override request.timeout.ms=30000 \
          --override socket.receive.buffer.bytes=102400 \
          --override socket.request.max.bytes=104857600 \
          --override socket.send.buffer.bytes=102400 \
          --override unclean.leader.election.enable=true \
          --override zookeeper.session.timeout.ms=6000 \
          --override zookeeper.set.acl=false \
          --override broker.id.generation.enable=true \
          --override connections.max.idle.ms=600000 \
          --override controlled.shutdown.enable=true \
          --override controlled.shutdown.max.retries=3 \
          --override controlled.shutdown.retry.backoff.ms=5000 \
          --override controller.socket.timeout.ms=30000 \
          --override default.replication.factor=1 \
          --override fetch.purgatory.purge.interval.requests=1000 \
          --override group.max.session.timeout.ms=300000 \
          --override group.min.session.timeout.ms=6000 \
          --override inter.broker.protocol.version=2.2.0 \
          --override log.cleaner.backoff.ms=15000 \
          --override log.cleaner.dedupe.buffer.size=134217728 \
          --override log.cleaner.delete.retention.ms=3200000 \
          --override log.cleaner.enable=true \
          --override log.cleaner.io.buffer.load.factor=0.9 \
          --override log.cleaner.io.buffer.size=524288 \
          --override log.cleaner.io.max.bytes.per.second=1.7976931348623157E308 \
          --override log.cleaner.min.cleanable.ratio=0.5 \
          --override log.cleaner.min.compaction.lag.ms=0 \
          --override log.cleaner.threads=1 \
          --override log.cleanup.policy=delete \
          --override log.index.interval.bytes=4096 \
          --override log.index.size.max.bytes=10485760 \
          --override log.message.timestamp.difference.max.ms=9223372036854775807 \
          --override log.message.timestamp.type=CreateTime \
          --override log.preallocate=false \
          --override log.retention.check.interval.ms=300000 \
          --override max.connections.per.ip=2147483647 \
          --override num.partitions=4 \
          --override producer.purgatory.purge.interval.requests=1000 \
          --override replica.fetch.backoff.ms=1000 \
          --override replica.fetch.max.bytes=1048576 \
          --override replica.fetch.response.max.bytes=10485760 \
          --override reserved.broker.max.id=1000 "
        env:
        - name: KAFKA_HEAP_OPTS
          value: "-Xmx512M -Xms512M"
        - name: KAFKA_OPTS
          value: "-Dlogging.level=INFO"
        volumeMounts:
        - name: datadir
          mountPath: /var/lib/kafka
        readinessProbe:
          tcpSocket:
            port: 9092
          timeoutSeconds: 1
          initialDelaySeconds: 5
      securityContext:
        runAsUser: 1000
        fsGroup: 1000
  volumeClaimTemplates:
  - metadata:
      name: datadir
    spec:
      accessModes: [ "ReadWriteOnce" ]
      storageClassName: rook-ceph-block
      resources:
        requests:
          storage: 50Gi
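With kafka.yaml above and the zk.yaml below in hand, this step can be applied and smoke-tested roughly as follows (a sketch: it assumes kubectl points at the target cluster and that the kafka image puts the Kafka CLI scripts on the PATH, as the startup command above implies):

kubectl apply -f zk.yaml       # bring up the ZooKeeper ensemble first
kubectl apply -f kafka.yaml
kubectl -n kafka get pods      # wait for zk-{0,1,2} and kafka-{0,1,2} to be Running
# List topics through ZooKeeper; Kafka 2.2 still accepts the --zookeeper flag:
kubectl -n kafka exec kafka-0 -- kafka-topics.sh --zookeeper zk-cs.kafka.svc.cluster.local:2181 --list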
cat zk.yaml
---
apiVersion: v1
kind: Service
metadata:
  name: zk-hs
  namespace: kafka
  labels:
    app: zk
spec:
  ports:
  - port: 2888
    name: server
  - port: 3888
    name: leader-election
  clusterIP: None
  selector:
    app: zk
---
apiVersion: v1
kind: Service
metadata:
  name: zk-cs
  namespace: kafka
  labels:
    app: zk
spec:
  ports:
  - port: 2181
    name: client
  selector:
    app: zk
---
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
  name: zk-pdb
  namespace: kafka
spec:
  selector:
    matchLabels:
      app: zk
  maxUnavailable: 1
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: zk
  namespace: kafka
spec:
  selector:
    matchLabels:
      app: zk
  serviceName: zk-hs
  replicas: 3
  updateStrategy:
    type: RollingUpdate
  podManagementPolicy: Parallel
  template:
    metadata:
      labels:
        app: zk
    spec:
      tolerations:
      - key: "travis.io/schedule-only"
        operator: "Equal"
        value: "kafka"
        effect: "NoSchedule"