如何搭建⼀个完整的ELK⽇志分析平台(附:教程,源码)
⼀、环境准备
1.安装java环境:
yum install java-1.8.0-openjdk* -y
2.添加elk执⾏⽤户:
groupadd -g 77 elk
useradd -u 77 -g elk -d /home/elk -s /bin/bash elk
3.在 /etc/security/limits.conf 追加以下内容:
elk soft memlock unlimited
elk hard memlock unlimited
soft nofile 65536
hard nofile 131072
4.使配置⽣效
注:/etc/security/limits.conf 的改动由 PAM 在新会话登录时加载,重新登录即可⽣效(sysctl -p 仅对 /etc/sysctl.conf 内核参数有效)
5.配置主机名
hostnamectl set-hostname monitor-elk
echo "10.135.3.135 monitor-elk" >> /etc/hosts
⼆、服务部署
1.服务端:
1)下载ELK相关的源码包:
2)创建elk⽬录,并将以上源码包解压⾄该⽬录:
# Create the ELK install prefix and per-service data directories,
# then strip version suffixes from the unpacked directories so that
# config paths stay stable across upgrades.
mkdir -p /usr/local/elk
mkdir -p /data/elasticsearch/
# Use the portable "user:group" separator — the "user.group" form is
# a deprecated GNU extension and breaks on usernames containing dots.
chown -R elk:elk /data/elasticsearch/
mkdir -p /data/{kafka,zookeeper}
mv logstash-5.2.2 logstash && mv kibana-5.2.2-linux-x86_64 kibana && mv elasticsearch-5.2.2 elasticsearch && mv filebeat-5.2.2-linux-x86_64 filebeat && mv kafka_2.12-0.10.2.0 kafka && mv zookeeper-3.4.9 zookeeper
# Elasticsearch refuses to run as root, so the whole tree must belong to elk.
chown -R elk:elk /usr/local/elk/
程序⽬录列表如下:
3)修改以下程序的相应配置⽂件
①kibana:
②elasticsearch:
[root@monitor-elk ~]# cat /usr/local/elk/elasticsearch/config/elasticsearch.yml | grep -v "^#"
node.name: node01
path.data: /data/elasticsearch/data
path.logs: /data/elk/logs/elasticsearch
bootstrap.memory_lock: true
network.host: 127.0.0.1
http.port: 9200
[root@monitor-elk ~]# vim /usr/local/elk/elasticsearch/config/jvm.options
#修改以下参数
-Xms1g
-Xmx1g
③logstash:
[root@monitor-elk ~]# cat /usr/local/elk/logstash/config/logstash.conf
input {
  # Consume log events from Kafka as the pipeline's data source.
  kafka {
    bootstrap_servers => ["127.0.0.1:9092"]
    topics => "beats"
    codec => json
  }
}

filter {
  # Drop any event whose message contains this IP address.
  if [message] =~ "123.151.4.10" {
    drop{}
  }

  # URL-decode every field so encoded text (e.g. Chinese) becomes readable.
  urldecode {
    all_fields => true
  }

  # --- nginx access ---
  # Dispatch on the "type" field to choose the right parser.
  if [type] == "hongbao-nginx-access" or [type] == "pano-nginx-access" or [type] == "logstash-nginx-access" {
    grok {
      # Directory holding the custom grok pattern definitions.
      patterns_dir => "./patterns"
      # Split the raw message into named fields with the custom pattern.
      match => { "message" => "%{NGINXACCESS}" }
      # Remove the raw message once it has been parsed.
      remove_field => ["message"]
    }
    # Resolve geographic info for the client IP via the GeoIP database.
    geoip {
      # Field used as the lookup source.
      source => "clientip"
      fields => ["country_name", "ip", "region_name"]
    }
    date {
      # Match timestamps such as 05/Jun/2017:03:54:01 +0800.
      match => ["timestamp","dd/MMM/yyyy:HH:mm:ss Z"]
      # Use the parsed time as the event's @timestamp.
      target => "@timestamp"
      remove_field => ["timestamp"]
    }
  }

  # --- tomcat access ---
  if [type] == "hongbao-tomcat-access" or [type] == "ljq-tomcat-access" {
    grok {
      patterns_dir => "./patterns"
      match => { "message" => "%{TOMCATACCESS}" }
      remove_field => ["message"]
    }
    geoip {
      source => "clientip"
      fields => ["country_name", "ip", "region_name"]
    }
    date {
      match => ["timestamp","dd/MMM/yyyy:HH:mm:ss Z"]
      target => "@timestamp"
      remove_field => ["timestamp"]
    }
  }

  # --- tomcat catalina ---
  if [type] == "hongbao-tomcat-catalina" {
    grok {
      # Parse "2017-06-05 03:54:01,123 INFO ..." style lines; the middle
      # capture name was lost in the original paste — restored as log_level.
      match => {
        "message" => "^(?<log_time>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3}) (?<log_level>\w*) (?<log_data>.+)"
      }
      remove_field => ["message"]
    }
    date {
      match => ["log_time","yyyy-MM-dd HH:mm:ss,SSS"]
      target => "@timestamp"
      remove_field => ["log_time"]
    }
  }
}

output {
  # Events grok failed to parse go to a per-type, per-month file.
  if "_grokparsefailure" in [tags] {
    file {
      path => "/data/elk/logs/grokparsefailure-%{[type]}-%{+YYYY.MM}.log"
    }
  }

  # --- nginx access ---
  # Route each log type to its own daily Elasticsearch index.
  if [type] == "hongbao-nginx-access" {
    elasticsearch {
      hosts => ["127.0.0.1:9200"]
      index => "hongbao-nginx-access-%{+YYYY.MM.dd}"
    }
  }
  if [type] == "pano-nginx-access" {
    elasticsearch {
      hosts => ["127.0.0.1:9200"]
      index => "pano-nginx-access-%{+YYYY.MM.dd}"
    }
  }
  if [type] == "logstash-nginx-access" {
    elasticsearch {
      hosts => ["127.0.0.1:9200"]
      index => "logstash-nginx-access-%{+YYYY.MM.dd}"
    }
  }

  # --- tomcat access ---
  if [type] == "hongbao-tomcat-access" {
    elasticsearch {
      hosts => ["127.0.0.1:9200"]
      index => "hongbao-tomcat-access-%{+YYYY.MM.dd}"
    }
  }
  if [type] == "ljq-tomcat-access" {
    elasticsearch {
      hosts => ["127.0.0.1:9200"]
      index => "ljq-tomcat-access-%{+YYYY.MM.dd}"
    }
  }

  # --- tomcat catalina ---
  if [type] == "hongbao-tomcat-catalina" {
    elasticsearch {
      hosts => ["127.0.0.1:9200"]
      index => "hongbao-tomcat-catalina-%{+YYYY.MM.dd}"
    }
  }
}
[root@monitor-elk ~]#
配置正则表达式
[root@monitor-elk ~]# cp /usr/local/elk/logstash/vendor/bundle/jruby/1.9/gems/logstash-patterns-core-4.0.2/patterns/grok-patterns /usr/local/elk/logstash/config/patterns
[root@monitor-elk ~]# tail -5 /usr/local/elk/logstash/config/patterns
Nginx
NGINXACCESS %{COMBINEDAPACHELOG} %{QS:x_forwarded_for}
Tomcat
TOMCATACCESS %{COMMONAPACHELOG}
[root@monitor-elk ~]# chown elk:elk /usr/local/elk/logstash/config/patterns
4)配置zookeeper:
cp /usr/local/elk/zookeeper/conf/zoo_sample.cfg /usr/local/elk/zookeeper/conf/zoo.cfg
修改配置⽂件中的数据存储路径
vim /usr/local/elk/zookeeper/conf/zoo.cfg
dataDir=/data/zookeeper
备份并修改脚本 /usr/local/elk/zookeeper/bin/zkEnv.sh
修改以下变量的参数
ZOO_LOG_DIR="/data/zookeeper-logs"
ZOO_LOG4J_PROP="INFO,ROLLINGFILE"
备份并修改⽇志配置 /usr/local/elk/zookeeper/conf/log4j.properties
修改以下变量的参数
# 使⽤ DailyRollingFileAppender 每天轮转⽇志(properties ⽂件不⽀持⾏内注释,注释需单独成⾏)
log4j.appender.ROLLINGFILE=org.apache.log4j.DailyRollingFileAppender
版权声明:本站内容均来自互联网,仅供演示用,请勿用于商业和其他非法用途。如果侵犯了您的权益请与我们联系QQ:729038198,我们将在24小时内删除。
发表评论