Update scripts

Zhang Peng 2018-02-02 15:59:55 +08:00
parent 36c501a08e
commit 3a55232d6c
14 changed files with 36 additions and 704 deletions

View File

@ -1,88 +0,0 @@
#!/bin/bash
# xyz generic script for pulling source code
# Required arguments: repository name, branch name
# xyz source root directory
SOURCE_PATH=/home/zp
GITHUB_HOST=https://github.com/dunwu
gitok=false

isGitExist() {
    cd ${SOURCE_PATH}
    if [ -d "./${target}" ]; then
        cd ./${target}
        # (1) Remove the temporary git-status file
        if [ -f "gitstatus.tmp" ]; then
            rm -rf gitstatus.tmp
        fi
        # (2) Check whether a .git directory exists
        if [ -d "./.git" ]; then
            # (3) Check whether git is usable in this directory
            git status &> gitstatus.tmp
            grep -iwq 'not a git repository' gitstatus.tmp && gitok=false || gitok=true
        fi
        # Return to the source root directory
        cd ${SOURCE_PATH}
    fi
}

doFetchOrClone() {
    if ${gitok}; then
        cd ./${target}
        git reset --hard
        git clean -ffdx
        git fetch
        echo "git fetch of remote repository ${repository} succeeded"
    else
        # Remove everything so a fresh git clone can be made
        rm -rf ./${target}
        git clone --no-checkout ${GITHUB_HOST}/${repository} ./${target}
        echo "git clone of remote repository ${repository} succeeded"
        cd ./${target}
    fi
}

doCheckout() {
    echo "Checking out branch ${branch} of ${repository}"
    isRemoteBranch=false
    gitRemoteBranch=`git branch -r`
    echo -e "$gitRemoteBranch" | grep -iwq ${branch} && isRemoteBranch=true || isRemoteBranch=false
    if ${isRemoteBranch}; then
        echo "Found a remote branch matching ${branch}"
        git checkout -f 'origin/'${branch}
    else
        git checkout -f ${branch}
    fi
    echo "Updating submodules"
    git submodule update --init --recursive --force
}

##############################__MAIN__########################################
export LANG="zh_CN.UTF-8"
# 0. Check the input arguments
repository=$1
branch=$2
target=$3
if [ "${repository}" == "" ] || [ "${branch}" == "" ]; then
    echo "Usage: repository branch target"
    echo "  repository: git repository."
    echo "  branch: git branch."
    echo "  target: directory to place the code in. Optional; defaults to the script directory."
    exit 0
fi
# 1. Check whether a local git repository already exists
isGitExist
# 2. If the code already exists locally, fetch; otherwise clone from the remote
doFetchOrClone
# 3. Switch to the specified branch
doCheckout
echo "Code checkout finished!"

View File

@ -79,7 +79,7 @@ function chooseOper() {
     nodejs) ${filepath}/tool/nodejs/install-nodejs.sh;;
     tomcat) ${filepath}/tool/tomcat/install-tomcat8.sh;;
     elk) ${filepath}/tool/elk/install-elk.sh;;
-    * ) echo "invalid key";;
+    * ) echo "${key} is invalid key";;
     esac
     showMenu

View File

@ -1,19 +1,12 @@
 #!/bin/bash -li
-app=$1
-ELASTICSEARCH_BIN_PATH=/opt/software/elastic/elasticsearch-6.1.1/bin
-LOGSTASH_BIN_PATH=/opt/software/elastic/logstash-6.1.1/bin
-KIBANA_BIN_PATH=/opt/software/elastic/kibana-6.1.1-linux-x86_64/bin
-FILEBEAT_PATH=/opt/software/elastic/filebeat-6.1.1-linux-x86_64
 # Check the script input arguments
 checkInput() {
-    if [ "${app}" == "" ]; then
+    if [ "${app}" == "" ] || [ "${oper}" == "" ]; then
         echo "Please enter the script arguments"
-        echo "  name: keyword of the process to start (required). Options: elasticsearch|logstash|kibana|filebeat"
-        echo "Example: ./shutdown.sh logstash"
+        echo "  app: keyword of the process to start (required). Options: elasticsearch|logstash|kibana|filebeat"
+        echo "  oper: operation to perform (required). Options: start|stop"
+        echo "Example: ./boot-elk.sh logstash start"
         exit 0
     fi
@ -39,7 +32,7 @@ startup() {
         nohup sh ${ELASTICSEARCH_BIN_PATH}/elasticsearch >>${ELASTICSEARCH_BIN_PATH}/nohup.out 2>&1 &
     elif [ "${app}" == "logstash" ]; then
         checkFileExist ${LOGSTASH_BIN_PATH}/logstash
-        nohup sh ${LOGSTASH_BIN_PATH}/logstash -f ${LOGSTASH_BIN_PATH}/logstash-input-tcp.conf >>${LOGSTASH_BIN_PATH}/nohup.out 2>&1 &
+        nohup sh ${LOGSTASH_BIN_PATH}/logstash -f ${LOGSTASH_BIN_PATH}/logstash.conf >>${LOGSTASH_BIN_PATH}/nohup.out 2>&1 &
     elif [ "${app}" == "kibana" ]; then
         checkFileExist ${KIBANA_BIN_PATH}/kibana
         nohup sh ${KIBANA_BIN_PATH}/kibana >> ${KIBANA_BIN_PATH}/nohup.out 2>&1 &
@ -50,6 +43,31 @@ startup() {
     fi
 }
+shutdown() {
+    pid=`ps -ef | grep java | grep ${app} | awk '{print $2}'`
+    kill -9 ${pid}
+}
 ##############################__MAIN__########################################
+app=$1
+oper=$2
+version=6.1.1
+ELASTICSEARCH_BIN_PATH=/opt/software/elastic/elasticsearch-${version}/bin
+LOGSTASH_BIN_PATH=/opt/software/elastic/logstash-${version}/bin
+KIBANA_BIN_PATH=/opt/software/elastic/kibana-${version}-linux-x86_64/bin
+FILEBEAT_PATH=/opt/software/elastic/filebeat-${version}-linux-x86_64
 checkInput
+case ${oper} in
+    start)
+        echo "Starting ${app}"
         startup
+        ;;
+    stop)
+        echo "Stopping ${app}"
+        shutdown
+        ;;
+    * ) echo "${oper} is invalid oper";;
+esac
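For reference, a hypothetical round trip with the reworked script, assuming the ELK binaries sit under the paths configured above:

./boot-elk.sh elasticsearch start
./boot-elk.sh elasticsearch stop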

View File

@ -91,7 +91,7 @@ replaceLogstashConfig() {
     sed -i "s/# http.host: \"127.0.0.1\"/ http.host: ${IP}/g" ${ELASTIC_SOFTWARE_PATH}/logstash-${version}/config/logstash.yml
     touch ${ELASTIC_SOFTWARE_PATH}/logstash-${version}/bin/nohup.out
     cd ${ELASTIC_SOFTWARE_PATH}/logstash-${version}/bin
-    wget https://github.com/dunwu/linux-notes/blob/master/codes/deploy/tool/elk/config/logstash-input-tcp.conf
+    wget https://github.com/dunwu/linux-notes/blob/master/codes/deploy/tool/elk/config/logstash.conf
 }
 # Replace the Kibana configuration
@ -138,5 +138,8 @@ replaceKibanaConfig
 installFilebeat
 replaceFilebeatConfig
+# Finally, download the boot script to the local machine
+mkdir -p /home/zp/script
+wget -P /home/zp/script "https://raw.githubusercontent.com/dunwu/linux/master/codes/deploy/tool/elk/boot-elk.sh"
 #setPrivilegeForUser

View File

@ -1,4 +0,0 @@
# deploy
> The code in this directory is used to deploy the Linux environment.
>

View File

@ -1,167 +0,0 @@
#!/usr/bin/env bash
#######################################################################
# Exit-on-error switch: if a command returns a non-zero status code
# (0 means success), the script stops executing.
# This script is intended for CentOS/RedHat.
#######################################################################

# Get the IP of the current machine
IP=""
getDeviceIp() {
    IP=`ifconfig eth0 | grep "inet addr" | awk '{ print $2}' | awk -F: '{print $2}'`
    if [ "$IP" == "" ]; then
        IP=`ifconfig ens32 | grep "inet" | grep "broadcast" | awk '{ print $2}' | awk -F: '{print $1}'`
    fi
    echo "Starting environment deployment on ${IP}" | tee -a ${DEPLOY_LOG_PATH}
    if [ "$IP" == "" ]; then
        # Fall back to the IP passed in as the first argument
        IP=$1
    fi
    if [ "${IP}" == "" ]; then
        echo " "
        echo " Please enter the server IP address................ "
        echo " "
        exit 0
    fi
}

# Prepare the deployment log file
init() {
    mkdir -p `dirname ${DEPLOY_LOG_PATH}`
    touch ${DEPLOY_LOG_PATH}
    chmod 777 ${DEPLOY_LOG_PATH}
}

installGit() {
    echo "Installing git" | tee -a ${DEPLOY_LOG_PATH}
    yum install -y git-core
    yum install -y git
}

copyXyzdeploy() {
    echo "Cloning the xyzdeploy project to the local machine" | tee -a ${DEPLOY_LOG_PATH}
    rm -rf ${SOFTWARE_ROOT}*
    rm -rf ${DEPLOY_ROOT}
    git clone git@github.com:dunwu/linux-notes.git ${DEPLOY_ROOT}
    chmod -R 755 ${DEPLOY_ROOT}/*
    cp -rf ${DEPLOY_ROOT}/software ${SOFTWARE_ROOT}
    cp -rf ${DEPLOY_ROOT}/config/ /home/zp/
    cp -rf ${DEPLOY_ROOT}/script/ /home/zp/
    sed -i 's/127.0.0.1/'"${IP}"'/g' /home/zp/config/nginx/vmhosts/*.conf
}

initEnviromentConfig() {
    echo "Updating the environment config files profile and hosts" | tee -a ${DEPLOY_LOG_PATH}
    if [ ! -f /etc/profile.bak ]; then
        cp -f /etc/profile /etc/profile.bak
    fi
    cp -f ${DEPLOY_ROOT}/config/enviroment/profile /etc/profile
    source /etc/profile
    if [ ! -f /etc/hosts.bak ]; then
        cp -f /etc/hosts /etc/hosts.bak
    fi
    cp -f ${DEPLOY_ROOT}/config/enviroment/hosts /etc/hosts
    sed -i 's/0.0.0.0/'"${IP}"'/g' /etc/hosts
}

installJava() {
    echo "Installing the JDK" | tee -a ${DEPLOY_LOG_PATH}
    yum -y install java-1.8.0-openjdk-devel-debug.x86_64 | tee -a ${DEPLOY_LOG_PATH}
}

installMaven() {
    echo "Installing maven" | tee -a ${DEPLOY_LOG_PATH}
    mkdir -p ${SOFTWARE_ROOT}/maven
    cd ${SOFTWARE_ROOT}/maven
    echo "Downloading and extracting apache-maven-3.5.2-bin.tar.gz" | tee -a ${DEPLOY_LOG_PATH}
    wget --no-check-certificate --no-cookies --header "Cookie: oraclelicense=accept-securebackup-cookie" http://mirrors.shuosc.org/apache/maven/maven-3/3.5.2/binaries/apache-maven-3.5.2-bin.tar.gz
    tar -zxvf apache-maven-3.5.2-bin.tar.gz
}

installGcc() {
    echo "Installing gcc" | tee -a ${DEPLOY_LOG_PATH}
    yum -y install make gcc gcc-c++ kernel-devel
}

installZlib() {
    echo "Installing zlib" | tee -a ${DEPLOY_LOG_PATH}
    yum -y install make zlib zlib-devel libtool openssl openssl-devel
}

installOpenssl() {
    echo "Installing openssl" | tee -a ${DEPLOY_LOG_PATH}
    yum -y install make openssl openssl-devel
}

installPcre() {
    echo "Installing pcre" | tee -a ${DEPLOY_LOG_PATH}
    yum -y install pcre-devel.x86_64
}

installNginx() {
    echo "Installing Nginx" | tee -a ${DEPLOY_LOG_PATH}
    yum -y install make nginx.x86_64
    cp /etc/nginx/mime.types /usr/local/nginx/conf/
}

installNodejsAndNvm() {
    echo "Installing nvm and Node.js" | tee -a ${DEPLOY_LOG_PATH}
    rm -rf /home/admin/.nvm
    git clone https://github.com/creationix/nvm.git ~/.nvm && cd ~/.nvm
    source ~/.nvm/nvm.sh
    # Install the specified Node version with nvm
    nvm install 0.10.48
}

installNtp() {
    echo "************************ Syncing the clock ************************" | tee -a ${DEPLOY_LOG_PATH}
    yum install -y ntp
    # Append a cron entry that syncs the clock every 30 minutes
    echo "*/30 * * * * /usr/local/bin/ntpdate 192.168.16.182" | tee -a /etc/crontab
}

shutdownFirewall() {
    echo "************************ Disabling the firewall ************************" | tee -a ${DEPLOY_LOG_PATH}
    /etc/init.d/iptables stop
    chkconfig --level 35 iptables off
}

# Recreate the INS user and group and give them ownership of the working directories
setPrivilegeForUserIns() {
    userdel INS
    groupdel INS
    groupadd INS
    useradd -g INS INS
    mkdir -p /search/statistics
    mkdir -p /home/mic
    mkdir -p /home/INS/logs
    chown -R INS.INS /home/mic
    chown -R INS.INS /search/
    chown -R INS.INS /home/INS/
    chown -R INS.INS /opt/
    chown -R INS.INS /tmp/
}

##############################__MAIN__########################################
DEPLOY_LOG_PATH=/home/zp/log/deploy.log
DEPLOY_ROOT=/home/zp/source/xyzdeploy
SOFTWARE_ROOT=/opt/software
init
getDeviceIp "$1"
installGit
copyXyzdeploy
initEnviromentConfig
installJava
installGcc
installZlib
installOpenssl
installPcre
installNginx
installMaven
installNodejsAndNvm
installNtp
shutdownFirewall
setPrivilegeForUserIns
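A hypothetical spot check once the script finishes, assuming the yum and nvm installs above succeeded:

git --version
nginx -v
node -v   # available in this shell because nvm.sh has been sourced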

View File

@ -1,177 +0,0 @@
###################### Filebeat Configuration Example #########################
# This file is an example configuration file highlighting only the most common
# options. The filebeat.reference.yml file from the same directory contains all the
# supported options with more comments. You can use it as a reference.
#
# You can find the full configuration reference here:
# https://www.elastic.co/guide/en/beats/filebeat/index.html
# For more available modules and options, please see the filebeat.reference.yml sample
# configuration file.
#=========================== Filebeat prospectors =============================
filebeat.prospectors:
# Each - is a prospector. Most options can be set at the prospector level, so
# you can use different prospectors for various configurations.
# Below are the prospector specific configurations.
- type: log

  # Change to true to enable this prospector configuration.
  enabled: true

  # Paths that should be crawled and fetched. Glob based paths.
  paths:
    #- /var/log/*.log
    #- c:\programdata\elasticsearch\logs\*
    - /home/zp/log/*.log
# Exclude lines. A list of regular expressions to match. It drops the lines that are
# matching any regular expression from the list.
#exclude_lines: ['^DBG']
# Include lines. A list of regular expressions to match. It exports the lines that are
# matching any regular expression from the list.
#include_lines: ['^ERR', '^WARN']
# Exclude files. A list of regular expressions to match. Filebeat drops the files that
# are matching any regular expression from the list. By default, no files are dropped.
#exclude_files: ['.gz$']
# Optional additional fields. These fields can be freely picked
# to add additional information to the crawled log files for filtering
#fields:
# level: debug
# review: 1
### Multiline options
# Multiline can be used for log messages spanning multiple lines. This is common
# for Java Stack Traces or C-Line Continuation
# The regexp Pattern that has to be matched. The example pattern matches all lines starting with [
#multiline.pattern: ^\[
# Defines if the pattern set under pattern should be negated or not. Default is false.
#multiline.negate: false
# Match can be set to "after" or "before". It is used to define if lines should be appended to a pattern
# that was (not) matched before or after, or as long as a pattern is not matched based on negate.
# Note: After is the equivalent to previous and before is the equivalent to next in Logstash
#multiline.match: after
#============================= Filebeat modules ===============================
filebeat.config.modules:
  # Glob pattern for configuration loading
  path: ${path.config}/modules.d/*.yml

  # Set to true to enable config reloading
  reload.enabled: true

  # Period on which files under path should be checked for changes
  #reload.period: 10s
#==================== Elasticsearch template setting ==========================
setup.template.settings:
  index.number_of_shards: 3
  #index.codec: best_compression
  #_source.enabled: false
#================================ General =====================================
# The name of the shipper that publishes the network data. It can be used to group
# all the transactions sent by a single shipper in the web interface.
name: 127.0.0.1
# The tags of the shipper are included in their own field with each
# transaction published.
#tags: ["service-X", "web-tier"]
# Optional fields that you can specify to add additional information to the
# output.
fields:
  profile: development
#============================== Dashboards =====================================
# These settings control loading the sample dashboards to the Kibana index. Loading
# the dashboards is disabled by default and can be enabled either by setting the
# options here, or by using the `-setup` CLI flag or the `setup` command.
setup.dashboards.enabled: true
# The URL from where to download the dashboards archive. By default this URL
# has a value which is computed based on the Beat name and version. For released
# versions, this URL points to the dashboard archive on the artifacts.elastic.co
# website.
#setup.dashboards.url:
#============================== Kibana =====================================
# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.
# This requires a Kibana endpoint configuration.
setup.kibana:
  # Kibana Host
  # Scheme and port can be left out and will be set to the default (http and 5601)
  # In case you specify an additional path, the scheme is required: http://localhost:5601/path
  # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
  host: "192.168.28.11:5601"
#============================= Elastic Cloud ==================================
# These settings simplify using filebeat with the Elastic Cloud (https://cloud.elastic.co/).
# The cloud.id setting overwrites the `output.elasticsearch.hosts` and
# `setup.kibana.host` options.
# You can find the `cloud.id` in the Elastic Cloud web UI.
#cloud.id:
# The cloud.auth setting overwrites the `output.elasticsearch.username` and
# `output.elasticsearch.password` settings. The format is `<user>:<pass>`.
#cloud.auth:
#================================ Outputs =====================================
# Configure what output to use when sending the data collected by the beat.
#-------------------------- Elasticsearch output ------------------------------
#output.elasticsearch:
  # Array of hosts to connect to.
  #hosts: ["192.168.28.11:9200"]

  # Optional protocol and basic auth credentials (left commented out because the Logstash output below is used instead).
  #protocol: "http"
  #username: "elastic"
  #password: "changeme"
#----------------------------- Logstash output --------------------------------
output.logstash:
  # The Logstash hosts
  hosts: ["192.168.28.32:5044"]

  # Optional SSL. By default is off.
  # List of root certificates for HTTPS server verifications
  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]

  # Certificate for SSL client authentication
  #ssl.certificate: "/etc/pki/client/cert.pem"

  # Client Certificate Key
  #ssl.key: "/etc/pki/client/cert.key"
#================================ Logging =====================================
# Sets log level. The default log level is info.
# Available log levels are: critical, error, warning, info, debug
#logging.level: debug
# At debug level, you can selectively enable logging only for some components.
# To enable all selectors use ["*"]. Examples of other selectors are "beat",
# "publish", "service".
logging.selectors: ["*"]
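As a sanity check, Filebeat 6.x can validate this file and the connection to the Logstash output before being started (run from the Filebeat install directory):

./filebeat test config -c filebeat.yml
./filebeat test output -c filebeat.yml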

View File

@ -1,57 +0,0 @@
<?xml version="1.0" encoding="UTF-8" ?>
<!-- logback has five effective levels: TRACE, DEBUG, INFO, WARN and ERROR, in increasing order of priority -->
<configuration scan="true" scanPeriod="60 seconds" debug="false">
    <property name="FILE_NAME" value="javatool"/>

    <!-- Print log records to the console -->
    <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
        <encoder>
            <pattern>%d{HH:mm:ss.SSS} [%thread] [%-5p] %c{36}.%M - %m%n</pattern>
        </encoder>
    </appender>

    <!-- RollingFileAppender begin -->
    <appender name="ALL" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <!-- Time-based rolling policy -->
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <fileNamePattern>${user.dir}/logs/${FILE_NAME}-all.%d{yyyy-MM-dd}.log</fileNamePattern>
            <maxHistory>30</maxHistory>
        </rollingPolicy>
        <!-- Size-based triggering policy -->
        <triggeringPolicy class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">
            <maxFileSize>30MB</maxFileSize>
        </triggeringPolicy>
        <encoder>
            <pattern>%d{HH:mm:ss.SSS} [%thread] [%-5p] %c{36}.%M - %m%n</pattern>
        </encoder>
    </appender>

    <appender name="ELK-TCP" class="net.logstash.logback.appender.LogstashTcpSocketAppender">
        <!--
        destination is the host:port of the logstash service.
        It effectively opens a pipe to logstash and forwards the log data to it.
        -->
        <destination>192.168.28.32:9251</destination>
        <encoder charset="UTF-8" class="net.logstash.logback.encoder.LogstashEncoder">
            <customFields>{"appname":"javatool"}</customFields>
        </encoder>
    </appender>
    <!-- RollingFileAppender end -->

    <!-- logger begin -->
    <!-- Logging for this project, printed by level -->
    <logger name="cn.xyz" level="TRACE">
        <appender-ref ref="ELK-TCP"/>
        <appender-ref ref="ALL"/>
    </logger>
    <root level="TRACE">
        <appender-ref ref="STDOUT"/>
    </root>
    <!-- logger end -->
</configuration>
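Before pointing an application at the ELK-TCP appender, it may help to confirm that the Logstash TCP input is reachable from the application host (assumes nc/netcat is installed):

nc -zv 192.168.28.32 9251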

View File

@ -1,12 +0,0 @@
input {
    tcp {
        port => 9251
        codec => json_lines
        mode => server
        tags => ["javaapp"]
    }
}
output {
    elasticsearch { hosts => ["localhost:9200"] }
    stdout { codec => rubydebug }
}
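With this pipeline running, a single test event can be pushed through the TCP input to verify the chain end to end (assumes nc is available on the Logstash host; the field values are only examples):

# json_lines expects one JSON document per line; some nc builds need -q1 or -N to close after sending
echo '{"message":"hello from nc","appname":"javatool"}' | nc localhost 9251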

View File

@ -1,139 +0,0 @@
#!/usr/bin/env bash
# One-click ELK installation script.
# Before running this script, create the elk user with the following commands:
# groupadd elk
# useradd -g elk elk
# passwd elk

# Get the IP of the current device
IP=""
getDeviceIp() {
    IP=`ifconfig eth0 | grep "inet" | awk '{ print $2}' | awk -F: '{print $2}'`
    if [ "$IP" == "" ]; then
        IP=`ifconfig eth0 | grep "inet" | awk '{ print $2}'`
    fi
    if [ "$IP" == "" ]; then
        IP=`ifconfig ens32 | grep "inet" | grep "broadcast" | awk '{ print $2}' | awk -F: '{print $1}'`
    fi
    if [ "${IP}" == "" ]; then
        echo " "
        echo " Please enter the server IP address................ "
        echo " "
        exit 0
    else
        echo "Current device IP: $IP"
    fi
}

# Check whether a file exists; exit the script if it does not
checkFileExist() {
    if [ ! -f "$1" ]; then
        echo "Critical file $1 not found, aborting the script"
        exit 0
    fi
}

init() {
    mkdir -p ${ELASTIC_SOFTWARE_PATH}
    getDeviceIp
}

# Install elasticsearch
installElasticsearch() {
    cd ${ELASTIC_SOFTWARE_PATH}
    wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-${version}.tar.gz
    tar -xzf elasticsearch-${version}.tar.gz
}

installRuby() {
    cd ${RUBY_SOFTWARE_PATH}
    wget https://cache.ruby-lang.org/pub/ruby/2.5/ruby-2.5.0.tar.gz
    tar -xzf ruby-2.5.0.tar.gz
    cd ruby-2.5.0
    ./configure
    make && make install
}

# Install logstash
installLogstash() {
    cd ${ELASTIC_SOFTWARE_PATH}
    wget https://artifacts.elastic.co/downloads/logstash/logstash-${version}.tar.gz
    tar -xzf logstash-${version}.tar.gz
}

# Install kibana
installKibana() {
    cd ${ELASTIC_SOFTWARE_PATH}
    wget https://artifacts.elastic.co/downloads/kibana/kibana-${version}-linux-x86_64.tar.gz
    tar -xzf kibana-${version}-linux-x86_64.tar.gz
}

# Install filebeat
installFilebeat() {
    cd ${ELASTIC_SOFTWARE_PATH}
    wget https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-${version}-linux-x86_64.tar.gz
    tar -zxf filebeat-${version}-linux-x86_64.tar.gz
}

# Replace the Elasticsearch configuration
# 1. Replace 192.168.0.1 with the local IP
replaceElasticsearchConfig() {
    cp ${ELASTIC_SOFTWARE_PATH}/elasticsearch-${version}/config/elasticsearch.yml ${ELASTIC_SOFTWARE_PATH}/elasticsearch-${version}/config/elasticsearch.yml.bak
    sed -i "s/#network.host: 192.168.0.1/network.host: ${IP}/g" ${ELASTIC_SOFTWARE_PATH}/elasticsearch-${version}/config/elasticsearch.yml
    touch ${ELASTIC_SOFTWARE_PATH}/elasticsearch-${version}/bin/nohup.out
}

replaceLogstashConfig() {
    cp ${ELASTIC_SOFTWARE_PATH}/logstash-${version}/config/logstash.yml ${ELASTIC_SOFTWARE_PATH}/logstash-${version}/config/logstash.yml.bak
    sed -i "s/# http.host: \"127.0.0.1\"/ http.host: ${IP}/g" ${ELASTIC_SOFTWARE_PATH}/logstash-${version}/config/logstash.yml
    touch ${ELASTIC_SOFTWARE_PATH}/logstash-${version}/bin/nohup.out
    cd ${ELASTIC_SOFTWARE_PATH}/logstash-${version}/bin
    wget https://github.com/dunwu/linux-notes/blob/master/codes/deploy/elk/config/logstash-input-tcp.conf
}

# Replace the Kibana configuration
# 1. Replace localhost with the local IP
replaceKibanaConfig() {
    cp ${ELASTIC_SOFTWARE_PATH}/kibana-${version}-linux-x86_64/config/kibana.yml ${ELASTIC_SOFTWARE_PATH}/kibana-${version}-linux-x86_64/config/kibana.yml.bak
    sed -i "s/#server.host: \"localhost\"/server.host: ${IP}/g" ${ELASTIC_SOFTWARE_PATH}/kibana-${version}-linux-x86_64/config/kibana.yml
    # Use | as the sed delimiter because the pattern contains a URL with slashes
    sed -i "s|#elasticsearch.url: \"http://localhost:9200\"|#elasticsearch.url: \"${IP}\"|g" ${ELASTIC_SOFTWARE_PATH}/kibana-${version}-linux-x86_64/config/kibana.yml
    touch ${ELASTIC_SOFTWARE_PATH}/kibana-${version}-linux-x86_64/bin/nohup.out
}

# Replace the Filebeat configuration
replaceFilebeatConfig() {
    cp ${ELASTIC_SOFTWARE_PATH}/filebeat-${version}-linux-x86_64/filebeat.yml ${ELASTIC_SOFTWARE_PATH}/filebeat-${version}-linux-x86_64/filebeat.yml.bak
    cd ${ELASTIC_SOFTWARE_PATH}/filebeat-${version}-linux-x86_64
    wget https://github.com/dunwu/linux-notes/blob/master/codes/deploy/elk/config/filebeat.yml
    sed -i 's/127.0.0.1/'"${IP}"'/g' ${ELASTIC_SOFTWARE_PATH}/filebeat-${version}-linux-x86_64/filebeat.yml
}

# Set privileges for the elk.elk user
setPrivilegeForUser() {
    chown -R elk.elk ${ELASTIC_SOFTWARE_PATH}
    chown -R elk.elk /var/log/
}

######################################## MAIN ########################################
version=6.1.1
RUBY_SOFTWARE_PATH=/opt/software/ruby
ELASTIC_SOFTWARE_PATH=/opt/software/elastic
ELASTIC_SETTINGS_PATH=/opt/software/elastic/settings
init
installElasticsearch
replaceElasticsearchConfig
installLogstash
replaceLogstashConfig
installKibana
replaceKibanaConfig
installFilebeat
replaceFilebeatConfig
#setPrivilegeForUser
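Once the services are started (for example with boot-elk.sh), a hypothetical quick check that Elasticsearch and Kibana answer on their default ports:

curl http://${IP}:9200      # Elasticsearch banner with cluster and version info
curl -I http://${IP}:5601   # Kibana should return an HTTP status line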

View File

@ -1,27 +0,0 @@
#!/bin/bash -li
app=$1

checkInput() {
    if [ "${app}" == "" ]; then
        echo "Please enter the script argument: name"
        echo "  name: keyword of the process to stop (required). Options: elasticsearch|logstash|kibana|filebeat"
        echo "Example: ./shutdown.sh logstash"
        exit 0
    fi
    if [ "${app}" != "elasticsearch" ] && [ "${app}" != "logstash" ] && [ "${app}" != "kibana" ] && [ "${app}" != "filebeat" ]; then
        echo "Invalid value for name"
        echo "Options: elasticsearch|logstash|kibana|filebeat"
        exit 0
    fi
}

shutdown() {
    # Take the first PID whose command line matches the keyword and kill it
    PID=`ps -ef | grep ${app} | grep -v grep | awk '{ print $2}' | head -n 1`
    kill -9 ${PID}
}

##############################__MAIN__########################################
checkInput
shutdown

View File

@ -1,18 +0,0 @@
# Environment variables
export LANG=en_US.UTF-8
export JAVA_HOME=/opt/software/java/jdk1.6.0_38
export MAVEN_HOME=/opt/software/maven/apache-maven-3.0.5
export CLASSPATH=$CLASSPATH:$JAVA_HOME/lib:$JAVA_HOME/jre/lib
export PATH=$JAVA_HOME/bin:$JAVA_HOME/jre/bin:$PATH:$MAVEN_HOME/bin
# HTTP proxy
# If the network the VM is connected to requires a proxy, configure it here
# export http_proxy=****
# export https_proxy=****
# export no_proxy="****"
# nvm
export NVM_DIR="$HOME/.nvm"
[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh" # This loads nvm
[ -s "$NVM_DIR/bash_completion" ] && \. "$NVM_DIR/bash_completion" # This loads nvm bash_completion