ES server and other optimizations

KennyLee 2017-10-24 16:24:36 +08:00
parent 0d2c9df1cf
commit 28ec5b0862
28 changed files with 471 additions and 180 deletions

View File

@@ -8,4 +8,16 @@
* Installs the curl, bash, tzdata, tar and unzip packages.
* Sets the China time zone.
When deploying, remember to adjust the _vm.max_map_count_ setting on the deployment host, as follows:
```
vim /etc/sysctl.conf
# add the following line
vm.max_map_count=262144
# reload the configuration
sysctl -p
# verify
sysctl -a | grep 'vm.max_map_count'
```

View File

@@ -0,0 +1,9 @@
# https://github.com/elastic/elasticsearch-docker
FROM registry.cn-hangzhou.aliyuncs.com/kennylee/elasticsearch:5.3.0
# Add your elasticsearch plugins setup here
# Example: RUN elasticsearch-plugin install analysis-icu
COPY x-pack-5.3.0.jar /usr/share/elasticsearch/plugins/x-pack/
# Create the backups directory; it can be exposed via an external volume mount
RUN mkdir -p /usr/share/elasticsearch/backups/
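To actually take snapshots into this directory, it has to be registered as an `fs` repository. A minimal sketch, assuming `path.repo` in elasticsearch.yml already includes `/usr/share/elasticsearch/backups` (the repository name `backup` here is arbitrary):
```sh
# register the mounted directory as a snapshot repository
curl -XPUT 'http://<host>:9200/_snapshot/backup' -H 'Content-Type: application/json' -d '{
  "type": "fs",
  "settings": { "location": "/usr/share/elasticsearch/backups" }
}'
```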

View File

@@ -0,0 +1,77 @@
# Cracking the ElasticSearch license
In short: building the image copies the patched jar into the container; after that, upload the modified (legitimately issued) license to the ES server and you are done.
## License overview
Licenses currently come in five tiers: _OPEN SOURCE (trial)_, _BASIC_, _GOLD_, _PLATINUM_ and _ENTERPRISE_; the first two are free. [see also](https://www.elastic.co/subscriptions)
The Docker image ships with an `OPEN SOURCE` license that is only valid for one month, which is certainly not enough for real use. A `BASIC` license is valid for one year, which is just about workable: it needs renewing at most once a year.
Register for a [basic free license](https://register.elastic.co/):
```
# view the current license info
curl 'http://<host>:<port>/_xpack/license?pretty'
# upload a license
curl -XPUT -u elastic 'http://<host>:<port>/_xpack/license' -H "Content-Type: application/json" -d @license.json
```
## Cracking the license (Docker environment)
In most cases you will want a solution that works once and for all, which leaves cracking as the only option.
1. Build the patched x-pack-5.3.0.jar
The key file to patch is org/elasticsearch/license/LicenseVerifier.class.
First copy the x-pack jar out of the container; its path is `/usr/share/elasticsearch/plugins/x-pack/x-pack-5.3.0.jar`.
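With Docker this is a one-liner via `docker cp`; a minimal sketch, assuming the ES container is named `db_es` as in the compose file later in this commit:
```sh
# copy the original jar out of the running container
docker cp db_es:/usr/share/elasticsearch/plugins/x-pack/x-pack-5.3.0.jar .
```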
```sh
# compile LicenseVerifier.java against the original jar
javac -cp /usr/share/elasticsearch/plugins/x-pack/x-pack-5.3.0.jar LicenseVerifier.java
# move the compiled class into the expected package path
mkdir -p org/elasticsearch/license/ && cp LicenseVerifier.class org/elasticsearch/license/
# replace just that one class inside the jar
jar uvf /usr/share/elasticsearch/plugins/x-pack/x-pack-5.3.0.jar org/elasticsearch/license/LicenseVerifier.class
```
Once the jar is ready, copy it back to /usr/share/elasticsearch/plugins/x-pack/x-pack-5.3.0.jar.
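Again assuming the `db_es` container name, a sketch of copying it back and reloading:
```sh
# copy the patched jar back, then restart ES so the new verifier is loaded
docker cp x-pack-5.3.0.jar db_es:/usr/share/elasticsearch/plugins/x-pack/x-pack-5.3.0.jar
docker restart db_es
```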
*Appendix: LicenseVerifier.java source*
```java
package org.elasticsearch.license;
// patched verifier: accepts any license unconditionally
public class LicenseVerifier {
public static boolean verifyLicense(final License license, final byte[] encryptedPublicKeyData) {
return true;
}
public static boolean verifyLicense(final License license) {
return true;
}
}
```
*If you already have a patched x-pack-5.3.0.jar, the steps above can be skipped.*
2. Request a legitimate license
Register for a free license at https://register.elastic.co/
After receiving the license file, change two fields:
① "type": "platinum"
② "expiry_date_in_millis": 2524579200999
3. Upload the license
```sh
#! /bin/bash
LICENSE_PATH='license.json'
curl -XPUT -u elastic:changeme 'http://192.168.3.157:9200/_xpack/license?pretty' -d @$LICENSE_PATH
```
Reference: [cracking x-pack for elasticsearch](http://blog.csdn.net/u013066244/article/details/73927756)

Binary file not shown.

View File

@@ -0,0 +1,57 @@
# ElasticSearch + Logstash JDBC sync: search engine functionality
## Environment setup
Since ElasticSearch is used, first adjust the kernel parameter:
```
vim /etc/sysctl.conf
# add the following line
vm.max_map_count=262144
# reload the configuration
sysctl -p
# verify
sysctl -a | grep 'vm.max_map_count'
```
## Notes
* db.ini: configuration file for the database sync.
* demo_mysql.sql: table schema and data for the MySQL test database.
## Database sync files
Put a SQL file for each table you want to sync into the `/logstash/db-shipper/sql/` folder; see the existing *department.sql* and *user.sql* files for reference.
*The example uses incremental sync, so every table must contain an identically named creation-time column.*
The file name is used as the _type_ under which documents are imported into ElasticSearch.
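For example, to sync a hypothetical `product` table you would add a `product.sql` that follows the same incremental pattern as the existing files:
```sh
# sql/product.sql (hypothetical table; column names are assumptions)
cat > sql/product.sql <<'EOF'
SELECT * FROM product WHERE create_time > :sql_last_value
EOF
```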
## Generating the configuration
1. Edit the database configuration
Edit `/logstash/db-shipper/db.ini` to match your deployment environment.
Note the _schedule_ section, which makes it easy to set a different run interval for each table's script.
2. Generate the configuration file
```
python generate_logstash_config.py
```
3. Check
Open the pipeline/logstash.conf file and confirm the generated configuration is correct.
## Usage
Once the configuration file has been generated:
```
docker-compose up -d
```
For anything else, refer to the docker-compose documentation.
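For instance, day-to-day operations use the standard subcommands:
```sh
docker-compose logs -f db_logstash  # follow the Logstash sync output
docker-compose down                 # stop and remove the stack
```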

View File

@@ -1,10 +1,16 @@
version: '2'
version: '3'
networks:
&network db_es_net:
driver: bridge
ipam:
driver: default
config:
- subnet: 172.16.238.0/24
services:
db_es:
image: registry.cn-hangzhou.aliyuncs.com/kennylee/elasticsearch:5.3.0
container_name: db_es
hostname: db_es
ports:
- "9200:9200"
- "9300:9300"
@@ -22,22 +28,27 @@ services:
ipv4_address: 172.16.238.10
volumes:
- ./data/elasticsearch:/usr/share/elasticsearch/data:rw
logging:
driver: 'json-file'
options:
max-size: '30m'
max-file: '2'
db_logstash:
build: logstash/db-shipper/
container_name: db_logstash
volumes:
- ./logstash/db-shipper/config/logstash.yml:/usr/share/logstash/config/logstash.yml
- ./logstash/db-shipper/pipeline:/usr/share/logstash/pipeline
- ./logstash/db-shipper/sql:/usr/share/logstash/jdbc/
environment:
LS_JAVA_OPTS: "-Xmx256m -Xms256m"
networks:
- db_es_net
- *network
depends_on:
- db_es
networks:
db_es_net:
driver: bridge
ipam:
driver: default
config:
- subnet: 172.16.238.0/24
logging:
driver: 'json-file'
options:
max-size: '30m'
max-file: '2'

View File

@@ -0,0 +1,3 @@
FROM registry.cn-hangzhou.aliyuncs.com/kennylee/logstash:5.3.0
COPY ./lib/ /usr/share/logstash/lib/

View File

@@ -0,0 +1,15 @@
jdbc {
jdbc_driver_library => "/usr/share/logstash/lib/$driver_name"
jdbc_driver_class => "$driver_class"
jdbc_connection_string => "$db_connect_url"
jdbc_validate_connection => "true"
schedule => "$schedule"
jdbc_validation_timeout => "60"
jdbc_user => "$db_user"
jdbc_password => "$db_pwd"
jdbc_paging_enabled => "true"
jdbc_page_size => "10000"
type => "$table_name"
statement_filepath => "/usr/share/logstash/jdbc/$table_name.sql"
}

View File

@@ -0,0 +1,16 @@
input {
$inputs
}
output {
#stdout {
# codec => rubydebug
# }
elasticsearch {
hosts => "$es_host"
flush_size => 10000
index => "$es_index_name"
document_id => "%{id}"
}
}

View File

@@ -0,0 +1,16 @@
[db]
connect_url = jdbc:mysql://192.168.1.206:3306/demo
user = root
password = 111111
driver_type = mysql
[es]
index_name = demo
[lib]
mysql = mysql-connector-java-5.1.30.jar
oracle = ojdbc14-10.2.0.3.jar
[schedule]
department = */5 * * * *

View File

@@ -0,0 +1,73 @@
-- MySQL dump 10.13 Distrib 5.7.19, for osx10.12 (x86_64)
--
-- Host: localhost Database: demo
-- ------------------------------------------------------
-- Server version 5.7.19
/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
/*!40101 SET NAMES utf8 */;
/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
/*!40103 SET TIME_ZONE='+00:00' */;
/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
--
-- Table structure for table `department`
--
DROP TABLE IF EXISTS `department`;
/*!40101 SET @saved_cs_client = @@character_set_client */;
/*!40101 SET character_set_client = utf8 */;
CREATE TABLE `department` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`name` varchar(32) NOT NULL,
`create_time` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP,
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=3 DEFAULT CHARSET=utf8;
/*!40101 SET character_set_client = @saved_cs_client */;
--
-- Dumping data for table `department`
--
INSERT INTO `department` (`id`, `name`, `create_time`) VALUES (2,'研发部','2017-10-24 13:43:11');
--
-- Table structure for table `user`
--
DROP TABLE IF EXISTS `user`;
/*!40101 SET @saved_cs_client = @@character_set_client */;
/*!40101 SET character_set_client = utf8 */;
CREATE TABLE `user` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`name` varchar(32) NOT NULL,
`create_time` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP,
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=3 DEFAULT CHARSET=utf8;
/*!40101 SET character_set_client = @saved_cs_client */;
--
-- Dumping data for table `user`
--
INSERT INTO `user` (`id`, `name`, `create_time`) VALUES (1,'张三','2017-10-24 13:42:41'),(2,'李四','2017-10-24 13:42:41');
--
-- Dumping routines for database 'demo'
--
/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
-- Dump completed on 2017-10-24 13:49:22

View File

@@ -0,0 +1,76 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from string import Template
import glob
from os.path import basename, splitext
import ConfigParser
files_path = 'sql/'
dist_file_path = 'pipeline/logstash.conf'
config_file_path = 'db.ini'
default_schedule = '*/2 * * * *'
es_host = '172.16.238.10:9200'
cf = ConfigParser.ConfigParser()
cf.read(config_file_path)
# map each table name to its cron schedule from the [schedule] section
def get_table_schedule_map():
schedules = cf.options("schedule")
m = {}
for option in schedules:
m[option] = cf.get('schedule', option)
return m
# render one jdbc input block per SQL file using the input template
def generate_input_configs():
sql_files = glob.glob(files_path + '/*.sql')
it = Template(open('assets/input-template.conf').read())
input_str = ''
print(len(sql_files))  # debug: number of SQL files found
d = get_table_schedule_map()
driver_type = cf.get("db", "driver_type")
driver_mysql_name = cf.get("lib", "mysql")
driver_oracle_name = cf.get("lib", "oracle")
if driver_type == 'mysql':
driver_name = driver_mysql_name
driver_class = 'com.mysql.jdbc.Driver'
elif driver_type == 'oracle':
driver_name = driver_oracle_name
driver_class = 'Java::oracle.jdbc.driver.OracleDriver'
else:
raise Exception("unsupported driver name %s yet!" % driver_type)
db_connect_url = cf.get("db", "connect_url")
db_user = cf.get("db", "user")
db_pwd = cf.get("db", "password")
for file_url in sql_files:
file_name_with_ext = basename(file_url)
file_name = splitext(file_name_with_ext)[0]
schedule = d.get(file_name.lower(), default_schedule)
input_str += it.substitute(table_name=file_name, schedule=schedule, driver_name=driver_name,
driver_class=driver_class, db_connect_url=db_connect_url, db_user=db_user,
db_pwd=db_pwd)
return input_str
if __name__ == '__main__':
input_configs = generate_input_configs()
es_index_name = cf.get('es', 'index_name')
f = open(dist_file_path, 'w+')
filein = open('assets/logstash-template.conf')
s = Template(filein.read())
ls_str = s.substitute(inputs=input_configs, es_host=es_host, es_index_name=es_index_name)
f.write(ls_str)
f.close()

View File

@@ -0,0 +1,46 @@
input {
jdbc {
jdbc_driver_library => "/usr/share/logstash/lib/mysql-connector-java-5.1.30.jar"
jdbc_driver_class => "com.mysql.jdbc.Driver"
jdbc_connection_string => "jdbc:mysql://192.168.1.206:3306/demo"
jdbc_validate_connection => "true"
schedule => "*/5 * * * *"
jdbc_validation_timeout => "60"
jdbc_user => "root"
jdbc_password => "111111"
jdbc_paging_enabled => "true"
jdbc_page_size => "10000"
type => "department"
statement_filepath => "/usr/share/logstash/jdbc/department.sql"
}
jdbc {
jdbc_driver_library => "/usr/share/logstash/lib/mysql-connector-java-5.1.30.jar"
jdbc_driver_class => "com.mysql.jdbc.Driver"
jdbc_connection_string => "jdbc:mysql://192.168.1.206:3306/demo"
jdbc_validate_connection => "true"
schedule => "*/2 * * * *"
jdbc_validation_timeout => "60"
jdbc_user => "root"
jdbc_password => "111111"
jdbc_paging_enabled => "true"
jdbc_page_size => "10000"
type => "user"
statement_filepath => "/usr/share/logstash/jdbc/user.sql"
}
}
output {
#stdout {
# codec => rubydebug
# }
elasticsearch {
hosts => "172.16.238.10:9200"
flush_size => 10000
index => "demo"
document_id => "%{id}"
}
}

View File

@@ -0,0 +1 @@
SELECT * FROM department WHERE create_time > :sql_last_value

View File

@@ -0,0 +1 @@
SELECT * FROM `user` WHERE create_time > :sql_last_value

View File

@@ -1,3 +0,0 @@
# ElasticSearch + Logstash JDBC sync: search engine functionality

View File

@@ -1,54 +0,0 @@
version: '2'
services:
db_es:
image: registry.cn-hangzhou.aliyuncs.com/kennylee/elasticsearch:5.3.0
container_name: db_es
hostname: db_es
ports:
- "9200:9200"
- "9300:9300"
environment:
ES_JAVA_OPTS: "-Xmx256m -Xms256m"
# disable X-Pack
# see https://www.elastic.co/guide/en/x-pack/current/xpack-settings.html
# https://www.elastic.co/guide/en/x-pack/current/installing-xpack.html#xpack-enabling
xpack.security.enabled: "false"
xpack.monitoring.enabled: "false"
xpack.graph.enabled: "false"
xpack.watcher.enabled: "false"
networks:
db_es_net:
ipv4_address: 172.16.238.10
volumes:
- ./data/elasticsearch:/usr/share/elasticsearch/data:rw
db_logstash:
build: logstash/db-shipper/
container_name: db_logstash
volumes:
- ./logstash/db-shipper/config/logstash.yml:/usr/share/logstash/config/logstash.yml
- ./logstash/db-shipper/pipeline:/usr/share/logstash/pipeline
environment:
LS_JAVA_OPTS: "-Xmx256m -Xms256m"
networks:
- db_es_net
depends_on:
- db_es
kibana:
image: registry.cn-hangzhou.aliyuncs.com/kennylee/kibana:5.3.0
container_name: db_kibana
volumes:
- ./kibana/config/:/usr/share/kibana/config
ports:
- "5601:5601"
networks:
- db_es_net
depends_on:
- db_es
networks:
db_es_net:
driver: bridge
ipam:
driver: default
config:
- subnet: 172.16.238.0/24

View File

@@ -1,16 +0,0 @@
---
## Default Kibana configuration from kibana-docker.
## from https://github.com/elastic/kibana-docker/blob/master/build/kibana/config/kibana.yml
#
server.name: kibana
server.host: "0"
elasticsearch.url: http://db_es:9200
## Disable X-Pack
## see https://www.elastic.co/guide/en/x-pack/current/xpack-settings.html
## https://www.elastic.co/guide/en/x-pack/current/installing-xpack.html#xpack-enabling
#
xpack.security.enabled: false
xpack.monitoring.enabled: false
xpack.graph.enabled: false
xpack.reporting.enabled: false

View File

@@ -1,6 +0,0 @@
FROM registry.cn-hangzhou.aliyuncs.com/kennylee/logstash:5.3.0
COPY config/logstash.yml /usr/share/logstash/config/logstash.yml
COPY pipeline /usr/share/logstash/pipeline
COPY ./lib/ /usr/share/logstash/lib/

View File

@@ -1,40 +0,0 @@
input {
jdbc {
jdbc_driver_library => "/usr/share/logstash/lib/mysql-connector-java-5.1.30.jar"
jdbc_driver_class => "com.mysql.jdbc.Driver"
jdbc_connection_string => "jdbc:mysql://192.168.1.110:3306/tksite"
jdbc_user => "root"
jdbc_password => "111111"
jdbc_paging_enabled => "true"
jdbc_page_size => "1000"
type => "users"
schedule => "*/1 * * * *"
statement => "SELECT * FROM users"
}
jdbc {
jdbc_driver_library => "/usr/share/logstash/lib/mysql-connector-java-5.1.30.jar"
jdbc_driver_class => "com.mysql.jdbc.Driver"
jdbc_connection_string => "jdbc:mysql://192.168.1.110:3306/tksite"
jdbc_user => "root"
jdbc_password => "111111"
jdbc_paging_enabled => "true"
jdbc_page_size => "1000"
type => "department"
schedule => "*/1 * * * *"
statement => "SELECT * FROM department"
}
}
output {
stdout {
codec => rubydebug
}
elasticsearch {
hosts => "172.16.238.10:9200"
flush_size => 1000
index => "gkxt"
document_id => "%{id}"
}
}

View File

@@ -1,10 +1,7 @@
app:
image: kennylee26/pureftpd
image: registry.cn-hangzhou.aliyuncs.com/kennylee/pureftp
container_name: "pureftpd"
net: "host" # none but net=host, windows user can be work.
# ports:
# - "21:21"
# Please create these folders before.
net: "host"
volumes:
- /home/data/ftp/files:/home/ftpusers
- /home/data/ftp/pure-ftpd:/etc/pure-ftpd

View File

@@ -1,14 +1,11 @@
Shared resources environment
=====
# Shared resources environment
Currently includes FTP and samba.
1. FTP supports both upload and download. Account: ftp, password: ftp
2. Samba supports download only.
Note the local path mappings configured in the docker-compose.yml file and adjust them to your actual environment.
--------
For FTP configuration details, see http://download.pureftpd.org/pub/pure-ftpd/doc/README
For FTP configuration details, see http://download.pureftpd.org/pub/pure-ftpd/doc/README

View File

@@ -1,38 +1,37 @@
ftp:
image: kennylee26/pureftpd
container_name: "share-pureftpd"
net: "host" # none but net=host, windows user can be work.
# Please create these folders at before.
volumes:
- /home/data/ftp/files:/home/ftpusers
- /home/data/ftp/pure-ftpd:/etc/pure-ftpd
restart: always
environment:
- TZ=Asia/Shanghai
samba:
image: vimagick/samba
container_name: "share-samba"
ports:
- "137:137/udp"
- "138:138/udp"
- "139:139/tcp"
- "445:445/tcp"
volumes:
# replace with an absolute path
- ${pwd}/smb.conf:/etc/samba/smb.conf
- /home/data/ftp/files/ftp:/share
restart: always
environment:
- TZ=Asia/Shanghai
http:
image: nginx:1.9.8
container_name: "share-http"
ports:
- "8001:80"
volumes:
- /home/data/ftp/files/ftp:/usr/share/nginx/html
# replace with an absolute path
- ${pwd}/nginx.conf:/etc/nginx/nginx.conf
environment:
- TZ=Asia/Shanghai
restart: always
version: '2'
services:
ftp:
image: registry.cn-hangzhou.aliyuncs.com/kennylee/pureftp
container_name: "share-pureftpd"
net: "host" # none but net=host, windows user can be work.
# Please create these folders at before.
volumes:
- /home/data/ftp/files:/home/ftpusers
- /home/data/ftp/pure-ftpd:/etc/pure-ftpd
restart: always
samba:
image: vimagick/samba
container_name: "share-samba"
ports:
- "137:137/udp"
- "138:138/udp"
- "139:139/tcp"
- "445:445/tcp"
volumes:
- ./smb.conf:/etc/samba/smb.conf
- /home/data/ftp/files/ftp:/share
restart: always
environment:
- TZ=Asia/Shanghai
http:
image: nginx:1.9.8
container_name: "share-http"
ports:
- "8001:80"
volumes:
- /home/data/ftp/files/ftp:/usr/share/nginx/html
- ./nginx.conf:/etc/nginx/nginx.conf
environment:
- TZ=Asia/Shanghai
restart: always

View File

@@ -6,7 +6,9 @@ services:
- "8080:8080"
volumes:
- ./app/webapps/:/opt/tomcat/webapps/:z
- ./config/server.xml:/opt/tomcat/conf/server.xml:ro
#- ./config/server.xml:/opt/tomcat/conf/server.xml:ro
- ./data/logs/:/opt/tomcat/logs/:z
restart: always
environment:
- JAVA_OPTS=-server -Xms128m -Xmx1024m -XX:PermSize=128M -XX:MaxPermSize=192M

View File

@@ -6,7 +6,9 @@ services:
- "8080:8080"
volumes:
- ./app/webapps/:/opt/tomcat/webapps/:z
- ./config/server.xml:/opt/tomcat/conf/server.xml:ro
#- ./config/server.xml:/opt/tomcat/conf/server.xml:ro
- ./data/logs/:/opt/tomcat/logs/:z
restart: always
environment:
- JAVA_OPTS=-server -Xms128m -Xmx1024m