查看ES健康状态
1
2
3[root@10 ~]# curl '10.0.3.40:9200/_cat/health?v'
epoch timestamp cluster status node.total node.data shards pri relo init unassign pending_tasks max_task_wait_time active_shards_percent
1489221736 16:42:16 ssp green 3 3 662 331 0 0 0 0 - 100.0%查看集群节点
1
2
3
4
5[root@10 ~]# curl '10.0.3.40:9200/_cat/nodes?v'
host ip heap.percent ram.percent load node.role master name
10.0.3.40 10.0.3.40 49 97 0.89 d m 10.0.3.40
10.0.3.41 10.0.3.41 58 98 0.71 d m 10.0.3.41
10.0.3.42 10.0.3.42 61 97 1.09 d * 10.0.3.42查看集群索引信息
1
2
3
4[root@10 ~]# curl '10.0.3.40:9200/_cat/indices?v'
health status index pri rep docs.count docs.deleted store.size pri.store.size
green open index_1 5 1 12935206 0 10.5gb 5.2gb
green open index_2 5 1 21408160 0 16.6gb 8.3gb查看单条索引
1
2[root@10 soft]# curl -XGET http://10.0.3.40:9200/new-index?v
[root@10 soft]# curl -XGET http://10.0.3.40:9200/new-index?pretty关闭和打开索引
1
2curl -XPOST http://10.0.3.40:9200/new-index/_open
curl -XPOST http://10.0.3.40:9200/new-index/_close删除索引
1
curl -XDELETE http://10.0.3.40:9200/new-index
创建索引
1
2
3
4
5
6
7
8
9
10
11
12
13
14//简单方法
curl -XPUT http://10.0.3.40:9200/new-index
//创建索引加参数
curl -XPUT http://10.0.3.40:9200/new-index -d '
{
"settings":{
"index":{
"number_of_shards":3,
"number_of_replicas":2
}
}
}
'查看分片
1
2
3
4
5
6
7
8
9
10
11
12
13
14[wisdom@10 ~]$ curl -XGET '10.0.3.41:9200/_cat/shards'|grep v2-inbound-request-2017.03
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
100 26082 100 26082 0 0 61893 0 --:--:-- --:--:-- --:--:-- 61952
v2-inbound-request-2017.03 1 r STARTED 25071632 35.2gb 10.0.3.41 10.0.3.41
v2-inbound-request-2017.03 1 p STARTED 25071643 35.2gb 10.0.3.40 10.0.3.40
v2-inbound-request-2017.03 3 p STARTED 25107804 35.3gb 10.0.3.41 10.0.3.41
v2-inbound-request-2017.03 3 r STARTED 25107796 35.3gb 10.0.3.42 10.0.3.42
v2-inbound-request-2017.03 2 r STARTED 25098807 35.2gb 10.0.3.42 10.0.3.42
v2-inbound-request-2017.03 2 p STARTED 25098799 35.2gb 10.0.3.40 10.0.3.40
v2-inbound-request-2017.03 4 p STARTED 25108295 35.3gb 10.0.3.41 10.0.3.41
v2-inbound-request-2017.03 4 r STARTED 25108301 35.3gb 10.0.3.42 10.0.3.42
v2-inbound-request-2017.03 0 r STARTED 25105055 35.3gb 10.0.3.42 10.0.3.42
v2-inbound-request-2017.03 0 p STARTED 25105050 35.3gb 10.0.3.40 10.0.3.40
samba conf
samba 全局设置变量
1 | [global] |
samba 共享目录设置
1 | [homes] |
mysql index options
删除索引
1
2
3mysql> DROP INDEX index_name ON table_name
mysql> ALTER TABLE table_name DROP INDEX index_name
mysql> ALTER TABLE table_name DROP PRIMARY KEY查看索引
1
2mysql> show index from tblname;
mysql> show keys from tblname;索引类型
1
2
3在创建索引时,可以规定索引能否包含重复值.如果不包含,则索引应该创建为PRIMARY KEY或UNIQUE索引.对于单列惟一性索引,这保证单列不包含重复的值.对于多列惟一性索引,保证多个值的组合不重复.
PRIMARY KEY索引和UNIQUE索引非常类似.事实上,PRIMARY KEY索引仅是一个具有名称PRIMARY的UNIQUE索引.这表示一个表只能包含一个PRIMARY KEY,因为一个表中不可能具有两个同名的索引.1
ALTER TABLE students ADD PRIMARY KEY (sid)
创建索引
1
2
3
4
5
6
7
8//ALTER TABLE用来创建普通索引、UNIQUE索引或PRIMARY KEY索引
ALTER TABLE table_name ADD INDEX index_name (column_list)
ALTER TABLE table_name ADD UNIQUE (column_list)
ALTER TABLE table_name ADD PRIMARY KEY (column_list)
//CREATE INDEX可对表增加普通索引或UNIQUE索引
CREATE INDEX index_name ON table_name (column_list)
CREATE UNIQUE INDEX index_name ON table_name (column_list)
samba install
samba 安装
关闭selinux
1
2[root@10 ~]# setenforce 0
[root@10 ~]# sed -i s/'SELINUX=enforcing'/'SELINUX=disabled'/g /etc/selinux/config关闭防火墙
1
2[root@10 ~]# systemctl stop firewalld.service
[root@10 ~]# systemctl disable firewalld.servicesamba 安装
1
[root@localhost ~]# yum -y install samba samba-client
创建共享目录
1
2[root@10 ~]# mkdir -p /mnt/data/samba/elasticsearch
[root@10 ~]# chmod 777 /mnt/data/samba/elasticsearchsamba 配置
1
2
3
4
5
6
7
8
9[root@10 ~]# cp /etc/samba/{smb.conf,smb.conf.bak}
[root@10 ~]# vim /etc/samba/smb.conf ==>追加
[elasticsearch]
comment = elasticsearch share
path = /mnt/data/samba/elasticsearch
hosts allow = 10.0.1. 10.0.2. 10.0.3. 10.0.4.
browseable = no
guest ok = no
writable = yes创建访问账号
1
2
3
4
5[root@10 ~]# useradd -s /sbin/nologin smbuser
[root@10 ~]# smbpasswd -a smbuser
New SMB password: //123qwe`
Retype new SMB password:
Added user smbuser.1
2
3
4
5
6
7
8
9
10
11//添加账号并设置密码
smbpasswd -a smbuser
//删除smb账号
smbpasswd -x smbuser
//禁用smb账号
smbpasswd -d smbuser
//启用smb账号
smbpasswd -e smbuser启动smb服务
1
[root@10 ~]# systemctl start smb
1
2
3
4
5[root@10 ~]# netstat -lntp|grep smbd
tcp 0 0 0.0.0.0:445 0.0.0.0:* LISTEN 5949/smbd
tcp 0 0 0.0.0.0:139 0.0.0.0:* LISTEN 5949/smbd
tcp6 0 0 :::445 :::* LISTEN 5949/smbd
tcp6 0 0 :::139 :::* LISTEN 5949/smbd客户端安装samba-client
1
[root@10 ~]# yum -y install samba-client
客户端查看共享资源
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17[root@10 ~]# smbclient -L //10.0.2.115
Enter root's password: //查看共享信息,不需要密码,直接按回车键
Anonymous login successful
Domain=[SAMBA] OS=[Windows 6.1] Server=[Samba 4.4.4]
Sharename Type Comment
--------- ---- -------
print$ Disk Printer Drivers
IPC$ IPC IPC Service (Samba 4.4.4)
Anonymous login successful
Domain=[SAMBA] OS=[Windows 6.1] Server=[Samba 4.4.4]
Server Comment
--------- -------
Workgroup Master
--------- -------客户端查看认证用户共享目录
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27[root@10 ~]# smbclient -U smbuser //10.0.2.115/elasticsearch
Enter smbuser's password:
Domain=[SAMBA] OS=[Windows 6.1] Server=[Samba 4.4.4]
smb: \> ls
. D 0 Sat Mar 11 15:27:59 2017
.. D 0 Sat Mar 11 15:27:59 2017
1048064000 blocks of size 1024. 1048031024 blocks available
smb: \> help
? allinfo altname archive backup
blocksize cancel case_sensitive cd chmod
chown close del dir du
echo exit get getfacl geteas
hardlink help history iosize lcd
link lock lowercase ls l
mask md mget mkdir more
mput newer notify open posix
posix_encrypt posix_open posix_mkdir posix_rmdir posix_unlink
print prompt put pwd q
queue quit readlink rd recurse
reget rename reput rm rmdir
showacls setea setmode scopy stat
symlink tar tarmode timeout translate
unlock volume vuid wdel logon
listconnect showconnect tcon tdis tid
logoff .. !
smb: \> quit客户端挂载和卸载共享目录
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16[root@10 ~]# mkdir -p /mnt/ops/elasticsearch
[root@10 ~]# chmod 777 /mnt/ops/elasticsearch
[root@10 ~]# mount -t cifs //10.0.2.115/elasticsearch /mnt/ops/elasticsearch -o username=smbuser,password=123qwe\`
或者:
[root@10 ~]# mkdir -p /mnt/ops/elasticsearch
[root@10 ~]# chmod 777 /mnt/ops/elasticsearch
[root@10 ~]# vim /etc/fstab
//10.0.2.115/elasticsearch /mnt/ops/elasticsearch cifs defaults,username=smbuser,password=123qwe` 0 0
[root@10 ~]# mount -a
[root@10 ~]# df -h |grep elasticsearch
//10.0.2.115/elasticsearch 1000G 33M 1000G 1% /mnt/ops/elasticsearch
[root@10 ~]# umount /mnt/ops/elasticsearch拓展:不同用户访问不同目录(授权)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32//创建两个组
groupadd G1
groupadd G2
//创建三个用户
useradd -s /sbin/nologin -g G1 U1
useradd -s /sbin/nologin -g G1 U2
useradd -s /sbin/nologin -g G2 U3
//设置smb 密码
smbpasswd -a U1
smbpasswd -a U2
smbpasswd -a U3
//配置文件设置
[test1]
comment = Only U1 and U2 access
path = /tmp/test1
hosts allow = 10.0.1. 10.0.2. 10.0.3. 10.0.4.
browseable = yes
guest ok = no
writable = yes
write list = @G1
[test2]
comment = Only U3 access
path = /tmp/test2
hosts allow = 10.0.1. 10.0.2. 10.0.3. 10.0.4.
browseable = no
guest ok = no
writable = yes
write list = @G2
nfs install
NFS 简介
1 | NFS是Network File System的缩写,即网络文件系统 |
NFS 安装
NFS 安装
1
2// NFS 服务端 和 客户端
[root@10 ~]# yum -y install nfs-utils rpcbind1
2
3* nfs-utils Nfs-utils软件包提供了rpc.nfsd和rpc.mountd两个RPC守护进程
* rpcbind 远程过程调用服务
* nfs-ganesha* NFS-Ganesha是一个基于NFSv3\v4\v4.1的文件服务器,运行在大多数Linux发行版的用户模态下,同时也支持9p.2000L协议.它支持的运行平台包括Linux,BSD variants和POSIX-compliant UnixesNFS 常用文件或目录
1
2
3
4
5* /etc/exports NFS服务的主要配置文件
* /usr/sbin/exportfs NFS服务的管理命令
* /usr/sbin/showmount 客户端的查看命令
* /var/lib/nfs/etab 记录NFS分享出来的目录的完整权限设定值
* /var/lib/nfs/xtab 记录曾经登录过的客户端信息
NFS 配置
服务端:创建存放数据目录
1
2[root@10 ~]# mkdir -p /mnt/data/nfs/elasticsearch
[root@10 ~]# chmod 777 /mnt/data/nfs/elasticsearch服务端:修改配置文件/etc/exports
1
2[root@10 ~]# vim /etc/exports
/mnt/data/nfs/elasticsearch 10.0.3.0/24(rw,sync,no_root_squash,no_all_squash)1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30/etc/exports文件内容格式:
<输出目录> [客户端1 选项(访问权限,用户映射,其他)] [客户端2 选项(访问权限,用户映射,其他)]
* 输出目录 共享目录位置
* 客户端IP:
1.指定ip地址:1.1.1.1
2.指定网段:1.1.1.0/24,1.1.1.0/255.255.255.0
3.指定主机域名:a.b.com
4.指定部分主机域名:*.b.com
5.所有主机:*
* 客户端选项:选项用来设置输出目录的访问权限,用户映射
1.访问权限:
* ro 只读访问
* rw 读写访问
2.用户映射选项:
* all_squash 将远程访问的所有普通用户及所属组都映射为匿名用户或用户组(nfsnobody)
* no_all_squash 与all_squash取反(默认设置)
* root_squash 将root用户及所属组都映射为匿名用户或用户组(默认设置)
* no_root_squash 与root_squash取反
* anonuid=xxx 将远程访问的所有用户都映射为匿名用户,并指定该用户为本地用户(UID=xxx)
* anongid=xxx 将远程访问的所有用户组都映射为匿名用户组账户,并指定该匿名用户组账户为本地用户组账户(GID=xxx)
3.其它选项
* secure 限制客户端只能从小于1024的tcp/ip端口连接nfs服务器(默认设置)
* insecure 允许客户端从大于1024的tcp/ip端口连接服务器
* sync 将数据同步写入内存缓冲区与磁盘中,效率低,但可以保证数据的一致性
* async 将数据先保存在内存缓冲区中,必要时才写入磁盘
* wdelay 检查是否有相关的写操作,如果有则将这些写操作一起执行,这样可以提高效率(默认设置)
* no_wdelay 若有写操作则立即执行,应与sync配合使用
* subtree 若输出目录是一个子目录,则nfs服务器将检查其父目录的权限(默认设置)
* no_subtree 即使输出目录是一个子目录,nfs服务器也不检查其父目录的权限,这样可以提高效率服务端:启动服务和开机自启动
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15//重启(修改完配置文件后)
[root@10 ~]# systemctl restart rpcbind
[root@10 ~]# systemctl restart nfs
或者:
//重新加载配置文件
[root@10 ~]# exportfs -r
//启动
[root@10 ~]# systemctl start rpcbind
[root@10 ~]# systemctl start nfs
//开机启动项
[root@10 ~]# systemctl enable rpcbind
[root@10 ~]# systemctl enable nfs服务端:检查NFS是否启动成功
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28[root@10 ~]# rpcinfo -p
program vers proto port service
100000 4 tcp 111 portmapper
100000 3 tcp 111 portmapper
100000 2 tcp 111 portmapper
100000 4 udp 111 portmapper
100000 3 udp 111 portmapper
100000 2 udp 111 portmapper
100024 1 udp 53483 status
100024 1 tcp 50192 status
100005 1 udp 20048 mountd
100005 1 tcp 20048 mountd
100005 2 udp 20048 mountd
100005 2 tcp 20048 mountd
100005 3 udp 20048 mountd
100005 3 tcp 20048 mountd
100003 3 tcp 2049 nfs
100003 4 tcp 2049 nfs
100227 3 tcp 2049 nfs_acl
100003 3 udp 2049 nfs
100003 4 udp 2049 nfs
100227 3 udp 2049 nfs_acl
100021 1 udp 38395 nlockmgr
100021 3 udp 38395 nlockmgr
100021 4 udp 38395 nlockmgr
100021 1 tcp 53778 nlockmgr
100021 3 tcp 53778 nlockmgr
100021 4 tcp 53778 nlockmgr1
2
3
4
5
6
7注意:
我们只输入了一条启动NFS服务的命令,它额外的启动了3个服务:quotas,daemon,mountd(启动portmap服务)又是怎么回事?
首先,NFS文件系统要提供服务单靠本身的NFS服务是不够的,还需要调用其它服务,这个其它服务就是RPC(remote procedure call,远程过程调用)服务和portmap服务.由于NFS服务本身不提供文件传输功能,我们要远程使用NFS文件系统就需要RPC服务的支持,而portmap服务用来为RPC服务进行动态端口分配和映射,所以portmap服务也是NFS服务所必须的.正是因为NFS的运行必须要使用RPC服务,所以一般把NFS服务看作RPC服务的一种,开启NFS服务其实就是在开启RPC服务.REDHAT安全指南在讲portmap服务时就说:"portmap服务是用于RPC服务(如NIS和NFS)的动态端口分配守护进程",明显把NFS和NIS(network information service)看成了RPC服务的其中两个应用.
NFS使用了RPC的哪些服务呢? rpc.nfsd服务 和 rpc.mountd服务
明白了这点,我们就明白为什么在开启nfs服务时,系统除了开启NFS services还开启了rpc.nfsd和rpc.mountd,而至于NFS quotas是用来管理目录配额的,它并不是必需的.这也就明白了安装NFS服务时所需要的两个软件包nfs-utils和portmap: Nfs-utils软件包提供了rpc.nfsd和rpc.mountd两个RPC的daemon(守护进程)程序,而portmap软件包提供了portmap程序客户端:检查共享目录列表
1
2
3[root@10 ~]# showmount -e 10.0.2.115
Export list for 10.0.2.115:
/mnt/data/nfs/elasticsearch 10.0.3.0/24客户端:创建挂载点
1
2[root@10 ~]# mkdir -p /mnt/ops/elasticsearch
[root@10 ~]# chmod 777 /mnt/ops/elasticsearch客户端:挂载共享目录
1
2
3
4
5
6//显示指定NFS服务器的客户端以及服务器端在客户端的挂载点
showmount -a IP
//显示指定NFS服务器在客户端的挂载点
showmount -d IP
rpcinfo -p 10.10.209.148客户端挂载目录
1
[root@10 ~]# mount.nfs -t nfs 10.0.2.115:/mnt/data/nfs/elasticsearch /mnt/ops/elasticsearch -o proto=tcp -o nolock
elasticsearch backup and restore
elasticsearch 备份说明
1 | Elasticsearch的备份分两步: |
elasticsearch 备份
elasticsearch 创建一个仓库
1
2
3//查看当前所有仓库
[root@10 ~]# curl -XGET http://10.0.3.41:9200/_snapshot/_all?pretty
{ }1
2
3
4
5
6
7
8
9
10
11
12//创建一个仓库
[wisdom@10 ~]$ curl -XPUT '10.0.3.41:9200/_snapshot/esbackup' -d '
> {
> "type": "fs",
> "settings": {
> "location": "/mnt/ops/elasticsearch",
> "compress" : "true",
> "max_snapshot_bytes_per_sec" : "80mb",
> "max_restore_bytes_per_sec" : "80mb"
> }
> }'
{"acknowledged":true}1
2
3
4
5
6
7
8
9
12// 再次使用PUT覆盖,即可修改仓库配置
[wisdom@10 ~]$ curl -XPUT '10.0.3.41:9200/_snapshot/esbackup' -d '
> {
> "type": "fs",
> "settings": {
> "max_snapshot_bytes_per_sec" : "100mb",
> "max_restore_bytes_per_sec" : "100mb"
> }
> }'
{"acknowledged":true}1
2
3
4
5
6
7
8
9
10
11
12
13//查看当前仓库信息
[wisdom@10 ~]$ curl -XGET http://10.0.3.41:9200/_snapshot/esbackup?pretty
{
"esbackup" : {
"type" : "fs",
"settings" : {
"location" : "/mnt/ops/elasticsearch",
"max_restore_bytes_per_sec" : "80mb",
"compress" : "true",
"max_snapshot_bytes_per_sec" : "80mb"
}
}
}1
2
3
4
5
6
7
8
9
10
11
12
13//查看当前所有仓库信息
[wisdom@10 ~]$ curl -XGET http://10.0.3.41:9200/_snapshot/_all?pretty
{
"esbackup" : {
"type" : "fs",
"settings" : {
"location" : "/mnt/ops/elasticsearch",
"max_restore_bytes_per_sec" : "80mb",
"compress" : "true",
"max_snapshot_bytes_per_sec" : "80mb"
}
}
}1
2
3
4
5
6
7
8
9
10
11
12
13
14
15//查看仓库是否创建成功
[wisdom@10 ~]$ curl -XPOST http://10.0.3.41:9200/_snapshot/esbackup/_verify?pretty
{
"nodes" : {
"j0bxQZGZSbW1lNoD9d2WHQ" : {
"name" : "10.0.3.41"
},
"lhlUZILyRAm2Mtmp8czEOw" : {
"name" : "10.0.3.42"
},
"DKfbgvGYTNO48BdMI3eeQA" : {
"name" : "10.0.3.40"
}
}
}1
2
3
4
5//删除仓库
[root@10 ~]# curl -XDELETE http://10.0.3.40:9200/_snapshot/esbackup?pretty
{
"acknowledged" : true
}1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48补充: 一个权限错误
[wisdom@10 ~]$ curl -XPUT '10.0.3.41:9200/_snapshot/es' -d '
{
"type": "fs",
"settings": {
"location": "/mnt/ops/elasticsearch",
"max_snapshot_bytes_per_sec" : "80mb",
"max_restore_bytes_per_sec" : "80mb"
}
}'
{"error":{"root_cause":[{"type":"access_denied_exception","reason":"access_denied_exception: /mnt/ops/elasticsearch/tests-ggM3LzbjTn2d0pWjwj7WpQ/master.dat-temp"}],"type":"repository_verification_exception","reason":"[es] path is not accessible on master node","caused_by":{"type":"access_denied_exception","reason":"access_denied_exception: /mnt/ops/elasticsearch/tests-ggM3LzbjTn2d0pWjwj7WpQ/master.dat-temp"}},"status":500}
问题解决:
1. 查看test目录
[root@10 ~]# ls -l /mnt/ops/elasticsearch/
total 0
drwxr-xr-x 2 1001 1001 0 Mar 12 03:10 tests-ggM3LzbjTn2d0pWjwj7WpQ
2. 我使用的是普通用户wisdom启动的elasticsearch,三台es的uid和gid都是1000
[wisdom@10 ~]$ tail /etc/passwd|grep wisdom
wisdom:x:1000:1000::/home/wisdom:/bin/bash
此时,我们已经发现问题,创建的test目录的权限是1001,和我们使用的用户对不上号
3. 1001是什么用户?经排查是samba的权限账号
//Samba server:
[root@10 ~]# tail /etc/passwd|grep 1001
smbuser:x:1001:1001::/home/smbuser:/sbin/nologin
//查看ES集群挂载点
[root@10 ~]# cat /etc/fstab |grep elasticsearch
//10.0.2.115/elasticsearch /mnt/ops/elasticsearch cifs defaults,username=smbuser,password=xxxxxx 0 0
至此,问题已经找到. 解决就很快了.
4. 在Samba服务器上创建UID和GID与三台ES主机中ES启动用户相同的用户
//Samba server:
[root@10 ~]# useradd -u 1000 -g 1000 wisdom
[root@10 ~]# smbpasswd -a wisdom
//ES集群
[root@10 ~]# umount /mnt/ops/elasticsearch
[root@10 ~]# vim /etc/fstab |grep elasticsearch
//10.0.2.115/elasticsearch /mnt/ops/elasticsearch cifs defaults,username=wisdom,password=xxxxxx 0 0
[root@10 ~]# mount -a
[root@10 ~]# smbclient -U wisdom //10.0.2.115/elasticsearch
5. 重新创建仓库,成功elasticsearch 备份索引
1
2
3一个仓库可以包含多个快照(snapshots),快照可以存所有的索引,部分索引或者一个单独的索引
注意: 快照只会备份正在运行open状态的索引!!!1
2
3
4
5
6
7//备份所有索引到一个快照中
[wisdom@10 ~]$ curl -XPUT http://10.0.3.40:9200/_snapshot/esbackup/snapshot_all
直接返回{"accepted":true},然后备份在后台执行
or:
[wisdom@10 ~]$ curl -XPUT http://10.0.3.40:9200/_snapshot/esbackup/snapshot_all?wait_for_completion=true
只有备份完成后,才会显示{"accepted":true}1
2
3//备份部分索引
[wisdom@10 ~]$ curl -XPUT http://10.0.3.40:9200/_snapshot/esbackup/snapshot_index -d '{ "indices": "index_1,index_2" }'
会将索引index_1和index_2备份到快照snapshot_index中1
2
3//备份单个索引
[wisdom@10 ~]$ curl -XPUT http://10.0.3.40:9200/_snapshot/esbackup/snapshot_index_1 -d '{ "indices": "index_1" }'
会将索引index_1备份到快照snapshot_index_1中1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22//查看快照信息
[wisdom@10 ~]$ curl -XGET http://10.0.3.41:9200/_snapshot/esbackup/snapshot_index_1?pretty
{
"snapshots" : [ {
"snapshot" : "snapshot_index_1",
"version_id" : 2030399,
"version" : "2.3.3",
"indices" : [ "snapshot_index_1" ],
"state" : "SUCCESS",
"start_time" : "2017-03-11T20:53:11.478Z",
"start_time_in_millis" : 1489265591478,
"end_time" : "2017-03-11T20:53:46.128Z",
"end_time_in_millis" : 1489265626128,
"duration_in_millis" : 34650,
"failures" : [ ],
"shards" : {
"total" : 5,
"failed" : 0,
"successful" : 5
}
} ]
}1
2//查看所有快照信息
[wisdom@10 ~]$ curl -XGET http://10.0.3.41:9200/_snapshot/esbackup/_all?pretty1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102//查看快照状态信息
[wisdom@10 ~]$ curl -XGET http://10.0.3.41:9200/_snapshot/esbackup/snapshot_index_1/_status?pretty
{
"snapshots" : [ {
"snapshot" : "snapshot_index_1",
"repository" : "esbackup",
"state" : "SUCCESS",
"shards_stats" : {
"initializing" : 0,
"started" : 0,
"finalizing" : 0,
"done" : 5,
"failed" : 0,
"total" : 5
},
"stats" : {
"number_of_files" : 497,
"processed_files" : 497,
"total_size_in_bytes" : 2141849055,
"processed_size_in_bytes" : 2141849055,
"start_time_in_millis" : 1489265591607,
"time_in_millis" : 34498
},
"indices" : {
"snapshot_index_1" : {
"shards_stats" : {
"initializing" : 0,
"started" : 0,
"finalizing" : 0,
"done" : 5,
"failed" : 0,
"total" : 5
},
"stats" : {
"number_of_files" : 497,
"processed_files" : 497,
"total_size_in_bytes" : 2141849055,
"processed_size_in_bytes" : 2141849055,
"start_time_in_millis" : 1489265591607,
"time_in_millis" : 34498
},
"shards" : {
"0" : {
"stage" : "DONE",
"stats" : {
"number_of_files" : 94,
"processed_files" : 94,
"total_size_in_bytes" : 429108117,
"processed_size_in_bytes" : 429108117,
"start_time_in_millis" : 1489265591663,
"time_in_millis" : 12916
}
},
"1" : {
"stage" : "DONE",
"stats" : {
"number_of_files" : 82,
"processed_files" : 82,
"total_size_in_bytes" : 429223456,
"processed_size_in_bytes" : 429223456,
"start_time_in_millis" : 1489265591607,
"time_in_millis" : 32220
}
},
"2" : {
"stage" : "DONE",
"stats" : {
"number_of_files" : 115,
"processed_files" : 115,
"total_size_in_bytes" : 428510263,
"processed_size_in_bytes" : 428510263,
"start_time_in_millis" : 1489265591615,
"time_in_millis" : 34152
}
},
"3" : {
"stage" : "DONE",
"stats" : {
"number_of_files" : 97,
"processed_files" : 97,
"total_size_in_bytes" : 427697132,
"processed_size_in_bytes" : 427697132,
"start_time_in_millis" : 1489265591619,
"time_in_millis" : 34261
}
},
"4" : {
"stage" : "DONE",
"stats" : {
"number_of_files" : 109,
"processed_files" : 109,
"total_size_in_bytes" : 427310087,
"processed_size_in_bytes" : 427310087,
"start_time_in_millis" : 1489265591611,
"time_in_millis" : 34494
}
}
}
}
}
} ]
}1
2//删除快照
[wisdom@10 ~]$ curl -XDELETE http://10.0.3.41:9200/_snapshot/esbackup/snapshot_index_1
elasticsearch 恢复
1 | //恢复快照 |
1 | //恢复快照中指定索引 |
1 | //查看恢复状态 |
1 | //取消恢复过程(不管是已经恢复完,还是正在恢复)直接删除索引即可 |
kafka broker config
1 | ############################# System ############################# |
kafka add topic replicats
kafka修改topic的replicats数量(添加)
修改前检查
1
2
3
4penn@ubuntu:~$ /mnt/app/kafka.1/bin/kafka-topics.sh --zookeeper 10.0.2.15:2181/kafka --describe --topic test-1
Topic:test-1 PartitionCount:2 ReplicationFactor:2 Configs:
Topic: test-1 Partition: 0 Leader: 4 Replicas: 4,3 Isr: 3,4
Topic: test-1 Partition: 1 Leader: 3 Replicas: 3,4 Isr: 3,4编写json文件,将partition的副本由2个扩大到4个
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26penn@ubuntu:~$ cat increate_replica_partition.json
{
"partitions": [
{
"topic": "test-1",
"partition": 0,
"replicas": [
1,
2,
3,
4
]
},
{
"topic": "test-1",
"partition": 1,
"replicas": [
1,
2,
3,
4
]
}
],
"version": 1
}执行副本集扩容
1
2
3
4
5penn@ubuntu:~$ /mnt/app/kafka.1/bin/kafka-reassign-partitions.sh --zookeeper 10.0.2.15:2181/kafka --reassignment-json-file ./increate_replica_partition.json --execute
Current partition replica assignment
{"version":1,"partitions":[{"topic":"test-1","partition":1,"replicas":[3,4]},{"topic":"test-1","partition":0,"replicas":[4,3]}]}
Save this to use as the --reassignment-json-file option during rollback
Successfully started reassignment of partitions.修改后检查
1
2
3
4
5
6
7
8
9penn@ubuntu:~$ /mnt/app/kafka.1/bin/kafka-reassign-partitions.sh --zookeeper 10.0.2.15:2181/kafka --reassignment-json-file ./increate_replica_partition.json --verify
Status of partition reassignment:
Reassignment of partition [test-1,0] completed successfully
Reassignment of partition [test-1,1] completed successfully
penn@ubuntu:~$ /mnt/app/kafka.1/bin/kafka-topics.sh --zookeeper 10.0.2.15:2181/kafka --describe --topic test-1
Topic:test-1 PartitionCount:2 ReplicationFactor:4 Configs:
Topic: test-1 Partition: 0 Leader: 4 Replicas: 1,2,3,4 Isr: 3,4,2,1
Topic: test-1 Partition: 1 Leader: 3 Replicas: 1,2,3,4 Isr: 3,4,2,1
kafka修改topic的replicats数量(缩减)
缩容前检查
1
2
3
4penn@ubuntu:~$ /mnt/app/kafka.1/bin/kafka-topics.sh --zookeeper 10.0.2.15:2181/kafka --describe --topic test-1
Topic:test-1 PartitionCount:2 ReplicationFactor:4 Configs:
Topic: test-1 Partition: 0 Leader: 1 Replicas: 1,2,3,4 Isr: 3,4,2,1
Topic: test-1 Partition: 1 Leader: 1 Replicas: 1,2,3,4 Isr: 3,4,2,1缩容json
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22penn@ubuntu:~$ cat increate_replica_partition.json
{
"partitions": [
{
"topic": "test-1",
"partition": 0,
"replicas": [
1,
2
]
},
{
"topic": "test-1",
"partition": 1,
"replicas": [
1,
2
]
}
],
"version": 1
}执行json
1
2
3
4
5
6
7
8
9
10
11penn@ubuntu:~$ /mnt/app/kafka.1/bin/kafka-reassign-partitions.sh --zookeeper 10.0.2.15:2181/kafka --reassignment-json-file ./increate_replica_partition.json --execute
Current partition replica assignment
{"version":1,"partitions":[{"topic":"test-1","partition":1,"replicas":[1,2,3,4]},{"topic":"test-1","partition":0,"replicas":[1,2,3,4]}]}
Save this to use as the --reassignment-json-file option during rollback
Successfully started reassignment of partitions.
缩容后检查
penn@ubuntu:~$ /mnt/app/kafka.1/bin/kafka-topics.sh --zookeeper 10.0.2.15:2181/kafka --describe --topic test-1 Topic:test-1 PartitionCount:2 ReplicationFactor:2 Configs:
Topic: test-1 Partition: 0 Leader: 1 Replicas: 1,2 Isr: 2,1
Topic: test-1 Partition: 1 Leader: 1 Replicas: 1,2 Isr: 2,1
kafka migrate and scale
迁移和扩容
1 | 迁移和扩容需要满足两点: |
测试数据
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20penn@ubuntu:~$ /mnt/app/kafka.1/bin/kafka-topics.sh --zookeeper 10.0.2.15:2181/kafka --create --replication-factor 2 --partitions 2 --topic test-1
Created topic "test-1".
penn@ubuntu:~$ /mnt/app/kafka.1/bin/kafka-topics.sh --zookeeper 10.0.2.15:2181/kafka --create --replication-factor 2 --partitions 2 --topic test-2
Created topic "test-2".
penn@ubuntu:~$ /mnt/app/kafka.1/bin/kafka-topics.sh --zookeeper 10.0.2.15:2181/kafka --list
test-1
test-2
penn@ubuntu:~$ /mnt/app/kafka.1/bin/kafka-topics.sh --zookeeper 10.0.2.15:2181/kafka --describe --topic test-1
Topic:test-1 PartitionCount:2 ReplicationFactor:2 Configs:
Topic: test-1 Partition: 0 Leader: 2 Replicas: 2,1 Isr: 2,1
Topic: test-1 Partition: 1 Leader: 1 Replicas: 1,2 Isr: 1,2
penn@ubuntu:~$ /mnt/app/kafka.1/bin/kafka-topics.sh --zookeeper 10.0.2.15:2181/kafka --describe --topic test-2
Topic:test-2 PartitionCount:2 ReplicationFactor:2 Configs:
Topic: test-2 Partition: 0 Leader: 2 Replicas: 2,1 Isr: 2,1
Topic: test-2 Partition: 1 Leader: 1 Replicas: 1,2 Isr: 1,2
通过上面我们看到,我们创建了2个topic,它们存放在1和2两个broker上,此时我们添加第三个broker,并将test-1 topic迁移到3和4broker上准备迁移json
1
2
3
4
5
6
7
8
9
10//创建migration-test-topic.json文件,并写入要迁移的topic
penn@ubuntu:~$ cat migration-test-topic.json
{
"topics": [
{
"topic": "test-1"
}
],
"version": 1
}根据json文件生成迁移JSON语句
1
2
3
4
5
6
7
8
9
10
11//根据上面的migration-test-topic.json文件生成迁移JSON语句
penn@ubuntu:~$ /mnt/app/kafka.1/bin/kafka-reassign-partitions.sh --zookeeper 10.0.2.15:2181/kafka --topics-to-move-json-file ./migration-test-topic.json --broker-list "3,4" --generate
Current partition replica assignment
{"version":1,"partitions":[{"topic":"test-1","partition":1,"replicas":[1,2]},{"topic":"test-1","partition":0,"replicas":[2,1]}]}
Proposed partition reassignment configuration
{"version":1,"partitions":[{"topic":"test-1","partition":1,"replicas":[3,4]},{"topic":"test-1","partition":0,"replicas":[4,3]}]}
//将JSON保存到文件
penn@ubuntu:~$ cat move-to-new-broker.json //将下面的json写入到新文件中
{"version":1,"partitions":[{"topic":"test-1","partition":1,"replicas":[3,4]},{"topic":"test-1","partition":0,"replicas":[4,3]}]}1
2
3
4
5//补充:一个错误
penn@ubuntu:~$ /mnt/app/kafka.1/bin/kafka-reassign-partitions.sh --zookeeper 10.0.2.15:2181/kafka --topics-to-move-json-file ./migration-test-topic.json --broker-list "3" --generate
Partitions reassignment failed due to replication factor: 2 larger than available brokers: 1
org.apache.kafka.common.errors.InvalidReplicationFactorException: replication factor: 2 larger than available brokers: 1
报错原因: 是因为要迁移的topic有两个replica,所以新添加的broker个数最好是replica指定数的倍数对topic进行迁移
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25//迁移前检查
penn@ubuntu:~$ /mnt/app/kafka.1/bin/kafka-reassign-partitions.sh --zookeeper 10.0.2.15:2181/kafka --reassignment-json-file ./move-to-new-broker.json --verify
Status of partition reassignment:
ERROR: Assigned replicas (1,2) don't match the list of replicas for reassignment (3,4) for partition [test-1,1]
ERROR: Assigned replicas (2,1) don't match the list of replicas for reassignment (4,3) for partition [test-1,0]
Reassignment of partition [test-1,1] failed
Reassignment of partition [test-1,0] failed
//迁移
penn@ubuntu:~$ /mnt/app/kafka.1/bin/kafka-reassign-partitions.sh --zookeeper 10.0.2.15:2181/kafka --reassignment-json-file ./move-to-new-broker.json --execute
Current partition replica assignment
{"version":1,"partitions":[{"topic":"test-1","partition":1,"replicas":[1,2]},{"topic":"test-1","partition":0,"replicas":[2,1]}]}
Save this to use as the --reassignment-json-file option during rollback
Successfully started reassignment of partitions.
//迁移后检查
penn@ubuntu:~$ /mnt/app/kafka.1/bin/kafka-reassign-partitions.sh --zookeeper 10.0.2.15:2181/kafka --reassignment-json-file ./move-to-new-broker.json --verify
Status of partition reassignment:
Reassignment of partition [test-1,1] completed successfully
Reassignment of partition [test-1,0] completed successfully
penn@ubuntu:~$ /mnt/app/kafka.1/bin/kafka-topics.sh --zookeeper 10.0.2.15:2181/kafka --describe --topic test-1
Topic:test-1 PartitionCount:2 ReplicationFactor:2 Configs:
Topic: test-1 Partition: 0 Leader: 4 Replicas: 4,3 Isr: 3,4
Topic: test-1 Partition: 1 Leader: 3 Replicas: 3,4 Isr: 3,4