GlusterFS Setup and Tuning

 
1. Install the packages and start the service on every node
# Install the Gluster yum repository first
$ yum install centos-release-gluster -y

# Install the GlusterFS components
$ yum install -y glusterfs glusterfs-server glusterfs-fuse glusterfs-rdma glusterfs-geo-replication glusterfs-devel

# Create the glusterd working directory
$ mkdir /opt/glusterd

# Switch the glusterd working directory from /var/lib to /opt
$ sed -i 's/var\/lib/opt/g' /etc/glusterfs/glusterd.vol

# Start glusterd
$ systemctl start glusterd.service

# Enable it at boot
$ systemctl enable glusterd.service

# Check the status
$ systemctl status glusterd.service
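
To verify the working-directory change from the sed command above took effect, you can read it back from the volfile:

$ grep working-directory /etc/glusterfs/glusterd.vol
    option working-directory /opt/glusterd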
2. Set up DNS names for each node, or map them directly in /etc/hosts
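If you go the /etc/hosts route, entries like the following on every node are enough (the IPs here are placeholders; substitute your own):

192.168.1.101  test-001.jimmysong.io
192.168.1.102  test-002.jimmysong.io
192.168.1.103  test-003.jimmysong.io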
3. Open the GlusterFS ports and create the storage directory on each node:
# Open the ports
iptables -A INPUT -m state --state NEW -m tcp -p tcp --dport 24007:24011 -j ACCEPT
iptables -A INPUT -m state --state NEW -m tcp -p tcp --dport 111 -j ACCEPT
iptables -A INPUT -m state --state NEW -m udp -p udp --dport 111 -j ACCEPT
iptables -A INPUT -m state --state NEW -m tcp -p tcp --dport 38465:38485 -j ACCEPT
service iptables save

    If necessary, add -i eth1 to the rules above to open the ports only on the internal-network interface (the INPUT chain selects the inbound interface with -i, not -o); see the example below.
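    For example, assuming eth1 is the internal interface:

iptables -A INPUT -i eth1 -m state --state NEW -m tcp -p tcp --dport 24007:24011 -j ACCEPT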

# Create the storage directory
$ mkdir /opt/gfs_data

4. Add the other nodes to the cluster:

# Add the nodes to the cluster
# The machine running the command does not need to probe itself
[root@master-001 ~]# gluster peer probe test-002.jimmysong.io
[root@master-001 ~]# gluster peer probe test-003.jimmysong.io

# Check the cluster status
$ gluster peer status
Number of Peers: 2

Hostname: test-002.jimmysong.io
Uuid: f25546cc-2011-457d-ba24-342554b51317
State: Peer in Cluster (Connected)

Hostname: test-003.jimmysong.io
Uuid: 42b6cad1-aa01-46d0-bbba-f7ec6821d66d
State: Peer in Cluster (Connected)
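
As a quick cross-check, a compact view of the pool (including the local node) is also available:

$ gluster pool list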

5. Create and start a volume:

# Create a distributed volume
$ gluster volume create k8s-volume transport tcp test-001.jimmysong.io:/opt/gfs_data test-002.jimmysong.io:/opt/gfs_data test-003.jimmysong.io:/opt/gfs_data force

# Check the volume status
$ gluster volume info
Volume Name: k8s-volume
Type: Distribute
Volume ID: 9a3b0710-4565-4eb7-abae-1d5c8ed625ac
Status: Created
Snapshot Count: 0
Number of Bricks: 3
Transport-type: tcp
Bricks:
Brick1: test-001.jimmysong.io:/opt/gfs_data
Brick2: test-002.jimmysong.io:/opt/gfs_data
Brick3: test-003.jimmysong.io:/opt/gfs_data
Options Reconfigured:
transport.address-family: inet
nfs.disable: on

# Start the distributed volume
$ gluster volume start k8s-volume
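
After starting, it is worth confirming that the volume shows Started and that every brick process is online:

$ gluster volume status k8s-volume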
6. Some tuning parameters for reference:

# Enable quota on a volume
$ gluster volume quota k8s-volume enable

# Limit the usage quota of a volume
$ gluster volume quota k8s-volume limit-usage / 1TB

# Set the cache size (default: 32MB)
$ gluster volume set k8s-volume performance.cache-size 4GB

# Set the number of I/O threads (too many can crash the process)
$ gluster volume set k8s-volume performance.io-thread-count 16

# Set the network ping timeout (default: 42s)
$ gluster volume set k8s-volume network.ping-timeout 10

# Set the write-behind window size (default: 1MB)
$ gluster volume set k8s-volume performance.write-behind-window-size 1024MB
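
To confirm a setting took effect, read it back (volume get is available in GlusterFS 3.8 and later; on older releases check Options Reconfigured in gluster volume info), and use quota list to see the applied limits:

$ gluster volume get k8s-volume performance.cache-size
$ gluster volume quota k8s-volume list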

Example (a replicated volume with an arbiter brick):
[root@master GlusterFSsupport]# gluster volume create dns-vol replica 4 arbiter 1 transport tcp master.k8s.plus7s.com:/opt/gfs_data/dns node1.k8s.plus7s.com:/opt/gfs_data/dns node2.k8s.plus7s.com:/opt/gfs_data/dns node3.k8s.plus7s.com:/opt/gfs_data/dns force

The force parameter is required here because the bricks live on the local root filesystem; GlusterFS refuses to create bricks on the root partition unless force is given.
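Without force, the create command is rejected with a warning along these lines (exact wording varies by version):

volume create: dns-vol: failed: The brick master.k8s.plus7s.com:/opt/gfs_data/dns is being created in the root partition. It is recommended that you don't use the system's root partition for storage backend. Or use 'force' at the end of the command if you want to override this behavior.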

Check the status:
[root@master GlusterFSsupport]# gluster volume info dns-vol

Volume Name: dns-vol
Type: Replicate
Volume ID: bfe389f2-560c-4a51-9f42-99389e946d33
Status: Created
Snapshot Count: 0
Number of Bricks: 1 x (3 + 1) = 4
Transport-type: tcp
Bricks:
Brick1: master.k8s.plus7s.com:/opt/gfs_data/dns
Brick2: node1.k8s.plus7s.com:/opt/gfs_data/dns
Brick3: node2.k8s.plus7s.com:/opt/gfs_data/dns
Brick4: node3.k8s.plus7s.com:/opt/gfs_data/dns (arbiter)
Options Reconfigured:
transport.address-family: inet
nfs.disable: on
performance.client-io-threads: off

Start the volume:
[root@master GlusterFSsupport]# gluster volume start dns-vol
volume start: dns-vol: success
7. Install the client
# Install the GlusterFS client on all k8s nodes
$ yum install -y glusterfs glusterfs-fuse
8. Mount on the client
Mount with the GlusterFS native client:
[root@c6-vm3 yum.repos.d]# mount -t glusterfs 10.99.0.1:/dns-vol /mnt/dns/
[root@c6-vm3 mnt]# df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/sda2        29G  923M   27G   4% /
tmpfs           358M     0  358M   0% /dev/shm
/dev/sda1       190M   25M  156M  14% /boot
10.99.0.1:/dns-vol  9.8G   23M  9.2G   1% /mnt/dns
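
To make the native mount survive reboots, a common approach is an /etc/fstab entry like the one below (_netdev defers the mount until networking is up; server and paths taken from the example above):

10.99.0.1:/dns-vol  /mnt/dns  glusterfs  defaults,_netdev  0 0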
Mount over NFS:
[root@c6-vm2 gfs1]# mount -t nfs -o mountproto=tcp,vers=3 c6-vm2:/gv2 /mnt/
[root@c6-vm2 gfs1]# df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/sda2        29G  875M   27G   4% /
tmpfs           358M     0  358M   0% /dev/shm
/dev/sda1       190M   25M  156M  14% /boot
/dev/sdb1       9.8G   23M  9.2G   1% /data/gfs1
c6-vm2:/gv2     9.8G   33M   14G   1% /mnt
# Create test files
[root@c6-vm3 mnt]# cd /mnt/
[root@c6-vm3 mnt]# touch file{1..10}
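
As a final sanity check, look inside a brick directory on one of the server nodes: on a replicated volume such as dns-vol every data brick holds a copy of each file, while a distributed volume such as k8s-volume spreads the files across bricks.

# On a server node (brick path from the dns-vol example above)
$ ls /opt/gfs_data/dns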