1 Working Directory
root@cephadm-deploy:~# cephadm shell
Inferring fsid 0888a64c-57e6-11ec-ad21-fbe9db6e2e74
Using recent ceph image quay.io/ceph/ceph@sha256:bb6a71f7f481985f6d3b358e3b9ef64c6755b3db5aa53198e0aac38be5c8ae54
root@cephadm-deploy:/#
2 NFS Cluster Management
Official documentation:
https://docs.ceph.com/en/pacific/mgr/nfs/
Starting with Ceph Pacific, the nfs mgr module must be enabled.
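If the nfs commands below report that the module is not enabled, it can be turned on manually; a minimal sketch:
$ ceph mgr module enable nfs
# confirm it appears among the enabled modules
$ ceph mgr module ls | grep nfs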
2.1 List NFS clusters
root@cephadm-deploy:~# ceph nfs cluster ls
2.2 Write nfs.yml
root@cephadm-deploy:~# cat nfs.yml
service_type: nfs
service_id: wgsnfs
placement:
  hosts:
    - ceph-node01
    - cephadm-deploy
spec:
  port: 2049
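For reference, the same NFS cluster can also be created without a spec file using the nfs module's one-liner; a sketch, assuming the Pacific syntax (exact flags differ between point releases):
$ ceph nfs cluster create wgsnfs "ceph-node01,cephadm-deploy"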
2.3 Create the NFS Ganesha cluster
root@cephadm-deploy:~# ceph orch apply -i nfs.yml
Scheduled nfs.wgsnfs update...
2.4 Write nfs-lvs.yml
root@cephadm-deploy:~# cat nfs-lvs.yml
service_type: ingress
service_id: nfs.wgsnfs
placement:
  count: 3
spec:
  backend_service: nfs.wgsnfs
  frontend_port: 20490
  monitor_port: 9000
  virtual_ip: 192.168.174.251/24
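The ingress service places an haproxy frontend and a keepalived-managed virtual IP in front of the NFS daemons. To see which host currently holds the VIP, a quick (hypothetical) check run on each ingress host:
$ ip addr | grep 192.168.174.251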
2.5 Create the NFS high-availability ingress
root@cephadm-deploy:~# ceph orch apply -i nfs-lvs.yml
Scheduled ingress.nfs.wgsnfs update...
2.6 List NFS clusters
root@cephadm-deploy:/# ceph nfs cluster ls
wgsnfs
2.7 View Ceph cluster services
root@cephadm-deploy:~# ceph orch ls |grep nfs
ingress.nfs.wgsnfs 192.168.174.251:20490,9000 6/6 3m ago 4m count:3
nfs.wgsnfs ?:2049 2/2 3m ago 5m ceph-node01;cephadm-deploy
2.8 View the containers created on each node
On the cephadm-deploy node:
root@cephadm-deploy:~# docker ps |grep nfs
fa6fd4360245 arcts/keepalived "./init.sh" 4 minutes ago Up 4 minutes ceph-0888a64c-57e6-11ec-ad21-fbe9db6e2e74-keepalived-nfs-wgsnfs-cephadm-deploy-nsuwtu
11288427bb40 haproxy:2.3 "docker-entrypoint.s…" 5 minutes ago Up 5 minutes ceph-0888a64c-57e6-11ec-ad21-fbe9db6e2e74-haproxy-nfs-wgsnfs-cephadm-deploy-egujpl
9888fee1d909 quay.io/ceph/ceph "/usr/bin/ganesha.nf…" 5 minutes ago Up 5 minutes ceph-0888a64c-57e6-11ec-ad21-fbe9db6e2e74-nfs-wgsnfs-1-0-cephadm-deploy-hwgipi
On the ceph-node01 node:
root@ceph-node01:~# docker ps |grep nfs
6b30825c37d4 arcts/keepalived "./init.sh" 4 minutes ago Up 4 minutes ceph-0888a64c-57e6-11ec-ad21-fbe9db6e2e74-keepalived-nfs-wgsnfs-ceph-node01-rgxlxi
b45c35028873 haproxy:2.3 "docker-entrypoint.s…" 4 minutes ago Up 4 minutes ceph-0888a64c-57e6-11ec-ad21-fbe9db6e2e74-haproxy-nfs-wgsnfs-ceph-node01-lwojxg
8ffb124d06ec quay.io/ceph/ceph "/usr/bin/ganesha.nf…" 5 minutes ago Up 5 minutes ceph-0888a64c-57e6-11ec-ad21-fbe9db6e2e74-nfs-wgsnfs-1-0-ceph-node01-pgdqkp
2.9 View service ports
root@cephadm-deploy:~# ceph orch ls
NAME PORTS RUNNING REFRESHED AGE PLACEMENT
alertmanager ?:9093,9094 1/1 3m ago 3h count:1
crash 5/5 5m ago 3h *
grafana ?:3000 1/1 3m ago 3h count:1
ingress.nfs.wgsnfs 192.168.174.251:20490,9000 6/6 5m ago 6m count:3
mds.wgs_cephfs 3/3 5m ago 19h count:3
mgr 1/2 5m ago 3h count:2
mon 5/5 5m ago 3h count:5
nfs.wgsnfs ?:2049 2/2 5m ago 7m ceph-node01;cephadm-deploy
node-exporter ?:9100 5/5 5m ago 3h *
osd 1 5m ago - <unmanaged>
osd.all-available-devices 14 5m ago 1h *
prometheus ?:9095 1/1 3m ago 3h count:1
rgw.wgs_rgw ?:8000 6/6 5m ago 6h count-per-host:2;label:rgw
2.10 View NFS-related daemon processes
root@cephadm-deploy:~# ceph orch ps |grep nfs
haproxy.nfs.wgsnfs.ceph-node01.nkcvsz ceph-node01 *:20490,9000 running (7m) 6m ago 7m 8227k - 2.3.16-49b2134 8171b25df830 78dad96347d0
haproxy.nfs.wgsnfs.ceph-node02.nnexpa ceph-node02 *:20490,9000 running (6m) 6m ago 6m 8867k - 2.3.16-49b2134 8171b25df830 5537a630976e
haproxy.nfs.wgsnfs.cephadm-deploy.egujpl cephadm-deploy *:20490,9000 running (7m) 4m ago 7m 8223k - 2.3.16-49b2134 8171b25df830 11288427bb40
keepalived.nfs.wgsnfs.ceph-node01.zdfavu ceph-node01 running (6m) 6m ago 6m 5459k - 2.0.5 073e0c3cd1b9 d11b2b413e36
keepalived.nfs.wgsnfs.ceph-node02.ckothj ceph-node02 running (6m) 6m ago 6m 2411k - 2.0.5 073e0c3cd1b9 cad4f6a5d3f1
keepalived.nfs.wgsnfs.cephadm-deploy.nsuwtu cephadm-deploy running (6m) 4m ago 6m 5556k - 2.0.5 073e0c3cd1b9 fa6fd4360245
nfs.wgsnfs.0.0.ceph-node01.aixpmu ceph-node01 *:2049 running (8m) 6m ago 8m 39.8M - 3.5 cc266d6139f4 a4f561111c0f
nfs.wgsnfs.1.0.cephadm-deploy.hwgipi cephadm-deploy *:2049 running (8m) 4m ago 8m 36.1M - 3.5 cc266d6139f4 9888fee1d909
3 Create a CephFS Export
Export creation is only supported for NFS Ganesha clusters deployed with the nfs interface.
3.1 Command syntax
$ ceph nfs export create cephfs --cluster-id <cluster_id> --pseudo-path <pseudo_path> --fsname <fsname> [--readonly] [--path=/path/in/cephfs] [--client_addr <value>...] [--squash <value>]
<cluster_id> is the NFS Ganesha cluster ID.
<pseudo_path> is the export position within the NFS v4 pseudo filesystem where the export will be available on the server. It must be an absolute path and unique.
<fsname> is the name of the FS volume used by the NFS Ganesha cluster that will serve this export.
<path> is the path within CephFS. A valid path should be given; the default is "/", and it does not have to be unique. The path of a subvolume can be fetched with:
$ ceph fs subvolume getpath <vol_name> <subvol_name> [--group_name <subvol_group_name>]
<client_addr> is the list of client addresses to which these export permissions apply. By default, all clients can access the export according to the specified export permissions.
<squash> defines the kind of user-ID squashing to perform. The default value is no_root_squash.
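An export created with this command can later be removed by its pseudo path; a minimal sketch of the matching Pacific command (the pseudo path here is hypothetical):
$ ceph nfs export rm wgsnfs /some-old-export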
3.2 View the CephFS file system
root@cephadm-deploy:/# ceph fs ls
name: wgs_cephfs, metadata pool: cephfs.wgs_cephfs.meta, data pools: [cephfs.wgs_cephfs.data ]
3.3 View the NFS Ganesha cluster ID
root@cephadm-deploy:/# ceph nfs cluster ls
wgsnfs
3.4 Create the CephFS export
root@cephadm-deploy:/# ceph nfs export create cephfs --cluster-id wgsnfs --pseudo-path /cephfs --fsname wgs_cephfs --path=/
{
"bind": "/cephfs",
"fs": "wgs_cephfs",
"path": "/",
"cluster": "wgsnfs",
"mode": "RW"
}
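For a narrower export, the same command can target a CephFS subvolume instead of the file system root; a sketch, where mysubvol is a hypothetical subvolume name:
# resolve the subvolume's real path, then export it under its own pseudo path
$ SUBVOL_PATH=$(ceph fs subvolume getpath wgs_cephfs mysubvol)
$ ceph nfs export create cephfs --cluster-id wgsnfs --pseudo-path /mysubvol --fsname wgs_cephfs --path="$SUBVOL_PATH"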
3.5 View CephFS export information
root@cephadm-deploy:/# ceph nfs export info wgsnfs /cephfs
{
"export_id": 1,
"path": "/",
"cluster_id": "wgsnfs",
"pseudo": "/cephfs",
"access_type": "RW",
"squash": "none",
"security_label": true,
"protocols": [
4
],
"transports": [
"TCP"
],
"fsal": {
"name": "CEPH",
"user_id": "nfs.wgsnfs.1",
"fs_name": "wgs_cephfs"
},
"clients": []
}
4 RGW Bucket Export
Export creation is only supported for NFS Ganesha clusters deployed with the nfs interface.
If multi-site RGW is enabled, Ceph can only export RGW buckets in the default realm.
4.1 Command syntax
$ ceph nfs export create rgw --cluster-id <cluster_id> --pseudo-path <pseudo_path> --bucket <bucket_name> [--user-id <user-id>] [--readonly] [--client_addr <value>...] [--squash <value>]
<cluster_id> is the NFS Ganesha cluster ID.
<pseudo_path> is the export position within the NFS v4 pseudo filesystem where the export will be available on the server. It must be an absolute path and unique.
<bucket_name> is the name of the bucket to export.
<user_id> is optional and specifies which RGW user will be used for read/write operations on the bucket. If not specified, the user that owns the bucket is used.
<client_addr> is the list of client addresses to which these export permissions apply. By default, all clients can access the export according to the specified export permissions.
<squash> defines the kind of user-ID squashing to perform. The default value is no_root_squash.
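As an illustration of the optional flags, a read-only export limited to one subnet might look like the following (the /robucket pseudo path and the subnet are hypothetical):
$ ceph nfs export create rgw --cluster-id wgsnfs --pseudo-path /robucket --bucket wgsbucket --readonly --client_addr 192.168.174.0/24 --squash root_squash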
4.2 List buckets
root@cephadm-deploy:/# radosgw-admin bucket list
[
"wgsbucket"
]
4.3 View the NFS Ganesha cluster ID
root@cephadm-deploy:/# ceph nfs cluster ls
wgsnfs
4.4 Create the RGW bucket export
root@cephadm-deploy:/# ceph nfs export create rgw --cluster-id wgsnfs --pseudo-path /bucketdata --bucket wgsbucket
{
"bind": "/bucketdata",
"path": "wgsbucket",
"cluster": "wgsnfs",
"mode": "RW",
"squash": "none"
}
4.5 View RGW bucket export information
root@cephadm-deploy:/# ceph nfs export info wgsnfs /bucketdata
{
"export_id": 2,
"path": "wgsbucket",
"cluster_id": "wgsnfs",
"pseudo": "/bucketdata",
"access_type": "RW",
"squash": "none",
"security_label": true,
"protocols": [
4
],
"transports": [
"TCP"
],
"fsal": {
"name": "RGW",
"user_id": "wgs01",
"access_key_id": "BKRXWW1XBXZI04MDEM09",
"secret_access_key": "pbkRtYmG5fABMe4wFuV7VPENIKcXbj0bUiYtxnCB"
},
"clients": []
}
5 RGW User Export
5.1 Command syntax
$ ceph nfs export create rgw --cluster-id <cluster_id> --pseudo-path <pseudo_path> --user-id <user-id> [--readonly] [--client_addr <value>...] [--squash <value>]
5.2 View the NFS Ganesha cluster ID
root@cephadm-deploy:/# ceph nfs cluster ls
wgsnfs
5.3 List RGW users
root@cephadm-deploy:/# radosgw-admin user list
[
"dashboard",
"wgs01"
]
5.4 Create the RGW user export
root@cephadm-deploy:/# ceph nfs export create rgw --cluster-id wgsnfs --pseudo-path /userbucket --user-id wgs01
{
"bind": "/userbucket",
"path": "/",
"cluster": "wgsnfs",
"mode": "RW",
"squash": "none"
}
5.5 View RGW user export information
root@cephadm-deploy:/# ceph nfs export info wgsnfs /userbucket
{
"export_id": 3,
"path": "/",
"cluster_id": "wgsnfs",
"pseudo": "/userbucket",
"access_type": "RW",
"squash": "none",
"security_label": true,
"protocols": [
4
],
"transports": [
"TCP"
],
"fsal": {
"name": "RGW",
"user_id": "wgs01",
"access_key_id": "BKRXWW1XBXZI04MDEM09",
"secret_access_key": "pbkRtYmG5fABMe4wFuV7VPENIKcXbj0bUiYtxnCB"
},
"clients": []
}
6 List NFS Exports
The detailed listing of all three exports in the cluster (presumably the output of ceph nfs export ls wgsnfs --detailed):
[
{
"export_id": 1,
"path": "/",
"cluster_id": "wgsnfs",
"pseudo": "/cephfs",
"access_type": "RW",
"squash": "none",
"security_label": true,
"protocols": [
4
],
"transports": [
"TCP"
],
"fsal": {
"name": "CEPH",
"user_id": "nfs.wgsnfs.1",
"fs_name": "wgs_cephfs"
},
"clients": []
},
{
"export_id": 2,
"path": "wgsbucket",
"cluster_id": "wgsnfs",
"pseudo": "/bucketdata",
"access_type": "RW",
"squash": "none",
"security_label": true,
"protocols": [
4
],
"transports": [
"TCP"
],
"fsal": {
"name": "RGW",
"user_id": "wgs01",
"access_key_id": "BKRXWW1XBXZI04MDEM09",
"secret_access_key": "pbkRtYmG5fABMe4wFuV7VPENIKcXbj0bUiYtxnCB"
},
"clients": []
},
{
"export_id": 3,
"path": "/",
"cluster_id": "wgsnfs",
"pseudo": "/userbucket",
"access_type": "RW",
"squash": "none",
"security_label": true,
"protocols": [
4
],
"transports": [
"TCP"
],
"fsal": {
"name": "RGW",
"user_id": "wgs01",
"access_key_id": "BKRXWW1XBXZI04MDEM09",
"secret_access_key": "pbkRtYmG5fABMe4wFuV7VPENIKcXbj0bUiYtxnCB"
},
"clients": []
}
]
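The JSON above can also serve as a template for editing exports in place; Pacific's nfs module can re-apply an edited export specification from a file, roughly as follows (export.json is a hypothetical file name):
$ ceph nfs export apply wgsnfs -i export.json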
7 Mount CephFS over NFS on the Client
7.1 Install nfs-common on the client
root@ceph-client01:~# apt -y install nfs-common
7.2 Create mount points
root@ceph-client01:~# mkdir -pv /data/{cephfs-data01,cephfs-data02}
7.3 Mount CephFS natively (mount -t ceph)
7.3.1 Mount CephFS
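The mount below authenticates as the CephX client wgs with a secret file. If that file is not yet present on the client, it can presumably be generated from a host holding the admin keyring (client name and path are taken from the mount options used here):
# write the bare key of client.wgs into the secret file expected by mount.ceph
$ ceph auth get-key client.wgs > /etc/ceph/wgs.key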
root@ceph-client01:~# mount -t ceph 192.168.174.120:6789,192.168.174.103:6789,192.168.174.104:6789:/ /data/cephfs-data02 -o name=wgs,secretfile=/etc/ceph/wgs.key
7.3.2 Verify the mount
root@ceph-client01:~# df -TH
Filesystem Type Size Used Avail Use% Mounted on
udev devtmpfs 982M 0 982M 0% /dev
tmpfs tmpfs 206M 2.0M 204M 1% /run
/dev/mapper/ubuntu--vg-ubuntu--lv ext4 20G 9.8G 9.2G 52% /
tmpfs tmpfs 1.1G 0 1.1G 0% /dev/shm
tmpfs tmpfs 5.3M 0 5.3M 0% /run/lock
tmpfs tmpfs 1.1G 0 1.1G 0% /sys/fs/cgroup
192.168.174.120:6789,192.168.174.103:6789,192.168.174.104:6789:/ ceph 102G 0 102G 0% /data/cephfs-data02
7.3.3 View the mount point contents
root@ceph-client01:~# ls -l /data/cephfs-data02/
total 1
-rw-r--r-- 1 root root 20 Dec 8 22:17 data01.txt
-rw-r--r-- 1 root root 20 Dec 8 22:18 data02.txt
7.4 Mount CephFS over NFS
7.4.1 Mount CephFS
root@ceph-client01:~# mount -t nfs -o port=20490,nfsvers=4.1,proto=tcp 192.168.174.251:/cephfs /data/cephfs-data01
7.4.2 Verify the mount
root@ceph-client01:~# df -TH
Filesystem Type Size Used Avail Use% Mounted on
udev devtmpfs 982M 0 982M 0% /dev
tmpfs tmpfs 206M 2.0M 204M 1% /run
/dev/mapper/ubuntu--vg-ubuntu--lv ext4 20G 9.8G 9.2G 52% /
tmpfs tmpfs 1.1G 0 1.1G 0% /dev/shm
tmpfs tmpfs 5.3M 0 5.3M 0% /run/lock
tmpfs tmpfs 1.1G 0 1.1G 0% /sys/fs/cgroup
/dev/loop5 squashfs 46M 46M 0 100% /snap/snapd/14295
192.168.174.120:6789,192.168.174.103:6789,192.168.174.104:6789:/ ceph 102G 0 102G 0% /data/cephfs-data02
192.168.174.251:/cephfs nfs4 102G 0 102G 0% /data/cephfs-data01
7.4.3 Verify the mount point data
root@ceph-client01:~# ls -l /data/cephfs-data01/
total 1
-rw-r--r-- 1 root root 20 Dec 8 22:17 data01.txt
-rw-r--r-- 1 root root 20 Dec 8 22:18 data02.txt
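To make the NFS mount persistent across reboots, an fstab entry mirroring the mount options above could be added on the client; a sketch, not part of the original walkthrough:
# /etc/fstab
192.168.174.251:/cephfs  /data/cephfs-data01  nfs4  port=20490,nfsvers=4.1,proto=tcp,_netdev  0 0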
7.5 Unmount the mount points
root@ceph-client01:~# umount /data/cephfs-data01
root@ceph-client01:~# umount /data/cephfs-data02
8 Mount RGW over NFS on the Client
8.1 Install nfs-common on the client
root@ceph-client01:~# apt -y install nfs-common
8.2 Create mount points
root@ceph-client01:~# mkdir /data/{rgw_data,rgw_userdata} -pv
mkdir: created directory '/data/rgw_data'
mkdir: created directory '/data/rgw_userdata'
8.3 RGW bucket
8.3.1 Mount the RGW bucket
root@ceph-client01:~# mount -t nfs -o port=20490,nfsvers=4.1,proto=tcp 192.168.174.251:/bucketdata /data/rgw_data/
8.3.2 Verify the mount
root@ceph-client01:~# df -TH
Filesystem Type Size Used Avail Use% Mounted on
udev devtmpfs 982M 0 982M 0% /dev
tmpfs tmpfs 206M 2.0M 204M 1% /run
/dev/mapper/ubuntu--vg-ubuntu--lv ext4 20G 9.8G 9.2G 52% /
tmpfs tmpfs 1.1G 0 1.1G 0% /dev/shm
tmpfs tmpfs 5.3M 0 5.3M 0% /run/lock
tmpfs tmpfs 1.1G 0 1.1G 0% /sys/fs/cgroup
192.168.174.251:/bucketdata nfs4 323G 680M 322G 1% /data/rgw_data
8.3.3 View the current mount point contents
root@ceph-client01:~# ls /data/rgw_data/ -l
total 0
8.3.4 Upload a file from the client with s3cmd
root@ceph-client01:~# s3cmd put /var/log/syslog s3://wgsbucket/sys_logs
upload: '/var/log/syslog' -> 's3://wgsbucket/sys_logs' [1 of 1]
5968909 of 5968909 100% in 2s 2.07 MB/s done
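Before checking the NFS mount, the upload can also be verified from the S3 side; a quick sketch:
$ s3cmd ls s3://wgsbucket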
8.3.5 View Ceph cluster I/O
root@cephadm-deploy:/# ceph -s
cluster:
id: 0888a64c-57e6-11ec-ad21-fbe9db6e2e74
health: HEALTH_WARN
mon ceph-node01 is low on available space
services:
mon: 5 daemons, quorum cephadm-deploy,ceph-node01,ceph-node02,ceph-node03,ceph-node04 (age 21m)
mgr: ceph-node01.anwvfy(active, since 22m), standbys: cephadm-deploy.jgiulj
mds: 1/1 daemons up, 2 standby
osd: 15 osds: 15 up (since 21m), 15 in (since 21h)
rgw: 6 daemons active (3 hosts, 1 zones)
rgw-nfs: 2 daemons active (2 hosts, 1 zones)
data:
volumes: 1/1 healthy
pools: 10 pools, 241 pgs
objects: 275 objects, 5.7 MiB
usage: 664 MiB used, 299 GiB / 300 GiB avail
pgs: 241 active+clean
io:
client: 170 B/s rd, 0 op/s rd, 0 op/s wr
8.3.6 Verify the mount point data
Note that the uploaded data may appear on the mount with a slight delay due to disk read/write performance.
root@ceph-client01:~# ls -l /data/rgw_data/
total 5829
-rw-rw-rw- 1 root root 5968909 Dec 9 18:14 sys_logs
8.4 RGW user
8.4.1 Mount the RGW user export
root@ceph-client01:~# mount -t nfs -o port=20490,nfsvers=4.1,proto=tcp 192.168.174.251:/userbucket /data/rgw_userdata/
8.4.2 Verify the mount
root@ceph-client01:~# df -TH
Filesystem Type Size Used Avail Use% Mounted on
udev devtmpfs 982M 0 982M 0% /dev
tmpfs tmpfs 206M 2.0M 204M 1% /run
/dev/mapper/ubuntu--vg-ubuntu--lv ext4 20G 9.8G 9.2G 52% /
tmpfs tmpfs 1.1G 0 1.1G 0% /dev/shm
tmpfs tmpfs 5.3M 0 5.3M 0% /run/lock
tmpfs tmpfs 1.1G 0 1.1G 0% /sys/fs/cgroup
192.168.174.251:/bucketdata nfs4 323G 698M 322G 1% /data/rgw_data
192.168.174.251:/userbucket nfs4 323G 698M 322G 1% /data/rgw_userdata
8.4.3 View the current mount point contents
root@ceph-client01:~# tree /data/rgw_userdata/
/data/rgw_userdata/
└── wgsbucket
└── sys_logs
1 directory, 1 file
8.5 Unmount the mount points
root@ceph-client01:~# umount /data/rgw_userdata
root@ceph-client01:~# umount /data/rgw_data
9 View Ceph Cluster Status
root@cephadm-deploy:/# ceph -s
cluster:
id: 0888a64c-57e6-11ec-ad21-fbe9db6e2e74
health: HEALTH_WARN
mon ceph-node01 is low on available space
services:
mon: 5 daemons, quorum cephadm-deploy,ceph-node01,ceph-node02,ceph-node03,ceph-node04 (age 35m)
mgr: ceph-node01.anwvfy(active, since 37m), standbys: cephadm-deploy.jgiulj
mds: 1/1 daemons up, 2 standby
osd: 15 osds: 15 up (since 35m), 15 in (since 21h)
rgw: 6 daemons active (3 hosts, 1 zones)
rgw-nfs: 2 daemons active (2 hosts, 1 zones)
data:
volumes: 1/1 healthy
pools: 10 pools, 241 pgs
objects: 275 objects, 5.7 MiB
usage: 650 MiB used, 299 GiB / 300 GiB avail
pgs: 241 active+clean
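The HEALTH_WARN comes from the ceph-node01 monitor host running low on disk space; the details are available via the standard health command, and the warning clears once space is freed on that host:
$ ceph health detail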
10 View the Ceph Dashboard