cephadm File Storage (CephFS)


1 Working Directory

root@cephadm-deploy:~# cephadm shell
Inferring fsid 0888a64c-57e6-11ec-ad21-fbe9db6e2e74
Using recent ceph image quay.io/ceph/ceph@sha256:bb6a71f7f481985f6d3b358e3b9ef64c6755b3db5aa53198e0aac38be5c8ae54
root@cephadm-deploy:/#

2 Deploying CephFS

Official documentation:

https://docs.ceph.com/en/pacific/cephadm/services/mds/

https://docs.ceph.com/en/pacific/cephfs/#getting-started-with-cephfs

2.1 List current file systems

root@cephadm-deploy:/# ceph fs ls
No filesystems enabled

2.2 List current storage pools

root@cephadm-deploy:/# ceph osd pool ls
device_health_metrics

2.3 Create the CephFS file system

root@cephadm-deploy:/# ceph fs volume create wgs_cephfs --placement=3                 
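
`ceph fs volume create` bundles several steps: it creates the metadata and data pools, creates the file system, and asks the orchestrator to deploy MDS daemons according to the placement. For orientation only, a rough sketch of the equivalent manual commands (not run in this walkthrough):

# Roughly equivalent manual steps (sketch):
ceph osd pool create cephfs.wgs_cephfs.meta
ceph osd pool create cephfs.wgs_cephfs.data
ceph fs new wgs_cephfs cephfs.wgs_cephfs.meta cephfs.wgs_cephfs.data
ceph orch apply mds wgs_cephfs --placement=3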

2.4 List the storage pools again

root@cephadm-deploy:/# ceph osd pool ls
device_health_metrics
cephfs.wgs_cephfs.meta
cephfs.wgs_cephfs.data

2.5 Verify the CephFS file system

root@cephadm-deploy:/# ceph fs ls
name: wgs_cephfs, metadata pool: cephfs.wgs_cephfs.meta, data pools: [cephfs.wgs_cephfs.data ]

2.6 Check the MDS status

root@cephadm-deploy:/# ceph mds stat
wgs_cephfs:1 {0=wgs_cephfs.cephadm-deploy.ztpmlk=up:active} 2 up:standby

2.7 Check the MDS daemons

root@cephadm-deploy:/# ceph orch ls
NAME                       PORTS        RUNNING  REFRESHED  AGE  PLACEMENT
alertmanager               ?:9093,9094      1/1  3m ago     8h   count:1
crash                                        5/5  9m ago     8h   *
grafana                    ?:3000           1/1  3m ago     8h   count:1
mds.wgs_cephfs                               3/3  3m ago     3m   count:3
mgr                                          2/2  3m ago     8h   count:2
mon                                          5/5  9m ago     8h   count:5
node-exporter              ?:9100           5/5  9m ago     8h   *
osd                                            1  3m ago     -    <unmanaged>
osd.all-available-devices                     14  9m ago     6h   *
prometheus                 ?:9095           1/1  3m ago     8h   count:1
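
To list the individual MDS daemons rather than the service summary, `ceph orch ps` can be filtered (the flag name follows the orchestrator CLI; a plain grep works just as well):

# Show only the MDS daemons managed by cephadm
ceph orch ps --daemon_type mds
# or simply:
ceph orch ps | grep mds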

2.8 Check CephFS file system details

root@cephadm-deploy:/# ceph fs status wgs_cephfs
wgs_cephfs - 0 clients
==========
RANK  STATE   MDS                               ACTIVITY     DNS  INOS  DIRS  CAPS
 0    active  wgs_cephfs.cephadm-deploy.ztpmlk  Reqs: 0 /s    10    13    12     0
          POOL            TYPE      USED  AVAIL
cephfs.wgs_cephfs.meta  metadata   96.0k  94.9G
cephfs.wgs_cephfs.data    data         0  94.9G
STANDBY MDS
wgs_cephfs.ceph-node02.zpdphv
wgs_cephfs.ceph-node01.ellktv
MDS version: ceph version 16.2.7 (dd0603118f56ab514f133c8d2e3adfc983942503) pacific (stable)

2.9 Check the Ceph cluster status

root@cephadm-deploy:/# ceph -s
  cluster:
    id:     0888a64c-57e6-11ec-ad21-fbe9db6e2e74
    health: HEALTH_OK

  services:
    mon: 5 daemons, quorum cephadm-deploy,ceph-node01,ceph-node02,ceph-node03,ceph-node04 (age 35m)
    mgr: cephadm-deploy.jgiulj(active, since 64m), standbys: ceph-node01.anwvfy
    mds: 1/1 daemons up, 2 standby
    osd: 15 osds: 15 up (since 35m), 15 in (since 35m)

  data:
    volumes: 1/1 healthy
    pools:   3 pools, 65 pgs
    objects: 22 objects, 2.3 KiB
    usage:   107 MiB used, 300 GiB / 300 GiB avail
    pgs:     65 active+clean

3 Creating a Client User for CephFS

3.1 Create a client user

root@cephadm-deploy:/# ceph auth add client.wgs mon 'allow rw' mds 'allow rw' osd 'allow rwx pool=cephfs.wgs_cephfs.data'
added key for client.wgs
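
As an alternative to composing the caps by hand with `ceph auth add`, the `ceph fs authorize` helper builds equivalent capabilities scoped to a single file system; a sketch:

# Grant client.wgs read/write access to the root of wgs_cephfs (sketch)
ceph fs authorize wgs_cephfs client.wgs / rw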

3.2 View the client user information

root@cephadm-deploy:/# ceph auth get client.wgs
[client.wgs]
key = AQAdtrBhxOX9BhAAbJtiqOdNrOAE/BmZc1mlyw==
caps mds = "allow rw"
caps mon = "allow rw"
caps osd = "allow rwx pool=cephfs.wgs_cephfs.data"
exported keyring for client.wgs

3.3 Create the client keyring file

root@cephadm-deploy:/# ceph auth get client.wgs -o ceph.client.wgs.keyring
exported keyring for client.wgs

3.4 Verify the client keyring file

root@cephadm-deploy:/# cat ceph.client.wgs.keyring 
[client.wgs]
key = AQAdtrBhxOX9BhAAbJtiqOdNrOAE/BmZc1mlyw==
caps mds = "allow rw"
caps mon = "allow rw"
caps osd = "allow rwx pool=cephfs.wgs_cephfs.data"

3.5 Create the client key file

root@cephadm-deploy:/# ceph auth print-key client.wgs > wgs.key
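
Unlike the keyring, this file holds only the bare base64 secret with no `[client.wgs]` header, which is exactly the format the kernel client's `secretfile=` mount option expects:

# The key file contains only the secret itself:
cat wgs.key
# -> AQAdtrBhxOX9BhAAbJtiqOdNrOAE/BmZc1mlyw==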

4 Configuring the Client

4.1 Add the Ceph APT repository

root@ceph-client01:~# wget -q -O- 'https://mirrors.tuna.tsinghua.edu.cn/ceph/keys/release.asc' | sudo apt-key add -
OK
root@ceph-client01:~# echo "deb https://mirrors.tuna.tsinghua.edu.cn/ceph/debian-pacific $(lsb_release -cs) main" >> /etc/apt/sources.list
root@ceph-client01:~# apt -y update && apt -y upgrade

4.2 Install ceph-common

root@ceph-client01:~# apt -y install ceph-common

4.3 Copy the authentication files to the client

root@cephadm-deploy:/# scp ceph.client.wgs.keyring wgs.key /etc/ceph/ceph.conf 192.168.174.121:/etc/ceph
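
If the deploy node's full ceph.conf should not be shipped to clients, a minimal client configuration can be generated instead (a sketch, run from within the cephadm shell; the output file name is arbitrary):

# Generate a minimal ceph.conf containing only what clients need (mon addresses, fsid)
ceph config generate-minimal-conf > minimal.ceph.conf
# then copy minimal.ceph.conf to the client as /etc/ceph/ceph.conf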

4.4 Verify client permissions

root@ceph-client01:~# ceph --id wgs -s
  cluster:
    id:     0888a64c-57e6-11ec-ad21-fbe9db6e2e74
    health: HEALTH_OK

  services:
    mon: 5 daemons, quorum cephadm-deploy,ceph-node01,ceph-node02,ceph-node03,ceph-node04 (age 58m)
    mgr: cephadm-deploy.jgiulj(active, since 86m), standbys: ceph-node01.anwvfy
    mds: 1/1 daemons up, 2 standby
    osd: 15 osds: 15 up (since 58m), 15 in (since 58m)

  data:
    volumes: 1/1 healthy
    pools:   3 pools, 65 pgs
    objects: 22 objects, 2.3 KiB
    usage:   107 MiB used, 300 GiB / 300 GiB avail
    pgs:     65 active+clean

5 Mounting the CephFS File System

5.1 Verify the Ceph client

root@ceph-client01:~# stat /sbin/mount.ceph
File: /sbin/mount.ceph
Size: 190888 Blocks: 376 IO Block: 4096 regular file
Device: fd00h/64768d Inode: 273881 Links: 1
Access: (0755/-rwxr-xr-x) Uid: ( 0/ root) Gid: ( 0/ root)
Access: 2021-12-08 21:55:01.000000000 +0800
Modify: 2021-09-06 16:41:31.000000000 +0800
Change: 2021-12-08 21:55:05.278599892 +0800
Birth: -
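
The mount helper is shipped by the ceph-common package installed in step 4.2; if it is missing, the providing package can be checked:

# Confirm which package provides the mount helper
dpkg -S mount.ceph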

5.2 Mount the CephFS file system

5.2.1 Create the mount point

root@ceph-client01:~# mkdir /data/cephfs-data -pv

5.2.2 Mount CephFS

root@ceph-client01:~# mount -t ceph 192.168.174.120:6789,192.168.174.103:6789,192.168.174.104:6789:/ /data/cephfs-data -o name=wgs,secretfile=/etc/ceph/wgs.key
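
With only one file system in the cluster the kernel client selects it automatically. If more file systems are added later, the target can be chosen explicitly with the `mds_namespace=` mount option (newer kernels also accept `fs=`); a sketch:

# Explicitly select the wgs_cephfs file system (sketch)
mount -t ceph 192.168.174.120:6789,192.168.174.103:6789,192.168.174.104:6789:/ /data/cephfs-data \
  -o name=wgs,secretfile=/etc/ceph/wgs.key,mds_namespace=wgs_cephfs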

5.3 Verify the mount

root@ceph-client01:~# df -TH
Filesystem                                                         Type      Size  Used  Avail  Use%  Mounted on
udev                                                               devtmpfs  982M     0   982M    0%  /dev
tmpfs                                                              tmpfs     206M  2.0M   204M    1%  /run
/dev/mapper/ubuntu--vg-ubuntu--lv                                  ext4       20G  9.5G   9.5G   51%  /
tmpfs                                                              tmpfs     1.1G     0   1.1G    0%  /dev/shm
tmpfs                                                              tmpfs     5.3M     0   5.3M    0%  /run/lock
tmpfs                                                              tmpfs     1.1G     0   1.1G    0%  /sys/fs/cgroup
192.168.174.120:6789,192.168.174.103:6789,192.168.174.104:6789:/   ceph      102G     0   102G    0%  /data/cephfs-data

5.4 View mount point details

root@ceph-client01:~# stat -f /data/cephfs-data/
File: "/data/cephfs-data/"
ID: a55da9a5983f888c Namelen: 255 Type: ceph
Block size: 4194304 Fundamental block size: 4194304
Blocks: Total: 24306 Free: 24306 Available: 24306
Inodes: Total: 0 Free: -1

5.5 Verify the mount on the Ceph side

root@cephadm-deploy:~# ceph fs status wgs_cephfs
wgs_cephfs - 1 clients    # one client is now connected
==========
RANK  STATE   MDS                               ACTIVITY     DNS  INOS  DIRS  CAPS
 0    active  wgs_cephfs.cephadm-deploy.ztpmlk  Reqs: 0 /s    10    13    12     1
          POOL            TYPE      USED  AVAIL
cephfs.wgs_cephfs.meta  metadata   96.0k  94.9G
cephfs.wgs_cephfs.data    data         0  94.9G
STANDBY MDS
wgs_cephfs.ceph-node02.zpdphv
wgs_cephfs.ceph-node01.ellktv
MDS version: ceph version 16.2.7 (dd0603118f56ab514f133c8d2e3adfc983942503) pacific (stable)

5.6 Configure automatic mounting at boot

root@ceph-client01:~# cat /etc/fstab
192.168.174.120:6789,192.168.174.103:6789,192.168.174.104:6789:/ /data/cephfs-data ceph name=wgs,secretfile=/etc/ceph/wgs.key,_netdev 0 0
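
The fstab entry can be exercised without rebooting:

# Unmount, re-mount everything from fstab, and confirm
umount /data/cephfs-data
mount -a
df -h /data/cephfs-data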

6 Data Verification

6.1 Create mount points

root@ceph-client01:~# mkdir -pv /data/cephfs-data01
mkdir: created directory '/data/cephfs-data01'
root@ceph-client01:~# mkdir -pv /data/cephfs-data02
mkdir: created directory '/data/cephfs-data02'

6.2 Mount CephFS

root@ceph-client01:~# mount -t ceph 192.168.174.120:6789,192.168.174.103:6789,192.168.174.104:6789:/ /data/cephfs-data01 -o name=wgs,secretfile=/etc/ceph/wgs.key
root@ceph-client01:~# mount -t ceph 192.168.174.120:6789,192.168.174.103:6789,192.168.174.104:6789:/ /data/cephfs-data02 -o name=wgs,secretfile=/etc/ceph/wgs.key

6.3 Verify the mounts

root@ceph-client01:~# df -TH
Filesystem                                                         Type      Size  Used  Avail  Use%  Mounted on
udev                                                               devtmpfs  982M     0   982M    0%  /dev
tmpfs                                                              tmpfs     206M  2.0M   204M    1%  /run
/dev/mapper/ubuntu--vg-ubuntu--lv                                  ext4       20G  9.5G   9.5G   51%  /
tmpfs                                                              tmpfs     1.1G     0   1.1G    0%  /dev/shm
tmpfs                                                              tmpfs     5.3M     0   5.3M    0%  /run/lock
tmpfs                                                              tmpfs     1.1G     0   1.1G    0%  /sys/fs/cgroup
192.168.174.120:6789,192.168.174.103:6789,192.168.174.104:6789:/   ceph      102G     0   102G    0%  /data/cephfs-data01
192.168.174.120:6789,192.168.174.103:6789,192.168.174.104:6789:/   ceph      102G     0   102G    0%  /data/cephfs-data02

6.4 Write data via cephfs-data01

root@ceph-client01:~# echo "/data/cephfs-data01" >> /data/cephfs-data01/data01.txt

6.5 Write data via cephfs-data02

root@ceph-client01:~# echo "/data/cephfs-data02" >> /data/cephfs-data02/data02.txt

6.6 Verify the data

root@ceph-client01:~# ls -l /data/cephfs-data01/
total 1
-rw-r--r-- 1 root root 20 Dec 8 22:17 data01.txt
-rw-r--r-- 1 root root 20 Dec 8 22:18 data02.txt
root@ceph-client01:~# ls -l /data/cephfs-data02/
total 1
-rw-r--r-- 1 root root 20 Dec 8 22:17 data01.txt
-rw-r--r-- 1 root root 20 Dec 8 22:18 data02.txt
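
Both mount points list the same files because they are simply two mounts of the same CephFS root. On the Ceph side, the written data ends up as objects in the data pool and can be inspected roughly like this (a sketch, run on the deploy node):

# Show pool usage and list the objects backing the test files
ceph df
rados -p cephfs.wgs_cephfs.data ls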

7 Removing the CephFS File System

ceph fs fail wgs_cephfs
ceph tell 'mon.*' injectargs '--mon-allow-pool-delete=true'
ceph fs rm wgs_cephfs --yes-i-really-mean-it
ceph osd pool rm cephfs.wgs_cephfs.meta cephfs.wgs_cephfs.meta --yes-i-really-really-mean-it
ceph osd pool rm cephfs.wgs_cephfs.data cephfs.wgs_cephfs.data --yes-i-really-really-mean-it
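
If the MDS service deployed in step 2.3 is no longer needed either, it can be removed through the orchestrator as well (a sketch; the service name matches the `ceph orch ls` output above):

# Remove the cephadm-managed MDS service for this file system
ceph orch rm mds.wgs_cephfs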

 


