From the node01 node, configure an OSD (Object Storage Device) for each node.
1) Configure firewalld on each node
[root@node01 ~]# for NODE in node01 node02 node03
do
ssh $NODE "firewall-cmd --add-service=ceph; firewall-cmd --runtime-to-permanent"
done
success
success
success
success
Warning: ALREADY_ENABLED: 'ceph' already in 'public'
success
success
[root@node01 ~]#
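Optionally, you can confirm that the ceph service survived the --runtime-to-permanent step on every node. This quick check is not part of the original transcript and only uses the standard firewall-cmd query option:
for NODE in node01 node02 node03
do
  # "ceph" should appear in the permanent service list on every node
  ssh $NODE "firewall-cmd --permanent --list-services"
done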
2) Configure block storage (OSDs) on each node
[root@node01 ~]# for NODE in node01 node02 node03
do
if [ "${NODE}" != "node01" ]
then
scp /etc/ceph/ceph.conf ${NODE}:/etc/ceph/ceph.conf
scp /etc/ceph/ceph.client.admin.keyring ${NODE}:/etc/ceph
scp /var/lib/ceph/bootstrap-osd/ceph.keyring ${NODE}:/var/lib/ceph/bootstrap-osd
fi
ssh $NODE \
"chown ceph:ceph /etc/ceph/ceph.* /var/lib/ceph/bootstrap-osd/*; \
parted --script /dev/sdb 'mklabel gpt'; \
parted --script /dev/sdb 'mkpart primary 0% 100%'; \
ceph-volume lvm create --data /dev/sdb1"
done
Running command: /usr/bin/ceph-authtool --gen-print-key
Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 6566b36f-5820-4413-9f18-17e962b83bec
Running command: vgcreate --force --yes ceph-8e5c0f8a-cfdc-4826-b55f-23c9900dd588 /dev/sdb1
stdout: Physical volume "/dev/sdb1" successfully created.
stdout: Volume group "ceph-8e5c0f8a-cfdc-4826-b55f-23c9900dd588" successfully created
Running command: lvcreate --yes -l 5119 -n osd-block-6566b36f-5820-4413-9f18-17e962b83bec ceph-8e5c0f8a-cfdc-4826-b55f-23c9900dd588
stdout: Logical volume "osd-block-6566b36f-5820-4413-9f18-17e962b83bec" created.
Running command: /usr/bin/ceph-authtool --gen-print-key
Running command: /usr/bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-0
Running command: /usr/bin/chown -h ceph:ceph /dev/ceph-8e5c0f8a-cfdc-4826-b55f-23c9900dd588/osd-block-6566b36f-5820-4413-9f18-17e962b83bec
Running command: /usr/bin/chown -R ceph:ceph /dev/dm-2
Running command: /usr/bin/ln -s /dev/ceph-8e5c0f8a-cfdc-4826-b55f-23c9900dd588/osd-block-6566b36f-5820-4413-9f18-17e962b83bec /var/lib/ceph/osd/ceph-0/block
Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-0/activate.monmap
stderr: got monmap epoch 2
--> Creating keyring file for osd.0
Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0/keyring
Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0/
Running command: /usr/bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 0 --monmap /var/lib/ceph/osd/ceph-0/activate.monmap --keyfile - --osd-data /var/lib/ceph/osd/ceph-0/ --osd-uuid 6566b36f-5820-4413-9f18-17e962b83bec --setuser ceph --setgroup ceph
stderr: 2023-09-12T13:57:13.858+0800 7ff44e7a9500 -1 bluestore(/var/lib/ceph/osd/ceph-0//block) _read_bdev_label unable to decode label at offset 102: void bluestore_bdev_label_t::decode(ceph::buffer::v15_2_0::list::const_iterator&) decode past end of struct encoding: Malformed input [buffer:3]
stderr: 2023-09-12T13:57:13.858+0800 7ff44e7a9500 -1 bluestore(/var/lib/ceph/osd/ceph-0//block) _read_bdev_label unable to decode label at offset 102: void bluestore_bdev_label_t::decode(ceph::buffer::v15_2_0::list::const_iterator&) decode past end of struct encoding: Malformed input [buffer:3]
stderr: 2023-09-12T13:57:13.858+0800 7ff44e7a9500 -1 bluestore(/var/lib/ceph/osd/ceph-0//block) _read_bdev_label unable to decode label at offset 102: void bluestore_bdev_label_t::decode(ceph::buffer::v15_2_0::list::const_iterator&) decode past end of struct encoding: Malformed input [buffer:3]
stderr: 2023-09-12T13:57:13.858+0800 7ff44e7a9500 -1 bluestore(/var/lib/ceph/osd/ceph-0/) _read_fsid unparsable uuid
--> ceph-volume lvm prepare successful for: /dev/sdb1
Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0
Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-8e5c0f8a-cfdc-4826-b55f-23c9900dd588/osd-block-6566b36f-5820-4413-9f18-17e962b83bec --path /var/lib/ceph/osd/ceph-0 --no-mon-config
Running command: /usr/bin/ln -snf /dev/ceph-8e5c0f8a-cfdc-4826-b55f-23c9900dd588/osd-block-6566b36f-5820-4413-9f18-17e962b83bec /var/lib/ceph/osd/ceph-0/block
Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-0/block
Running command: /usr/bin/chown -R ceph:ceph /dev/dm-2
Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0
Running command: /usr/bin/systemctl enable ceph-volume@lvm-0-6566b36f-5820-4413-9f18-17e962b83bec
stderr: Created symlink /etc/systemd/system/multi-user.target.wants/ceph-volume@lvm-0-6566b36f-5820-4413-9f18-17e962b83bec.service → /usr/lib/systemd/system/ceph-volume@.service.
Running command: /usr/bin/systemctl enable --runtime ceph-osd@0
stderr: Created symlink /run/systemd/system/ceph-osd.target.wants/ceph-osd@0.service → /usr/lib/systemd/system/ceph-osd@.service.
Running command: /usr/bin/systemctl start ceph-osd@0
--> ceph-volume lvm activate successful for osd ID: 0
--> ceph-volume lvm create successful for: /dev/sdb1
ceph.conf 100% 582 189.7KB/s 00:00
ceph.client.admin.keyring 100% 151 136.1KB/s 00:00
ceph.keyring 100% 129 128.8KB/s 00:00
Running command: /usr/bin/ceph-authtool --gen-print-key
Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 2bf9cd55-2c41-419b-9c32-f47cdb123bc9
Running command: vgcreate --force --yes ceph-0a39800f-3f16-4315-bcd9-13225c8b5a59 /dev/sdb1
stdout: Physical volume "/dev/sdb1" successfully created.
stdout: Volume group "ceph-0a39800f-3f16-4315-bcd9-13225c8b5a59" successfully created
Running command: lvcreate --yes -l 5119 -n osd-block-2bf9cd55-2c41-419b-9c32-f47cdb123bc9 ceph-0a39800f-3f16-4315-bcd9-13225c8b5a59
stdout: Logical volume "osd-block-2bf9cd55-2c41-419b-9c32-f47cdb123bc9" created.
Running command: /usr/bin/ceph-authtool --gen-print-key
Running command: /usr/bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-1
Running command: /usr/bin/chown -h ceph:ceph /dev/ceph-0a39800f-3f16-4315-bcd9-13225c8b5a59/osd-block-2bf9cd55-2c41-419b-9c32-f47cdb123bc9
Running command: /usr/bin/chown -R ceph:ceph /dev/dm-2
Running command: /usr/bin/ln -s /dev/ceph-0a39800f-3f16-4315-bcd9-13225c8b5a59/osd-block-2bf9cd55-2c41-419b-9c32-f47cdb123bc9 /var/lib/ceph/osd/ceph-1/block
Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-1/activate.monmap
stderr: got monmap epoch 2
--> Creating keyring file for osd.1
Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-1/keyring
Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-1/
Running command: /usr/bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 1 --monmap /var/lib/ceph/osd/ceph-1/activate.monmap --keyfile - --osd-data /var/lib/ceph/osd/ceph-1/ --osd-uuid 2bf9cd55-2c41-419b-9c32-f47cdb123bc9 --setuser ceph --setgroup ceph
stderr: 2023-09-12T13:57:24.813+0800 7fd1fd412500 -1 bluestore(/var/lib/ceph/osd/ceph-1//block) _read_bdev_label unable to decode label at offset 102: void bluestore_bdev_label_t::decode(ceph::buffer::v15_2_0::list::const_iterator&) decode past end of struct encoding: Malformed input [buffer:3]
stderr: 2023-09-12T13:57:24.814+0800 7fd1fd412500 -1 bluestore(/var/lib/ceph/osd/ceph-1//block) _read_bdev_label unable to decode label at offset 102: void bluestore_bdev_label_t::decode(ceph::buffer::v15_2_0::list::const_iterator&) decode past end of struct encoding: Malformed input [buffer:3]
stderr: 2023-09-12T13:57:24.814+0800 7fd1fd412500 -1 bluestore(/var/lib/ceph/osd/ceph-1//block) _read_bdev_label unable to decode label at offset 102: void bluestore_bdev_label_t::decode(ceph::buffer::v15_2_0::list::const_iterator&) decode past end of struct encoding: Malformed input [buffer:3]
stderr: 2023-09-12T13:57:24.815+0800 7fd1fd412500 -1 bluestore(/var/lib/ceph/osd/ceph-1/) _read_fsid unparsable uuid
--> ceph-volume lvm prepare successful for: /dev/sdb1
Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-1
Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-0a39800f-3f16-4315-bcd9-13225c8b5a59/osd-block-2bf9cd55-2c41-419b-9c32-f47cdb123bc9 --path /var/lib/ceph/osd/ceph-1 --no-mon-config
Running command: /usr/bin/ln -snf /dev/ceph-0a39800f-3f16-4315-bcd9-13225c8b5a59/osd-block-2bf9cd55-2c41-419b-9c32-f47cdb123bc9 /var/lib/ceph/osd/ceph-1/block
Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-1/block
Running command: /usr/bin/chown -R ceph:ceph /dev/dm-2
Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-1
Running command: /usr/bin/systemctl enable ceph-volume@lvm-1-2bf9cd55-2c41-419b-9c32-f47cdb123bc9
stderr: Created symlink /etc/systemd/system/multi-user.target.wants/ceph-volume@lvm-1-2bf9cd55-2c41-419b-9c32-f47cdb123bc9.service → /usr/lib/systemd/system/ceph-volume@.service.
Running command: /usr/bin/systemctl enable --runtime ceph-osd@1
stderr: Created symlink /run/systemd/system/ceph-osd.target.wants/ceph-osd@1.service → /usr/lib/systemd/system/ceph-osd@.service.
Running command: /usr/bin/systemctl start ceph-osd@1
--> ceph-volume lvm activate successful for osd ID: 1
--> ceph-volume lvm create successful for: /dev/sdb1
ceph.conf 100% 582 253.4KB/s 00:00
ceph.client.admin.keyring 100% 151 163.5KB/s 00:00
ceph.keyring 100% 129 116.0KB/s 00:00
Running command: /usr/bin/ceph-authtool --gen-print-key
Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 65198810-535a-4535-a8e4-833eb6cc6e39
Running command: vgcreate --force --yes ceph-9971c15a-3dda-44b8-9ae6-14095c8f64f4 /dev/sdb1
stdout: Physical volume "/dev/sdb1" successfully created.
stdout: Volume group "ceph-9971c15a-3dda-44b8-9ae6-14095c8f64f4" successfully created
Running command: lvcreate --yes -l 5119 -n osd-block-65198810-535a-4535-a8e4-833eb6cc6e39 ceph-9971c15a-3dda-44b8-9ae6-14095c8f64f4
stdout: Logical volume "osd-block-65198810-535a-4535-a8e4-833eb6cc6e39" created.
Running command: /usr/bin/ceph-authtool --gen-print-key
Running command: /usr/bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-2
Running command: /usr/bin/chown -h ceph:ceph /dev/ceph-9971c15a-3dda-44b8-9ae6-14095c8f64f4/osd-block-65198810-535a-4535-a8e4-833eb6cc6e39
Running command: /usr/bin/chown -R ceph:ceph /dev/dm-2
Running command: /usr/bin/ln -s /dev/ceph-9971c15a-3dda-44b8-9ae6-14095c8f64f4/osd-block-65198810-535a-4535-a8e4-833eb6cc6e39 /var/lib/ceph/osd/ceph-2/block
Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-2/activate.monmap
stderr: got monmap epoch 2
--> Creating keyring file for osd.2
Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-2/keyring
Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-2/
Running command: /usr/bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 2 --monmap /var/lib/ceph/osd/ceph-2/activate.monmap --keyfile - --osd-data /var/lib/ceph/osd/ceph-2/ --osd-uuid 65198810-535a-4535-a8e4-833eb6cc6e39 --setuser ceph --setgroup ceph
stderr: 2023-09-12T13:57:36.075+0800 7ff19e2a3500 -1 bluestore(/var/lib/ceph/osd/ceph-2//block) _read_bdev_label unable to decode label at offset 102: void bluestore_bdev_label_t::decode(ceph::buffer::v15_2_0::list::const_iterator&) decode past end of struct encoding: Malformed input [buffer:3]
stderr: 2023-09-12T13:57:36.075+0800 7ff19e2a3500 -1 bluestore(/var/lib/ceph/osd/ceph-2//block) _read_bdev_label unable to decode label at offset 102: void bluestore_bdev_label_t::decode(ceph::buffer::v15_2_0::list::const_iterator&) decode past end of struct encoding: Malformed input [buffer:3]
stderr: 2023-09-12T13:57:36.075+0800 7ff19e2a3500 -1 bluestore(/var/lib/ceph/osd/ceph-2//block) _read_bdev_label unable to decode label at offset 102: void bluestore_bdev_label_t::decode(ceph::buffer::v15_2_0::list::const_iterator&) decode past end of struct encoding: Malformed input [buffer:3]
stderr: 2023-09-12T13:57:36.076+0800 7ff19e2a3500 -1 bluestore(/var/lib/ceph/osd/ceph-2/) _read_fsid unparsable uuid
--> ceph-volume lvm prepare successful for: /dev/sdb1
Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-2
Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-9971c15a-3dda-44b8-9ae6-14095c8f64f4/osd-block-65198810-535a-4535-a8e4-833eb6cc6e39 --path /var/lib/ceph/osd/ceph-2 --no-mon-config
Running command: /usr/bin/ln -snf /dev/ceph-9971c15a-3dda-44b8-9ae6-14095c8f64f4/osd-block-65198810-535a-4535-a8e4-833eb6cc6e39 /var/lib/ceph/osd/ceph-2/block
Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-2/block
Running command: /usr/bin/chown -R ceph:ceph /dev/dm-2
Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-2
Running command: /usr/bin/systemctl enable ceph-volume@lvm-2-65198810-535a-4535-a8e4-833eb6cc6e39
stderr: Created symlink /etc/systemd/system/multi-user.target.wants/ceph-volume@lvm-2-65198810-535a-4535-a8e4-833eb6cc6e39.service → /usr/lib/systemd/system/ceph-volume@.service.
Running command: /usr/bin/systemctl enable --runtime ceph-osd@2
stderr: Created symlink /run/systemd/system/ceph-osd.target.wants/ceph-osd@2.service → /usr/lib/systemd/system/ceph-osd@.service.
Running command: /usr/bin/systemctl start ceph-osd@2
--> ceph-volume lvm activate successful for osd ID: 2
--> ceph-volume lvm create successful for: /dev/sdb1
[root@node01 ~]#
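Before checking the cluster as a whole, it can be useful to confirm on each node that ceph-volume actually registered the new OSD on /dev/sdb1. The verification loop below is an optional addition (not part of the original transcript) and only uses the standard ceph-volume listing command:
for NODE in node01 node02 node03
do
  # Shows the LVM-backed OSD (osd id, osd fsid, block device) created on each node
  ssh $NODE "ceph-volume lvm list"
done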
3) Check the cluster status
[root@node01 ~]# ceph -s
cluster:
id: 1293692e-ff54-43d7-a6b2-f96d82d2a6ac
health: HEALTH_WARN
1 mgr modules have recently crashed
services:
mon: 1 daemons, quorum node01 (age 46m)
mgr: node01(active, since 40m)
osd: 3 osds: 3 up (since 87s), 3 in (since 106s)
data:
pools: 1 pools, 1 pgs
objects: 2 objects, 449 KiB
usage: 81 MiB used, 60 GiB / 60 GiB avail
pgs: 1 active+clean
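The HEALTH_WARN above only reports that an mgr module crashed earlier; all three OSDs are up and in. If the warning persists, the crash reports can be reviewed and acknowledged with the mgr crash module. The commands below are an optional cleanup step, not part of the original output:
# List recorded crash reports
ceph crash ls
# Inspect one report in detail (replace <crash-id> with an ID from the list)
ceph crash info <crash-id>
# Archive all reports so the HEALTH_WARN clears
ceph crash archive-all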
4) Confirm the Ceph block storage capacity
[root@node01 ~]# ceph osd tree
ID CLASS WEIGHT TYPE NAME STATUS REWEIGHT PRI-AFF
-1 0.05846 root default
-3 0.01949 host node01
0 hdd 0.01949 osd.0 up 1.00000 1.00000
-5 0.01949 host node02
1 hdd 0.01949 osd.1 up 1.00000 1.00000
-7 0.01949 host node03
2 hdd 0.01949 osd.2 up 1.00000 1.00000
[root@node01 ~]# ceph df
--- RAW STORAGE ---
CLASS SIZE AVAIL USED RAW USED %RAW USED
hdd 60 GiB 60 GiB 81 MiB 81 MiB 0.13
TOTAL 60 GiB 60 GiB 81 MiB 81 MiB 0.13
--- POOLS ---
POOL ID PGS STORED OBJECTS USED %USED MAX AVAIL
.mgr 1 1 449 KiB 2 1.3 MiB 0 19 GiB
[root@node01 ~]# ceph osd df
ID CLASS WEIGHT REWEIGHT SIZE RAW USE DATA OMAP META AVAIL %USE VAR PGS STATUS
0 hdd 0.01949 1.00000 20 GiB 27 MiB 604 KiB 0 B 26 MiB 20 GiB 0.13 0.99 1 up
1 hdd 0.01949 1.00000 20 GiB 27 MiB 604 KiB 0 B 26 MiB 20 GiB 0.13 1.00 1 up
2 hdd 0.01949 1.00000 20 GiB 27 MiB 604 KiB 0 B 26 MiB 20 GiB 0.13 1.01 1 up
TOTAL 60 GiB 81 MiB 1.8 MiB 0 B 78 MiB 60 GiB 0.13
MIN/MAX VAR: 0.99/1.01 STDDEV: 0
[root@node01 ~]#
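A quick sanity check on the numbers above: each of the three nodes contributes one 20 GiB OSD, so the cluster reports 60 GiB of raw capacity. Assuming the default replicated pool size of 3, every object is stored three times, which is why the .mgr pool shows only about 19 GiB of MAX AVAIL (roughly 60 GiB / 3, minus the full-ratio safety margin). The replica count can be confirmed with the standard pool query, shown here as an optional check:
# Should print "size: 3" with the default replication settings
ceph osd pool get .mgr size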
The next chapter will cover how to use the block storage. Stay tuned!