Ceph L (Luminous) Quick Install

Ceph role assignment

172.31.68.241 admin-node/ceph-deploy/mon/mgr/mds/rgw
172.31.68.242 osd.0/mon
172.31.68.243 osd.1/mon

Configure passwordless SSH login

The admin-node must be able to SSH into the OSD machines without a password. If you deploy as a non-root user, that user also needs passwordless sudo, for example:

# Create a dedicated deployment user on every node
useradd -d /home/cephadmin -m cephadmin
# Grant the user passwordless sudo
echo "cephadmin ALL = (root) NOPASSWD:ALL" | sudo tee /etc/sudoers.d/cephadmin
sudo chmod 0440 /etc/sudoers.d/cephadmin
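The key distribution itself is not shown above; a minimal sketch run as cephadmin on the admin node, assuming the hostnames ceph1/ceph2/ceph3 from the role table resolve (e.g. via /etc/hosts):

# Generate a key pair and copy it to every node
ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa
for host in ceph1 ceph2 ceph3; do ssh-copy-id cephadmin@$host; done

# Optional: make ceph-deploy log in as cephadmin by default
cat >> ~/.ssh/config <<EOF
Host ceph1 ceph2 ceph3
    User cephadmin
EOF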

Quick install

# Create a new cluster with ceph1 as the initial monitor node
ceph-deploy new ceph1

# Install Ceph packages on all nodes
ceph-deploy install ceph1 ceph2 ceph3

# Deploy the initial monitor(s) and gather the keys
ceph-deploy --overwrite-conf mon create-initial

# Push ceph.conf and the admin keyring to all nodes
ceph-deploy admin ceph1 ceph2 ceph3

# Create a manager daemon (required since Luminous)
ceph-deploy mgr create ceph1

# Create one OSD on each data disk
ceph-deploy osd create --data /dev/vdb1 ceph2
ceph-deploy osd create --data /dev/vdb1 ceph3

# Verify cluster health and status
ceph health
ceph -s
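If the nodes have more than one network interface, the monitor deployment may require public_network to be set in the ceph.conf generated by ceph-deploy new. A sketch, assuming the 172.31.68.0/24 subnet from the role table (the /24 prefix length is an assumption):

[global]
public_network = 172.31.68.0/24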

Scale out

# Add a metadata server (needed later for CephFS)
ceph-deploy mds create ceph1

# Add more monitors so the cluster can keep quorum
ceph-deploy mon add ceph2
ceph-deploy mon add ceph3

# Check monitor quorum status
ceph quorum_status --format json-pretty


Install RGW (object gateway)

ceph-deploy rgw create ceph1

Adjust the configuration (add to ceph.conf on the admin node):
[client.rgw.ceph1]
rgw_frontends = "civetweb port=8080"

# Push the updated ceph.conf to every node (including the client ceph4)
ceph-deploy --overwrite-conf admin ceph1 ceph2 ceph3 ceph4
# Restart the gateway on ceph1 so the new frontend port takes effect
systemctl restart [email protected]
# Verify the gateway answers on port 8080
curl http://172.31.68.241:8080 -I

Simulate a client (ceph4)

# On the client node (ceph4): install the Ceph packages
apt-get install ceph
# On the admin node: push ceph.conf and the admin keyring to ceph4
ceph-deploy admin ceph4
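If ceph commands on ceph4 then fail with permission errors on the keyring, make it readable (as in the upstream quick start):

# On ceph4: allow non-root users to read the admin keyring
sudo chmod +r /etc/ceph/ceph.client.admin.keyring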


Object storage

echo 'hello ceph object storage' > testfile.txt

Create a pool
ceph osd pool create mytest 8
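Since Luminous, ceph health warns about pools that carry no application tag; tagging the new pool avoids that:

# Tag the pool with an application (plain RADOS here) so 'ceph health' does not warn about it
ceph osd pool application enable mytest rados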

Upload a file
rados put test-object-1 testfile.txt --pool=mytest


rados -p mytest ls

Retrieve the object
rados get test-object-1 testfile.txt.1 --pool=mytest

Show where the object maps (PG/OSD placement)
ceph osd map mytest test-object-1

Delete the object
rados rm test-object-1 --pool=mytest
Delete the pool
ceph osd pool rm mytest mytest --yes-i-really-really-mean-it

Block storage

Run on the admin node:

# Create and initialize the pool that will hold RBD images
ceph osd pool create rbd 8
rbd pool init rbd

# Create a 512 MB image and map it to a kernel block device
rbd create foo --size 512 --image-feature layering -p rbd
rbd map foo --name client.admin -p rbd
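rbd map prints the mapped device path (typically /dev/rbd0, also linked under /dev/rbd/<pool>/<image>). A sketch of formatting and mounting it, assuming it came up as /dev/rbd0:

# Format and mount the mapped block device
mkfs.ext4 -m0 /dev/rbd0
mkdir -p /mnt/ceph-block
mount /dev/rbd0 /mnt/ceph-block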

CephFS

Run on the admin node:
ceph osd pool create cephfs_data 4
ceph osd pool create cephfs_metadata 4
ceph osd lspools
ceph fs new cephfs cephfs_metadata cephfs_data

admin.secret contains the key portion of ceph.client.admin.keyring, e.g.:
AQDhRX1baLeFFxAAskNapEuyipJ7SqS7Q1mh/Q==
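It can also be generated directly instead of copying it by hand, assuming the admin keyring is already on the node:

# Extract just the client.admin key into the secret file
ceph auth get-key client.admin > admin.secret
chmod 600 admin.secret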

Kernel-client mount:
mkdir /mnt/mycephfs
mount -t ceph 172.31.68.241:6789,172.31.68.242:6789,172.31.68.243:6789:/ /mnt/mycephfs -o name=admin,secretfile=admin.secret
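To make the kernel mount persistent, a matching /etc/fstab entry can be used; a sketch, assuming admin.secret has been copied to /etc/ceph/admin.secret:

# /etc/fstab entry for the kernel CephFS client
172.31.68.241:6789,172.31.68.242:6789,172.31.68.243:6789:/  /mnt/mycephfs  ceph  name=admin,secretfile=/etc/ceph/admin.secret,noatime,_netdev  0  2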

Test it
cd /mnt/mycephfs
echo 'hello ceph CephFS' > hello.txt
cd ~

Unmount
umount -lf /mnt/mycephfs
rm -rf /mnt/mycephfs

User-space (FUSE) mount:
mkdir /mnt/mycephfs
ceph-fuse -m 172.31.68.241:6789 /mnt/mycephfs

S3 storage

ceph osd pool create .rgw 8 8
ceph osd pool create .rgw.root 8 8
ceph osd pool create .rgw.control 8 8
ceph osd pool create .rgw.gc 8 8
ceph osd pool create .rgw.buckets 8 8
ceph osd pool create .rgw.buckets.index 8 8
ceph osd pool create .rgw.buckets.extra 8 8
ceph osd pool create .log 8 8
ceph osd pool create .intent-log 8 8
ceph osd pool create .usage 8 8
ceph osd pool create .users 8 8
ceph osd pool create .users.email 8 8
ceph osd pool create .users.swift 8 8
ceph osd pool create .users.uid 8 8
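These are the legacy RGW pool names; on Luminous the gateway normally creates its default.rgw.* pools on demand, so pre-creating them is largely optional. To actually use the S3 API you still need a user. A minimal sketch (the uid and display name are made up; s3cmd is assumed to be installed and configured with the printed keys and with host 172.31.68.241:8080):

# Create an S3 user on the RGW node; note the access_key/secret_key in the output
radosgw-admin user create --uid=testuser --display-name="Test User"

# Optional sanity check with s3cmd
s3cmd mb s3://my-first-bucket
s3cmd ls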

Attach data disks to the VMs

cd /opt/vm/data_image
qemu-img create -f qcow2 ubuntu16.04-2-data.img 2G
qemu-img create -f qcow2 ubuntu16.04-3-data.img 2G
virsh attach-disk [--domain] $DOMAIN  [--source] $SOURCEFILE [--target] $TARGET --subdriver qcow2 --config --live
virsh attach-disk Ubuntu16.04-2 /opt/vm/data_image/ubuntu16.04-2-data.img vdb  --subdriver qcow2
virsh attach-disk Ubuntu16.04-3 /opt/vm/data_image/ubuntu16.04-3-data.img vdb  --subdriver qcow2
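The OSDs above were created on /dev/vdb1, so after attaching the disk each guest still needs a partition. A sketch run inside ceph2 and ceph3, assuming the new disk shows up as /dev/vdb:

# Inside each OSD guest: create one partition spanning the whole disk
parted -s /dev/vdb mklabel gpt
parted -s /dev/vdb mkpart primary 0% 100%
lsblk /dev/vdb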

Purge the installed packages

ceph-deploy purge ceph1 ceph2 ceph3

Purge configuration data
ceph-deploy purgedata ceph1 ceph2 ceph3
ceph-deploy forgetkeys

On every node, remove leftover configuration files
rm -rf /var/lib/ceph/osd/*
rm -rf /var/lib/ceph/mon/*
rm -rf /var/lib/ceph/mds/*
rm -rf /var/lib/ceph/bootstrap-mds/*
rm -rf /var/lib/ceph/bootstrap-osd/*
rm -rf /var/lib/ceph/bootstrap-mon/*
rm -rf /var/lib/ceph/tmp/*
rm -rf /etc/ceph/*
rm -rf /var/run/ceph/*

Clean up the LVM configuration

# Locate the volume groups and logical volumes created by ceph-volume
vgscan
vgdisplay -v
# Remove them bottom-up: logical volume, volume group, then physical volume
lvremove <lv_path>
vgremove <vg_name>
pvremove <pv_device>
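The exact LV/VG/PV names differ per node. A destructive sketch that wipes everything ceph-volume created, assuming every VG whose name starts with ceph- belongs to the OSDs being removed and that the data partition was /dev/vdb1 as above:

# Remove all ceph-volume volume groups together with their LVs
for vg in $(vgs --noheadings -o vg_name | grep '^ *ceph-'); do
    vgremove -f "$vg"
done
# Clear the PV label from the OSD data partition
pvremove /dev/vdb1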

 
