Deploying OpenStack with kolla-ansible against an external Ceph cluster

Environment

Official references:
https://docs.ceph.com/docs/master/rbd/rbd-openstack/
https://docs.openstack.org/kolla-ansible/train/reference/storage/external-ceph-guide.html

Prerequisites:

  • A single-node all-in-one OpenStack deployed with kolla-ansible
  • An external Ceph cluster (a 3-node cluster is used here)
  • 192.168.93.30 is the kolla node

Configure Ceph

The following operations are performed on a Ceph node.
Create the storage pools on the Ceph node

# Create the pools
ceph osd pool create volumes 64 64
ceph osd pool create images 64 64
ceph osd pool create backups 64 64
ceph osd pool create vms 64 64

# Initialize the pools
rbd pool init volumes
rbd pool init images
rbd pool init backups
rbd pool init vms

# List the pools
# ceph osd pool ls
volumes
images
backups
vms
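
The pg_num/pgp_num value of 64 used above is only a reasonable starting point for a small test cluster; the resulting pool settings can be reviewed with:

ceph osd pool ls detail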

Configure authentication on the Ceph node

If cephx authentication is enabled, create new users for Nova, Cinder, and Glance:

# Create the glance user and capabilities
ceph auth get-or-create client.glance mon 'profile rbd' osd 'profile rbd pool=images' mgr 'profile rbd pool=images' -o /etc/ceph/ceph.client.glance.keyring

# Create the cinder user and capabilities
ceph auth get-or-create client.cinder mon 'profile rbd' osd 'profile rbd pool=volumes, profile rbd pool=vms, profile rbd-read-only pool=images' mgr 'profile rbd pool=volumes, profile rbd pool=vms' -o /etc/ceph/ceph.client.cinder.keyring

# Create the cinder-backup user and capabilities
ceph auth get-or-create client.cinder-backup mon 'profile rbd' osd 'profile rbd pool=backups' mgr 'profile rbd pool=backups' -o /etc/ceph/ceph.client.cinder-backup.keyring

# Create the nova user and capabilities
ceph auth get-or-create client.nova mon 'profile rbd' osd 'profile rbd pool=volumes, profile rbd pool=vms, profile rbd-read-only pool=images,profile rbd pool=backups' mgr 'profile rbd pool=volumes, profile rbd pool=vms' -o /etc/ceph/ceph.client.nova.keyring
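
#(Optional) Double-check the capabilities granted to one of the new users before copying the keyrings around
ceph auth get client.glance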

Install the Ceph client
The following operations are performed on the OpenStack node.
Install the Ceph client tools on every node that needs to access the Ceph cluster; here there is only one OpenStack node.

yum install -y python-rbd
yum install -y ceph-common
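
A quick sanity check that the client tools are installed (optional):

ceph --version
rpm -q ceph-common python-rbd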

Create the configuration directories on the kolla deploy node

mkdir -p /etc/kolla/config/{glance,cinder,nova}
mkdir -p /etc/kolla/config/cinder/{cinder-volume,cinder-backup}

Configure Glance

The following operations are performed on the kolla deploy node.
Copy the Ceph keyring:

scp 192.168.93.30:/etc/ceph/ceph.client.glance.keyring /etc/kolla/config/glance/

Copy the Ceph configuration file:

scp 192.168.93.30:/etc/ceph/ceph.conf /etc/kolla/config/glance/

To integrate the Glance service, run the following:

cat > /etc/kolla/config/glance/glance-api.conf <<EOF
[glance_store]
stores = rbd
default_store = rbd
rbd_store_pool = images
rbd_store_user = glance
rbd_store_ceph_conf = /etc/ceph/ceph.conf
EOF
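
Optionally, the Ceph documentation linked above also recommends exposing image locations so that Cinder and Nova can make copy-on-write clones of Glance images. If you want that, append a [DEFAULT] section to the same snippet (note that exposing locations has security implications on multi-tenant clouds):

cat >> /etc/kolla/config/glance/glance-api.conf <<EOF

[DEFAULT]
show_image_direct_url = True
EOF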

Configure Cinder

Copy the keyrings. Note that cinder-backup needs two keyrings so it can access both the volumes pool and the backups pool.

scp 192.168.93.30:/etc/ceph/ceph.client.cinder.keyring /etc/kolla/config/cinder/cinder-volume/
scp 192.168.93.30:/etc/ceph/ceph.client.cinder.keyring /etc/kolla/config/cinder/cinder-backup/
scp 192.168.93.30:/etc/ceph/ceph.client.cinder-backup.keyring /etc/kolla/config/cinder/cinder-backup/

Copy ceph.conf:

scp 192.168.93.30:/etc/ceph/ceph.conf /etc/kolla/config/cinder/

Create the cinder-volume.conf configuration file:

cat > /etc/kolla/config/cinder/cinder-volume.conf <<EOF
[DEFAULT]
enabled_backends=rbd-1

[rbd-1]
rbd_ceph_conf=/etc/ceph/ceph.conf
rbd_user=cinder
backend_host=rbd:volumes
rbd_pool=volumes
volume_backend_name=rbd-1
volume_driver=cinder.volume.drivers.rbd.RBDDriver
rbd_secret_uuid = a0a12844-3910-452c-9808-5c82f0d6f67d
EOF

The rbd_secret_uuid value can be found in /etc/kolla/passwords.yml; be sure to use the cinder_rbd_secret_uuid entry here.

# cat /etc/kolla/passwords.yml | grep rbd_secret_uuid
cinder_rbd_secret_uuid: a0a12844-3910-452c-9808-5c82f0d6f67d
rbd_secret_uuid: bf77feeb-3d95-437c-a05e-71ba93676770

Create the cinder-backup.conf configuration file:

cat > /etc/kolla/config/cinder/cinder-backup.conf <<EOF
[DEFAULT]
backup_ceph_conf=/etc/ceph/ceph.conf
backup_ceph_user=cinder-backup
backup_ceph_chunk_size = 134217728
backup_ceph_pool=backups
backup_driver = cinder.backup.drivers.ceph.CephBackupDriver
backup_ceph_stripe_unit = 0
backup_ceph_stripe_count = 0
restore_discard_excess_bytes = true
EOF

Configure Nova

Copy the keyrings:

scp 192.168.93.30:/etc/ceph/ceph.client.cinder.keyring /etc/kolla/config/nova/
scp 192.168.93.30:/etc/ceph/ceph.client.nova.keyring /etc/kolla/config/nova/

Copy the Ceph configuration file:

scp 192.168.93.30:/etc/ceph/ceph.conf /etc/kolla/config/nova/

Create the nova-compute.conf configuration file to configure an RBD backend for Nova:

cat > /etc/kolla/config/nova/nova-compute.conf <<EOF
[libvirt]
images_rbd_pool=vms
images_type=rbd
images_rbd_ceph_conf=/etc/ceph/ceph.conf
rbd_user=nova
rbd_secret_uuid=bf77feeb-3d95-437c-a05e-71ba93676770
EOF

The rbd_secret_uuid value can again be found in /etc/kolla/passwords.yml; this time use the rbd_secret_uuid entry.

# cat /etc/kolla/passwords.yml | grep rbd_secret_uuid
cinder_rbd_secret_uuid: a0a12844-3910-452c-9808-5c82f0d6f67d
rbd_secret_uuid: bf77feeb-3d95-437c-a05e-71ba93676770
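
Once the reconfigure step below has been run, you can verify that this snippet was merged into the container's configuration (a hedged check, assuming the default kolla container name):

docker exec nova_compute grep -A5 '^\[libvirt\]' /etc/nova/nova.conf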

Final layout of /etc/kolla/config/

[root@kolla ~]# tree /etc/kolla/config/
/etc/kolla/config/
├── cinder
│   ├── ceph.conf
│   ├── cinder-backup
│   │   ├── ceph.client.cinder-backup.keyring
│   │   └── ceph.client.cinder.keyring
│   ├── cinder-backup.conf
│   ├── cinder-volume
│   │   └── ceph.client.cinder.keyring
│   └── cinder-volume.conf
├── glance
│   ├── ceph.client.glance.keyring
│   ├── ceph.conf
│   └── glance-api.conf
└── nova
    ├── ceph.client.cinder.keyring
    ├── ceph.client.nova.keyring
    ├── ceph.conf
    └── nova-compute.conf

Edit globals.yml

Edit kolla's globals.yml. Because an external Ceph cluster is used, kolla's own Ceph deployment stays disabled; enable the Cinder service and turn on the Ceph backends for Glance, Cinder, and Nova (the Gnocchi and Manila settings below only take effect if those services are enabled as well):

cat >> /etc/kolla/globals.yml <<EOF

#version
kolla_base_distro: "centos"
kolla_install_type: "binary"
openstack_release: "train"

#vip
kolla_internal_vip_address: "192.168.93.100"

#docker registry
docker_registry: "registry.cn-shenzhen.aliyuncs.com"
docker_namespace: "kollaimage"

#network
network_interface: "ens33"
neutron_external_interface: "ens37"
neutron_plugin_agent: "openvswitch"
enable_neutron_provider_networks: "yes"

#storage
enable_cinder: "yes"

#virt_type
nova_compute_virt_type: "qemu"

#ceph
enable_ceph: "no"
glance_backend_ceph: "yes"
cinder_backend_ceph: "yes"
nova_backend_ceph: "yes"
gnocchi_backend_storage: "ceph"
enable_manila_backend_cephfs_native: "yes"
EOF

Reconfigure OpenStack

kolla-ansible -i all-in-one reconfigure
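
On a fresh environment the same configuration would be picked up by "kolla-ansible -i all-in-one deploy"; reconfigure is used here because the all-in-one cloud already exists. After the run completes, one way to confirm that libvirt secrets were created for the two RBD UUIDs is (container name is the kolla default):

docker exec nova_libvirt virsh secret-list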

Verify the Ceph integration

Check the deployed volume services:

$ openstack volume service list
+------------------+-------------------+------+---------+-------+----------------------------+
| Binary           | Host              | Zone | Status  | State | Updated At                 |
+------------------+-------------------+------+---------+-------+----------------------------+
| cinder-scheduler | kolla             | nova | enabled | up    | 2020-06-17T09:42:03.000000 |
| cinder-backup    | kolla             | nova | enabled | up    | 2020-06-17T09:41:57.000000 |
| cinder-volume    | rbd:volumes@rbd-1 | nova | enabled | up    | 2020-06-17T09:42:01.000000 |
+------------------+-------------------+------+---------+-------+----------------------------+

Check the initial state of the RBD pools; they are all empty:

rbd -p images ls
rbd -p volumes ls
rbd -p vms ls

Create an image

Upload an image and then check that it appears in the image list.
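
As a rough illustration (the cirros image file name below is an assumption, not taken from the original environment), the upload could look like:

openstack image create --disk-format qcow2 --container-format bare \
  --file cirros-0.5.1-x86_64-disk.img cirros01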

# openstack image list 
+--------------------------------------+----------------+--------+
| ID                                   | Name           | Status |
+--------------------------------------+----------------+--------+
| 0fec116d-ec19-4bca-ba71-bd0c40e4630c | cirros01       | active |
+--------------------------------------+----------------+--------+

Check the image details; the locations entry under properties shows that the image is stored in RBD:

$ openstack image show 0fec116d-ec19-4bca-ba71-bd0c40e4630c
+------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| Field            | Value                                                                                                                                                                                                                                                                                                                               |
+------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| checksum         | 1d3062cd89af34e419f7100277f38b2b                                                                                                                                                                                                                                                                                                    |
| container_format | bare                                                                                                                                                                                                                                                                                                                                |
| created_at       | 2020-06-17T09:44:35Z                                                                                                                                                                                                                                                                                                                |
| disk_format      | qcow2                                                                                                                                                                                                                                                                                                                               |
| file             | /v2/images/0fec116d-ec19-4bca-ba71-bd0c40e4630c/file                                                                                                                                                                                                                                                                                |
| id               | 0fec116d-ec19-4bca-ba71-bd0c40e4630c                                                                                                                                                                                                                                                                                                |
| min_disk         | 0                                                                                                                                                                                                                                                                                                                                   |
| min_ram          | 0                                                                                                                                                                                                                                                                                                                                   |
| name             | cirros01                                                                                                                                                                                                                                                                                                                            |
| owner            | 65850af146fe478ab13f59f7edf838ec                                                                                                                                                                                                                                                                                                    |
| properties       | locations='[{u'url': u'rbd://c64af733-b16a-4962-b613-d37faaab60fe/images/0fec116d-ec19-4bca-ba71-bd0c40e4630c/snap', u'metadata': {}}]', os_hash_algo='sha512', os_hash_value='553d220ed58cfee7dafe003c446a9f197ab5edf8ffc09396c74187cf83873c877e7ae041cb80f3b91489acf687183adcd689b53b38e3ddd22e627e7f98a09c46', os_hidden='False' |
| protected        | False                                                                                                                                                                                                                                                                                                                               |
| schema           | /v2/schemas/image                                                                                                                                                                                                                                                                                                                   |
| size             | 16338944                                                                                                                                                                                                                                                                                                                            |
| status           | active                                                                                                                                                                                                                                                                                                                              |
| tags             |                                                                                                                                                                                                                                                                                                                                     |
| updated_at       | 2020-06-17T09:44:36Z                                                                                                                                                                                                                                                                                                                |
| virtual_size     | None                                                                                                                                                                                                                                                                                                                                |
| visibility       | shared                                                                                                                                                                                                                                                                                                                              |
+------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+

Check the RBD pools again; the image has been stored in the images pool:

[root@ceph01 ~]# rbd -p images ls
0fec116d-ec19-4bca-ba71-bd0c40e4630c

Check the image details in the pool; the image also has one snapshot:

[root@ceph01 ~]# rbd -p images info 0fec116d-ec19-4bca-ba71-bd0c40e4630c
rbd image '0fec116d-ec19-4bca-ba71-bd0c40e4630c':
        size 16 MiB in 2 objects
        order 23 (8 MiB objects)
        snapshot_count: 1
        id: d9976b58fc3d
        block_name_prefix: rbd_data.d9976b58fc3d
        format: 2
        features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
        op_features: 
        flags: 
        create_timestamp: Wed Jun 17 17:44:35 2020
        access_timestamp: Wed Jun 17 17:44:35 2020
        modify_timestamp: Wed Jun 17 17:44:35 2020

[root@ceph01 ~]# rbd -p images snap list 0fec116d-ec19-4bca-ba71-bd0c40e4630c
SNAPID NAME SIZE   PROTECTED TIMESTAMP                
     4 snap 16 MiB yes       Wed Jun 17 17:44:36 2020

Create an instance

# openstack server list
+--------------------------------------+-------+--------+--------------------+--------+---------+
| ID                                   | Name  | Status | Networks           | Image  | Flavor  |
+--------------------------------------+-------+--------+--------------------+--------+---------+
| 42dc6abb-b6b6-4f56-a8fd-d29d71c15057 | demo1 | ACTIVE | demo-net=10.0.0.12 | cirros | m1.tiny |
+--------------------------------------+-------+--------+--------------------+--------+---------+

Note: when creating the instance, choose not to create a new volume (boot directly from the image).
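
For reference, an equivalent CLI invocation might look like the following sketch (flavor, image, and network names are taken from the listings in this guide, not the exact command that was used):

openstack server create --flavor m1.tiny --image cirros01 \
  --network demo-net demo1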

An RBD image for the virtual machine's disk has been created in the vms pool:

[root@ceph01 ceph]# rbd -p vms ls
42dc6abb-b6b6-4f56-a8fd-d29d71c15057_disk

Log in to the node hosting the virtual machine; the VM's system disk is backed by the image created in the vms pool.

[root@kolla ~]# docker exec -it nova_libvirt virsh list
 Id    Name                           State
----------------------------------------------------
 1     instance-00000001              running

Inspect the domain XML:

[root@kolla ~]# docker exec -it nova_libvirt virsh dumpxml 1 | grep "disk type" -A14
    <disk type='network' device='disk'>
      <driver name='qemu' type='raw' cache='none'/>
      <auth username='nova'>
        <secret type='ceph' uuid='bf77feeb-3d95-437c-a05e-71ba93676770'/>
      </auth>
      <source protocol='rbd' name='vms/42dc6abb-b6b6-4f56-a8fd-d29d71c15057_disk'>
        <host name='192.168.93.60' port='6789'/>
        <host name='192.168.93.61' port='6789'/>
        <host name='192.168.93.62' port='6789'/>
      </source>
      <target dev='vda' bus='virtio'/>
      <alias name='virtio-disk0'/>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
    </disk>

The qemu process and the libraries it links against show that qemu accesses the RBD block device directly through Ceph's librbd:

[root@kolla ~]# ps -aux | grep qemu 

[root@kolla ~]# ldd /usr/libexec/qemu-kvm | grep -e ceph -e rbd
        librbd.so.1 => /lib64/librbd.so.1 (0x00007f7cfaa3f000)

Create a volume

$ openstack volume create --size 1 volume01

# openstack volume list
+--------------------------------------+----------+--------+------+--------------------------------+
| ID                                   | Name     | Status | Size | Attached to                    |
+--------------------------------------+----------+--------+------+--------------------------------+
| 5e662290-f806-477a-8de1-d564904b7231 | volume01 | in-use |    1 | Attached to demo1 on /dev/vdb  |
+--------------------------------------+----------+--------+------+--------------------------------+

Check the pools; the new volume is placed in the volumes pool:

[root@ceph01 ~]# rbd -p volumes ls
volume-5e662290-f806-477a-8de1-d564904b7231

Create a volume backup

Create a backup of the volume:

# openstack volume backup create volume01
+-------+--------------------------------------+
| Field | Value                                |
+-------+--------------------------------------+
| id    | c4eca260-81a6-4763-9234-0e193c157b7f |
| name  | None                                 |
+-------+--------------------------------------+

# openstack volume backup list
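
The backup runs asynchronously; its status can be polled until it reaches "available", for example:

openstack volume backup show c4eca260-81a6-4763-9234-0e193c157b7f -c status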

The backup is created in the backups pool:

[root@ceph01 ~]# rbd -p backups ls
volume-5e662290-f806-477a-8de1-d564904b7231.backup.c4eca260-81a6-4763-9234-0e193c157b7f

[root@ceph01 ceph]# rbd -p backups snap list volume-5e662290-f806-477a-8de1-d564904b7231.backup.c4eca260-81a6-4763-9234-0e193c157b7f
SNAPID NAME                                                           SIZE  PROTECTED TIMESTAMP                
     6 backup.c4eca260-81a6-4763-9234-0e193c157b7f.snap.1592556822.77 1 GiB           Fri Jun 19 16:53:44 2020

Attach the new volume to the virtual machine created earlier:

# openstack server add volume demo1 volume01

On the node hosting the virtual machine, inspect the libvirt domain again; a new RBD disk has appeared:

[root@kolla ~]# docker exec -it nova_libvirt virsh dumpxml 1 | grep "disk type" -A14
    <disk type='network' device='disk'>
      <driver name='qemu' type='raw' cache='none'/>
      <auth username='nova'>
        <secret type='ceph' uuid='bf77feeb-3d95-437c-a05e-71ba93676770'/>
      </auth>
      <source protocol='rbd' name='vms/42dc6abb-b6b6-4f56-a8fd-d29d71c15057_disk'>
        <host name='192.168.93.60' port='6789'/>
        <host name='192.168.93.61' port='6789'/>
        <host name='192.168.93.62' port='6789'/>
      </source>
      <target dev='vda' bus='virtio'/>
      <alias name='virtio-disk0'/>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
    </disk>
    <disk type='network' device='disk'>
      <driver name='qemu' type='raw' cache='none' discard='unmap'/>
      <auth username='cinder'>
        <secret type='ceph' uuid='a0a12844-3910-452c-9808-5c82f0d6f67d'/>
      </auth>
      <source protocol='rbd' name='volumes/volume-5e662290-f806-477a-8de1-d564904b7231'>
        <host name='192.168.93.60' port='6789'/>
        <host name='192.168.93.61' port='6789'/>
        <host name='192.168.93.62' port='6789'/>
      </source>
      <target dev='vdb' bus='virtio'/>
      <serial>5e662290-f806-477a-8de1-d564904b7231</serial>
      <alias name='virtio-disk1'/>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x06' function='0x0'/>
    </disk>

Create a floating IP for the virtual machine and log in over SSH:

# openstack floating ip create public1

# openstack server add floating ip demo1 192.168.1.205

# openstack server list
+--------------------------------------+-------+--------+-----------------------------------+--------+---------+
| ID                                   | Name  | Status | Networks                          | Image  | Flavor  |
+--------------------------------------+-------+--------+-----------------------------------+--------+---------+
| 42dc6abb-b6b6-4f56-a8fd-d29d71c15057 | demo1 | ACTIVE | demo-net=10.0.0.12, 192.168.1.205 | cirros | m1.tiny |
+--------------------------------------+-------+--------+-----------------------------------+--------+---------+

# username "cirros", password "gocubsgo"
$ ssh [email protected]  
 
$ sudo passwd root
Changing password for root
New password: 
Bad password: too weak
Retype password: 
Password for root changed by root
$ su -
Password:

Create a filesystem on the volume, write a test file, and finally unmount it:

# lsblk
NAME    MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
vda     253:0    0    1G  0 disk 
|-vda1  253:1    0 1015M  0 part /
`-vda15 253:15   0    8M  0 part 
vdb     253:16   0    1G  0 disk

# mkfs.ext4 /dev/vdb
mke2fs 1.42.12 (29-Aug-2014)
Creating filesystem with 262144 4k blocks and 65536 inodes
Filesystem UUID: bb0e8ce5-ef15-49be-b8f8-97131e072a5e
Superblock backups stored on blocks: 
        32768, 98304, 163840, 229376

Allocating group tables: done                            
Writing inode tables: done                            
Creating journal (8192 blocks): done
Writing superblocks and filesystem accounting information: done

# mount /dev/vdb /mnt
# df -h
Filesystem                Size      Used Available Use% Mounted on
/dev                    240.2M         0    240.2M   0% /dev
/dev/vda1               978.9M     24.0M    914.1M   3% /
tmpfs                   244.2M         0    244.2M   0% /dev/shm
tmpfs                   244.2M     92.0K    244.1M   0% /run
/dev/vdb                975.9M      1.3M    907.4M   0% /mnt
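
To complete the test, write a small file and then unmount the volume:

# echo "hello ceph rbd" > /mnt/testfile
# sync
# umount /mnt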

Detach the volume from the instance

$ openstack server remove volume demo1 volume01
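
After detaching, the volume should return to the "available" state, which can be confirmed with:

$ openstack volume list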