OpenStack 安裝腳本

準備

  • 設置主機名和hosts文件

    給每個節點設置主機名,並在所有節點設置hosts,虛擬IP在hosts文件中配置主機名爲controller

    # Set hostnames
    # Run on controller1
    hostnamectl set-hostname controller1
    hostname
    # [root@localhost ~]# hostnamectl set-hostname controller1
    # [root@localhost ~]# hostname
    # controller1
    
    # Run on controller2
    hostnamectl set-hostname controller2
    hostname
    # [root@localhost ~]# hostnamectl set-hostname controller2
    # [root@localhost ~]# hostname
    # controller2
    
    # Run on controller3
    hostnamectl set-hostname controller3
    hostname
    # [root@localhost ~]# hostnamectl set-hostname controller3
    # [root@localhost ~]# hostname
    # controller3
    
    # Append the cluster entries to /etc/hosts (the VIP maps to "controller")
    # NOTE(review): if this snippet is pasted with its 4-space indentation, the
    # indented EOF terminator will not end the here-doc (plain << requires the
    # delimiter at column 0) — paste at column 0, or use <<- with tab indents.
    cat << EOF >> /etc/hosts
    192.168.5.20        controller
    192.168.5.21        controller1
    192.168.5.22        controller2
    192.168.5.23        controller3
    EOF
    
    # Verify
    cat /etc/hosts
    # [root@localhost ~]# cat /etc/hosts
    # 127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
    # ::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
    # 192.168.5.20        controller
    # 192.168.5.21        controller1
    # 192.168.5.22        controller2
    # 192.168.5.23        controller3
    
    ping -c 4 controller1
    # [root@localhost ~]# ping -c 4 controller1
    # PING controller1 (192.168.5.21) 56(84) bytes of data.
    # 64 bytes from controller1 (192.168.5.21): icmp_seq=1 ttl=64 time=0.052 ms
    # 64 bytes from controller1 (192.168.5.21): icmp_seq=2 ttl=64 time=0.080 ms
    # 64 bytes from controller1 (192.168.5.21): icmp_seq=3 ttl=64 time=0.073 ms
    # 64 bytes from controller1 (192.168.5.21): icmp_seq=4 ttl=64 time=0.061 ms
    # 
    # --- controller1 ping statistics ---
    # 4 packets transmitted, 4 received, 0% packet loss, time 2999ms
    # rtt min/avg/max/mdev = 0.052/0.066/0.080/0.013 ms
    
    ping -c 4 controller2
    # [root@localhost ~]# ping -c 4 controller2
    # PING controller2 (192.168.5.22) 56(84) bytes of data.
    # 64 bytes from controller2 (192.168.5.22): icmp_seq=1 ttl=64 time=0.316 ms
    # 64 bytes from controller2 (192.168.5.22): icmp_seq=2 ttl=64 time=0.583 ms
    # 64 bytes from controller2 (192.168.5.22): icmp_seq=3 ttl=64 time=0.426 ms
    # 64 bytes from controller2 (192.168.5.22): icmp_seq=4 ttl=64 time=0.445 ms
    # 
    # --- controller2 ping statistics ---
    # 4 packets transmitted, 4 received, 0% packet loss, time 2999ms
    # rtt min/avg/max/mdev = 0.316/0.442/0.583/0.097 ms
    
    ping -c 4 controller3
    # [root@localhost ~]# ping -c 4 controller3
    # PING controller3 (192.168.5.23) 56(84) bytes of data.
    # 64 bytes from controller3 (192.168.5.23): icmp_seq=1 ttl=64 time=0.287 ms
    # 64 bytes from controller3 (192.168.5.23): icmp_seq=2 ttl=64 time=0.385 ms
    # 64 bytes from controller3 (192.168.5.23): icmp_seq=3 ttl=64 time=0.454 ms
    # 64 bytes from controller3 (192.168.5.23): icmp_seq=4 ttl=64 time=0.374 ms
    # 
    # --- controller3 ping statistics ---
    # 4 packets transmitted, 4 received, 0% packet loss, time 3000ms
    # rtt min/avg/max/mdev = 0.287/0.375/0.454/0.059 ms
    
    
  • 防火牆相互放行

    在全部主機設置,放行各個主機的IP

    # Trust all cluster node IPs (run on every host).
    # Permanent rule — persists across reboots, but only takes effect after a
    # reload/reboot, hence the runtime rule below as well.
    firewall-cmd --permanent --zone=trusted --add-source=192.168.5.20 --add-source=192.168.5.21 --add-source=192.168.5.22 --add-source=192.168.5.23
    # Verify
    firewall-cmd --zone=trusted --list-sources --permanent
    # [root@localhost ~]# firewall-cmd --zone=trusted --list-sources --permanent
    # 192.168.5.20 192.168.5.21 192.168.5.22 192.168.5.23
    
    # Runtime rule — takes effect immediately (lost on reboot; covered by the
    # permanent rule above).
    firewall-cmd --zone=trusted --add-source=192.168.5.20 --add-source=192.168.5.21 --add-source=192.168.5.22 --add-source=192.168.5.23
    # Verify
    firewall-cmd --zone=trusted --list-sources
    # [root@localhost ~]# firewall-cmd --zone=trusted --list-sources
    # 192.168.5.20 192.168.5.21 192.168.5.22 192.168.5.23
    
  • 節點互信

    # Run on controller1: generate a key pair and distribute it to all nodes.
    # NOTE(review): scp-ing ~/.ssh (including id_rsa) shares ONE private key
    # across the whole cluster and overwrites each node's known_hosts —
    # convenient for a lab install, but review before using in production.
    ssh-keygen
    # [root@localhost ~]# ssh-keygen
    # Generating public/private rsa key pair.
    # Enter file in which to save the key (/root/.ssh/id_rsa):
    # Created directory '/root/.ssh'.
    # Enter passphrase (empty for no passphrase):
    # Enter same passphrase again:
    # Your identification has been saved in /root/.ssh/id_rsa.
    # Your public key has been saved in /root/.ssh/id_rsa.pub.
    # The key fingerprint is:
    # SHA256:BvqnKepnbPEirSfukBb9PrAFYpwtz3PiHP0uc1WvjU0 root@controller1
    # The key's randomart image is:
    # +---[RSA 2048]----+
    # |                 |
    # |                 |
    # |. o   .          |
    # | *.o . .   .     |
    # |..=.+   S . .    |
    # | ..Bo= . .   E   |
    # |o.ooO+o o   *    |
    # |..o+@+.*   o o   |
    # | +*X +O.         |
    # +----[SHA256]-----+
    
    # Authorize the key locally first (creates authorized_keys).
    ssh-copy-id root@controller1
    # [root@localhost ~]# ssh-copy-id root@controller1
    # /usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/root/.ssh/id_rsa.pub"
    # The authenticity of host 'controller1 (192.168.5.21)' can't be established.
    # ECDSA key fingerprint is SHA256:1UIr4UMccY+KofSegOIfp/SbKDH2cpLSlWYzTtVBUQo.
    # ECDSA key fingerprint is MD5:63:ea:b3:bc:0c:42:17:db:c0:ca:f0:45:a1:84:2e:c3.
    # Are you sure you want to continue connecting (yes/no)? yes
    # /usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
    # /usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
    # root@controller1's password:
    # 
    # Number of key(s) added: 1
    # 
    # Now try logging into the machine, with:   "ssh 'root@controller1'"
    # and check to make sure that only the key(s) you wanted were added.
    
    # Copy the whole .ssh directory to the other nodes.
    scp -r ~/.ssh root@controller2:~/
    # [root@localhost ~]# scp -r ~/.ssh root@controller2:~/
    # The authenticity of host 'controller2 (192.168.5.22)' can't be established.
    # ECDSA key fingerprint is SHA256:1UIr4UMccY+KofSegOIfp/SbKDH2cpLSlWYzTtVBUQo.
    # ECDSA key fingerprint is MD5:63:ea:b3:bc:0c:42:17:db:c0:ca:f0:45:a1:84:2e:c3.
    # Are you sure you want to continue connecting (yes/no)? yes
    # Warning: Permanently added 'controller2,192.168.5.22' (ECDSA) to the list of known hosts.
    # root@controller2's password:
    # id_rsa                           100% 1675     1.4MB/s   00:00
    # id_rsa.pub                       100%  398   431.6KB/s   00:00
    # known_hosts                      100%  372   516.4KB/s   00:00
    # authorized_keys                  100%  398   453.9KB/s   00:00
    
    scp -r ~/.ssh root@controller3:~/
    # [root@localhost ~]# scp -r ~/.ssh root@controller3:~/
    # The authenticity of host 'controller3 (192.168.5.23)' can't be established.
    # ECDSA key fingerprint is SHA256:1UIr4UMccY+KofSegOIfp/SbKDH2cpLSlWYzTtVBUQo.
    # ECDSA key fingerprint is MD5:63:ea:b3:bc:0c:42:17:db:c0:ca:f0:45:a1:84:2e:c3.
    # Are you sure you want to continue connecting (yes/no)? yes
    # Warning: Permanently added 'controller3,192.168.5.23' (ECDSA) to the list of known hosts.
    # root@controller3's password:
    # id_rsa                          100% 1675     1.2MB/s   00:00
    # id_rsa.pub                      100%  398   332.9KB/s   00:00
    # known_hosts                     100%  558   540.3KB/s   00:00
    # authorized_keys                 100%  398   494.5KB/s   00:00
    
    
    # Verify passwordless SSH to every node
    ssh root@controller1 hostname
    # [root@localhost ~]# ssh root@controller1 hostname
    # controller1
    
    ssh root@controller2 hostname
    # [root@localhost ~]# ssh root@controller2 hostname
    # controller2
    
    ssh root@controller3 hostname
    # [root@localhost ~]# ssh root@controller3 hostname
    # controller3
    
    
  • 上傳安裝軟件包,解壓到指定文件夾

    # Create the package directory and unpack the installer bundle into it.
    mkdir -p /data/packages
    tar xvf openstack.tar.gz -C /data/packages/
    # Verify
    ls /data/packages/openstack/
    # [root@localhost ~]# ls /data/packages/openstack/
    # base  ceph-deploy  cinder  haproxy  keystone  memcached 
    # neutron-controller  nova-controller  rabbitmq ceph  chrony 
    # glance  images   mariadb   neutron-compute  nova-compute pacemaker
    
    

修改集羣配置文件

使用文本編輯工具,按照集羣的規格,編輯config文件內容。

  • 檢查配置文件內容
    # Show the effective (non-comment, non-blank) lines of the cluster config.
    # grep -E replaces the deprecated `egrep` alias.
    grep -Ev '(^#|^$)' config
    

基礎配置

# Load the cluster configuration
source config
# Run the base environment script
bash base/base.sh
# Install the base packages
yum localinstall -y $PACKAGES_DIR/base/*.rpm
# Reboot the server
reboot

配置網絡時間

# Load the cluster configuration
source config
# Run the install script
bash chrony/chrony.sh

PACEMAKER集羣

# Load the cluster configuration
source config
# Run the install script (interactive steps on the primary node)
bash pacemaker/pacemaker.sh
# Verify cluster status and that the VIP answers
pcs status
ping -c 4 $OPENSTACK_VIP

HaProxy集羣

# Load the cluster configuration
source config
# Run the install script
bash haproxy/haproxy.sh
# Verify
# Browse to http://ip:8888/admin
# Username: admin; password: shown by `echo $HAPROXY_PASS`

MariaDB集羣

# Load the cluster configuration
source config
# Run the install script (interactive steps on the primary node)
bash mariadb/mariadb.sh
# Remove insecure default accounts: anonymous users and any account bound to
# this host's name (the backticks run `hostname` on the client side)
mysql -uroot -e "DELETE FROM mysql.user WHERE user='' OR host='`hostname`';"
mysql -uroot -e 'FLUSH PRIVILEGES;'
# Verify Galera replication status through the haproxy VIP
mysql -uhaproxy -h controller -e 'SHOW STATUS LIKE "wsrep%";'

RabbitMQ

# Load the cluster configuration
source config
# Run the install script (interactive steps on the other nodes)
bash rabbitmq/rabbitmq.sh
# Verify the openstack user's credentials
rabbitmqctl authenticate_user openstack $RABBIT_PASS

Memcached

# Load the cluster configuration
source config
# Run the install script
bash memcached/memcached.sh

ceph

# Load the cluster configuration
source config
# Run the install script (has interactive steps)
bash ceph/ceph.sh
# Verify cluster health
ceph -s | grep health:
# Set the pool replica count (optional; the default is already 3)
openstack-config --set /etc/ceph/ceph.conf 'global' 'osd pool default size' '3'
systemctl restart ceph.target

keystone

# Load the cluster configuration
source config
# Run the install script
bash keystone/keystone.sh
# Verify: load admin credentials and request a token
. ~/openstack/admin-openrc
openstack token issue

glance

# Load the cluster configuration
source config
# Run the install script
bash glance/glance.sh
# Integrate with ceph
bash glance/glance-ceph.sh
# Verify
# Upload a test image (run on controller1)
openstack image create "cirros" \
--file $PACKAGES_DIR/images/cirros-0.4.0-x86_64-disk.raw \
--disk-format raw --container-format bare \
--property hw_scsi_model=virtio-scsi \
--property hw_disk_bus=scsi \
--property hw_qemu_guest_agent=yes \
--property os_require_quiesce=yes \
--property os_type=linux \
--property os_admin_user=root \
--property login_name=cirros \
--property login_password=gocubsgo \
--public 
# List registered images
openstack image list
# List the backing objects in the ceph "images" pool
rbd ls images

nova controller

# Load the cluster configuration
source config
# Run the install script (has interactive steps)
bash nova/nova-controller.sh
# Verify the controller services on this host
openstack compute service list --host `hostname`
# [root@controller1 openstack-script]# openstack compute service list --host `hostname`
# +----+------------------+-------------+----------+---------+-------+----------------------------+
# | ID | Binary           | Host        | Zone     | Status  | State | Updated At                 |
# +----+------------------+-------------+----------+---------+-------+----------------------------+
# | 10 | nova-consoleauth | controller1 | internal | enabled | up    | 2019-07-11T10:06:55.000000 |
# | 49 | nova-scheduler   | controller1 | internal | enabled | up    | 2019-07-11T10:06:55.000000 |
# | 88 | nova-conductor   | controller1 | internal | enabled | up    | 2019-07-11T10:06:56.000000 |
# +----+------------------+-------------+----------+---------+-------+----------------------------+

nova compute

# Load the cluster configuration
source config
# Run the install script
bash nova/nova-compute.sh
# Verify the nova-compute service on this host
openstack compute service list --host `hostname` --service nova-compute
# [root@controller1 openstack-script]# openstack compute service list --host `hostname` --service nova-compute
# +-----+--------------+-------------+------+---------+-------+----------------------------+
# |  ID | Binary       | Host        | Zone | Status  | State | Updated At                 |
# +-----+--------------+-------------+------+---------+-------+----------------------------+
# | 154 | nova-compute | controller1 | nova | enabled | up    | 2019-07-11T10:11:56.000000 |
# +-----+--------------+-------------+------+---------+-------+----------------------------+

# If the physical host does not support KVM hardware virtualization (no
# vmx/svm CPU flags in the output below), fall back to software-emulated
# QEMU (optional).
grep -Eo '(vmx|svm)' /proc/cpuinfo
openstack-config --set /etc/nova/nova.conf 'libvirt' 'virt_type' 'qemu'
# Fix: the nova.conf [libvirt] option is 'cpu_mode' — the original 'cpm_mode'
# was a typo that nova silently ignored, leaving cpu_mode unset.
openstack-config --set /etc/nova/nova.conf 'libvirt' 'cpu_mode' 'none'
systemctl restart openstack-nova-compute.service
systemctl status openstack-nova-compute.service

neutron controller

# Load the cluster configuration
source config
# Run the install script (has interactive steps)
bash neutron/neutron-controller.sh
# Verify the network agents on this host
openstack network agent list --host `hostname`
# [root@controller1 openstack-script]# openstack network agent list --host `hostname`
# +--------------------------------------+--------------------+-------------+-------------------+-------+-------+---------------------------+
# | ID                                   | Agent Type         | Host        | Availability Zone | Alive | State | Binary                    |
# +--------------------------------------+--------------------+-------------+-------------------+-------+-------+---------------------------+
# | 07ed26e0-6a1a-4405-a3a2-c6f2413ebfe6 | DHCP agent         | controller1 | nova              | :-)   | UP    | neutron-dhcp-agent        |
# | 30bc443f-e54b-466d-baa6-1d2646f6e290 | L3 agent           | controller1 | nova              | :-)   | UP    | neutron-l3-agent          |
# | 82966bcd-b726-493c-a03c-490fa14b0764 | Metadata agent     | controller1 | None              | :-)   | UP    | neutron-metadata-agent    |
# | d796e67b-3772-4eb1-8d18-889a6dbb0b4a | Linux bridge agent | controller1 | None              | :-)   | UP    | neutron-linuxbridge-agent |
# +--------------------------------------+--------------------+-------------+-------------------+-------+-------+---------------------------+

neutron compute

# Load the cluster configuration
source config
# Run the install script
bash neutron/neutron-compute.sh
# Verify the linux-bridge agent on this host
openstack network agent list --host `hostname` --agent-type linux-bridge
# [root@controller1 openstack-script]# openstack network agent list --host `hostname` --agent-type linux-bridge
# +--------------------------------------+--------------------+-------------+-------------------+-------+-------+---------------------------+
# | ID                                   | Agent Type         | Host        | Availability Zone | Alive | State | Binary                    |
# +--------------------------------------+--------------------+-------------+-------------------+-------+-------+---------------------------+
# | d796e67b-3772-4eb1-8d18-889a6dbb0b4a | Linux bridge agent | controller1 | None              | :-)   | UP    | neutron-linuxbridge-agent |
# +--------------------------------------+--------------------+-------------+-------------------+-------+-------+---------------------------+

創建網絡

注意:在任意一個控制節點執行

  • 創建外部網絡

    [controller1]

    # Create a shared external flat network named "provider", mapped to the
    # "provider" physical network.
    openstack network create  --share --external \
      --provider-physical-network provider \
      --provider-network-type flat provider
    
  • 創建外部網絡子網

    # Create the external subnet: allocation pool 192.168.5.100-192.168.5.120,
    # /24 mask, gateway 192.168.5.1, DNS 114.114.114.114 and 8.8.8.8.
    openstack subnet create --network provider \
      --allocation-pool start=192.168.5.100,end=192.168.5.120 \
      --dns-nameserver 114.114.114.114 --dns-nameserver 8.8.8.8 \
      --gateway 192.168.5.1 \
      --subnet-range 192.168.5.0/24 provider
    
  • 創建內部網絡

    # Create the internal (self-service) tenant network.
    openstack network create selfservice
    
  • 創建內部網絡子網

    # Create the internal subnet: 172.16.1.0/24 (usable 172.16.1.1-172.16.1.254),
    # gateway 172.16.1.1, DNS 114.114.114.114 and 8.8.8.8.
    openstack subnet create --network selfservice \
      --dns-nameserver 114.114.114.114 --dns-nameserver 8.8.8.8 \
      --gateway 172.16.1.1 \
      --subnet-range 172.16.1.0/24 selfservice
    
  • 創建路由

    # Create the virtual router.
    openstack router create router
    
  • 內部網絡添加路由

    # Attach the selfservice subnet to the router.
    # (The unified openstack CLI replaces the deprecated
    # `neutron router-interface-add router selfservice` command.)
    openstack router add subnet router selfservice
    
  • 路由設置外部網關

    # Set the provider network as the router's external gateway.
    # (The unified openstack CLI replaces the deprecated
    # `neutron router-gateway-set router provider` command.)
    openstack router set router --external-gateway provider
    

cinder

# Load the cluster configuration
source config
# Run the install script (has interactive steps)
bash cinder/cinder.sh
# Integrate with ceph
bash cinder/cinder-ceph.sh
# Limit the size of a single volume (GiB) for the default quota class
openstack quota set --class --per-volume-gigabytes 100 default
# Verify cinder services across the cluster
openstack volume service list
# [root@controller1 openstack-script]# openstack volume service list
# +------------------+------------------+------+---------+-------+----------------------------+
# | Binary           | Host             | Zone | Status  | State | Updated At                 |
# +------------------+------------------+------+---------+-------+----------------------------+
# | cinder-scheduler | controller2      | nova | enabled | up    | 2019-07-11T10:43:16.000000 |
# | cinder-scheduler | controller3      | nova | enabled | up    | 2019-07-11T10:43:16.000000 |
# | cinder-scheduler | controller1      | nova | enabled | up    | 2019-07-11T10:43:16.000000 |
# | cinder-volume    | controller2@ceph | nova | enabled | up    | 2019-07-11T10:43:15.000000 |
# | cinder-volume    | controller3@ceph | nova | enabled | up    | 2019-07-11T10:43:16.000000 |
# | cinder-volume    | controller1@ceph | nova | enabled | up    | 2019-07-11T10:43:15.000000 |
# +------------------+------------------+------+---------+-------+----------------------------+

huluer

# Load the cluster configuration
source config
# Run the install script
bash huluer/huluer.sh

服務檢測和重啓腳本

# Install the service status check script
cp -f utils/check-openstack-service.sh ~/openstack/check-openstack-service.sh
chmod +x ~/openstack/check-openstack-service.sh
# Verify
~/openstack/check-openstack-service.sh

# Install the service restart script
cp -f utils/restart-openstack-service.sh ~/openstack/restart-openstack-service.sh
chmod +x ~/openstack/restart-openstack-service.sh
# Verify. NOTE: do not run the restart script on several nodes at the same time
~/openstack/restart-openstack-service.sh

數據庫恢復腳本

啓動關閉的數據庫,支持以下幾種情況:1.集羣還存在運行節點。2.集羣全部正常關閉。3.集羣全部異常關閉。

# Install the MariaDB/Galera recovery script (restarts stopped database nodes,
# whether the cluster is partially running, cleanly stopped, or crashed).
cp -f utils/recovery-mariadb.sh ~/openstack/recovery-mariadb.sh
chmod +x ~/openstack/recovery-mariadb.sh
發表評論
所有評論
還沒有人評論,想成為第一個評論的人麼? 請在上方評論欄輸入並且點擊發布.
相關文章