Create OpenStack Lab. Part 2 – Ceph Deployment
Log in to the MAAS node as the user ‘openstack’
Install and update the Python package manager
sudo apt install python3-pip
pip install -U pip
Add the local Python binaries directory to PATH
vi ~/.bashrc
Add the following line at the end of the ~/.bashrc file
export PATH=$PATH:/home/openstack/.local/bin
Apply the new PATH
source ~/.bashrc
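Optionally, verify that pip now resolves to the user-local copy
which pip
pip --version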
Install Python virtual environment support
sudo apt install python3-venv
Create a virtual environment for ceph-ansible
python3 -m venv ~/venv/ceph
Activate the ceph-ansible virtual environment
source ~/venv/ceph/bin/activate
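The shell prompt should now show the (ceph) prefix; you can also confirm that python points into the virtual environment
which python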
Update pip and install the required packages
pip install -U pip
pip install wheel
Create the project directory, then clone ceph-ansible from Git
mkdir -p ~/projects/ceph
cd ~/projects/ceph
git clone https://github.com/ceph/ceph-ansible.git
Configure ceph-ansible
cd ~/projects/ceph/ceph-ansible
git checkout stable-6.0
pip install -r ./requirements.txt
ansible-galaxy install -r requirements.yml
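Optionally, confirm that Ansible was installed into the virtual environment
ansible --version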
cp ./site.yml.sample ./site.yml
cp ./group_vars/all.yml.sample ./group_vars/all.yml
cp ./group_vars/osds.yml.sample ./group_vars/osds.yml
cp ./group_vars/mons.yml.sample ./group_vars/mons.yml
cp ./group_vars/mgrs.yml.sample ./group_vars/mgrs.yml
cp ./group_vars/clients.yml.sample ./group_vars/clients.yml
Edit the configuration file all.yml
vi ~/projects/ceph/ceph-ansible/group_vars/all.yml
Edit the following lines
ntp_daemon_type: chronyd
ceph_origin: repository
ceph_repository: uca
ceph_stable_openstack_release_uca: xena
monitor_address_block: 10.6.13.0/24
journal_size: 5120
public_network: 10.6.12.0/24
cluster_network: 10.6.14.0/24
dashboard_enabled: true
dashboard_protocol: http
dashboard_port: 8080
dashboard_admin_user: admin
dashboard_admin_password: passw0rd
node_exporter_port: 9500
grafana_admin_user: admin
grafana_admin_password: passw0rd
grafana_dashboard_version: pacific
grafana_port: 3000
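Optionally, sanity-check a few of the edited values before moving on (extend the grep pattern to cover more variables if you like)
grep -E '^(ceph_origin|ceph_repository|public_network|cluster_network|monitor_address_block)' ~/projects/ceph/ceph-ansible/group_vars/all.yml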
Edit the configuration file osds.yml
vi ~/projects/ceph/ceph-ansible/group_vars/osds.yml
Edit the following lines
osd_auto_discovery: false
devices:
- /dev/sdb
- /dev/sdc
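Before deploying, confirm that /dev/sdb and /dev/sdc exist and are unused on every OSD node. A quick check from the maas node, assuming the ceph-public hostnames used in the inventory below resolve and passwordless SSH is in place
ssh ceph-public.controller-1.lab lsblk
Repeat for the remaining OSD nodes.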
Create the Ansible inventory file /etc/ansible/hosts
sudo mkdir -p /etc/ansible
sudo vi /etc/ansible/hosts
Add the following lines
[mons]
ceph-public.controller-1.lab
ceph-public.controller-2.lab
ceph-public.controller-3.lab
[osds]
ceph-public.controller-1.lab
ceph-public.controller-2.lab
ceph-public.controller-3.lab
ceph-public.compute-1.lab
ceph-public.compute-2.lab
ceph-public.compute-3.lab
[mgrs]
ceph-public.controller-1.lab
ceph-public.controller-2.lab
ceph-public.controller-3.lab
[grafana-server]
controller-1.lab
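Verify that Ansible parses the inventory as expected
ansible all --list-hosts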
Edit the configuration file clients.yml
vi ~/projects/ceph/ceph-ansible/group_vars/clients.yml
Edit the following variable
copy_admin_key: true
Test the Ansible connection to all nodes
ansible -m ping all
Deploy Ceph
ansible-playbook ./site.yml
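The run takes a while on lab hardware. If you want a record of the output for troubleshooting, pipe it through tee instead (the log file name is arbitrary)
ansible-playbook ./site.yml 2>&1 | tee ~/ceph-deploy.log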
When the playbook completes, log in to node-1 and check the cluster
sudo ceph status
sudo ceph osd tree
Sample output
openstack@node-1:~$ sudo ceph status
  cluster:
    id:     1eb2aee2-ad00-4b2f-8f83-071f03692864
    health: HEALTH_WARN
            mons are allowing insecure global_id reclaim

  services:
    mon: 3 daemons, quorum node-1,node-2,node-3 (age 6m)
    mgr: node-2(active, since 4m), standbys: node-1, node-3
    osd: 12 osds: 12 up (since 7m), 12 in (since 7m)

  data:
    pools:   1 pools, 1 pgs
    objects: 0 objects, 0 B
    usage:   63 MiB used, 96 GiB / 96 GiB avail
    pgs:     1 active+clean

openstack@node-1:~$ sudo ceph osd tree
ID   CLASS  WEIGHT   TYPE NAME        STATUS  REWEIGHT  PRI-AFF
 -1         0.09357  root default
 -9         0.01559      host node-1
  5    hdd  0.00780          osd.5        up   1.00000  1.00000
 10    hdd  0.00780          osd.10       up   1.00000  1.00000
-13         0.01559      host node-2
  4    hdd  0.00780          osd.4        up   1.00000  1.00000
 11    hdd  0.00780          osd.11       up   1.00000  1.00000
-11         0.01559      host node-3
  3    hdd  0.00780          osd.3        up   1.00000  1.00000
  9    hdd  0.00780          osd.9        up   1.00000  1.00000
 -3         0.01559      host node-4
  1    hdd  0.00780          osd.1        up   1.00000  1.00000
  8    hdd  0.00780          osd.8        up   1.00000  1.00000
 -5         0.01559      host node-5
  2    hdd  0.00780          osd.2        up   1.00000  1.00000
  6    hdd  0.00780          osd.6        up   1.00000  1.00000
 -7         0.01559      host node-6
  0    hdd  0.00780          osd.0        up   1.00000  1.00000
  7    hdd  0.00780          osd.7        up   1.00000  1.00000
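The HEALTH_WARN about insecure global_id reclaim is expected on a fresh Pacific cluster. In a lab, once all clients run a recent Ceph release, it can be cleared by disabling the insecure reclaim (run on a monitor node)
sudo ceph config set mon auth_allow_insecure_global_id_reclaim false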
Create and initialize the RBD pools for the OpenStack services (Cinder, Glance, Nova)
sudo ceph osd pool create volumes
sudo ceph osd pool create images
sudo ceph osd pool create backups
sudo ceph osd pool create vms
sudo rbd pool init volumes
sudo rbd pool init images
sudo rbd pool init backups
sudo rbd pool init vms
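Verify that the pools exist
sudo ceph osd pool ls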
Configure client authorizations (cephx keyrings for the OpenStack services)
sudo ceph auth get-or-create client.glance \
mon 'profile rbd' \
osd 'profile rbd pool=images' \
mgr 'profile rbd pool=images' \
-o /etc/ceph/ceph.client.glance.keyring
sudo ceph auth get-or-create client.cinder \
mon 'profile rbd' \
osd 'profile rbd pool=volumes, profile rbd pool=vms, profile rbd-read-only pool=images' \
mgr 'profile rbd pool=volumes, profile rbd pool=vms' \
-o /etc/ceph/ceph.client.cinder.keyring
sudo ceph auth get-or-create client.cinder-backup \
mon 'profile rbd' \
osd 'profile rbd pool=backups' \
-o /etc/ceph/ceph.client.cinder-backup.keyring
sudo ceph auth get-or-create client.nova \
mon 'profile rbd' \
osd 'profile rbd pool=vms, profile rbd pool=images' \
-o /etc/ceph/ceph.client.nova.keyring
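To verify, print each client's key and capabilities
sudo ceph auth get client.glance
sudo ceph auth get client.cinder
sudo ceph auth get client.cinder-backup
sudo ceph auth get client.nova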
We’re done