get rpm key: rpm --import 'https://download.ceph.com/keys/release.asc'
somewhere on the above page find the content of the ceph.repo file and dump
this in /etc/yum.repos.d (we are using 'mimic' and 'el7')

Apparently we are installing a storage cluster *manually*:
yum install yum-plugin-priorities
yum install snappy leveldb gdisk python-argparse gperftools-libs
yum install ceph

* Configuration:

* Execute commands under "Monitor Bootstrapping"
ceph-authtool --create-keyring /tmp/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'
ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow *' --cap mgr 'allow *'
ceph-authtool --create-keyring /var/lib/ceph/bootstrap-osd/ceph.keyring --gen-key -n client.bootstrap-osd --cap mon 'profile bootstrap-osd'
ceph-authtool /tmp/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring
ceph-authtool /tmp/ceph.mon.keyring --import-keyring /var/lib/ceph/bootstrap-osd/ceph.keyring
monmaptool --create --add cldsk03 <cldsk03-ip> --fsid 6260b8f3-d557-4443-a8c4-15ba142c9478 /tmp/monmap
monmaptool --add cldsk04 <cldsk04-ip> --fsid 6260b8f3-d557-4443-a8c4-15ba142c9478 /tmp/monmap
monmaptool --add cldsk05 <cldsk05-ip> --fsid 6260b8f3-d557-4443-a8c4-15ba142c9478 /tmp/monmap

* Quarantine /var/lib/ceph (on cldsk03,4,5)
df -h
umount /dev/mapper/cldsk03vg-lstagevol
lvremove /dev/mapper/cldsk03vg-lstagevol
Do you really want to remove active logical volume cldsk03vg/lstagevol? [y/n]: y
lvcreate -n cephvol -L 10G cldsk03vg
WARNING: ext4 signature detected on /dev/cldsk04vg/cephvol at offset 1080. Wipe it? [y/n]: y

mkfs.ext4 /dev/cldsk03vg/cephvol
tune2fs -c0 -i0 /dev/cldsk03vg/cephvol
emacs /etc/fstab:
remove: /dev/mapper/cldsk03vg-lstagevol /srv/localstage ext4 defaults 1 2
add: /dev/mapper/cldsk03vg-cephvol /var/lib/ceph ext4 defaults 1 2
as /var/lib/ceph isn't empty at that point, preserve old content:
cd /var/lib
mv ceph/ ceph.old
mkdir ceph
mount -a
ls -l
chown ceph:ceph ceph
chmod 750 ceph
mv ceph.old/* ceph (note the '*')
rmdir ceph.old/ (not rm -rf)
restorecon -vr ceph

Note: Some bits of the install/documentation seem to assume the cluster name is 'ceph', so for the time being we'll go with that.
At this point all three nodes need (the *same*, copied from cldsk03): /tmp/ceph.mon.keyring (change ownership to 'ceph:ceph'), /tmp/monmap, and the initial /etc/ceph/ceph.conf containing: fsid = 6260b8f3-d557-4443-a8c4-15ba142c9478, mon initial members = cldsk03,cldsk04,cldsk05, mon host = <cldsk03-ip>,<cldsk04-ip>,<cldsk05-ip>
scp -3 root@cldsk03:/etc/ceph/ceph.client.admin.keyring root@cldsk05:/etc/ceph/ceph.client.admin.keyring
scp -3 root@cldsk03:/etc/ceph/ceph.client.admin.keyring root@cldsk04:/etc/ceph/ceph.client.admin.keyring

* bootstrapping ("creating the initial datafiles") the monitors
runuser -s /bin/bash ceph
make a monitor directory: mkdir /var/lib/ceph/mon/ceph-$(hostname -s)
ceph-mon --cluster ceph --mkfs -i $(hostname -s) --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring
[repeat on 04 and 05]
Now configure /etc/ceph/ceph.conf (identical on all nodes).
[root@cldsk03 ceph]# cat /etc/ceph/ceph.conf
fsid = 6260b8f3-d557-4443-a8c4-15ba142c9478
mon initial members = cldsk03,cldsk04,cldsk05
mon host = <cldsk03-ip>,<cldsk04-ip>,<cldsk05-ip>

# public nodes here are the openstack nodes
public network = <CIDR of the network shared with the openstack nodes, e.g. a.b.c.0/24>
# This sets the authentication mode. Only options are none or cephx.
auth cluster required = cephx
auth service required = cephx
auth client required = cephx
# write cache size, best guess
osd journal size = 8192
# number of copies of each file (3 nodes, hence 3 copies)
osd pool default size = 3
# Allow writing n copies in a degraded state.
osd pool default min size = 2
# Here: 1 disk = 1 OSD ('object storage device')
# 36 disks * 100 / (osd pool default size) = 1200
# 2048 for future expansion
osd pool default pg num = 2048
osd pool default pgp num = 2048
# setting it to one makes Ceph put
# replicas on different nodes, rather than
# just different object stores (which can be on the same node)
osd crush chooseleaf type = 1

Now start the ceph monitors (on all nodes)

systemctl start ceph-mon@$(hostname -s)    # i.e. ceph-mon@cldsk03 / @cldsk04 / @cldsk05 on the respective node
systemctl enable ceph-mon@$(hostname -s)

# check it's working: ceph -s
(HEALTH_OK, mon: 3 daemons)

* install ceph-mgr
Note that the official instructions are incomplete.
This has to be done on all three nodes.

Make a key:
ceph auth get-or-create mgr.$(hostname -s) mon 'allow profile mgr' osd 'allow *' mds 'allow *'
(grab key from screen)
and dump it in a (new) file /var/lib/ceph/mgr/ceph-$(hostname -s)/keyring
chown -R ceph:ceph /var/lib/ceph/mgr/ceph-$(hostname -s)
chmod 600 /var/lib/ceph/mgr/ceph-$(hostname -s)/keyring

start ceph-mgr via systemctl (if starting by hand for testing, note the cluster name is "ceph"):
systemctl start ceph-mgr@$(hostname -s)
(and check via ceph -s it has actually started and registered)
systemctl enable ceph-mgr@$(hostname -s)

# Making OSDs
scp -3 root@cldsk03:/var/lib/ceph/bootstrap-osd/ceph.keyring root@cldsk04,05:/var/lib/ceph/bootstrap-osd/ceph.keyring

it pays to do the first one by hand:
ceph-volume lvm create --data /dev/sda
for i in b c d e f g h i j k l; do ceph-volume lvm create --data /dev/sd$i; done
do this for all nodes, one after the other
ceph -s to check that the OSDs have all appeared

At the end it should look like this:
[root@cldsk05 ~]# ceph -s
id: 6260b8f3-d557-4443-a8c4-15ba142c9478
health: HEALTH_OK

mon: 3 daemons, quorum cldsk03,cldsk04,cldsk05
mgr: cldsk05(active), standbys: cldsk03, cldsk04
osd: 36 osds: 36 up, 36 in

pools: 0 pools, 0 pgs
objects: 0 objects, 0 B
usage: 37 GiB used, 321 TiB / 321 TiB avail

* install the metadata service
mkdir -p /var/lib/ceph/mds/ceph-$(hostname -s)
ceph-authtool --create-keyring /var/lib/ceph/mds/ceph-$(hostname -s)/keyring --gen-key -n mds.$(hostname -s)
chown -R ceph:ceph /var/lib/ceph/mds/ceph-$(hostname -s)
ceph auth add mds.$(hostname -s) osd "allow rwx" mds "allow" mon "allow profile mds" -i /var/lib/ceph/mds/ceph-$(hostname -s)/keyring
emacs /etc/ceph/ceph.conf

add (include all nodes so the conf file is identical on all of them)
[mds.cldsk03]
host = cldsk03

[mds.cldsk04]
host = cldsk04

[mds.cldsk05]
host = cldsk05

systemctl enable ceph-mds@$(hostname -s)
systemctl start ceph-mds@$(hostname -s)