http://docs.ceph.com/docs/master/install/get-packages/
get rpm key: rpm --import 'https://download.ceph.com/keys/release.asc'
Somewhere on the above page, find the content of the ceph.repo file and dump it
into /etc/yum.repos.d/ceph.repo (we are using 'mimic' and 'el7').
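For reference, the repo file for mimic/el7 should look roughly like this (take the authoritative version from the page above; only the [ceph] and [ceph-noarch] sections are shown here):

[ceph]
name=Ceph packages for $basearch
baseurl=https://download.ceph.com/rpm-mimic/el7/$basearch
enabled=1
priority=2
gpgcheck=1
gpgkey=https://download.ceph.com/keys/release.asc

[ceph-noarch]
name=Ceph noarch packages
baseurl=https://download.ceph.com/rpm-mimic/el7/noarch
enabled=1
priority=2
gpgcheck=1
gpgkey=https://download.ceph.com/keys/release.asc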

Apparently we are installing a storage cluster *manually*:
http://docs.ceph.com/docs/master/install/install-storage-cluster/
yum install yum-plugin-priorities
yum install snappy leveldb gdisk python-argparse gperftools-libs
yum install ceph
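Quick sanity check that the expected release got installed (mimic is the 13.2.x series):
ceph --version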

* Configuration:
http://docs.ceph.com/docs/master/install/manual-deployment/

* Execute commands under "Monitor Bootstrapping"
ceph-authtool --create-keyring /tmp/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'
ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow *' --cap mgr 'allow *'
ceph-authtool --create-keyring /var/lib/ceph/bootstrap-osd/ceph.keyring --gen-key -n client.bootstrap-osd --cap mon 'profile bootstrap-osd'
ceph-authtool /tmp/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring
ceph-authtool /tmp/ceph.mon.keyring --import-keyring /var/lib/ceph/bootstrap-osd/ceph.keyring
monmaptool --create --add cldsk03.cloud 10.0.1.1 --fsid 6260b8f3-d557-4443-a8c4-15ba142c9478 /tmp/monmap
monmaptool --add cldsk04.cloud 10.0.1.2 --fsid 6260b8f3-d557-4443-a8c4-15ba142c9478 /tmp/monmap
monmaptool --add cldsk05.cloud 10.0.1.3 --fsid 6260b8f3-d557-4443-a8c4-15ba142c9478 /tmp/monmap
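(The fsid is just a random UUID; presumably it was generated once beforehand with something like
uuidgen
and then pasted into the monmaptool commands above and into ceph.conf.)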

* Quarantine /var/lib/ceph on its own logical volume (on cldsk03, 04 and 05)
df -h   (to find the old localstage volume)
umount /dev/mapper/cldsk03vg-lstagevol
lvremove /dev/mapper/cldsk03vg-lstagevol
(answer "Do you really want to remove active logical volume cldsk03vg/lstagevol? [y/n]:" with y)
lvcreate -n cephvol -L 10G cldsk03vg
(on cldsk04 this prompted "WARNING: ext4 signature detected on /dev/cldsk04vg/cephvol at offset 1080. Wipe it? [y/n]:"; answer y)

mkfs.ext4 /dev/cldsk03vg/cephvol
tune2fs -c0 -i0 /dev/cldsk03vg/cephvol
emacs /etc/fstab:
remove: /dev/mapper/cldsk03vg-lstagevol /srv/localstage ext4 defaults 1 2
add: /dev/mapper/cldsk03vg-cephvol /var/lib/ceph ext4 defaults 1 2
as /var/lib/ceph isn't empty at that point, preserve old content:
cd /var/lib
mv ceph/ ceph.old
mkdir ceph
mount -a
ls -l
chown ceph:ceph ceph
chmod 750 ceph
mv ceph.old/* ceph (note the '*')
rmdir ceph.old/ (deliberately not rm -rf, so it fails if anything was left behind)
restorecon -vr ceph
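Quick check that the new volume is mounted where it should be and that ownership and SELinux context look right:
df -h /var/lib/ceph
ls -ldZ /var/lib/ceph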

Note: Some bits of the install/documentation seem to assume the cluster name is 'ceph', so for the time being we'll go with that.
At this point all three nodes need the *same* files, copied from cldsk03:
  /tmp/ceph.mon.keyring (change ownership to 'ceph:ceph')
  /tmp/monmap
  and the initial /etc/ceph/ceph.conf, containing:
    fsid = 6260b8f3-d557-4443-a8c4-15ba142c9478
    mon initial members = cldsk03.cloud, cldsk04.cloud, cldsk05.cloud
    mon host = 10.0.1.1, 10.0.1.2, 10.0.1.3
scp -3 root@cldsk03:/etc/ceph/ceph.client.admin.keyring root@cldsk05:/etc/ceph/ceph.client.admin.keyring
scp -3 root@cldsk03:/etc/ceph/ceph.client.admin.keyring root@cldsk04:/etc/ceph/ceph.client.admin.keyring
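The mon keyring and the monmap can be pushed out the same way (and remember the chown ceph:ceph on /tmp/ceph.mon.keyring on each node):
scp -3 root@cldsk03:/tmp/ceph.mon.keyring root@cldsk04:/tmp/ceph.mon.keyring
scp -3 root@cldsk03:/tmp/ceph.mon.keyring root@cldsk05:/tmp/ceph.mon.keyring
scp -3 root@cldsk03:/tmp/monmap root@cldsk04:/tmp/monmap
scp -3 root@cldsk03:/tmp/monmap root@cldsk05:/tmp/monmap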

* bootstrapping ("creating the initial datafiles") the monitors
runuser -s /bin/bash ceph
make a monitor directory: mkdir /var/lib/ceph/mon/ceph-cldsk03.cloud/
ceph-mon --cluster ceph --mkfs -i cldsk03.cloud --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring
[repeat on 04 and 05]
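Quick check that --mkfs actually populated the monitor directory, e.g.:
ls -l /var/lib/ceph/mon/ceph-cldsk03.cloud/
(should contain at least a keyring and a store.db directory)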
Now configure /etc/ceph/ceph.conf (identical on all nodes).
[root@cldsk03 ceph]# cat /etc/ceph/ceph.conf
[global]
fsid = 6260b8f3-d557-4443-a8c4-15ba142c9478
mon initial members = cldsk03.cloud, cldsk04.cloud, cldsk05.cloud
mon host = 10.0.1.1, 10.0.1.2, 10.0.1.3

# the 'public' network is the one the OpenStack nodes are on
public network = 10.0.0.0/21
# This sets the authentication mode. Only options are none or cephx.
auth cluster required = cephx
auth service required = cephx
auth client required = cephx
#
# journal ('write cache') size in MB, best guess
osd journal size = 8192
# number of copies of each object (3 nodes, hence 3 copies)
osd pool default size = 3
# Allow writing n copies in a degraded state.
osd pool default min size = 2
# Here: 1 disk = 1 OSD ('object storage device')
# 36 disks * 100 / (osd pool default size) = 36 * 100 / 3 = 1200
# rounded up to the next power of two, 2048, for future expansion
osd pool default pg num = 2048
osd pool default pgp num = 2048
# setting it to 1 makes Ceph put
# replicas on different nodes, rather than
# just on different OSDs (which can be on the same node)
osd crush chooseleaf type = 1
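
Sanity check on the pg numbers above (rule of thumb: total OSDs * 100 / number of replicas, rounded up to a power of two):
echo $(( 36 * 100 / 3 ))    # 1200, next power of two is 2048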

Now start the ceph monitors (on each node, using that node's own name):

systemctl start ceph-mon@cldsk03.cloud    (ceph-mon@cldsk04.cloud and ceph-mon@cldsk05.cloud on the other nodes)
systemctl enable ceph-mon@cldsk03.cloud

# check it's working: ceph -s
(HEALTH_OK, mon: 3 daemons)

* install ceph-mgr
Note that the official instructions are incomplete.
This has to be done on all three nodes.

Make a key:
ceph auth get-or-create mgr.cldsk03.cloud mon 'allow profile mgr' osd 'allow *' mds 'allow *'
(grab the key from the screen)
and dump it in a (new) file
/var/lib/ceph/mgr/ceph-cldsk03.cloud/keyring
(create the /var/lib/ceph/mgr/ceph-cldsk03.cloud directory first if it doesn't exist)
chown ceph:ceph /var/lib/ceph/mgr/ceph-cldsk03.cloud/keyring
chmod 600 /var/lib/ceph/mgr/ceph-cldsk03.cloud/keyring
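The output of 'ceph auth get-or-create' is already in keyring format, so instead of copying the key off the screen it can be redirected straight into the file:
ceph auth get-or-create mgr.cldsk03.cloud mon 'allow profile mgr' osd 'allow *' mds 'allow *' > /var/lib/ceph/mgr/ceph-cldsk03.cloud/keyring
(then do the chown/chmod as above)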

start ceph-mgr via systemctl (if starting by hand for testing, note the cluster name is "ceph"):
systemctl start ceph-mgr@cldsk03.cloud
(and check via ceph -s it has actually started and registered)
systemctl enable ceph-mgr@cldsk03.cloud

* Making OSDs
scp -3 root@cldsk03:/var/lib/ceph/bootstrap-osd/ceph.keyring root@cldsk04:/var/lib/ceph/bootstrap-osd/ceph.keyring
scp -3 root@cldsk03:/var/lib/ceph/bootstrap-osd/ceph.keyring root@cldsk05:/var/lib/ceph/bootstrap-osd/ceph.keyring

It pays to do the first one by hand:
ceph-volume lvm create --data /dev/sda
then loop over the remaining disks:
for i in b c d e f g h i j k l; do ceph-volume lvm create --data /dev/sd$i; done
Do this on all nodes, one node after the other.
ceph -s to check that the OSDs have all appeared.
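For a per-node view, ceph osd tree lists the OSDs grouped by host together with their up/down status:
ceph osd tree
(expect 12 OSDs under each host, all up)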

At the end it should look something like this:
[root@cldsk05 ~]# ceph -s
  cluster:
    id:     6260b8f3-d557-4443-a8c4-15ba142c9478
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum cldsk03.cloud,cldsk04.cloud,cldsk05.cloud
    mgr: cldsk03.cloud(active), standbys: cldsk04.cloud, cldsk05.cloud
    osd: 36 osds: 36 up, 36 in

  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   37 GiB used, 321 TiB / 321 TiB avail
    pgs:

* install the metadata service
mkdir -p /var/lib/ceph/mds/ceph-cldsk03.cloud
ceph-authtool --create-keyring /var/lib/ceph/mds/ceph-cldsk03.cloud/keyring --gen-key -n mds.cldsk03.cloud
chown ceph:ceph /var/lib/ceph/mds/ceph-cldsk03.cloud/keyring
ceph auth add mds.cldsk03.cloud osd "allow rwx" mds "allow" mon "allow profile mds" -i /var/lib/ceph/mds/ceph-cldsk03.cloud/keyring
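To double-check the key and caps were registered:
ceph auth get mds.cldsk03.cloud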
emacs /etc/ceph/ceph.conf

add (include all nodes so the conf file is identical on all of them):
[mds.cldsk03.cloud]
host = cldsk03.cloud

[mds.cldsk04.cloud]
host = cldsk04.cloud

[mds.cldsk05.cloud]
host = cldsk05.cloud


systemctl enable ceph-mds@cldsk03.cloud
systemctl start ceph-mds@cldsk03.cloud
[repeat the mds setup on 04 and 05, with the node names changed accordingly]