Comment 5 for bug 1659225

Vance Morris (vmorris) wrote:

Could you give more information on why this is necessary?

I'm currently using a z/KVM compute node with a Ceph backend, and I can create instances that boot from volume as well as instances that boot from image.
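
Both paths can be exercised with the standard CLI, along these lines (the image, flavor, volume, and network names below are placeholders, not the ones from my environment):

  openstack server create --image <image> --flavor <flavor> --network <network> bfi-test
  openstack volume create --image <image> --size 10 bfv-vol
  openstack server create --volume bfv-vol --flavor <flavor> --network <network> bfv-test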

I can give more information on my configuration and environment if needed.

[root@zs95kf ~]# cat /etc/os-release
NAME="KVM for IBM z Systems"
VERSION="1.1.3-rc.3 (Z)"
ID="kvmibm"
ID_LIKE="rhel fedora"
VERSION_ID="1.1.3-rc.3"
PRETTY_NAME="KVM for IBM z Systems 1.1.3-rc.3 (Z)"
ANSI_COLOR="0;34"
CPE_NAME="cpe:/o:ibm:kvmibm:1.1.3-rc.3"
BUILD_ID="20161216"

[root@zs95kf ~]# yum list | grep ceph
ceph.s390x 1:10.2.3-4.el7_2.kvmibm1_1_3.1 @frobisher
ceph-base.s390x 1:10.2.3-4.el7_2.kvmibm1_1_3.1 @frobisher
ceph-common.s390x 1:10.2.3-4.el7_2.kvmibm1_1_3.1 @frobisher
ceph-fuse.s390x 1:10.2.3-4.el7_2.kvmibm1_1_3.1 @frobisher
ceph-mds.s390x 1:10.2.3-4.el7_2.kvmibm1_1_3.1 @frobisher
ceph-mon.s390x 1:10.2.3-4.el7_2.kvmibm1_1_3.1 @frobisher
ceph-osd.s390x 1:10.2.3-4.el7_2.kvmibm1_1_3.1 @frobisher
ceph-selinux.s390x 1:10.2.3-4.el7_2.kvmibm1_1_3.1 @frobisher
libcephfs1.s390x 1:10.2.3-4.el7_2.kvmibm1_1_3.1 @frobisher
python-cephfs.s390x 1:10.2.3-4.el7_2.kvmibm1_1_3.1 @frobisher

[root@zs95kf ~]# cat /etc/ceph/ceph.conf
[global]
fsid = 7e60958b-2adf-4313-ba47-95f2c725d886
mon_initial_members = xcephu27, xcephu29, xcephu31
mon_host = 10.20.92.121,10.20.92.122,10.20.92.123
public_network = 10.20.92.0/24
cluster_network = 10.20.93.0/24
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
[mon]
mon_clock_drift_allowed = .500
[client]
    rbd cache = true
    rbd cache writethrough until flush = true
    admin socket = /var/run/ceph/guests/$cluster-$type.$id.$pid.$cctid.asok
    log file = /var/log/qemu/qemu-guest-$pid.log
    rbd concurrent management ops = 20
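
Note that the admin socket and log file paths in the [client] section only take effect if the directories exist and are writable by the QEMU process; something like the following, where the qemu:libvirt ownership is an assumption (the QEMU user and group names vary by distro):

  mkdir -p /var/run/ceph/guests/ /var/log/qemu/
  chown qemu:libvirt /var/run/ceph/guests/ /var/log/qemu/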

[root@zs95kf ~]# cat /etc/nova/nova.conf
...
[libvirt]
disk_cachemodes = block=none
live_migration_tunnelled = True
live_migration_uri = qemu+tcp://stack@%s/system
virt_type = kvm
inject_password = false
inject_key = false
inject_partition = -2
use_usb_tablet = False
cpu_mode = none
images_type = rbd
images_rbd_pool = u14-vms
images_rbd_ceph_conf = /etc/ceph/ceph.conf
disk_cachemodes="network=writeback"
rbd_user = cinder
rbd_secret_uuid = <redacted>
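
With images_type = rbd, nova-compute writes ephemeral root disks directly into the u14-vms pool, which can be sanity-checked with rbd (assuming the cinder keyring is readable; nova names these disks <instance uuid>_disk):

  rbd -p u14-vms ls --id cinder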

[root@zs95kf ~]# virsh dumpxml instance-228
...
    <disk type='network' device='disk'>
      <driver name='qemu' type='raw' cache='writeback'/>
      <auth username='cinder'>
        <secret type='ceph' uuid='053c94bd-d581-426c-9748-b1f11b63fdc6'/>
      </auth>
      <source protocol='rbd' name='u14-volumes/volume-84972b12-fea7-4330-a6e8-00c63d8b1a52'>
        <host name='10.20.92.121' port='6789'/>
        <host name='10.20.92.122' port='6789'/>
        <host name='10.20.92.123' port='6789'/>
      </source>
      <backingStore/>
      <target dev='vda' bus='virtio'/>
      <serial>84972b12-fea7-4330-a6e8-00c63d8b1a52</serial>
      <alias name='virtio-disk0'/>
      <address type='ccw' cssid='0xfe' ssid='0x0' devno='0x0000'/>
    </disk>
...
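
This instance boots from a volume, so the source points at the u14-volumes pool. For comparison, an image-booted instance would carry a source in the nova pool instead, roughly like this (the uuid is a placeholder):

      <source protocol='rbd' name='u14-vms/<instance uuid>_disk'>

The <address type='ccw'> element is the s390x channel I/O equivalent of the PCI slot assignment seen on other architectures.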