Could you give more information on why this is necessary?
I'm using a z/KVM compute node with a Ceph backend, and I'm able to create instances that boot from volume as well as instances that boot from image.
I can give more information on my configuration and environment if needed.
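For reference, these are roughly the two paths I tested (the image, flavor, and network names below are placeholders, not my actual values):

openstack server create --image <image> --flavor <flavor> --nic net-id=<net> boot-from-image-test
openstack volume create --image <image> --size 10 boot-vol
openstack server create --volume boot-vol --flavor <flavor> --nic net-id=<net> boot-from-volume-test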
[root@zs95kf ~]# cat /etc/os-release
NAME="KVM for IBM z Systems"
VERSION="1.1.3-rc.3 (Z)"
ID="kvmibm"
ID_LIKE="rhel fedora"
VERSION_ID="1.1.3-rc.3"
PRETTY_NAME="KVM for IBM z Systems 1.1.3-rc.3 (Z)"
ANSI_COLOR="0;34"
CPE_NAME="cpe:/o:ibm:kvmibm:1.1.3-rc.3"
BUILD_ID="20161216"
[root@zs95kf ~]# yum list | grep ceph
ceph.s390x                 1:10.2.3-4.el7_2.kvmibm1_1_3.1   @frobisher
ceph-base.s390x            1:10.2.3-4.el7_2.kvmibm1_1_3.1   @frobisher
ceph-common.s390x          1:10.2.3-4.el7_2.kvmibm1_1_3.1   @frobisher
ceph-fuse.s390x            1:10.2.3-4.el7_2.kvmibm1_1_3.1   @frobisher
ceph-mds.s390x             1:10.2.3-4.el7_2.kvmibm1_1_3.1   @frobisher
ceph-mon.s390x             1:10.2.3-4.el7_2.kvmibm1_1_3.1   @frobisher
ceph-osd.s390x             1:10.2.3-4.el7_2.kvmibm1_1_3.1   @frobisher
ceph-selinux.s390x         1:10.2.3-4.el7_2.kvmibm1_1_3.1   @frobisher
libcephfs1.s390x           1:10.2.3-4.el7_2.kvmibm1_1_3.1   @frobisher
python-cephfs.s390x        1:10.2.3-4.el7_2.kvmibm1_1_3.1   @frobisher
[root@zs95kf ~]# cat /etc/ceph/ceph.conf
[global]
fsid = 7e60958b-2adf-4313-ba47-95f2c725d886
mon_initial_members = xcephu27, xcephu29, xcephu31
mon_host = 10.20.92.121,10.20.92.122,10.20.92.123
public_network = 10.20.92.0/24
cluster_network = 10.20.93.0/24
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
[mon]
mon_clock_drift_allowed = .500
[client]
rbd cache = true
rbd cache writethrough until flush = true
admin socket = /var/run/ceph/guests/$cluster-$type.$id.$pid.$cctid.asok
log file = /var/log/qemu/qemu-guest-$pid.log
rbd concurrent management ops = 20
[root@zs95kf ~]# cat /etc/nova/nova.conf
...
[libvirt]
disk_cachemodes = block=none
live_migration_tunnelled = True
live_migration_uri = qemu+tcp://stack@%s/system
virt_type = kvm
inject_password = false
inject_key = false
inject_partition = -2
use_usb_tablet = False
cpu_mode = none
images_type = rbd
images_rbd_pool = u14-vms
images_rbd_ceph_conf = /etc/ceph/ceph.conf
disk_cachemodes = "network=writeback"
rbd_user = cinder
rbd_secret_uuid = <redacted>
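In case it helps, a quick way to confirm this node can reach the images pool as the configured rbd_user (assuming the cinder keyring is in the default /etc/ceph location) would be:

rbd ls u14-vms --id cinder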
[root@zs95kf ~]# virsh dumpxml instance-228
....
    <disk type='network' device='disk'>
      <driver name='qemu' type='raw' cache='writeback'/>
      <auth username='cinder'>
        <secret type='ceph' uuid='053c94bd-d581-426c-9748-b1f11b63fdc6'/>
      </auth>
      <source protocol='rbd' name='u14-volumes/volume-84972b12-fea7-4330-a6e8-00c63d8b1a52'>
        <host name='10.20.92.121' port='6789'/>
        <host name='10.20.92.122' port='6789'/>
        <host name='10.20.92.123' port='6789'/>
      </source>
      <backingStore/>
      <target dev='vda' bus='virtio'/>
      <serial>84972b12-fea7-4330-a6e8-00c63d8b1a52</serial>
      <alias name='virtio-disk0'/>
      <address type='ccw' cssid='0xfe' ssid='0x0' devno='0x0000'/>
    </disk>
....
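If it's useful, the backing RBD image for that boot volume can also be inspected directly from the compute node (same keyring assumption as above):

rbd info u14-volumes/volume-84972b12-fea7-4330-a6e8-00c63d8b1a52 --id cinder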