2020-03-11 01:19:03 | norman shen | description
Currently, it seems Cinder will always rename the RBD image after a volume migration, as long as it is able to do so: https://github.com/openstack/cinder/blob/c29285e7dcda7f5300c5a3375294dfa4aeb56430/cinder/volume/drivers/rbd.py#L1774
I think this causes a problem when retyping an in-use volume. After the retype succeeds, cinder-volume renames the migrated image back to the original volume's name. This works fine until I shut down and start the VM: Nova's block_device_mapping still records the image name it saw during the migration (the temporary volume's name), so it tries to connect to an RBD image that no longer exists and the boot fails.
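For context, here is a simplified sketch of what the driver does at the linked line (paraphrased from the Rocky-era source with logging trimmed; it lives inside RBDDriver, so it is not runnable on its own):

    def update_migrated_volume(self, ctxt, volume, new_volume, original_volume_status):
        # After the migration the data lives in an image named after the new
        # (temporary) volume; try to rename it back to the original volume's name.
        name_id = None
        provider_location = None
        existing_name = CONF.volume_name_template % new_volume.id  # e.g. volume-<new id>
        wanted_name = CONF.volume_name_template % volume.id        # e.g. volume-<original id>
        with RADOSClient(self) as client:
            try:
                self.RBDProxy().rename(client.ioctx, existing_name, wanted_name)
            except self.rbd.ImageNotFound:
                # Only if the rename fails does Cinder keep pointing at the new
                # image, via _name_id / provider_location.
                name_id = new_volume._name_id or new_volume.id
                provider_location = new_volume['provider_location']
        return {'_name_id': name_id, 'provider_location': provider_location}

Nothing refreshes the connection_info that Nova already recorded for the attachment, which appears to be where the stale name shown below comes from.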
Cinder version:
stable/rocky on Ubuntu 16.04. The reported issue appears to still exist on master.
Cinder configuration:
[ceph]
image_volume_cache_enabled = True
volume_clear = zero
rbd_max_clone_depth = 5
rbd_flatten_volume_from_snapshot = False
rbd_secret_uuid = 7de9ebf7-d0b6-4b08-97df-1a0eb3721c5a
rbd_user = cinder
rbd_pool = volumes
rbd_ceph_conf = /etc/ceph/ceph.conf
volume_driver = cinder.volume.drivers.rbd.RBDDriver
volume_backend_name = ceph
[test]
image_volume_cache_enabled = True
volume_clear = zero
rbd_max_clone_depth = 5
rbd_flatten_volume_from_snapshot = False
rbd_secret_uuid = 7de9ebf7-d0b6-4b08-97df-1a0eb3721c5a
rbd_user = cinder
rbd_pool = test
rbd_ceph_conf = /etc/ceph/ceph.conf
volume_driver = cinder.volume.drivers.rbd.RBDDriver
volume_backend_name = test
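(The exact command used to trigger the retype is not captured in this report; a hypothetical reproduction of the in-use retype with python-cinderclient might look like the sketch below. The auth URL, credentials, and the 'test' volume type name are placeholders, not values taken from this deployment.)

    from keystoneauth1 import loading, session
    from cinderclient import client

    # Placeholder credentials, for illustration only.
    loader = loading.get_plugin_loader('password')
    auth = loader.load_from_options(auth_url='http://controller:5000/v3',
                                    username='admin', password='secret',
                                    project_name='admin',
                                    user_domain_name='Default',
                                    project_domain_name='Default')
    cinder = client.Client('3', session=session.Session(auth=auth))

    # Retype volume v3 to the 'test' volume type; 'on-demand' allows the
    # migration that retyping an attached volume requires.
    cinder.volumes.retype('fbb17466-46d6-4a8d-bd74-44bb259d6a4c', 'test', 'on-demand')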
During retyping:
root@sjtt-test:~# openstack volume list
+--------------------------------------+------+-----------+------+-----------------------------+
| ID                                   | Name | Status    | Size | Attached to                 |
+--------------------------------------+------+-----------+------+-----------------------------+
| fbb17466-46d6-4a8d-bd74-44bb259d6a4c | v3   | retyping  | 1    | Attached to s1 on /dev/vdb  |
| 49f43fe8-b514-4964-a153-850d7030f7b9 | v3   | attaching | 1    |                             |
| 1d55de9f-18d1-4450-bb6e-c9bba925408e | v2   | available | 1    |                             |
| 1dca9c63-6b3c-4b5d-8805-7bf2c01ab163 | v1   | in-use    | 1    | Attached to s1 on /dev/vda  |
+--------------------------------------+------+-----------+------+-----------------------------+
After retype:
root@sjtt-test:~# openstack volume list
+--------------------------------------+------+-----------+------+-----------------------------+
| ID                                   | Name | Status    | Size | Attached to                 |
+--------------------------------------+------+-----------+------+-----------------------------+
| fbb17466-46d6-4a8d-bd74-44bb259d6a4c | v3   | in-use    | 1    | Attached to s1 on /dev/vdb  |
root@sjtt-test:/etc/cinder# rbd ls test
volume-fbb17466-46d6-4a8d-bd74-44bb259d6a4c
mysql> select * from block_device_mapping where deleted = 0 and device_name='/dev/vdb' \G
*************************** 1. row ***************************
created_at: 2020-03-11 01:10:17
updated_at: 2020-03-11 01:11:38
deleted_at: NULL
id: 78
device_name: /dev/vdb
delete_on_termination: 0
snapshot_id: NULL
volume_id: fbb17466-46d6-4a8d-bd74-44bb259d6a4c
volume_size: 1
no_device: 0
connection_info: {"status": "reserved", "detached_at": "", "volume_id": "49f43fe8-b514-4964-a153-850d7030f7b9", "attach_mode": "null", "driver_volume_type": "rbd", "instance": "ca9b9311-17d2-4dbc-9054-a26296424543", "attached_at": "", "serial": "fbb17466-46d6-4a8d-bd74-44bb259d6a4c", "data": {"secret_type": "ceph", "name": "test/volume-49f43fe8-b514-4964-a153-850d7030f7b9", "encrypted": false, "discard": true, "keyring": null, "cluster_name": "ceph", "secret_uuid": "7de9ebf7-d0b6-4b08-97df-1a0eb3721c5a", "qos_specs": null, "auth_enabled": true, "volume_id": "49f43fe8-b514-4964-a153-850d7030f7b9", "hosts": ["10.110.31.164"], "access_mode": "rw", "auth_username": "cinder", "ports": ["6789"]}}
instance_uuid: ca9b9311-17d2-4dbc-9054-a26296424543
deleted: 0
source_type: volume
destination_type: volume
guest_format: NULL
device_type: NULL
disk_bus: NULL
boot_index: NULL
image_id: NULL
tag: NULL
attachment_id: 340eee36-d01e-48ab-ba9c-af72c9de6b5c
uuid: 8ad634a8-5437-4855-b8e4-984dcd92e449
1 row in set (0.00 sec)
Apparently the BDM connection_info is never updated after Cinder renames the image: it still points at test/volume-49f43fe8-b514-4964-a153-850d7030f7b9, which no longer exists. As a result, shutting down and then starting the VM fails with the following exception:
Mar 11 09:17:29 sjtt-test nova-compute[31220]: ERROR oslo_messaging.rpc.server File "/opt/stack/nova/nova/compute/manager.py", line 206, in decorated_function
Mar 11 09:17:29 sjtt-test nova-compute[31220]: ERROR oslo_messaging.rpc.server return function(self, context, *args, **kwargs)
Mar 11 09:17:29 sjtt-test nova-compute[31220]: ERROR oslo_messaging.rpc.server File "/opt/stack/nova/nova/compute/manager.py", line 2908, in start_instance
Mar 11 09:17:29 sjtt-test nova-compute[31220]: ERROR oslo_messaging.rpc.server self._power_on(context, instance)
Mar 11 09:17:29 sjtt-test nova-compute[31220]: ERROR oslo_messaging.rpc.server File "/opt/stack/nova/nova/compute/manager.py", line 2878, in _power_on
Mar 11 09:17:29 sjtt-test nova-compute[31220]: ERROR oslo_messaging.rpc.server block_device_info)
Mar 11 09:17:29 sjtt-test nova-compute[31220]: ERROR oslo_messaging.rpc.server File "/opt/stack/nova/nova/virt/libvirt/driver.py", line 3199, in power_on
Mar 11 09:17:29 sjtt-test nova-compute[31220]: ERROR oslo_messaging.rpc.server self._hard_reboot(context, instance, network_info, block_device_info)
Mar 11 09:17:29 sjtt-test nova-compute[31220]: ERROR oslo_messaging.rpc.server File "/opt/stack/nova/nova/virt/libvirt/driver.py", line 3089, in _hard_reboot
Mar 11 09:17:29 sjtt-test nova-compute[31220]: ERROR oslo_messaging.rpc.server vifs_already_plugged=True)
Mar 11 09:17:29 sjtt-test nova-compute[31220]: ERROR oslo_messaging.rpc.server File "/opt/stack/nova/nova/virt/libvirt/driver.py", line 5948, in _create_domain_and_network
Mar 11 09:17:29 sjtt-test nova-compute[31220]: ERROR oslo_messaging.rpc.server destroy_disks_on_failure)
Mar 11 09:17:29 sjtt-test nova-compute[31220]: ERROR oslo_messaging.rpc.server File "/usr/local/lib/python2.7/dist-packages/oslo_utils/excutils.py", line 220, in __exit__
Mar 11 09:17:29 sjtt-test nova-compute[31220]: ERROR oslo_messaging.rpc.server self.force_reraise()
Mar 11 09:17:29 sjtt-test nova-compute[31220]: ERROR oslo_messaging.rpc.server File "/usr/local/lib/python2.7/dist-packages/oslo_utils/excutils.py", line 196, in force_reraise
Mar 11 09:17:29 sjtt-test nova-compute[31220]: ERROR oslo_messaging.rpc.server six.reraise(self.type_, self.value, self.tb)
Mar 11 09:17:29 sjtt-test nova-compute[31220]: ERROR oslo_messaging.rpc.server File "/opt/stack/nova/nova/virt/libvirt/driver.py", line 5917, in _create_domain_and_network
Mar 11 09:17:29 sjtt-test nova-compute[31220]: ERROR oslo_messaging.rpc.server post_xml_callback=post_xml_callback)
Mar 11 09:17:29 sjtt-test nova-compute[31220]: ERROR oslo_messaging.rpc.server File "/opt/stack/nova/nova/virt/libvirt/driver.py", line 5851, in _create_domain
Mar 11 09:17:29 sjtt-test nova-compute[31220]: ERROR oslo_messaging.rpc.server guest.launch(pause=pause)
Mar 11 09:17:29 sjtt-test nova-compute[31220]: ERROR oslo_messaging.rpc.server File "/opt/stack/nova/nova/virt/libvirt/guest.py", line 144, in launch
Mar 11 09:17:29 sjtt-test nova-compute[31220]: ERROR oslo_messaging.rpc.server self._encoded_xml, errors='ignore')
Mar 11 09:17:29 sjtt-test nova-compute[31220]: ERROR oslo_messaging.rpc.server File "/usr/local/lib/python2.7/dist-packages/oslo_utils/excutils.py", line 220, in __exit__
Mar 11 09:17:29 sjtt-test nova-compute[31220]: ERROR oslo_messaging.rpc.server self.force_reraise()
Mar 11 09:17:29 sjtt-test nova-compute[31220]: ERROR oslo_messaging.rpc.server File "/usr/local/lib/python2.7/dist-packages/oslo_utils/excutils.py", line 196, in force_reraise
Mar 11 09:17:29 sjtt-test nova-compute[31220]: ERROR oslo_messaging.rpc.server six.reraise(self.type_, self.value, self.tb)
Mar 11 09:17:29 sjtt-test nova-compute[31220]: ERROR oslo_messaging.rpc.server File "/opt/stack/nova/nova/virt/libvirt/guest.py", line 139, in launch
Mar 11 09:17:29 sjtt-test nova-compute[31220]: ERROR oslo_messaging.rpc.server return self._domain.createWithFlags(flags)
Mar 11 09:17:29 sjtt-test nova-compute[31220]: ERROR oslo_messaging.rpc.server File "/usr/local/lib/python2.7/dist-packages/eventlet/tpool.py", line 186, in doit
Mar 11 09:17:29 sjtt-test nova-compute[31220]: ERROR oslo_messaging.rpc.server result = proxy_call(self._autowrap, f, *args, **kwargs)
Mar 11 09:17:29 sjtt-test nova-compute[31220]: ERROR oslo_messaging.rpc.server File "/usr/local/lib/python2.7/dist-packages/eventlet/tpool.py", line 144, in proxy_call
Mar 11 09:17:29 sjtt-test nova-compute[31220]: ERROR oslo_messaging.rpc.server rv = execute(f, *args, **kwargs)
Mar 11 09:17:29 sjtt-test nova-compute[31220]: ERROR oslo_messaging.rpc.server File "/usr/local/lib/python2.7/dist-packages/eventlet/tpool.py", line 125, in execute
Mar 11 09:17:29 sjtt-test nova-compute[31220]: ERROR oslo_messaging.rpc.server six.reraise(c, e, tb)
Mar 11 09:17:29 sjtt-test nova-compute[31220]: ERROR oslo_messaging.rpc.server File "/usr/local/lib/python2.7/dist-packages/eventlet/tpool.py", line 83, in tworker
Mar 11 09:17:29 sjtt-test nova-compute[31220]: ERROR oslo_messaging.rpc.server rv = meth(*args, **kwargs)
Mar 11 09:17:29 sjtt-test nova-compute[31220]: ERROR oslo_messaging.rpc.server File "/usr/local/lib/python2.7/dist-packages/libvirt.py", line 1098, in createWithFlags
Mar 11 09:17:29 sjtt-test nova-compute[31220]: ERROR oslo_messaging.rpc.server if ret == -1: raise libvirtError ('virDomainCreateWithFlags() failed', dom=self)
Mar 11 09:17:29 sjtt-test nova-compute[31220]: ERROR oslo_messaging.rpc.server libvirtError: internal error: process exited while connecting to monitor: 2020-03-11T01:17:28.423223Z qemu-system-x86_64: -drive file=rbd:test/volume-49f43fe8-b514-4964-a153-850d7030f7b9:id=cinder:auth_supported=cephx\;none:mon_host=10.110.31.164\:6789,file.password-secret=virtio-disk1-secret0,format=raw,if=none,id=drive-virtio-disk1,serial=fbb17466-46d6-4a8d-bd74-44bb259d6a4c,cache=writeback,discard=unmap: 'serial' is deprecated, please use the corresponding option of '-device' instead
Mar 11 09:17:29 sjtt-test nova-compute[31220]: ERROR oslo_messaging.rpc.server 2020-03-11T01:17:28.444643Z qemu-system-x86_64: -drive file=rbd:test/volume-49f43fe8-b514-4964-a153-850d7030f7b9:id=cinder:auth_supported=cephx\;none:mon_host=10.110.31.164\:6789,file.password-secret=virtio-disk1-secret0,format=raw,if=none,id=drive-virtio-disk1,serial=fbb17466-46d6-4a8d-bd74-44bb259d6a4c,cache=writeback,discard=unmap: error reading header from volume-49f43fe8-b514-4964-a153-850d7030f7b9: No such file or directory |
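To confirm the mismatch, one can check whether the image name recorded in the BDM connection_info still exists in the pool. A minimal sketch using the standard rados/rbd Python bindings (run on a node with the cinder keyring; the keyring name and conf path are assumptions about this deployment):

    import rados
    import rbd

    # data['name'] from the BDM connection_info shown above.
    stale_name = 'test/volume-49f43fe8-b514-4964-a153-850d7030f7b9'
    pool, image = stale_name.split('/', 1)

    cluster = rados.Rados(conffile='/etc/ceph/ceph.conf', rados_id='cinder')
    cluster.connect()
    try:
        ioctx = cluster.open_ioctx(pool)
        try:
            if image not in rbd.RBD().list(ioctx):
                print('BDM still references %s, but that image no longer exists' % stale_name)
        finally:
            ioctx.close()
    finally:
        cluster.shutdown()

Here 'rbd ls test' only shows volume-fbb17466-46d6-4a8d-bd74-44bb259d6a4c, so the lookup reports the stale reference, matching the "error reading header ... No such file or directory" failure from qemu above.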