Glance no longer supports multiple Ceph clusters since commit

Bug #1946743 reported by Florian Engelmann
Affects: kolla-ansible
Status: New
Importance: Undecided
Assigned to: Unassigned
Milestone: (none)

Bug Description

Hi,

we use the Glance multistore feature to store images in multiple availability zones. Since the following commit we are no longer able to copy multiple ceph.conf files (e.g. ceph.az1.conf, ceph.az2.conf and ceph.az3.conf) into the glance container:

https://opendev.org/openstack/kolla-ansible/commit/1f929336e363f1c3d168bad569460401e122de8f

Before this change, an asterisk (glob) was used to copy all ceph.* files, which worked fine for us.

How to handle multiple ceph clusters after this commit?
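
For context, the behaviour before the commit can be approximated with a glob-based copy task like the sketch below. This is a minimal sketch assuming stock Ansible modules and kolla-ansible's node_custom_config / node_config_directory variables; the task name, destination path and file mode are illustrative and not the exact task that the commit removed:

# Sketch: copy every ceph.* file from the glance custom config
# directory into the generated glance-api config directory, as the
# pre-commit glob behaviour did. Name, paths and mode are illustrative.
- name: Copying over all ceph.* files for glance-api
  copy:
    src: "{{ item }}"
    dest: "{{ node_config_directory }}/glance-api/"
    mode: "0660"
  with_fileglob:
    - "{{ node_custom_config }}/glance/ceph.*"

With a task like this, ceph.az1.conf, ceph.az2.conf and ceph.az3.conf placed under {{ node_custom_config }}/glance/ would all end up in the container, matching the rbd_store_ceph_conf paths in the configuration shown in the comment below.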

Florian Engelmann (engelmann) wrote (last edit):

Example Glance configuration using multiple Ceph clusters as stores:

[DEFAULT]
debug = False
log_file = /var/log/kolla/glance/glance-api.log
use_forwarded_for = true
bind_host = yyyyyyyyyyyyyyyy
bind_port = 9292
workers = 5
registry_host = glance.yyyyyyyyyyyyyyyyyyyyyyy
show_multiple_locations = True
cinder_catalog_info = volume:cinder:internalURL
transport_url = rabbit://yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy
image_size_cap = 1099511627776
show_image_direct_url = True
enabled_backends = az1:rbd, az2:rbd, az3:rbd
worker_self_reference_url = http://yyyyyyyyyyyyyyyyyyyyyyyyyy:9292

[database]
connection = mysql+pymysql://glance:yyyyyyyyyyyyyyyyyyyyyy:6033/glance
max_retries = -1

[keystone_authtoken]
www_authenticate_uri = http://keystone.yyyyyyyyyyyyyyyyy:5000
auth_url = http://keystone-admin.yyyyyyyyyyyyyyy:35357
auth_type = password
project_domain_id = default
user_domain_id = default
project_name = service
username = glance
password = yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy
memcache_security_strategy = ENCRYPT
memcache_secret_key = yyyyyyyyyyyyyyyyyyyy
memcached_servers = xxxxxxxxxx

[paste_deploy]
flavor = keystone

[glance_store]
default_backend = az1

[oslo_middleware]
enable_proxy_headers_parsing = True

[oslo_messaging_notifications]
transport_url = rabbit://yyyyyyyyyyyyyyyyyyy
driver = messagingv2
topics = notifications

[oslo_policy]
policy_file = policy.yaml

[profiler]
enabled = true
trace_sqlalchemy = true
hmac_keys = yyyyyyyyyyyyyyyyyy
connection_string = elasticsearch://elasticsearch.xxxxxxxxxxxxxxx:9200

[image_format]

[taskflow_executor]
conversion_format = raw

[os_glance_staging_store]
filesystem_store_datadir = /var/lib/glance/os_glance_staging_store

[os_glance_tasks_store]
filesystem_store_datadir = /var/lib/glance/os_glance_tasks_store

[az1]
store_description = AZ1 Image Store
rbd_store_pool = az1glance
rbd_store_user = glance
rbd_store_ceph_conf = /etc/ceph/ceph.az1.conf
rbd_store_chunk_size = 8

[az2]
store_description = AZ2 Image Store
rbd_store_pool = az2glance
rbd_store_user = glance
rbd_store_ceph_conf = /etc/ceph/ceph.az2.conf
rbd_store_chunk_size = 8

[az3]
store_description = AZ3 Image Store
rbd_store_pool = az3glance
rbd_store_user = glance
rbd_store_ceph_conf = /etc/ceph/ceph.az3.conf
rbd_store_chunk_size = 8

[cors]
allowed_origin = https://xxxxxxxxxxxxxxxxxxxxxxxx
