This issue can be reproduced easily when the nova, glance and cinder-ceph pools are created in one Ceph cluster.
[hooks/charmhelpers/contrib/storage/linux/ceph.py]
def create_pool(service, name, replicas=3):
    """Create a new RADOS pool sized for the current cluster.

    Skips creation (with a warning) if the pool already exists.

    :param service: ceph client id (``--id``) used for the CLI calls.
    :param name: name of the pool to create.
    :param replicas: expected replica count; used to size the number of
        placement groups.
    """
    if pool_exists(service, name):
        log("Ceph pool {} already exists, skipping creation".format(name),
            level=WARNING)
        return
    # Calculate the number of placement groups based
    # on upstream recommended best practices:
    # ~100 PGs per OSD, divided by the replica count.
    osds = get_osds(service)
    if osds:
        pgnum = (len(osds) * 100 // replicas)
    else:
        # NOTE(james-page): Default to 200 for older ceph versions
        # which don't support OSD query from cli
        pgnum = 200
    cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pgnum)]
    check_call(cmd)
This issue can be reproduced easily when the nova, glance and cinder-ceph pools are created in one Ceph cluster.
[hooks/charmhelpers/contrib/storage/linux/ceph.py]
def create_pool(service, name, replicas=3):
    """Create a new RADOS pool sized for the current cluster.

    Skips creation (with a warning) if the pool already exists.

    :param service: ceph client id (``--id``) used for the CLI calls.
    :param name: name of the pool to create.
    :param replicas: expected replica count; used to size the number of
        placement groups.
    """
    if pool_exists(service, name):
        log("Ceph pool {} already exists, skipping creation".format(name),
            level=WARNING)
        return
    # Calculate the number of placement groups based
    # on upstream recommended best practices:
    # ~100 PGs per OSD, divided by the replica count.
    osds = get_osds(service)
    if osds:
        pgnum = (len(osds) * 100 // replicas)
    else:
        # NOTE(james-page): Default to 200 for older ceph versions
        # which don't support OSD query from cli
        pgnum = 200
    cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pgnum)]
    check_call(cmd)