Ceph was deployed with Juju using a bundle YAML file. Below is the Ceph-related content from that file.

# CEPH configuration
osd-devices: &osd-devices >-
  /dev/disk/by-dname/bcache0
  /dev/disk/by-dname/bcache1
  /dev/disk/by-dname/bcache2
  /dev/disk/by-dname/bcache3
  /dev/disk/by-dname/bcache4
  /dev/disk/by-dname/bcache5
  /dev/disk/by-dname/bcache6
  /dev/disk/by-dname/bcache7
  /dev/disk/by-dname/bcache8
  /dev/disk/by-dname/bcache9
  /dev/disk/by-dname/bcache10
  /dev/disk/by-dname/bcache11

osd-config-flags: &osd-config-flags >
  {
    osd: {
      # enable discard, as BlueStore has to manage it
      # instead of the OS doing it on a file system
      # see https://github.com/ceph/ceph/pull/14727
      bdev_enable_discard: true,
      bdev_async_discard: true
    }
  }

customize-failure-domain: &customize-failure-domain False

# Expected OSD count is the total number of OSD disks that will be part of the Ceph cluster.
# Never set this number higher than, or much lower than, the real number; 10-20% less than
# the actual number is acceptable.
#expected-osd-count: &expected-osd-count 450
expected-osd-count: &expected-osd-count 192
expected-mon-count: &expected-mon-count 3

# CEPH access network
ceph-public-space: &ceph-public-space ceph-access-space
# CEPH replication network
ceph-cluster-space: &ceph-cluster-space ceph-cluster-space
overlay-space: &overlay-space overlay-space

# Workaround for 'only one default binding supported'
oam-space-constr: &oam-space-constr spaces=oam-space
ceph-access-constr: &ceph-access-constr spaces=ceph-access-space
combi-access-constr: &combi-access-constr spaces=ceph-access-space,oam-space

cinder:
  charm: cs:cinder-300
  num_units: 3
  constraints: *combi-access-constr
  bindings:
    "": *oam-space
    public: *public-space
    admin: *admin-space
    internal: *internal-space
    shared-db: *internal-space
    ceph: *ceph-public-space
  options:
    worker-multiplier: *worker-multiplier
    openstack-origin: *openstack-origin
    block-device: None
    glance-api-version: 2
    vip: *cinder-vip
    use-internal-endpoints: True
    region: *openstack-region
    config-flags: 'default_volume_type=cinder-ceph-ssd'
  to:
    - lxd:101
    - lxd:102
    - lxd:103

cinder-ceph:
  charm: cs:cinder-ceph-253
  options:
    restrict-ceph-pools: False

cinder-ceph-ssd:
  charm: cs:cinder-ceph-253
  options:
    restrict-ceph-pools: False

cinder-ceph-nvme:
  charm: cs:cinder-ceph-253
  options:
    restrict-ceph-pools: False

ceph-mon:
  charm: cs:ceph-mon-45
  num_units: 3
  bindings:
    "": *oam-space
    public: *ceph-public-space
    osd: *ceph-public-space
    client: *ceph-public-space
    admin: *ceph-public-space
    #cluster: *ceph-cluster-space
  options:
    expected-osd-count: *expected-osd-count
    source: *openstack-origin
    monitor-count: *expected-mon-count
    customize-failure-domain: *customize-failure-domain
  to:
    - 201
    - 202
    - 203

ceph-radosgw:
  charm: cs:ceph-radosgw-285
  num_units: 4
  constraints: *combi-access-constr
  bindings:
    "": *oam-space
    public: *public-space
    admin: *admin-space
    internal: *internal-space
    mon: *ceph-public-space
  options:
    source: *openstack-origin
    vip: *rados-gateway-vip
    region: *openstack-region
    operator-roles: "Member,admin"  # Contrail requires admin and not Admin
  to:
    - 211
    - 212
    - 213
    - 214

ceph-osd:
  charm: cs:ceph-osd-298
  num_units: 16
  bindings:
    "": *oam-space
    public: *ceph-public-space
    cluster: *ceph-cluster-space
    secrets-storage: *internal-space
    mon: *ceph-public-space
  options:
    osd-devices: *osd-devices
    source: *openstack-origin
    customize-failure-domain: *customize-failure-domain
    autotune: true
    aa-profile-mode: complain
    bluestore: true
    config-flags: *osd-config-flags
    sysctl: '{ kernel.pid_max : 2097152, vm.max_map_count : 524288, kernel.threads-max: 2097152, fs.aio-max-nr: 1048576 }'
    #osd-encrypt: True
    #osd-encrypt-keymanager: vault
  to:
    - 2001
    - 2002
    - 2003
    - 2004
    - 2005
    - 2006
    - 2007
    - 2008
    - 2009
    - 2010
    - 2011
    - 2012
    - 2013
    - 2014
    - 2015
    - 2016

Hope this helps.
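As a quick sanity check on the expected-osd-count value above: the bundle lists 12 bcache devices per host and 16 ceph-osd units, so the real OSD count is 12 x 16 = 192. The short Python sketch below simply re-states those numbers from this excerpt and verifies the configured value against the "never higher, at most 10-20% lower" guidance from the comment.

# Sanity check for expected-osd-count, using the numbers from the bundle above.
osd_devices_per_host = 12   # bcache0 .. bcache11 in the osd-devices anchor
ceph_osd_units = 16         # num_units under the ceph-osd application
expected_osd_count = 192    # value set in the bundle

actual = osd_devices_per_host * ceph_osd_units  # 12 * 16 = 192

# expected-osd-count should never exceed the real OSD count, and being
# 10-20% below it is acceptable per the bundle comment.
assert expected_osd_count <= actual, "expected-osd-count must not exceed the real OSD count"
assert expected_osd_count >= 0.8 * actual, "expected-osd-count is more than 20% below the real count"

print(f"{actual} OSDs across {ceph_osd_units} hosts; expected-osd-count={expected_osd_count} is within range")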
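If you want to confirm after deployment that the bdev discard flags from osd-config-flags actually reached the OSDs, something along these lines should work. This is only a sketch: it assumes the juju CLI is on your PATH, that a unit named ceph-osd/0 exists in the model, and that an OSD with ID 0 runs on that unit; adjust the unit name and OSD ID to match your cluster.

# Hypothetical post-deployment check of the runtime discard settings via the
# OSD admin socket, queried over "juju ssh". Unit name and OSD ID are assumptions.
import json
import subprocess

def osd_config_value(unit: str, osd_id: int, option: str) -> str:
    """Return a runtime config value from the given OSD's admin socket."""
    cmd = [
        "juju", "ssh", unit, "--",
        "sudo", "ceph", "daemon", f"osd.{osd_id}", "config", "get", option,
    ]
    out = subprocess.check_output(cmd, text=True)
    # "ceph daemon ... config get" prints a small JSON object keyed by the option name.
    return json.loads(out)[option]

for opt in ("bdev_enable_discard", "bdev_async_discard"):
    print(opt, "=", osd_config_value("ceph-osd/0", 0, opt))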