Comment 1 for bug 1882387

John Fulton (jfulton-org) wrote:

Ceph health is actually fine: the only thing reported is a "too few PGs per OSD" warning, and all 96 PGs are active+clean.

[root@oc0-controller-0 ~]# podman exec ceph-mon-$HOSTNAME ceph -s
  cluster:
    id: a7c1c1e4-5cd6-4f1c-8bc2-a37140ee09a8
    health: HEALTH_WARN
            too few PGs per OSD (8 < min 30)

  services:
    mon: 3 daemons, quorum oc0-controller-2,oc0-controller-0,oc0-controller-1 (age 22h)
    mgr: oc0-controller-2(active, since 22h), standbys: oc0-controller-0, oc0-controller-1
    osd: 12 osds: 12 up (since 22h), 12 in (since 22h)

  data:
    pools: 3 pools, 96 pgs
    objects: 0 objects, 0 B
    usage: 12 GiB used, 588 GiB / 600 GiB avail
    pgs: 96 active+clean

[root@oc0-controller-0 ~]#
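
For reference, the only thing behind the HEALTH_WARN above is the PG count: 8 PGs per OSD against the monitor's minimum of 30, while all 96 PGs are active+clean. A minimal sketch of how that warning could be cleared by raising pg_num, assuming a hypothetical pool name "vms" (the real pool names would come from "ceph osd pool ls detail") and that more PGs are actually wanted on this cluster:

# list pools and their current pg_num to pick the real pool names
podman exec ceph-mon-$HOSTNAME ceph osd pool ls detail

# raise pg_num (and the matching pgp_num) on a pool; "vms" is a placeholder
podman exec ceph-mon-$HOSTNAME ceph osd pool set vms pg_num 128
podman exec ceph-mon-$HOSTNAME ceph osd pool set vms pgp_num 128

Once pgp_num is bumped the cluster rebalances briefly, and the warning should clear when the PGs-per-OSD average reaches the minimum.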