Comment 5 for bug 1710773

John Fulton (jfulton-org) wrote:

- I reproduced the problem on my machine [1]
- The root cause seems to be that the Ceph cluster was in HEALTH_ERR [2]
- I then dynamically changed the pool size from 3 to 1 (appropriate for a single-OSD deploy) to get the cluster back to HEALTH_OK [3]
- The problem was then resolved and Glance no longer returned the 503 error [4]
- Thus, I think Giulio's patch will fix this [5]

[1]
```
(overcloud) [stack@undercloud ~]$ ./test-glance.sh
Using existing images in /home/stack/cirros_images
503 Service Unavailable: No server is available to handle this request. (HTTP 503)
(overcloud) [stack@undercloud ~]$
```
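test-glance.sh itself is not attached to this comment. As a rough, hypothetical stand-in (not the actual script), it does something along these lines: cache a cirros image locally, then upload it through the Glance API, which is where the 503 surfaces:

```
#!/bin/bash
# Hypothetical stand-in for test-glance.sh (the real script is not
# attached to this comment): upload a cirros image via Glance to
# exercise the API. Image version and filenames are assumptions.
set -e
source ~/overcloudrc
IMG_DIR=/home/stack/cirros_images
IMG=cirros-0.3.5-x86_64-disk.img
if [ -f "$IMG_DIR/$IMG" ]; then
    echo "Using existing images in $IMG_DIR"
else
    mkdir -p "$IMG_DIR"
    curl -sLo "$IMG_DIR/$IMG" \
        "http://download.cirros-cloud.net/0.3.5/$IMG"
fi
openstack image create cirros-test --disk-format qcow2 \
    --container-format bare --file "$IMG_DIR/$IMG"
```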

[2]
```
[root@overcloud-controller-0 ~]# ceph -s
    cluster 4b5c8c0a-ff60-454b-a1b4-9747aa737d19
     health HEALTH_ERR
            288 pgs are stuck inactive for more than 300 seconds
            288 pgs degraded
            288 pgs stuck degraded
            288 pgs stuck inactive
            288 pgs stuck unclean
            288 pgs stuck undersized
            288 pgs undersized
            2 requests are blocked > 32 sec
     monmap e1: 1 mons at {overcloud-controller-0=192.168.24.16:6789/0}
            election epoch 3, quorum 0 overcloud-controller-0
     osdmap e17: 1 osds: 1 up, 1 in
            flags sortbitwise,require_jewel_osds
      pgmap v3073: 288 pgs, 8 pools, 0 bytes data, 0 objects
            20171 MB used, 31016 MB / 51187 MB avail
                 288 undersized+degraded+peered
[root@overcloud-controller-0 ~]#
```
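The reason for the HEALTH_ERR: the pools were created with a replica count (size) of 3, but the deploy has only 1 OSD, so no PG can place all of its replicas and all 288 PGs stay undersized+degraded+peered instead of going active+clean. The mismatch is easy to confirm with standard ceph commands:

```
# Confirm the replica-count vs. OSD-count mismatch:
ceph osd pool get images size   # reports "size: 3"
ceph osd tree                   # shows only one OSD up/in
ceph health detail | head       # lists the stuck undersized PGs
```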

[3]
```
[root@overcloud-controller-0 ~]# ceph osd pool get images size
size: 3
[root@overcloud-controller-0 ~]# for pool in rbd backups images manila_data manila_metadata metrics vms volumes ; do \
> ceph osd pool set $pool size 1; \
> ceph osd pool set $pool min_size 1; \
> done
set pool 0 size to 1
set pool 0 min_size to 1
set pool 1 size to 1
set pool 1 min_size to 1
set pool 2 size to 1
set pool 2 min_size to 1
set pool 3 size to 1
set pool 3 min_size to 1
set pool 4 size to 1
set pool 4 min_size to 1
set pool 5 size to 1
set pool 5 min_size to 1
set pool 6 size to 1
set pool 6 min_size to 1
set pool 7 size to 1
set pool 7 min_size to 1
[root@overcloud-controller-0 ~]# ceph -s
    cluster 4b5c8c0a-ff60-454b-a1b4-9747aa737d19
     health HEALTH_WARN
            1 requests are blocked > 32 sec
     monmap e1: 1 mons at {overcloud-controller-0=192.168.24.16:6789/0}
            election epoch 3, quorum 0 overcloud-controller-0
     osdmap e33: 1 osds: 1 up, 1 in
            flags sortbitwise,require_jewel_osds
      pgmap v9028: 288 pgs, 8 pools, 0 bytes data, 1 objects
            20680 MB used, 30507 MB / 51187 MB avail
                 288 active+clean
[root@overcloud-controller-0 ~]# ceph -s
    cluster 4b5c8c0a-ff60-454b-a1b4-9747aa737d19
     health HEALTH_OK
     monmap e1: 1 mons at {overcloud-controller-0=192.168.24.16:6789/0}
            election epoch 3, quorum 0 overcloud-controller-0
     osdmap e33: 1 osds: 1 up, 1 in
            flags sortbitwise,require_jewel_osds
      pgmap v9029: 288 pgs, 8 pools, 0 bytes data, 1 objects
            20681 MB used, 30506 MB / 51187 MB avail
                 288 active+clean
[root@overcloud-controller-0 ~]#
```
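(The intermediate HEALTH_WARN in the first `ceph -s` above is just the previously blocked request draining; a second `ceph -s` shortly afterwards reports HEALTH_OK.)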

[4]
```
(undercloud) [stack@undercloud ~]$ ./test-glance.sh
Using existing images in /home/stack/cirros_images
(undercloud) [stack@undercloud ~]$
```

[5] https://review.openstack.org/#/c/494176
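
One caveat on [3]: runtime `ceph osd pool set` changes won't survive a redeploy that recreates the pools. I haven't re-checked the exact content of [5], but the deploy-time equivalent for a single-OSD environment is to lower the default replica count through a Heat environment file using the existing CephPoolDefaultSize parameter. A sketch, assuming that parameter is honored by the templates (the file name is made up):

```
# Hypothetical environment file for a 1-OSD dev deploy; pass it with
# `openstack overcloud deploy ... -e ~/ceph-single-osd.yaml`
cat > ~/ceph-single-osd.yaml <<'EOF'
parameter_defaults:
  CephPoolDefaultSize: 1
EOF
```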