Comment 23 for bug 1832047

Qianshui Jiang (qianshui) wrote : Re: 200.006 alarm "controller-0 is degraded due to the failure of its 'pci-irq-affinity-agent' process" after reboot

We tried to reproduce the bug, but we did not see the same behavior using the .iso file that came out on 6.7.
In our test, the ovs and libvirtd pods appear to work properly after rebooting controller-0. Here is the log information:

controller-0:~$ openstack hypervisor list
+----+---------------------+-----------------+---------------+-------+
| ID | Hypervisor Hostname | Hypervisor Type | Host IP       | State |
+----+---------------------+-----------------+---------------+-------+
|  4 | controller-1        | QEMU            | 192.168.206.4 | up    |
|  6 | controller-0        | QEMU            | 192.168.206.3 | up    |
+----+---------------------+-----------------+---------------+-------+
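
For reference, a minimal sketch of the pod/alarm checks implied above (assuming the standard kubectl and StarlingX fm CLIs on the active controller; the grep filters are only illustrative):

controller-0:~$ kubectl get pods -n openstack -o wide | grep -E 'libvirt|openvswitch'
controller-0:~$ fm alarm-list | grep 200.006

If the issue reproduces, the 200.006 alarm for the pci-irq-affinity-agent process should appear in the fm alarm-list output.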

controller-0:/home/wrsroot# kubectl logs -n openstack libvirt-libvirt-default-69mlr
+ '[' -n '' ']'
+ rm -f /var/run/libvirtd.pid
+ [[ -c /dev/kvm ]]
+ chmod 660 /dev/kvm
+ chown root:kvm /dev/kvm
+ CGROUPS=
+ for CGROUP in cpu rdma hugetlb
+ '[' -d /sys/fs/cgroup/cpu ']'
+ CGROUPS+=cpu,
+ for CGROUP in cpu rdma hugetlb
+ '[' -d /sys/fs/cgroup/rdma ']'
+ for CGROUP in cpu rdma hugetlb
+ '[' -d /sys/fs/cgroup/hugetlb ']'
+ CGROUPS+=hugetlb,
+ cgcreate -g cpu,hugetlb:/osh-libvirt
++ cat /proc/meminfo
++ grep HugePages_Total
++ tr -cd '[:digit:]'
+ hp_count=78225
+ '[' 078225 -gt 0 ']'
+ echo 'INFO: Detected hugepage count of '\''78225'\''. Enabling hugepage settings for libvirt/qemu.'
INFO: Detected hugepage count of '78225'. Enabling hugepage settings for libvirt/qemu.
++ grep KVM_HUGEPAGES=0 /etc/default/qemu-kvm
grep: /etc/default/qemu-kvm: No such file or directory
+ '[' -n '' ']'
+ echo KVM_HUGEPAGES=1
+ '[' '!' -d /dev/hugepages ']'
+ '[' -d /sys/fs/cgroup/hugetlb ']'
++ ls /sys/fs/cgroup/hugetlb/k8s-infra/hugetlb.1GB.limit_in_bytes /sys/fs/cgroup/hugetlb/k8s-infra/hugetlb.2MB.limit_in_bytes
+ limits='/sys/fs/cgroup/hugetlb/k8s-infra/hugetlb.1GB.limit_in_bytes
/sys/fs/cgroup/hugetlb/k8s-infra/hugetlb.2MB.limit_in_bytes'
+ for limit in '$limits'
+++ awk -F: '($2~/hugetlb/){print $3}' /proc/self/cgroup
++ dirname /k8s-infra/kubepods/besteffort/pod23696212-9be3-11e9-b11d-3cfdfed21024/ecce25603440da89fe3b6d5539f051e6b47943ac196f73e9afdc694a1113f38c
++ basename /sys/fs/cgroup/hugetlb/k8s-infra/hugetlb.1GB.limit_in_bytes
+ target=/sys/fs/cgroup/hugetlb//k8s-infra/kubepods/besteffort/pod23696212-9be3-11e9-b11d-3cfdfed21024/hugetlb.1GB.limit_in_bytes
+ '[' '!' -f /sys/fs/cgroup/hugetlb//k8s-infra/kubepods/besteffort/pod23696212-9be3-11e9-b11d-3cfdfed21024/hugetlb.1GB.limit_in_bytes ']'
++ cat /sys/fs/cgroup/hugetlb/k8s-infra/hugetlb.1GB.limit_in_bytes
+ echo 9223372036854771712
+ for limit in '$limits'
+++ awk -F: '($2~/hugetlb/){print $3}' /proc/self/cgroup
++ dirname /k8s-infra/kubepods/besteffort/pod23696212-9be3-11e9-b11d-3cfdfed21024/ecce25603440da89fe3b6d5539f051e6b47943ac196f73e9afdc694a1113f38c
++ basename /sys/fs/cgroup/hugetlb/k8s-infra/hugetlb.2MB.limit_in_bytes
+ target=/sys/fs/cgroup/hugetlb//k8s-infra/kubepods/besteffort/pod23696212-9be3-11e9-b11d-3cfdfed21024/hugetlb.2MB.limit_in_bytes
+ '[' '!' -f /sys/fs/cgroup/hugetlb//k8s-infra/kubepods/besteffort/pod23696212-9be3-11e9-b11d-3cfdfed21024/hugetlb.2MB.limit_in_bytes ']'
++ cat /sys/fs/cgroup/hugetlb/k8s-infra/hugetlb.2MB.limit_in_bytes
+ echo 9223372036854771712
++ cat /proc/meminfo
++ grep Hugepagesize
++ tr -cd '[:digit:]'
+ default_hp_kb=2048
++ cat /sys/kernel/mm/hugepages/hugepages-2048kB/free_hugepages
++ tr -cd '[:digit:]'
+ num_free_pages=78225
+ echo 'INFO: '\''78225'\'' free hugepages of size 2048kB'
+ '[' 078225 -gt 0 ']'
INFO: '78225' free hugepages of size 2048kB
+ fallocate -o0 -l 2048 /dev/hugepages/foo
+ rm /dev/hugepages/foo
+ '[' -n '' ']'
+ exec cgexec -g cpu,hugetlb:/osh-libvirt systemd-run --scope --slice=system libvirtd --listen
Running scope as unit run-715348.scope.
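
For readability, the trace above corresponds roughly to the following entrypoint logic (a simplified sketch reconstructed only from the trace, not the verbatim openstack-helm script):

#!/bin/bash
# Simplified reconstruction of the libvirt entrypoint behavior seen in the trace.
set -ex

# Create a dedicated cgroup for libvirt/qemu under the controllers that exist.
CGROUPS=""
for CGROUP in cpu rdma hugetlb; do
  [ -d "/sys/fs/cgroup/${CGROUP}" ] && CGROUPS+="${CGROUP},"
done
cgcreate -g "${CGROUPS%,}:/osh-libvirt"

# If hugepages are configured, copy the k8s-infra hugetlb limits into this
# pod's cgroup so qemu can allocate hugepage-backed guest memory.
hp_count="$(grep HugePages_Total /proc/meminfo | tr -cd '[:digit:]')"
if [ "0${hp_count}" -gt 0 ]; then
  echo "INFO: Detected hugepage count of '${hp_count}'."
  if [ -d /sys/fs/cgroup/hugetlb ]; then
    limits="$(ls /sys/fs/cgroup/hugetlb/k8s-infra/hugetlb.*.limit_in_bytes)"
    pod_cgroup="$(dirname "$(awk -F: '($2~/hugetlb/){print $3}' /proc/self/cgroup)")"
    for limit in $limits; do
      target="/sys/fs/cgroup/hugetlb/${pod_cgroup}/$(basename "$limit")"
      [ -f "$target" ] && cat "$limit" > "$target"
    done
  fi
  # Sanity check: verify that a hugepage can actually be allocated.
  fallocate -o0 -l "$(grep Hugepagesize /proc/meminfo | tr -cd '[:digit:]')" /dev/hugepages/foo
  rm /dev/hugepages/foo
fi

# Finally run libvirtd inside the osh-libvirt cgroup, in a transient systemd scope.
exec cgexec -g "${CGROUPS%,}:/osh-libvirt" systemd-run --scope --slice=system libvirtd --listen

The final "Running scope as unit run-715348.scope." line in the log indicates that libvirtd was launched successfully in its transient scope.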