Comment 4 for bug 1952363

Andre Ruiz (andre-ruiz) wrote (last edit):

This is a generic issue, not something specific to the deployment I'm working on now. I was able to reproduce it on LXD with a very simple bundle (just keystone + database + hacluster).

This is the bundle I used: https://pastebin.canonical.com/p/HTy3csYwjN/
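
For reference, a bundle along those lines would look roughly like the sketch below. This is a hypothetical reconstruction, not the contents of the paste: the charm names, the vip option and the relations are assumptions based on the usual OpenStack charm layout.

# Hypothetical reconstruction for illustration only;
# the actual bundle is in the paste above.
series: focal
applications:
  keystone:
    charm: keystone
    num_units: 3
    options:
      vip: 198.18.2.49   # the VIP seen in the output below (assumed)
  keystone-hacluster:
    charm: hacluster
  mysql-innodb-cluster:
    charm: mysql-innodb-cluster
    num_units: 3
  keystone-mysql-router:
    charm: mysql-router
relations:
- ["keystone:ha", "keystone-hacluster:ha"]
- ["keystone:shared-db", "keystone-mysql-router:shared-db"]
- ["keystone-mysql-router:db-router", "mysql-innodb-cluster:db-router"]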

This is before the change:

root@juju-88a011-3:~# ip addr list
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
157: eth0@if158: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default qlen 1000
    link/ether 00:16:3e:d2:78:07 brd ff:ff:ff:ff:ff:ff link-netnsid 0
    inet 198.18.2.188/24 brd 198.18.2.255 scope global dynamic eth0
       valid_lft 2148sec preferred_lft 2148sec
    inet 198.18.2.49/24 brd 198.18.2.255 scope global secondary eth0
       valid_lft forever preferred_lft forever
    inet6 fe80::216:3eff:fed2:7807/64 scope link
       valid_lft forever preferred_lft forever
159: eth1@if160: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default qlen 1000
    link/ether 00:16:3e:65:6f:ab brd ff:ff:ff:ff:ff:ff link-netnsid 0
    inet6 fe80::216:3eff:fe65:6fab/64 scope link
       valid_lft forever preferred_lft forever

root@juju-88a011-3:~# crm status
Cluster Summary:
  * Stack: corosync
  * Current DC: juju-88a011-3 (version 2.0.3-4b1f869f0f) - partition with quorum
  * Last updated: Fri Nov 26 14:52:53 2021
  * Last change: Fri Nov 26 14:52:45 2021 by root via crm_node on juju-88a011-3
  * 3 nodes configured
  * 4 resource instances configured

Node List:
  * Online: [ juju-88a011-3 juju-88a011-4 juju-88a011-5 ]

Full List of Resources:
  * Resource Group: grp_ks_vips:
    * res_ks_4f63c45_vip (ocf::heartbeat:IPaddr2): Started juju-88a011-3
  * Clone Set: cl_ks_haproxy [res_ks_haproxy]:
    * Started: [ juju-88a011-3 juju-88a011-4 juju-88a011-5 ]

root@juju-88a011-3:~# crm configure show
node 1000: juju-88a011-3
node 1001: juju-88a011-4
node 1002: juju-88a011-5
primitive res_ks_4f63c45_vip IPaddr2 \
 params ip=198.18.2.49 \
 op monitor timeout=20s interval=10s \
 op_params depth=0
primitive res_ks_haproxy lsb:haproxy \
 meta migration-threshold=INFINITY failure-timeout=5s \
 op monitor interval=5s
group grp_ks_vips res_ks_4f63c45_vip
clone cl_ks_haproxy res_ks_haproxy
property cib-bootstrap-options: \
 have-watchdog=false \
 dc-version=2.0.3-4b1f869f0f \
 cluster-infrastructure=corosync \
 cluster-name=debian \
 no-quorum-policy=stop \
 cluster-recheck-interval=60 \
 stonith-enabled=false \
 last-lrm-refresh=1637938243
rsc_defaults rsc-options: \
 resource-stickiness=100 \
 failure-timeout=180
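
Note that the VIP primitive above only sets ip; with no nic or cidr_netmask parameters, the IPaddr2 agent picks the interface and prefix length itself by matching against the addresses already on the host, which is why the VIP shows up as a /24 secondary on eth0. Purely as an illustration (this is not what the charm renders), the same primitive with those parameters pinned explicitly would read:

primitive res_ks_4f63c45_vip IPaddr2 \
 params ip=198.18.2.49 cidr_netmask=24 nic=eth0 \
 op monitor timeout=20s interval=10s \
 op_params depth=0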