This is an OpenNebula cluster. AFAICS all hosts are affected as soon as they run with the new versions. Hosts have different hardware but similar network configuration: 2 bonded NICs and a bridged interface to be used by VMs.
Network setup on VH host
auto lo
iface lo inet loopback
#eth0 is manually configured, and slave to the "bond1" bonded NIC
auto eth0
iface eth0 inet manual
bond-master bond1
#eth1 ditto, thus creating a 2-link bond.
auto eth1
iface eth1 inet manual
bond-master bond1
# bond1 is the bonded NIC and can be used like any other normal NIC.
auto bond1
iface bond1 inet manual
# bond1 uses standard IEEE 802.3ad LACP bonding protocol
bond-mode 802.3ad
bond-miimon 100
bond-lacp-rate 1
bond-xmit-hash-policy layer3+4
bond-slaves eth0 eth1
# Bridged interface to be used by VMs
auto ipblsrvrs
iface ipblsrvrs inet static
bridge_ports bond1
address 172.20.4.106
gateway 172.20.4.1
netmask 255.255.252.0
bridge_stp on
bridge_fd 1
bridge_hello 2
bridge_maxage 12
# Guest XML created by OpenNebula looks like this:
This is an OpenNebula cluster. AFAICS all hosts are affected as soon as they run with the new versions. Hosts have different hardware but similar network configuration: 2 bonded NICs and a bridged interface to be used by VMs.
Network setup on VH host
auto lo
iface lo inet loopback
#eth0 is manually configured, and slave to the "bond1" bonded NIC
auto eth0
iface eth0 inet manual
bond-master bond1
#eth1 ditto, thus creating a 2-link bond.
auto eth1
iface eth1 inet manual
bond-master bond1
# bond1 is the bonded NIC and can be used like any other normal NIC.
auto bond1
iface bond1 inet manual
# bond1 uses standard IEEE 802.3ad LACP bonding protocol
bond-mode 802.3ad
bond-miimon 100
bond-lacp-rate 1
bond-xmit-hash-policy layer3+4
bond-slaves eth0 eth1
# Bridged interface to be used by VMs
auto ipblsrvrs
iface ipblsrvrs inet static
bridge_ports bond1
address 172.20.4.106
gateway 172.20.4.1
netmask 255.255.252.0
bridge_stp on
bridge_fd 1
bridge_hello 2
bridge_maxage 12
# Guest XML created by OpenNebula looks like this:
<domain type='kvm' xmlns:qemu='http://libvirt.org/schemas/domain/qemu/1.0'>
  <name>one-1110</name>
  <vcpu><![CDATA[2]]></vcpu>
  <cputune>
    <shares>205</shares>
  </cputune>
  <memory>4194304</memory>
  <os>
    <type arch='x86_64'>hvm</type>
  </os>
  <cpu mode='host-passthrough'>
  </cpu>
  <devices>
    <emulator><![CDATA[/usr/bin/kvm]]></emulator>
    <disk type='file' device='disk'>
      <source file='/var/lib/one//datastores/101/1110/disk.0'/>
      <target dev='vda'/>
      <boot order='2'/>
      <driver name='qemu' type='raw' cache='writeback'/>
    </disk>
    <disk type='file' device='cdrom'>
      <source file='/var/lib/one//datastores/101/1110/disk.1'/>
      <target dev='hda'/>
      <readonly/>
      <driver name='qemu' type='raw'/>
    </disk>
    <interface type='bridge'>
      <source bridge='ipblsrvrs'/>
      <mac address='02:00:3d:86:51:7b'/>
      <target dev='one-1110-0'/>
      <boot order='1'/>
      <model type='virtio'/>
    </interface>
    <graphics type='vnc' listen='0.0.0.0' port='7010' passwd='secret'/>
  </devices>
  <features>
    <acpi/>
  </features>
  <devices>
    <serial type='pty'>
      <source path='/dev/pts/5'/>
      <target port='0'/>
    </serial>
    <console type='pty' tty='/dev/pts/5'>
      <source path='/dev/pts/5'/>
      <target port='0'/>
    </console>
  </devices>
  <metadata>
    <system_datastore><![CDATA[/var/lib/one//datastores/101/1110]]></system_datastore>
  </metadata>
</domain>