Draft: Adjust capm3-virt to 2 interfaces per bond
What does this MR do and why?
This MR tests the idea implemented in upstream sylva-projects/sylva-elements/container-images/libvirt-metal!24 (closed).
Although this may not be the best option, some capm3-virt jobs have passed with this setup in upstream pipelines/1184687155.
Please share any concerns or suggestions for alternative approaches.
Related reference(s)
For #989 (closed)
Test coverage
What we get in a local libvirt-metal deployment (Ubuntu-based OS):
root@management-cluster-control-plane-xkrbk:/home/sylva-user # cat /etc/netplan/50-cloud-init.yaml
# This file is generated from information provided by the datasource. Changes
# to it will not persist across an instance reboot. To disable cloud-init's
# network configuration capabilities, write a file
# /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg with the following:
# network: {config: disabled}
network:
version: 2
ethernets:
ens4:
match:
macaddress: '52:54:00:44:44:00'
mtu: 1500
set-name: ens4
ens5:
match:
macaddress: '52:54:00:55:55:00'
mtu: 1500
set-name: ens5
ens6:
match:
macaddress: 52:54:00:66:66:00
mtu: 1500
set-name: ens6
ens7:
match:
macaddress: 52:54:00:77:77:00
mtu: 1500
set-name: ens7
bonds:
bond0:
addresses:
- 192.168.10.20/24
interfaces:
- ens4
- ens6
macaddress: '52:54:00:44:44:00'
mtu: 1500
nameservers:
addresses:
- 10.193.21.160
search: []
parameters:
mode: balance-tlb
transmit-hash-policy: layer3+4
bond1:
addresses:
- 192.168.100.20/24
interfaces:
- ens5
- ens7
macaddress: '52:54:00:55:55:00'
mtu: 1500
nameservers:
addresses:
- 10.193.21.160
search: []
parameters:
mode: balance-tlb
transmit-hash-policy: layer3+4
routes:
- to: 0.0.0.0/0
via: 192.168.100.1
root@management-cluster-control-plane-xkrbk:/home/sylva-user#
root@management-cluster-control-plane-xkrbk:/home/sylva-user # ip link show bond0
6: bond0: <NO-CARRIER,BROADCAST,MULTICAST,MASTER,UP> mtu 1500 qdisc noqueue state DOWN mode DEFAULT group default qlen 1000
link/ether 52:54:00:44:44:00 brd ff:ff:ff:ff:ff:ff
root@management-cluster-control-plane-xkrbk:/home/sylva-user # ip link show bond1
7: bond1: <NO-CARRIER,BROADCAST,MULTICAST,MASTER,UP> mtu 1500 qdisc noqueue state DOWN mode DEFAULT group default qlen 1000
link/ether 52:54:00:55:55:00 brd ff:ff:ff:ff:ff:ff
root@management-cluster-control-plane-xkrbk:/home/sylva-user#
root@management-cluster-control-plane-xkrbk:/home/sylva-user # cat /proc/net/bonding/bond0
Ethernet Channel Bonding Driver: v5.15.0-92-generic
Bonding Mode: transmit load balancing
Primary Slave: None
Currently Active Slave: None
MII Status: down
MII Polling Interval (ms): 100
Up Delay (ms): 0
Down Delay (ms): 0
Peer Notification Delay (ms): 0
Slave Interface: ens6
MII Status: down
Speed: Unknown
Duplex: Unknown
Link Failure Count: 1
Permanent HW addr: 52:54:00:66:66:00
Slave queue ID: 0
Slave Interface: ens4
MII Status: down
Speed: Unknown
Duplex: Unknown
Link Failure Count: 1
Permanent HW addr: 52:54:00:44:44:00
Slave queue ID: 0
root@management-cluster-control-plane-xkrbk:/home/sylva-user # cat /proc/net/bonding/bond1
Ethernet Channel Bonding Driver: v5.15.0-92-generic
Bonding Mode: transmit load balancing
Primary Slave: None
Currently Active Slave: None
MII Status: down
MII Polling Interval (ms): 100
Up Delay (ms): 0
Down Delay (ms): 0
Peer Notification Delay (ms): 0
Slave Interface: ens7
MII Status: down
Speed: Unknown
Duplex: Unknown
Link Failure Count: 1
Permanent HW addr: 52:54:00:77:77:00
Slave queue ID: 0
Slave Interface: ens5
MII Status: down
Speed: Unknown
Duplex: Unknown
Link Failure Count: 1
Permanent HW addr: 52:54:00:55:55:00
Slave queue ID: 0
root@management-cluster-control-plane-xkrbk:/home/sylva-user#
Edited by Bogdan-Adrian Burciu