Commit d66dea94 authored by Piotr Szlenk

Demo preps

parent 3dfdefe0
@@ -75,6 +75,7 @@ Vagrant.configure("2") do |config|
k8smaster1.vm.provision "file", source: "k8s-provisioning/kubeadm-init.sh", destination: "$HOME/k8s-provisioning/05_kubeadm-init.sh"
k8smaster1.vm.provision "file", source: "k8s-provisioning/labels.k8s-nodes.sh", destination: "$HOME/k8s-provisioning/06_labels.k8s-nodes.sh"
k8smaster1.vm.provision "file", source: "calico/", destination: "$HOME/calico"
k8smaster1.vm.provision "file", source: "demo/", destination: "$HOME/demo"
end
config.vm.define("k8s-node-l1-1") do |k8snode1|
@@ -619,6 +619,8 @@ spec:
# no effect. This should fall within `--cluster-cidr`.
- name: CALICO_IPV4POOL_CIDR
value: "10.64.0.0/12"
- name: CALICO_ADVERTISE_CLUSTER_IPS
value: "10.80.0.0/12"
# Disable file logging so `kubectl logs` works.
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
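Note: CALICO_ADVERTISE_CLUSTER_IPS makes each calico/node instance advertise the Kubernetes service cluster-IP range (10.80.0.0/12 here) to its BGP peers, so the fabric learns routes to service IPs as well as pod IPs. A minimal sanity check, assuming the NCLU CLI used elsewhere in this demo:

# After calico.yaml is applied, the advertised service range should
# appear in a leaf's routing table as a BGP route
vagrant ssh leaf1 -c "sudo net show route | grep 10.80"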
@@ -29,8 +29,10 @@ bgp:
ipv4_unicast_peers:
- peer_ip: 172.16.0.0
peer_as: 65001
softinbound: true
- peer_ip: 172.16.0.4
peer_as: 65002
softinbound: true
ipv4_unicast_peer_groups:
- group_name: K8S-NODES
@@ -38,6 +40,7 @@ bgp:
group_range: 10.0.101.0/24
rr_client: true
next_hop_self: true
softinbound: true
routemaps:
- name: CONNECTED_TO_BGP
@@ -28,8 +28,10 @@ bgp:
ipv4_unicast_peers:
- peer_ip: 172.16.0.2
peer_as: 65001
softinbound: true
- peer_ip: 172.16.0.6
peer_as: 65002
softinbound: true
ipv4_unicast_peer_groups:
- group_name: K8S-NODES
@@ -37,6 +39,7 @@ bgp:
group_range: 10.0.102.0/24
rr_client: true
next_hop_self: true
softinbound: true
routemaps:
- name: CONNECTED_TO_BGP
@@ -20,8 +20,10 @@ bgp:
ipv4_unicast_peers:
- peer_ip: 172.16.0.1
peer_as: 65101
softinbound: true
- peer_ip: 172.16.0.3
peer_as: 65102
softinbound: true
routemaps:
- name: CONNECTED_TO_BGP
@@ -20,8 +20,10 @@ bgp:
ipv4_unicast_peers:
- peer_ip: 172.16.0.5
peer_as: 65101
softinbound: true
- peer_ip: 172.16.0.7
peer_as: 65102
softinbound: true
routemaps:
- name: CONNECTED_TO_BGP
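The softinbound flag added to all four switch configs enables BGP soft-reconfiguration inbound (wired up in the template change below): the router keeps an unmodified copy of every route received from a peer, so inbound policy can be re-evaluated and received routes inspected without bouncing the session. A quick check from leaf1, as a sketch assuming FRR's vtysh is available on the Cumulus VMs and using leaf1's spine peer 172.16.0.0 from the config above:

# Routes received from the peer before inbound policy is applied,
# visible only because soft-reconfiguration inbound stores them
vagrant ssh leaf1 -c 'sudo vtysh -c "show ip bgp neighbors 172.16.0.0 received-routes"'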
@@ -10,6 +10,9 @@
{% if peer['multihop'] is defined and peer['multihop'] %}
add bgp neighbor {{peer['peer_ip']}} ebgp-multihop
{% endif %}
{% if peer['softinbound'] is defined and peer['softinbound'] %}
add bgp neighbor {{peer['peer_ip']}} soft-reconfiguration inbound
{% endif %}
{% endfor %}
commit: false
notify: "Configuration was changed"
@@ -33,6 +36,9 @@
{% if group['next_hop_self'] is defined and group['next_hop_self'] %}
add bgp neighbor {{group['group_name']}} next-hop-self
{% endif %}
{% if group['softinbound'] is defined and group['softinbound'] %}
add bgp neighbor {{group['group_name']}} soft-reconfiguration inbound
{% endif %}
{% endfor %}
commit: false
notify: "Configuration was changed"
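Rendered for leaf1's two peer entries above, the new loop body produces the following lines (a sketch; it assumes the surrounding task is Ansible's nclu module, suggested by the commit: flag, which accepts commands without the leading "net"):

add bgp neighbor 172.16.0.0 soft-reconfiguration inbound
add bgp neighbor 172.16.0.4 soft-reconfiguration inbound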
# Create namespace, deployment and service
kubectl apply -f demo/namespace.yaml
kubectl apply -f demo/deployment.yaml
kubectl apply -f demo/service.yaml
# Check pods and services
kubectl get pods -n space1 -o wide
kubectl get services -n space1 -o wide
# Verify route propagation
vagrant ssh leaf1 -c "sudo net show bgp"
vagrant ssh leaf2 -c "sudo net show bgp"
# Scale deployment
kubectl scale deployment/nginx-deployment --replicas=6 -n space1
# Check pods
kubectl get pods -n space1 -o wide
# Verify route propagation
vagrant ssh leaf1 -c "sudo net show bgp"
vagrant ssh leaf2 -c "sudo net show bgp"
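# Once routes propagate, the service should answer on its cluster IP from
# anywhere in the fabric. A sketch, assuming the leaves have learned the
# advertised 10.80.0.0/12 service range:
CLUSTER_IP=$(kubectl get service nginx-service -n space1 -o jsonpath='{.spec.clusterIP}')
vagrant ssh leaf1 -c "curl -s http://${CLUSTER_IP}:6969/ | head -n 4"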
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
run: nginx-deployment
name: nginx-deployment
namespace: space1
spec:
replicas: 1
selector:
matchLabels:
run: nginx-deployment
strategy:
rollingUpdate:
maxSurge: 25%
maxUnavailable: 25%
type: RollingUpdate
template:
metadata:
labels:
run: nginx-deployment
spec:
containers:
- image: nginx
name: nginx-deployment
resources: {}
restartPolicy: Always
apiVersion: v1
kind: Namespace
metadata:
name: space1
apiVersion: v1
kind: Service
metadata:
creationTimestamp: null
labels:
app: nginx-service
name: nginx-service
namespace: space1
spec:
ports:
- name: "6969"
port: 6969
protocol: TCP
targetPort: 80
selector:
run: nginx-deployment
type: LoadBalancer
externalTrafficPolicy: Local
status:
loadBalancer: {}
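With externalTrafficPolicy: Local, traffic is only delivered to nodes that host a ready endpoint, which preserves the client source IP instead of SNATing through an intermediate node. To see which nodes currently back the service:

kubectl get endpoints nginx-service -n space1 -o wide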
#!/bin/sh
kubectl apply -f https://docs.projectcalico.org/v3.8/manifests/calicoctl.yaml
# kubectl exec -ti -n kube-system calicoctl -- /calicoctl get profiles -o wide
#alias calicoctl="kubectl exec -i -n kube-system calicoctl /calicoctl -- "
# calicoctl create -f - < my_manifest.yaml
\ No newline at end of file
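Once the calicoctl pod is up, it can confirm the per-node BGP settings (a sketch using the exec form from the comments above; -o wide shows each node's ASN and IPv4 address):

kubectl exec -ti -n kube-system calicoctl -- /calicoctl get nodes -o wide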
@@ -9,6 +9,8 @@ vagrant ssh k8s-master-l1-1 -c "kubectl label nodes k8s-node-l2-2 'asnum=65102
vagrant ssh k8s-master-l1-1 -c "kubectl apply -f calico/calicoctl.yaml"
vagrant ssh k8s-master-l1-1 -c "kubectl apply -f calico/calico.yaml"
sleep 30
vagrant ssh k8s-master-l1-1 -c "kubectl exec -i -n kube-system calicoctl -- calicoctl apply -f - < calico/calico.nodes.yaml"
vagrant ssh k8s-master-l1-1 -c "kubectl exec -i -n kube-system calicoctl -- calicoctl apply -f - < calico/calico.bgpconfig.yaml"
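To confirm the piped-in manifests were accepted, the applied BGP configuration can be read back through the same exec pattern (a sketch):

vagrant ssh k8s-master-l1-1 -c "kubectl exec -i -n kube-system calicoctl -- calicoctl get bgpConfiguration -o yaml"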