Commit f135cfb5 authored by MrMan's avatar MrMan

Merge branch 'port-config'

parents 404e9424 cea5fe10
Copyright 2018 vados@vadosware.io
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
---
ssh_user: root
# This is only necessary in the case where you DON'T have passwordless sudo @ the start.
ssh_initial_password: ubuntu
# containerd (a container runtime) - https://github.com/containerd/
containerd_version: "1.1.2"
containerd_url: "https://github.com/containerd/containerd/releases/download/v{{ containerd_version }}/containerd-{{ containerd_version }}.linux-amd64.tar.gz"
containerd_sha256_checksum: 7c5b23e425eda65cf3ea612eee64645583f0bc594ffed6046f928987e96b70c9
# kubernetes/kubeadm related variables
k8s_cluster_name: godzilla
k8s_version: stable-1.11
k8s_version_number: "1.11.1"
k8s_cri_socket: /run/containerd/containerd.sock
k8s_pod_network_cidr: "10.244.0.0/16"
k8s_kubeadm_token: "{{ lookup('file', '../secrets/kubernetes/clusters/godzilla/kubeadm-cluster-token') }}"
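# A generation sketch (not in the original): the token file above can be created before
# the first run with kubeadm's own generator, e.g.
#   kubeadm token generate > ../secrets/kubernetes/clusters/godzilla/kubeadm-cluster-token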
k8s_admin_config_default_path: /etc/kubernetes/admin.conf # determined by how kubeadm works
k8s_pki_default_dir: /etc/kubernetes/pki # determined by how kubeadm works
kubeadm_extra_opts: ""
k8s_allow_workloads_on_master: true
# kube-proxy binary
kube_proxy_binary_url: "https://storage.googleapis.com/kubernetes-release/release/v{{k8s_version_number}}/bin/linux/amd64/kube-proxy"
kube_proxy_sha256_checksum: 36a86e101435cc3f7087ffad2dc7052f9e49c4d5ee3309b21d1ec3d8966f3cee
# CNI (container networking interface) - https://github.com/containernetworking/cni/
cni_version: "0.7.1"
cni_url: "https://github.com/containernetworking/plugins/releases/download/v{{ cni_version }}/cni-plugins-amd64-v{{ cni_version }}.tgz"
cni_sha512_checksum: b3b0c1cc7b65cea619bddae4c17b8b488e7e13796345c7f75e069af93d1146b90a66322be6334c4c107e8a0ccd7c6d0b859a44a6745f9b85a0239d1be9ad4ccd
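# A re-derivation sketch (not in the original): the checksums above can be re-checked
# locally before bumping versions (placeholders stand in for the URLs above):
#   curl -sL <containerd_url> | sha256sum
#   curl -sL <cni_url> | sha512sum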
[hetzner-ubuntu-k8s]
127.0.1.1 # TODO: replace with the actual machine IP
---
# Bootstrap the bare machine to be managed by ansible
- name: ansible-managed
  hosts: hetzner-ubuntu-k8s
  remote_user: root # hetzner's default remote user is root
  gather_facts: no # gathering facts would fail, since python is not installed by default
  vars:
    public_key: ~/.ssh/id_rsa.pub
  roles:
    - {role: ansible-ubuntu, tags: ['ansible-ubuntu']}

# Install kubernetes and related/necessary software
- name: k8s cluster masters
  hosts: hetzner-ubuntu-k8s
  remote_user: root
  vars:
    public_key: ~/.ssh/id_rsa.pub
  roles:
    - {role: common-ubuntu, tags: ['common-ubuntu']}
    - {role: containerd, tags: ['containerd']}
    - {role: kubernetes/master, tags: ['kubernetes-master']}
    ## You can either have canal OR kube-router, not both
    - {role: kube-router, tags: ['kube-router']}
    #- {role: canal, tags: ['canal']}
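
# A usage sketch (file names assumed, since the diff omits them): with this playbook
# saved as site.yml and the inventory above saved as hosts, the cluster comes up with:
#   ansible-playbook -i hosts site.yml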
---
#####################
# Passwordless Sudo #
#####################
- name: check for passwordless sudo
  raw: "timeout 1s sudo echo 'check'"
  register: passwordless_sudo_check
  ignore_errors: yes
  no_log: true

- name: create admin group
  when: passwordless_sudo_check["rc"] != 0
  raw: |
    echo {{ ssh_initial_password }} | sudo -Ss &&
    sudo groupadd admins --system || true

- name: add user to admin group
  when: passwordless_sudo_check["rc"] != 0
  raw: |
    echo {{ ssh_initial_password }} | sudo -Ss &&
    sudo usermod -a -G admins {{ ssh_user }}

- name: copy sudoers file, make temporary copy editable
  when: passwordless_sudo_check["rc"] != 0
  raw: |
    echo {{ ssh_initial_password }} | sudo -Ss &&
    sudo cp /etc/sudoers /etc/sudoers.bak &&
    sudo cp /etc/sudoers /etc/sudoers.tmp &&
    sudo chmod 777 /etc/sudoers.tmp

- name: add admins no-passwd rule to sudoers file
  when: passwordless_sudo_check["rc"] != 0
  raw: |
    echo {{ ssh_initial_password }} | sudo -Ss &&
    echo -e "\n%admins ALL=(ALL:ALL) NOPASSWD:ALL" >> /etc/sudoers.tmp &&
    sudo chmod 440 /etc/sudoers.tmp

- name: check and install new sudoers
  when: passwordless_sudo_check["rc"] != 0
  raw: |
    echo {{ ssh_initial_password }} | sudo -Ss &&
    sudo visudo -q -c -f /etc/sudoers.tmp &&
    sudo cp -f /etc/sudoers.tmp /etc/sudoers
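
# A verification sketch (not in the original): re-run the same check to confirm the
# NOPASSWD rule actually took effect before continuing to the ansible install.
- name: confirm passwordless sudo now works
  when: passwordless_sudo_check["rc"] != 0
  raw: "timeout 1s sudo echo 'check'"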
###################
# Ansible install #
###################
- name: check for installed ansible (apt)
  raw: "dpkg -s ansible"
  register: ansible_check
  ignore_errors: yes
  no_log: true

- name: enable universe repository
  when: ansible_check["rc"] != 0
  raw: "sudo add-apt-repository universe"

- name: apt-get update
  when: ansible_check["rc"] != 0
  raw: "sudo apt-get update"

- name: apt-get install software-properties-common
  when: ansible_check["rc"] != 0
  raw: "sudo apt-get install -y software-properties-common"

- name: add apt repo for ansible
  when: ansible_check["rc"] != 0
  raw: "sudo apt-add-repository -y ppa:ansible/ansible"

- name: apt-get update and install ansible
  when: ansible_check["rc"] != 0
  raw: "sudo apt-get update && sudo apt-get install -y ansible"
---
- name: copy over canal kubernetes code
  copy:
    src: canal/
    dest: ~/kubernetes/canal/

# Install canal
# a lot of things will go wrong if this isn't put in place shortly after cluster start
- name: install canal
  shell: "kubectl apply -f canal-all-in-one.yaml"
  args:
    chdir: ~/kubernetes/canal
---
- name: install general packages
  become: yes
  apt:
    name: "{{ packages }}"
    update_cache: yes
    state: present
  vars:
    packages:
      - make
      - libseccomp2
      - apt-transport-https
      - ufw

- name: enable UFW, default reject
  become: yes
  ufw:
    state: enabled
    policy: reject

- name: allow ssh access
  become: yes
  ufw:
    rule: allow
    name: OpenSSH

- name: limit ssh
  become: yes
  ufw:
    rule: limit
    port: ssh
    proto: tcp

# TODO: SSH hardening (fail2ban?)
# TODO: Network hardening -- mass port closure/ICMP disabling etc.

# This hack is necessary because hetzner sometimes puts in too many nameservers.
# Kubernetes can't deal with this many and will actually error, so we must limit the
# nameservers to 3. There happen to be 3 ipv4 and 3 ipv6 entries, and the regexp below
# only matches the ipv6 lines (its character class has no '.', so ipv4 entries survive).
- name: remove ipv6 nameserver entries from /etc/resolv.conf
  tags: ["trim-resolv-conf"]
  become: yes
  replace:
    path: /etc/resolv.conf
    regexp: '^nameserver\s+[a-zA-Z0-9\:]+\s*$'
    replace: ''
    backup: yes
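
# An illustrative /etc/resolv.conf before the trim (hypothetical documentation addresses):
#   nameserver 203.0.113.1
#   nameserver 203.0.113.2
#   nameserver 203.0.113.3
#   nameserver 2001:db8::1
#   nameserver 2001:db8::2
#   nameserver 2001:db8::3
# After the trim only the three ipv4 entries remain, keeping the total at 3.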
---
- name: install runc
  become: yes
  apt:
    name: runc
    state: present

- name: download containerd ({{ containerd_version }})
  get_url:
    url: "{{ containerd_url }}"
    checksum: "sha256:{{ containerd_sha256_checksum }}"
    dest: /tmp/containerd.tar.gz

- name: install containerd
  become: yes
  unarchive:
    remote_src: yes
    src: /tmp/containerd.tar.gz
    dest: /

- name: install containerd systemd service
  become: yes
  template:
    src: containerd.service.j2
    dest: /etc/systemd/system/containerd.service
    owner: root
    group: root
    mode: 0755

- name: ensure containerd service is running
  become: yes
  systemd: name=containerd state=started enabled=yes
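
# A verification sketch (not in the original): ctr ships in the containerd release
# tarball unpacked above, so we can ask the daemon for its version over the socket.
- name: confirm containerd responds on its socket
  become: yes
  shell: "ctr --address {{ k8s_cri_socket }} version"
  changed_when: false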
[Unit]
Description=containerd container runtime
Documentation=https://containerd.io
Wants=network-online.target
Requires=network-online.target
After=network-online.target
[Service]
ExecStartPre=/sbin/modprobe overlay
ExecStart=/bin/containerd
Restart=always
RestartSec=5
Delegate=yes
KillMode=process
OOMScoreAdjust=-999
LimitNOFILE=1048576
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNPROC=infinity
LimitCORE=infinity
[Install]
WantedBy=multi-user.target
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: kube-router
  namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: kube-router
  namespace: kube-system
rules:
  - apiGroups:
      - ""
    resources:
      - namespaces
      - pods
      - services
      - nodes
      - endpoints
    verbs:
      - list
      - get
      - watch
  - apiGroups:
      - "networking.k8s.io"
    resources:
      - networkpolicies
    verbs:
      - list
      - get
      - watch
  - apiGroups:
      - extensions
    resources:
      - networkpolicies
    verbs:
      - get
      - list
      - watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: kube-router
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kube-router
subjects:
  - kind: ServiceAccount
    name: kube-router
    namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: kube-router-cfg
  namespace: kube-system
  labels:
    tier: node
    k8s-app: kube-router
data:
  cni-conf.json: |
    {
      "name": "kubernetes",
      "type": "bridge",
      "bridge": "kube-bridge",
      "isDefaultGateway": true,
      "ipam": {
        "type": "host-local"
      }
    }
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  labels:
    k8s-app: kube-router
    tier: node
  name: kube-router
  namespace: kube-system
spec:
  template:
    metadata:
      labels:
        k8s-app: kube-router
        tier: node
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      serviceAccountName: kube-router
      containers:
        - name: kube-router
          image: cloudnativelabs/kube-router:v0.2.0-beta.9
          imagePullPolicy: Always
          args:
            - --run-router=true
            - --run-firewall=true
            - --run-service-proxy=true
            - "--kubeconfig=/var/lib/kube-router/kubeconfig"
            - --v=3
          env:
            - name: KUBE_ROUTER_CNI_CONF_FILE
              value: /etc/cni/net.d/10-kuberouter.conflist
            - name: NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
          livenessProbe:
            httpGet:
              path: /healthz
              port: 20244
            initialDelaySeconds: 10
            periodSeconds: 3
          resources:
            requests:
              cpu: 250m
              memory: 250Mi
          securityContext:
            privileged: true
          volumeMounts:
            - name: lib-modules
              mountPath: /lib/modules
              readOnly: true
            - name: cni-conf-dir
              mountPath: /etc/cni/net.d
            - name: kubeconfig
              mountPath: /var/lib/kube-router
              readOnly: true
      hostNetwork: true
      tolerations:
        - key: CriticalAddonsOnly
          operator: Exists
        - effect: NoSchedule
          key: node-role.kubernetes.io/master
          operator: Exists
      volumes:
        - name: lib-modules
          hostPath:
            path: /lib/modules
        - name: cni-conf-dir
          hostPath:
            path: /etc/cni/net.d
        - name: kube-router-cfg
          configMap:
            name: kube-router-cfg
        - name: kubeconfig
          hostPath:
            path: /var/lib/kube-router
---
- name: copy over kube-router kubernetes resource YAML
  copy:
    src: kube-router/
    dest: ~/kubernetes/kube-router/

- name: copy over kubeconfig for kube-router to use
  become: yes
  copy: src=/etc/kubernetes/admin.conf dest=/var/lib/kube-router/kubeconfig remote_src=yes

- name: download cni binaries (v{{ cni_version }})
  tags: ["cni"]
  become: yes
  get_url:
    url: "{{ cni_url }}"
    checksum: "sha512:{{ cni_sha512_checksum }}"
    dest: /tmp/cni.tar.gz

- name: ensure /opt/cni/bin is present
  tags: ["cni"]
  become: yes
  file: path=/opt/cni/bin state=directory

- name: unarchive cni binaries to /opt/cni/bin
  tags: ["cni"]
  become: yes # writing to /opt/cni/bin needs root
  unarchive:
    src: /tmp/cni.tar.gz
    dest: /opt/cni/bin
    remote_src: yes

- name: copy over cni config for kube-router
  become: yes
  tags: ["cni"]
  template: src=10-kuberouter.conflist.j2 dest=/etc/cni/net.d/10-kuberouter.conflist

- name: install kube-router
  shell: "kubectl apply -f kube-router-all-in-one.yaml"
  args:
    chdir: ~/kubernetes/kube-router

- name: disable kube-proxy
  tags: ['disable-kube-proxy']
  shell: "kubectl -n kube-system delete ds kube-proxy"
  ignore_errors: yes

- name: download kube-proxy binary
  tags: ['disable-kube-proxy']
  get_url:
    url: "{{ kube_proxy_binary_url }}"
    checksum: "sha256:{{ kube_proxy_sha256_checksum }}"
    dest: /tmp/kube-proxy
    mode: 0755

- name: cleanup kube-proxy's mess (flush its leftover iptables rules)
  become: yes
  tags: ['disable-kube-proxy']
  shell: "/tmp/kube-proxy --cleanup"
  ignore_errors: yes
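
# A verification sketch (not in the original): list the daemonset pods so a broken
# rollout is visible right after the install.
- name: check kube-router pods
  shell: "kubectl -n kube-system get pods -l k8s-app=kube-router"
  changed_when: false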
{
  "cniVersion": "0.3.0",
  "name": "mynet",
  "plugins": [
    {
      "bridge": "kube-bridge",
      "ipam": {
        "subnet": "{{ k8s_pod_network_cidr }}",
        "type": "host-local"
      },
      "isDefaultGateway": true,
      "name": "kubernetes",
      "type": "bridge"
    },
    {
      "capabilities": {
        "portMappings": true,
        "snat": true
      },
      "type": "portmap"
    }
  ]
}
---
- name: ensure br_netfilter kernel module is available
  become: yes
  modprobe:
    name: br_netfilter
    state: present

- name: install required packages (libseccomp, etc)
  become: yes
  apt:
    name: libseccomp2
    update_cache: yes
    state: present

- name: add google apt signing key
  become: yes
  apt_key:
    url: https://packages.cloud.google.com/apt/doc/apt-key.gpg
    state: present

- name: add google apt repository
  become: yes
  apt_repository:
    repo: deb http://apt.kubernetes.io/ kubernetes-xenial main
    state: present

# Ensure kubernetes requirements are installed
- name: install kubernetes packages
  become: yes
  apt:
    name: "{{ packages }}"
    update_cache: yes
    state: present
  vars:
    packages:
      - "kubelet={{ k8s_version_number }}-00"
      - "kubeadm={{ k8s_version_number }}-00"
      - "kubectl={{ k8s_version_number }}-00"

###########
# kubeadm #
###########
# Below tasks are mostly from https://github.com/kairen/kubeadm-ansible/blob/master/roles/kubernetes/master/tasks/main.yml
- name: check if admin config has been generated
  stat: path={{ k8s_admin_config_default_path }}
  register: kubernetes_dir_exists

- name: quit early if kubernetes directory already exists
  fail:
    msg: "kubeadm init artifact @ [{{ k8s_admin_config_default_path }}] already exists, quitting early..."
  when: kubernetes_dir_exists.stat.exists

- name: set net.bridge.bridge-nf-call-iptables to 1
  become: yes
  retries: 2
  sysctl:
    name: net.bridge.bridge-nf-call-iptables
    value: 1
    state: present

- name: set net.ipv4.ip_forward to 1
  become: yes
  retries: 2
  sysctl:
    name: net.ipv4.ip_forward
    value: 1
    state: present
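
# A verification sketch (not in the original): print both sysctls so the run log
# shows they actually stuck before kubeadm init depends on them.
- name: show bridge/forwarding sysctls
  become: yes
  shell: "sysctl net.bridge.bridge-nf-call-iptables net.ipv4.ip_forward"
  changed_when: false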
- name: disable swap
  become: yes
  shell: swapoff -a

- name: disable swap permanently (modify /etc/fstab)
  become: yes
  replace:
    path: /etc/fstab
    regexp: '^(.*swap.*)$'
    backup: yes
    replace: '#\1'
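
# With the group vars above, the init command below renders roughly to (illustrative):
#   kubeadm init --kubernetes-version stable-1.11 \
#     --pod-network-cidr 10.244.0.0/16 \
#     --token <contents of ../secrets/kubernetes/clusters/godzilla/kubeadm-cluster-token> \
#     --cri-socket /run/containerd/containerd.sock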
- name: run kubeadm init
  become: yes
  shell: |
    kubeadm init --kubernetes-version {{ k8s_version }} \
      --pod-network-cidr {{ k8s_pod_network_cidr }} \
      --token {{ k8s_kubeadm_token }} \
      --cri-socket {{ k8s_cri_socket }} \
      {{ kubeadm_extra_opts }}
  register: init_cluster

- name: add ~/.kube folder
  tags: ['post-kubeadm']
  file: path="~/.kube" state=directory

- name: update permissions for admin.conf
  tags: ['post-kubeadm']
  become: true
  file: path=/etc/kubernetes/admin.conf mode=0775

- name: copy admin.conf to remote user's home directory
  tags: ['post-kubeadm']
  copy:
    src: "{{ k8s_admin_config_default_path }}"
    dest: "~/.kube/config"
    owner: "{{ ansible_user }}"
    group: "{{ ansible_user }}"
    mode: 0655
    remote_src: yes

- name: remove master taint to allow workloads to be run on master
  tags: ['post-kubeadm']
  shell: "kubectl taint nodes --all node-role.kubernetes.io/master-"
  when: k8s_allow_workloads_on_master
  ignore_errors: yes # if this runs again, the taint might already be gone

- name: copy admin.conf to local machine
  tags: ['post-kubeadm']
  fetch:
    src: "{{ k8s_admin_config_default_path }}"
    dest: "../secrets/kubernetes/clusters/godzilla/"
    flat: yes # fetch takes no owner/group/mode options; save the file as-is under dest

- name: enable and restart kubelet engine
  tags: ['post-kubeadm']
  become: yes
  systemd:
    name: kubelet
    daemon_reload: yes
    state: restarted
    enabled: yes
  register: started_kubelet
####################
# Firewall Updates #
####################
- name: allow kubernetes API traffic on 6443
  become: yes
  tags: ['post-kubeadm', 'firewall-opening']
  ufw:
    rule: allow
    port: 6443
    proto: tcp

- name: allow traffic on 443 (HTTPS)
  become: yes
  tags: ['post-kubeadm', 'firewall-opening']
  ufw:
    rule: allow
    port: 443

- name: allow traffic on 80 (HTTP)
  become: yes
  tags: ['post-kubeadm', 'firewall-opening']
  ufw:
    rule: allow
    port: 80

- name: allow all access from RFC1918 networks to this host
  become: yes
  tags: ['post-kubeadm', 'firewall-opening']
  ufw:
    rule: allow
    src: '{{ item }}'
  with_items:
    - 10.0.0.0/8
    - "{{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}"

- name: allow all access to RFC1918 networks from anywhere
  become: yes
  tags: ['post-kubeadm', 'firewall-opening']
  ufw:
    rule: allow
    dest: '{{ item }}'
  with_items:
    - 10.0.0.0/8
    - "{{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}"