1
0
mirror of https://github.com/jcwimer/kubernetes-ansible synced 2026-03-24 16:44:44 +00:00

Changed from Kubeadm to RKE for deployment

This commit is contained in:
2019-05-03 15:10:23 -04:00
parent 0537239d74
commit fc94d74b32
16 changed files with 477 additions and 83 deletions

View File

@@ -1,5 +1,5 @@
# kubernetes-ansible
Deploy kubernetes with kubeadm with ansible. This currently only supports a single master kubernetes cluster. This currently only deploys weave net for pod networking.
Deploy kubernetes with rke with ansible.
# Deploy
@@ -7,16 +7,15 @@ Deploy kubernetes with kubeadm with ansible. This currently only supports a sing
1. Python
2. Pip
3. Pipenv
5. Nodes already deployed and running.
5. Nodes already deployed and running with Ubuntu.
6. SSH access to all nodes you're deploying to.
* You will need to define an environment variable for your ssh key. `export PRIVATE_KEY="/location/of/key"`
* OR you will need a ssh agent running.
### Steps
1. Copy hosts.example to hosts
* Put ip addresses under the sections.
* Master is a single node used for the kubernetes api. See kubernetes documentation for more info on masters: https://kubernetes.io/docs/concepts/overview/components/
* Workers are nodes used for running containers. You can have as many as necessary.
* Name the node and under ansible_host put the ip of the node.
* Master nodes run the control plane. See kubernetes documentation for more info on masters: https://kubernetes.io/docs/concepts/overview/components/
* Workers are nodes used for running containers.
2. Copy group_vars/all.example to group_vars/all
* Fill out with the settings that pertain to your configuration.
3. Run `bash supporting-scripts/run-setup.sh`

View File

@@ -1,8 +1,19 @@
docker_ce_version_to_install: 18.03.1
kube_init_network_interface: enp0s8
# Kubernetes pod network. Choices are:
# flannel
# weavenet
# calico
kube_network: weavenet
# rke
# The directory for copying RKE config files to deploy the cluster. This is a local directory.
rke_directory: /root/rke
# The directory for copying RKE config files onto the kubernetes nodes. This folder will be created if it does not exist.
rke_node_directory: /rke
rke_version: 0.2.1
rke_cluster_name: rke-k8s
# The ssh key location used to ssh into the kubernetes nodes. This is overridden by supporting-scripts/run-setup.sh
rke_ssh_key_location: /root/id_rsa
# Internal domain for kubernetes api to respond on
domain: test.local
# Standard ssh user for kubernetes nodes.
standard_user: user
# Optional - defaults to localhost if you do not define it. This is a VIP address for the kubernetes api to respond on when fronted by haproxy.
vip_address: localhost

View File

@@ -1,6 +1,7 @@
[masters]
192.168.254.2
master1 ansible_host=192.168.254.2
[workers]
192.168.254.3
192.168.254.4
192.168.254.5
worker1 ansible_host=192.168.254.3
worker2 ansible_host=192.168.254.4
worker3 ansible_host=192.168.254.5

26
playbooks/rke.yml Normal file
View File

@@ -0,0 +1,26 @@
---
# Top-level RKE deployment playbook:
#  1. install ansible dependencies everywhere,
#  2. prepare all cluster nodes (docker, kubectl, RKE dirs),
#  3. run RKE itself from the ansible controller (localhost),
#  4. copy the resulting cluster state/kubeconfig back onto the nodes.
- name: Setup ansible dependencies
  hosts: all
  gather_facts: false
  serial: "100%"
  tasks:
    # NOTE(review): `include` is deprecated in modern Ansible; `include_tasks`
    # is the dynamic replacement — confirm target Ansible version before changing.
    - include: ../roles/ansible-dependencies/tasks/main.yml

- name: Pre rke
  # was "masters workers": space is not a valid Ansible pattern separator,
  # so the play would match no hosts; ":" is the union operator.
  hosts: masters:workers
  gather_facts: true
  serial: "100%"
  tasks:
    - include: ../roles/rke/tasks/pre-rke.yml

- name: Set up Kubernetes
  # RKE runs from the controller and ssh-es into the nodes itself.
  hosts: localhost
  tasks:
    - include: ../roles/rke/tasks/main.yml

- name: Post rke
  hosts: masters:workers
  gather_facts: true
  serial: "100%"
  tasks:
    - include: ../roles/rke/tasks/post-rke.yml

View File

@@ -1,55 +0,0 @@
---
- name: Setup ansible dependencies
hosts: all
gather_facts: false
serial: 100%
tasks:
- include: ../roles/ansible-dependencies/tasks/main.yml
- name: Install kubernetes
hosts: all
gather_facts: yes
serial: 100%
tasks:
- include: ../roles/install-kubernetes/tasks/main.yml
- name: Bootstrap kubernetes
hosts: masters[0]
gather_facts: yes
serial: 100%
tasks:
- name: Set interface var name fact
set_fact:
kube_interface_var_name: "ansible_{{ kube_init_network_interface }}"
- name: Set swarm advertise ip address
set_fact:
init_ip: "{{ hostvars[inventory_hostname][kube_interface_var_name]['ipv4']['address'] }}"
- include: ../roles/initialize-kubernetes/tasks/main.yml
vars:
join_addr: "{{ init_ip }}"
- name: Join worker nodes
hosts: workers
gather_facts: yes
serial: 100%
# vars:
# join-ip:
# "{{ hostvars[groups['masters'][0]]['join-ip']['stdout'] }}"
# init-ip:
# "{{ hostvars[groups['masters'][0]]['init-ip'] }}"
tasks:
- name: Set interface var name fact
set_fact:
kube_interface_var_name: "ansible_{{ kube_init_network_interface }}"
- name: Set swarm advertise ip address
set_fact:
kube_interface_ip: "{{ hostvars[inventory_hostname][kube_interface_var_name]['ipv4']['address'] }}"
- include: ../roles/join-kubernetes-workers/tasks/main.yml
vars:
join_addr: "{{ hostvars[groups['masters'][0]]['init_ip'] }}"
join_token: "{{ hostvars[groups['masters'][0]]['join_token']['stdout'] }}"
kube_ip: "{{ kube_interface_ip }}"

View File

@@ -0,0 +1,8 @@
# rke
# Local (controller-side) directory where RKE binaries/configs are staged.
rke_directory: /root/rke
# Directory created on the kubernetes nodes for RKE state/kubeconfig copies.
rke_node_directory: /rke
rke_version: 0.2.1
rke_cluster_name: rke-k8s
# NOTE(review): other environments use /root/id_rsa — confirm this key path is intentional.
rke_ssh_key_location: /root/id_home
# presumably the haproxy VIP the kubernetes api answers on — verify against consumer
vip_address: localhost
# Internal domain for the kubernetes api SAN (kube.<domain>).
domain: test.local

52
roles/rke/tasks/main.yml Normal file
View File

@@ -0,0 +1,52 @@
---
# Runs entirely on the ansible controller (delegate_to: localhost, run_once):
# stages the RKE binary and rendered configs, then invokes `rke up`.
- name: Create RKE directory
  file:
    path: "{{ rke_directory }}"
    state: directory
  delegate_to: localhost
  run_once: true

- name: Create RKE Configs directory
  file:
    path: "{{ rke_directory }}/configs"
    state: directory
  delegate_to: localhost
  run_once: true

- name: Install RKE
  get_url:
    dest: "{{ rke_directory }}/rke"
    url: "https://github.com/rancher/rke/releases/download/v{{ rke_version }}/rke_linux-amd64"
  delegate_to: localhost
  run_once: true

- name: Make RKE executable
  file:
    dest: "{{ rke_directory }}/rke"
    # NOTE(review): bare "+x" may not be accepted by all Ansible versions;
    # consider "a+x" or "0755" — confirm before changing.
    mode: "+x"
  delegate_to: localhost
  run_once: true

- name: Put RKE cluster config in place
  template:
    src: ../templates/rke-cluster-deployment.yaml.j2
    dest: "{{ rke_directory }}/{{ rke_cluster_name }}.yaml"
  delegate_to: localhost
  run_once: true

- name: Put RKE configs in place
  template:
    src: "../templates/rke-configs/{{ item }}.j2"
    dest: "{{ rke_directory }}/configs/{{ item }}"
  with_items:
    - kube-state-metrics-deployment.yaml
    - kube-state-metrics-service.yaml
    - kube-state-metrics-rbac.yaml
  delegate_to: localhost
  run_once: true

- name: Run RKE
  shell: >
    bash -c "{{ rke_directory }}/rke up --config {{ rke_directory }}/{{ rke_cluster_name }}.yaml"
  delegate_to: localhost
  run_once: true

View File

@@ -0,0 +1,12 @@
---
# After `rke up` on the controller, push the generated kubeconfig and
# cluster-state files back onto the nodes so later runs (and on-node
# kubectl) can find them under {{ rke_node_directory }}.
- name: Copy RKE kube config back to nodes after RKE run
  copy:
    src: "{{ rke_directory }}/kube_config_{{ rke_cluster_name }}.yaml"
    dest: "{{ rke_node_directory }}/kube_config_{{ rke_cluster_name }}.yaml"
  become: true

- name: Copy RKE cluster state back to nodes after RKE run
  copy:
    src: "{{ rke_directory }}/{{ rke_cluster_name }}.rkestate"
    dest: "{{ rke_node_directory }}/{{ rke_cluster_name }}.rkestate"
  become: true

View File

@@ -0,0 +1,88 @@
---
# Node preparation before RKE runs: docker + kubectl install, RKE directory,
# and (if a previous run left state on the node) fetch that state back to the
# controller so `rke up` is idempotent across controller rebuilds.
- name: Update apt
  apt:
    update_cache: true
  become: true

- name: Install programs to add debian repositories
  apt:
    name: "{{ item }}"
    state: present
    force: true
  with_items:
    - curl
    - apt-transport-https
  become: true

- name: Add kubernetes key
  apt_key:
    url: https://packages.cloud.google.com/apt/doc/apt-key.gpg
    state: present
  become: true

- name: Add kubernetes repo
  apt_repository:
    repo: "deb https://apt.kubernetes.io/ kubernetes-xenial main"
    state: present
  become: true

- name: Add docker key
  apt_key:
    url: https://download.docker.com/linux/ubuntu/gpg
    state: present
  become: true

- name: Add docker repo
  apt_repository:
    repo: "deb [arch=amd64] https://download.docker.com/linux/ubuntu {{ ansible_distribution_release }} stable"
    state: present
  become: true

- name: Update apt
  apt:
    update_cache: true
  become: true

- name: Install kubectl
  apt:
    name: "{{ item }}"
    state: present
    force: true
  with_items:
    - kubectl
  become: true

# Pins docker-ce to the apt candidate matching docker_ce_version_to_install.
- name: Install docker-ce
  shell: >
    apt-get install -y -qq docker-ce=$(apt-cache madison docker-ce | grep "{{ docker_ce_version_to_install }}" | awk {'print $3'})
  become: true

# Lets the standard user run docker without sudo (RKE requirement).
- name: Add standard user to docker group
  shell: usermod -a -G docker {{ standard_user }}
  become: true

- name: Creates RKE directory on nodes
  file:
    path: "{{ rke_node_directory }}"
    state: directory
  become: true

- name: Check if RKE cluster state file exists
  stat:
    path: "{{ rke_node_directory }}/{{ rke_cluster_name }}.rkestate"
  register: cluster_state_result
  become: true

- name: Check if RKE kubeconfig file exists
  stat:
    path: "{{ rke_node_directory }}/kube_config_{{ rke_cluster_name }}.yaml"
  register: kube_config_result
  become: true

- name: Copy RKE cluster state back to local if it already exists
  fetch:
    src: "{{ rke_node_directory }}/{{ rke_cluster_name }}.rkestate"
    dest: "{{ rke_directory }}/{{ rke_cluster_name }}.rkestate"
    flat: true
  when: cluster_state_result.stat.exists
  become: true

- name: Copy RKE kube config if it already exists
  fetch:
    src: "{{ rke_node_directory }}/kube_config_{{ rke_cluster_name }}.yaml"
    dest: "{{ rke_directory }}/kube_config_{{ rke_cluster_name }}.yaml"
    flat: true
  when: kube_config_result.stat.exists
  become: true

View File

@@ -0,0 +1,34 @@
---
# Jinja2 template rendered by roles/rke/tasks/main.yml into the RKE
# cluster config ({{ rke_cluster_name }}.yaml). Masters become
# controlplane+etcd nodes, workers become worker nodes.
ssh_key_path: {{ rke_ssh_key_location }}
# bug fix: was the literal string "rke_cluster_name" instead of the templated variable
cluster_name: {{ rke_cluster_name }}
ignore_docker_version: true
kubernetes_version: v1.13.4-rancher1-2
system_images:
  kubernetes: rancher/hyperkube:v1.13.4-rancher1
nodes:
{% for node in groups['masters'] %}
  - address: {{ hostvars[node]['ansible_host'] }}
    name: {{ node }}
    user: {{ standard_user }}
    role:
      - controlplane
      - etcd
{% endfor %}
{% for node in groups['workers'] %}
  - address: {{ hostvars[node]['ansible_host'] }}
    name: {{ node }}
    user: {{ standard_user }}
    role:
      - worker
{% endfor %}
authentication:
  strategy: x509
  # extra SANs so the api cert is valid for the haproxy VIP and internal name
  sans:
    - "{{ vip_address }}"
    - "kube.{{ domain }}"

View File

@@ -0,0 +1,92 @@
# kube-state-metrics v1.3.0 Deployment plus the addon-resizer sidecar that
# scales its resources with cluster size (config via the ConfigMap below).
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kube-state-metrics
  namespace: kube-system
  labels:
    k8s-app: kube-state-metrics
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    version: v1.3.0
spec:
  selector:
    matchLabels:
      k8s-app: kube-state-metrics
      version: v1.3.0
  replicas: 1
  template:
    metadata:
      labels:
        k8s-app: kube-state-metrics
        version: v1.3.0
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      priorityClassName: system-cluster-critical
      serviceAccountName: kube-state-metrics
      containers:
        - name: kube-state-metrics
          image: quay.io/coreos/kube-state-metrics:v1.3.0
          ports:
            - name: http-metrics
              containerPort: 8080
            - name: telemetry
              containerPort: 8081
          readinessProbe:
            httpGet:
              path: /healthz
              port: 8080
            initialDelaySeconds: 5
            timeoutSeconds: 5
        - name: addon-resizer
          image: k8s.gcr.io/addon-resizer:1.8.4
          resources:
            limits:
              cpu: 100m
              memory: 30Mi
            requests:
              cpu: 100m
              memory: 30Mi
          env:
            - name: MY_POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: MY_POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          volumeMounts:
            - name: config-volume
              mountPath: /etc/config
          command:
            - /pod_nanny
            - --config-dir=/etc/config
            - --container=kube-state-metrics
            - --cpu=100m
            - --extra-cpu=1m
            - --memory=100Mi
            - --extra-memory=2Mi
            - --threshold=5
            - --deployment=kube-state-metrics
      volumes:
        - name: config-volume
          configMap:
            name: kube-state-metrics-config
---
# Config map for resource configuration.
apiVersion: v1
kind: ConfigMap
metadata:
  name: kube-state-metrics-config
  namespace: kube-system
  labels:
    k8s-app: kube-state-metrics
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
data:
  NannyConfiguration: |-
    apiVersion: nannyconfig/v1alpha1
    kind: NannyConfiguration

View File

@@ -0,0 +1,104 @@
# RBAC for kube-state-metrics: read-only cluster-wide list/watch for the
# exporter, plus a narrow kube-system Role so the addon-resizer can
# update the kube-state-metrics Deployment.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: kube-state-metrics
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: kube-state-metrics
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
rules:
  - apiGroups: [""]
    resources:
      - configmaps
      - secrets
      - nodes
      - pods
      - services
      - resourcequotas
      - replicationcontrollers
      - limitranges
      - persistentvolumeclaims
      - persistentvolumes
      - namespaces
      - endpoints
    verbs: ["list", "watch"]
  - apiGroups: ["extensions"]
    resources:
      - daemonsets
      - deployments
      - replicasets
    verbs: ["list", "watch"]
  - apiGroups: ["apps"]
    resources:
      - statefulsets
    verbs: ["list", "watch"]
  - apiGroups: ["batch"]
    resources:
      - cronjobs
      - jobs
    verbs: ["list", "watch"]
  - apiGroups: ["autoscaling"]
    resources:
      - horizontalpodautoscalers
    verbs: ["list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: kube-state-metrics-resizer
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
rules:
  - apiGroups: [""]
    resources:
      - pods
    verbs: ["get"]
  - apiGroups: ["extensions"]
    resources:
      - deployments
    resourceNames: ["kube-state-metrics"]
    verbs: ["get", "update"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kube-state-metrics
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kube-state-metrics
subjects:
  - kind: ServiceAccount
    name: kube-state-metrics
    namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: kube-state-metrics
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kube-state-metrics-resizer
subjects:
  - kind: ServiceAccount
    name: kube-state-metrics
    namespace: kube-system

View File

@@ -0,0 +1,23 @@
# Service exposing kube-state-metrics; the prometheus.io/scrape annotation
# lets Prometheus service discovery pick it up automatically.
apiVersion: v1
kind: Service
metadata:
  name: kube-state-metrics
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "kube-state-metrics"
  annotations:
    prometheus.io/scrape: 'true'
spec:
  ports:
    - name: http-metrics
      port: 8080
      targetPort: http-metrics
      protocol: TCP
    - name: telemetry
      port: 8081
      targetPort: telemetry
      protocol: TCP
  selector:
    k8s-app: kube-state-metrics

View File

@@ -8,4 +8,4 @@ cd ${project_dir}
# Run
pipenv install
pipenv run ansible-playbook -u ${DEPLOY_USER} -i hosts --ssh-common-args="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no" --private-key=${private_key} playbooks/site.yml
pipenv run ansible-playbook -u ${DEPLOY_USER} -i hosts --ssh-common-args="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no" --private-key=${private_key} --extra-vars "rke_ssh_key_location=${private_key}" playbooks/rke.yml

View File

@@ -1,3 +1,9 @@
docker_ce_version_to_install: 18.03.1
kube_init_network_interface: enp0s8
kube_network: weavenet
# rke
rke_directory: /home/vagrant/rke
rke_node_directory: /rke
rke_version: 0.2.1
rke_cluster_name: rke-k8s
vip_address: localhost
domain: test.local
standard_user: vagrant

View File

@@ -40,19 +40,12 @@ function run-tests {
echo Giving containers time to start up.
sleep 90s
local kubectl_config="export KUBECONFIG=/home/vagrant/admin.conf"
local kubectl_config="export KUBECONFIG=/rke/kube_config_rke-k8s.yaml"
local curl_params="--silent --fail --max-time 10"
number_of_ready_nodes=$(run-ssh-command master "${kubectl_config}; kubectl get nodes | grep -v STATUS | grep Ready | wc -l")
number_of_ready_nodes=$(run-ssh-command master "sudo bash -c '${kubectl_config}; kubectl get nodes | grep -v STATUS | grep Ready | wc -l'")
testbash "There should be 4 nodes in Ready state." \
"test ${number_of_ready_nodes} -eq 4"
traefik_port=$(run-ssh-command master "${kubectl_config}; kubectl get service -n kube-system | grep traefik | awk {'print $5'} | cut -d , -f 2 | cut -d : -f 2 | cut -d / -f 1")
testbash "Traefik should be reachable from worker1 node." \
"run-ssh-command client 'curl ${curl_params} http://192.168.254.3:${traefik_port}/dashboard/ > /dev/null'"
testbash "Traefik should be reachable from worker2 node." \
"run-ssh-command client 'curl ${curl_params} http://192.168.254.3:${traefik_port}/dashboard/ > /dev/null'"
trap - ERR
}