1
0
mirror of https://github.com/jcwimer/multi-cloud-kubernetes synced 2026-03-24 17:34:43 +00:00

Added ansible to configure Kubernetes with RKE

This commit is contained in:
2020-07-02 16:57:29 -04:00
parent 4defd3a0b9
commit 0a6a7d03c7
19 changed files with 359 additions and 28 deletions

3
.gitignore vendored
View File

@@ -3,4 +3,5 @@ terraform-code/inventory
terraform-code/.terraform
terraform
prod.env
ansible/playbooks/*.retry
ansible/playbooks/*.retry
rke/

View File

@@ -6,7 +6,7 @@ name = "pypi"
[dev-packages]
[packages]
ansible = "==2.4.0.0"
ansible = "==2.7.0.0"
[requires]
python_version = "2.7"

8
ansible/Pipfile.lock generated
View File

@@ -1,7 +1,7 @@
{
"_meta": {
"hash": {
"sha256": "1e5c097d8a5cd9ad5a726b99b1e2e28b3fc9bc86409327d587cd0958edcd1586"
"sha256": "19471691f000ee34aa55e6c0f93b1e212fa6abc334c5d692e9ad049a9b8fb5ef"
},
"pipfile-spec": 6,
"requires": {
@@ -18,10 +18,10 @@
"default": {
"ansible": {
"hashes": [
"sha256:1a276fee7f72d4e6601a7994879e8467edb763dacc3e215258cfe71350b77c76"
"sha256:a1ab8e0f13e79a20661ad6546f45a142afeaeb664deb2c290e32362d8ae5b618"
],
"index": "pypi",
"version": "==2.4.0.0"
"version": "==2.7.0.0"
},
"bcrypt": {
"hashes": [
@@ -227,7 +227,7 @@
"sha256:30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259",
"sha256:8b74bedcbbbaca38ff6d7491d76f2b06b3592611af620f8426e82dddb04a5ced"
],
"markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2'",
"markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
"version": "==1.15.0"
}
},

View File

@@ -1,11 +1,33 @@
---
- name: All hosts ansible dependencies
- name: Wait for ssh and cloud-init
hosts: "masters,workers"
user: root
user: debian
gather_facts: false
serial: 100%
vars:
ansible_ssh_common_args: '-o StrictHostKeyChecking=no'
ansible_ssh_common_args: '-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null'
tasks:
- name: Update apt
raw: apt-get update
- name: Wait 600 seconds for target connection to become reachable/usable
wait_for_connection:
timeout: 600
- name: Wait for cloud init to finish
cloud_init_data_facts:
filter: status
register: res
until: "res.cloud_init_data_facts.status.v1.stage is defined and not res.cloud_init_data_facts.status.v1.stage"
retries: 20
delay: 60
- name: Install kubernetes with rke
hosts: "masters,workers"
user: debian
gather_facts: true
serial: 100%
vars:
ansible_ssh_common_args: '-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null'
roles:
- role: ../roles/kubernetes
vars:
longhorn_enabled: true
standard_user: debian

View File

@@ -0,0 +1,6 @@
---
rke_directory: /home/{{ standard_user }}/rke
rke_node_directory: /opt/rke
rke_version: 1.1.3
rke_cluster_name: rke-k8s
kubernetes_version: 1.15.12

View File

@@ -0,0 +1,77 @@
---
- name: Create RKE directory
file:
path: "{{ rke_directory }}"
state: directory
mode: '0774'
delegate_to: localhost
run_once: true
- name: Get zerotier interface
raw: ls -1 /sys/class/net/ | grep zt | tr '\n' ' ' | sed 's/\ //g'
register: zt_interface
run_once: true
- set_fact:
kubernetes_ansible_interface: "ansible_{{zt_interface.stdout}}"
- set_fact:
kubernetes_network_interface: "{{zt_interface.stdout}}"
- name: Create RKE Configs directory
file:
path: "{{ rke_directory }}/configs"
state: directory
delegate_to: localhost
run_once: true
- name: Install RKE
get_url:
dest: "{{ rke_directory }}/rke"
url: https://github.com/rancher/rke/releases/download/v{{ rke_version }}/rke_linux-amd64
delegate_to: localhost
run_once: true
- name: Make RKE executable
file:
dest: "{{ rke_directory }}/rke"
mode: +x
delegate_to: localhost
run_once: true
- name: Put RKE cluster config in place
template:
src: ../templates/rke-cluster-deployment.yaml
dest: "{{ rke_directory }}/{{ rke_cluster_name }}.yaml"
delegate_to: localhost
run_once: true
# - name: Put RKE configs in place
# template:
# src: ../templates/rke-configs/{{ item }}.j2
# dest: "{{ rke_directory }}/configs/{{ item }}"
# with_items:
# - kube-state-metrics-deployment.yaml
# - kube-state-metrics-service.yaml
# - kube-state-metrics-rbac.yaml
# delegate_to: localhost
# run_once: true
- name: Run RKE
shell: >
bash -c "{{ rke_directory }}/rke up --config {{ rke_directory }}/{{ rke_cluster_name }}.yaml"
delegate_to: localhost
run_once: true
retries: 5
delay: 5
register: rke_install
until: rke_install.rc == 0
- name: Set permissions on rke directory
file:
path: "{{ rke_directory }}"
state: directory
mode: '0774'
recurse: yes
delegate_to: localhost
run_once: true

View File

@@ -0,0 +1,9 @@
---
- name: RKE Pre Tasks
import_tasks: pre-rke.yml
- name: RKE Deploy Kubernetes
import_tasks: deploy-rke.yml
- name: RKE Post Tasks
import_tasks: post-rke.yml

View File

@@ -0,0 +1,12 @@
---
- name: Copy RKE kube config back to nodes after RKE run
copy:
src: "{{ rke_directory }}/kube_config_{{ rke_cluster_name }}.yaml"
dest: "{{ rke_node_directory }}/kube_config_{{ rke_cluster_name }}.yaml"
become: true
- name: Copy RKE cluster state back to nodes after RKE run
copy:
src: "{{ rke_directory }}/{{ rke_cluster_name }}.rkestate"
dest: "{{ rke_node_directory }}/{{ rke_cluster_name }}.rkestate"
become: true

View File

@@ -0,0 +1,98 @@
---
- name: Update apt
apt: update_cache=yes
become: true
- name: Install programs to add debian repositories
apt: name={{ item }} state=present force=yes
with_items:
- curl
- apt-transport-https
become: true
- name: Add kubernetes key
apt_key:
url: https://packages.cloud.google.com/apt/doc/apt-key.gpg
state: present
become: true
- name: Add kubernetes repo
apt_repository:
repo: deb https://apt.kubernetes.io/ kubernetes-xenial main
state: present
become: true
- name: Update apt
apt: update_cache=yes
become: true
- name: Install kubectl
apt: name={{ item }} state=present force=yes
with_items:
- kubectl
become: true
- name: Creates RKE directory on nodes
file:
path: "{{ rke_node_directory }}"
state: directory
mode: '0774'
become: true
- name: Create flex volume folder
file:
path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec
state: directory
become: true
- name: Create Rook storage Directory
file:
path: /var/lib/rook
state: directory
mode: '0774'
become: true
when: rook_enabled is defined and rook_enabled | bool == True
- name: Create Longhorn Storage Directory
file:
path: /var/lib/rancher/longhorn
state: directory
mode: '0774'
become: true
when: longhorn_enabled is defined and longhorn_enabled | bool == True
- name: Create Longhorn Setting Directory
file:
path: /var/lib/longhorn-setting
state: directory
mode: '0774'
become: true
when: longhorn_enabled is defined and longhorn_enabled | bool == True
- name: Check if RKE cluster state file exists
stat:
path: "{{ rke_node_directory }}/{{ rke_cluster_name }}.rkestate"
register: cluster_state_result
become: true
- name: Check if RKE kubeconfig file exists
stat:
path: "{{ rke_node_directory }}/kube_config_{{ rke_cluster_name }}.yaml"
register: kube_config_result
become: true
- name: Copy RKE cluster state back to local if it already exists
fetch:
src: "{{ rke_node_directory }}/{{ rke_cluster_name }}.rkestate"
dest: "{{ rke_directory }}/{{ rke_cluster_name }}.rkestate"
flat: yes
when: cluster_state_result.stat.exists == True
become: true
- name: Copy RKE kube config if it already exists
fetch:
src: "{{ rke_node_directory }}/kube_config_{{ rke_cluster_name }}.yaml"
dest: "{{ rke_directory }}/kube_config_{{ rke_cluster_name }}.yaml"
flat: yes
when: kube_config_result.stat.exists == True
become: true

View File

@@ -0,0 +1,49 @@
---
ssh_key_path: {{ rke_ssh_key_location }}
cluster_name: {{ rke_cluster_name }}
ignore_docker_version: true
system_images:
kubernetes: rancher/hyperkube:v{{ kubernetes_version }}-rancher1
{% if (longhorn_enabled is defined and longhorn_enabled | bool == True) %}
services:
kubelet:
extra_args:
volume-plugin-dir: /usr/libexec/kubernetes/kubelet-plugins/volume/exec
extra_binds:
- /usr/libexec/kubernetes/kubelet-plugins/volume/exec:/usr/libexec/kubernetes/kubelet-plugins/volume/exec
{% endif %}
network:
plugin: canal
{% if (kubernetes_network_interface is defined) %}
options:
canal_iface: {{ kubernetes_network_interface }}
{% endif %}
nodes:
{% for node in groups['masters'] %}
- address: {{node}}
name: {{hostvars[node]['ansible_hostname']}}
hostname_override: {{hostvars[node]['ansible_hostname']}}
internal_address: {{ hostvars[node][kubernetes_ansible_interface]['ipv4']['address'] }}
user: {{standard_user}}
role:
- controlplane
- etcd
{% endfor %}
{% for node in groups['workers'] %}
- address: {{node}}
internal_address: {{ hostvars[node][kubernetes_ansible_interface]['ipv4']['address'] }}
name: {{hostvars[node]['ansible_hostname']}}
hostname_override: {{hostvars[node]['ansible_hostname']}}
user: {{standard_user}}
role:
- worker
{% endfor %}

View File

@@ -2,4 +2,8 @@
project_dir=$(git rev-parse --show-toplevel)
cd ${project_dir}/ansible
ansible-playbook --inventory-file=${project_dir}/terraform-code/inventory --private-key ~/.ssh/id_home playbooks/site.yml
ansible-playbook --inventory-file=${project_dir}/terraform-code/inventory --private-key ~/.ssh/id_home \
-e rke_ssh_key_location=~/.ssh/id_home \
-e rke_directory=${project_dir}/rke \
-e cloudflare_api=${CLOUDFLARE_API}
playbooks/site.yml

View File

@@ -18,7 +18,6 @@ ${project_dir}/terraform apply -auto-approve -refresh=true \
-var "ramnode_username=${RAMNODE_USERNAME}" \
-var "ramnode_password=${RAMNODE_PASSWORD}" \
-var "do_token=${DIGITALOCEAN_ACCESS_TOKEN}"
sleep 60s
cd ${project_dir}/ansible
pipenv sync
pipenv run bash run-ansible.sh

View File

@@ -9,4 +9,6 @@ ${project_dir}/terraform destroy -force \
-var "home_password=${HOME_PASSWORD}" \
-var "ramnode_username=${RAMNODE_USERNAME}" \
-var "ramnode_password=${RAMNODE_PASSWORD}" \
-var "do_token=${DIGITALOCEAN_ACCESS_TOKEN}"
-var "do_token=${DIGITALOCEAN_ACCESS_TOKEN}"
rm -rf ${project_dir}/rke

View File

@@ -13,7 +13,7 @@ resource "digitalocean_tag" "worker" {
resource "digitalocean_droplet" "master" {
provider = digitalocean.digitalocean
image = "ubuntu-20-04-x64"
image = "debian-10-x64"
name = "multicloud-digitalocean-master"
region = "nyc1"
size = "s-1vcpu-2gb"
@@ -25,7 +25,7 @@ resource "digitalocean_droplet" "master" {
resource "digitalocean_droplet" "worker" {
provider = digitalocean.digitalocean
image = "ubuntu-20-04-x64"
image = "debian-10-x64"
name = "multicloud-digitalocean-worker"
region = "nyc1"
size = "s-1vcpu-2gb"

View File

@@ -23,11 +23,18 @@ resource "openstack_compute_secgroup_v2" "multicloud_home" {
ip_protocol = "tcp"
cidr = "0.0.0.0/0"
}
rule {
from_port = 6443
to_port = 6443
ip_protocol = "tcp"
cidr = "0.0.0.0/0"
}
}
data "openstack_images_image_v2" "ubuntu" {
provider = openstack.home
name = "xenial-image"
name = "debian-10-image"
most_recent = true
}
@@ -37,7 +44,7 @@ resource "openstack_compute_instance_v2" "home-master" {
flavor_name = "g1.medium"
key_pair = "multicloud"
security_groups = [openstack_compute_secgroup_v2.multicloud_home.name]
image_name = "xenial-image"
image_name = "debian-10-image"
user_data = data.template_file.user-data.rendered
network {
name = "GATEWAY_NET"
@@ -49,10 +56,11 @@ resource "openstack_compute_instance_v2" "home-master" {
block_device {
uuid = data.openstack_images_image_v2.ubuntu.id
source_type = "image"
volume_size = 20
volume_size = 50
volume_type = "standard"
boot_index = 0
destination_type = "volume"
delete_on_termination = false
delete_on_termination = true
}
count = 1
}
@@ -74,10 +82,10 @@ resource "openstack_compute_instance_v2" "home-worker" {
block_device {
uuid = data.openstack_images_image_v2.ubuntu.id
source_type = "image"
volume_size = 20
volume_size = 50
boot_index = 0
destination_type = "volume"
delete_on_termination = false
delete_on_termination = true
}
count = 1
}

View File

@@ -23,6 +23,13 @@ resource "openstack_compute_secgroup_v2" "multicloud_ramnode" {
ip_protocol = "tcp"
cidr = "0.0.0.0/0"
}
rule {
from_port = 6443
to_port = 6443
ip_protocol = "tcp"
cidr = "0.0.0.0/0"
}
}
resource "openstack_compute_instance_v2" "ramnode-master" {
@@ -31,7 +38,7 @@ resource "openstack_compute_instance_v2" "ramnode-master" {
flavor_name = "2GB SKVM"
key_pair = "multicloud"
security_groups = [openstack_compute_secgroup_v2.multicloud_ramnode.name]
image_name = "Ubuntu 20.04 Server Cloud"
image_name = "Debian 10 Cloud"
user_data = data.template_file.user-data.rendered
network {
name = "Public"
@@ -49,7 +56,7 @@ resource "openstack_compute_instance_v2" "ramnode-worker" {
flavor_name = "2GB SKVM"
key_pair = "multicloud"
security_groups = [openstack_compute_secgroup_v2.multicloud_ramnode.name]
image_name = "Ubuntu 20.04 Server Cloud"
image_name = "Debian 10 Cloud"
user_data = data.template_file.user-data.rendered
network {
name = "Public"

View File

@@ -5,7 +5,7 @@ resource "local_file" "hosts_cfg" {
ramnode_masters = "${join("\n", openstack_compute_instance_v2.ramnode-master.*.network.0.fixed_ip_v4)}"
home_workers = "${join("\n", openstack_compute_instance_v2.home-worker.*.network.0.fixed_ip_v4)}"
home_masters = "${join("\n", openstack_compute_instance_v2.home-master.*.network.0.fixed_ip_v4)}"
do_workers = "${join("\n", digitalocean_droplet.worker.*.ipv4_address)}"
do_workers = "${join("\n", digitalocean_droplet.worker.*.ipv4_address) }"
do_masters = "${join("\n", digitalocean_droplet.master.*.ipv4_address)}"
}
)

View File

@@ -1,6 +1,36 @@
#!/bin/bash
#!/bin/bash
apt-get update
apt-get install python-dev python-pip curl sudo -y
if ! which docker > /dev/null; then
curl -s -L https://raw.githubusercontent.com/rancher/install-docker/master/19.03.9.sh | bash
fi
curl -s https://install.zerotier.com | sudo bash
zerotier-cli join ${zerotier_network}
curl -s https://install.zerotier.com | bash
zerotier-cli join ${zerotier_network}
user=debian
if ! cat /etc/passwd | grep debian; then
# Add the user (--gecos "" ensures that this runs non-interactively)
adduser --disabled-password --gecos "" $user
# Give read-only access to log files by adding the user to adm group
# Other groups that you may want to add are apache, nginx, mysql etc. for their log files
usermod -a -G adm $user
# Give sudo access by adding the user to sudo group
usermod -a -G sudo $user
# Allow passwordless sudo
echo "$user ALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers.d/$user
# Add the user's auth key to allow ssh access
mkdir /home/$user/.ssh
cp /root/.ssh/authorized_keys /home/$user/.ssh/authorized_keys
# Change ownership and access modes for the new directory/file
chown -R $user:$user /home/$user/.ssh
chmod -R go-rx /home/$user/.ssh
fi
usermod -a -G docker $user
# for RKE
# iptables -I INPUT -j ACCEPT

7
test.env Normal file
View File

@@ -0,0 +1,7 @@
export DIGITALOCEAN_ACCESS_TOKEN=
export RAMNODE_USERNAME=
export RAMNODE_PASSWORD=
export HOME_USERNAME=
export HOME_PASSWORD=
export ZEROTIER_NETWORK=
export CLOUDFLARE_API=