1
0
mirror of https://github.com/jcwimer/startup-infrastructure synced 2026-03-24 14:24:43 +00:00

Switched from docker swarm to Kubernetes with rke

This commit is contained in:
2020-05-11 19:39:13 -04:00
parent 01f384ac14
commit 1e5724ec1d
30 changed files with 458 additions and 488 deletions

View File

@@ -11,4 +11,4 @@ name = "pypi"
[packages]
ansible = "==2.4.0.0"
ansible = "==2.9.0.0"

238
Pipfile.lock generated
View File

@@ -1,238 +0,0 @@
{
"_meta": {
"hash": {
"sha256": "5e3ab379ececd07b53a4358359347ad15d8b52a1450667be72eb7c2a01c01487"
},
"pipfile-spec": 6,
"requires": {},
"sources": [
{
"name": "pypi",
"url": "https://pypi.python.org/simple",
"verify_ssl": true
}
]
},
"default": {
"ansible": {
"hashes": [
"sha256:1a276fee7f72d4e6601a7994879e8467edb763dacc3e215258cfe71350b77c76"
],
"index": "pypi",
"version": "==2.4.0.0"
},
"asn1crypto": {
"hashes": [
"sha256:2f1adbb7546ed199e3c90ef23ec95c5cf3585bac7d11fb7eb562a3fe89c64e87",
"sha256:9d5c20441baf0cb60a4ac34cc447c6c189024b6b4c6cd7877034f4965c464e49"
],
"version": "==0.24.0"
},
"bcrypt": {
"hashes": [
"sha256:01477981abf74e306e8ee31629a940a5e9138de000c6b0898f7f850461c4a0a5",
"sha256:054d6e0acaea429e6da3613fcd12d05ee29a531794d96f6ab959f29a39f33391",
"sha256:0872eeecdf9a429c1420158500eedb323a132bc5bf3339475151c52414729e70",
"sha256:09a3b8c258b815eadb611bad04ca15ec77d86aa9ce56070e1af0d5932f17642a",
"sha256:0f317e4ffbdd15c3c0f8ab5fbd86aa9aabc7bea18b5cc5951b456fe39e9f738c",
"sha256:2788c32673a2ad0062bea850ab73cffc0dba874db10d7a3682b6f2f280553f20",
"sha256:321d4d48be25b8d77594d8324c0585c80ae91ac214f62db9098734e5e7fb280f",
"sha256:346d6f84ff0b493dbc90c6b77136df83e81f903f0b95525ee80e5e6d5e4eef84",
"sha256:34dd60b90b0f6de94a89e71fcd19913a30e83091c8468d0923a93a0cccbfbbff",
"sha256:3b4c23300c4eded8895442c003ae9b14328ae69309ac5867e7530de8bdd7875d",
"sha256:43d1960e7db14042319c46925892d5fa99b08ff21d57482e6f5328a1aca03588",
"sha256:49e96267cd9be55a349fd74f9852eb9ae2c427cd7f6455d0f1765d7332292832",
"sha256:63e06ffdaf4054a89757a3a1ab07f1b922daf911743114a54f7c561b9e1baa58",
"sha256:67ed1a374c9155ec0840214ce804616de49c3df9c5bc66740687c1c9b1cd9e8d",
"sha256:6b662a5669186439f4f583636c8d6ea77cf92f7cfe6aae8d22edf16c36840574",
"sha256:6efd9ca20aefbaf2e7e6817a2c6ed4a50ff6900fafdea1bcb1d0e9471743b144",
"sha256:8569844a5d8e1fdde4d7712a05ab2e6061343ac34af6e7e3d7935b2bd1907bfd",
"sha256:8629ea6a8a59f865add1d6a87464c3c676e60101b8d16ef404d0a031424a8491",
"sha256:988cac675e25133d01a78f2286189c1f01974470817a33eaf4cfee573cfb72a5",
"sha256:9a6fedda73aba1568962f7543a1f586051c54febbc74e87769bad6a4b8587c39",
"sha256:9eced8962ce3b7124fe20fd358cf8c7470706437fa064b9874f849ad4c5866fc",
"sha256:a005ed6163490988711ff732386b08effcbf8df62ae93dd1e5bda0714fad8afb",
"sha256:ae35dbcb6b011af6c840893b32399252d81ff57d52c13e12422e16b5fea1d0fb",
"sha256:b1e8491c6740f21b37cca77bc64677696a3fb9f32360794d57fa8477b7329eda",
"sha256:c906bdb482162e9ef48eea9f8c0d967acceb5c84f2d25574c7d2a58d04861df1",
"sha256:cb18ffdc861dbb244f14be32c47ab69604d0aca415bee53485fcea4f8e93d5ef",
"sha256:cc2f24dc1c6c88c56248e93f28d439ee4018338567b0bbb490ea26a381a29b1e",
"sha256:d860c7fff18d49e20339fc6dffc2d485635e36d4b2cccf58f45db815b64100b4",
"sha256:d86da365dda59010ba0d1ac45aa78390f56bf7f992e65f70b3b081d5e5257b09",
"sha256:e22f0997622e1ceec834fd25947dc2ee2962c2133ea693d61805bc867abaf7ea",
"sha256:f2fe545d27a619a552396533cddf70d83cecd880a611cdfdbb87ca6aec52f66b",
"sha256:f425e925485b3be48051f913dbe17e08e8c48588fdf44a26b8b14067041c0da6",
"sha256:f7fd3ed3745fe6e81e28dc3b3d76cce31525a91f32a387e1febd6b982caf8cdb",
"sha256:f9210820ee4818d84658ed7df16a7f30c9fba7d8b139959950acef91745cc0f7"
],
"version": "==3.1.4"
},
"cffi": {
"hashes": [
"sha256:151b7eefd035c56b2b2e1eb9963c90c6302dc15fbd8c1c0a83a163ff2c7d7743",
"sha256:1553d1e99f035ace1c0544050622b7bc963374a00c467edafac50ad7bd276aef",
"sha256:1b0493c091a1898f1136e3f4f991a784437fac3673780ff9de3bcf46c80b6b50",
"sha256:2ba8a45822b7aee805ab49abfe7eec16b90587f7f26df20c71dd89e45a97076f",
"sha256:3bb6bd7266598f318063e584378b8e27c67de998a43362e8fce664c54ee52d30",
"sha256:3c85641778460581c42924384f5e68076d724ceac0f267d66c757f7535069c93",
"sha256:3eb6434197633b7748cea30bf0ba9f66727cdce45117a712b29a443943733257",
"sha256:495c5c2d43bf6cebe0178eb3e88f9c4aa48d8934aa6e3cddb865c058da76756b",
"sha256:4c91af6e967c2015729d3e69c2e51d92f9898c330d6a851bf8f121236f3defd3",
"sha256:57b2533356cb2d8fac1555815929f7f5f14d68ac77b085d2326b571310f34f6e",
"sha256:770f3782b31f50b68627e22f91cb182c48c47c02eb405fd689472aa7b7aa16dc",
"sha256:79f9b6f7c46ae1f8ded75f68cf8ad50e5729ed4d590c74840471fc2823457d04",
"sha256:7a33145e04d44ce95bcd71e522b478d282ad0eafaf34fe1ec5bbd73e662f22b6",
"sha256:857959354ae3a6fa3da6651b966d13b0a8bed6bbc87a0de7b38a549db1d2a359",
"sha256:87f37fe5130574ff76c17cab61e7d2538a16f843bb7bca8ebbc4b12de3078596",
"sha256:95d5251e4b5ca00061f9d9f3d6fe537247e145a8524ae9fd30a2f8fbce993b5b",
"sha256:9d1d3e63a4afdc29bd76ce6aa9d58c771cd1599fbba8cf5057e7860b203710dd",
"sha256:a36c5c154f9d42ec176e6e620cb0dd275744aa1d804786a71ac37dc3661a5e95",
"sha256:a6a5cb8809091ec9ac03edde9304b3ad82ad4466333432b16d78ef40e0cce0d5",
"sha256:ae5e35a2c189d397b91034642cb0eab0e346f776ec2eb44a49a459e6615d6e2e",
"sha256:b0f7d4a3df8f06cf49f9f121bead236e328074de6449866515cea4907bbc63d6",
"sha256:b75110fb114fa366b29a027d0c9be3709579602ae111ff61674d28c93606acca",
"sha256:ba5e697569f84b13640c9e193170e89c13c6244c24400fc57e88724ef610cd31",
"sha256:be2a9b390f77fd7676d80bc3cdc4f8edb940d8c198ed2d8c0be1319018c778e1",
"sha256:ca1bd81f40adc59011f58159e4aa6445fc585a32bb8ac9badf7a2c1aa23822f2",
"sha256:d5d8555d9bfc3f02385c1c37e9f998e2011f0db4f90e250e5bc0c0a85a813085",
"sha256:e55e22ac0a30023426564b1059b035973ec82186ddddbac867078435801c7801",
"sha256:e90f17980e6ab0f3c2f3730e56d1fe9bcba1891eeea58966e89d352492cc74f4",
"sha256:ecbb7b01409e9b782df5ded849c178a0aa7c906cf8c5a67368047daab282b184",
"sha256:ed01918d545a38998bfa5902c7c00e0fee90e957ce036a4000a88e3fe2264917",
"sha256:edabd457cd23a02965166026fd9bfd196f4324fe6032e866d0f3bd0301cd486f",
"sha256:fdf1c1dc5bafc32bc5d08b054f94d659422b05aba244d6be4ddc1c72d9aa70fb"
],
"version": "==1.11.5"
},
"cryptography": {
"hashes": [
"sha256:02602e1672b62e803e08617ec286041cc453e8d43f093a5f4162095506bc0beb",
"sha256:10b48e848e1edb93c1d3b797c83c72b4c387ab0eb4330aaa26da8049a6cbede0",
"sha256:17db09db9d7c5de130023657be42689d1a5f60502a14f6f745f6f65a6b8195c0",
"sha256:227da3a896df1106b1a69b1e319dce218fa04395e8cc78be7e31ca94c21254bc",
"sha256:2cbaa03ac677db6c821dac3f4cdfd1461a32d0615847eedbb0df54bb7802e1f7",
"sha256:31db8febfc768e4b4bd826750a70c79c99ea423f4697d1dab764eb9f9f849519",
"sha256:4a510d268e55e2e067715d728e4ca6cd26a8e9f1f3d174faf88e6f2cb6b6c395",
"sha256:6a88d9004310a198c474d8a822ee96a6dd6c01efe66facdf17cb692512ae5bc0",
"sha256:76936ec70a9b72eb8c58314c38c55a0336a2b36de0c7ee8fb874a4547cadbd39",
"sha256:7e3b4aecc4040928efa8a7cdaf074e868af32c58ffc9bb77e7bf2c1a16783286",
"sha256:8168bcb08403ef144ff1fb880d416f49e2728101d02aaadfe9645883222c0aa5",
"sha256:8229ceb79a1792823d87779959184a1bf95768e9248c93ae9f97c7a2f60376a1",
"sha256:8a19e9f2fe69f6a44a5c156968d9fc8df56d09798d0c6a34ccc373bb186cee86",
"sha256:8d10113ca826a4c29d5b85b2c4e045ffa8bad74fb525ee0eceb1d38d4c70dfd6",
"sha256:be495b8ec5a939a7605274b6e59fbc35e76f5ad814ae010eb679529671c9e119",
"sha256:dc2d3f3b1548f4d11786616cf0f4415e25b0fbecb8a1d2cd8c07568f13fdde38",
"sha256:e4aecdd9d5a3d06c337894c9a6e2961898d3f64fe54ca920a72234a3de0f9cb3",
"sha256:e79ab4485b99eacb2166f3212218dd858258f374855e1568f728462b0e6ee0d9",
"sha256:f995d3667301e1754c57b04e0bae6f0fa9d710697a9f8d6712e8cca02550910f"
],
"version": "==2.3.1"
},
"enum34": {
"hashes": [
"sha256:2d81cbbe0e73112bdfe6ef8576f2238f2ba27dd0d55752a776c41d38b7da2850",
"sha256:644837f692e5f550741432dd3f223bbb9852018674981b1664e5dc339387588a",
"sha256:6bd0f6ad48ec2aa117d3d141940d484deccda84d4fcd884f5c3d93c23ecd8c79",
"sha256:8ad8c4783bf61ded74527bffb48ed9b54166685e4230386a9ed9b1279e2df5b1"
],
"markers": "python_version < '3'",
"version": "==1.1.6"
},
"idna": {
"hashes": [
"sha256:156a6814fb5ac1fc6850fb002e0852d56c0c8d2531923a51032d1b70760e186e",
"sha256:684a38a6f903c1d71d6d5fac066b58d7768af4de2b832e426ec79c30daa94a16"
],
"version": "==2.7"
},
"ipaddress": {
"hashes": [
"sha256:64b28eec5e78e7510698f6d4da08800a5c575caa4a286c93d651c5d3ff7b6794",
"sha256:b146c751ea45cad6188dd6cf2d9b757f6f4f8d6ffb96a023e6f2e26eea02a72c"
],
"markers": "python_version < '3'",
"version": "==1.0.22"
},
"jinja2": {
"hashes": [
"sha256:74c935a1b8bb9a3947c50a54766a969d4846290e1e788ea44c1392163723c3bd",
"sha256:f84be1bb0040caca4cea721fcbbbbd61f9be9464ca236387158b0feea01914a4"
],
"version": "==2.10"
},
"markupsafe": {
"hashes": [
"sha256:a6be69091dac236ea9c6bc7d012beab42010fa914c459791d627dad4910eb665"
],
"version": "==1.0"
},
"paramiko": {
"hashes": [
"sha256:3c16b2bfb4c0d810b24c40155dbfd113c0521e7e6ee593d704e84b4c658a1f3b",
"sha256:a8975a7df3560c9f1e2b43dc54ebd40fd00a7017392ca5445ce7df409f900fcb"
],
"version": "==2.4.2"
},
"pyasn1": {
"hashes": [
"sha256:b9d3abc5031e61927c82d4d96c1cec1e55676c1a991623cfed28faea73cdd7ca",
"sha256:f58f2a3d12fd754aa123e9fa74fb7345333000a035f3921dbdaa08597aa53137"
],
"version": "==0.4.4"
},
"pycparser": {
"hashes": [
"sha256:a988718abfad80b6b157acce7bf130a30876d27603738ac39f140993246b25b3"
],
"version": "==2.19"
},
"pynacl": {
"hashes": [
"sha256:05c26f93964373fc0abe332676cb6735f0ecad27711035b9472751faa8521255",
"sha256:0c6100edd16fefd1557da078c7a31e7b7d7a52ce39fdca2bec29d4f7b6e7600c",
"sha256:0d0a8171a68edf51add1e73d2159c4bc19fc0718e79dec51166e940856c2f28e",
"sha256:1c780712b206317a746ace34c209b8c29dbfd841dfbc02aa27f2084dd3db77ae",
"sha256:2424c8b9f41aa65bbdbd7a64e73a7450ebb4aa9ddedc6a081e7afcc4c97f7621",
"sha256:2d23c04e8d709444220557ae48ed01f3f1086439f12dbf11976e849a4926db56",
"sha256:30f36a9c70450c7878053fa1344aca0145fd47d845270b43a7ee9192a051bf39",
"sha256:37aa336a317209f1bb099ad177fef0da45be36a2aa664507c5d72015f956c310",
"sha256:4943decfc5b905748f0756fdd99d4f9498d7064815c4cf3643820c9028b711d1",
"sha256:57ef38a65056e7800859e5ba9e6091053cd06e1038983016effaffe0efcd594a",
"sha256:5bd61e9b44c543016ce1f6aef48606280e45f892a928ca7068fba30021e9b786",
"sha256:6482d3017a0c0327a49dddc8bd1074cc730d45db2ccb09c3bac1f8f32d1eb61b",
"sha256:7d3ce02c0784b7cbcc771a2da6ea51f87e8716004512493a2b69016326301c3b",
"sha256:a14e499c0f5955dcc3991f785f3f8e2130ed504fa3a7f44009ff458ad6bdd17f",
"sha256:a39f54ccbcd2757d1d63b0ec00a00980c0b382c62865b61a505163943624ab20",
"sha256:aabb0c5232910a20eec8563503c153a8e78bbf5459490c49ab31f6adf3f3a415",
"sha256:bd4ecb473a96ad0f90c20acba4f0bf0df91a4e03a1f4dd6a4bdc9ca75aa3a715",
"sha256:e2da3c13307eac601f3de04887624939aca8ee3c9488a0bb0eca4fb9401fc6b1",
"sha256:f67814c38162f4deb31f68d590771a29d5ae3b1bd64b75cf232308e5c74777e0"
],
"version": "==1.3.0"
},
"pyyaml": {
"hashes": [
"sha256:3d7da3009c0f3e783b2c873687652d83b1bbfd5c88e9813fb7e5b03c0dd3108b",
"sha256:3ef3092145e9b70e3ddd2c7ad59bdd0252a94dfe3949721633e41344de00a6bf",
"sha256:40c71b8e076d0550b2e6380bada1f1cd1017b882f7e16f09a65be98e017f211a",
"sha256:558dd60b890ba8fd982e05941927a3911dc409a63dcb8b634feaa0cda69330d3",
"sha256:a7c28b45d9f99102fa092bb213aa12e0aaf9a6a1f5e395d36166639c1f96c3a1",
"sha256:aa7dd4a6a427aed7df6fb7f08a580d68d9b118d90310374716ae90b710280af1",
"sha256:bc558586e6045763782014934bfaf39d48b8ae85a2713117d16c39864085c613",
"sha256:d46d7982b62e0729ad0175a9bc7e10a566fc07b224d2c79fafb5e032727eaa04",
"sha256:d5eef459e30b09f5a098b9cea68bebfeb268697f78d647bd255a085371ac7f3f",
"sha256:e01d3203230e1786cd91ccfdc8f8454c8069c91bee3962ad93b87a4b2860f537",
"sha256:e170a9e6fcfd19021dd29845af83bb79236068bf5fd4df3327c1be18182b2531"
],
"version": "==3.13"
},
"six": {
"hashes": [
"sha256:70e8a77beed4562e7f14fe23a786b54f6296e34344c23bc42f07b15018ff98e9",
"sha256:832dc0e10feb1aa2c68dcc57dbb658f1c7e65b9b61af69048abc87a2db00a0eb"
],
"version": "==1.11.0"
}
},
"develop": {}
}

View File

@@ -15,9 +15,10 @@ This is an Ansible playbook that configures a Kubernetes cluster and deploys a f
3. Pipenv
4. Docker
5. SSH access to all nodes you're deploying to.
6. A standard user with sudo access to all nodes
### Requirements for infrastructure (the machines you're deploying to)
1. Ubuntu
1. Ubuntu or Debian
2. Internet access
### Steps
@@ -38,12 +39,7 @@ You can easily run a lab environment with Vagrant.
2. Install [Vagrant](https://www.vagrantup.com/)
3. Run `vagrant up`
4. Run `vagrant ssh client -c 'bash /vagrant/tests/files/run-test-deploy.sh'`
5. Put the following in your `/etc/hosts` file:
```
192.168.254.2 kanban.test.com
192.168.254.2 mysql-orchestrator.test.com
```
6. Now navigate to any of the services at http://servicename.test.com
5. Now navigate to any of the services at http://servicename.192.168.254.3.xip.io (for example: http://ceph.192.168.254.3.xip.io)
# Development

55
Vagrantfile vendored
View File

@@ -1,36 +1,27 @@
machines = {
'client' => { :ip =>'192.168.254.4', :memory => '512', :cpus => 1, :client => true},
'master1' => { :ip => '192.168.254.2', :memory => '1536', :cpus => 2 },
'worker1' => { :ip => '192.168.254.3', :memory => '1536', :cpus => 2 },
'worker2' => { :ip => '192.168.254.5', :memory => '1536', :cpus => 2 },
'worker3' => { :ip => '192.168.254.6', :memory => '1536', :cpus => 2 },
# 'worker4' => { :ip => '192.168.254.7', :memory => '1536', :cpus => 1 },
}
Vagrant.configure("2") do |config|
config.vm.define "master1" do |bootstrap|
bootstrap.vm.box = "debian/stretch64"
bootstrap.vm.hostname = "bootstrap"
bootstrap.vm.network "private_network", ip: "192.168.254.2"
bootstrap.vm.provision :shell, path: "tests/files/provision-script.sh"
bootstrap.vm.provider "virtualbox" do |v|
v.memory = 2048
v.cpus = 2
end
end
config.vm.define "worker1" do |worker1|
worker1.vm.box = "debian/stretch64"
worker1.vm.hostname = "worker1"
worker1.vm.network "private_network", ip: "192.168.254.3"
worker1.vm.provision :shell, path: "tests/files/provision-script.sh"
worker1.vm.provider "virtualbox" do |v|
v.memory = 2048
v.cpus = 2
end
end
config.vm.define "client" do |client|
client.vm.box = "debian/stretch64"
client.vm.hostname = "client"
client.vm.network "private_network", ip: "192.168.254.4"
client.vm.provision :shell, path: "tests/files/install-pip.sh"
client.vm.provision :shell, path: "tests/files/provision-script.sh"
client.vm.provider "virtualbox" do |v|
v.memory = 512
v.cpus = 1
machines.each do | hostname, attrs|
config.vm.define hostname do |machine|
machine.vm.hostname = hostname
machine.vm.box = "debian/stretch64"
machine.vm.network :private_network, :ip => attrs[:ip], netmask: "255.255.255.0"
machine.vm.provider "virtualbox" do |v|
v.memory = attrs[:memory]
v.cpus = attrs[:cpus]
end
machine.vm.provision :shell, path: "tests/files/provision-script.sh"
if attrs[:client] == true
machine.vm.provision :shell, path: "tests/files/install-pip.sh"
end
end
end
end

View File

@@ -1,19 +1,56 @@
---
# Variables listed here are applicable to all host groups
### Software versions
docker_ce_version_to_install: 18.03.1
### rke variables
rke_directory: /root/rke
rke_ssh_key_location: /root/id_rsa
rke_node_directory: /opt/rke
rke_version: 0.3.1
rke_cluster_name: rke-k8s
### User stuff
standard_user: vagrant
################################ REQUIRED ################################
################################ User stuff
standard_user: ubuntu
chosen_timezone: "America/New_York"
# root domain for all services. You should have an A record for *.root_domain. For example, if your domain is test.com you should have an A record for *.test.com pointing to your node.
# this will allow automatic dns for for things like dokuwiki.test.com and portainer.test.com
root_domain: test.com
root_domain: test.com
# the directory on your localhost to store all deployment yaml for apps deployed
# this directory will be created but the standard_user will need permissions to write to create the directory
startup_infrastructure_directory: /home/{{ standard_user }}/startup-infrastructure
# the location on your localhost of the ssh key to log into all of your nodes
rke_ssh_key_location: /home/{{ standard_user }}/.ssh/id_rsa
################################ STORAGE CONFIG
# Only enable 1 of these
# 1 is required
# use True for the one you'd like to enable
# if you do not want to use cloud native block storage like ebs, cinder, etc then use this. Rook creates a local ceph cluster on your Kubernetes nodes.
#rook_enabled: False
# if you are not a fan of ceph, you can use Longhorn for block storage in your Kubernetes cluster.
#longhorn_enabled: False
################################ MYSQL CONFIG
mysql_root_password: password
mysql_replicas: 3
mysql_cluster_name: mysql-cluster
################################ OPTIONAL ################################
################################ Startup Infrastructure
################################ RKE (Kubernetes deployment) variables
# the directory on your localhost to download and deploy rke
#rke_directory: /home/{{ standard_user }}/rke
# the directory on all nodes where the rke statefile and kubeconfig will go
#rke_node_directory: /opt/rke
#rke_version: 1.0.8
#rke_cluster_name: rke-k8s
#kubernetes_version: 1.15.11
# if your kubernetes network will run over an interface that is not the default interface in the OS then set this value
#kubernetes_network_interface: eth1
################################ Software versions
#docker_ce_version_to_install: 19.03.5

View File

@@ -1,19 +1,7 @@
---
- name: Pre rke
- name: Deploy Kubernetes
hosts: kube-masters kube-workers
gather_facts: yes
serial: 100%
tasks:
- include: ../roles/kubernetes/tasks/pre-rke.yml
- name: Set up Kubernetes
hosts: localhost
tasks:
- include: ../roles/kubernetes/tasks/main.yml
- name: Post rke
hosts: kube-masters kube-workers
gather_facts: yes
serial: 100%
tasks:
- include: ../roles/kubernetes/tasks/post-rke.yml
roles:
- role: ../roles/kubernetes

View File

@@ -14,15 +14,19 @@
- name: Apply common configuration to all nodes
hosts: all
user: root
serial: 100%
tasks:
- include: ../roles/common/tasks/main.yml
roles:
- role: ../roles/common
- import_playbook: kubernetes.yml
- name: Deploy startup-infrastructure to kubernetes
hosts: localhost
connection: local
tasks:
- include: ../roles/startup-infrastructure/tasks/main.yml
roles:
- role: ../roles/startup-infrastructure
- role: ../roles/rook-ceph
when: rook_enabled is defined and rook_enabled | bool == True
- role: ../roles/longhorn
when: longhorn_enabled is defined and longhorn_enabled | bool == True
- role: ../roles/presslabs-mysql

View File

@@ -1 +0,0 @@
ansible==2.4.0.0

View File

@@ -0,0 +1,2 @@
---
docker_ce_version_to_install: 19.03.8

View File

@@ -7,6 +7,7 @@
# apt: upgrade=dist
- name: Update apt
apt: update_cache=yes
become: true
- name: Install standard programs
apt: name={{ item }} state=present force=yes
@@ -31,37 +32,50 @@
- build-essential
- tmux
- sudo
become: true
- name: Install required packages for Longhorn
apt: name={{ item }} state=present force=yes
with_items:
- open-iscsi
- curl
become: true
when: longhorn_enabled is defined and longhorn_enabled | bool == True
- name: Add docker key
apt_key:
url: https://download.docker.com/linux/{{ ansible_distribution|lower }}/gpg
state: present
become: true
- name: Add docker repo
apt_repository:
repo: deb [arch=amd64] https://download.docker.com/linux/{{ ansible_distribution|lower }} {{ ansible_distribution_release }} stable
state: present
become: true
- name: Update apt
apt: update_cache=yes
become: true
- name: Install docker-ce
shell: >
apt-get install -y -qq docker-ce=$(apt-cache madison docker-ce | grep "{{ docker_ce_version_to_install }}" | awk {'print $3'})
become: true
- name: Add standard_user to docker group
user:
name: "{{ standard_user }}"
groups: docker
append: yes
become: true
- name: Set timezone to NewYork
timezone:
name: "{{ chosen_timezone }}"
ignore_errors: true
- name: Replace sudoers file
template: src=../roles/common/templates/sudoers.j2 dest=/etc/sudoers
become: true
- name: Create /etc/docker
file:
@@ -70,16 +84,16 @@
owner: root
mode: 700
state: directory
become: true
- name: Replace docker daemon file
template: src=../roles/common/templates/docker-daemon.json.j2 dest=/etc/docker/daemon.json
register: dockerdaemon
become: true
- name: Restart docker if daemon changes
service:
name: docker
state: restarted
when: dockerdaemon.changed
- name: Creates directory
file: path=/data state=directory
become: true

View File

@@ -1,30 +0,0 @@
#
# This file MUST be edited with the 'visudo' command as root.
#
# Please consider adding local content in /etc/sudoers.d/ instead of
# directly modifying this file.
#
# See the man page for details on how to write a sudoers file.
#
Defaults env_reset
Defaults mail_badpass
Defaults secure_path="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
# Host alias specification
# User alias specification
# Cmnd alias specification
# User privilege specification
root ALL=(ALL:ALL) ALL
# Members of the admin group may gain root privileges
%admin ALL=(ALL) ALL
# Allow members of group sudo to execute any command
%sudo ALL=(ALL) NOPASSWD:ALL
# See sudoers(5) for more information on "#include" directives:
#includedir /etc/sudoers.d

View File

@@ -0,0 +1,6 @@
---
rke_directory: /home/{{ standard_user }}/rke
rke_node_directory: /opt/rke
rke_version: 1.0.8
rke_cluster_name: rke-k8s
kubernetes_version: 1.15.11

View File

@@ -0,0 +1,70 @@
---
- name: Create RKE directory
file:
path: "{{ rke_directory }}"
state: directory
mode: '0774'
owner: "{{ standard_user }}"
group: "{{ standard_user }}"
delegate_to: localhost
run_once: true
- name: Create RKE Configs directory
file:
path: "{{ rke_directory }}/configs"
state: directory
delegate_to: localhost
run_once: true
- name: Install RKE
get_url:
dest: "{{ rke_directory }}/rke"
url: https://github.com/rancher/rke/releases/download/v{{ rke_version }}/rke_linux-amd64
delegate_to: localhost
run_once: true
- name: Make RKE executable
file:
dest: "{{ rke_directory }}/rke"
mode: +x
delegate_to: localhost
run_once: true
- name: Put RKE cluster config in place
template:
src: ../templates/rke-cluster-deployment.yaml
dest: "{{ rke_directory }}/{{ rke_cluster_name }}.yaml"
delegate_to: localhost
run_once: true
- name: Put RKE configs in place
template:
src: ../templates/rke-configs/{{ item }}.j2
dest: "{{ rke_directory }}/configs/{{ item }}"
with_items:
- kube-state-metrics-deployment.yaml
- kube-state-metrics-service.yaml
- kube-state-metrics-rbac.yaml
delegate_to: localhost
run_once: true
- name: Run RKE
shell: >
bash -c "{{ rke_directory }}/rke up --config {{ rke_directory }}/{{ rke_cluster_name }}.yaml"
delegate_to: localhost
run_once: true
retries: 5
delay: 5
register: rke_install
until: rke_install.rc == 0
- name: Set permissions on rke directory
file:
path: "{{ rke_directory }}"
state: directory
mode: '0774'
owner: "{{ standard_user }}"
group: "{{ standard_user }}"
recurse: yes
delegate_to: localhost
run_once: true

View File

@@ -1,74 +1,9 @@
---
- name: Create RKE directory
file:
path: "{{ rke_directory }}"
state: directory
mode: '0774'
owner: "{{ standard_user }}"
group: "{{ standard_user }}"
delegate_to: localhost
run_once: true
become: true
- name: RKE Pre Tasks
import_tasks: pre-rke.yml
- name: Create RKE Configs directory
file:
path: "{{ rke_directory }}/configs"
state: directory
delegate_to: localhost
run_once: true
become: true
- name: RKE Deploy Kubernetes
import_tasks: deploy-rke.yml
- name: Install RKE
get_url:
dest: "{{ rke_directory }}/rke"
url: https://github.com/rancher/rke/releases/download/v{{ rke_version }}/rke_linux-amd64
delegate_to: localhost
run_once: true
become: true
- name: Make RKE executable
file:
dest: "{{ rke_directory }}/rke"
mode: +x
delegate_to: localhost
run_once: true
become: true
- name: Put RKE cluster config in place
template:
src: ../templates/rke-cluster-deployment.yaml.j2
dest: "{{ rke_directory }}/{{ rke_cluster_name }}.yaml"
delegate_to: localhost
run_once: true
become: true
- name: Put RKE configs in place
template:
src: ../templates/rke-configs/{{ item }}.j2
dest: "{{ rke_directory }}/configs/{{ item }}"
with_items:
- kube-state-metrics-deployment.yaml
- kube-state-metrics-service.yaml
- kube-state-metrics-rbac.yaml
delegate_to: localhost
run_once: true
become: true
- name: Run RKE
shell: >
bash -c "{{ rke_directory }}/rke up --config {{ rke_directory }}/{{ rke_cluster_name }}.yaml"
delegate_to: localhost
run_once: true
become: true
- name: Set permissions on rke directory
file:
path: "{{ rke_directory }}"
state: directory
mode: '0774'
owner: "{{ standard_user }}"
group: "{{ standard_user }}"
recurse: yes
delegate_to: localhost
run_once: true
become: true
- name: RKE Post Tasks
import_tasks: post-rke.yml

View File

@@ -9,4 +9,18 @@
copy:
src: "{{ rke_directory }}/{{ rke_cluster_name }}.rkestate"
dest: "{{ rke_node_directory }}/{{ rke_cluster_name }}.rkestate"
become: true
become: true
- name: Create .kube folder in standard users home directory
file:
path: /home/{{ standard_user }}/.kube
state: directory
become: true
delegate_to: localhost
- name: Copy kube config to standard users home directory
copy:
src: "{{ rke_directory }}/kube_config_{{ rke_cluster_name }}.yaml"
dest: /home/{{ standard_user }}/.kube/config
become: true
delegate_to: localhost

View File

@@ -41,6 +41,36 @@
group: "{{ standard_user }}"
become: true
- name: Create flex volume folder
file:
path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec
state: directory
become: true
- name: Create Rook storage Directory
file:
path: /var/lib/rook
state: directory
mode: '0774'
become: true
when: rook_enabled is defined and rook_enabled | bool == True
- name: Create Longhorn Storage Directory
file:
path: /var/lib/rancher/longhorn
state: directory
mode: '0774'
become: true
when: longhorn_enabled is defined and longhorn_enabled | bool == True
- name: Create Longhorn Setting Directory
file:
path: /var/lib/longhorn-setting
state: directory
mode: '0774'
become: true
when: longhorn_enabled is defined and longhorn_enabled | bool == True
- name: Check if RKE cluster state file exists
stat:
path: "{{ rke_node_directory }}/{{ rke_cluster_name }}.rkestate"

View File

@@ -0,0 +1,50 @@
---
ssh_key_path: {{ rke_ssh_key_location }}
cluster_name: {{ rke_cluster_name }}
ignore_docker_version: true
system_images:
kubernetes: rancher/hyperkube:v{{ kubernetes_version }}-rancher1
{% if (rook_enabled is defined and rook_enabled | bool == True) or (longhorn_enabled is defined and longhorn_enabled | bool == True) %}
services:
kubelet:
extra_args:
volume-plugin-dir: /usr/libexec/kubernetes/kubelet-plugins/volume/exec
extra_binds:
- /usr/libexec/kubernetes/kubelet-plugins/volume/exec:/usr/libexec/kubernetes/kubelet-plugins/volume/exec
{% endif %}
network:
plugin: canal
{% if (kubernetes_network_interface is defined) %}
options:
canal_iface: {{ kubernetes_network_interface }}
{% endif %}
nodes:
{% for node in groups['kube-masters'] %}
- address: {{node}}
name: {{node}}
user: {{standard_user}}
role:
- controlplane
- etcd
{% endfor %}
{% for node in groups['kube-workers'] %}
- address: {{node}}
name: {{node}}
user: {{standard_user}}
role:
- worker
{% endfor %}
authentication:
strategy: x509
sans:
- "kubernetes.{{ root_domain }}"

View File

@@ -1,32 +0,0 @@
---
ssh_key_path: {{ rke_ssh_key_location }}
cluster_name: rke_cluster_name
ignore_docker_version: true
system_images:
kubernetes: rancher/hyperkube:v1.15.5-rancher1
nodes:
{% for node in groups['kube-masters'] %}
- address: {{node}}
name: {{node}}
user: {{standard_user}}
role:
- controlplane
- etcd
{% endfor %}
{% for node in groups['kube-workers'] %}
- address: {{node}}
name: {{node}}
user: {{standard_user}}
role:
- worker
{% endfor %}
authentication:
strategy: x509
sans:
- "kubernetes.{{ root_domain }}"

View File

@@ -0,0 +1,2 @@
---
longhorn_version: v0.8.1

View File

@@ -0,0 +1,27 @@
---
- name: Create Longhorn config Directory
file:
path: "{{ startup_infrastructure_directory }}/longhorn"
state: directory
- name: Get Longhorn Install
get_url:
url: https://raw.githubusercontent.com/longhorn/longhorn/{{ longhorn_version }}/deploy/longhorn.yaml
dest: "{{ startup_infrastructure_directory }}/longhorn/longhorn.yaml"
- name: Put other Longhorn yaml in place
template:
src: ../templates/{{ item }}.j2
dest: "{{ startup_infrastructure_directory }}/longhorn/{{ item }}"
with_items:
- longhorn-ingress.yaml
- name: Install Longhorn
command: kubectl apply -f {{ startup_infrastructure_directory }}/longhorn/longhorn.yaml
- name: Install Longhorn Extras
command: kubectl apply -f {{ startup_infrastructure_directory }}/longhorn/
- name: Set default storageclass
command: >
kubectl patch storageclass longhorn -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'

View File

@@ -0,0 +1,22 @@
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: longhorn-ingress
namespace: longhorn-system
annotations:
# cert-manager.io/cluster-issuer: "letsencrypt-prod"
kubernetes.io/ingress.class: "nginx"
# kubernetes.io/tls-acme: "true"
spec:
rules:
- host: longhorn.{{ root_domain }}
http:
paths:
- path: /
backend:
serviceName: longhorn-frontend
servicePort: 80
#tls:
# - hosts:
# - longhorn.{{ root_domain }}
# secretName: longhorn-ssl

View File

@@ -0,0 +1,4 @@
---
mysql_root_password: password
mysql_replicas: 3
mysql_cluster_name: mysql-cluster

View File

@@ -0,0 +1,45 @@
---
- name: Create Mysql Directory
file:
path: "{{ startup_infrastructure_directory }}/presslabs-mysql-operator"
state: directory
- name: Add presslabs helm repo
command: helm repo add presslabs https://presslabs.github.io/charts
- name: Install the presslabs mysql operator
command: helm install presslabs/mysql-operator --name presslabs-mysql-operator
- name: Wait for mysql-operator to be Running
command: >
bash -c "kubectl get pods --all-namespaces | grep presslabs-mysql-operator | grep Running"
retries: 10
delay: 30
register: mysql_operator_init
until: mysql_operator_init.rc == 0
- name: Put mysql cluster yaml in place
template:
src: presslabs-mysql-cluster.yaml.j2
dest: "{{ startup_infrastructure_directory }}/presslabs-mysql-operator/presslabs-mysql-cluster.yaml"
- name: Deploy mysql cluster
command: >
kubectl apply -f {{ startup_infrastructure_directory }}/presslabs-mysql-operator/presslabs-mysql-cluster.yaml
- name: Wait for cluster to be Running
command: >
bash -c "kubectl get mysql | grep {{ mysql_cluster_name }} | grep True"
retries: 10
delay: 60
register: mysql_cluster_init
until: mysql_cluster_init.rc == 0
- name: Test a query on the cluster
command: >
kubectl run mysql-client --image=mysql:5.7 -it --rm --restart=Never \
-- mysql -h {{ mysql_cluster_name }}-mysql -u root -p{{ mysql_root_password }} -e "show databases; show slave status;"
retries: 10
delay: 20
register: mysql_cluster_query
until: mysql_cluster_query.rc == 0

View File

@@ -0,0 +1,18 @@
apiVersion: v1
kind: Secret
metadata:
name: {{ mysql_cluster_name }}-secret
type: Opaque
# use data if password is base64 encoded
#data:
# ROOT_PASSWORD: {{ mysql_root_password }}
stringData:
ROOT_PASSWORD: {{ mysql_root_password }}
---
apiVersion: mysql.presslabs.org/v1alpha1
kind: MysqlCluster
metadata:
name: {{ mysql_cluster_name }}
spec:
replicas: {{ mysql_replicas }}
secretName: {{ mysql_cluster_name }}-secret

View File

@@ -1,13 +1,12 @@
---
- name: Create Startup Infrastructure Directory
file:
path: /opt/startup-infrastructure
path: "{{ startup_infrastructure_directory }}"
state: directory
become: true
- name: Download Helm
get_url:
url: https://get.helm.sh/helm-v2.14.3-linux-amd64.tar.gz
url: https://get.helm.sh/helm-v2.16.6-linux-amd64.tar.gz
dest: /tmp/helm.tar.gz
become: true
@@ -42,21 +41,29 @@
# Create the serviceaccount Tiller (Helm v2 server side) will run as.
# ignore_errors keeps re-runs from failing when the account already exists.
- name: Create Tiller serviceaccount
  command: kubectl -n kube-system create serviceaccount tiller
  become: true
  environment:
    KUBECONFIG: "{{ rke_directory }}/kube_config_{{ rke_cluster_name }}.yaml"
  ignore_errors: true
# Bind the tiller serviceaccount to cluster-admin so it can manage releases.
# NOTE: "command" does not invoke a shell and the folded scalar (>) joins
# these lines with spaces — trailing backslashes would be passed to kubectl
# as literal arguments, so none are used here.
- name: Create Tiller rbac
  become: true
  command: >
    kubectl create clusterrolebinding tiller
    --clusterrole=cluster-admin
    --serviceaccount=kube-system:tiller
  environment:
    KUBECONFIG: "{{ rke_directory }}/kube_config_{{ rke_cluster_name }}.yaml"
  # Match the serviceaccount task: don't fail a re-run when the binding exists.
  ignore_errors: true
# Install Tiller into kube-system using the serviceaccount created above.
- name: Initialize Tiller
  command: helm init --service-account tiller
  become: true
  environment:
    KUBECONFIG: "{{ rke_directory }}/kube_config_{{ rke_cluster_name }}.yaml"
# helm init returns before the tiller pod is scheduled, so poll until it is
# Running. bash -c is needed because "command" does not support pipelines.
- name: Wait for Tiller to be running
  command: >
    bash -c "kubectl -n kube-system get pods | grep tiller | grep Running"
  retries: 10
  delay: 30
  register: tiller_running
  until: tiller_running.rc == 0
# Wait until the tiller-deploy deployment reports a completed rollout.
# The grep pattern must use single quotes: nesting double quotes inside the
# bash -c "..." string would terminate it early and split the pattern into
# separate words, so the match could never succeed as intended.
- name: Wait for Tiller to be initialized
  command: >
    bash -c "kubectl rollout status -w deployment/tiller-deploy --namespace=kube-system | grep 'successfully rolled out'"
  retries: 10
  delay: 30
  register: tiller_init
  until: tiller_init.rc == 0

View File

@@ -1,19 +1,18 @@
---
# Variables listed here are applicable to all host groups
### Software versions
docker_ce_version_to_install: 18.03.1
### rke variables
rke_directory: /opt/rke
rke_ssh_key_location: /vagrant/tests/files/test_rsa
rke_node_directory: /opt/rke
rke_version: 0.3.1
rke_cluster_name: rke-k8s
### User stuff
################################ REQUIRED ################################
################################ User stuff
standard_user: vagrant
chosen_timezone: "America/New_York"
# root domain for all services. You should have an A record for *.root_domain. For example, if your domain is test.com you should have an A record for *.test.com pointing to your node.
# this will allow automatic dns for things like dokuwiki.test.com and portainer.test.com
root_domain: test.com
root_domain: 192.168.254.3.xip.io
rke_ssh_key_location: /home/vagrant/test_rsa
# the directory on your localhost to store all deployment yaml for apps deployed
startup_infrastructure_directory: /home/{{ standard_user }}/startup-infrastructure
################################ STORAGE CONFIG
longhorn_enabled: True
################################ OPTIONAL ################################
################################ RKE (Kubernetes deployment) variables
kubernetes_network_interface: eth1

View File

@@ -1,14 +1,18 @@
#!/bin/bash
mkdir -p /root/.ssh
# Putting test_rsa.pub into root and vagrant authorized keys
echo "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDYa9zstumlg7XkKoNrJMlIN/zteqMA9J4GjuZA7r0xfMPrz4CglxzYKd/BhBpwp/HhU+vSR6vBa15kRODHdPZ+T1oXzMXAmMT3R2ZJRqF280Hsx9sK0X+FZWM84e4a1zQUrxuWyWJ4kKIiaX6DBAmhy8zHNvQ0c4Nk1exfwRicojaze71qrexSas4FHWaI4usC/g3mMKfiML/QX0UWW/G+D8qrg3cK3zClG916XlY/p1h9SWantqz75ea33TtmDNW6iCraKSjVeDGfzhshJsmQ7+/Rr/L4/s7hdpwTqdjSlJTIi61eBxcpDfMWBmsHOMZgnsTZ3wrdYXo70k44moA7 vagrant@test" >> /home/vagrant/.ssh/authorized_keys
echo "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDYa9zstumlg7XkKoNrJMlIN/zteqMA9J4GjuZA7r0xfMPrz4CglxzYKd/BhBpwp/HhU+vSR6vBa15kRODHdPZ+T1oXzMXAmMT3R2ZJRqF280Hsx9sK0X+FZWM84e4a1zQUrxuWyWJ4kKIiaX6DBAmhy8zHNvQ0c4Nk1exfwRicojaze71qrexSas4FHWaI4usC/g3mMKfiML/QX0UWW/G+D8qrg3cK3zClG916XlY/p1h9SWantqz75ea33TtmDNW6iCraKSjVeDGfzhshJsmQ7+/Rr/L4/s7hdpwTqdjSlJTIi61eBxcpDfMWBmsHOMZgnsTZ3wrdYXo70k44moA7 vagrant@test" >> /root/.ssh/authorized_keys
# Setting A record
echo "192.168.254.2 swarm.test.com" >> /etc/hosts
echo "192.168.254.2 portainer.test.com" >> /etc/hosts
# echo "192.168.254.2 mysql-orchestrator.test.com" >> /etc/hosts
# echo "192.168.254.2 kanban.test.com" >> /etc/hosts
cp /vagrant/tests/files/test_rsa /home/vagrant/test_rsa
chmod 600 /home/vagrant/test_rsa
chown vagrant:vagrant /home/vagrant/test_rsa
chown vagrant:vagrant /home/vagrant/test_rsa
apt-get update -qq
apt-get install -y -qq lvm2 curl

3
tests/lib/mysql-query.sh Normal file
View File

@@ -0,0 +1,3 @@
#!/bin/bash
# Smoke test: start a throwaway mysql:5.7 client pod (--rm deletes it when the
# session ends) and run a query against the mysql-cluster-mysql service as
# root. Exit status is the query's, so callers can assert on it.
# NOTE(review): the hardcoded password matches the test fixtures and will be
# visible in `ps` output — acceptable only in a disposable test environment.
kubectl run mysql-client --image=mysql:5.7 -it --rm --restart=Never \
  -- mysql -h mysql-cluster-mysql -u root -ppassword -e "show databases; show slave status;"

View File

@@ -9,7 +9,7 @@ function testbash() {
local name="${1}"
shift
local command="${@}"
eval $command
eval "${command}"
local return=$?
if [[ ! $return -eq 0 ]]; then
echo "${red}FAILED: ${name}${reset}"

View File

@@ -1,4 +1,5 @@
#!/bin/bash
set -eEuo pipefail
project_dir="$(dirname $( dirname $(readlink -f ${BASH_SOURCE[0]})))"
source ${project_dir}/tests/lib/test-function.sh
@@ -24,11 +25,13 @@ function run-tests {
"vagrant ssh client -c 'bash /vagrant/tests/files/run-test-deploy.sh'"
testbash "Running kubectl should not fail" \
"vagrant ssh client -c 'export KUBECONFIG=/opt/rke/kube_config_rke-k8s.yaml; kubectl get nodes'"
"vagrant ssh client -c 'kubectl get nodes'"
# testbash "Portainer was deployed and admin account was initialized" \
# "vagrant ssh client -c 'curl --silent -I \
# -X GET \"http://portainer.test.com/api/users/admin/check\" -H \"accept: application/json\"' | grep 204"
testbash "Longhorn dashboard is running" \
"vagrant ssh client -c 'curl --silent -I -X GET http://longhorn.192.168.254.3.xip.io/dashboard' | grep '200 OK'"
testbash "Running a query on the mysql cluster should not fail" \
"vagrant ssh client -c 'bash /vagrant/tests/lib/mysql-query.sh'"
}
function destroy-infrastructure {