1
0
mirror of https://github.com/jcwimer/docker-swarm-autoscaler synced 2026-03-24 15:04:42 +00:00

First release with tests

This commit is contained in:
2019-12-18 13:35:37 -05:00
parent 4c4c972dbd
commit 0f2424221a
15 changed files with 797 additions and 44 deletions

View File

@@ -1,10 +1,13 @@
# docker-swarm-autoscaler
## Current Release: 0.1.0
This project is intended to bring automatic service scaling to Docker Swarm. This script uses Prometheus paired with cAdvisor metrics to determine cpu usage. It then runs on a manager node to determine whether a service should be autoscaled and, if so, scales the service.
Currently the project only uses cpu to autoscale. If cpu usage reaches 85% the service will scale up, if it reaches 25% it will scale down.
## Usage
1. You can deploy prometheus, cadvisor, and docker-swarm-autoscaler by running `docker stack deploy -c swarm-autoscaler-stack.yml autoscaler`.
1. You can deploy prometheus, cadvisor, and docker-swarm-autoscaler by running `docker stack deploy -c swarm-autoscaler-stack.yml autoscaler` from the root of this repo.
* You can also utilize an already deployed Prometheus and cAdvisor by specifying the `PROMETHEUS_URL` in the docker-swarm-autoscaler environment. `swarm-autoscaler-stack.yml` shows an example of this.
* docker-swarm-autoscaler needs a placement constraint to deploy to a manager node. `swarm-autoscaler-stack.yml` shows an example of this.
2. For services you want to autoscale you will need a deploy label `swarm.autoscaler=true`.
@@ -25,7 +28,6 @@ deploy:
memory: 512M
limits:
cpus: '0.50'
```
## Configuration
@@ -34,3 +36,8 @@ deploy:
| `swarm.autoscaler` | `true` | Required. This enables autoscaling for a service. Anything other than `true` will not enable it |
| `swarm.autoscaler.minimum` | Integer | Optional. This is the minimum number of replicas wanted for a service. The autoscaler will not downscale below this number |
| `swarm.autoscaler.maximum` | Integer | Optional. This is the maximum number of replicas wanted for a service. The autoscaler will not scale up past this number |
## Test
You can deploy a test app with the commands below. Helloworld initially has only 1 replica; the autoscaler will scale it up to the minimum of 3 replicas.
1. `docker stack deploy -c swarm-autoscaler-stack.yml autoscaler`
2. `docker stack deploy -c helloworld.yml hello`

View File

@@ -7,11 +7,12 @@ RUN apt-get update -qq \
ca-certificates \
curl \
software-properties-common \
dnsutils \
&& curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - \
&& add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu xenial stable" \
&& apt-get update -qq \
&& apt-get install -y -qq \
docker-ce=18.03.0~ce-0~ubuntu \
docker-ce=5:19.03.5* \
&& apt-get -qq clean \
&& apt-get autoremove -y \
&& rm -rf \
@@ -22,5 +23,4 @@ RUN apt-get update -qq \
COPY auto-scale.sh /auto-scale.sh
RUN chmod a+x /auto-scale.sh
ENTRYPOINT ["/bin/bash"]
CMD ["/auto-scale.sh"]
CMD ["/auto-scale.sh"]

View File

@@ -1,36 +1,114 @@
#!/bin/bash
LOOP=${LOOP:='yes'}
CPU_PERCENTAGE_UPPER_LIMIT=85
CPU_PERCENTAGE_LOWER_LIMIT=25
PROMETHEUS_API="api/v1/query?query="
PROMETHEUS_QUERY="sum(rate(container_cpu_usage_seconds_total%7Bcontainer_label_com_docker_swarm_task_name%3D~%27.%2B%27%7D%5B5m%5D))BY(container_label_com_docker_swarm_service_name%2Cinstance)*100"
while ls > /dev/null; do
#scale up
for service in $(curl --silent "${PROMETHEUS_URL}/${PROMETHEUS_API}${PROMETHEUS_QUERY}>${CPU_PERCENTAGE_UPPER_LIMIT}" | jq ".data.result[].metric | .container_label_com_docker_swarm_service_name" | sort | uniq); do
service_name=$(echo $service | sed 's/\"//g')
auto_scale_label=$(docker service inspect $service_name | jq '.[].Spec.Labels["swarm.autoscaler"]')
replica_maximum=$(docker service inspect $service_name | jq '.[].Spec.Labels["swarm.autoscaler.maximum"]' | sed 's/\"//g')
if [[ "${auto_scale_label}" == "\"true\"" ]]; then
current_replicas=$(docker service inspect $service_name | jq ".[].Spec.Mode.Replicated | .Replicas")
new_replicas=$(expr $current_replicas + 1)
if [[ $replica_maximum -ge $new_replicas ]]; then
echo scale up $service_name to $new_replicas
docker service scale $service_name=$new_replicas
fi
fi
get_high_cpu_services () {
local prometheus_results="${1}"
local services=""
for service in $(printf "%s$prometheus_results" | jq ".data.result[] | select( all(.value[1]|tonumber; . > $CPU_PERCENTAGE_UPPER_LIMIT) ) | .metric.container_label_com_docker_swarm_service_name" | sed 's/"//g' | sort | uniq); do
services="$services $service"
done
echo $services
}
#scale down
for service in $(curl --silent "${PROMETHEUS_URL}/${PROMETHEUS_API}${PROMETHEUS_QUERY}<${CPU_PERCENTAGE_LOWER_LIMIT}" | jq ".data.result[].metric | .container_label_com_docker_swarm_service_name" | sort | uniq); do
service_name=$(echo $service | sed 's/\"//g')
auto_scale_label=$(docker service inspect $service_name | jq '.[].Spec.Labels["swarm.autoscaler"]')
replica_minimum=$(docker service inspect $service_name | jq '.[].Spec.Labels["swarm.autoscaler.minimum"]' | sed 's/\"//g')
if [[ "${auto_scale_label}" == "\"true\"" ]]; then
current_replicas=$(docker service inspect $service_name | jq ".[].Spec.Mode.Replicated | .Replicas")
new_replicas=$(expr $current_replicas - 1)
if [[ $replica_minimum -le $new_replicas ]]; then
echo scale down $service_name to $new_replicas
docker service scale $service_name=$new_replicas
fi
fi
get_all_services () {
local prometheus_results="${1}"
local services=""
for service in $(printf "%s$prometheus_results" | jq ".data.result[].metric.container_label_com_docker_swarm_service_name" | sed 's/"//g' | sort | uniq); do
services="$services $service"
done
done
echo $services
}
get_low_cpu_services () {
local prometheus_results="${1}"
local services=""
for service in $(printf "%s$prometheus_results" | jq ".data.result[] | select( all(.value[1]|tonumber; . < $CPU_PERCENTAGE_LOWER_LIMIT) ) | .metric.container_label_com_docker_swarm_service_name" | sed 's/"//g' | sort | uniq); do
services="$services $service"
done
echo $services
}
default_scale () {
service_name=$1
auto_scale_label=$(docker service inspect $service_name | jq '.[].Spec.Labels["swarm.autoscaler"]')
replica_minimum=$(docker service inspect $service_name | jq '.[].Spec.Labels["swarm.autoscaler.minimum"]' | sed 's/\"//g')
replica_maximum=$(docker service inspect $service_name | jq '.[].Spec.Labels["swarm.autoscaler.maximum"]' | sed 's/\"//g')
if [[ "${auto_scale_label}" == "\"true\"" ]]; then
echo Service $service has an autoscale label.
current_replicas=$(docker service inspect $service_name | jq ".[].Spec.Mode.Replicated | .Replicas")
if [[ $replica_minimum -gt $current_replicas ]]; then
echo Service $service_name is below the minimum. Scaling to the minimum of $replica_minimum
docker service scale $service_name=$replica_minimum
elif [[ $current_replicas -gt $replica_maximum ]]; then
echo Service $service_name is above the maximum. Scaling to the maximum of $replica_maximum
docker service scale $service_name=$replica_maximum
fi
else
echo Service $service does not have an autoscale label.
fi
}
scale_down () {
service_name=$1
auto_scale_label=$(docker service inspect $service_name | jq '.[].Spec.Labels["swarm.autoscaler"]')
replica_minimum=$(docker service inspect $service_name | jq '.[].Spec.Labels["swarm.autoscaler.minimum"]' | sed 's/\"//g')
if [[ "${auto_scale_label}" == "\"true\"" ]]; then
current_replicas=$(docker service inspect $service_name | jq ".[].Spec.Mode.Replicated | .Replicas")
new_replicas=$(expr $current_replicas - 1)
if [[ $replica_minimum -le $new_replicas ]]; then
echo Scaling down the service $service_name to $new_replicas
docker service scale $service_name=$new_replicas
elif [[ $current_replicas -eq $replica_minimum ]]; then
echo Service $service_name has the minumum number of replicas.
fi
fi
}
scale_up () {
service_name=$1
auto_scale_label=$(docker service inspect $service_name | jq '.[].Spec.Labels["swarm.autoscaler"]')
replica_maximum=$(docker service inspect $service_name | jq '.[].Spec.Labels["swarm.autoscaler.maximum"]' | sed 's/\"//g')
if [[ "${auto_scale_label}" == "\"true\"" ]]; then
current_replicas=$(docker service inspect $service_name | jq ".[].Spec.Mode.Replicated | .Replicas")
new_replicas=$(expr $current_replicas + 1)
if [[ $current_replicas -eq $replica_maximum ]]; then
echo Service $service already has the maximum of $replica_maximum replicas
elif [[ $replica_maximum -ge $new_replicas ]]; then
echo Scaling up the service $service_name to $new_replicas
docker service scale $service_name=$new_replicas
fi
fi
}
main () {
prometheus_initial_results=$(curl --silent "${PROMETHEUS_URL}/${PROMETHEUS_API}${PROMETHEUS_QUERY}" | jq .)
echo Prometheus results
echo $prometheus_initial_results
for service in $(get_all_services "${prometheus_initial_results}"); do
default_scale $service
done
echo Checking for high cpu services
for service in $(get_high_cpu_services "${prometheus_initial_results}"); do
echo Service $service is above $CPU_PERCENTAGE_UPPER_LIMIT percent cpu usage.
scale_up $service
done
echo Checking for low cpu services
for service in $(get_low_cpu_services "${prometheus_initial_results}"); do
echo Service $service is below $CPU_PERCENTAGE_LOWER_LIMIT percent cpu usage.
scale_down $service
done
}
main
while [[ $LOOP == 'yes' ]]; do
echo Waiting 60 seconds for the next test
sleep 60s
main
done

13
helloworld.yml Normal file
View File

@@ -0,0 +1,13 @@
version: '3.3'
services:
helloworld:
image: tutum/hello-world
ports:
- 8080:80
logging:
driver: json-file
deploy:
labels:
swarm.autoscaler: 'true'
swarm.autoscaler.maximum: '4'
swarm.autoscaler.minimum: '3'

View File

@@ -1,2 +0,0 @@
FROM prom/prometheus
COPY prometheus.yml /etc/prometheus/prometheus.yml

View File

@@ -1,11 +1,15 @@
version: "3"
version: "3.7"
networks:
autoscale:
configs:
prometheus_config:
file: ./prometheus.yml
services:
docker-swarm-autoscaler:
image: jcwimer/docker-swarm-autoscaler
image: jcwimer/docker-swarm-autoscaler:0.1.0
volumes:
- /var/run/docker.sock:/var/run/docker.sock:ro
environment:
@@ -31,10 +35,11 @@ services:
- autoscale
volumes:
- /var/run/docker.sock:/var/run/docker.sock:ro
- /:/rootfs
- /var/run:/var/run
- /sys:/sys
- /var/lib/docker/:/var/lib/docker
- /:/rootfs:ro
- /var/run:/var/run:ro
- /sys:/sys:ro
- /var/lib/docker/:/var/lib/docker:ro
- /dev/disk/:/dev/disk:ro
deploy:
mode: global
resources:
@@ -46,16 +51,19 @@ services:
memory: 64M
prometheus:
image: jcwimer/prometheus-swarm-autoscaler
image: prom/prometheus:v2.12.0
networks:
- autoscale
command: --storage.tsdb.retention 1d --config.file=/etc/prometheus/prometheus.yml
command: ["--storage.tsdb.retention.size=1GB", "--config.file=/etc/prometheus/prometheus.yml", "--web.console.libraries=/etc/prometheus/console_libraries", "--web.console.templates=/etc/prometheus/consoles", "--web.enable-lifecycle"]
configs:
- source: prometheus_config
target: /etc/prometheus/prometheus.yml
deploy:
mode: replicated
replicas: 1
placement:
constraints:
- node.role == worker
- node.role == manager
resources:
limits:
cpus: '0.50'

18
tests/Dockerfile Normal file
View File

@@ -0,0 +1,18 @@
FROM ruby:2.6.3
RUN apt-get update -qq && \
apt-get install -y -qq \
jq \
make \
gcc \
bash
RUN echo 'gem: --no-rdoc --no-ri' > /root/.gemrc
RUN gem install bundler
RUN gem update --system
RUN mkdir -p /gemfile
#Cache gems so they don't install on every code change
WORKDIR /gemfile
COPY Gemfile Gemfile
COPY Gemfile.lock Gemfile.lock
RUN bundle install --jobs 4

8
tests/Gemfile Normal file
View File

@@ -0,0 +1,8 @@
source 'https://rubygems.org'
ruby '2.6.3'
gem 'awesome_print', '~> 1.8'
gem 'guard-rspec', require: false
gem 'pry-byebug', '~> 3.7'
gem 'rspec-shell-expectations'
gem 'rake'
gem 'rspec'

75
tests/Gemfile.lock Normal file
View File

@@ -0,0 +1,75 @@
GEM
remote: https://rubygems.org/
specs:
awesome_print (1.8.0)
byebug (11.0.1)
coderay (1.1.2)
diff-lcs (1.3)
ffi (1.11.3)
formatador (0.2.5)
guard (2.16.1)
formatador (>= 0.2.4)
listen (>= 2.7, < 4.0)
lumberjack (>= 1.0.12, < 2.0)
nenv (~> 0.1)
notiffany (~> 0.0)
pry (>= 0.9.12)
shellany (~> 0.0)
thor (>= 0.18.1)
guard-compat (1.2.1)
guard-rspec (4.7.3)
guard (~> 2.1)
guard-compat (~> 1.1)
rspec (>= 2.99.0, < 4.0)
listen (3.2.1)
rb-fsevent (~> 0.10, >= 0.10.3)
rb-inotify (~> 0.9, >= 0.9.10)
lumberjack (1.0.13)
method_source (0.9.2)
nenv (0.3.0)
notiffany (0.1.3)
nenv (~> 0.1)
shellany (~> 0.0)
pry (0.12.2)
coderay (~> 1.1.0)
method_source (~> 0.9.0)
pry-byebug (3.7.0)
byebug (~> 11.0)
pry (~> 0.10)
rake (13.0.1)
rb-fsevent (0.10.3)
rb-inotify (0.10.0)
ffi (~> 1.0)
rspec (3.9.0)
rspec-core (~> 3.9.0)
rspec-expectations (~> 3.9.0)
rspec-mocks (~> 3.9.0)
rspec-core (3.9.0)
rspec-support (~> 3.9.0)
rspec-expectations (3.9.0)
diff-lcs (>= 1.2.0, < 2.0)
rspec-support (~> 3.9.0)
rspec-mocks (3.9.0)
diff-lcs (>= 1.2.0, < 2.0)
rspec-support (~> 3.9.0)
rspec-shell-expectations (1.3.0)
rspec-support (3.9.0)
shellany (0.0.1)
thor (1.0.1)
PLATFORMS
ruby
DEPENDENCIES
awesome_print (~> 1.8)
guard-rspec
pry-byebug (~> 3.7)
rake
rspec
rspec-shell-expectations
RUBY VERSION
ruby 2.6.3p62
BUNDLED WITH
2.1.1

14
tests/README.md Normal file
View File

@@ -0,0 +1,14 @@
# docker-swarm-autoscaler unit tests
### Dependencies (packaged into docker with ./run-tests.sh)
#### [uses rspec-shell-expectations](https://github.com/matthijsgroen/rspec-shell-expectations)
------
1. Ruby 2.6.3
2. jq needs to be installed
### Gotchas
------
1. Do not use ${0} in scripts. Instead use ${BASH_SOURCE[0]}
2. Many times it is necessary to see the stdout and stderr in your test to see what you forgot to mock. For example, if your test is failing
you can do `expect(stdout).to eq ''` and rspec will fail and give you the stdout message. Same with stderr. This will help you mock the things
you might have overlooked.

15
tests/Rakefile Normal file
View File

@@ -0,0 +1,15 @@
require 'rake'
require 'rspec/core/rake_task'
task :default => :test
desc 'Run specs'
RSpec::Core::RakeTask.new(:spec) do |t|
t.pattern = Dir.glob('spec/**/*_spec.rb')
t.rspec_opts = '--format documentation'
t.rspec_opts << ' --color'
end
task :test => [
:spec,
]

35
tests/run-tests.sh Normal file
View File

@@ -0,0 +1,35 @@
#!/usr/bin/env bash
set -ex
function main {
build-image
run-ruby-tests
}
function cd-to-top-of-repo {
cd "$(git rev-parse --show-toplevel)"
}
function build-image {
cd-to-top-of-repo
docker build -t docker-swarm-autoscaler-tests ./tests
}
function run-ruby-tests {
cd-to-top-of-repo
echo 'INFO: Running rspec unit tests...'
local -r container_id=$(
docker create --rm \
-v /var/run/docker.sock:/var/run/docker.sock \
docker-swarm-autoscaler-tests \
bash -c "cd /root/tests && \
bundle exec rake spec"
)
docker cp . "${container_id}:/root/"
trap "docker rm ${container_id}" SIGHUP
docker start --attach --interactive "${container_id}"
}
[[ "${0}" == "${BASH_SOURCE[0]}" ]] && main "${@}"

View File

@@ -0,0 +1,71 @@
require 'spec_helper'
current_dir=Dir.pwd
# tests dir is current
autoscale="#{current_dir}/../docker-swarm-autoscaler/auto-scale.sh"
describe 'auto-scale.sh' do
create_standard_mocks
context 'scaling docker swarm services' do
it 'scales a service with lower than the minimum replicas' do
set_standard_mock_outputs
stdout, stderr, status = stubbed_env.execute("/bin/bash #{autoscale}", {'LOOP' => 'false'})
expect(stdout).to include("Service hello_helloworld_too_low_cpu has an autoscale label.")
expect(stdout).to include("Service hello_helloworld_too_low_cpu is below the minimum. Scaling to the minimum of 3")
expect(status.exitstatus).to eq 0
end
it 'scales a service with low cpu down by 1 replica' do
set_standard_mock_outputs
stdout, stderr, status = stubbed_env.execute("/bin/bash #{autoscale}", {'LOOP' => 'false'})
expect(stdout).to include("Service hello_helloworld_low_cpu has an autoscale label.")
expect(stdout).to include("Scaling down the service hello_helloworld_low_cpu to 3")
expect(status.exitstatus).to eq 0
end
it 'does not scale a service with low cpu when the minimum replicas is reached' do
set_standard_mock_outputs
stdout, stderr, status = stubbed_env.execute("/bin/bash #{autoscale}", {'LOOP' => 'false'})
expect(stdout).to include("Service hello_helloworld_min_replicas_low_cpu has an autoscale label.")
expect(stdout).to include("Service hello_helloworld_min_replicas_low_cpu has the minumum number of replicas.")
expect(status.exitstatus).to eq 0
end
it 'scales a service with high cpu up by 1 replica' do
set_standard_mock_outputs
stdout, stderr, status = stubbed_env.execute("/bin/bash #{autoscale}", {'LOOP' => 'false'})
expect(stdout).to include("Service hello_helloworld_high_cpu has an autoscale label.")
expect(stdout).to include("Scaling up the service hello_helloworld_high_cpu to 4")
expect(status.exitstatus).to eq 0
end
it 'does not scale a service with high cpu when the max replicas is reached' do
set_standard_mock_outputs
stdout, stderr, status = stubbed_env.execute("/bin/bash #{autoscale}", {'LOOP' => 'false'})
expect(stdout).to include("Service hello_helloworld_high_cpu_full_replicas has an autoscale label.")
expect(stdout).to include("Service hello_helloworld_high_cpu_full_replicas already has the maximum of 4 replicas")
expect(status.exitstatus).to eq 0
end
it 'scales a service with more than the maximum number of replicas' do
set_standard_mock_outputs
stdout, stderr, status = stubbed_env.execute("/bin/bash #{autoscale}", {'LOOP' => 'false'})
expect(stdout).to include("Service hello_helloworld_high_cpu_too_many_replicas has an autoscale label.")
expect(stdout).to include("Service hello_helloworld_high_cpu_too_many_replicas is above the maximum. Scaling to the maximum of 4")
expect(status.exitstatus).to eq 0
end
it 'does not scale a service without an autoscale label' do
set_standard_mock_outputs
stdout, stderr, status = stubbed_env.execute("/bin/bash #{autoscale}", {'LOOP' => 'false'})
expect(stdout).to include("Service autoscale_docker-swarm-autoscaler does not have an autoscale label.")
expect(status.exitstatus).to eq 0
end
end
end

413
tests/spec/spec_helper.rb Normal file
View File

@@ -0,0 +1,413 @@
require 'ap'
require 'pry'
require 'rspec/shell/expectations'
RSpec.configure do |c|
c.include Rspec::Shell::Expectations
end
def create_standard_mocks
let(:stubbed_env) { create_stubbed_env }
let(:curl_mock) { stubbed_env.stub_command('curl') }
let(:docker_mock) { stubbed_env.stub_command('docker') }
end
def set_standard_mock_outputs
# If you need something non-standard to be output, define your output before running this function. Outputs are stacked to stdout with newlines (\n), so output defined first appears on top.
# If your non-standard mock output is an exit code, define it after this function. Exit codes can be overwritten, and whichever is defined last is what the test will use.
standard_prometheus_output='{
"status": "success",
"data": {
"resultType": "vector",
"result": [
{
"metric": {
"container_label_com_docker_swarm_service_name": "autoscale_docker-swarm-autoscaler",
"instance": "10.0.0.6:8080"
},
"value": [
1576602885.053,
"0.41103154419335"
]
},
{
"metric": {
"container_label_com_docker_swarm_service_name": "hello_helloworld_low_cpu",
"instance": "10.0.0.6:8080"
},
"value": [
1576602885.053,
"0.011596642816404852"
]
},
{
"metric": {
"container_label_com_docker_swarm_service_name": "hello_helloworld_too_low_cpu",
"instance": "10.0.0.6:8080"
},
"value": [
1576602885.053,
"0.011596642816404852"
]
},
{
"metric": {
"container_label_com_docker_swarm_service_name": "hello_helloworld_high_cpu",
"instance": "10.0.0.6:8080"
},
"value": [
1576602885.053,
"86.4"
]
},
{
"metric": {
"container_label_com_docker_swarm_service_name": "hello_helloworld_high_cpu_full_replicas",
"instance": "10.0.0.6:8080"
},
"value": [
1576602885.053,
"86.4"
]
},
{
"metric": {
"container_label_com_docker_swarm_service_name": "hello_helloworld_min_replicas_low_cpu",
"instance": "10.0.0.6:8080"
},
"value": [
1576602885.053,
"0.01"
]
},
{
"metric": {
"container_label_com_docker_swarm_service_name": "hello_helloworld_high_cpu_too_many_replicas",
"instance": "10.0.0.6:8080"
},
"value": [
1576602885.053,
"86.4"
]
}
]
}
}
'
helloworld_high_cpu_too_many_replicas_docker_inspect_output='[
{
"Spec": {
"Name": "hello_helloworld_high_cpu_too_many_replicas",
"Labels": {
"com.docker.stack.image": "tutum/hello-world",
"com.docker.stack.namespace": "hello",
"swarm.autoscaler": "true",
"swarm.autoscaler.maximum": "4",
"swarm.autoscaler.minimum": "3"
},
"Mode": {
"Replicated": {
"Replicas": 5
}
},
"UpdateConfig": {
"Parallelism": 1,
"FailureAction": "pause",
"Monitor": 5000000000,
"MaxFailureRatio": 0,
"Order": "stop-first"
},
"RollbackConfig": {
"Parallelism": 1,
"FailureAction": "pause",
"Monitor": 5000000000,
"MaxFailureRatio": 0,
"Order": "stop-first"
},
"EndpointSpec": {
"Mode": "vip",
"Ports": [
{
"Protocol": "tcp",
"TargetPort": 80,
"PublishedPort": 8080,
"PublishMode": "ingress"
}
]
}
}
}
]'
helloworld_high_cpu_docker_inspect_output='[
{
"Spec": {
"Name": "hello_helloworld_high_cpu",
"Labels": {
"com.docker.stack.image": "tutum/hello-world",
"com.docker.stack.namespace": "hello",
"swarm.autoscaler": "true",
"swarm.autoscaler.maximum": "4",
"swarm.autoscaler.minimum": "3"
},
"Mode": {
"Replicated": {
"Replicas": 3
}
},
"UpdateConfig": {
"Parallelism": 1,
"FailureAction": "pause",
"Monitor": 5000000000,
"MaxFailureRatio": 0,
"Order": "stop-first"
},
"RollbackConfig": {
"Parallelism": 1,
"FailureAction": "pause",
"Monitor": 5000000000,
"MaxFailureRatio": 0,
"Order": "stop-first"
},
"EndpointSpec": {
"Mode": "vip",
"Ports": [
{
"Protocol": "tcp",
"TargetPort": 80,
"PublishedPort": 8080,
"PublishMode": "ingress"
}
]
}
}
}
]'
helloworld_high_cpu_full_replicas_docker_inspect_output='[
{
"Spec": {
"Name": "hello_helloworld_high_cpu_full_replicas",
"Labels": {
"com.docker.stack.image": "tutum/hello-world",
"com.docker.stack.namespace": "hello",
"swarm.autoscaler": "true",
"swarm.autoscaler.maximum": "4",
"swarm.autoscaler.minimum": "3"
},
"Mode": {
"Replicated": {
"Replicas": 4
}
},
"UpdateConfig": {
"Parallelism": 1,
"FailureAction": "pause",
"Monitor": 5000000000,
"MaxFailureRatio": 0,
"Order": "stop-first"
},
"RollbackConfig": {
"Parallelism": 1,
"FailureAction": "pause",
"Monitor": 5000000000,
"MaxFailureRatio": 0,
"Order": "stop-first"
},
"EndpointSpec": {
"Mode": "vip",
"Ports": [
{
"Protocol": "tcp",
"TargetPort": 80,
"PublishedPort": 8080,
"PublishMode": "ingress"
}
]
}
}
}
]'
helloworld_low_cpu_docker_inspect_output='[
{
"Spec": {
"Name": "hello_helloworld_low_cpu",
"Labels": {
"com.docker.stack.image": "tutum/hello-world",
"com.docker.stack.namespace": "hello",
"swarm.autoscaler": "true",
"swarm.autoscaler.maximum": "4",
"swarm.autoscaler.minimum": "3"
},
"Mode": {
"Replicated": {
"Replicas": 4
}
},
"UpdateConfig": {
"Parallelism": 1,
"FailureAction": "pause",
"Monitor": 5000000000,
"MaxFailureRatio": 0,
"Order": "stop-first"
},
"RollbackConfig": {
"Parallelism": 1,
"FailureAction": "pause",
"Monitor": 5000000000,
"MaxFailureRatio": 0,
"Order": "stop-first"
},
"EndpointSpec": {
"Mode": "vip",
"Ports": [
{
"Protocol": "tcp",
"TargetPort": 80,
"PublishedPort": 8080,
"PublishMode": "ingress"
}
]
}
}
}
]'
helloworld_too_low_cpu_docker_inspect_output='[
{
"Spec": {
"Name": "hello_helloworld_too_low_cpu",
"Labels": {
"com.docker.stack.image": "tutum/hello-world",
"com.docker.stack.namespace": "hello",
"swarm.autoscaler": "true",
"swarm.autoscaler.maximum": "4",
"swarm.autoscaler.minimum": "3"
},
"Mode": {
"Replicated": {
"Replicas": 1
}
},
"UpdateConfig": {
"Parallelism": 1,
"FailureAction": "pause",
"Monitor": 5000000000,
"MaxFailureRatio": 0,
"Order": "stop-first"
},
"RollbackConfig": {
"Parallelism": 1,
"FailureAction": "pause",
"Monitor": 5000000000,
"MaxFailureRatio": 0,
"Order": "stop-first"
},
"EndpointSpec": {
"Mode": "vip",
"Ports": [
{
"Protocol": "tcp",
"TargetPort": 80,
"PublishedPort": 8080,
"PublishMode": "ingress"
}
]
}
}
}
]'
docker_swarm_autoscaler_docker_inspect_output='[
{
"Spec": {
"Name": "autoscale_docker-swarm-autoscaler",
"Labels": {
"com.docker.stack.image": "tutum/hello-world",
"com.docker.stack.namespace": "autoscale"
},
"Mode": {
"Replicated": {
"Replicas": 1
}
},
"UpdateConfig": {
"Parallelism": 1,
"FailureAction": "pause",
"Monitor": 5000000000,
"MaxFailureRatio": 0,
"Order": "stop-first"
},
"RollbackConfig": {
"Parallelism": 1,
"FailureAction": "pause",
"Monitor": 5000000000,
"MaxFailureRatio": 0,
"Order": "stop-first"
},
"EndpointSpec": {
"Mode": "vip",
"Ports": [
{
"Protocol": "tcp",
"TargetPort": 80,
"PublishedPort": 8080,
"PublishMode": "ingress"
}
]
}
}
}
]'
hello_helloworld_min_replicas_low_cpu_docker_inspect_output='[
{
"Spec": {
"Name": "hello_helloworld_min_replicas_low_cpu",
"Labels": {
"com.docker.stack.image": "tutum/hello-world",
"com.docker.stack.namespace": "hello",
"swarm.autoscaler": "true",
"swarm.autoscaler.maximum": "4",
"swarm.autoscaler.minimum": "3"
},
"Mode": {
"Replicated": {
"Replicas": 3
}
},
"UpdateConfig": {
"Parallelism": 1,
"FailureAction": "pause",
"Monitor": 5000000000,
"MaxFailureRatio": 0,
"Order": "stop-first"
},
"RollbackConfig": {
"Parallelism": 1,
"FailureAction": "pause",
"Monitor": 5000000000,
"MaxFailureRatio": 0,
"Order": "stop-first"
},
"EndpointSpec": {
"Mode": "vip",
"Ports": [
{
"Protocol": "tcp",
"TargetPort": 80,
"PublishedPort": 8080,
"PublishMode": "ingress"
}
]
}
}
}
]'
curl_mock.with_args('--silent').outputs(standard_prometheus_output, to: :stdout)
docker_mock.with_args('service','inspect','hello_helloworld_high_cpu').outputs(helloworld_high_cpu_docker_inspect_output, to: :stdout)
docker_mock.with_args('service','inspect','hello_helloworld_low_cpu').outputs(helloworld_low_cpu_docker_inspect_output, to: :stdout)
docker_mock.with_args('service','inspect','hello_helloworld_too_low_cpu').outputs(helloworld_too_low_cpu_docker_inspect_output, to: :stdout)
docker_mock.with_args('service','inspect','autoscale_docker-swarm-autoscaler').outputs(docker_swarm_autoscaler_docker_inspect_output, to: :stdout)
docker_mock.with_args('service','inspect','hello_helloworld_high_cpu_full_replicas').outputs(helloworld_high_cpu_full_replicas_docker_inspect_output, to: :stdout)
docker_mock.with_args('service','inspect','hello_helloworld_high_cpu_too_many_replicas').outputs(helloworld_high_cpu_too_many_replicas_docker_inspect_output, to: :stdout)
docker_mock.with_args('service','inspect','hello_helloworld_min_replicas_low_cpu').outputs(hello_helloworld_min_replicas_low_cpu_docker_inspect_output, to: :stdout)
docker_mock.with_args('service', 'scale').returns_exitstatus(0)
end