Autoscaler Deployment
Complete deployment guide for the Productify Autoscaler components.
Components
The Autoscaler consists of two components:
- Nomadscaler Plugin - Go-based Nomad Autoscaler plugin
- Optimizer Service - Python-based MILP optimization service
Optimizer Deployment
Docker
bash
# Run the optimizer detached, exposing the HTTP API on port 8000.
docker run -d \
--name optimizer \
-p 8000:8000 \
-e CACHE_SIZE=10 \
-e FORECAST_HORIZON=60 \
ghcr.io/productifyfw/optimizer:latest

Docker Compose
yaml
version: "3.8"
services:
  optimizer:
    image: ghcr.io/productifyfw/optimizer:latest
    ports:
      - "8000:8000"
      - "9090:9090"  # Prometheus metrics
    environment:
      # Values quoted so Compose passes them through as strings.
      CACHE_SIZE: "10"
      FORECAST_HORIZON: "60"
      LOG_LEVEL: "INFO"
    volumes:
      - ./config.ini:/app/config.ini:ro
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8000/health"]
      interval: 30s
      timeout: 10s
      retries: 3

Nomad
hcl
# Deploys the optimizer as a highly-available Nomad service.
job "optimizer" {
  datacenters = ["dc1"]
  type        = "service"

  group "optimizer" {
    # Two instances for high availability.
    count = 2

    network {
      port "http" {
        to = 8000
      }
      port "metrics" {
        to = 9090
      }
    }

    service {
      name = "optimizer"
      port = "http"
      tags = ["productify", "autoscaler"]

      check {
        type     = "http"
        path     = "/health"
        interval = "10s"
        timeout  = "2s"
      }
    }

    task "server" {
      driver = "docker"

      config {
        image = "ghcr.io/productifyfw/optimizer:latest"
        ports = ["http", "metrics"]
      }

      env {
        CACHE_SIZE       = "10"
        FORECAST_HORIZON = "60"
        LOG_LEVEL        = "INFO"
      }

      resources {
        cpu    = 1000
        memory = 1024
      }
    }
  }
}

Future Support
Kubernetes deployment manifests will be added in a future release.
Nomadscaler Plugin Deployment
Installation
bash
# Build the plugin binary from source
cd nomadscaler
./build.sh
# Install the binary into the Nomad Autoscaler plugin directory
sudo cp ./bin/nomadscaler /opt/nomad-autoscaler/plugins/
sudo chmod +x /opt/nomad-autoscaler/plugins/nomadscaler

Nomad Autoscaler Configuration
hcl
# autoscaler.hcl — Nomad Autoscaler agent configuration.
plugin_dir = "/opt/nomad-autoscaler/plugins"

# Nomad cluster the autoscaler talks to.
nomad {
  address = "http://localhost:4646"
}

policy_dir = "/etc/nomad-autoscaler/policies"

# HTTP API endpoint of the agent itself.
http {
  bind_address = "0.0.0.0"
  bind_port    = 8080
}
log {
  level = "INFO"
}

Scaling Policy
hcl
# /etc/nomad-autoscaler/policies/web-app.hcl
scaling "horizontal" {
  enabled = true
  min     = 2
  max     = 20

  policy {
    evaluation_interval = "10s"
    cooldown            = "30s"

    target "nomadscaler" {
      namespace     = "production"
      job           = "web-app"
      group         = "frontend"
      optimizer_url = "http://optimizer:8000"
      cache_size    = 10
    }

    check "cpu" {
      source = "nomad-apm"
      query  = "avg_cpu_percent"

      strategy "target-value" {
        target = 75
      }
    }
  }
}

Running Nomad Autoscaler
bash
# Start the autoscaler agent with the config file and plugin directory.
nomad-autoscaler agent \
-config=/etc/nomad-autoscaler/autoscaler.hcl \
-plugin-dir=/opt/nomad-autoscaler/plugins

Complete Stack Deployment
Docker Compose
yaml
version: "3.8"
services:
  optimizer:
    image: ghcr.io/productifyfw/optimizer:latest
    ports:
      - "8000:8000"
    environment:
      CACHE_SIZE: "10"
      FORECAST_HORIZON: "60"
    restart: unless-stopped

  nomad-autoscaler:
    image: hashicorp/nomad-autoscaler:latest
    volumes:
      - ./autoscaler.hcl:/etc/autoscaler.hcl:ro
      - ./policies:/policies:ro
      - ./plugins:/plugins:ro
    command: agent -config=/etc/autoscaler.hcl
    environment:
      NOMAD_ADDR: http://nomad:4646
    depends_on:
      - optimizer
    restart: unless-stopped

Nomad Job (Complete)
hcl
# Runs the optimizer service and the Nomad Autoscaler agent as one job.
job "autoscaler-stack" {
  datacenters = ["dc1"]

  group "optimizer" {
    count = 2

    network {
      port "http" { to = 8000 }
      port "metrics" { to = 9090 }
    }

    service {
      name = "optimizer"
      port = "http"

      check {
        type     = "http"
        path     = "/health"
        interval = "10s"
        timeout  = "2s" # Nomad requires timeout alongside interval
      }
    }

    task "server" {
      driver = "docker"

      config {
        image = "ghcr.io/productifyfw/optimizer:latest"
        ports = ["http", "metrics"]
      }

      env {
        CACHE_SIZE       = "10"
        FORECAST_HORIZON = "60"
      }

      resources {
        cpu    = 200
        memory = 256
      }
    }
  }

  group "autoscaler" {
    count = 1

    network {
      port "http" { to = 8080 }
    }

    task "agent" {
      driver = "docker"

      config {
        image = "hashicorp/nomad-autoscaler:latest"
        volumes = [
          "local/config.hcl:/etc/autoscaler.hcl",
          "local/policies:/policies",
          "local/plugins:/plugins"
        ]
        args = ["agent", "-config=/etc/autoscaler.hcl"]
      }

      # Renders the local autoscaler.hcl into the allocation before start.
      template {
        data        = file("autoscaler.hcl")
        destination = "local/config.hcl"
      }

      resources {
        cpu    = 200
        memory = 256
      }
    }
  }
}

Configuration
Optimizer Configuration
See Optimizer Configuration for details.
Key settings:
- cache_size - Prediction cache size (default: 10)
- forecast_horizon - Forecast window (default: 60)
- sarimax_order - Time-series model parameters
Plugin Configuration
See Scaling Policies for details.
Key settings:
- optimizer_url - Optimizer service URL
- cache_size - Local cache size
- evaluation_interval - How often to evaluate
- cooldown - Minimum time between scaling actions
Health Checks
Optimizer Health
bash
curl http://localhost:8000/health

Nomad Autoscaler Status
bash
curl http://localhost:8080/v1/health

Plugin Status
bash
nomad-autoscaler policy list

Monitoring
Optimizer Metrics
Prometheus metrics available at /metrics:
optimizer_predictions_total
optimizer_forecast_latency_seconds
optimizer_optimization_latency_seconds
optimizer_errors_total

Nomad Autoscaler Metrics
nomad_autoscaler_policy_evaluation_total
nomad_autoscaler_scaling_actions_total
nomad_autoscaler_policy_evaluation_duration_seconds

Troubleshooting
Optimizer Not Responding
Check:
- Service is running
- Port 8000 is accessible
- Health endpoint returns 200
- Logs for errors
Plugin Not Loading
Verify:
- Plugin binary in correct directory
- Binary has execute permissions
- Plugin version matches autoscaler version
- Logs show plugin registration
No Scaling Actions
Debug:
- Policy is enabled
- Metrics are being collected
- Optimizer is reachable
- Cooldown period is not still in effect (a recent scaling action suppresses new ones)
- Min/max bounds allow scaling