Overview

Deploy UltraBalancer on major cloud platforms with platform-specific optimizations, auto-scaling, and managed services integration.

  • AWS: EC2, ECS, EKS, and Lambda
  • Google Cloud: Compute Engine, GKE, Cloud Run
  • Azure: VMs, AKS, Container Instances
  • DigitalOcean: Droplets and Kubernetes

AWS Deployment

EC2 Instance

user-data.sh
#!/bin/bash
# AWS EC2 User Data Script

# Update system
yum update -y

# Install UltraBalancer
curl -LO https://github.com/bas3line/ultrabalancer/releases/latest/download/ultrabalancer-linux-x86_64.tar.gz
tar -xzf ultrabalancer-linux-x86_64.tar.gz
mv ultrabalancer /usr/local/bin/
chmod +x /usr/local/bin/ultrabalancer

# Create configuration and log directories
mkdir -p /etc/ultrabalancer /var/log/ultrabalancer
cat > /etc/ultrabalancer/config.yaml << 'EOF'
listen_address: "0.0.0.0"
listen_port: 80
algorithm: "least-connections"

backends:
  - host: "10.0.1.10"  # Private IP
    port: 8080
  - host: "10.0.1.11"
    port: 8080

health_check:
  enabled: true
  interval_ms: 5000

logging:
  level: "info"
  format: "json"
  output: "/var/log/ultrabalancer/access.log"
EOF

# Create systemd service
cat > /etc/systemd/system/ultrabalancer.service << 'EOF'
[Unit]
Description=UltraBalancer
After=network.target

[Service]
Type=simple
ExecStart=/usr/local/bin/ultrabalancer -c /etc/ultrabalancer/config.yaml
Restart=always

[Install]
WantedBy=multi-user.target
EOF

# Start service
systemctl daemon-reload
systemctl enable ultrabalancer
systemctl start ultrabalancer

# Install the CloudWatch agent (configure and start it separately to ship
# /var/log/ultrabalancer/access.log to CloudWatch Logs)
yum install -y amazon-cloudwatch-agent
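
To boot an instance that runs this script, pass the file as user data. A minimal sketch with the AWS CLI; the AMI ID, key pair, security group, and subnet are placeholders for your own values:

# Launch an EC2 instance with the user-data script (placeholder IDs)
aws ec2 run-instances \
  --image-id ami-xxxxxxxxxxxxxxxxx \
  --instance-type t3.medium \
  --key-name my-key \
  --security-group-ids sg-xxxxxxxx \
  --subnet-id subnet-xxxxxxxx \
  --user-data file://user-data.sh \
  --tag-specifications 'ResourceType=instance,Tags=[{Key=Name,Value=ultrabalancer}]'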

ECS Fargate

ecs-task-definition.json
{
  "family": "ultrabalancer",
  "networkMode": "awsvpc",
  "requiresCompatibilities": ["FARGATE"],
  "cpu": "1024",
  "memory": "2048",
  "containerDefinitions": [
    {
      "name": "ultrabalancer",
      "image": "ultrabalancer/ultrabalancer:latest",
      "essential": true,
      "portMappings": [
        {
          "containerPort": 8080,
          "protocol": "tcp"
        }
      ],
      "environment": [
        {
          "name": "ULTRA_ALGORITHM",
          "value": "least-connections"
        },
        {
          "name": "ULTRA_BACKENDS",
          "value": "backend1.local:8080,backend2.local:8080"
        }
      ],
      "logConfiguration": {
        "logDriver": "awslogs",
        "options": {
          "awslogs-group": "/ecs/ultrabalancer",
          "awslogs-region": "us-east-1",
          "awslogs-stream-prefix": "ecs"
        }
      }
    }
  ]
}
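
To run the task, register it and create a Fargate service. Note that tasks using the awslogs driver also need an executionRoleArn in the task definition (not shown above) so the task can write to CloudWatch Logs. A sketch with the AWS CLI; the cluster name, subnets, and security group are placeholders:

# Create the log group referenced in the task definition
aws logs create-log-group --log-group-name /ecs/ultrabalancer

# Register the task definition
aws ecs register-task-definition --cli-input-json file://ecs-task-definition.json

# Create a Fargate service (cluster, subnets, and security group are placeholders)
aws ecs create-service \
  --cluster my-cluster \
  --service-name ultrabalancer \
  --task-definition ultrabalancer \
  --desired-count 2 \
  --launch-type FARGATE \
  --network-configuration 'awsvpcConfiguration={subnets=[subnet-xxxxxxxx],securityGroups=[sg-xxxxxxxx],assignPublicIp=ENABLED}'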

EKS (Kubernetes)

eks-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ultrabalancer
spec:
  replicas: 3
  selector:
    matchLabels:
      app: ultrabalancer
  template:
    metadata:
      labels:
        app: ultrabalancer
    spec:
      serviceAccountName: ultrabalancer
      containers:
      - name: ultrabalancer
        image: ultrabalancer/ultrabalancer:latest
        ports:
        - containerPort: 8080
        resources:
          requests:
            cpu: 200m
            memory: 256Mi
          limits:
            cpu: 1000m
            memory: 1Gi
---
apiVersion: v1
kind: Service
metadata:
  name: ultrabalancer
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-type: nlb
spec:
  type: LoadBalancer
  selector:
    app: ultrabalancer
  ports:
  - port: 80
    targetPort: 8080
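
Apply the manifests with kubectl; the optional autoscale command below uses an illustrative 70% CPU target:

# Deploy to the EKS cluster
kubectl apply -f eks-deployment.yaml

# Wait for the NLB hostname to be provisioned
kubectl get service ultrabalancer --watch

# Optional: scale replicas on CPU utilization (thresholds are illustrative)
kubectl autoscale deployment ultrabalancer --cpu-percent=70 --min=3 --max=10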

Google Cloud Platform

Compute Engine

# Create instance with startup script
gcloud compute instances create ultrabalancer \
  --machine-type=n1-standard-2 \
  --zone=us-central1-a \
  --image-family=ubuntu-2004-lts \
  --image-project=ubuntu-os-cloud \
  --boot-disk-size=20GB \
  --tags=http-server,https-server \
  --metadata-from-file=startup-script=startup.sh

# Create firewall rules
gcloud compute firewall-rules create allow-http \
  --allow=tcp:80 \
  --target-tags=http-server

gcloud compute firewall-rules create allow-https \
  --allow=tcp:443 \
  --target-tags=https-server
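
For horizontal scaling, the same startup.sh can back a managed instance group with CPU-based autoscaling. A sketch; the template and group names, sizes, and utilization target are illustrative:

# Create an instance template from the same startup script
gcloud compute instance-templates create ultrabalancer-template \
  --machine-type=n1-standard-2 \
  --image-family=ubuntu-2004-lts \
  --image-project=ubuntu-os-cloud \
  --tags=http-server,https-server \
  --metadata-from-file=startup-script=startup.sh

# Create a managed instance group and enable autoscaling
gcloud compute instance-groups managed create ultrabalancer-mig \
  --template=ultrabalancer-template \
  --size=2 \
  --zone=us-central1-a

gcloud compute instance-groups managed set-autoscaling ultrabalancer-mig \
  --zone=us-central1-a \
  --min-num-replicas=2 \
  --max-num-replicas=5 \
  --target-cpu-utilization=0.6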

GKE (Kubernetes)

gke-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ultrabalancer
spec:
  replicas: 3
  selector:
    matchLabels:
      app: ultrabalancer
  template:
    metadata:
      labels:
        app: ultrabalancer
    spec:
      containers:
      - name: ultrabalancer
        image: ultrabalancer/ultrabalancer:latest
        ports:
        - containerPort: 8080
        resources:
          requests:
            cpu: 200m
            memory: 256Mi
          limits:
            cpu: 1000m
            memory: 1Gi
---
apiVersion: v1
kind: Service
metadata:
  name: ultrabalancer
  annotations:
    # Legacy annotation; newer GKE versions use networking.gke.io/load-balancer-type: "Internal"
    cloud.google.com/load-balancer-type: "Internal"
spec:
  type: LoadBalancer
  selector:
    app: ultrabalancer
  ports:
  - port: 80
    targetPort: 8080
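
Deploy and retrieve the internal load balancer address; kubectl is assumed to already point at your GKE cluster:

# Deploy to the GKE cluster
kubectl apply -f gke-deployment.yaml

# The EXTERNAL-IP column shows the internal load balancer address once provisioned
kubectl get service ultrabalancer --watch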

Microsoft Azure

Virtual Machine

# Create resource group
az group create --name ultrabalancer-rg --location eastus

# Create VM
az vm create \
  --resource-group ultrabalancer-rg \
  --name ultrabalancer-vm \
  --image Ubuntu2204 \
  --size Standard_D2s_v3 \
  --admin-username azureuser \
  --generate-ssh-keys \
  --custom-data cloud-init.txt

# Open ports
az vm open-port --resource-group ultrabalancer-rg --name ultrabalancer-vm --port 80
az vm open-port --resource-group ultrabalancer-rg --name ultrabalancer-vm --port 443

# Get public IP
az vm show --resource-group ultrabalancer-rg --name ultrabalancer-vm -d --query publicIps -o tsv
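
The cloud-init.txt referenced above should install and start UltraBalancer much like the EC2 user-data script. Once the VM is up, a quick check against the public IP (sketch):

# Verify the load balancer responds on port 80
PUBLIC_IP=$(az vm show --resource-group ultrabalancer-rg --name ultrabalancer-vm -d --query publicIps -o tsv)
curl -I "http://${PUBLIC_IP}/"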

AKS (Kubernetes)

aks-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ultrabalancer
spec:
  replicas: 3
  selector:
    matchLabels:
      app: ultrabalancer
  template:
    metadata:
      labels:
        app: ultrabalancer
    spec:
      containers:
      - name: ultrabalancer
        image: ultrabalancer/ultrabalancer:latest
        ports:
        - containerPort: 8080
        resources:
          requests:
            cpu: 200m
            memory: 256Mi
          limits:
            cpu: 1000m
            memory: 1Gi
---
apiVersion: v1
kind: Service
metadata:
  name: ultrabalancer
  annotations:
    service.beta.kubernetes.io/azure-load-balancer-internal: "true"
spec:
  type: LoadBalancer
  selector:
    app: ultrabalancer
  ports:
  - port: 80
    targetPort: 8080
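
Deploy to AKS and wait for the internal load balancer IP; the cluster name passed to az aks get-credentials below is a placeholder:

# Point kubectl at the AKS cluster, then deploy
az aks get-credentials --resource-group ultrabalancer-rg --name ultrabalancer-aks
kubectl apply -f aks-deployment.yaml

# The internal load balancer IP appears under EXTERNAL-IP once provisioned
kubectl get service ultrabalancer --watch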

DigitalOcean

Droplet

# Create droplet with doctl
doctl compute droplet create ultrabalancer \
  --image ubuntu-20-04-x64 \
  --size s-2vcpu-4gb \
  --region nyc1 \
  --ssh-keys YOUR_SSH_KEY_ID \
  --user-data-file cloud-init.yaml

# Or via Terraform
do-terraform.tf
resource "digitalocean_droplet" "ultrabalancer" {
  name   = "ultrabalancer"
  size   = "s-2vcpu-4gb"
  image  = "ubuntu-20-04-x64"
  region = "nyc1"
  user_data = file("cloud-init.yaml")

  tags = ["production", "load-balancer"]
}

resource "digitalocean_firewall" "ultrabalancer" {
  name = "ultrabalancer-fw"

  droplet_ids = [digitalocean_droplet.ultrabalancer.id]

  inbound_rule {
    protocol         = "tcp"
    port_range       = "80"
    source_addresses = ["0.0.0.0/0", "::/0"]
  }

  inbound_rule {
    protocol         = "tcp"
    port_range       = "443"
    source_addresses = ["0.0.0.0/0", "::/0"]
  }

  outbound_rule {
    protocol              = "tcp"
    port_range            = "1-65535"
    destination_addresses = ["0.0.0.0/0", "::/0"]
  }
}

output "ip_address" {
  value = digitalocean_droplet.ultrabalancer.ipv4_address
}
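
With the Terraform path, export a DigitalOcean API token and apply; the provider and required_providers blocks are assumed to be configured separately:

# The digitalocean provider can read the token from this environment variable
export DIGITALOCEAN_TOKEN="your-token"

terraform init
terraform plan
terraform apply

# Print the droplet address from the output defined above
terraform output ip_address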

DigitalOcean Kubernetes

do-k8s-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ultrabalancer
spec:
  replicas: 3
  selector:
    matchLabels:
      app: ultrabalancer
  template:
    metadata:
      labels:
        app: ultrabalancer
    spec:
      containers:
      - name: ultrabalancer
        image: ultrabalancer/ultrabalancer:latest
        ports:
        - containerPort: 8080
---
apiVersion: v1
kind: Service
metadata:
  name: ultrabalancer
  annotations:
    service.beta.kubernetes.io/do-loadbalancer-protocol: "http"
    service.beta.kubernetes.io/do-loadbalancer-algorithm: "least_connections"
spec:
  type: LoadBalancer
  selector:
    app: ultrabalancer
  ports:
  - port: 80
    targetPort: 8080
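
Deploy to a DigitalOcean Kubernetes cluster; the cluster name passed to doctl is a placeholder:

# Fetch kubeconfig for the cluster, then deploy
doctl kubernetes cluster kubeconfig save ultrabalancer-cluster
kubectl apply -f do-k8s-deployment.yaml

# The DigitalOcean load balancer IP appears under EXTERNAL-IP once provisioned
kubectl get service ultrabalancer --watch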

Cost Optimization

AWS

  • Use t3/t3a instances with burstable CPU
  • Enable Spot Instances for non-production
  • Use Reserved Instances for long-term savings
  • Enable auto-scaling to match demand (see the sketch after this list)
  • Use S3 for log archival

Google Cloud

  • Use e2 machine types for cost savings
  • Enable preemptible VMs for dev/test
  • Use committed use discounts
  • Enable auto-scaling
  • Use Cloud Storage for logs

Azure

  • Use B-series VMs for variable workloads
  • Enable Spot VMs for non-critical workloads
  • Use reserved instances
  • Enable auto-scaling
  • Use Azure Blob Storage for logs
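
As one concrete example of demand-matched scaling, an EC2 Auto Scaling group can reuse the user-data script from the AWS section. A sketch; the AMI, launch template name, and subnet IDs are placeholders:

# User data must be base64-encoded inside a launch template
USER_DATA=$(base64 -w0 user-data.sh)

# Create a launch template that reuses the EC2 user-data script (placeholder AMI)
aws ec2 create-launch-template \
  --launch-template-name ultrabalancer-lt \
  --launch-template-data "{\"ImageId\":\"ami-xxxxxxxxxxxxxxxxx\",\"InstanceType\":\"t3.medium\",\"UserData\":\"${USER_DATA}\"}"

# Create an Auto Scaling group that scales between 2 and 6 instances
aws autoscaling create-auto-scaling-group \
  --auto-scaling-group-name ultrabalancer-asg \
  --launch-template LaunchTemplateName=ultrabalancer-lt,Version='$Latest' \
  --min-size 2 --max-size 6 --desired-capacity 2 \
  --vpc-zone-identifier "subnet-xxxxxxxx,subnet-yyyyyyyy"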