CoreyPunches.net blog

This is my little piece of the fediverse to bore others.

Just One More Thing

After getting my VPS going a couple of days ago (read about that here), I realized I should have done a couple more things in my Terraform scripts. I need to take daily snapshots of the root EBS volume, and I need to set up a weekly patching schedule to keep things up to date.

I also realized that anytime I recreated the instance I'd lose the previous public IP address, my DNS records would no longer be valid, and my services would become unreachable. One reason to do this via infrastructure as code is so I don't have to do anything manually in the AWS console to make it work. So with that in mind, here's what I did.

  • Modified my main.tf to point user_data to an external file that I can manage separately.
    • bootstrap.sh contains all the commands to install Tailscale, Nginx, Certbot, and the SSM agent.
main.tf

terraform {
  required_version = ">= 1.11.0"
  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = ">= 5.89.0"
    }
  }
}

provider "aws" {
  region = var.aws_region
}

# Get the latest Amazon Linux 2023 AMI
data "aws_ami" "amazon_linux" {
  most_recent = true
  owners      = ["amazon"]

  filter {
    name   = "name"
    values = ["al2023-ami-2023.*-x86_64"]
  }
  filter {
    name = "root-device-type"
    values = ["ebs"]
  }
  filter {
    name = "virtualization-type"
    values = ["hvm"]
  }
  filter {
    name = "architecture"
    values = ["x86_64"]
  }
}
 
# Create the EC2 instance
resource "aws_instance" "vps" {
  ami                         = data.aws_ami.amazon_linux.id
  instance_type       = var.instance_type
  subnet_id              = aws_subnet.main.id
  vpc_security_group_ids  = 
                   [aws_security_group.vpc-ssh.id, aws_security_group.vpc-web.id]
  key_name             = var.instance_keypair
  associate_public_ip_address = true

  # Explicitly define the root block device and tag it for backup
  root_block_device {
    volume_size    = 8      
    volume_type   = "gp3" 
    delete_on_termination = true
    tags = {
                 Backup = "true"
               }
  }

  tags = {
    Name   = "aws-vps-for-tailscale"
  }
 
  user_data = file("bootstrap.sh")

}
bootstrap.sh
#!/bin/bash
# Update system packages
sudo yum update -y
# Install SSM agent
sudo yum install -y amazon-ssm-agent
sudo systemctl enable amazon-ssm-agent
sudo systemctl start amazon-ssm-agent
# Move SSH to a non-standard port
sudo cp /etc/ssh/sshd_config /etc/ssh/sshd_config.bak
sudo sed -i '/^Port /d' /etc/ssh/sshd_config
echo "Port 2792" | sudo tee -a /etc/ssh/sshd_config
sudo systemctl restart sshd
# Install Tailscale
curl -fsSL https://tailscale.com/install.sh | sh
# Optional: Start Tailscale (replace YOUR_AUTH_KEY with your actual auth key)
tailscale up --authkey=tskey-auth-1234567890
# Install Nginx
sudo yum install -y nginx
sudo systemctl enable nginx
# Create the vps.conf file from embedded content
sudo tee /etc/nginx/conf.d/vps.conf > /dev/null <<'EOF_CONF'
server {
    server_name mastodon.coreypunches.net;
    location / {
        proxy_pass https://<main-tailnet-IP>:8562;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_cache_bypass $http_upgrade;
    }
    listen 80;
}
server {
    server_name pixelfed.coreypunches.net;
    location / {
        proxy_pass http://<main-tailnet-IP>:9750;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_cache_bypass $http_upgrade;
    }
    listen 80;  
}
server {
    server_name writefreely.coreypunches.net;
    location / {
        proxy_pass http://<main-tailnet-IP>:9800;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_cache_bypass $http_upgrade;
    }
    listen 80;    
}
EOF_CONF

sudo systemctl start nginx
# Install Certbot and get certificates
sudo yum install -y certbot python3-certbot-nginx
sudo systemctl start certbot-renew.timer
sudo systemctl restart nginx
sudo certbot --nginx --non-interactive --agree-tos --email cpunches@nomail.com -d mastodon.coreypunches.net -d pixelfed.coreypunches.net -d writefreely.coreypunches.net
sudo systemctl reload nginx
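
Since all of this runs via cloud-init on first boot, it's worth checking that the bootstrap actually finished before blaming anything else. A quick sanity check, assuming the standard Amazon Linux cloud-init log location:

# On the instance (via SSM Session Manager, or SSH on port 2792)
sudo tail -n 50 /var/log/cloud-init-output.log   # output from bootstrap.sh
sudo nginx -t                                    # confirm the generated vps.conf parses
systemctl is-active amazon-ssm-agent nginx       # both should report "active"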

  • Created dns.tf to manage the needed DNS updates automatically.
dns.tf
# Look up your Route 53 hosted zone
data "aws_route53_zone" "coreypunches" {
  name         = "coreypunches.net"
  private_zone = false
}

# Mastodon record
resource "aws_route53_record" "mastodon" {
  zone_id = data.aws_route53_zone.coreypunches.zone_id
  name    = "mastodon.coreypunches.net"
  type    = "A"
  ttl     = 300
  records = [aws_instance.vps.public_ip]
}

# Pixelfed record
resource "aws_route53_record" "pixelfed" {
  zone_id = data.aws_route53_zone.coreypunches.zone_id
  name    = "pixelfed.coreypunches.net"
  type    = "A"
  ttl     = 300
  records = [aws_instance.vps.public_ip]
}

# Writefreely record
resource "aws_route53_record" "writefreely" {
  zone_id = data.aws_route53_zone.coreypunches.zone_id
  name    = "writefreely.coreypunches.net"
  type    = "A"
  ttl     = 300
  records = [aws_instance.vps.public_ip]
}
  • Created snap.tf to set up the daily snapshots.
snap.tf
  resource "aws_dlm_lifecycle_policy" "weekly_snapshot" {
  description        = "Daily snapshots of EBS volumes"
  state              = "ENABLED"
  execution_role_arn = "arn:aws:iam::15551212:role/service-role/AWSDataLifecycleManagerDefaultRole"

  policy_details {
    resource_types = ["VOLUME"]
    target_tags = {
      Backup = "true"   # Volumes with this tag will be managed by DLM
    }

    schedule {
      name      = "DailySnapshot"
      copy_tags = true
      create_rule {
        interval      = 24
        interval_unit = "HOURS"
      }
      retain_rule {
        count = 2    # Retain the last 2 snapshots
      }
    }
  }
}
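
To confirm DLM is actually picking up the tagged volume and producing snapshots, a quick look with the AWS CLI (assuming credentials and a default region are already configured) is something like:

# List the DLM policy, then the snapshots it has created from Backup=true volumes
aws dlm get-lifecycle-policies
aws ec2 describe-snapshots --owner-ids self \
  --filters Name=tag:Backup,Values=true \
  --query 'Snapshots[].{Id:SnapshotId,Started:StartTime}'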
  • Created backpatch.tf to set up the weekly patch schedule.
backpatch.tf
   resource "aws_iam_role" "ssm_role" {
  name = "ssm_maintenance_role"
  assume_role_policy = jsonencode({
    Version = "2012-10-17",
    Statement = [
      {
        Effect = "Allow",
        Principal = { Service = "ssm.amazonaws.com" },
        Action = "sts:AssumeRole"
      }
    ]
  })
}

resource "aws_iam_role_policy_attachment" "ssm_policy_attachment" {
  role       = aws_iam_role.ssm_role.name
  policy_arn = "arn:aws:iam::aws:policy/AmazonSSMFullAccess"
}

# Create Patch Schedule
# Create a weekly maintenance window
resource "aws_ssm_maintenance_window" "weekly_patch" {
  name               = "WeeklyPatchMaintenanceWindow"
  schedule         = "cron(0 3 ? * SAT *)" # Every Saturday at 03:00 UTC
  duration          = 2     # Duration in hours
  cutoff              = 1     # Must complete at least 1 hour before the window ends
  allow_unassociated_targets   = false
}

# Optionally, define a target for the maintenance window (targeting your instance)
resource "aws_ssm_maintenance_window_target" "instance_target" {
  window_id     = aws_ssm_maintenance_window.weekly_patch.id
  name          = "InstanceTarget"
  resource_type = "INSTANCE"
  targets {
    key    = "InstanceIds"
    values = [aws_instance.vps.id]
  }
}

# Create the maintenance window task to run patching
resource "aws_ssm_maintenance_window_task" "patch_task" {
  window_id        = aws_ssm_maintenance_window.weekly_patch.id
  task_arn         = "AWS-RunPatchBaseline"
  service_role_arn = aws_iam_role.ssm_role.arn
  task_type        = "RUN_COMMAND"
  
  # Use the maintenance window target ID to identify which instance(s) to patch
  targets {
    key    = "WindowTargetIds"
    values = [aws_ssm_maintenance_window_target.instance_target.id]
  }
  
  task_invocation_parameters {
    run_command_parameters {
      # "Operation=Install" tells Patch Manager to apply missing patches
      parameter {
        name   = "Operation"
        values = ["Install"]
      }
    }
  }

  max_concurrency = "1"
  max_errors     = "1"
  priority       = 1
}
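
Once the window has run, patch compliance can be checked from the CLI instead of clicking through the console. A minimal check, using the instance_id output defined below:

# Show the patch state reported by AWS-RunPatchBaseline for the instance
aws ssm describe-instance-patch-states \
  --instance-ids "$(terraform output -raw instance_id)"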

  • Added a couple more outputs to outputs.tf
outputs.tf
# EC2 Instance ID
output "instance_id" {
    description = "The instance ID of the EC2 instance"
    value       = aws_instance.vps.id
}

# EC2 Instance Public IP
output "instance_public_ip" {
    description = "The public IP address of the EC2 instance"
    value       = aws_instance.vps.public_ip
}

# EC2 Instance Public DNS
output "instance_public_dns" {
    description = "EC2 Instance Public DNS"
    value = aws_instance.vps.public_dns
}

# Patch Maintenance Window ID
output "patch_maintenance_window_id" {
    description = "ID of the Patch Maintenance Window"
    value       = aws_ssm_maintenance_window.weekly_patch.id
}

# Root volume ID
output "vps_root_volume_id" {
    description = "ID of the root volume of the EC2 instance"
    value = aws_instance.vps.root_block_device[0].volume_id
}

Running terraform apply now recreates the instance if necessary, grabs the new public IP, updates the A records in Route 53 for the subdomains, creates the weekly patch schedule, and sets up daily snapshots of the root volume.
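
As a quick end-to-end check after an apply, the outputs plus a DNS lookup confirm the new IP actually made it into Route 53 (give it a moment for the old 300-second TTL to expire):

terraform apply
terraform output -raw instance_public_ip
dig +short mastodon.coreypunches.net   # should match the IP above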

the end

Read Part 1

#selfhosting

This is AF5QH. 73 and out.

Contact me

or my fight against the evil CGNAT overlords

The fight

I woke up some time ago and decided I would utilize my Synology NAS for more than just storing backups. I discovered that I could easily run various Docker containers and expose services to my local LAN and, I hoped, to the public internet.

I had also become a fan of decentralized social networks where I wasn't the product and/or revenue stream for some rich jackass that would be selling all my data.

Rich Jerk

So, I installed Mastodon for my X and BlueSky fix, Pixelfed as an alternative to Instagram, and WriteFreely for my blogging pleasure, all in Docker containers on my NAS. All of these applications federate with other hosts in the fediverse, so you can still follow and interact with people on other servers.

So, now I had the containers running, but how to get out of my LAN? I use Starlink as my internet provider since my wife and I are full-time RVers and good internet options are very limited. My Starlink plan doesn't provide a static IP, which was an issue, and I also needed to give the instances hostnames rather than local IP addresses, since my internal LAN addresses would not be reachable from the public internet.

I listed what I thought the options might be at the time.

  • Use a DDNS provider to map whatever my current public IP is to a DNS name.
    • Generate a Let's Encrypt cert for the DDNS name.
    • Set up a reverse proxy to route the traffic to the correct container.

The NAS I have lets you do all of those things with built-in options. So I set it all up, and nothing worked from outside my LAN, and it barely worked inside because of the hostnames I had to give the applications.

For some reason no traffic from the internet could reach my reverse proxy. Hmm... So I thought, “Oh, wait! I'm double NAT'd because I'm behind two routers!” This was true, as I had the Starlink receiver running into their provided wireless router (which you have to use) and then my existing network router connected to the Starlink router. So I figured the solution was to put the Starlink router into bypass mode, since I wasn't using it for routing anyway. I did that and discovered that although I was no longer double NAT'd, no traffic was making it to the reverse proxy.

Thinking about it

Ok, let's check port forwarding on the router. All good there. Ok, maybe a firewall setting on the router or the NAS? Nope...

That's when I made an interesting discovery. While checking what my public IP was for the 100th time via IP Chicken I noticed that for some reason my reported public IP did not match the IP I was seeing on the Starlink gateway device. WTF!?

So a quick Google search led me to the answer: Starlink (and other ISPs, it turns out) use CGNAT, which really sucks, because what the customer sees as their public IP is not actually a public address at all, it's a carrier-side address shared with other customers. Inbound connections never reach you, so it breaks port forwarding and just generally sucks for the customer.
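
If you want to check whether you're behind CGNAT yourself, the giveaway is that the WAN address your router reports doesn't match what the internet sees, and it typically falls in the carrier-grade NAT range 100.64.0.0/10. A rough check from any machine on the LAN, assuming curl is available:

# What the internet sees as your address
curl -s https://api.ipify.org; echo
# Compare it to the WAN address shown on your router/gateway.
# If they differ, or the WAN address is in 100.64.0.0/10, you're behind CGNAT
# and inbound port forwarding will never work.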

So I had another idea: I'd use Tailscale, which I could run as an application on my NAS to give me a publicly accessible endpoint. It has a cool feature they call a Funnel, basically a secure, tunneled reverse proxy that I could point at my applications running in containers.

I did get this to work for Mastodon and Pixelfed with some trial and error, but it still wasn't what I wanted, for a couple of reasons.

  • Funnels can only listen on 443, 8443, and 10000 – what if I have more than 3 services?
  • I was locked into using my tailnet domain for the sites (mastodon.ts123456.ts.net, etc).
  • 443 was a problem because there were services on the NAS using that port that I couldn't turn off.

Ok, there has to be a way to make this work, right?

Dammit man

I had expressed my frustration about this on my Mastodon instance with the messed-up name, and I got a great comment from someone who suggested I just set up a cheap VPS somewhere and then set up a VPN between the VPS and my NAS to solve the problem. Turns out that was a great idea!

I've had a personal AWS account for many years that I use mainly for training purposes. I work in Infrastructure in the real world so it's good to have a lab environment to try things out when learning about them. So here's what I decided to do.

  • Spin up an EC2 instance in my AWS account.
  • I have a domain I own registered in Route 53 so DNS was going to be easy.
  • Install Tailscale on the EC2 instance and add it to the tailnet my NAS is already on.
    • Tailscale gives you 3 machines on a tailnet for free, which is wonderful.
  • Use Terraform to script all the needed infrastructure – because why not?
    • VPC
    • Security groups
    • EC2 instance

So, the Terraform with some names changed to protect the innocent:

main.tf

terraform {
  required_version = ">= 1.11.0"
  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = ">= 5.89.0"
    }
  }
}
 
provider "aws" {
  region = var.aws_region
}

# Get the latest Amazon Linux 2023 AMI (free tier eligible)
data "aws_ami" "amazon_linux" {
  most_recent = true
  owners      = ["amazon"]

  filter {
    name   = "name"
    values = ["al2023-ami-2023.*-x86_64"]
  }
  filter {
    name = "root-device-type"
    values = ["ebs"]
  }
  filter {
    name = "virtualization-type"
    values = ["hvm"]
  }
  filter {
    name = "architecture"
    values = ["x86_64"]
  }
}

# Create the EC2 instance (free tier eligible)
resource "aws_instance" "vps" {
  ami                         = data.aws_ami.amazon_linux.id
  instance_type               = var.instance_type
  subnet_id                   = aws_subnet.main.id
  vpc_security_group_ids      = [aws_security_group.vpc-ssh.id, aws_security_group.vpc-web.id]
  key_name                    = var.instance_keypair
  associate_public_ip_address = true

  tags = {
    Name = "aws-vps-for-tailscale"
  }

  user_data = <<-EOF
              #!/bin/bash
              # Update system packages
              yum update -y
              # Install Tailscale
              curl -fsSL https://tailscale.com/install.sh | sh
              # Optional: Start Tailscale (replace YOUR_AUTH_KEY with your actual auth key)
              tailscale up --authkey=tskey-n0m0r3s3cr3ts
              EOF
}

variables.tf

variable "aws_region" {
  description = "AWS region to deploy the resources"
  type        = string
  default     = "us-east-1"
}

variable "aws_availability_zone" {
  description = "AWS availability zone"
  type        = string
  default     = "us-east-1a"
}

# AWS EC2 Instance Type
variable "instance_type" {
  description = "EC2 Instance Type"
  type        = string
  default     = "t3.micro"
}

# AWS EC2 Instance Key Pair
variable "instance_keypair" {
  description = "AWS EC2 Key Pair that needs to be associated with the EC2 Instance"
  type        = string
  default     = "some-key"
}
 

vpc.tf

# Create a VPC
resource "aws_vpc" "main" {
  cidr_block = "10.1.0.0/16"
  tags = {
    Name = "vps-vpc"
  }
}

# Create a public subnet
resource "aws_subnet" "main" {
  vpc_id            = aws_vpc.main.id
  cidr_block        = "10.1.1.0/24"
  availability_zone = var.aws_availability_zone
  tags = {
    Name = "vps-subnet"
  }
}

# Create an Internet Gateway
resource "aws_internet_gateway" "gw" {
  vpc_id = aws_vpc.main.id
  tags = {
    Name = "vps-igw"
  }
}

# Create a public route table
resource "aws_route_table" "public" {
  vpc_id = aws_vpc.main.id

  route {
    cidr_block = "0.0.0.0/0"
    gateway_id = aws_internet_gateway.gw.id
  }

  tags = {
    Name = "vps-public-rt"
  }
}

# Associate the public route table with the subnet
resource "aws_route_table_association" "public_subnet" {
  subnet_id      = aws_subnet.main.id
  route_table_id = aws_route_table.public.id
} 

sg.tf

# Create Security Group - SSH Traffic
resource "aws_security_group" "vpc-ssh" {
  name        = "vpc-ssh"
  description = "VPS VPC SSH"
  vpc_id      = aws_vpc.main.id

  ingress {
    description = "Allow Port 22" # CHANGE THIS
    from_port   = 22              # really, I mean it, CHANGE THIS
    to_port     = 22              # bruh, pay attention
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  egress {
    description = "Allow all IPs and ports outbound"
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }

  tags = {
    Name = "vpc-ssh"
  }
}
 

# Create Security Group - Web Traffic
resource "aws_security_group" "vpc-web" {
  name        = "vpc-web"
  description = "VPS VPC Web"
  vpc_id      = aws_vpc.main.id

  ingress {
    description = "Allow Port 80"
    from_port   = 80
    to_port     = 80
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  ingress {
    description = "Allow Port 443"
    from_port   = 443
    to_port     = 443
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  egress {
    description = "Allow all IPs and ports outbound"
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }

  tags = {
    Name = "vpc-web"
  }
}

outputs.tf

output "instance_public_ip" {
    description = "The public IP address of the EC2 instance"
    value       = aws_instance.vps.public_ip
}

# EC2 Instance Public DNS
output "instance_public_dns" {
    description = "EC2 Instance Public DNS"
    value = aws_instance.vps.public_dns
}
 

So then a little

  1. terraform init
  2. terraform validate
  3. terraform plan
  4. terraform apply

A few minutes later

Cool, so now we have our VPC, security groups, and EC2 instance all set up and running.

With all that in order, we joined our new EC2 instance to our tailnet, so we have a secure tunnel between it and the NAS ready to go.
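
Before touching DNS or Nginx, it's worth confirming both ends are really on the tailnet; from the EC2 instance, something like the following (the NAS name/IP placeholder is whatever Tailscale assigned it):

tailscale status                          # the EC2 instance and the NAS should both be listed
tailscale ping <nas-tailnet-name-or-ip>   # verify traffic actually flows over the tunnel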

Next I updated the Route 53 DNS entries for my domain to point:

  • https://mastodon.coreypunches.net
  • https://pixelfed.coreypunches.net
  • https://writefreely.coreypunches.net

all to my new EC2 instance.

Next I needed to install Nginx on my EC2 instance to work as a reverse proxy, so incoming traffic could be routed across my tailnet to my patiently waiting containers running on the NAS.

One Nginx config coming right up.

server {
    server_name mastodon.coreypunches.net;

    location / {
        proxy_pass https://some-server:8562;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_cache_bypass $http_upgrade;
    }

    listen 443 ssl; # managed by Certbot
    ssl_certificate /etc/letsencrypt/live/mastodon.coreypunches.net/fullchain.pem; 
    ssl_certificate_key /etc/letsencrypt/live/mastodon.coreypunches.net/privkey.pem; 
    include /etc/letsencrypt/options-ssl-nginx.conf; # managed by Certbot
    ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem; # managed by Certbot
}
server {
    if ($host = mastodon.coreypunches.net) {
        return 301 https://$host$request_uri;
    } # managed by Certbot

    listen 80;
    server_name mastodon.coreypunches.net;
    return 404; # managed by Certbot
}
server {
    server_name pixelfed.coreypunches.net;

    location / {
        proxy_pass http://some-server:9750;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_cache_bypass $http_upgrade;
    }

    listen 443 ssl; # managed by Certbot
    ssl_certificate /etc/letsencrypt/live/mastodon.coreypunches.net/fullchain.pem; 
    ssl_certificate_key /etc/letsencrypt/live/mastodon.coreypunches.net/privkey.pem; 
    include /etc/letsencrypt/options-ssl-nginx.conf; # managed by Certbot
    ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem; # managed by Certbot
}

server {
    if ($host = pixelfed.coreypunches.net) {
        return 301 https://$host$request_uri;
    } # managed by Certbot

    listen 80;
    server_name pixelfed.coreypunches.net;
    return 404; # managed by Certbot
}
server {
    server_name writefreely.coreypunches.net;

    location / {
        proxy_pass http://some-server:9800;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_cache_bypass $http_upgrade;
    }

    listen 443 ssl; # managed by Certbot
    ssl_certificate /etc/letsencrypt/live/mastodon.coreypunches.net/fullchain.pem; 
    ssl_certificate_key /etc/letsencrypt/live/mastodon.coreypunches.net/privkey.pem; 
    include /etc/letsencrypt/options-ssl-nginx.conf; # managed by Certbot
    ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem; # managed by Certbot
}

server {
    if ($host = writefreely.coreypunches.net) {
        return 301 https://$host$request_uri;
    } # managed by Certbot

    server_name writefreely.coreypunches.net;
    
    listen 80;
    return 404; # managed by Certbot
}

I also installed Certbot and enabled it to automatically renew the certs before they expire (Let's Encrypt certs are good for 90 days).
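
Renewal is handled by the packaged systemd timer, and a dry run is a cheap way to make sure it will actually succeed when the time comes; roughly:

sudo systemctl enable --now certbot-renew.timer   # renewal timer shipped with the certbot package
systemctl list-timers certbot-renew.timer         # confirm it's scheduled
sudo certbot renew --dry-run                      # test renewal against the staging endpoint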

So there you have it

Now I'm free to add more services from my NAS as needed. I'm also thinking about self-hosting an email server for my domain on my NAS; if I recall correctly, I'll need to ask AWS to unblock outbound port 25. I've done the DNS setup before, getting the MX, DKIM, and other records all correct. But that is a project for another day.

Read Part 2

#selfhosting

This is AF5QH. 73 and out.

Contact me
