Just One More Thing
After getting my VPS going a couple of days ago (read about that here), I realized I should have done a couple more things in my Terraform scripts. I need to take daily snapshots of the root EBS volume, and I need to set up a weekly patching schedule to keep things up to date.
I also realized that any time I recreated the instance I'd lose the previous public IP address, my DNS records would no longer be valid, and my services would become unreachable. One reason to do this via infrastructure as code is so I don't have to touch the AWS console to make it all work again. So with that in mind, here's what I did.
- Modified my main.tf to point user_data at an external file that I can manage separately.
- bootstrap.sh contains all the commands to install Tailscale, Nginx, Certbot, and the SSM agent.
main.tf
terraform {
  required_version = ">= 1.11.0"

  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = ">= 5.89.0"
    }
  }
}

provider "aws" {
  region = var.aws_region
}

# Get the latest Amazon Linux 2023 AMI
data "aws_ami" "amazon_linux" {
  most_recent = true
  owners      = ["amazon"]

  filter {
    name   = "name"
    values = ["al2023-ami-2023.*-x86_64"]
  }

  filter {
    name   = "root-device-type"
    values = ["ebs"]
  }

  filter {
    name   = "virtualization-type"
    values = ["hvm"]
  }

  filter {
    name   = "architecture"
    values = ["x86_64"]
  }
}

# Create the EC2 instance
resource "aws_instance" "vps" {
  ami                         = data.aws_ami.amazon_linux.id
  instance_type               = var.instance_type
  subnet_id                   = aws_subnet.main.id
  vpc_security_group_ids      = [aws_security_group.vpc-ssh.id, aws_security_group.vpc-web.id]
  key_name                    = var.instance_keypair
  associate_public_ip_address = true

  # Explicitly define the root block device and tag it for backup
  root_block_device {
    volume_size           = 8
    volume_type           = "gp3"
    delete_on_termination = true

    tags = {
      Backup = "true"
    }
  }

  tags = {
    Name = "aws-vps-for-tailscale"
  }

  user_data = file("bootstrap.sh")
}
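One thing I may revisit: file() bakes the Tailscale auth key into the script. Terraform's templatefile() could inject it from a variable instead; a minimal sketch, assuming a tailscale_auth_key variable (my name, not from the original) and a matching placeholder in the script:

variable "tailscale_auth_key" {
  type      = string
  sensitive = true
}

# Then, inside aws_instance.vps:
#   user_data = templatefile("bootstrap.sh", {
#     tailscale_auth_key = var.tailscale_auth_key
#   })
# and the script references ${tailscale_auth_key} instead of a literal key.
# Note: templatefile() interprets every ${...} in the file, so any literal
# dollar-brace sequences in the script would need escaping as $${...}.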
bootstrap.sh
#!/bin/bash
# Update system packages
sudo yum update -y
# Install SSM agent
sudo yum install -y amazon-ssm-agent
sudo systemctl enable amazon-ssm-agent
sudo systemctl start amazon-ssm-agent
# Update SSH configuration (move sshd to a custom port, as in the previous post)
sudo cp /etc/ssh/sshd_config /etc/ssh/sshd_config.bak
sudo sed -i '/^Port /d' /etc/ssh/sshd_config
echo "Port 2792" | sudo tee -a /etc/ssh/sshd_config
sudo systemctl restart sshd
# Install Tailscale
curl -fsSL https://tailscale.com/install.sh | sh
# Optional: bring Tailscale up (the auth key below is a placeholder; replace it with your actual key)
sudo tailscale up --authkey=tskey-auth-1234567890
# Install Nginx
sudo yum install -y nginx
sudo systemctl enable nginx
# Create the vps.conf file from embedded content
sudo tee /etc/nginx/conf.d/vps.conf > /dev/null <<'EOF_CONF'
server {
    server_name mastodon.coreypunches.net;

    location / {
        proxy_pass https://<main-tailnet-IP>:8562;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_cache_bypass $http_upgrade;
    }

    listen 80;
}

server {
    server_name pixelfed.coreypunches.net;

    location / {
        proxy_pass http://<main-tailnet-IP>:9750;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_cache_bypass $http_upgrade;
    }

    listen 80;
}

server {
    server_name writefreely.coreypunches.net;

    location / {
        proxy_pass http://<main-tailnet-IP>:9800;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_cache_bypass $http_upgrade;
    }

    listen 80;
}
EOF_CONF
sudo systemctl start nginx
# Install Certbot and get certificates
sudo yum install -y certbot python3-certbot-nginx
sudo systemctl enable --now certbot-renew.timer
sudo systemctl restart nginx
sudo certbot --nginx --non-interactive --agree-tos --email cpunches@nomail.com -d mastodon.coreypunches.net -d pixelfed.coreypunches.net -d writefreely.coreypunches.net
sudo systemctl reload nginx
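One debugging note: user_data runs via cloud-init on first boot, so if the bootstrap ever misbehaves, the script's output is captured in the cloud-init log on the instance:

# On the instance (standard cloud-init log location on Amazon Linux)
sudo tail -n 50 /var/log/cloud-init-output.log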
- Created dns.tf to manage the needed DNS updates automatically.
dns.tf
# Look up the Route 53 hosted zone
data "aws_route53_zone" "coreypunches" {
  name         = "coreypunches.net"
  private_zone = false
}

# Mastodon record
resource "aws_route53_record" "mastodon" {
  zone_id = data.aws_route53_zone.coreypunches.zone_id
  name    = "mastodon.coreypunches.net"
  type    = "A"
  ttl     = 300
  records = [aws_instance.vps.public_ip]
}

# Pixelfed record
resource "aws_route53_record" "pixelfed" {
  zone_id = data.aws_route53_zone.coreypunches.zone_id
  name    = "pixelfed.coreypunches.net"
  type    = "A"
  ttl     = 300
  records = [aws_instance.vps.public_ip]
}

# Writefreely record
resource "aws_route53_record" "writefreely" {
  zone_id = data.aws_route53_zone.coreypunches.zone_id
  name    = "writefreely.coreypunches.net"
  type    = "A"
  ttl     = 300
  records = [aws_instance.vps.public_ip]
}
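Since the three records differ only by subdomain, they could also be collapsed with for_each; a minimal sketch using the same zone lookup (the "service" resource name is just for illustration):

locals {
  subdomains = ["mastodon", "pixelfed", "writefreely"]
}

resource "aws_route53_record" "service" {
  for_each = toset(local.subdomains)

  zone_id = data.aws_route53_zone.coreypunches.zone_id
  name    = "${each.key}.coreypunches.net"
  type    = "A"
  ttl     = 300
  records = [aws_instance.vps.public_ip]
}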
- Created snap.tf to set up the daily snapshots.
snap.tf
resource "aws_dlm_lifecycle_policy" "weekly_snapshot" {
description = "Daily snapshots of EBS volumes"
state = "ENABLED"
execution_role_arn = "arn:aws:iam::15551212:role/service-role/AWSDataLifecycleManagerDefaultRole"
policy_details {
resource_types = ["VOLUME"]
target_tags = {
Backup = "true" # Volumes with this tag will be managed by DLM
}
schedule {
name = "DailySnapshot"
copy_tags = true
create_rule {
interval = 24
interval_unit = "HOURS"
}
retain_rule {
count = 2 # Retain the last 2 snapshots
}
}
}
}
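The execution_role_arn above hardcodes my account ID. An alternative sketch, assuming the default AWSDataLifecycleManagerDefaultRole already exists in the account, is to derive the ARN from a caller-identity lookup and set execution_role_arn = local.dlm_role_arn:

data "aws_caller_identity" "current" {}

# Build the same role ARN from the current account ID instead of hardcoding it
locals {
  dlm_role_arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/service-role/AWSDataLifecycleManagerDefaultRole"
}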
- Created backpatch.tf to set up the weekly patch schedule.
backpatch.tf
resource "aws_iam_role" "ssm_role" {
name = "ssm_maintenance_role"
assume_role_policy = jsonencode({
Version = "2012-10-17",
Statement = [
{
Effect = "Allow",
Principal = { Service = "ssm.amazonaws.com" },
Action = "sts:AssumeRole"
}
]
})
}
resource "aws_iam_role_policy_attachment" "ssm_policy_attachment" {
role = aws_iam_role.ssm_role.name
policy_arn = "arn:aws:iam::aws:policy/AmazonSSMFullAccess"
}
# Create Patch Schedule
# Create a weekly maintenance window
resource "aws_ssm_maintenance_window" "weekly_patch" {
name = "WeeklyPatchMaintenanceWindow"
schedule = "cron(0 3 ? * SAT *)" # Every Saturday at 03:00 UTC
duration = 2 # Duration in hours
cutoff = 1 # Must complete at least 1 hour before the window ends
allow_unassociated_targets = false
}
# Optionally, define a target for the maintenance window (targeting your instance)
resource "aws_ssm_maintenance_window_target" "instance_target" {
window_id = aws_ssm_maintenance_window.weekly_patch.id
name = "InstanceTarget"
resource_type = "INSTANCE"
targets {
key = "InstanceIds"
values = [aws_instance.vps.id]
}
}
# Create the maintenance window task to run patching
resource "aws_ssm_maintenance_window_task" "patch_task" {
window_id = aws_ssm_maintenance_window.weekly_patch.id
task_arn = "AWS-RunPatchBaseline"
service_role_arn = aws_iam_role.ssm_role.arn
task_type = "RUN_COMMAND"
# Use the maintenance window target ID to identify which instance(s) to patch
targets {
key = "WindowTargetIds"
values = [aws_ssm_maintenance_window_target.instance_target.id]
}
task_invocation_parameters {
run_command_parameters {
# "Operation=Install" tells Patch Manager to apply missing patches
parameter {
name = "Operation"
values = ["Install"]
}
}
}
max_concurrency = "1"
max_errors = "1"
priority = 1
}
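AmazonSSMFullAccess is broader than this window strictly needs. AWS also ships a managed policy scoped to maintenance window tasks, which could be attached instead (a sketch; worth verifying it covers your task types before swapping):

resource "aws_iam_role_policy_attachment" "ssm_policy_attachment" {
  role = aws_iam_role.ssm_role.name

  # Managed policy scoped to maintenance windows, narrower than AmazonSSMFullAccess
  policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonSSMMaintenanceWindowRole"
}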
- Added a couple more outputs to outputs.tf
outputs.tf
# EC2 Instance ID
output "instance_id" {
  description = "The instance ID of the EC2 instance"
  value       = aws_instance.vps.id
}

# EC2 Instance Public IP
output "instance_public_ip" {
  description = "The public IP address of the EC2 instance"
  value       = aws_instance.vps.public_ip
}

# EC2 Instance Public DNS
output "instance_public_dns" {
  description = "The public DNS name of the EC2 instance"
  value       = aws_instance.vps.public_dns
}

# Patch Maintenance Window ID
output "patch_maintenance_window_id" {
  description = "ID of the Patch Maintenance Window"
  value       = aws_ssm_maintenance_window.weekly_patch.id
}

# Root volume ID
output "vps_root_volume_id" {
  description = "ID of the root volume of the EC2 instance"
  value       = aws_instance.vps.root_block_device[0].volume_id
}
Running terraform apply now recreates the instance if necessary, grabs the new public IP, updates the A records in Route 53 for the subdomains, creates the weekly patch schedule, and sets up daily snapshots of the root volume.
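For reference, the full round trip is just the usual Terraform workflow (the tfplan file name is arbitrary):

terraform fmt
terraform plan -out=tfplan
terraform apply tfplan
terraform output instance_public_ip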
This is AF5QH. 73 and out.