Commit 95c8fee7 authored by Seth Floyd

First run

parent 55540b02
# Local .terraform directories
**/.terraform/*
*.tfvars
# Ignore any .tfstate files
*.tfstate
*.tfstate.*
crash.log
# SSH Keys
*.pem
# Backup files
*.bak
*.backup
# Mac system folders
.DS_Store
/*
resource "aws_acm_certificate" "YOUR_DOMAIN_cert" {
domain_name = "*.YOUR_DOMAIN.com"
validation_method = "DNS"
}
resource "aws_acm_certificate_validation" "YOUR_DOMAIN_cert" {
certificate_arn = aws_acm_certificate.YOUR_DOMAIN_cert.arn
validation_record_fqdns = ["${aws_route53_record.YOUR_DOMAIN_cert_validation.fqdn}"]
}
resource "aws_route53_record" "YOUR_DOMAIN_cert_validation" {
name = aws_acm_certificate.YOUR_DOMAIN_cert.domain_validation_options.0.resource_record_name
type = aws_acm_certificate.YOUR_DOMAIN_cert.domain_validation_options.0.resource_record_type
zone_id = var.YOUR_DOMAIN_HOSTED_ZONE_ID
records = ["${aws_acm_certificate.YOUR_DOMAIN_cert.domain_validation_options.0.resource_record_value}"]
ttl = 60
}
*/
#!/usr/bin/env python
import boto3
#This will use the credentials from your ~/.aws/credentials file for the given AWS profile.
#Change profile_name to match your profile, or leave it as 'default' for the default profile:
session = boto3.session.Session(profile_name='default')
BUCKET = 'NAME_OF_BUCKET_GOES_HERE'  #Set this to the name of the bucket you want to COMPLETELY EMPTY (all object versions included) and then delete
s3 = session.resource('s3')
bucket = s3.Bucket(BUCKET)
#Delete every object version in the bucket
bucket.object_versions.delete()
#If you don't want to delete the bucket itself and only want to remove everything IN it, comment out the following line
bucket.delete()
# Pritunl VPN
---
## Pritunl VPN built in Terraform in its own VPC.
Pritunl Homepage: [https://pritunl.com/](https://pritunl.com/)
"Pritunl is the best open source alternative to proprietary commercial vpn products such as Aviatrix and Pulse Secure. Create larger cloud vpn networks supporting thousands of concurrent users and get more control over your vpn server without any per-user pricing"
Free Pritunl Client can be found here. I prefer it over Viscosity [https://client.pritunl.com/](https://client.pritunl.com/)
---
# OVERVIEW
This is a basic install: a VPC in AWS plus an instance, sitting in an ASG behind a load balancer, that runs Pritunl VPN.
Pritunl documentation can be found here: [https://docs.pritunl.com/docs](https://docs.pritunl.com/docs)
---
# VARIABLES
You can add the AWS keys to the project's environment variables by going to `YourProject > Settings > CI/CD > Variables`. From there, add the following for this project:
* `TF_VAR_AWS_ACCESS_KEY_ID`
* `TF_VAR_AWS_SECRET_ACCESS_KEY`
* `TERRAFORM_VERSION`
* You will also want to check variables that are supplied in the `pritunl-vpn.tf` file.
Prefix any variable that Terraform needs to pick up from the environment with `TF_VAR_`. `TERRAFORM_VERSION` is only used in the GitLab CI file, so it needs no `TF_VAR_` prefix.
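As a rough illustration of how that mapping works (the real declarations live in this repo's `variables.tf`; the bodies below are a sketch, not the repo's exact code):
```hcl
# Terraform automatically maps TF_VAR_AWS_ACCESS_KEY_ID from the environment into
# var.AWS_ACCESS_KEY_ID, and TF_VAR_AWS_SECRET_ACCESS_KEY into var.AWS_SECRET_ACCESS_KEY.
variable "AWS_ACCESS_KEY_ID" {
  type        = string
  description = "Supplied via the TF_VAR_AWS_ACCESS_KEY_ID CI/CD variable"
}

variable "AWS_SECRET_ACCESS_KEY" {
  type        = string
  description = "Supplied via the TF_VAR_AWS_SECRET_ACCESS_KEY CI/CD variable"
}
```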
---
# INSTALL AND BACKUP SCRIPT
* Located at `pritunl-vpn/pritunl-instance.tpl`. Comments are inline and echo out what is happening, so you can follow along in the live system log (AWS Console > EC2 > right-click the instance > System Log).
* MongoDB version 4.2. Check [https://repo.mongodb.org/yum/amazon/2/mongodb-org](https://repo.mongodb.org/yum/amazon/2/mongodb-org) for the latest version to use with Pritunl; you can set it as a variable in `pritunl-vpn.tf` (see the sketch below).
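The module wires the version through as an input; a minimal sketch, assuming the declaration sits in the `pritunl-vpn` module's variables file:
```hcl
# Hypothetical declaration inside the pritunl-vpn module; pritunl-vpn.tf passes
# mongodb_version = "4.2" in, and the install template consumes var.mongodb_version.
variable "mongodb_version" {
  type        = string
  description = "MongoDB version installed alongside Pritunl"
  default     = "4.2"
}
```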
---
# FILES TO EDIT
* `backend-state.tf` - Point the S3 backend at your own state bucket. Pretty obvious if you look at it.
* `outputs.tf` - Replace all of the `VPC_PRITUNL` entries with the name of your VPC module, the same name you use in `vpc-utility.tf`. It should be an easy find-and-replace.
* `variables.tf` - The main setting for REGION is found here.
* `vpc.tf` - Be sure to change the options that have comments next to them (a rough sketch of the VPC module block follows this list).
* `gitlab-ci.yml` - Rename this file to `.gitlab-ci.yml`. I added a DESTROY option at the end of the pipeline. If you plan on using this in any sort of "production" environment where you don't want anyone to accidentally rip the VPC out from under your stuff...delete this section. Don't just comment it out, DELETE it.
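For orientation, the output names used in `outputs.tf` match the community `terraform-aws-modules/vpc/aws` module, so the `VPC_PRITUNL` module block you rename probably looks roughly like this sketch (the CIDRs, AZs, and subnet layout below are illustrative assumptions, not this repo's actual values):
```hcl
# Illustrative only - adjust the name, region, CIDR ranges, and AZs to your environment.
module "VPC_PRITUNL" {
  source = "terraform-aws-modules/vpc/aws"

  name = "pritunl-vpc"
  cidr = "10.10.0.0/16"

  azs             = ["us-east-1a", "us-east-1b", "us-east-1c"]
  public_subnets  = ["10.10.1.0/24", "10.10.2.0/24", "10.10.3.0/24"]
  private_subnets = ["10.10.11.0/24", "10.10.12.0/24", "10.10.13.0/24"]

  enable_nat_gateway = true
}
```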
---
# ACM CERT
* A cert is needed for the ALB. The easiest way to get one is through Terraform using ACM. You will need to either create a cert through Terraform or import an existing cert into AWS and supply its ARN. Creating the cert yourself outside of Terraform is out of the scope of this project.
* A starting point can be found in `ACM.tf`. Refer to this page for further help: [https://www.terraform.io/docs/providers/aws/r/acm_certificate_validation.html](https://www.terraform.io/docs/providers/aws/r/acm_certificate_validation.html)
The variable for the cert ARN needs to be added in the `pritunl-vpn.tf` file.
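A minimal sketch of that variable, assuming you already have a certificate in ACM and are passing its ARN in at the root level (the name mirrors the placeholder used in `pritunl-vpn.tf`):
```hcl
# Hypothetical declaration; the value is handed to the module as ACM_CERT in pritunl-vpn.tf.
variable "YOUR_ACM_CERT_ARN_GOES_HERE" {
  type        = string
  description = "ARN of the ACM certificate attached to the ALB HTTPS listener"
}
```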
---
# DEPLOYING
* GitLab will run the Init and Plan jobs on every push to a branch. Once they pass and you merge to master, the Init and Plan jobs run again; when the Plan job is done you will need to manually run the Apply job. GitLab gives you a chance to supply some variables before you run the job, but you won't need to do this for this project.
* A couple of things to remember: since you are creating a new Route 53 zone record, it will take time before the new DNS entry for the Pritunl instance resolves to the ALB DNS entry. If you want, you can go directly to the ALB DNS URL to verify that the instance was set up; it generally takes about 5-10 minutes depending on what configs you add on top of what I have here in the install. Read the docs at the link above for more about that.
* You will need to SSH into the instance and run the command to reset the admin password so you can get into the UI. This command is shown when you go to the ALB DNS URL.
---
# DESTROY
If you wish to destroy all of the resources in this stack, you will need to run the `BucketCleaner.py` script against each bucket (`pritunl-us-east-1` and `pritunl-us-west-2`) before you run the destroy pipeline.
terraform {
  backend "s3" {
    bucket = "YOUR_BUCKET"
    key    = "pritunl.tfstate"
    region = "us-east-1"
  }
}
# Official image for HashiCorp's Terraform. It uses the light image, which is
# Alpine based, as it is much...lighter.
#
# Entrypoint is also needed as the image sets the `terraform` binary as its
# entrypoint by default.
image:
  name: hashicorp/terraform:$TERRAFORM_VERSION
  entrypoint:
    - '/usr/bin/env'
    - 'PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin'

# Default output files for the Terraform plan and destroy plan
variables:
  PLAN: plan.tfplan
  DESTROYPLAN: destroyplan.tfplan

cache:
  paths:
    - .terraform

before_script:
  - export AWS_ACCESS_KEY_ID=$TF_VAR_AWS_ACCESS_KEY_ID
  - export AWS_SECRET_ACCESS_KEY=$TF_VAR_AWS_SECRET_ACCESS_KEY
  - terraform --version
  - terraform init -backend=true -get=true -input=false

stages:
  - validate
  - plan
  - apply
  - destroyplan
  - destroy

validate:
  stage: validate
  script:
    - terraform validate

plan:
  stage: plan
  script:
    - terraform plan -out=$PLAN
  artifacts:
    name: plan
    paths:
      - .archive_files
      - $PLAN

# Separate apply job, launched manually, since applying Terraform can be a destructive action.
apply:
  stage: apply
  environment:
    name: Utility Stack
  script:
    - terraform apply "$PLAN"
  dependencies:
    - plan
  when: manual
  only:
    - master

# Separate destroy jobs, launched manually, since destroying Terraform-managed resources is a destructive action.
destroyplan:
  stage: destroyplan
  environment:
    name: Utility Stack
  script:
    - terraform init -backend=true -get=true -input=false
    - terraform plan -destroy -out=$DESTROYPLAN
  artifacts:
    name: destroyplan
    paths:
      - $DESTROYPLAN
  when: manual
  only:
    - master

destroy:
  stage: destroy
  environment:
    name: Utility Stack
  script:
    - terraform init -backend=true -get=true -input=false
    - terraform apply "$DESTROYPLAN"
  dependencies:
    - destroyplan
  when: manual
  only:
    - master
#################
# VPC
#################
output "vpc_id" {
description = "The ID of the VPC"
value = module.VPC_PRITUNL.vpc_id
}
output "utility_vpc_cidr_block" {
description = "The CIDR block of the VPC"
value = module.VPC_PRITUNL.vpc_cidr_block
}
output "private_subnets" {
description = "List of IDs of private subnets"
value = module.VPC_PRITUNL.private_subnets
}
output "public_subnets" {
description = "List of IDs of public subnets"
value = module.VPC_PRITUNL.public_subnets
}
output "database_subnets" {
description = "List of IDs of database subnets"
value = module.VPC_PRITUNL.database_subnets
}
output "elasticache_subnets" {
description = "List of IDs of elasticache subnets"
value = module.VPC_PRITUNL.elasticache_subnets
}
output "redshift_subnets" {
description = "List of IDs of redshift subnets"
value = module.VPC_PRITUNL.redshift_subnets
}
# NAT gateways
output "nat_public_ips" {
description = "List of public Elastic IPs created for AWS NAT Gateway"
value = module.VPC_PRITUNL.nat_public_ips
}
output "availability_zones" {
description = "List of availability_zones available for the VPC"
value = module.VPC_PRITUNL.azs
}
output "pritunl_eip_public_ip" {
value = module.pritunl-vpn.pritunl_eip_public_ip
}
module "pritunl-vpn" {
source = "./pritunl-vpn"
region = var.region
access_key = var.AWS_ACCESS_KEY_ID
secret_key = var.AWS_SECRET_ACCESS_KEY
vpc_id = module.VPC_PRITUNL.vpc_id
public_subnets = module.VPC_PRITUNL.public_subnets
availability_zones = module.VPC_PRITUNL.azs
pritunl_bucket_prefix = "pritunl"
mongodb_version = "4.2"
ACM_CERT = var.YOUR_ACM_CERT_ARN_GOES_HERE #Or you can import it, pull in through remote state, Or create it in this stack using the ACM.tf file
HOSTED_ZONE_ID = var.YOUR_DOMAIN_HOSTED_ZONE_ID
}
resource "aws_iam_instance_profile" "pritunl_instance_profile" {
name = "pritunl_instance_profile"
role = aws_iam_role.pritunl_role.name
}
resource "aws_iam_role" "pritunl_role" {
name = "pritunl_role"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Principal": {
"Service": [
"ec2.amazonaws.com"
]
},
"Effect": "Allow",
"Sid": ""
}
]
}
EOF
tags = {
Name = "pritunl_role"
}
}
resource "aws_iam_role_policy_attachment" "pritunl_role_attach" {
role = aws_iam_role.pritunl_role.name
policy_arn = aws_iam_policy.pritunl_iam_policy.arn
}
resource "aws_iam_policy" "pritunl_iam_policy" {
name = "pritunl_iam_policy"
policy = data.aws_iam_policy_document.pritunl_policy_doc.json
}
data "aws_iam_policy_document" "pritunl_policy_doc" {
statement {
sid = "BucketAccess"
effect = "Allow"
actions = [
"s3:*"
]
resources = [
"${aws_s3_bucket.pritunl_bucket.arn}",
"${aws_s3_bucket.pritunl_bucket.arn}/*"
]
}
}
data "aws_ami" "pritunl" {
most_recent = true
filter {
name = "name"
values = ["amzn2-ami-hvm-*-x86_64-gp2"]
}
filter {
name = "virtualization-type"
values = ["hvm"]
}
owners = [137112412989] # Amazon AMI owner
}
data "template_file" "scripts" {
template = "${file("pritunl-vpn/pritunl-instance.tpl")}"
vars = {
pritunlbucket = aws_s3_bucket.pritunl_bucket.id
access_key = var.access_key
secret_key = var.secret_key
region = var.region
eip_id = aws_eip.pritunl_eip.id
mongodb_version = var.mongodb_version
}
}
resource "aws_launch_configuration" "pritunl_launch_config" {
name_prefix = "pritunl-lc-"
image_id = data.aws_ami.pritunl.id
instance_type = "t2.medium"
key_name = aws_key_pair.pritunl_key.key_name
security_groups = [aws_security_group.pritunl_sg.id]
associate_public_ip_address = true
iam_instance_profile = aws_iam_instance_profile.pritunl_instance_profile.id
lifecycle {
create_before_destroy = true
}
root_block_device {
volume_type = "standard"
volume_size = 20
}
user_data = data.template_file.scripts.rendered
}
resource "aws_autoscaling_group" "pritunl_main_asg" {
# We want this to explicitly depend on the launch config above
depends_on = [aws_launch_configuration.pritunl_launch_config, var.vpc_id]
name = "pritunl-asg"
# The chosen availability zones *must* match the AZs the VPC subnets are tied to.
# availability_zones = [var.availability_zones[0], var.availability_zones[1], var.availability_zones[2]]
vpc_zone_identifier = [var.public_subnets[0]]
target_group_arns = [aws_lb_target_group.Pritunl_ALB_Forward_TG_443.arn]
# Uses the ID from the launch config created above
launch_configuration = aws_launch_configuration.pritunl_launch_config.name
max_size = "1"
min_size = "1"
desired_capacity = "1"
health_check_type = "EC2"
tag {
key = "Name"
value = "pritunl"
propagate_at_launch = true
}
}
resource "aws_key_pair" "pritunl_key" {
key_name = "pritunl_key"
public_key = "The contenst of your public key goes here."
}
#!/bin/bash
#Associate EIP - This maintains the same IP for firewalls, vpn clients, etc.
sudo AWS_ACCESS_KEY_ID=${access_key} AWS_SECRET_ACCESS_KEY=${secret_key} aws ec2 associate-address --instance-id $(curl http://169.254.169.254/latest/meta-data/instance-id) --allocation-id ${eip_id} --allow-reassociation --region ${region}
#Install and setup Pritunl and MongoDB
echo "Install and setup Pritunl and MongoDB..."
sudo tee /etc/yum.repos.d/mongodb-org-${mongodb_version}.repo << EOF
[mongodb-org-${mongodb_version}]
name=MongoDB Repository
baseurl=https://repo.mongodb.org/yum/amazon/2/mongodb-org/${mongodb_version}/x86_64/
gpgcheck=1
enabled=1
gpgkey=https://www.mongodb.org/static/pgp/server-${mongodb_version}.asc
EOF
sudo tee /etc/yum.repos.d/pritunl.repo << EOF
[pritunl]
name=Pritunl Repository
baseurl=https://repo.pritunl.com/stable/yum/amazonlinux/2/
gpgcheck=1
enabled=1
EOF
sudo rpm -Uvh https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
gpg --keyserver hkp://keyserver.ubuntu.com --recv-keys 7568D9BB55FF9E5287D586017AE645C0CF8E292A
gpg --armor --export 7568D9BB55FF9E5287D586017AE645C0CF8E292A > key.tmp; sudo rpm --import key.tmp; rm -f key.tmp
sudo yum -y install pritunl mongodb-org
sudo systemctl start mongod pritunl
sudo systemctl enable mongod pritunl
#Stop Pritunl
sudo service pritunl stop
sleep 10
#Set path for MongoDB:
echo "Set path for MongoDB..."
sudo pritunl set-mongodb mongodb://localhost:27017/pritunl
#Create Backup Script:
echo "Create Backup Script..."
sudo mkdir -p /pritunl
cd /pritunl
touch pritunl-backup.sh
chmod 755 pritunl-backup.sh
echo "#!/bin/bash" >> pritunl-backup.sh
echo "#The following command will create a dump/ dir." >> pritunl-backup.sh
echo "sudo mongodump" >> pritunl-backup.sh
echo "aws s3 cp --recursive /pritunl/dump s3://${pritunlbucket}/pritunl/dump/" >> pritunl-backup.sh
echo "sudo aws s3 cp /var/lib/pritunl/pritunl.uuid s3://${pritunlbucket}/pritunl/pritunl.uuid" >> pritunl-backup.sh
echo "exit 0" >> pritunl-backup.sh
sleep 10
#Create Restore Script:
echo "Create Restore Script..."
touch pritunl-restore.sh
chmod 755 pritunl-restore.sh
echo "#!/bin/bash" >> pritunl-restore.sh
echo "sudo service pritunl stop" >> pritunl-restore.sh
echo "sudo aws s3 cp --recursive s3://${pritunlbucket}/pritunl/dump/ /pritunl/dump/." >> pritunl-restore.sh
echo "sudo aws s3 cp s3://${pritunlbucket}/pritunl/pritunl.uuid /var/lib/pritunl/pritunl.uuid" >> pritunl-restore.sh
echo "sudo mongorestore --nsInclude '*' /pritunl/dump/" >> pritunl-restore.sh
echo "sudo service pritunl start" >> pritunl-restore.sh
echo "Restoring..."
sudo /pritunl/pritunl-restore.sh
#Setup CRON for backups:
echo "Setup CRON for backups..."
sudo echo "0 * * * * /pritunl/pritunl-backup.sh #Runs hourly on the :00" >> pritunlcronjobs
sudo crontab pritunlcronjobs
sleep 10
#The restore script above restarts Pritunl; verify that the service is running:
sudo service pritunl status
exit 0
resource "aws_lb" "pritunl_alb" {
name = "pritunl-alb"
internal = false
load_balancer_type = "application"
subnets = [var.public_subnets[0], var.public_subnets[1], var.public_subnets[2]]
enable_deletion_protection = false #If you set to true you will have to maually turn off the delete protection
security_groups = [aws_security_group.pritunl_alb_sg.id]
tags = {
Name = "pritunl_alb"
}
}
resource "aws_lb_listener" "Pritunl_ALB_Forward_443" {
load_balancer_arn = aws_lb.pritunl_alb.arn
port = "443"
protocol = "HTTPS"
ssl_policy = "ELBSecurityPolicy-2015-05"
certificate_arn = var.ACM_CERT
default_action {
type = "forward"
target_group_arn = aws_lb_target_group.Pritunl_ALB_Forward_TG_443.arn
}
}
resource "aws_lb_listener" "Pritunl_ALB_REDIRECT_80" {
load_balancer_arn = aws_lb.pritunl_alb.arn
port = "80"
protocol = "HTTP"
default_action {
type = "redirect"
redirect {
port = "443"
protocol = "HTTPS"
status_code = "HTTP_302"
}
}
}
resource "aws_lb_target_group" "Pritunl_ALB_Forward_TG_443" {
name = "Pritunl-ALB-Forward-TG-443"
port = 443
protocol = "HTTPS"
vpc_id = var.vpc_id
health_check {
protocol = "HTTPS"
path = "/"
matcher = "302"
}
}
resource "aws_eip" "pritunl_eip" {
vpc = true
tags = {
Name = "pritunl_eip"
}
}
output "pritunl_alb_dns_name" {
description = "The DNS record name for ALB"
value = "${aws_lb.pritunl_alb.dns_name}"
}
output "pritunl_alb_zone_id" {
description = "The zone_id for the ALB"
value = "${aws_lb.pritunl_alb.zone_id}"
}
output "pritunl_eip_public_ip" {
description = "The public IP address of the EIP"
value = "${aws_eip.pritunl_eip.public_ip}"
}
output "pritunl_instance_sg_id" {
description = "The ID of the instance security group"
value = "${aws_security_group.pritunl_sg.id}"
}
resource "aws_route53_record" "pritunl_www" {
zone_id = var.HOSTED_ZONE_ID
name = "pritunl.setheryops.com"
type = "CNAME"
ttl = "300"
records = [aws_lb.pritunl_alb.dns_name]
}
#Interesting note here: This is specifically NOT set as an alias because when Route 53 receives a DNS query for an alias record, Route 53 responds with the applicable value for that resource. Route 53 responds with one or more IP addresses for the load balancer when the alias specifies an ELB or ALB
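#For comparison, a commented-out sketch of the alias-record alternative (illustration only,
#not part of this stack; the resource name below is hypothetical):
/*
resource "aws_route53_record" "pritunl_www_alias" {
  zone_id = var.HOSTED_ZONE_ID
  name    = "pritunl.setheryops.com"
  type    = "A"

  alias {
    name                   = aws_lb.pritunl_alb.dns_name
    zone_id                = aws_lb.pritunl_alb.zone_id
    evaluate_target_health = false
  }
}
*/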
#Second provider needed to replicate bucket to another region
provider "aws" {
  alias      = "west"
  region     = "us-west-2"
  access_key = var.access_key
  secret_key = var.secret_key
}
resource "aws_iam_role" "pritunl_replication_role" {
name = "pritunl_replication"
assume_role_policy = <<POLICY
{
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Principal": {
"Service": "s3.amazonaws.com"
},
"Effect": "Allow",
"Sid": ""
}
]
}
POLICY
}
resource "aws_iam_policy" "pritunl_replication_policy" {
name = "pritunl_replication_role"
policy = <<POLICY
{
"Version": "2012-10-17",
"Statement": [
{
"Action": [
"s3:GetReplicationConfiguration",
"s3:ListBucket"
],
"Effect": "Allow",
"Resource": [
"${aws_s3_bucket.pritunl_bucket.arn}"
]
},
{
"Action": [
"s3:GetObjectVersion",
"s3:GetObjectVersionAcl"
],
"Effect": "Allow",
"Resource": [
"${aws_s3_bucket.pritunl_bucket.arn}/*"
]
},
{
"Action": [
"s3:ReplicateObject",
"s3:ReplicateDelete"
],
"Effect": "Allow",
"Resource": "${aws_s3_bucket.pritunl_destination.arn}/*"
}
]
}
POLICY
}
resource "aws_iam_policy_attachment" "pritunl_replication_attachment" {
name = "pritunl_replication_attachment"
roles = [aws_iam_role.pritunl_replication_role.name]
policy_arn = aws_iam_policy.pritunl_replication_policy.arn
}
resource "aws_s3_bucket" "pritunl_bucket" {
bucket = "${var.pritunl_bucket_prefix}-us-east-1"
acl = "private"
region = "us-east-1"
versioning {
enabled = true
}
replication_configuration {
role = aws_iam_role.pritunl_replication_role.arn
rules {
id = "replication_rule"
prefix = "" #leave as an empty string to replicate the whole bucket
status = "Enabled"