Commit f57a7ec9 authored by aegiacometti

first commit public repo
.terraform/
*.tfstate*
.idea/
secret-tips.txt
full_local_dev/.terraform/
id_rsa_aws.pem
*.hcl
data/
services_status.txt
variables:
  TF_ROOT: ${CI_PROJECT_DIR}
  TF_ADDRESS: ${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/terraform/state/${CI_PROJECT_NAME}
  AWS_ACCESS_KEY_ID: ${K8S_SECRET_AWS_ACCESS_KEY_ID}
  AWS_SECRET_ACCESS_KEY: ${K8S_SECRET_AWS_SECRET_ACCESS_KEY}
cache:
  key: ${CI_PROJECT_NAME}
  paths:
    - ${TF_ROOT}/.terraform
before_script:
  - cd ${TF_ROOT}
stages:
  - prepare
  - validate
  - plan
  - request_approval
  - deploy
  - notify_deploy
  - notify_if_failure
  - verify_deploy_and_notify
  - check_services
# Verify environments can be built
init_terraform:
  image: registry.gitlab.com/gitlab-org/terraform-images/stable:latest
  stage: prepare
  script:
    - ./gitlab-terraform.sh init
# Verify environments can be built
init_python:
  image: python:3.7
  stage: prepare
  script:
    - pip install -r requirements.txt
# Validate Terraform files
validate_terraform:
  image: registry.gitlab.com/gitlab-org/terraform-images/stable:latest
  stage: validate
  script:
    - ./gitlab-terraform.sh init
    - ./gitlab-terraform.sh validate
# Validate compliance of the request and store the report for future reference
validate_compliance:
  image: python:3.7
  stage: validate
  script:
    - pip install -r requirements.txt
    - python scripts/validate_compliance.py
  artifacts:
    paths:
      - ${TF_ROOT}/compliance.txt
# Create the Terraform plan and store it for future reference
plan:
  image: registry.gitlab.com/gitlab-org/terraform-images/stable:latest
  stage: plan
  script:
    - ./gitlab-terraform.sh plan -target=aws_security_group.custom_sg_public -target=aws_security_group.custom_sg_private
    - ./gitlab-terraform.sh plan-txt -target=aws_security_group.custom_sg_public -target=aws_security_group.custom_sg_private
  artifacts:
    paths:
      - ${TF_ROOT}/plan.cache
      - ${TF_ROOT}/plan.txt
    reports:
      terraform: ${TF_ROOT}/plan.txt
# Send notifications via Slack
notify_slack:
  image: curlimages/curl
  stage: request_approval
  script:
    - curl -F title='Compliance report' -F initial_comment="*Deploy Request* ${CI_COMMIT_SHORT_SHA} - ${CI_COMMIT_TITLE}" --form-string channels=gitlab-aws-sec-rules -F file=@${TF_ROOT}/compliance.txt -F filename=compliance-${CI_COMMIT_REF_SLUG}.txt -F token=${SLACK_OAUTH_TOKEN} https://slack.com/api/files.upload
    - curl -F title='Terraform Plan' -F initial_comment="Deploy Request ${CI_COMMIT_SHORT_SHA} - ${CI_COMMIT_TITLE} - *APPROVE DEPLOY* at ${CI_PIPELINE_URL}" --form-string channels=gitlab-aws-sec-rules -F file=@${TF_ROOT}/plan.txt -F filename=plan.txt -F token=${SLACK_OAUTH_TOKEN} https://slack.com/api/files.upload
  dependencies:
    - validate_compliance
    - plan
  only:
    - master
# Requires manual approval
apply:
  image: registry.gitlab.com/gitlab-org/terraform-images/stable:latest
  stage: deploy
  script:
    - ./gitlab-terraform.sh apply -target=aws_security_group.custom_sg_public -target=aws_security_group.custom_sg_private -auto-approve
  dependencies:
    - plan
  when: manual
  allow_failure: false
  only:
    - master
# Notify that the deployment has finished
notify_deploy:
  image: curlimages/curl
  stage: notify_deploy
  script:
    - curl -X POST -d "token=${SLACK_OAUTH_TOKEN}&channel=#gitlab-aws-sec-rules&text=Deploy Request ${CI_COMMIT_SHORT_SHA} - ${CI_COMMIT_TITLE} - *APPROVED* and *DEPLOYED*" https://slack.com/api/chat.postMessage
  only:
    - master
# Verify consistency between the Terraform state and AWS
verify_and_notify:
  image: python:3.7
  stage: verify_deploy_and_notify
  script:
    - pip install -r requirements.txt
    - curl --header "Private-Token:${CI_USER_TOKEN}" --request GET "${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/terraform/state/${CI_PROJECT_NAME}" > terraform.tfstate.json
    - python scripts/verify_and_notify.py
    - curl -F title='Report' -F initial_comment="Consistency check Terraform AWS - ${CI_COMMIT_SHORT_SHA} - ${CI_COMMIT_TITLE}" --form-string channels=gitlab-aws-sec-rules -F file=@${TF_ROOT}/consistency.txt -F filename=consistency-${CI_COMMIT_REF_SLUG}.txt -F token=${SLACK_OAUTH_TOKEN} https://slack.com/api/files.upload
  only:
    - master
notify_if_failure:
  image: curlimages/curl
  stage: notify_if_failure
  when: on_failure
  script:
    - curl -X POST -d "token=${SLACK_OAUTH_TOKEN}&channel=#gitlab-aws-sec-rules&text=Deploy Request ${CI_COMMIT_SHORT_SHA} - ${CI_COMMIT_TITLE} - *FAILED* - Check pipeline at ${CI_PIPELINE_URL}" https://slack.com/api/chat.postMessage
  only:
    - master
verify_aws_services_reachability:
  stage: check_services
  variables:
    AWS_ACCESS_KEY_ID: $AWS_ACCESS_KEY_ID
    AWS_SECRET_ACCESS_KEY: $AWS_SECRET_ACCESS_KEY
  trigger:
    include: test-pipeline/verify-aws-services-gitlab-ci.yml
  only:
    - master
## **Deploy network ACLs in AWS with Terraform and GitLab CI (pipelines)**
### Directory Structure
- The directory **"aws_setup"** contains the code to start the lab setup from your local machine.
    - This setup uses GitLab-managed Terraform states. In other words, the Terraform states are
      stored remotely in GitLab using the http backend, so you will need to set this up in order to
      use those states when updating the security rules.
    - The initialization setup prepares everything except opening the HTTP port; to do that you
      will have to run the pipeline (see the sketch after this list).
    - The pipelines have to be manually approved in GitLab.
    - Check the README.md in the **"aws_setup"** directory for more details about the initial setup.
    - It uses the AWS credentials from the default profile in $HOME/.aws.
    - When you finish playing, don't forget to delete everything by executing `terraform destroy` in
      the **"aws_setup"** directory.
- The **main directory** contains the code for deploying/modifying AWS security rules with
  GitLab CI (pipelines), using GitLab-managed Terraform states.
    - You can view the Terraform states in GitLab -> Operations -> Terraform.
    - You will need to create an access key in AWS and configure that key in GitLab;
      this way GitLab passes the AWS credentials to Terraform during pipeline execution.
    - Do this in AWS at IAM -> Users -> Security Credentials -> Access Keys -> generate access key.
    - And in GitLab at Project -> Settings -> CI/CD -> Variables, where you add the 2 variables
      `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`.
- The **scripts** directory contains a couple of Python scripts that help with parsing and generating
  outputs for the pipeline.
- The **test-pipeline** directory contains a pipeline that checks the status of the services once
  the deploy has finished.
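As referenced in the list above, the initial setup deliberately leaves the wrong port (8000) on the public security group, and opening HTTP is the pipeline's job. Below is a minimal sketch of what that change amounts to, assuming the only edit is switching the `HTTP_IN` ingress rule of `custom_sg_public` (shown further below) to port 80:

```
resource "aws_security_group" "custom_sg_public" {
  name        = "custom_sg_public"
  description = "Instances Services"
  vpc_id      = aws_vpc.ipspace.id
  ingress {
    description = "HTTP_IN"
    // Corrected port, applied by approving and running the pipeline
    from_port   = 80
    to_port     = 80
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }
}
```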
#### Other notes:
- To facilitate SSH connections, an SSH key is generated and installed on
  the instances. For development purposes only, that key is saved locally as `id_rsa_aws.pem`
  on your PC and on the public server.
  Change its permissions before using it with `chmod 600 id_rsa_aws.pem`.
  For future reference, you can also read it from the state files (see the sketch below).
- To connect to the front/public server just run `ssh -i id_rsa_aws.pem ubuntu@public_ip`.
- Likewise, to connect to the back/private server, run `ssh -i id_rsa_aws.pem ubuntu@back_private_ip`
  from the front/public server.
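The generated key is not exposed in `outputs.tf`, so "reading it from the state files" means digging into the raw state. A hypothetical output like the one below (not part of this repo, shown only as an illustration) would expose it as a sensitive value:

```
# Hypothetical helper, not present in this repo: expose the generated key as a
# sensitive output so it can be recovered from the GitLab-managed state.
output "ssh_private_key" {
  value       = tls_private_key.ssh-key.private_key_pem
  sensitive   = true
  description = "Private key generated for the lab instances."
}
```

With Terraform 0.14 or newer it could then be printed with `terraform output -raw ssh_private_key`.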
provider "aws" {
  profile = "default"
  region  = var.region
}
There are 2 ways to use GitLab-managed Terraform state:
1. launch Terraform from the local machine and store the state in GitLab with the http backend
2. use GitLab CI (pipelines)

This setup directory uses option 1 to set up the lab, while the root
folder uses option 2 to read the Terraform states created at setup
time and modify the security rules from within the pipeline.
Below is a summary of how to set up the environment.
Read the link below for more details on how to initialize and use each one:
https://docs.gitlab.com/ee/user/infrastructure/terraform_state.html
#### Option 1:
Use Terraform from the local machine to set up the lab.
**1.- Set up the Terraform backend in your .tf file with**
```
terraform {
  backend "http" {
  }
}
```
**2.- Generate your personal GitLab access token**
Click your avatar (upper right) -> Preferences -> Access Tokens -> set a name,
an expiration date and the `api` scope.
**3.- Initialize Terraform**
Get your GitLab project ID and name from Settings -> General.
Replace the placeholders in the following script with your information.
For `YOUR-STATE-NAME` you can use your project name, but it could also be another custom name. The pipeline uses the project
name; if you change it, change the end of the TF_ADDRESS variable in .gitlab-ci.yml accordingly.
```
terraform init \
-backend-config="address=https://gitlab.com/api/v4/projects/<YOUR-PROJECT-ID>/terraform/state/<YOUR-STATE-NAME>" \
-backend-config="lock_address=https://gitlab.com/api/v4/projects/<YOUR-PROJECT-ID>/terraform/state/<YOUR-STATE-NAME>/lock" \
-backend-config="unlock_address=https://gitlab.com/api/v4/projects/<YOUR-PROJECT-ID>/terraform/state/<YOUR-STATE-NAME>/lock" \
-backend-config="username=<YOUR-USERNAME>" \
-backend-config="password=<YOUR-ACCESS-TOKEN>" \
-backend-config="lock_method=POST" \
-backend-config="unlock_method=DELETE" \
-backend-config="retry_wait_min=5"
```
*To update your GitLab API password you can manually edit `.terraform/terraform.tfstate` or
re-initialize Terraform with the command above.*
**4.- View GitLab managed Terraform States**
In your GitLab project go to Operations -> Terraform
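If you prefer, the non-secret backend settings can also live in the backend block itself instead of being passed as `-backend-config` flags; a sketch (keep the username and token out of version control, for example by still passing them on the command line or via the `TF_HTTP_USERNAME`/`TF_HTTP_PASSWORD` environment variables):

```
terraform {
  backend "http" {
    address        = "https://gitlab.com/api/v4/projects/<YOUR-PROJECT-ID>/terraform/state/<YOUR-STATE-NAME>"
    lock_address   = "https://gitlab.com/api/v4/projects/<YOUR-PROJECT-ID>/terraform/state/<YOUR-STATE-NAME>/lock"
    unlock_address = "https://gitlab.com/api/v4/projects/<YOUR-PROJECT-ID>/terraform/state/<YOUR-STATE-NAME>/lock"
    lock_method    = "POST"
    unlock_method  = "DELETE"
    retry_wait_min = 5
  }
}
```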
provider "aws" {
  profile = "default"
  region  = var.region
}
terraform {
  backend "http" {
  }
}
resource "tls_private_key" "ssh-key" {
  algorithm = "RSA"
  rsa_bits  = 4096
}
resource "aws_key_pair" "generated_key" {
  key_name   = var.key_name
  public_key = tls_private_key.ssh-key.public_key_openssh
  tags = {
    Training = "https://www.ipspace.net/PubCloud/"
  }
}
// Save the key file locally
resource "local_file" "key" {
  content  = tls_private_key.ssh-key.private_key_pem
  filename = "./id_rsa_aws.pem"
  provisioner "local-exec" {
    command = "chmod 600 ${self.filename}"
  }
}
data "aws_ami" "aws_linux" {
  // most_recent = true
  owners = ["099720109477"] # Canonical
  filter {
    name   = "name"
    // values = ["ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-*"]
    /*
      Pin the free-tier AMI ID according to the AWS EC2 console as of 1/2/2021
    */
    values = ["ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20201026"]
  }
  filter {
    name   = "virtualization-type"
    values = ["hvm"]
  }
}
resource "aws_instance" "front" {
  ami                         = data.aws_ami.aws_linux.id
  instance_type               = "t2.micro"
  key_name                    = aws_key_pair.generated_key.key_name
  subnet_id                   = aws_subnet.front.id
  associate_public_ip_address = true
  vpc_security_group_ids      = [aws_security_group.infra_sg_public.id, aws_security_group.custom_sg_public.id]
  connection {
    host        = self.public_ip
    user        = "ubuntu"
    type        = "ssh"
    private_key = tls_private_key.ssh-key.private_key_pem
    timeout     = "2m"
  }
  // Save the key file
  provisioner "file" {
    content     = tls_private_key.ssh-key.private_key_pem
    destination = "/home/ubuntu/id_rsa_aws.pem"
  }
  // Install and set up the web_api server
  provisioner "file" {
    source      = "./web_api/main.py"
    destination = "main.py"
  }
  provisioner "file" {
    source      = "./web_api/fastapi.service"
    destination = "fastapi.service"
  }
  provisioner "file" {
    content     = "https://${aws_s3_bucket.ipspace.bucket}.s3.${var.region}.amazonaws.com/${aws_s3_bucket_object.object.key}"
    destination = "/home/ubuntu/s3_bucket_url_my-pic.txt"
  }
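  // Install FastAPI and uvicorn, register the systemd service, and reboot the instance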
  provisioner "remote-exec" {
    inline = [
      "sleep 5",
      "sudo apt update",
      "sleep 5",
      "sudo apt install -y python3-pip",
      "sudo pip3 install fastapi",
      "sudo pip3 install uvicorn[standard]",
      "chmod 600 id_rsa_aws.pem",
      "sudo mv fastapi.service /etc/systemd/system/",
      "sudo systemctl daemon-reload",
      "sudo systemctl enable fastapi.service",
      "sudo shutdown -fr now",
    ]
  }
  tags = {
    Name     = "front"
    Training = "https://www.ipspace.net/PubCloud/"
  }
}
resource "aws_instance" "back" {
  ami                         = data.aws_ami.aws_linux.id
  instance_type               = "t2.micro"
  key_name                    = aws_key_pair.generated_key.key_name
  subnet_id                   = aws_subnet.back.id
  associate_public_ip_address = false
  vpc_security_group_ids      = [aws_security_group.infra_sg_private.id, aws_security_group.custom_sg_private.id]
  tags = {
    Name     = "back"
    Training = "https://www.ipspace.net/PubCloud/"
  }
}
resource "aws_vpc" "ipspace" {
  cidr_block = "10.1.0.0/16"
  tags = {
    Name     = "ipspace"
    Training = "https://www.ipspace.net/PubCloud/"
  }
}
resource "aws_internet_gateway" "gw" {
  vpc_id = aws_vpc.ipspace.id
  tags = {
    Name     = "ipspace_igw"
    Training = "https://www.ipspace.net/PubCloud/"
  }
}
resource "aws_route_table" "public" {
  vpc_id = aws_vpc.ipspace.id
  tags = {
    Name     = "ipspace_public_rt"
    Training = "https://www.ipspace.net/PubCloud/"
  }
  route {
    cidr_block = "0.0.0.0/0"
    gateway_id = aws_internet_gateway.gw.id
  }
}
resource "aws_route_table" "private" {
  vpc_id = aws_vpc.ipspace.id
  tags = {
    Name     = "ipspace_private_rt"
    Training = "https://www.ipspace.net/PubCloud/"
  }
}
resource "aws_subnet" "front" {
  vpc_id     = aws_vpc.ipspace.id
  cidr_block = "10.1.1.0/24"
  tags = {
    Name     = "front"
    Training = "https://www.ipspace.net/PubCloud/"
  }
}
resource "aws_subnet" "back" {
  vpc_id     = aws_vpc.ipspace.id
  cidr_block = "10.1.2.0/24"
  tags = {
    Name     = "back"
    Training = "https://www.ipspace.net/PubCloud/"
  }
}
resource "aws_route_table_association" "public" {
  subnet_id      = aws_subnet.front.id
  route_table_id = aws_route_table.public.id
}
resource "aws_route_table_association" "private" {
  subnet_id      = aws_subnet.back.id
  route_table_id = aws_route_table.private.id
}
/*
  Print outputs to screen for debug purposes
*/
output "vpc_name" {
  value       = aws_vpc.ipspace.tags.Name
  description = "The Name of the VPC."
}
output "vpc_id" {
  value       = aws_vpc.ipspace.id
  description = "The ID of the VPC."
}
output "front_instance_name" {
  value       = aws_instance.front.tags.Name
  description = "The Name of the front/public instance."
}
output "front_instance_id" {
  value       = aws_instance.front.id
  description = "The ID of the front/public instance."
}
output "front_private_ip" {
  value       = aws_instance.front.private_ip
  description = "The private IP address of the public server instance."
}
output "front_public_ip" {
  value       = aws_instance.front.public_ip
  description = "The public IP address of the public server instance."
}
output "back_instance_name" {
  value       = aws_instance.back.tags.Name
  description = "The Name of the back/private instance."
}
output "back_instance_id" {
  value       = aws_instance.back.id
  description = "The ID of the back/private instance."
}
output "back_private_ip" {
  value       = aws_instance.back.private_ip
  description = "The private IP address of the private server instance."
}
output "infra_sg_public_name" {
  value       = aws_security_group.infra_sg_public.name
  description = "The Name of the public SG for basic infrastructure."
}
output "infra_sg_public_id" {
  value       = aws_security_group.infra_sg_public.id
  description = "The ID of the public SG for basic infrastructure."
}
output "infra_sg_private_name" {
  value       = aws_security_group.infra_sg_private.name
  description = "The Name of the private SG for basic infrastructure."
}
output "infra_sg_private_id" {
  value       = aws_security_group.infra_sg_private.id
  description = "The ID of the private SG for basic infrastructure."
}
output "custom_sg_public_name" {
  value       = aws_security_group.custom_sg_public.name
  description = "The Name of the custom public SG modified by the pipeline."
}
output "custom_sg_public_id" {
  value       = aws_security_group.custom_sg_public.id
  description = "The ID of the custom public SG modified by the pipeline."
}
output "custom_sg_private_name" {
  value       = aws_security_group.custom_sg_private.name
  description = "The Name of the custom private SG modified by the pipeline."
}
output "custom_sg_private_id" {
  value       = aws_security_group.custom_sg_private.id
  description = "The ID of the custom private SG modified by the pipeline."
}
terraform {
  required_providers {
    aws = {
      source = "hashicorp/aws"
    }
    tls = {
      source = "hashicorp/tls"
    }
  }
  required_version = ">= 0.13"
}
resource "aws_s3_bucket" "ipspace" {
  bucket = "cloud-networksecurity"
  acl    = "public-read"
}
resource "aws_s3_bucket_object" "object" {
  bucket        = aws_s3_bucket.ipspace.bucket
  key           = "my-pic.jpg"
  source        = "./web_api/my-pic.jpg"
  storage_class = "ONEZONE_IA"
}
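// Allow downloads of the uploaded object only from the front instance's public IP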
resource "aws_s3_bucket_policy" "bucket_policy" {
  bucket = aws_s3_bucket.ipspace.id
  policy = <<POLICY
{
  "Version": "2012-10-17",
  "Id": "vpc_only",
  "Statement": [
    {
      "Sid": "GetOnlyFromVPC",
      "Effect": "Allow",
      "Principal": "*",
      "Action": [
        "s3:GetObject",
        "s3:GetObjectVersion"
      ],
      "Resource": [
        "arn:aws:s3:::cloud-networksecurity/my-pic.jpg"
      ],
      "Condition": {
        "IpAddress": {
          "aws:SourceIp": "${aws_instance.front.public_ip}"
        }
      }
    }
  ]
}
POLICY
}
// The following 2 resources will be the ones modified by the pipeline
resource "aws_security_group" "custom_sg_public" {
  name        = "custom_sg_public"
  description = "Instances Services"
  vpc_id      = aws_vpc.ipspace.id
  tags = {
    Training = "https://www.ipspace.net/PubCloud/"
  }
  ingress {
    description = "HTTP_IN"
    // Wrong port number, to be changed to 80 later by the pipeline
    from_port   = 8000
    to_port     = 8000
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }
}
resource "aws_security_group" "custom_sg_private" {
  name        = "custom_sg_private"
  description = "Instances Services"
  vpc_id      = aws_vpc.ipspace.id
  tags = {
    Training = "https://www.ipspace.net/PubCloud/"
  }
  ingress {
    description     = "MYSQL_IN"
    // Wrong port number, to be changed later by the pipeline (presumably to MySQL's default 3306)
    from_port       = 3000
    to_port         = 3000
    protocol        = "tcp"
    security_groups = [aws_security_group.custom_sg_public.id]
  }
}
resource "aws_security_group" "infra_sg_public" {
  name        = "infra_public"
  description = "Infrastructure Services"