// Package config defines the deployment configuration used to build the
// project's AWS infrastructure, services and functions for a target environment.
package config

import (
	"context"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"log"
	"os"
	"path/filepath"
	"strings"

	"geeks-accelerator/oss/saas-starter-kit/internal/platform/web/webcontext"
	"geeks-accelerator/oss/saas-starter-kit/internal/schema"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/cloudfront"
	"github.com/aws/aws-sdk-go/service/rds"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/iancoleman/strcase"
	"github.com/jmoiron/sqlx"
	"github.com/pkg/errors"
	"gitlab.com/geeks-accelerator/oss/devops/pkg/devdeploy"
)

const (
	// GitLabProjectBaseUrl is the base url used to create links to a specific CI/CD job or pipeline by ID.
	GitLabProjectBaseUrl = "https://gitlab.com/geeks-accelerator/oss/saas-starter-kit"

	// EnableRdsServerless will use the Aurora database engine that scales the capacity based on database load. This is
	// a good option for intermittent or unpredictable workloads.
	EnableRdsServerless = true

	// EnableCloudFront will create a CloudFront distribution (CDN) that is associated with your public bucket.
	// Static asset files will be served by CloudFront instead of from S3 which will improve performance.
	EnableCloudFront = true
)

var (
	// ProjectNamePrefix will be prepended to the name of the project and is used
	// when prefixing AWS resource names; see NewConfig.
	ProjectNamePrefix = ""
)

// Env defines the target deployment environment.
type Env = string

// Supported target deployment environments.
var (
	EnvDev   Env = webcontext.Env_Dev
	EnvStage Env = webcontext.Env_Stage
	EnvProd  Env = webcontext.Env_Prod
)

// EnvNames is the list of env names used by main.go for help.
var EnvNames = []Env{
	EnvDev,
	EnvStage,
	EnvProd,
}

Lee Brown's avatar
Lee Brown committed
59 60 61 62 63
// init ensures global variables are set correctly.
func init() {
	ProjectNamePrefix = strings.Replace(ProjectNamePrefix, ".", "-", -1)
}

Lee Brown's avatar
Lee Brown committed
64 65 66
// NewConfig defines the details to setup the target environment for the project to build services and functions.
func NewConfig(log *log.Logger, targetEnv Env, awsCredentials devdeploy.AwsCredentials) (*devdeploy.Config, error) {
	cfg := &devdeploy.Config{
Lee Brown's avatar
Lee Brown committed
67 68 69 70 71
		Env:            targetEnv,
		AwsCredentials: awsCredentials,
	}

	// If AWS Credentials are not set and use role is not enabled, try to load the credentials from env vars.
Lee Brown's avatar
Lee Brown committed
72
	if cfg.AwsCredentials.UseRole == false && cfg.AwsCredentials.AccessKeyID == "" {
Lee Brown's avatar
Lee Brown committed
73
		var err error
Lee Brown's avatar
Lee Brown committed
74
		cfg.AwsCredentials, err = devdeploy.GetAwsCredentialsFromEnv(cfg.Env)
Lee Brown's avatar
Lee Brown committed
75 76 77
		if err != nil {
			return nil, err
		}
Lee Brown's avatar
Lee Brown committed
78 79
	} else if cfg.AwsCredentials.Region == "" {
		awsCreds, err := devdeploy.GetAwsCredentialsFromEnv(cfg.Env)
Lee Brown's avatar
Lee Brown committed
80 81 82
		if err != nil {
			return nil, err
		}
Lee Brown's avatar
Lee Brown committed
83
		cfg.AwsCredentials.Region = awsCreds.Region
Lee Brown's avatar
Lee Brown committed
84 85 86 87 88
	}

	// Get the current working directory. This should be somewhere contained within the project.
	workDir, err := os.Getwd()
	if err != nil {
Lee Brown's avatar
Lee Brown committed
89
		return cfg, errors.Wrap(err, "Failed to get current working directory.")
Lee Brown's avatar
Lee Brown committed
90 91 92 93 94 95 96 97 98 99 100 101 102 103 104
	}

	// Set the project root directory and project name. This is current set by finding the go.mod file for the project
	// repo. Project name is the directory name.
	modDetails, err := devdeploy.LoadModuleDetails(workDir)
	if err != nil {
		return cfg, err
	}

	// ProjectRoot should be the root directory for the project.
	cfg.ProjectRoot = modDetails.ProjectRoot

	// ProjectName will be used for prefixing AWS resources. This could be changed as needed or manually defined.
	cfg.ProjectName = ProjectNamePrefix + modDetails.ProjectName

105 106 107 108
	// In a verbatim fork of the repo, a CI/CD would fail due to a conflict creating AWS resources (such as S3) since
	// their name is calculated with the go.mod path. Since the name-scope of AWS resources is region/global scope,
	// it will fail to create appropriate resources for the account of the forked user.
	if cfg.ProjectName == "saas-starter-kit" {
Lee Brown's avatar
Lee Brown committed
109
		remoteUser := gitRemoteUser(modDetails.ProjectRoot)
Lee Brown's avatar
Lee Brown committed
110

Lee Brown's avatar
Lee Brown committed
111
		// Its a true fork from the origin repo.
Lee Brown's avatar
Lee Brown committed
112
		if remoteUser != "oss" && remoteUser != "geeks-accelerator" {
Lee Brown's avatar
Lee Brown committed
113
			// Replace the prefix 'saas' with the parent directory name, hopefully the gitlab group/username.
Lee Brown's avatar
Lee Brown committed
114 115 116 117 118 119 120
			projectPrefix := filepath.Base(filepath.Dir(cfg.ProjectRoot))
			projectPrefix = strings.Replace(projectPrefix, ".", "", -1)
			if len(projectPrefix) > 10 {
				projectPrefix = projectPrefix[0:10]
			}

			cfg.ProjectName = projectPrefix + "-starter-kit"
Lee Brown's avatar
Lee Brown committed
121

Lee Brown's avatar
Lee Brown committed
122
			log.Println("switching project name to ", cfg.ProjectName)
Lee Brown's avatar
Lee Brown committed
123
		}
124 125
	}

Lee Brown's avatar
Lee Brown committed
126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222
	// Set default AWS ECR Repository Name.
	cfg.AwsEcrRepository = &devdeploy.AwsEcrRepository{
		RepositoryName: cfg.ProjectName,
		Tags: []devdeploy.Tag{
			{Key: devdeploy.AwsTagNameProject, Value: cfg.ProjectName},
			{Key: devdeploy.AwsTagNameEnv, Value: cfg.Env},
		},
	}

	// Set the deployment to use the default VPC for the region.
	cfg.AwsEc2Vpc = &devdeploy.AwsEc2Vpc{
		IsDefault: true,
	}

	// Set the security group to use for the deployed services, database and cluster. This will used the VPC ID defined
	// for the deployment.
	cfg.AwsEc2SecurityGroup = &devdeploy.AwsEc2SecurityGroup{
		GroupName:   cfg.ProjectName + "-" + cfg.Env,
		Description: fmt.Sprintf("Security group for %s services running on ECS", cfg.ProjectName),
		Tags: []devdeploy.Tag{
			{Key: devdeploy.AwsTagNameProject, Value: cfg.ProjectName},
			{Key: devdeploy.AwsTagNameEnv, Value: cfg.Env},
		},
	}

	// Set the name of the EC2 Security Group used by the gitlab runner. This is used to ensure the security
	// group defined above has access to the RDS cluster/instance and can thus handle schema migrations.
	cfg.GitlabRunnerEc2SecurityGroupName = "gitlab-runner"

	// Set the s3 buckets used by the deployed services.
	// S3 temp prefix used by services for short term storage. A lifecycle policy will be used for expiration.
	s3BucketTempPrefix := "tmp/"

	// Defines a life cycle policy to expire keys for the temp directory.
	bucketLifecycleTempRule := &s3.LifecycleRule{
		ID:     aws.String("Rule for : " + s3BucketTempPrefix),
		Status: aws.String("Enabled"),
		Filter: &s3.LifecycleRuleFilter{
			Prefix: aws.String(s3BucketTempPrefix),
		},
		Expiration: &s3.LifecycleExpiration{
			// Indicates the lifetime, in days, of the objects that are subject to the rule.
			// The value must be a non-zero positive integer.
			Days: aws.Int64(1),
		},
		// Specifies the days since the initiation of an incomplete multipart upload
		// that Amazon S3 will wait before permanently removing all parts of the upload.
		// For more information, see Aborting Incomplete Multipart Uploads Using a Bucket
		// Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config)
		// in the Amazon Simple Storage Service Developer Guide.
		AbortIncompleteMultipartUpload: &s3.AbortIncompleteMultipartUpload{
			DaysAfterInitiation: aws.Int64(1),
		},
	}

	// Define the public S3 bucket used to serve static files for all the services.
	cfg.AwsS3BucketPublic = &devdeploy.AwsS3Bucket{
		BucketName:         cfg.ProjectName + "-public",
		IsPublic:           true,
		TempPrefix:         s3BucketTempPrefix,
		LocationConstraint: &cfg.AwsCredentials.Region,
		LifecycleRules:     []*s3.LifecycleRule{bucketLifecycleTempRule},
		CORSRules: []*s3.CORSRule{
			&s3.CORSRule{
				// Headers that are specified in the Access-Control-Request-Headers header.
				// These headers are allowed in a preflight OPTIONS request. In response to
				// any preflight OPTIONS request, Amazon S3 returns any requested headers that
				// are allowed.
				// AllowedHeaders: aws.StringSlice([]string{}),

				// An HTTP method that you allow the origin to execute. Valid values are GET,
				// PUT, HEAD, POST, and DELETE.
				//
				// AllowedMethods is a required field
				AllowedMethods: aws.StringSlice([]string{"GET", "POST"}),

				// One or more origins you want customers to be able to access the bucket from.
				//
				// AllowedOrigins is a required field
				AllowedOrigins: aws.StringSlice([]string{"*"}),

				// One or more headers in the response that you want customers to be able to
				// access from their applications (for example, from a JavaScript XMLHttpRequest
				// object).
				// ExposeHeaders: aws.StringSlice([]string{}),

				// The time in seconds that your browser is to cache the preflight response
				// for the specified resource.
				// MaxAgeSeconds: aws.Int64(),
			},
		},
	}

	// The base s3 key prefix used to upload static files.
	cfg.AwsS3BucketPublicKeyPrefix = "/public"

	// For production, enable Cloudfront CDN for all static files to avoid serving them from the slower S3 option.
Lee Brown's avatar
Lee Brown committed
223
	if EnableCloudFront && cfg.Env == EnvProd {
Lee Brown's avatar
Lee Brown committed
224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354
		cfg.AwsS3BucketPublic.CloudFront = &devdeploy.AwsS3BucketCloudFront{
			// S3 key prefix to request your content from a directory in your Amazon S3 bucket.
			OriginPath: cfg.AwsS3BucketPublicKeyPrefix,

			// A complex type that controls whether CloudFront caches the response to requests.
			CachedMethods: []string{"HEAD", "GET"},

			// The distribution's configuration information.
			DistributionConfig: &cloudfront.DistributionConfig{
				Comment:       aws.String(""),
				Enabled:       aws.Bool(true),
				HttpVersion:   aws.String("http2"),
				IsIPV6Enabled: aws.Bool(true),
				DefaultCacheBehavior: &cloudfront.DefaultCacheBehavior{
					Compress:   aws.Bool(true),
					DefaultTTL: aws.Int64(1209600),
					MinTTL:     aws.Int64(604800),
					MaxTTL:     aws.Int64(31536000),
					ForwardedValues: &cloudfront.ForwardedValues{
						QueryString: aws.Bool(true),
						Cookies: &cloudfront.CookiePreference{
							Forward: aws.String("none"),
						},
					},
					TrustedSigners: &cloudfront.TrustedSigners{
						Enabled:  aws.Bool(false),
						Quantity: aws.Int64(0),
					},
					ViewerProtocolPolicy: aws.String("allow-all"),
				},
				ViewerCertificate: &cloudfront.ViewerCertificate{
					CertificateSource:            aws.String("cloudfront"),
					MinimumProtocolVersion:       aws.String("TLSv1"),
					CloudFrontDefaultCertificate: aws.Bool(true),
				},
				PriceClass:      aws.String("PriceClass_All"),
				CallerReference: aws.String("devops-deploy" + cfg.AwsS3BucketPublic.BucketName),
			},
		}
	}

	// Define the private S3 bucket used for long term file storage including but not limited to: log exports,
	// AWS Lambda code, application caching.
	cfg.AwsS3BucketPrivate = &devdeploy.AwsS3Bucket{
		BucketName:         cfg.ProjectName + "-private",
		IsPublic:           false,
		TempPrefix:         s3BucketTempPrefix,
		LocationConstraint: &cfg.AwsCredentials.Region,
		LifecycleRules:     []*s3.LifecycleRule{bucketLifecycleTempRule},
		PublicAccessBlock: &s3.PublicAccessBlockConfiguration{
			// Specifies whether Amazon S3 should block public access control lists (ACLs)
			// for this bucket and objects in this bucket. Setting this element to TRUE
			// causes the following behavior:
			//
			//    * PUT Bucket acl and PUT Object acl calls fail if the specified ACL is
			//    public.
			//
			//    * PUT Object calls fail if the request includes a public ACL.
			//
			// Enabling this setting doesn't affect existing policies or ACLs.
			BlockPublicAcls: aws.Bool(true),

			// Specifies whether Amazon S3 should block public bucket policies for this
			// bucket. Setting this element to TRUE causes Amazon S3 to reject calls to
			// PUT Bucket policy if the specified bucket policy allows public access.
			//
			// Enabling this setting doesn't affect existing bucket policies.
			BlockPublicPolicy: aws.Bool(true),

			// Specifies whether Amazon S3 should restrict public bucket policies for this
			// bucket. Setting this element to TRUE restricts access to this bucket to only
			// AWS services and authorized users within this account if the bucket has a
			// public policy.
			//
			// Enabling this setting doesn't affect previously stored bucket policies, except
			// that public and cross-account access within any public bucket policy, including
			// non-public delegation to specific accounts, is blocked.
			RestrictPublicBuckets: aws.Bool(true),

			// Specifies whether Amazon S3 should ignore public ACLs for this bucket and
			// objects in this bucket. Setting this element to TRUE causes Amazon S3 to
			// ignore all public ACLs on this bucket and objects in this bucket.
			//
			// Enabling this setting doesn't affect the persistence of any existing ACLs
			// and doesn't prevent new public ACLs from being set.
			IgnorePublicAcls: aws.Bool(true),
		},
	}

	// Add a bucket policy to enable exports from Cloudwatch Logs for the private S3 bucket.
	cfg.AwsS3BucketPrivate.Policy = func() string {
		policyResource := strings.Trim(filepath.Join(cfg.AwsS3BucketPrivate.BucketName, cfg.AwsS3BucketPrivate.TempPrefix), "/")
		return fmt.Sprintf(`{
				"Version": "2012-10-17",
				"Statement": [
				  {
					  "Action": "s3:GetBucketAcl",
					  "Effect": "Allow",
					  "Resource": "arn:aws:s3:::%s",
					  "Principal": { "Service": "logs.%s.amazonaws.com" }
				  },
				  {
					  "Action": "s3:PutObject" ,
					  "Effect": "Allow",
					  "Resource": "arn:aws:s3:::%s/*",
					  "Condition": { "StringEquals": { "s3:x-amz-acl": "bucket-owner-full-control" } },
					  "Principal": { "Service": "logs.%s.amazonaws.com" }
				  }
				]
			}`, cfg.AwsS3BucketPrivate.BucketName, cfg.AwsCredentials.Region, policyResource, cfg.AwsCredentials.Region)
	}()

	// Define the Redis Cache cluster used for ephemeral storage.
	cfg.AwsElasticCacheCluster = &devdeploy.AwsElasticCacheCluster{
		CacheClusterId:          cfg.ProjectName + "-" + cfg.Env,
		CacheNodeType:           "cache.t2.micro",
		CacheSubnetGroupName:    "default",
		Engine:                  "redis",
		EngineVersion:           "5.0.4",
		NumCacheNodes:           1,
		Port:                    6379,
		AutoMinorVersionUpgrade: aws.Bool(true),
		SnapshotRetentionLimit:  aws.Int64(7),
		ParameterNameValues: []devdeploy.AwsElasticCacheParameter{
			devdeploy.AwsElasticCacheParameter{
				ParameterName:  "maxmemory-policy",
				ParameterValue: "allkeys-lru",
			},
		},
	}

355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408
	// If serverless RDS is enabled, defined the RDS database cluster and link it to the database instance.
	if EnableRdsServerless {
		cfg.AwsRdsDBCluster = &devdeploy.AwsRdsDBCluster{
			DBClusterIdentifier:   cfg.ProjectName + "-" + cfg.Env,
			Engine:                "aurora-postgresql",
			EngineMode:            "serverless",
			DatabaseName:          "shared",
			MasterUsername:        "god",
			Port:                  5432,
			BackupRetentionPeriod: aws.Int64(7),
			CopyTagsToSnapshot:    aws.Bool(true),
			Tags: []devdeploy.Tag{
				{Key: devdeploy.AwsTagNameProject, Value: cfg.ProjectName},
				{Key: devdeploy.AwsTagNameEnv, Value: cfg.Env},
			},
			PreCreate: func(input *rds.CreateDBClusterInput) error {
				input.ScalingConfiguration = &rds.ScalingConfiguration{
					// A value that indicates whether to allow or disallow automatic pause for an
					// Aurora DB cluster in serverless DB engine mode. A DB cluster can be paused
					// only when it's idle (it has no connections).
					//
					// If a DB cluster is paused for more than seven days, the DB cluster might
					// be backed up with a snapshot. In this case, the DB cluster is restored when
					// there is a request to connect to it.
					AutoPause: aws.Bool(true),

					// The maximum capacity for an Aurora DB cluster in serverless DB engine mode.
					// Valid capacity values are 1, 2, 4, 8, 16, 32, 64, 128, and 256.
					// The maximum capacity must be greater than or equal to the minimum capacity.
					MaxCapacity: aws.Int64(2),

					// The minimum capacity for an Aurora DB cluster in serverless DB engine mode.
					// Valid capacity values are 1, 2, 4, 8, 16, 32, 64, 128, and 256.
					// The minimum capacity must be less than or equal to the maximum capacity.
					MinCapacity: aws.Int64(2),

					// The time, in seconds, before an Aurora DB cluster in serverless mode is paused.
					SecondsUntilAutoPause: aws.Int64(3600),

					// The action to take when the timeout is reached, either ForceApplyCapacityChange
					// or RollbackCapacityChange.
					// ForceApplyCapacityChange sets the capacity to the specified value as soon
					// as possible.
					// RollbackCapacityChange, the default, ignores the capacity change if a scaling
					// point is not found in the timeout period.
					// If you specify ForceApplyCapacityChange, connections that prevent Aurora
					// Serverless from finding a scaling point might be dropped.
					// For more information, see Autoscaling for Aurora Serverless (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless.how-it-works.html#aurora-serverless.how-it-works.auto-scaling)
					// in the Amazon Aurora User Guide.
					TimeoutAction: aws.String("ForceApplyCapacityChange"),
				}

				return nil
			},
Lee Brown's avatar
Lee Brown committed
409
			AfterCreate: func(res *rds.DBCluster, dbInfo *devdeploy.DBConnInfo, masterDb *sqlx.DB) error {
410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430
				return schema.Migrate(context.Background(), cfg.Env, masterDb, log, false)
			},
		}
	} else {
		// Define the RDS database instance for transactional data. A random password will be generated for any created instance.
		cfg.AwsRdsDBInstance = &devdeploy.AwsRdsDBInstance{
			DBInstanceIdentifier:    cfg.ProjectName + "-" + cfg.Env,
			DBName:                  "shared",
			Engine:                  "postgres",
			MasterUsername:          "god",
			Port:                    5432,
			DBInstanceClass:         "db.t2.small",
			AllocatedStorage:        20,
			PubliclyAccessible:      false,
			BackupRetentionPeriod:   aws.Int64(7),
			AutoMinorVersionUpgrade: true,
			CopyTagsToSnapshot:      aws.Bool(true),
			Tags: []devdeploy.Tag{
				{Key: devdeploy.AwsTagNameProject, Value: cfg.ProjectName},
				{Key: devdeploy.AwsTagNameEnv, Value: cfg.Env},
			},
Lee Brown's avatar
Lee Brown committed
431
			AfterCreate: func(res *rds.DBInstance, dbInfo *devdeploy.DBConnInfo, masterDb *sqlx.DB) error {
432 433 434
				return schema.Migrate(context.Background(), cfg.Env, masterDb, log, false)
			},
		}
Lee Brown's avatar
Lee Brown committed
435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462
	}

	// AwsIamPolicy defines the name and policy that will be attached to the task role. The policy document grants
	// the permissions required for deployed services to access AWS services. If the policy already exists, the
	// statements will be used to add new required actions, but not for removal.
	cfg.AwsIamPolicy = &devdeploy.AwsIamPolicy{
		PolicyName:  fmt.Sprintf("%s%sServices", cfg.ProjectNameCamel(), strcase.ToCamel(cfg.Env)),
		Description: fmt.Sprintf("Defines access for %s services. ", cfg.ProjectName),
		PolicyDocument: devdeploy.AwsIamPolicyDocument{
			Version: "2012-10-17",
			Statement: []devdeploy.AwsIamStatementEntry{
				{
					Sid:    "DefaultServiceAccess",
					Effect: "Allow",
					Action: []string{
						"cloudfront:ListDistributions",
						"ec2:DescribeNetworkInterfaces",
						"ec2:DeleteNetworkInterface",
						"ecs:ListTasks",
						"ecs:DescribeServices",
						"ecs:DescribeTasks",
						"ec2:DescribeNetworkInterfaces",
						"route53:ListHostedZones",
						"route53:ListResourceRecordSets",
						"route53:ChangeResourceRecordSets",
						"ecs:UpdateService",
						"ses:SendEmail",
						"ses:ListIdentities",
Lee Brown's avatar
Lee Brown committed
463
						"ses:GetAccountSendingEnabled",
Lee Brown's avatar
Lee Brown committed
464 465 466 467 468 469 470 471 472
						"secretsmanager:ListSecretVersionIds",
						"secretsmanager:GetSecretValue",
						"secretsmanager:CreateSecret",
						"secretsmanager:UpdateSecret",
						"secretsmanager:RestoreSecret",
						"secretsmanager:DeleteSecret",
					},
					Resource: "*",
				},
Lee Brown's avatar
Lee Brown committed
473 474 475 476 477 478 479

				{
					Effect: "Allow",
					Action: []string{
						"s3:ListBucket",
					},
					Resource: []string{
480 481
						"arn:aws:s3:::" + cfg.AwsS3BucketPublic.BucketName,
						"arn:aws:s3:::" + cfg.AwsS3BucketPrivate.BucketName,
Lee Brown's avatar
Lee Brown committed
482 483 484 485 486 487 488 489 490 491
					},
				},
				{
					Effect: "Allow",
					Action: []string{
						"s3:PutObject",
						"s3:PutObjectAcl",
						"s3:GetObject",
					},
					Resource: []string{
492 493
						"arn:aws:::" + cfg.AwsS3BucketPublic.BucketName + "/*",
						"arn:aws:::" + cfg.AwsS3BucketPrivate.BucketName + "/*",
Lee Brown's avatar
Lee Brown committed
494 495
					},
				},
Lee Brown's avatar
Lee Brown committed
496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533
				{
					Sid:    "ServiceInvokeLambda",
					Effect: "Allow",
					Action: []string{
						"iam:GetRole",
						"lambda:InvokeFunction",
						"lambda:ListVersionsByFunction",
						"lambda:GetFunction",
						"lambda:InvokeAsync",
						"lambda:GetFunctionConfiguration",
						"iam:PassRole",
						"lambda:GetAlias",
						"lambda:GetPolicy",
					},
					Resource: []string{
						"arn:aws:iam:::role/*",
						"arn:aws:lambda:::function:*",
					},
				},
				{
					Sid:    "datadoglambda",
					Effect: "Allow",
					Action: []string{
						"cloudwatch:Get*",
						"cloudwatch:List*",
						"ec2:Describe*",
						"support:*",
						"tag:GetResources",
						"tag:GetTagKeys",
						"tag:GetTagValues",
					},
					Resource: "*",
				},
			},
		},
	}
	log.Printf("\t\tSet Task Policy Name to '%s'.", cfg.AwsIamPolicy.PolicyName)

Lee Brown's avatar
Lee Brown committed
534 535
	// Append all the defined services to the config.
	for _, n := range ServiceNames {
Lee Brown's avatar
Lee Brown committed
536
		srv, err := NewService(log, n, cfg)
Lee Brown's avatar
Lee Brown committed
537 538 539 540 541 542 543 544
		if err != nil {
			return nil, err
		}
		cfg.ProjectServices = append(cfg.ProjectServices, srv)
	}

	// Append all the defined functions to the config.
	for _, n := range FunctionNames {
Lee Brown's avatar
Lee Brown committed
545
		fn, err := NewFunction(log, n, cfg)
Lee Brown's avatar
Lee Brown committed
546 547 548 549 550 551
		if err != nil {
			return nil, err
		}
		cfg.ProjectFunctions = append(cfg.ProjectFunctions, fn)
	}

Lee Brown's avatar
Lee Brown committed
552 553 554 555 556 557 558 559 560 561 562 563 564
	return cfg, nil
}

// getDatadogApiKey tries to find the datadog api key from env variable or AWS Secrets Manager.
func getDatadogApiKey(cfg *devdeploy.Config) (string, error) {
	// Load Datadog API key which can be either stored in an environment variable or in AWS Secrets Manager.
	// 1. Check env vars for [DEV|STAGE|PROD]_DD_API_KEY and DD_API_KEY
	apiKey := devdeploy.GetTargetEnv(cfg.Env, "DD_API_KEY")

	// 2. Check AWS Secrets Manager for datadog entry prefixed with target environment.
	if apiKey == "" {
		prefixedSecretId := cfg.SecretID("datadog")
		var err error
Lee Brown's avatar
Lee Brown committed
565
		apiKey, err = devdeploy.SecretManagerGetString(cfg.AwsCredentials.Session(), prefixedSecretId)
Lee Brown's avatar
Lee Brown committed
566
		if err != nil {
Lee Brown's avatar
Lee Brown committed
567
			if errors.Cause(err) != devdeploy.ErrSecreteNotFound {
Lee Brown's avatar
Lee Brown committed
568 569 570 571 572 573 574
				return "", err
			}
		}
	}

	// 3. Check AWS Secrets Manager for Datadog entry.
	if apiKey == "" {
575
		secretId := "datadog"
Lee Brown's avatar
Lee Brown committed
576
		var err error
Lee Brown's avatar
Lee Brown committed
577
		apiKey, err = devdeploy.SecretManagerGetString(cfg.AwsCredentials.Session(), secretId)
Lee Brown's avatar
Lee Brown committed
578
		if err != nil {
Lee Brown's avatar
Lee Brown committed
579
			if errors.Cause(err) != devdeploy.ErrSecreteNotFound {
Lee Brown's avatar
Lee Brown committed
580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606
				return "", err
			}
		}
	}

	return apiKey, nil
}

// getCommitRef returns a string that will be used by go build to replace main.go:build constant.
func getCommitRef() string {
	var commitRef string

	// Set the commit ref based on the GitLab CI/CD environment variables.
	if ev := os.Getenv("CI_COMMIT_TAG"); ev != "" {
		commitRef = "tag-" + ev
	} else if ev := os.Getenv("CI_COMMIT_REF_NAME"); ev != "" {
		commitRef = "branch-" + ev
	}

	if commitRef != "" {
		if ev := os.Getenv("CI_COMMIT_SHORT_SHA"); ev != "" {
			commitRef = commitRef + "@" + ev
		}
	}

	return commitRef
}
Lee Brown's avatar
Lee Brown committed
607 608 609 610 611

// gitRemoteUser returns the git username/organization for the git repo
func gitRemoteUser(projectRoot string) string {

	var remoteUrl string
612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630
	if ev := os.Getenv("CI_PROJECT_PATH"); ev != "" {
		if strings.Contains(ev, "/") {
			remoteUrl = strings.Split(ev, "/")[1]
		} else {
			remoteUrl = ev
		}
	} else {
		dat, err := ioutil.ReadFile(filepath.Join(projectRoot, ".git/config"))
		if err != nil {
			return ""
		}

		lines := strings.Split(string(dat), "\n")
		for _, l := range lines {
			l = strings.TrimSpace(l)
			if strings.HasPrefix(l, "url =") {
				remoteUrl = l
				break
			}
Lee Brown's avatar
Lee Brown committed
631 632
		}

Lee Brown's avatar
Lee Brown committed
633 634 635 636 637 638 639 640 641
		if remoteUrl == "" {
			return ""
		}
		remoteUrl = strings.TrimSpace(strings.Split(remoteUrl, "=")[1])

		if !strings.Contains(remoteUrl, ":") {
			return ""
		}
		remoteUrl = strings.Split(remoteUrl, ":")[1]
Lee Brown's avatar
Lee Brown committed
642 643 644

	}

Lee Brown's avatar
Lee Brown committed
645
	remoteUser := strings.Split(remoteUrl, "/")[0]
Lee Brown's avatar
Lee Brown committed
646

Lee Brown's avatar
Lee Brown committed
647 648
	return remoteUser
}
Lee Brown's avatar
Lee Brown committed
649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674

// DeployInfrastructureForTargetEnv executes the deploy commands for a target function.
func DeployInfrastructureForTargetEnv(log *log.Logger, awsCredentials devdeploy.AwsCredentials, targetEnv Env, dryRun bool) error {

	cfg, err := NewConfig(log, targetEnv, awsCredentials)
	if err != nil {
		return err
	}

	if dryRun {
		cfgJSON, err := json.MarshalIndent(cfg, "", "    ")
		if err != nil {
			log.Fatalf("DeployFunctionForTargetEnv : Marshalling config to JSON : %+v", err)
		}
		log.Printf("DeployFunctionForTargetEnv : config : %v\n", string(cfgJSON))

		return nil
	}

	_, err = devdeploy.SetupInfrastructure(log, cfg, devdeploy.SetupOptionSkipCache)
	if err != nil {
		return err
	}

	return nil
}