Commit 44e0a361 authored by Lucas Brown's avatar Lucas Brown

Merge branch 'master' of gitlab.com:geeks-accelerator/oss/saas-starter-kit

parents 7c3f8479 931795a7
......@@ -34,7 +34,7 @@ cache:
.deploy_tmpl: &deploy_tmpl
<<: *job_tmpl
script:
- 'devops deploy -service=${SERVICE} -project=${PROJECT_NAME} -env=${TARGET_ENV} -enable_https=${ENABLE_HTTPS} -enable_elb=${ENABLE_ELB} -primary_host=${PRIMARY_HOST} -host_names=${HOST_NAMES} -private_bucket=${S3_BUCKET_PRIVATE} -public_bucket=${S3_BUCKET_PUBLIC}'
- 'devops deploy -service=${SERVICE} -project=${PROJECT_NAME} -env=${TARGET_ENV} -enable_https=${ENABLE_HTTPS} -enable_elb=${ENABLE_ELB} -primary_host=${PRIMARY_HOST} -host_names=${HOST_NAMES} -private_bucket=${S3_BUCKET_PRIVATE} -public_bucket=${S3_BUCKET_PUBLIC} -static_files_s3=${STATIC_FILES_S3_ENABLED} -static_files_cloudfront=${STATIC_FILES_CLOUDFRONT_ENABLED} -static_files_img_resize=${STATIC_FILES_IMG_RESIZE_ENABLED}'
.migrate_tmpl: &migrate_tmpl
<<: *job_tmpl
......@@ -54,6 +54,45 @@ db:migrate:dev:
TARGET_ENV: 'dev'
AWS_USE_ROLE: 'true'
webapp:build:dev:
<<: *build_tmpl
stage: build:dev
tags:
- dev
only:
- master
- dev
- dev-web-app
variables:
TARGET_ENV: 'dev'
SERVICE: 'web-app'
AWS_USE_ROLE: 'true'
webapp:deploy:dev:
<<: *deploy_tmpl
stage: deploy:dev
tags:
- dev
only:
- master
- dev
- dev-web-app
dependencies:
- 'webapp:build:dev'
- 'db:migrate:dev'
variables:
TARGET_ENV: 'dev'
SERVICE: 'web-app'
ENABLE_HTTPS: 1
ENABLE_ELB: 0
PRIMARY_HOST: 'eproc.tech'
HOST_NAMES: 'www.eproc.tech, dev.eproc.tech'
S3_BUCKET_PRIVATE: 'saas-starter-kit-private'
S3_BUCKET_PUBLIC: 'saas-starter-kit-public'
STATIC_FILES_S3_ENABLED: 'true'
STATIC_FILES_CLOUDFRONT_ENABLED: 'false'
STATIC_FILES_IMG_RESIZE_ENABLED: 'true'
AWS_USE_ROLE: 'true'
webapi:build:dev:
<<: *build_tmpl
stage: build:dev
......@@ -84,14 +123,15 @@ webapi:deploy:dev:
SERVICE: 'web-api'
ENABLE_HTTPS: 1
ENABLE_ELB: 0
PRIMARY_HOST: 'eproc.tech'
HOST_NAMES: 'www.eproc.tech, api.eproc.tech'
PRIMARY_HOST: 'api.eproc.tech'
HOST_NAMES: 'api.dev.eproc.tech'
S3_BUCKET_PRIVATE: 'saas-starter-kit-private'
S3_BUCKET_PUBLIC: 'saas-starter-kit-public'
STATIC_FILES_S3_ENABLED: 'false'
STATIC_FILES_CLOUDFRONT_ENABLED: 'false'
STATIC_FILES_IMG_RESIZE_ENABLED: 'false'
AWS_USE_ROLE: 'true'
#ddlogscollector:deploy:stage:
# <<: *deploy_stage_tmpl
# variables:
......
......@@ -38,6 +38,7 @@ COPY --from=builder /static /static
COPY --from=builder /templates /templates
ENV TEMPLATE_DIR=/templates
ENV STATIC_DIR=/static
ARG service
ENV SERVICE_NAME $service
......
{
"family": "{SERVICE}",
"executionRoleArn": "",
"taskRoleArn": "",
"networkMode": "awsvpc",
"containerDefinitions": [
{
"name": "{ECS_SERVICE}",
"image": "{RELEASE_IMAGE}",
"essential": true,
"logConfiguration": {
"logDriver": "awslogs",
"options": {
"awslogs-group": "{AWS_LOGS_GROUP}",
"awslogs-region": "{AWS_REGION}",
"awslogs-stream-prefix": "ecs"
}
},
"portMappings": [
{
"hostPort": 80,
"protocol": "tcp",
"containerPort": 80
}
],
"cpu": 128,
"memoryReservation": 128,
"volumesFrom": [],
"environment": [
{"name": "AWS_REGION", "value": "{AWS_REGION}"},
{"name": "AWS_USE_ROLE", "value": "true"},
{"name": "AWSLOGS_GROUP", "value": "{AWS_LOGS_GROUP}"},
{"name": "ECS_CLUSTER", "value": "{ECS_CLUSTER}"},
{"name": "ECS_SERVICE", "value": "{ECS_SERVICE}"},
{"name": "WEB_APP_HTTP_HOST", "value": "{HTTP_HOST}"},
{"name": "WEB_APP_HTTPS_HOST", "value": "{HTTPS_HOST}"},
{"name": "WEB_APP_SERVICE_PROJECT", "value": "{APP_PROJECT}"},
{"name": "WEB_APP_SERVICE_BASE_URL", "value": "{APP_BASE_URL}"},
{"name": "WEB_APP_SERVICE_HOST_NAMES", "value": "{HOST_NAMES}"},
{"name": "WEB_APP_SERVICE_ENABLE_HTTPS", "value": "{HTTPS_ENABLED}"},
{"name": "WEB_APP_SERVICE_STATICFILES_S3_ENABLED", "value": "{STATIC_FILES_S3_ENABLED}"},
{"name": "WEB_APP_SERVICE_STATICFILES_S3_PREFIX", "value": "{STATIC_FILES_S3_PREFIX}"},
{"name": "WEB_APP_SERVICE_STATICFILES_CLOUDFRONT_ENABLED", "value": "{STATIC_FILES_CLOUDFRONT_ENABLED}"},
{"name": "WEB_APP_SERVICE_STATICFILES_IMG_RESIZE_ENABLED", "value": "{STATIC_FILES_IMG_RESIZE_ENABLED}"},
{"name": "WEB_APP_REDIS_HOST", "value": "{CACHE_HOST}"},
{"name": "WEB_APP_DB_HOST", "value": "{DB_HOST}"},
{"name": "WEB_APP_DB_USER", "value": "{DB_USER}"},
{"name": "WEB_APP_DB_PASS", "value": "{DB_PASS}"},
{"name": "WEB_APP_DB_DATABASE", "value": "{DB_DATABASE}"},
{"name": "WEB_APP_DB_DRIVER", "value": "{DB_DRIVER}"},
{"name": "WEB_APP_DB_DISABLE_TLS", "value": "{DB_DISABLE_TLS}"},
{"name": "WEB_APP_AUTH_USE_AWS_SECRET_MANAGER", "value": "true"},
{"name": "WEB_APP_AUTH_AWS_SECRET_ID", "value": "auth-{ECS_SERVICE}"},
{"name": "WEB_APP_AWS_S3_BUCKET_PRIVATE", "value": "{AWS_S3_BUCKET_PRIVATE}"},
{"name": "WEB_APP_AWS_S3_BUCKET_PUBLIC", "value": "{AWS_S3_BUCKET_PUBLIC}"},
{"name": "BUILDINFO_CI_COMMIT_REF_NAME", "value": "{CI_COMMIT_REF_NAME}"},
{"name": "BUILDINFO_CI_COMMIT_REF_SLUG", "value": "{CI_COMMIT_REF_SLUG}"},
{"name": "BUILDINFO_CI_COMMIT_SHA", "value": "{CI_COMMIT_SHA}"},
{"name": "BUILDINFO_CI_COMMIT_TAG", "value": "{CI_COMMIT_TAG}"},
{"name": "BUILDINFO_CI_COMMIT_TITLE", "value": "{CI_COMMIT_TITLE}"},
{"name": "BUILDINFO_CI_COMMIT_DESCRIPTION", "value": "{CI_COMMIT_DESCRIPTION}"},
{"name": "BUILDINFO_CI_COMMIT_JOB_ID", "value": "{CI_COMMIT_JOB_ID}"},
{"name": "BUILDINFO_CI_COMMIT_JOB_URL", "value": "{CI_COMMIT_JOB_URL}"},
{"name": "BUILDINFO_CI_COMMIT_PIPELINE_ID", "value": "{CI_COMMIT_PIPELINE_ID}"},
{"name": "BUILDINFO_CI_COMMIT_PIPELINE_URL", "value": "{CI_COMMIT_PIPELINE_URL}"},
{"name": "DATADOG_ADDR", "value": "127.0.0.1:8125"},
{"name": "DD_TRACE_AGENT_HOSTNAME", "value": "127.0.0.1"},
{"name": "DD_TRACE_AGENT_PORT", "value": "8126"},
{"name": "DD_SERVICE_NAME", "value": "{ECS_SERVICE}"},
{"name": "DD_ENV", "value": "{ENV}"},
{"name": "ROUTE53_UPDATE_TASK_IPS", "value": "{ROUTE53_UPDATE_TASK_IPS}"},
{"name": "ROUTE53_ZONES", "value": "{ROUTE53_ZONES}"},
{"name": "ECS_ENABLE_CONTAINER_METADATA", "value": "true"}
],
"healthCheck": {
"retries": 3,
"command": [
"CMD-SHELL",
"curl -f http://localhost/ping || exit 1"
],
"timeout": 5,
"interval": 60,
"startPeriod": 60
},
"dockerLabels": {
"com.datadoghq.ad.check_names": "[\"{ECS_SERVICE}\"]",
"com.datadoghq.ad.logs": "[{\"source\": \"docker\", \"service\": \"{ECS_SERVICE}\", \"service_name\": \"{SERVICE}\", \"cluster\": \"{ECS_CLUSTER}\", \"env\": \"{ENV}\"}]",
"com.datadoghq.ad.init_configs": "[{}]",
"com.datadoghq.ad.instances": "[{\"host\": \"%%host%%\", \"port\": 80}]"
},
"ulimits": [
{
"name": "nofile",
"softLimit": 987654,
"hardLimit": 999999
}
]
},
{
"name": "datadog-agent",
"image": "datadog/agent:latest",
"essential": {DATADOG_ESSENTIAL},
"cpu": 128,
"memoryReservation": 128,
"portMappings": [
{
"containerPort": 8125
},
{
"containerPort": 8126
}
],
"environment": [
{
"name": "DD_API_KEY",
"value": "{DATADOG_APIKEY}"
},
{
"name": "DD_LOGS_ENABLED",
"value": "true"
},
{
"name": "DD_APM_ENABLED",
"value": "true"
},
{
"name": "DD_RECEIVER_PORT",
"value": "8126"
},
{
"name": "DD_APM_NON_LOCAL_TRAFFIC",
"value": "true"
},
{
"name": "DD_LOGS_CONFIG_CONTAINER_COLLECT_ALL",
"value": "true"
},
{
"name": "DD_TAGS",
"value": "source:docker service:{ECS_SERVICE} service_name:{SERVICE} cluster:{ECS_CLUSTER} env:{ENV}"
},
{
"name": "DD_DOGSTATSD_ORIGIN_DETECTION",
"value": "true"
},
{
"name": "DD_DOGSTATSD_NON_LOCAL_TRAFFIC",
"value": "true"
},
{
"name": "ECS_FARGATE",
"value": "true"
}
]
}
],
"volumes": [],
"requiresCompatibilities": [
"FARGATE"
]
}
......@@ -20,8 +20,8 @@ import (
"syscall"
"time"
"geeks-accelerator/oss/saas-starter-kit/cmd/web-app/handlers"
"geeks-accelerator/oss/saas-starter-kit/internal/mid"
"geeks-accelerator/oss/saas-starter-kit/internal/platform/devops"
"geeks-accelerator/oss/saas-starter-kit/internal/platform/flag"
img_resize "geeks-accelerator/oss/saas-starter-kit/internal/platform/img-resize"
......@@ -80,10 +80,10 @@ func main() {
HostNames []string `envconfig:"HOST_NAMES" example:"www.eproc.tech"`
EnableHTTPS bool `default:"false" envconfig:"ENABLE_HTTPS"`
TemplateDir string `default:"./templates" envconfig:"TEMPLATE_DIR"`
StaticDir string `default:"./static" envconfig:"STATIC_DIR"`
StaticS3 struct {
S3Enabled bool `envconfig:"ENABLED"`
S3KeyPrefix string `default:"public/web_app/static" envconfig:"KEY_PREFIX"`
StaticFiles struct {
Dir string `default:"./static" envconfig:"STATIC_DIR"`
S3Enabled bool `envconfig:"S3_ENABLED"`
S3Prefix string `default:"public/web_app/static" envconfig:"S3_PREFIX"`
CloudFrontEnabled bool `envconfig:"CLOUDFRONT_ENABLED"`
ImgResizeEnabled bool `envconfig:"IMG_RESIZE_ENABLED"`
}
......@@ -371,8 +371,8 @@ func main() {
// s3UrlFormatter is a help function used by to convert an s3 key to
// a publicly available image URL.
var staticS3UrlFormatter func(string) string
if cfg.Service.StaticS3.S3Enabled || cfg.Service.StaticS3.CloudFrontEnabled || cfg.Service.StaticS3.ImgResizeEnabled {
s3UrlFormatter, err := devops.S3UrlFormatter(awsSession, cfg.Aws.S3BucketPublic, cfg.Service.StaticS3.S3KeyPrefix, cfg.Service.StaticS3.CloudFrontEnabled)
if cfg.Service.StaticFiles.S3Enabled || cfg.Service.StaticFiles.CloudFrontEnabled || cfg.Service.StaticFiles.ImgResizeEnabled {
s3UrlFormatter, err := devops.S3UrlFormatter(awsSession, cfg.Aws.S3BucketPublic, cfg.Service.StaticFiles.S3Prefix, cfg.Service.StaticFiles.CloudFrontEnabled)
if err != nil {
log.Fatalf("main : S3UrlFormatter failed : %+v", err)
}
......@@ -381,7 +381,7 @@ func main() {
// When the path starts with a forward slash its referencing a local file,
// make sure the static file prefix is included
if strings.HasPrefix(p, "/") {
p = filepath.Join(cfg.Service.StaticS3.S3KeyPrefix, p)
p = filepath.Join(cfg.Service.StaticFiles.S3Prefix, p)
}
return s3UrlFormatter(p)
}
......@@ -402,7 +402,7 @@ func main() {
// templates should be updated to use a fully qualified URL for either the public file on S3
// on from the cloudfront distribution.
var staticUrlFormatter func(string) string
if cfg.Service.StaticS3.S3Enabled || cfg.Service.StaticS3.CloudFrontEnabled {
if cfg.Service.StaticFiles.S3Enabled || cfg.Service.StaticFiles.CloudFrontEnabled {
staticUrlFormatter = staticS3UrlFormatter
} else {
baseUrl, err := url.Parse(cfg.Service.BaseUrl)
......@@ -510,12 +510,12 @@ func main() {
// Image Formatter - additional functions exposed to templates for resizing images
// to support response web applications.
imgResizeS3KeyPrefix := filepath.Join(cfg.Service.StaticS3.S3KeyPrefix, "images/responsive")
imgResizeS3KeyPrefix := filepath.Join(cfg.Service.StaticFiles.S3Prefix, "images/responsive")
imgSrcAttr := func(ctx context.Context, p string, sizes []int, includeOrig bool) template.HTMLAttr {
u := staticUrlFormatter(p)
var srcAttr string
if cfg.Service.StaticS3.ImgResizeEnabled {
if cfg.Service.StaticFiles.ImgResizeEnabled {
srcAttr, _ = img_resize.S3ImgSrc(ctx, redisClient, staticS3UrlFormatter, awsSession, cfg.Aws.S3BucketPublic, imgResizeS3KeyPrefix, u, sizes, includeOrig)
} else {
srcAttr = fmt.Sprintf("src=\"%s\"", u)
......@@ -546,7 +546,7 @@ func main() {
}
tmplFuncs["S3ImgUrl"] = func(ctx context.Context, p string, size int) string {
imgUrl := staticUrlFormatter(p)
if cfg.Service.StaticS3.ImgResizeEnabled {
if cfg.Service.StaticFiles.ImgResizeEnabled {
imgUrl, _ = img_resize.S3ImgUrl(ctx, redisClient, staticS3UrlFormatter, awsSession, cfg.Aws.S3BucketPublic, imgResizeS3KeyPrefix, imgUrl, size)
}
return imgUrl
......@@ -637,7 +637,7 @@ func main() {
if cfg.HTTP.Host != "" {
api := http.Server{
Addr: cfg.HTTP.Host,
Handler: handlers.APP(shutdown, log, cfg.Service.StaticDir, cfg.Service.TemplateDir, masterDb, redisClient, renderer, serviceMiddlewares...),
Handler: handlers.APP(shutdown, log, cfg.Service.StaticFiles.Dir, cfg.Service.TemplateDir, masterDb, redisClient, renderer, serviceMiddlewares...),
ReadTimeout: cfg.HTTP.ReadTimeout,
WriteTimeout: cfg.HTTP.WriteTimeout,
MaxHeaderBytes: 1 << 20,
......@@ -654,7 +654,7 @@ func main() {
if cfg.HTTPS.Host != "" {
api := http.Server{
Addr: cfg.HTTPS.Host,
Handler: handlers.APP(shutdown, log, cfg.Service.StaticDir, cfg.Service.TemplateDir, masterDb, redisClient, renderer, serviceMiddlewares...),
Handler: handlers.APP(shutdown, log, cfg.Service.StaticFiles.Dir, cfg.Service.TemplateDir, masterDb, redisClient, renderer, serviceMiddlewares...),
ReadTimeout: cfg.HTTPS.ReadTimeout,
WriteTimeout: cfg.HTTPS.WriteTimeout,
MaxHeaderBytes: 1 << 20,
......
package devops
import (
"io/ioutil"
"log"
"os"
"path/filepath"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/secretsmanager"
"github.com/fsnotify/fsnotify"
"github.com/pkg/errors"
)
// SyncCfgInit provides the functionality to keep config files sync'd between running tasks and across deployments.
// It performs an initial download of existing secrets, then returns a function that should be run in the
// background via a goroutine: it pushes local file changes to AWS Secrets Manager and periodically pulls
// remote updates at syncInterval (periodic pull disabled when the interval is zero or negative).
func SyncCfgInit(log *log.Logger, awsSession *session.Session, secretPrefix, watchDir string, syncInterval time.Duration) (func(), error) {
	localfiles := make(map[string]time.Time)

	// Do the initial sync before starting file watch to download any existing configs.
	err := SyncCfgDir(log, awsSession, secretPrefix, watchDir, localfiles)
	if err != nil {
		return nil, err
	}

	// Create a new file watcher.
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		return nil, errors.WithStack(err)
	}

	// Return function that should be run in the background via a go routine that will watch for new files created
	// locally and updated in AWS Secrets Manager.
	f := func() {
		defer watcher.Close()

		// Init ticker to sync remote files from Secret Manager locally at the defined interval.
		// Note: this must be set up BEFORE the blocking WatchCfgDir call below. Previously the
		// ticker was created after WatchCfgDir (which blocks until the watcher is closed), so the
		// periodic remote sync never ran and the deferred Stop() killed it immediately on return.
		if syncInterval.Seconds() > 0 {
			ticker := time.NewTicker(syncInterval)
			defer ticker.Stop()

			go func() {
				for range ticker.C {
					log.Println("AWS Secrets Manager : Checking for remote updates")

					// Download any secrets that have changed remotely since the last sync.
					err := SyncCfgDir(log, awsSession, secretPrefix, watchDir, localfiles)
					if err != nil {
						log.Printf("AWS Secrets Manager : Remote sync error - %+v", err)
					}
				}
			}()
		}

		// Blocks until the watcher is closed, syncing local file changes to Secret Manager.
		WatchCfgDir(log, awsSession, secretPrefix, watchDir, watcher, localfiles)
	}

	log.Printf("AWS Secrets Manager : Watching config dir %s", watchDir)

	// Note: Out of the box fsnotify can watch a single file, or a single directory.
	if err := watcher.Add(watchDir); err != nil {
		return nil, errors.Wrapf(err, "failed to add file watcher to %s", watchDir)
	}

	return f, nil
}
// SyncCfgDir lists all the Secrets from AWS Secrets Manager for a provided prefix and downloads them locally.
// localfiles tracks, per secret ID, the remote LastChangedDate at the time each secret was last downloaded so
// unchanged secrets are skipped on subsequent calls.
func SyncCfgDir(log *log.Logger, awsSession *session.Session, secretPrefix, watchDir string, localfiles map[string]time.Time) error {
	svc := secretsmanager.New(awsSession)

	// Get a list of secrets for the prefix with the time they were last changed.
	secretIDs := make(map[string]time.Time)
	err := svc.ListSecretsPages(&secretsmanager.ListSecretsInput{}, func(res *secretsmanager.ListSecretsOutput, lastPage bool) bool {
		for _, s := range res.SecretList {
			// Skip any secret that does not have a matching prefix.
			if !strings.HasPrefix(*s.Name, secretPrefix) {
				continue
			}

			// LastChangedDate may be nil (it is a pointer in the SDK); fall back to the
			// zero time so such a secret is still downloaded rather than panicking.
			var curChanged time.Time
			if s.LastChangedDate != nil {
				curChanged = s.LastChangedDate.UTC()
			}
			secretIDs[*s.Name] = curChanged
		}
		return !lastPage
	})
	if err != nil {
		return errors.Wrap(err, "failed to list secrets")
	}

	for id, curChanged := range secretIDs {
		// Load the secret by ID from Secrets Manager.
		res, err := svc.GetSecretValue(&secretsmanager.GetSecretValueInput{
			SecretId: aws.String(id),
		})
		if err != nil {
			return errors.Wrapf(err, "failed to get secret value for id %s", id)
		}

		filename := filepath.Base(id)
		localpath := filepath.Join(watchDir, filename)

		// Ensure the secret exists locally.
		if exists(localpath) {
			// If the secret was previously downloaded and current last changed time is less than or equal to the time
			// the secret was last downloaded, then no need to update.
			if lastChanged, ok := localfiles[id]; ok && curChanged.UTC().Unix() <= lastChanged.UTC().Unix() {
				continue
			}
		}

		// Secrets created as strings (e.g. by handleWatchCfgEvent via SecretString) are returned
		// in SecretString with a nil SecretBinary. Previously only SecretBinary was written, which
		// produced empty local files for string secrets.
		dat := res.SecretBinary
		if dat == nil && res.SecretString != nil {
			dat = []byte(*res.SecretString)
		}

		log.Printf("AWS Secrets Manager : Writing Config %s", filename)
		err = ioutil.WriteFile(localpath, dat, 0644)
		if err != nil {
			return errors.Wrapf(err, "failed to write secret value for id %s to %s", id, localpath)
		}

		// Only mark that the secret was updated when the file was successfully saved locally.
		localfiles[id] = curChanged
	}

	return nil
}
// WatchCfgDir watches for new/updated files locally and uploads them to AWS Secrets Manager.
// It blocks until the watcher's channels are closed (i.e. the watcher itself is closed), at
// which point it returns nil. Per-event failures are logged and watching continues.
func WatchCfgDir(log *log.Logger, awsSession *session.Session, secretPrefix, dir string, watcher *fsnotify.Watcher, localfiles map[string]time.Time) error {
	for {
		select {
		// watch for events
		case event, ok := <-watcher.Events:
			if !ok {
				return nil
			}

			// Push the created/updated/removed file to Secrets Manager; errors are
			// logged rather than returned so one bad event doesn't stop the watch.
			err := handleWatchCfgEvent(log, awsSession, secretPrefix, event)
			if err != nil {
				log.Printf("AWS Secrets Manager : Watcher Error - %+v", err)
			}

		// watch for errors
		case err, ok := <-watcher.Errors:
			if !ok {
				return nil
			}

			if err != nil {
				log.Printf("AWS Secrets Manager : Watcher Error - %+v", err)
			}
		}
	}
	// Unreachable trailing `return nil` removed: a `for` with no condition is a
	// terminating statement, so no return is required (and go vet flags dead code).
}
// handleWatchCfgEvent handles a fsnotify event. For new files, secrets are created, for updated files, the secret is
// updated. For deleted/renamed files the secret is scheduled for deletion with a 30 day recovery window.
func handleWatchCfgEvent(log *log.Logger, awsSession *session.Session, secretPrefix string, event fsnotify.Event) error {
	svc := secretsmanager.New(awsSession)

	fname := filepath.Base(event.Name)
	secretID := filepath.Join(secretPrefix, fname)

	if event.Op&fsnotify.Create == fsnotify.Create || event.Op&fsnotify.Write == fsnotify.Write {
		dat, err := ioutil.ReadFile(event.Name)
		if err != nil {
			return errors.Wrapf(err, "file watcher failed to read file %s", event.Name)
		}

		// Create the new entry in AWS Secret Manager for the file.
		_, err = svc.CreateSecret(&secretsmanager.CreateSecretInput{
			Name:         aws.String(secretID),
			SecretString: aws.String(string(dat)),
		})
		if err != nil {
			// Bug fix: the original tested `!ok` on the type assertion, so the awserr
			// branches only ran when the assertion FAILED and aerr was nil, panicking
			// on aerr.Code(). Non-awserr errors are now returned directly.
			aerr, ok := err.(awserr.Error)
			if !ok {
				return errors.Wrapf(err, "file watcher failed to create secret %s for %s", secretID, event.Name)
			}

			if aerr.Code() == secretsmanager.ErrCodeInvalidRequestException {
				// InvalidRequestException: You can't create this secret because a secret with this
				// name is already scheduled for deletion.
				// Restore secret after it was already previously deleted.
				_, err = svc.RestoreSecret(&secretsmanager.RestoreSecretInput{
					SecretId: aws.String(secretID),
				})
				if err != nil {
					return errors.Wrapf(err, "file watcher failed to restore secret %s for %s", secretID, event.Name)
				}
			} else if aerr.Code() != secretsmanager.ErrCodeResourceExistsException {
				return errors.Wrapf(err, "file watcher failed to create secret %s for %s", secretID, event.Name)
			}

			// If there was a resource exists error for create, then need to update the secret instead.
			_, err = svc.UpdateSecret(&secretsmanager.UpdateSecretInput{
				SecretId:     aws.String(secretID),
				SecretString: aws.String(string(dat)),
			})
			if err != nil {
				return errors.Wrapf(err, "file watcher failed to update secret %s for %s", secretID, event.Name)
			}

			log.Printf("AWS Secrets Manager : Secret %s updated for %s", secretID, event.Name)
		} else {
			log.Printf("AWS Secrets Manager : Secret %s created for %s", secretID, event.Name)
		}
	} else if event.Op&fsnotify.Remove == fsnotify.Remove || event.Op&fsnotify.Rename == fsnotify.Rename {
		// Delay delete to ensure the file is really deleted.
		//delCheck := time.NewTimer(time.Minute)
		//<-delCheck.C

		// Schedule the secret for deletion. ForceDeleteWithoutRecovery and RecoveryWindowInDays
		// are documented as mutually exclusive in the same API call; the original supplied BOTH
		// (ForceDeleteWithoutRecovery=false plus a recovery window), which the API rejects, so
		// only the recovery window is set now.
		_, err := svc.DeleteSecret(&secretsmanager.DeleteSecretInput{
			SecretId: aws.String(secretID),

			// (Optional) Specifies the number of days that Secrets Manager waits before
			// it can delete the secret. You can't use both this parameter and the ForceDeleteWithoutRecovery
			// parameter in the same API call.
			//
			// This value can range from 7 to 30 days.
			RecoveryWindowInDays: aws.Int64(30),
		})
		if err != nil {
			return errors.Wrapf(err, "file watcher failed to delete secret %s for %s", secretID, event.Name)
		}

		log.Printf("AWS Secrets Manager : Secret %s deleted for %s", secretID, event.Name)
	}

	return nil
}
// exists reports whether the named file or directory exists.
// Note: a stat error other than "does not exist" (e.g. a permission error)
// is treated as the path existing, matching the original behavior.
func exists(name string) bool {
	_, err := os.Stat(name)
	if err != nil && os.IsNotExist(err) {
		return false
	}
	return true
}
package devops
import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
)
// SyncS3StaticFiles copies the local files from the static directory to s3
// with public-read enabled.
func SyncS3StaticFiles(awsSession *session.Session, staticS3Bucket, staticS3Prefix, staticDir string) error {
	// Iterate over every file under staticDir, uploading each object to the
	// bucket/prefix with a public-read ACL; the uploader's error is returned as-is.
	iter := NewDirectoryIterator(staticS3Bucket, staticS3Prefix, staticDir, "public-read")
	return s3manager.NewUploader(awsSession).UploadWithIterator(aws.BackgroundContext(), iter)
}
......@@ -227,134 +227,3 @@ func RegisterEcsServiceTasksRoute53(log *log.Logger, awsSession *session.Session
return nil
}
/*
res, err := pester.Get("http://169.254.170.2/v2/metadata")
if err != nil {
fmt.Println("http://169.254.170.2/v2/metadata failed", err.Error())
} else {
dat, _ := ioutil.ReadAll(res.Body)
res.Body.Close()
fmt.Println("http://169.254.170.2/v2/metadata, OK", string(dat))
}
http://169.254.170.2/v2/metadata,
{
"Cluster": "arn:aws:ecs:us-west-2:888955683113:cluster/example-project-dev",
"TaskARN": "arn:aws:ecs:us-west-2:888955683113:task/700e38dd-dec5-4201-b711-c04a51feef8a",
"Family": "web-api",
"Revision": "113",
"DesiredStatus": "RUNNING",
"KnownStatus": "RUNNING",
"Containers": [{
"DockerId": "c786dfdf6510b20294832ccbc3d66e6f1f915a4a79ead2588aa760a6365c839a",
"Name": "datadog-agent",
"DockerName": "ecs-web-api-113-datadog-agent-d884dee0c79af1fb6400",
"Image": "datadog/agent:latest",
"ImageID": "sha256:233c75f21f71838a59d478472d021be7006e752da6a70a11f77cf185c1050737",
"Labels": {
"com.amazonaws.ecs.cluster": "arn:aws:ecs:us-west-2:888955683113:cluster/example-project-dev",
"com.amazonaws.ecs.container-name": "datadog-agent",
"com.amazonaws.ecs.task-arn": "arn:aws:ecs:us-west-2:888955683113:task/700e38dd-dec5-4201-b711-c04a51feef8a",
"com.amazonaws.ecs.task-definition-family": "web-api",
"com.amazonaws.ecs.task-definition-version": "113"
},
"DesiredStatus": "RUNNING",
"KnownStatus": "STOPPED",
"ExitCode": 1,
"Limits": {
"CPU": 128,
"Memory": 0
},
"CreatedAt": "2019-07-11T05:36:54.135666318Z",
"StartedAt": "2019-07-11T05:36:54.481305866Z",
"FinishedAt": "2019-07-11T05:36:54.863742829Z",
"Type": "NORMAL",
"Networks": [{
"NetworkMode": "awsvpc",
"IPv4Addresses": ["172.31.62.204"]
}],
"Volumes": [{
"DockerName": "0960558c657c6e79d43e0e55f4ff259a97d78f58d9ad0d738e74495f4ba3cb06",
"Source": "/var/lib/docker/volumes/0960558c657c6e79d43e0e55f4ff259a97d78f58d9ad0d738e74495f4ba3cb06/_data",
"Destination": "/etc/datadog-agent"
}, {
"DockerName": "7a103f880857a1c2947e4a1bfff48efd25d24943a2d6a6e4dd86fa9dab3f10f0",
"Source": "/var/lib/docker/volumes/7a103f880857a1c2947e4a1bfff48efd25d24943a2d6a6e4dd86fa9dab3f10f0/_data",
"Destination": "/tmp"
}, {
"DockerName": "c88c03366eadb5d9da27708919e77ac5f8e0877c3dbb32c80580cb22e5811c00",
"Source": "/var/lib/docker/volumes/c88c03366eadb5d9da27708919e77ac5f8e0877c3dbb32c80580cb22e5811c00/_data",
"Destination": "/var/log/datadog"
}, {
"DockerName": "df97387f6ccc34c023055ef8a34a41e9d1edde4715c1849f1460683d31749539",
"Source": "/var/lib/docker/volumes/df97387f6ccc34c023055ef8a34a41e9d1edde4715c1849f1460683d31749539/_data",
"Destination": "/var/run/s6"
}]
}, {
"DockerId": "ab6bd869e675f64122a33a74da9183b304bbc60b649a15d0d83ebc48eeafdd76",
"Name": "~internal~ecs~pause",
"DockerName": "ecs-web-api-113-internalecspause-aab99b88b9ddadb0c701",
"Image": "fg-proxy:tinyproxy",
"ImageID": "",
"Labels": {
"c