Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -10,3 +10,4 @@ data/

.pgdata

terraform/veda-wfs3-shared-mcp
6 changes: 5 additions & 1 deletion terraform/features-api/ecs_api.tf
Original file line number Diff line number Diff line change
Expand Up @@ -79,6 +79,10 @@ module "ecs_cluster" {
// stupid hack b/c of FastAPI and Starlette bug
name = "FAST_API_SCHEME"
value = "http" //quick hack for now, TODO: include 'contains' function
},
{
name = "TIPG_CATALOG_TTL"
value = "300"
}
]

Expand All @@ -97,7 +101,7 @@ module "ecs_cluster" {
lb_security_group_id = aws_security_group.web_inbound_sg.id
lb_container_port = var.service_port

tags = var.tags
tags = var.tags
permissions_boundary_policy_name = var.permissions_boundary_policy_name
}

Expand Down
2 changes: 1 addition & 1 deletion terraform/features-api/rds.tf
Original file line number Diff line number Diff line change
Expand Up @@ -45,7 +45,7 @@ resource "aws_db_instance" "db" {
db_name = "ghgc"
identifier = "${var.project_name}-${var.env}"
engine = "postgres"
engine_version = "14.3"
engine_version = "14.9"
// https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_CreateDBInstance.html
allocated_storage = 100
max_allocated_storage = 500
Expand Down
2 changes: 1 addition & 1 deletion terraform/features-api/terraform.tf
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@ terraform {
}
backend "s3" {
bucket = "ghgc-smce-tf-shared-state"
key = "root/features-api"
key = "root/features-api-dev"
region = "us-west-2"
}
}
12 changes: 6 additions & 6 deletions terraform/features-api/terraform.tfvars
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
region = "us-west-2"
registry_name = "feature-api-pre-dev"
env = "pre-dev"
project_name = "veda-pre-dev"
service_port = 8080
vpc_id = "vpc-0c6727f22063d860f"
region = "us-west-2"
registry_name = "feature-api-dev"
env = "smce-ghgc"
project_name = "veda-ghgc-wfs3"
service_port = 8080
vpc_id = "vpc-0c6727f22063d860f"
78 changes: 39 additions & 39 deletions terraform/modules/aws_ecs_service/main.tf
Original file line number Diff line number Diff line change
Expand Up @@ -25,9 +25,9 @@ data "aws_iam_policy_document" "ecs_assume_role_policy" {
}

resource "aws_iam_role" "ecs_execution_role" {
name = "${var.service_name}-${var.environment}_ecs_task_execution_role"
assume_role_policy = data.aws_iam_policy_document.ecs_assume_role_policy.json
tags = var.tags
name = "${var.service_name}-${var.environment}_ecs_task_execution_role"
assume_role_policy = data.aws_iam_policy_document.ecs_assume_role_policy.json
tags = var.tags
permissions_boundary = local.permissions_boundary
}

Expand Down Expand Up @@ -123,15 +123,15 @@ resource "aws_security_group_rule" "service_egress" {
// bind the ECS service's SG as a source
// to the VPC's default SG if it was passed as a variable
resource "aws_security_group_rule" "rds_sg_allows_ecs_sg" {
for_each = {
for index, rule in var.additional_sg_ingress_rules_for_vpc_default_sg:
for_each = {
for index, rule in var.additional_sg_ingress_rules_for_vpc_default_sg :
rule.primary_key => rule # this works b/c one key has to be primary
}
security_group_id = each.value.vpc_default_sg_id
type = "ingress"
from_port = each.value.from_port
to_port = each.value.to_port
protocol = each.value.protocol
security_group_id = each.value.vpc_default_sg_id
type = "ingress"
from_port = each.value.from_port
to_port = each.value.to_port
protocol = each.value.protocol
source_security_group_id = aws_security_group.service.id
}

Expand Down Expand Up @@ -173,7 +173,7 @@ resource "aws_security_group_rule" "service_ingress_lb" {
# ECS
########################################################################
resource "aws_ecs_cluster" "service" {
name = "tf-${var.service_name}-${var.environment}"
name = "tf-${var.service_name}-${var.environment}-dev"
tags = var.tags
setting {
name = "containerInsights"
Expand All @@ -196,8 +196,8 @@ resource "aws_ecs_service" "service" {
deployment_minimum_healthy_percent = 100

network_configuration {
subnets = var.subnet_ids
security_groups = [aws_security_group.service.id]
subnets = var.subnet_ids
security_groups = [aws_security_group.service.id]
//assign_public_ip = true
}

Expand All @@ -220,22 +220,22 @@ resource "aws_ecs_task_definition" "service" {
tags = var.tags
execution_role_arn = aws_iam_role.ecs_execution_role.arn
task_role_arn = aws_iam_role.ecs_execution_role.arn
container_definitions = templatefile("${path.module}/container_definition.tftpl",
{
service_name = var.service_name
environment = var.environment
image = var.image
container_command = length(var.container_command) > 0 ? jsonencode(var.container_command) : ""
working_directory = var.container_working_directory
container_secrets = jsonencode(var.container_secrets)
container_environment = jsonencode(var.container_environment)
service_protocol = var.service_protocol
service_port = var.service_port
use_adot_as_sidecar = var.use_adot_as_sidecar ? "on" : ""
log_group = aws_cloudwatch_log_group.service.name
region = var.region
}
)
container_definitions = templatefile("${path.module}/container_definition.tftpl",
{
service_name = var.service_name
environment = var.environment
image = var.image
container_command = length(var.container_command) > 0 ? jsonencode(var.container_command) : ""
working_directory = var.container_working_directory
container_secrets = jsonencode(var.container_secrets)
container_environment = jsonencode(var.container_environment)
service_protocol = var.service_protocol
service_port = var.service_port
use_adot_as_sidecar = var.use_adot_as_sidecar ? "on" : ""
log_group = aws_cloudwatch_log_group.service.name
region = var.region
}
)
}

#######################################################################
Expand All @@ -247,19 +247,19 @@ resource "aws_ecs_task_definition" "service" {
data "aws_iam_policy_document" "api_ecs_to_otel_access" {
statement {
actions = [
"xray:PutTraceSegments",
"xray:PutTelemetryRecords",
"xray:GetSamplingRules",
"xray:GetSamplingTargets",
"xray:GetSamplingStatisticSummaries",
"cloudwatch:PutMetricData",
"ec2:DescribeVolumes",
"ec2:DescribeTags",
"ssm:GetParameters"
"xray:PutTraceSegments",
"xray:PutTelemetryRecords",
"xray:GetSamplingRules",
"xray:GetSamplingTargets",
"xray:GetSamplingStatisticSummaries",
"cloudwatch:PutMetricData",
"ec2:DescribeVolumes",
"ec2:DescribeTags",
"ssm:GetParameters"
]

resources = [
"*",
"*",
]
}
}
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,72 @@
import boto3
import http.client
import os
import base64
import ast
import json
# MWAA environment whose web server receives the CLI trigger request.
mwaa_env_name = 'veda-pipeline-staging-mwaa'
# Airflow DAG triggered for each qualifying S3 object.
dag_name = 'veda_discover'
# Airflow CLI verb sent to the MWAA CLI endpoint.
mwaa_cli_command = 'dags trigger'
# Created at import time so warm Lambda invocations reuse the client.
client = boto3.client('mwaa')


def lambda_handler(event, context):
    """Trigger the ``veda_discover`` MWAA DAG for an S3 object-created event.

    For a record in ``event['Records']`` this derives a collection name from
    the object's filename, chooses a discovery prefix, and POSTs an Airflow
    ``dags trigger`` CLI command to the MWAA web server.

    Args:
        event: S3 notification payload with an iterable ``event['Records']``.
        context: Lambda context object (unused).

    Returns:
        dict with ``statusCode`` and a JSON-encoded ``body``.

    NOTE(review): the function returns from inside the loop on the first
    record it either skips or triggers, so only the first record of a
    multi-record event is processed — confirm events always carry one record.
    """
    for record in event['Records']:
        print(f"[ RECORD ]: {record}")
        s3_event_key = record['s3']['object']['key']
        print(f"[ S3 EVENT KEY ]: {s3_event_key}")
        # e.g. "EIS/FEDSoutput/Snapshot/foo.json" -> "foo.json" -> "foo"
        s3_filename_target = os.path.split(s3_event_key)[-1]
        print(f"[ S3 FILENAME TARGET ]: {s3_filename_target}")
        s3_filename_no_ext = os.path.splitext(s3_filename_target)[0]
        print(f"[ S3 FILENAME NO EXT ]: {s3_filename_no_ext}")

        # Skip GeoPackage outputs and anything under the v3 output tree.
        # (Merged two identical early returns from the original.)
        if s3_filename_target.endswith(".gpkg") or s3_event_key.startswith("EIS/FEDSoutput-v3"):
            return {
                'statusCode': 200,
                'body': json.dumps('Hello from Lambda!')
            }

        # Choose the discovery prefix for the DAG conf. The original also had
        # a branch selecting "EIS/FEDSoutput-v3/Snapshot/", but v3 keys return
        # early above, so that branch was unreachable and has been removed.
        bucket_key_prefix = "EIS/FEDSoutput/Snapshot/"
        if s3_filename_no_ext.startswith("lf_"):
            bucket_key_prefix = "EIS/FEDSoutput/LFArchive/"

        # Short-lived token authorizing a single MWAA CLI request.
        mwaa_cli_token = client.create_cli_token(
            Name=mwaa_env_name
        )
        print(f"[ CLI TOKEN ]: {mwaa_cli_token}")
        serialized_args = json.dumps({
            "discovery": "s3",
            "collection": s3_filename_no_ext,
            "prefix": bucket_key_prefix,
            "bucket": "veda-data-store-staging",
            "filename_regex": f"^(.*){s3_filename_target}$",
            "vector": True
        })
        conn = http.client.HTTPSConnection(mwaa_cli_token['WebServerHostname'])
        payload = f"{mwaa_cli_command} {dag_name} --conf '{serialized_args}'"
        print(f"[ CLI PAYLOAD ]: {payload}")
        headers = {
            'Authorization': 'Bearer ' + mwaa_cli_token['CliToken'],
            'Content-Type': 'text/plain'
        }
        conn.request("POST", "/aws_mwaa/cli", payload, headers)
        res = conn.getresponse()
        data = res.read()
        dict_str = data.decode("UTF-8")
        # NOTE(review): if the CLI endpoint returns strict JSON, json.loads
        # would be the safer parser here — confirm before changing.
        mydata = ast.literal_eval(dict_str)
        print(f"[ DATA ]: {mydata}")
        # stdout of the Airflow CLI command comes back base64-encoded.
        print(f"[ STDOUT ]: {base64.b64decode(mydata['stdout'])}")
        return {
            'statusCode': 200,
            'body': json.dumps('Hello from Lambda!')
        }
66 changes: 66 additions & 0 deletions terraform/veda-wfs3/rds.tf
Original file line number Diff line number Diff line change
@@ -0,0 +1,66 @@
# Subnet group placing the RDS instance in the VPC's private subnets.
resource "aws_db_subnet_group" "db" {
  name       = "tf-${var.project_name}-${var.env}-subnet-group"
  subnet_ids = module.networking.private_subnets_id
  tags = {
    Name = "tf-${var.project_name}-subnet-group"
  }
}

# Postgres 14 parameter group tuned for the tipg/TileJSON workload.
resource "aws_db_parameter_group" "default" {
  name   = "tf-${var.project_name}-${var.env}-postgres14-param-group"
  family = "postgres14"

  parameter {
    name = "work_mem"
    # Keep work_mem modest: with work_mem ~100MB and max_connections ~75,
    # TileJSON requests failed outright. 16MB (staging) / 8MB (others), in kB.
    value = var.env == "staging" ? "16384" : "8192"
  }

  parameter {
    name = "max_connections"
    value = "475"
    # Static parameter: only takes effect after an instance reboot.
    apply_method = "pending-reboot"
  }

  # NOTE(review): shared_buffers values kept for reference; provisioning
  # failed with them applied and the cause was never determined — confirm
  # before re-enabling.
  # parameter {
  #   name         = "shared_buffers"
  #   value        = var.env == "staging" ? "8064856" : "4032428"
  #   apply_method = "pending-reboot"
  # }

  parameter {
    name = "seq_page_cost"
    value = "1"
  }

  parameter {
    # Lowered toward seq_page_cost since storage is SSD-backed.
    name = "random_page_cost"
    value = "1.2"
  }
}

# Postgres RDS instance backing the veda features API.
resource "aws_db_instance" "db" {
  db_name        = "veda"
  identifier     = "${var.project_name}-${var.env}"
  engine         = "postgres"
  engine_version = "14.10"
  // https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_CreateDBInstance.html
  allocated_storage     = 100
  max_allocated_storage = 500
  storage_type          = "gp2"
  # Staging gets the larger memory-optimized class.
  instance_class         = var.env == "staging" ? "db.r5.xlarge" : "db.r5.large"
  db_subnet_group_name   = aws_db_subnet_group.db.name
  vpc_security_group_ids = module.networking.security_groups_ids
  # NOTE(review): skip_final_snapshot = true means destroying this resource
  # discards the data without a final snapshot — confirm that is acceptable.
  skip_final_snapshot     = true
  apply_immediately       = true
  backup_retention_period = 7
  username                = "postgres"
  password                = var.db_password
  storage_encrypted       = var.db_encrypted
  allow_major_version_upgrade = true
  parameter_group_name        = aws_db_parameter_group.default.name
}


Loading