Search code examples
postgresqlamazon-web-servicesaws-lambdaterraformamazon-rds

Terraforming AWS Lambda with PostgreSQL -- how to pass hostname to lambda?


AWS Environment -- Lambda function pulling from RDS/PostgreSQL instance and pushing through API Gateway.

SecretsManager makes it easy enough to pass static data like usernames and passwords to the Lambda function. Terraform, however, prohibits the use of dynamic variables in its variable resource. As such, Terraform cannot define a host and pass the host to the Lambda function through SecretsManager.

I've considered exporting the variable to the environment at the end of the Terraform script. But this would not export to the same machine that the Lambda is running on, correct? SecretsManager is obviously a hacked-together way to do it anyway. And all the resources I'm finding just seem to have some manual step in between the creation of the RDS instance and the Lambda function.

Thank you in advance.

Terraform :

### Variables

# Master username for the RDS instance (override at plan/apply time).
variable "pgdb-username" {
  default = "frank"
}

# Randomly generated master password for the RDS instance.
resource "random_password" "pgdb" {
  length = 16
}

# NOTE(review): these defaults are empty placeholders, so serializing this
# variable into the secret version stores blank credentials unless real
# values are supplied at apply time — verify that is intended.
variable "pgdb-secrets" {
  default = {
    "username" = ""
    "password" = ""
  }
}

# Secrets
# Secrets Manager container for the database credentials.
resource "aws_secretsmanager_secret" "pgdb" {
  name = "postgres-database"
}

# Store the real credentials rather than jsonencode(var.pgdb-secrets):
# that variable's defaults are empty strings, so the original wrote a
# secret containing blank username/password values.
resource "aws_secretsmanager_secret_version" "pgdb" {
  secret_id = aws_secretsmanager_secret.pgdb.id
  secret_string = jsonencode({
    "username" = var.pgdb-username
    "password" = random_password.pgdb.result
  })
}

### General 

# Pin the AWS provider so `terraform init` resolves a compatible version.
terraform {
  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "> 3.5.0"

    }
  }
}

# All resources in this configuration are created in us-east-1.
provider "aws" {
  region = "us-east-1"
}


### Lambda

# Package the local ./lambda directory into lambda.zip for deployment.
data "archive_file" "lambda-zip" {
  type        = "zip"
  source_dir  = "lambda"
  output_path = "lambda.zip"
}

# Execution role the Lambda service is allowed to assume via sts:AssumeRole.
# NOTE(review): no policies are attached in this view, so the function has
# no CloudWatch Logs / SecretsManager / RDS permissions here — confirm the
# attachments exist elsewhere.
resource "aws_iam_role" "lambda-iam" {
  name = "lambda-iam"
  assume_role_policy = jsonencode({
    "Version" = "2012-10-17"
    "Statement" = {
      "Action" = "sts:AssumeRole"
      "Principal" = {
        "Service" = "lambda.amazonaws.com"
      }
      "Effect" = "Allow"
      "Sid"    = ""
    }
  })
}

# The deployed function. The RDS endpoint is only known at apply time, so
# it is handed to the code through a Lambda environment variable instead of
# being baked into the package or maintained manually in Secrets Manager —
# this is the missing piece the question is about.
resource "aws_lambda_function" "lambda" {
  filename         = "lambda.zip"
  function_name    = "lambda-function"
  role             = aws_iam_role.lambda-iam.arn
  handler          = "lambda.lambda_handler"
  source_code_hash = data.archive_file.lambda-zip.output_base64sha256
  runtime          = "python3.8"

  environment {
    variables = {
      # Read in the handler with os.getenv("DB_INSTANCE_ADDRESS").
      DB_INSTANCE_ADDRESS = aws_db_instance.bl-db.address
    }
  }
}

### API

# HTTP API (API Gateway v2) fronting the Lambda.
resource "aws_apigatewayv2_api" "lambda-api" {
  name          = "v2-http-api"
  protocol_type = "HTTP"
}

# $default stage with auto-deploy: every route change is deployed immediately.
resource "aws_apigatewayv2_stage" "lambda-stage" {
  api_id      = aws_apigatewayv2_api.lambda-api.id
  name        = "$default"
  auto_deploy = true
}

# AWS_PROXY integration: API Gateway forwards the raw request to the Lambda.
resource "aws_apigatewayv2_integration" "lambda-integration" {
  api_id               = aws_apigatewayv2_api.lambda-api.id
  integration_type     = "AWS_PROXY"
  integration_method   = "POST"
  integration_uri      = aws_lambda_function.lambda.invoke_arn
  passthrough_behavior = "WHEN_NO_MATCH"
}

# Catch-all GET route mapped onto the proxy integration above.
resource "aws_apigatewayv2_route" "lambda_route" {
  api_id    = aws_apigatewayv2_api.lambda-api.id
  route_key = "GET /{proxy+}"
  target    = "integrations/${aws_apigatewayv2_integration.lambda-integration.id}"
}

# Resource policy allowing any stage/method/path of this API to invoke the function.
resource "aws_lambda_permission" "api-gw" {
  statement_id  = "AllowExecutionFromAPIGateway"
  action        = "lambda:InvokeFunction"
  function_name = aws_lambda_function.lambda.arn
  principal     = "apigateway.amazonaws.com"
  source_arn    = "${aws_apigatewayv2_api.lambda-api.execution_arn}/*/*/*"
}

### RDS
resource "aws_db_instance" "bl-db" {

  # gp2 storage for PostgreSQL has a 20 GB minimum; the original value of 1
  # would be rejected at apply time.
  allocated_storage       = 20 # gigabytes
  backup_retention_period = 7 # in days
  engine                  = "postgres"
  # NOTE(review): PostgreSQL 9.5.x has reached end of life on RDS — confirm
  # this engine version is still offered in the target region.
  engine_version          = "9.5.4"
  identifier              = "main"
  name                    = "main"
  instance_class          = "db.r3.large"
  multi_az                = false
  # var.pgdb-username is a plain input variable, not a random_password
  # resource, so it has no `.result` attribute — reference it directly.
  username                = var.pgdb-username
  password                = random_password.pgdb.result
  port                    = 5432
  publicly_accessible     = true
  storage_encrypted       = true
  storage_type            = "gp2"
}


Lambda in Python :

#!env/bin/python
"""Lambda module: seeds a `cities` table in RDS PostgreSQL and serves a
random city.

Credentials are read from Secrets Manager; the database host name is
injected by Terraform through the function's environment variables, so
nothing has to be wired up manually after the RDS instance is created.
"""

import json
import os
import random
import psycopg2 as pg
import awswrangler.secretsmanager as awssm


database_name = "main"
table_name = "cities"
# Fetch the secret once (each get_secret_json call is a network round trip)
# and use the alias the module was imported under — the original referenced
# an undefined name `sm`, which raised NameError at import time.
_secret = awssm.get_secret_json("postgres-database")
username = _secret.get("username")
password = _secret.get("password")
port = "5432"
# Set by Terraform:  environment { variables = { DB_INSTANCE_ADDRESS = ... } }
# The original line was `host_name =` with no value — a syntax error.
host_name = os.getenv("DB_INSTANCE_ADDRESS")


def lambda_handler(event, context):
    """Create and seed the cities table, then return one random city.

    Parameters
    ----------
    event, context : standard Lambda invocation arguments (unused).

    Returns
    -------
    dict with ``statusCode`` and a ``body`` naming a random city.
    """
    cxn = pg.connect(user=username,
                     password=password,
                     host=host_name,
                     port=port,
                     database=database_name)
    try:
        csr = cxn.cursor()
        # IF NOT EXISTS keeps repeat invocations from failing on the DDL.
        csr.execute(
            "create table if not exists cities "
            "( ix serial primary key, names varchar(50) unique not null );"
        )
        # The column is `names` — the original inserted into a nonexistent
        # column `city`, which raised an error on every invocation.
        csr.execute(
            f"insert into {table_name} (names) values "
            "('Washington'), ('Philadelphia'), ('New York'), ('Chicago'), "
            "('Los Angeles'), ('Seattle'), ('Portland'), ('Dallas'), "
            "('Miami'), ('Charlotte');"
        )
        cxn.commit()
        len_table = csr.rowcount

        # Parameterized query instead of f-string interpolation, and the
        # cursor (not the connection) executes/fetches — the original called
        # cxn.execute/cxn.fetchall, which do not exist on a connection.
        csr.execute("select names from cities where ix = %s;",
                    (random.randint(1, len_table),))
        random_record = csr.fetchall()
    finally:
        # Always release the connection, even when a query fails.
        cxn.close()

    print(f"{len_table} rows inserted successfully")

    return {"statusCode": 200,
            "body": f"a random city is {random_record}"}



Solution

  • You can pass the db instance address to the lambda function using lambda environment variables via terraform.

    resource "aws_lambda_function" "test_lambda" {
      ...
    
      environment {
        variables = {
          DB_INSTANCE_ADDRESS = aws_db_instance.bl-db.address
        }
      }
    }
    

    Then read the environment variable in your lambda handler code.

    DB_INSTANCE_ADDRESS = os.getenv('DB_INSTANCE_ADDRESS')