Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 6 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,12 @@

All notable changes to this project will be documented in this file.

## [Unreleased]

### Features

* Add Docker execution support for shell commands in build process

## [8.1.0](https://github.com/terraform-aws-modules/terraform-aws-lambda/compare/v8.0.1...v8.1.0) (2025-08-22)


Expand Down
28 changes: 28 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -503,6 +503,34 @@ To override the docker entrypoint when building in docker, set `docker_entrypoin

The entrypoint must map to a path within your container, so you need to either build your own image that contains the entrypoint or map it to a file on the host by mounting a volume (see [Passing additional Docker options](#passing-additional-docker-options)).

#### Shell Commands with Docker

When `build_in_docker = true`, shell commands specified in the `commands` parameter are executed inside the Docker container. This allows you to run package managers or other tools that are only available in the Lambda runtime environment:

```hcl
module "lambda_function" {
source = "terraform-aws-modules/lambda/aws"

function_name = "my-lambda"
runtime = "python3.12"
build_in_docker = true
docker_image = "public.ecr.aws/lambda/python:3.12"

source_path = [{
path = "${path.module}/src"
commands = [
# Install system dependencies in Lambda container
"microdnf install -y gcc",
# Build native extensions
"pip install --target=. -r requirements.txt",
":zip"
]
}]
}
```

This is particularly useful when you need to install packages or compile code using tools that are specific to the Lambda runtime environment but may not be available on your build machine.

## <a name="package"></a> Deployment package - Create or use existing

By default, this module creates deployment package and uses it to create or update Lambda Function or Lambda Layer.
Expand Down
1 change: 1 addition & 0 deletions examples/build-package/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -54,6 +54,7 @@ Note that this example may create resources which cost money. Run `terraform des
| <a name="module_package_src_poetry2"></a> [package\_src\_poetry2](#module\_package\_src\_poetry2) | ../../ | n/a |
| <a name="module_package_with_commands_and_patterns"></a> [package\_with\_commands\_and\_patterns](#module\_package\_with\_commands\_and\_patterns) | ../../ | n/a |
| <a name="module_package_with_docker"></a> [package\_with\_docker](#module\_package\_with\_docker) | ../../ | n/a |
| <a name="module_package_with_docker_shell_commands"></a> [package\_with\_docker\_shell\_commands](#module\_package\_with\_docker\_shell\_commands) | ../../ | n/a |
| <a name="module_package_with_npm_lock_in_docker"></a> [package\_with\_npm\_lock\_in\_docker](#module\_package\_with\_npm\_lock\_in\_docker) | ../../ | n/a |
| <a name="module_package_with_npm_requirements_in_docker"></a> [package\_with\_npm\_requirements\_in\_docker](#module\_package\_with\_npm\_requirements\_in\_docker) | ../../ | n/a |
| <a name="module_package_with_patterns"></a> [package\_with\_patterns](#module\_package\_with\_patterns) | ../../ | n/a |
Expand Down
21 changes: 21 additions & 0 deletions examples/build-package/main.tf
Original file line number Diff line number Diff line change
Expand Up @@ -102,6 +102,27 @@ module "package_src_poetry2" {
artifacts_dir = "${path.root}/builds/package_src_poetry2/"
}

# Create zip-archive with custom shell commands executed in Docker container.
# Demonstrates that entries in `commands` run inside the container when
# `build_in_docker = true`, so runtime-specific tools are available at build time.
module "package_with_docker_shell_commands" {
  source = "../../"

  # Package-building example only: do not create a Lambda function resource.
  create_function = false

  build_in_docker = true
  runtime         = "python3.12"
  # Official AWS Lambda Python base image; matches `runtime` above.
  docker_image = "public.ecr.aws/lambda/python:3.12"

  source_path = [{
    path = "${path.module}/../fixtures/python-app1"
    commands = [
      # Both commands execute inside the container before zipping.
      "echo 'Running shell commands in Docker container'",
      "ls -la",
      # `:zip` packages the working directory into the artifact.
      ":zip"
    ]
  }]
  artifacts_dir = "${path.root}/builds/package_docker_shell_commands/"
}

# Create zip-archive of a single directory where "poetry export" & "pip install --no-deps" will also be executed (not using docker)
module "package_dir_poetry_no_docker" {
source = "../../"
Expand Down
166 changes: 141 additions & 25 deletions package.py
Original file line number Diff line number Diff line change
Expand Up @@ -906,6 +906,38 @@ def execute(self, build_plan, zip_stream, query):
sh_work_dir = None
pf = None

# Resolve Docker image ID once for all steps
docker = query.docker if query else None
docker_image_tag_id = None

if docker:
docker_image = docker.docker_image
if docker_image:
output = check_output(docker_image_id_command(docker_image))
if output:
docker_image_tag_id = output.decode().strip()
log.debug(
"DOCKER TAG ID: %s -> %s", docker_image, docker_image_tag_id
)
else:
log.info(
"Docker image not found locally, pulling: %s", docker_image
)
try:
check_call(docker_pull_command(docker_image))
output = check_output(docker_image_id_command(docker_image))
if output:
docker_image_tag_id = output.decode().strip()
log.debug(
"DOCKER TAG ID (after pull): %s -> %s",
docker_image,
docker_image_tag_id,
)
except subprocess.CalledProcessError as e:
log.warning(
"Failed to pull Docker image %s: %s", docker_image, e
)

for step in build_plan:
# init step
sh_work_dir = tf_work_dir
Expand Down Expand Up @@ -987,52 +1019,128 @@ def execute(self, build_plan, zip_stream, query):
# XXX: timestamp=0 - what actually do with it?
zs.write_dirs(rd, prefix=prefix, timestamp=0)
elif cmd == "sh":
with tempfile.NamedTemporaryFile(
mode="w+t", delete=True
) as temp_file:
script = action[1]
script = action[1]

if docker and docker_image_tag_id:
if log.isEnabledFor(DEBUG2):
log.debug("exec shell script ...")
log.debug("exec shell script in docker...")
for line in script.splitlines():
sh_log.debug(line)

script = "\n".join(
(
# Prepare script with working directory tracking
enhanced_script = "\n".join(
[
script,
# NOTE: Execute `pwd` to determine the subprocess shell's
# working directory after having executed all other commands.
"retcode=$?",
f"pwd >{temp_file.name}",
"pwd",
"exit $retcode",
)
]
)

# Add chown to fix file ownership (like pip at line 1150-1154)
chown_mask = "{}:{}".format(os.getuid(), os.getgid())
full_script = "{} && {}".format(
enhanced_script,
shlex_join(["chown", "-R", chown_mask, "."]),
)

p = subprocess.Popen(
script,
shell_command = [full_script]

docker_cmd = docker_run_command(
sh_work_dir,
shell_command,
query.runtime,
image=docker_image_tag_id,
shell=True,
ssh_agent=docker.with_ssh_agent,
docker=docker,
)

# Capture output to extract new working directory
result = subprocess.run(
docker_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=sh_work_dir,
text=True,
check=False,
)

call_stdout, call_stderr = p.communicate()
exit_code = p.returncode
log.debug("exit_code: %s", exit_code)
if exit_code != 0:
if result.returncode != 0:
raise RuntimeError(
"Script did not run successfully, exit code {}: {} - {}".format(
exit_code,
call_stdout.decode("utf-8").strip(),
call_stderr.decode("utf-8").strip(),
"Script did not run successfully in docker, exit code {}: {} - {}".format(
result.returncode,
result.stdout.strip(),
result.stderr.strip(),
)
)

temp_file.seek(0)
# NOTE: This var `sh_work_dir` is consumed in cmd == "zip" loop
sh_work_dir = temp_file.read().strip()
# Extract final working directory from stdout
# The 'pwd' command output is in stdout, but we need to parse it
# because there might be other output from the script
output_lines = result.stdout.strip().split("\n")
if output_lines:
final_pwd = output_lines[-1]
# Map container path back to host path
# Container path structure: /var/task = sh_work_dir (via volume mount)
if final_pwd.startswith("/var/task"):
relative_path = final_pwd[len("/var/task") :].lstrip(
"/"
)
sh_work_dir = (
os.path.join(sh_work_dir, relative_path)
if relative_path
else sh_work_dir
)
sh_work_dir = os.path.normpath(sh_work_dir)

log.debug("WORKDIR: %s", sh_work_dir)

else:
# Execute shell commands on host
with tempfile.NamedTemporaryFile(
mode="w+t", delete=True
) as temp_file:
if log.isEnabledFor(DEBUG2):
log.debug("exec shell script ...")
for line in script.splitlines():
sh_log.debug(line)

script = "\n".join(
(
script,
# NOTE: Execute `pwd` to determine the subprocess shell's
# working directory after having executed all other commands.
"retcode=$?",
f"pwd >{temp_file.name}",
"exit $retcode",
)
)

p = subprocess.Popen(
script,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=sh_work_dir,
)

call_stdout, call_stderr = p.communicate()
exit_code = p.returncode
log.debug("exit_code: %s", exit_code)
if exit_code != 0:
raise RuntimeError(
"Script did not run successfully, exit code {}: {} - {}".format(
exit_code,
call_stdout.decode("utf-8").strip(),
call_stderr.decode("utf-8").strip(),
)
)

temp_file.seek(0)
# NOTE: This var `sh_work_dir` is consumed in cmd == "zip" loop
sh_work_dir = temp_file.read().strip()
log.debug("WORKDIR: %s", sh_work_dir)

elif cmd == "set:workdir":
path = action[1]
sh_work_dir = os.path.normpath(os.path.join(tf_work_dir, path))
Expand Down Expand Up @@ -1516,6 +1624,14 @@ def docker_image_id_command(tag):
return docker_cmd


def docker_pull_command(image):
    """Return the argv list for ``docker pull <image>``.

    The command is echoed to the command log (mirroring the other
    ``docker_*_command`` helpers) before being returned for execution.
    """
    command = ["docker", "pull", image]
    cmd_log.info(shlex_join(command))
    if log_handler:
        log_handler.flush()
    return command


def docker_build_command(tag=None, docker_file=None, build_root=False):
""""""
if not (build_root or docker_file):
Expand Down
Loading
Loading