Cookbook Recipes
Real-world examples and patterns for Docker container management in Eiffel.
Recipe 1: Run a Web Server
Deploy an Nginx web server with port mapping and volume mounts.
class
	WEB_SERVER_EXAMPLE

create
	make

feature {NONE} -- Initialization

	make
		local
			client: DOCKER_CLIENT
			spec: CONTAINER_SPEC
			container: detachable DOCKER_CONTAINER
		do
			create client.make
			if client.ping then
				-- Ensure image exists
				if not client.image_exists ("nginx:alpine") then
					print ("Pulling nginx:alpine...%N")
					client.pull_image ("nginx:alpine").do_nothing
				end
				-- Configure container
				create spec.make ("nginx:alpine")
				spec.set_name ("my-web-server")
					.add_port (80, 8080)
					.add_volume ("C:\www", "/usr/share/nginx/html")
					.set_restart_policy ("unless-stopped")
					.do_nothing
				-- Run container
				container := client.run_container (spec)
				if attached container as c then
					print ("Web server running at http://localhost:8080%N")
					print ("Container ID: " + c.short_id + "%N")
				else
					if attached client.last_error as err then
						print ("Failed: " + err.message + "%N")
					end
				end
			else
				print ("Docker is not running%N")
			end
		end
end
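Usage
A minimal sketch of running the recipe from a root class; the class and its creation procedure are exactly the ones defined above.
	local
		example: WEB_SERVER_EXAMPLE
	do
		create example.make
	end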
Recipe 2: Database Container
Run PostgreSQL with environment variables and persistent storage.
class
	DATABASE_EXAMPLE

create
	make

feature {NONE} -- Initialization

	make
		do
			create client.make
		end

feature -- Operations

	start_postgres (a_password: STRING; a_data_dir: STRING)
			-- Start PostgreSQL with `a_password' and persistent storage under `a_data_dir'.
		local
			spec: CONTAINER_SPEC
		do
			create spec.make ("postgres:16-alpine")
			spec.set_name ("my-postgres")
				.set_hostname ("postgres")
				.add_port (5432, 5432)
				.add_env ("POSTGRES_PASSWORD", a_password)
				.add_env ("POSTGRES_USER", "myapp")
				.add_env ("POSTGRES_DB", "myapp_db")
				.add_volume (a_data_dir, "/var/lib/postgresql/data")
				.set_restart_policy ("unless-stopped")
				.set_memory_limit (1024 * 1024 * 1024) -- 1 GB
				.do_nothing
			postgres := client.run_container (spec)
			if attached postgres as pg then
				print ("PostgreSQL running on port 5432%N")
				print ("Connection: postgresql://myapp:***@localhost:5432/myapp_db%N")
			end
		end

	stop_postgres
			-- Stop the running PostgreSQL container, if any.
		do
			if attached postgres as pg then
				client.stop_container (pg.id, 30).do_nothing
				print ("PostgreSQL stopped%N")
			end
		end

feature {NONE} -- Implementation

	client: DOCKER_CLIENT
	postgres: detachable DOCKER_CONTAINER
end
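Usage
A minimal sketch; the password and host data directory below are placeholder values.
	local
		db: DATABASE_EXAMPLE
	do
		create db.make
		db.start_postgres ("change-me", "C:\docker-data\postgres")
		-- ... application work ...
		db.stop_postgres
	end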
Recipe 3: Batch Job Container
Run a one-time container, wait for completion, and capture output.
class
	BATCH_JOB_EXAMPLE

create
	make

feature {NONE} -- Initialization

	make
		do
			create client.make
		end

feature -- Operations

	run_batch_job (a_script: STRING): STRING
			-- Run `a_script' in a container and return its output.
		local
			spec: CONTAINER_SPEC
			container: detachable DOCKER_CONTAINER
			exit_code: INTEGER
		do
			Result := ""
			create spec.make ("alpine:latest")
			spec.set_name ("batch-job-" + generate_id)
				.set_cmd (<<"/bin/sh", "-c", a_script>>)
				.set_auto_remove (True) -- Auto-cleanup
				.do_nothing
			container := client.run_container (spec)
			if attached container as c then
				-- Wait for completion
				exit_code := client.wait_container (c.id)
				-- Get output before container is removed
				if attached client.container_logs (c.id, True, True, 1000) as logs then
					Result := logs
				end
				if exit_code /= 0 then
					Result.prepend ("[ERROR: exit code " + exit_code.out + "]%N")
				end
			end
		end

	generate_id: STRING
			-- Short unique suffix for container names.
		do
			Result := (create {SIMPLE_UUID}.make).short_string
		end

feature {NONE} -- Implementation

	client: DOCKER_CLIENT
end
Usage
	local
		job: BATCH_JOB_EXAMPLE
		output: STRING
	do
		create job.make
		output := job.run_batch_job ("echo 'Hello' && date && ls -la")
		print (output)
	end
Recipe 4: Container Manager
Manage multiple containers with cleanup on exit.
class
	CONTAINER_MANAGER

create
	make

feature {NONE} -- Initialization

	make
		do
			create client.make
			create managed_containers.make (10)
		end

feature -- Operations

	start (a_spec: CONTAINER_SPEC): detachable DOCKER_CONTAINER
			-- Start container and track for cleanup
		do
			Result := client.run_container (a_spec)
			if attached Result as c then
				managed_containers.extend (c.id)
			end
		end

	stop_all
			-- Stop all managed containers
		do
			across managed_containers as id loop
				client.stop_container (id, 10).do_nothing
			end
		end

	cleanup
			-- Stop and remove all managed containers
		do
			across managed_containers as id loop
				client.remove_container (id, True).do_nothing
			end
			managed_containers.wipe_out
		end

	status
			-- Print status of all managed containers
		do
			across managed_containers as id loop
				if attached client.get_container (id) as c then
					print (c.out + "%N")
				end
			end
		end

feature {NONE} -- Implementation

	client: DOCKER_CLIENT
	managed_containers: ARRAYED_LIST [STRING]
end
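Usage
A sketch of the intended lifecycle; the image and container name passed to CONTAINER_SPEC are illustrative, and only calls shown in the recipes above are used.
	local
		manager: CONTAINER_MANAGER
		spec: CONTAINER_SPEC
	do
		create manager.make
		create spec.make ("redis:alpine")
		spec.set_name ("my-cache").do_nothing
		if attached manager.start (spec) as c then
			print ("Started " + c.short_id + "%N")
		end
		manager.status
		manager.stop_all
		manager.cleanup
	end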
Recipe 5: Container Health Monitoring
Monitor container health and auto-restart on failure.
class
	CONTAINER_MONITOR

create
	make

feature {NONE} -- Initialization

	make
		do
			create client.make
		end

feature -- Operations

	monitor (a_container_id: STRING; a_check_interval_ms: INTEGER)
			-- Monitor the container and restart it when it exits with an error.
		local
			l_running: BOOLEAN
		do
			from
				l_running := True
			until
				not l_running
			loop
				if attached client.get_container (a_container_id) as c then
					if c.is_running then
						-- All good
					elseif c.is_exited then
						if c.has_exited_successfully then
							print ("Container completed successfully%N")
							l_running := False
						else
							print ("Container failed (exit " + c.exit_code.out + "), restarting...%N")
							client.start_container (a_container_id).do_nothing
						end
					elseif c.is_dead then
						print ("Container is dead, cannot recover%N")
						l_running := False
					end
				else
					print ("Container not found%N")
					l_running := False
				end
				if l_running then
					sleep (a_check_interval_ms)
				end
			end
		end

feature {NONE} -- Implementation

	client: DOCKER_CLIENT

	sleep (ms: INTEGER)
			-- Sleep for `ms' milliseconds.
		do
			-- Convert to INTEGER_64 before scaling to nanoseconds to avoid 32-bit overflow.
			(create {EXECUTION_ENVIRONMENT}).sleep (ms.to_integer_64 * 1_000_000)
		end
end
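Usage
A sketch; the container id and the 5-second interval are illustrative, and `monitor' blocks until the container completes, dies, or disappears.
	local
		mon: CONTAINER_MONITOR
	do
		create mon.make
		mon.monitor ("a1b2c3d4e5f6", 5_000) -- hypothetical container id, check every 5 seconds
	end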
Recipe 6: Cleanup Old Containers and Images
Remove stopped containers and unused images.
class
	DOCKER_CLEANUP

create
	make

feature {NONE} -- Initialization

	make
		do
			create client.make
		end

feature -- Operations

	cleanup_stopped_containers
			-- Remove all stopped containers
		local
			removed_count: INTEGER
		do
			across client.list_containers (True) as c loop
				if c.is_exited or c.is_dead then
					if client.remove_container (c.id, False) then
						print ("Removed: " + c.short_id + "%N")
						removed_count := removed_count + 1
					end
				end
			end
			print ("Removed " + removed_count.out + " containers%N")
		end

	cleanup_dangling_images
			-- Remove images without tags
		local
			removed_count: INTEGER
		do
			across client.list_images as img loop
				if img.primary_tag.same_string ("<none>:<none>") then
					if client.remove_image (img.id, False) then
						print ("Removed image: " + img.short_id + "%N")
						removed_count := removed_count + 1
					end
				end
			end
			print ("Removed " + removed_count.out + " images%N")
		end

	full_cleanup
			-- Clean containers first, then images
		do
			cleanup_stopped_containers
			cleanup_dangling_images
		end

feature {NONE} -- Implementation

	client: DOCKER_CLIENT
end
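Usage
A sketch of invoking the cleanup, for example from a scheduled maintenance task; it uses only the features defined above.
	local
		cleaner: DOCKER_CLEANUP
	do
		create cleaner.make
		cleaner.full_cleanup
	end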
Recipe 7: Retry Pattern for Unreliable Operations
Retry operations with exponential backoff.
class
	DOCKER_RETRY

create
	make

feature {NONE} -- Initialization

	make
		do
			create client.make
		end

feature -- Operations

	pull_with_retry (a_image: STRING; a_max_attempts: INTEGER): BOOLEAN
			-- Pull image with retries on transient network errors
		local
			attempt: INTEGER
			delay_ms: INTEGER
		do
			from
				attempt := 1
				delay_ms := 1000 -- Start with 1 second
			until
				Result or attempt > a_max_attempts
			loop
				print ("Pulling " + a_image + " (attempt " + attempt.out + ")...%N")
				if client.pull_image (a_image) then
					Result := True
					print ("Success!%N")
				elseif attached client.last_error as err then
					if err.is_retryable and attempt < a_max_attempts then
						print ("Retrying in " + (delay_ms // 1000).out + " seconds...%N")
						sleep (delay_ms)
						delay_ms := delay_ms * 2 -- Exponential backoff
					else
						print ("Failed: " + err.message + "%N")
						attempt := a_max_attempts -- Non-retryable or out of attempts: stop retrying
					end
				end
				attempt := attempt + 1
			end
		end

feature {NONE} -- Implementation

	client: DOCKER_CLIENT

	sleep (ms: INTEGER)
			-- Sleep for `ms' milliseconds.
		do
			-- Convert to INTEGER_64 before scaling to nanoseconds to avoid 32-bit overflow.
			(create {EXECUTION_ENVIRONMENT}).sleep (ms.to_integer_64 * 1_000_000)
		end
end
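Usage
A sketch; the image name and attempt count are illustrative.
	local
		puller: DOCKER_RETRY
	do
		create puller.make
		if puller.pull_with_retry ("nginx:alpine", 5) then
			print ("Image ready%N")
		else
			print ("Giving up%N")
		end
	end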