diff --git a/Cargo.lock b/Cargo.lock
index b14c402a..d7fdda89 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2854,7 +2854,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "525e9ff3e1a4be2fbea1fdf0e98686a6d98b4d8f937e1bf7402245af1909e8c3"
dependencies = [
"byteorder-lite",
- "quick-error",
+ "quick-error 2.0.1",
]
[[package]]
@@ -3327,6 +3327,15 @@ dependencies = [
"tendril",
]
+[[package]]
+name = "matchers"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d1525a2a28c7f4fa0fc98bb91ae755d1e2d1505079e05539e35bc876b5d65ae9"
+dependencies = [
+ "regex-automata",
+]
+
[[package]]
name = "maybe-rayon"
version = "0.1.1"
@@ -3586,6 +3595,15 @@ version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0676bb32a98c1a483ce53e500a81ad9c3d5b3f7c920c28c24e9cb0980d0b5bc8"
+[[package]]
+name = "nu-ansi-term"
+version = "0.50.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5"
+dependencies = [
+ "windows-sys 0.61.2",
+]
+
[[package]]
name = "num-bigint"
version = "0.4.6"
@@ -4186,6 +4204,32 @@ dependencies = [
"perry-hir",
]
+[[package]]
+name = "perry-container-compose"
+version = "0.5.28"
+dependencies = [
+ "anyhow",
+ "async-trait",
+ "clap",
+ "dotenvy",
+ "hex",
+ "indexmap",
+ "md-5",
+ "once_cell",
+ "proptest",
+ "rand 0.8.5",
+ "regex",
+ "serde",
+ "serde_json",
+ "serde_yaml",
+ "shellexpand",
+ "thiserror 1.0.69",
+ "tokio",
+ "tracing",
+ "tracing-subscriber",
+ "which 6.0.3",
+]
+
[[package]]
name = "perry-diagnostics"
version = "0.5.28"
@@ -4265,6 +4309,7 @@ dependencies = [
"aes-gcm",
"anyhow",
"argon2",
+ "async-trait",
"base64",
"bcrypt",
"bson",
@@ -4294,7 +4339,9 @@ dependencies = [
"nanoid",
"once_cell",
"pbkdf2",
+ "perry-container-compose",
"perry-runtime",
+ "proptest",
"rand 0.8.5",
"redis",
"regex",
@@ -4308,6 +4355,7 @@ dependencies = [
"scrypt",
"serde",
"serde_json",
+ "serde_yaml",
"sha2",
"sqlx",
"thiserror 1.0.69",
@@ -4748,6 +4796,25 @@ dependencies = [
"syn 2.0.117",
]
+[[package]]
+name = "proptest"
+version = "1.11.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4b45fcc2344c680f5025fe57779faef368840d0bd1f42f216291f0dc4ace4744"
+dependencies = [
+ "bit-set 0.8.0",
+ "bit-vec 0.8.0",
+ "bitflags",
+ "num-traits",
+ "rand 0.9.2",
+ "rand_chacha 0.9.0",
+ "rand_xorshift",
+ "regex-syntax",
+ "rusty-fork",
+ "tempfile",
+ "unarray",
+]
+
[[package]]
name = "psm"
version = "0.1.30"
@@ -4808,6 +4875,12 @@ dependencies = [
"winapi",
]
+[[package]]
+name = "quick-error"
+version = "1.2.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0"
+
[[package]]
name = "quick-error"
version = "2.0.1"
@@ -4961,6 +5034,15 @@ dependencies = [
"getrandom 0.3.4",
]
+[[package]]
+name = "rand_xorshift"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "513962919efc330f829edb2535844d1b912b0fbe2ca165d613e4e8788bb05a5a"
+dependencies = [
+ "rand_core 0.9.5",
+]
+
[[package]]
name = "rav1e"
version = "0.8.1"
@@ -5005,7 +5087,7 @@ dependencies = [
"avif-serialize",
"imgref",
"loop9",
- "quick-error",
+ "quick-error 2.0.1",
"rav1e",
"rayon",
"rgb",
@@ -5412,6 +5494,18 @@ version = "1.0.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d"
+[[package]]
+name = "rusty-fork"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cc6bf79ff24e648f6da1f8d1f011e9cac26491b619e6b9280f2b47f1774e6ee2"
+dependencies = [
+ "fnv",
+ "quick-error 1.2.3",
+ "tempfile",
+ "wait-timeout",
+]
+
[[package]]
name = "ryu"
version = "1.0.23"
@@ -5679,6 +5773,19 @@ dependencies = [
"syn 2.0.117",
]
+[[package]]
+name = "serde_yaml"
+version = "0.9.34+deprecated"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47"
+dependencies = [
+ "indexmap",
+ "itoa",
+ "ryu",
+ "serde",
+ "unsafe-libyaml",
+]
+
[[package]]
name = "servo_arc"
version = "0.3.0"
@@ -5716,12 +5823,30 @@ dependencies = [
"digest",
]
+[[package]]
+name = "sharded-slab"
+version = "0.1.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6"
+dependencies = [
+ "lazy_static",
+]
+
[[package]]
name = "shell-words"
version = "1.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dc6fe69c597f9c37bfeeeeeb33da3530379845f10be461a66d16d03eca2ded77"
+[[package]]
+name = "shellexpand"
+version = "3.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "32824fab5e16e6c4d86dc1ba84489390419a39f97699852b66480bb87d297ed8"
+dependencies = [
+ "dirs 6.0.0",
+]
+
[[package]]
name = "shlex"
version = "1.3.0"
@@ -6480,6 +6605,15 @@ dependencies = [
"syn 2.0.117",
]
+[[package]]
+name = "thread_local"
+version = "1.1.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f60246a4944f24f6e018aa17cdeffb7818b76356965d03b07d6a9886e8962185"
+dependencies = [
+ "cfg-if",
+]
+
[[package]]
name = "tiff"
version = "0.11.3"
@@ -6489,7 +6623,7 @@ dependencies = [
"fax",
"flate2",
"half",
- "quick-error",
+ "quick-error 2.0.1",
"weezl",
"zune-jpeg",
]
@@ -6869,6 +7003,36 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "db97caf9d906fbde555dd62fa95ddba9eecfd14cb388e4f491a66d74cd5fb79a"
dependencies = [
"once_cell",
+ "valuable",
+]
+
+[[package]]
+name = "tracing-log"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3"
+dependencies = [
+ "log",
+ "once_cell",
+ "tracing-core",
+]
+
+[[package]]
+name = "tracing-subscriber"
+version = "0.3.23"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cb7f578e5945fb242538965c2d0b04418d38ec25c79d160cd279bf0731c8d319"
+dependencies = [
+ "matchers",
+ "nu-ansi-term",
+ "once_cell",
+ "regex-automata",
+ "sharded-slab",
+ "smallvec",
+ "thread_local",
+ "tracing",
+ "tracing-core",
+ "tracing-log",
]
[[package]]
@@ -6953,6 +7117,12 @@ version = "1.19.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "562d481066bde0658276a35467c4af00bdc6ee726305698a55b86e61d7ad82bb"
+[[package]]
+name = "unarray"
+version = "0.1.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94"
+
[[package]]
name = "unicase"
version = "2.9.0"
@@ -7026,6 +7196,12 @@ dependencies = [
"subtle",
]
+[[package]]
+name = "unsafe-libyaml"
+version = "0.2.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "673aac59facbab8a9007c7f6108d11f63b603f7cabff99fabf650fea5c32b861"
+
[[package]]
name = "untrusted"
version = "0.9.0"
@@ -7150,6 +7326,12 @@ dependencies = [
"syn 2.0.117",
]
+[[package]]
+name = "valuable"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65"
+
[[package]]
name = "vcpkg"
version = "0.2.15"
@@ -7168,6 +7350,15 @@ version = "0.9.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a"
+[[package]]
+name = "wait-timeout"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "09ac3b126d3914f9849036f826e054cbabdc8519970b8998ddaf3b5bd3c65f11"
+dependencies = [
+ "libc",
+]
+
[[package]]
name = "walkdir"
version = "2.5.0"
diff --git a/Cargo.toml b/Cargo.toml
index 34d9be1f..16492b9d 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -26,6 +26,7 @@ members = [
"crates/perry-codegen-wear-tiles",
"crates/perry-codegen-wasm",
"crates/perry-ui-test",
+ "crates/perry-container-compose",
]
# Only build platform-independent crates by default.
# Platform-specific UI crates (perry-ui-macos, perry-ui-ios, etc.) must be built
diff --git a/README.md b/README.md
index 8d3db750..5ad79944 100644
--- a/README.md
+++ b/README.md
@@ -497,6 +497,43 @@ These packages are natively implemented in Rust — no Node.js required:
| **Database** | mysql2, pg, ioredis |
| **Security** | bcrypt, argon2, jsonwebtoken |
| **Utilities** | dotenv, uuid, nodemailer, zlib, node-cron |
+| **Container** | perry/container (OCI container management) |
+
+---
+
+## Container Module
+
+Perry includes a native container management module `perry/container` for creating, running, and managing OCI containers:
+
+```typescript
+import { run, list, composeUp } from 'perry/container';
+
+// Run a container
+const container = await run({
+ image: 'nginx:alpine',
+ name: 'my-nginx',
+ ports: ['8080:80'],
+});
+
+// List containers
+const containers = await list();
+console.log(containers);
+
+// Multi-container orchestration
+const compose = await composeUp({
+ services: {
+ web: { image: 'nginx:alpine' },
+ db: { image: 'postgres:15-alpine' },
+ },
+});
+```
+
+**Platform support:**
+- macOS/iOS: Podman (apple/container support coming soon)
+- Linux: Podman (native)
+- Windows: Podman Desktop (experimental)
+
+See `example-code/container-demo/` for a complete example.
---
diff --git a/crates/perry-container-compose/Cargo.toml b/crates/perry-container-compose/Cargo.toml
new file mode 100644
index 00000000..82046c4d
--- /dev/null
+++ b/crates/perry-container-compose/Cargo.toml
@@ -0,0 +1,42 @@
+[package]
+name = "perry-container-compose"
+version.workspace = true
+edition.workspace = true
+license.workspace = true
+repository.workspace = true
+authors = ["Perry Contributors"]
+description = "Port of container-compose/cli to Rust - Docker Compose-like experience for Apple Container / Podman"
+
+[dependencies]
+serde = { workspace = true }
+serde_json = { workspace = true }
+serde_yaml = "0.9"
+tokio = { workspace = true }
+clap = { workspace = true }
+anyhow = { workspace = true }
+thiserror = { workspace = true }
+tracing = "0.1"
+tracing-subscriber = { version = "0.3", features = ["env-filter"] }
+async-trait = "0.1"
+md-5 = "0.10"
+hex = "0.4"
+dotenvy = { workspace = true }
+indexmap = { version = "2.2", features = ["serde"] }
+rand = "0.8"
+regex = "1"
+once_cell = "1"
+which = "6"
+shellexpand = "3"
+
+[dev-dependencies]
+tokio = { workspace = true }
+proptest = "1"
+
+[features]
+default = []
+ffi = [] # Enable FFI exports for Perry TypeScript integration
+integration-tests = [] # Tests that require a running container backend
+
+[[bin]]
+name = "perry-compose"
+path = "src/main.rs"
diff --git a/crates/perry-container-compose/examples/build/main.ts b/crates/perry-container-compose/examples/build/main.ts
new file mode 100644
index 00000000..8aaf7f83
--- /dev/null
+++ b/crates/perry-container-compose/examples/build/main.ts
@@ -0,0 +1,23 @@
+import { composeUp, composeDown } from 'perry/compose';
+
+const stack = await composeUp({
+ version: '3.8',
+ services: {
+ app: {
+ build: {
+ context: '.',
+ dockerfile: 'Dockerfile',
+ args: {
+ BUILD_ENV: 'production',
+ },
+ },
+ ports: ['8080:8080'],
+ environment: {
+ NODE_ENV: 'production',
+ },
+ },
+ },
+});
+
+// Tear down when done
+await composeDown(stack);
diff --git a/crates/perry-container-compose/examples/multi-service/main.ts b/crates/perry-container-compose/examples/multi-service/main.ts
new file mode 100644
index 00000000..5fce10b2
--- /dev/null
+++ b/crates/perry-container-compose/examples/multi-service/main.ts
@@ -0,0 +1,36 @@
+import { composeUp, composeDown, composeLogs } from 'perry/compose';
+
+const stack = await composeUp({
+ version: '3.8',
+ services: {
+ db: {
+ image: 'postgres:16-alpine',
+ environment: {
+ // ${VAR:-default} interpolation is supported in string values
+ POSTGRES_USER: '${DB_USER:-myuser}',
+ POSTGRES_PASSWORD: '${DB_PASSWORD:-secret}',
+ POSTGRES_DB: 'mydb',
+ },
+ volumes: ['db-data:/var/lib/postgresql/data'],
+ ports: ['5432:5432'],
+ },
+ web: {
+ image: 'myapp:latest',
+ dependsOn: ['db'],
+ ports: ['3000:3000'],
+ environment: {
+ DATABASE_URL: 'postgres://${DB_USER:-myuser}:${DB_PASSWORD:-secret}@db:5432/mydb',
+ },
+ },
+ },
+ volumes: {
+ 'db-data': {},
+ },
+});
+
+// Stream logs from both services
+const logs = await composeLogs(stack, { services: ['web', 'db'], follow: false });
+console.log(logs);
+
+// Tear down, removing named volumes
+await composeDown(stack, { volumes: true });
diff --git a/crates/perry-container-compose/examples/simple/main.ts b/crates/perry-container-compose/examples/simple/main.ts
new file mode 100644
index 00000000..5a33883f
--- /dev/null
+++ b/crates/perry-container-compose/examples/simple/main.ts
@@ -0,0 +1,21 @@
+import { composeUp, composeDown, composePs } from 'perry/compose';
+
+const stack = await composeUp({
+ version: '3.8',
+ services: {
+ web: {
+ image: 'nginx:alpine',
+ containerName: 'simple-nginx',
+ ports: ['8080:80'],
+ labels: {
+ app: 'simple-nginx',
+ },
+ },
+ },
+});
+
+const statuses = await composePs(stack);
+console.table(statuses);
+
+// Tear down when done
+await composeDown(stack);
diff --git a/crates/perry-container-compose/src/backend.rs b/crates/perry-container-compose/src/backend.rs
new file mode 100644
index 00000000..42b11cb4
--- /dev/null
+++ b/crates/perry-container-compose/src/backend.rs
@@ -0,0 +1,1443 @@
+//! Container backend abstraction — `ContainerBackend` trait, `CliProtocol` trait,
+//! protocol implementations (`DockerProtocol`, `AppleContainerProtocol`, `LimaProtocol`),
+//! generic `CliBackend<P>`, and `detect_backend()`.
+
+use crate::error::{ComposeError, Result};
+use crate::types::{
+ ComposeNetwork, ComposeVolume, ContainerHandle, ContainerInfo, ContainerLogs, ContainerSpec,
+ ImageInfo,
+};
+use async_trait::async_trait;
+use serde::Deserialize;
+use std::collections::HashMap;
+use std::path::PathBuf;
+use std::process::Stdio;
+use tokio::process::Command;
+use tracing::debug;
+
+// ─────────────────────────────────────────────────────────────────────────────
+// 4.8 BackendProbeResult — defined in error.rs, re-exported here
+// ─────────────────────────────────────────────────────────────────────────────
+pub use crate::error::BackendProbeResult;
+
+// ─────────────────────────────────────────────────────────────────────────────
+// 4.1 NetworkConfig and VolumeConfig — lean config structs
+// ─────────────────────────────────────────────────────────────────────────────
+
+/// Lean network configuration decoupled from compose-spec types.
+#[derive(Debug, Clone, Default)]
+pub struct NetworkConfig {
+    pub driver: Option<String>,
+    pub labels: HashMap<String, String>,
+ pub internal: bool,
+ pub enable_ipv6: bool,
+}
+
+/// Lean volume configuration decoupled from compose-spec types.
+#[derive(Debug, Clone, Default)]
+pub struct VolumeConfig {
+    pub driver: Option<String>,
+    pub labels: HashMap<String, String>,
+}
+
+// ─────────────────────────────────────────────────────────────────────────────
+// Conversions from compose-spec types to lean config types
+// ─────────────────────────────────────────────────────────────────────────────
+
+impl From<&ComposeNetwork> for NetworkConfig {
+ fn from(n: &ComposeNetwork) -> Self {
+ NetworkConfig {
+ driver: n.driver.clone(),
+ labels: n.labels.as_ref().map(|l| l.to_map()).unwrap_or_default(),
+ internal: n.internal.unwrap_or(false),
+ enable_ipv6: n.enable_ipv6.unwrap_or(false),
+ }
+ }
+}
+
+impl From<&ComposeVolume> for VolumeConfig {
+ fn from(v: &ComposeVolume) -> Self {
+ VolumeConfig {
+ driver: v.driver.clone(),
+ labels: v.labels.as_ref().map(|l| l.to_map()).unwrap_or_default(),
+ }
+ }
+}
+
+// ─────────────────────────────────────────────────────────────────────────────
+// 4.1 ContainerBackend trait
+// ─────────────────────────────────────────────────────────────────────────────
+
+/// Runtime-agnostic async interface for container operations.
+#[async_trait]
+pub trait ContainerBackend: Send + Sync {
+ fn backend_name(&self) -> &str;
+ async fn check_available(&self) -> Result<()>;
+    async fn run(&self, spec: &ContainerSpec) -> Result<ContainerHandle>;
+    async fn create(&self, spec: &ContainerSpec) -> Result<ContainerHandle>;
+    async fn start(&self, id: &str) -> Result<()>;
+    async fn stop(&self, id: &str, timeout: Option<u32>) -> Result<()>;
+    async fn remove(&self, id: &str, force: bool) -> Result<()>;
+    async fn list(&self, all: bool) -> Result<Vec<ContainerInfo>>;
+    async fn inspect(&self, id: &str) -> Result<ContainerInfo>;
+    async fn logs(&self, id: &str, tail: Option<usize>) -> Result<ContainerLogs>;
+    async fn exec(
+        &self,
+        id: &str,
+        cmd: &[String],
+        env: Option<&HashMap<String, String>>,
+        workdir: Option<&str>,
+    ) -> Result<String>;
+    async fn pull_image(&self, reference: &str) -> Result<()>;
+    async fn list_images(&self) -> Result<Vec<ImageInfo>>;
+    async fn remove_image(&self, reference: &str, force: bool) -> Result<()>;
+    async fn create_network(&self, name: &str, config: &NetworkConfig) -> Result<()>;
+    async fn remove_network(&self, name: &str) -> Result<()>;
+    async fn create_volume(&self, name: &str, config: &VolumeConfig) -> Result<()>;
+    async fn remove_volume(&self, name: &str) -> Result<()>;
+}
+
+// ─────────────────────────────────────────────────────────────────────────────
+// Shared JSON deserialization helpers (Docker-compatible output format)
+// ─────────────────────────────────────────────────────────────────────────────
+
+#[derive(Debug, Deserialize)]
+struct DockerListEntry {
+ #[serde(rename = "ID", alias = "Id", default)]
+ id: String,
+ #[serde(rename = "Names", alias = "names", default)]
+ names: serde_json::Value,
+ #[serde(rename = "Image", alias = "image", default)]
+ image: String,
+ #[serde(rename = "Status", alias = "status", default)]
+ status: String,
+ #[serde(rename = "Ports", alias = "ports", default)]
+ ports: serde_json::Value,
+ #[serde(rename = "Created", alias = "created", default)]
+ created: serde_json::Value,
+}
+
+impl DockerListEntry {
+ fn into_container_info(self) -> ContainerInfo {
+ let name = match &self.names {
+ serde_json::Value::Array(arr) => arr
+ .first()
+ .and_then(|v| v.as_str())
+ .map(|s| s.trim_start_matches('/').to_string())
+ .unwrap_or_default(),
+ serde_json::Value::String(s) => s.trim_start_matches('/').to_string(),
+ _ => String::new(),
+ };
+ let ports = match &self.ports {
+ serde_json::Value::Array(arr) => arr
+ .iter()
+ .filter_map(|v| v.as_str().map(String::from))
+ .collect(),
+ serde_json::Value::String(s) if !s.is_empty() => vec![s.clone()],
+ _ => vec![],
+ };
+ let created = match &self.created {
+ serde_json::Value::String(s) => s.clone(),
+ serde_json::Value::Number(n) => n.to_string(),
+ _ => String::new(),
+ };
+ ContainerInfo {
+ id: self.id,
+ name,
+ image: self.image,
+ status: self.status,
+ ports,
+ created,
+ }
+ }
+}
+
+#[derive(Debug, Deserialize)]
+struct DockerInspectEntry {
+ #[serde(rename = "Id", alias = "ID", default)]
+ id: String,
+ #[serde(rename = "Name", alias = "name", default)]
+ name: String,
+ #[serde(rename = "Image", alias = "image", default)]
+ image: String,
+ #[serde(rename = "State", alias = "state")]
+    state: Option<DockerInspectState>,
+ #[serde(rename = "Created", alias = "created", default)]
+ created: String,
+}
+
+#[derive(Debug, Deserialize)]
+struct DockerInspectState {
+ #[serde(rename = "Running", alias = "running", default)]
+ running: bool,
+ #[serde(rename = "Status", alias = "status", default)]
+ status: String,
+}
+
+#[derive(Debug, Deserialize)]
+struct DockerImageEntry {
+ #[serde(rename = "ID", alias = "Id", default)]
+ id: String,
+ #[serde(rename = "Repository", alias = "repository", default)]
+ repository: String,
+ #[serde(rename = "Tag", alias = "tag", default)]
+ tag: String,
+ #[serde(rename = "Size", alias = "size", default)]
+ size: serde_json::Value,
+ #[serde(rename = "Created", alias = "created", default)]
+ created: String,
+}
+
+fn parse_size(v: &serde_json::Value) -> u64 {
+ match v {
+ serde_json::Value::Number(n) => n.as_u64().unwrap_or(0),
+ serde_json::Value::String(s) => s.parse().unwrap_or(0),
+ _ => 0,
+ }
+}
+
+fn is_not_found(stderr: &str) -> bool {
+ let s = stderr.to_lowercase();
+ s.contains("not found")
+ || s.contains("no such")
+ || s.contains("does not exist")
+ || s.contains("unknown container")
+}
+
+/// Build the common Docker-compatible `run`/`create` flags from a `ContainerSpec`.
+/// When `include_detach` is true, `--detach` is added (Docker/podman/nerdctl).
+/// When false (apple/container), it is omitted.
+pub fn docker_run_flags(spec: &ContainerSpec, include_detach: bool) -> Vec<String> {
+    let mut args: Vec<String> = Vec::new();
+ if spec.rm.unwrap_or(false) {
+ args.push("--rm".into());
+ }
+ if include_detach {
+ args.push("--detach".into());
+ }
+ if let Some(name) = &spec.name {
+ args.push("--name".into());
+ args.push(name.clone());
+ }
+ if let Some(network) = &spec.network {
+ args.push("--network".into());
+ args.push(network.clone());
+ }
+ if let Some(ports) = &spec.ports {
+ for p in ports {
+ args.push("-p".into());
+ args.push(p.clone());
+ }
+ }
+ if let Some(vols) = &spec.volumes {
+ for v in vols {
+ args.push("-v".into());
+ args.push(v.clone());
+ }
+ }
+ if let Some(envs) = &spec.env {
+ let mut pairs: Vec<(&String, &String)> = envs.iter().collect();
+ pairs.sort_by_key(|(k, _)| k.as_str());
+ for (k, v) in pairs {
+ args.push("-e".into());
+ args.push(format!("{}={}", k, v));
+ }
+ }
+ if let Some(ep) = &spec.entrypoint {
+ args.push("--entrypoint".into());
+ args.push(ep.join(" "));
+ }
+ args
+}
+
+// ─────────────────────────────────────────────────────────────────────────────
+// 4.2 CliProtocol trait with Docker-compatible defaults
+// ─────────────────────────────────────────────────────────────────────────────
+
+/// Translates abstract container operations into CLI arguments for a specific
+/// runtime family, and parses the CLI's JSON output back into typed structs.
+///
+/// Every method has a Docker-compatible default. Only `protocol_name()` is
+/// required. New protocols override only what differs.
+pub trait CliProtocol: Send + Sync {
+ /// Human-readable protocol name (e.g. `"docker-compatible"`, `"apple/container"`).
+ fn protocol_name(&self) -> &str;
+
+ /// Optional prefix inserted before every subcommand.
+ /// `LimaProtocol` returns `Some(["shell", "", "nerdctl"])`.
+    fn subcommand_prefix(&self) -> Option<Vec<String>> {
+ None
+ }
+
+ // ── Argument builders (Docker-compatible defaults) ─────────────────────
+
+    fn run_args(&self, spec: &ContainerSpec) -> Vec<String> {
+ let mut args = vec!["run".into()];
+ args.extend(docker_run_flags(spec, true));
+ args.push(spec.image.clone());
+ if let Some(cmd) = &spec.cmd {
+ args.extend(cmd.iter().cloned());
+ }
+ args
+ }
+
+    fn create_args(&self, spec: &ContainerSpec) -> Vec<String> {
+ let mut args = vec!["create".into()];
+ args.extend(docker_run_flags(spec, false));
+ args.push(spec.image.clone());
+ if let Some(cmd) = &spec.cmd {
+ args.extend(cmd.iter().cloned());
+ }
+ args
+ }
+
+    fn start_args(&self, id: &str) -> Vec<String> {
+ vec!["start".into(), id.into()]
+ }
+
+    fn stop_args(&self, id: &str, timeout: Option<u32>) -> Vec<String> {
+ let mut args = vec!["stop".into()];
+ if let Some(t) = timeout {
+ args.push("-t".into());
+ args.push(t.to_string());
+ }
+ args.push(id.into());
+ args
+ }
+
+    fn remove_args(&self, id: &str, force: bool) -> Vec<String> {
+ let mut args = vec!["rm".into()];
+ if force {
+ args.push("-f".into());
+ }
+ args.push(id.into());
+ args
+ }
+
+    fn list_args(&self, all: bool) -> Vec<String> {
+ let mut args = vec!["ps".into(), "--format".into(), "json".into()];
+ if all {
+ args.push("--all".into());
+ }
+ args
+ }
+
+    fn inspect_args(&self, id: &str) -> Vec<String> {
+ vec!["inspect".into(), "--format".into(), "json".into(), id.into()]
+ }
+
+    fn logs_args(&self, id: &str, tail: Option<usize>) -> Vec<String> {
+ let mut args = vec!["logs".into()];
+ if let Some(t) = tail {
+ args.push("--tail".into());
+ args.push(t.to_string());
+ }
+ args.push(id.into());
+ args
+ }
+
+    fn exec_args(
+        &self,
+        id: &str,
+        cmd: &[String],
+        env: Option<&HashMap<String, String>>,
+        workdir: Option<&str>,
+    ) -> Vec<String> {
+ let mut args = vec!["exec".into()];
+ if let Some(wd) = workdir {
+ args.push("--workdir".into());
+ args.push(wd.into());
+ }
+ if let Some(envs) = env {
+ let mut pairs: Vec<(&String, &String)> = envs.iter().collect();
+ pairs.sort_by_key(|(k, _)| k.as_str());
+ for (k, v) in pairs {
+ args.push("-e".into());
+ args.push(format!("{}={}", k, v));
+ }
+ }
+ args.push(id.into());
+ args.extend(cmd.iter().cloned());
+ args
+ }
+
+    fn pull_image_args(&self, reference: &str) -> Vec<String> {
+ vec!["pull".into(), reference.into()]
+ }
+
+    fn list_images_args(&self) -> Vec<String> {
+ vec!["images".into(), "--format".into(), "json".into()]
+ }
+
+    fn remove_image_args(&self, reference: &str, force: bool) -> Vec<String> {
+ let mut args = vec!["rmi".into()];
+ if force {
+ args.push("-f".into());
+ }
+ args.push(reference.into());
+ args
+ }
+
+    fn create_network_args(&self, name: &str, config: &NetworkConfig) -> Vec<String> {
+ let mut args = vec!["network".into(), "create".into()];
+ if let Some(d) = &config.driver {
+ args.push("--driver".into());
+ args.push(d.clone());
+ }
+ let mut pairs: Vec<(&String, &String)> = config.labels.iter().collect();
+ pairs.sort_by_key(|(k, _)| k.as_str());
+ for (k, v) in pairs {
+ args.push("--label".into());
+ args.push(format!("{}={}", k, v));
+ }
+ if config.internal {
+ args.push("--internal".into());
+ }
+ if config.enable_ipv6 {
+ args.push("--ipv6".into());
+ }
+ args.push(name.into());
+ args
+ }
+
+    fn remove_network_args(&self, name: &str) -> Vec<String> {
+ vec!["network".into(), "rm".into(), name.into()]
+ }
+
+    fn create_volume_args(&self, name: &str, config: &VolumeConfig) -> Vec<String> {
+ let mut args = vec!["volume".into(), "create".into()];
+ if let Some(d) = &config.driver {
+ args.push("--driver".into());
+ args.push(d.clone());
+ }
+ let mut pairs: Vec<(&String, &String)> = config.labels.iter().collect();
+ pairs.sort_by_key(|(k, _)| k.as_str());
+ for (k, v) in pairs {
+ args.push("--label".into());
+ args.push(format!("{}={}", k, v));
+ }
+ args.push(name.into());
+ args
+ }
+
+    fn remove_volume_args(&self, name: &str) -> Vec<String> {
+ vec!["volume".into(), "rm".into(), name.into()]
+ }
+
+ // ── Output parsers (Docker JSON defaults) ─────────────────────────────
+
+    fn parse_list_output(&self, stdout: &str) -> Vec<ContainerInfo> {
+        let trimmed = stdout.trim();
+        if trimmed.starts_with('[') {
+            serde_json::from_str::<Vec<DockerListEntry>>(trimmed)
+ .unwrap_or_default()
+ .into_iter()
+ .map(|e| e.into_container_info())
+ .collect()
+ } else {
+ trimmed
+ .lines()
+ .filter(|l| !l.trim().is_empty())
+                .filter_map(|l| serde_json::from_str::<DockerListEntry>(l).ok())
+ .map(|e| e.into_container_info())
+ .collect()
+ }
+ }
+
+    fn parse_inspect_output(&self, id: &str, stdout: &str) -> Option<ContainerInfo> {
+        let trimmed = stdout.trim();
+        let entry: Option<DockerInspectEntry> = if trimmed.starts_with('[') {
+            serde_json::from_str::<Vec<DockerInspectEntry>>(trimmed)
+                .ok()
+                .and_then(|v| v.into_iter().next())
+        } else {
+            serde_json::from_str::<DockerInspectEntry>(trimmed).ok()
+        };
+ entry.map(|e| {
+ let running = e.state.as_ref().map(|s| s.running).unwrap_or(false);
+ let status = e
+ .state
+ .as_ref()
+ .map(|s| s.status.clone())
+ .filter(|s| !s.is_empty())
+ .unwrap_or_else(|| if running { "running" } else { "stopped" }.into());
+ ContainerInfo {
+ id: if e.id.is_empty() { id.to_string() } else { e.id },
+ name: e.name.trim_start_matches('/').to_string(),
+ image: e.image,
+ status,
+ ports: vec![],
+ created: e.created,
+ }
+ })
+ }
+
+    fn parse_list_images_output(&self, stdout: &str) -> Vec<ImageInfo> {
+        let trimmed = stdout.trim();
+        let entries: Vec<DockerImageEntry> = if trimmed.starts_with('[') {
+ serde_json::from_str(trimmed).unwrap_or_default()
+ } else {
+ trimmed
+ .lines()
+ .filter(|l| !l.trim().is_empty())
+ .filter_map(|l| serde_json::from_str(l).ok())
+ .collect()
+ };
+ entries
+ .into_iter()
+ .map(|e| ImageInfo {
+ id: e.id,
+ repository: e.repository,
+ tag: e.tag,
+ size: parse_size(&e.size),
+ created: e.created,
+ })
+ .collect()
+ }
+
+ fn parse_container_id(&self, stdout: &str) -> String {
+ stdout.trim().to_string()
+ }
+}
+
+// ─────────────────────────────────────────────────────────────────────────────
+// 4.3 DockerProtocol
+// ─────────────────────────────────────────────────────────────────────────────
+
+/// `CliProtocol` for Docker-compatible runtimes: docker, podman, nerdctl,
+/// orbstack, colima. All methods use the trait defaults.
+pub struct DockerProtocol;
+
+impl CliProtocol for DockerProtocol {
+ fn protocol_name(&self) -> &str {
+ "docker-compatible"
+ }
+ // All other methods inherit Docker-compatible defaults from the trait.
+}
+
+// ─────────────────────────────────────────────────────────────────────────────
+// 4.4 AppleContainerProtocol
+// ─────────────────────────────────────────────────────────────────────────────
+
+/// `CliProtocol` for the `apple/container` CLI on macOS/iOS.
+///
+/// The only difference from Docker: `run` does not support `--detach`.
+pub struct AppleContainerProtocol;
+
+impl CliProtocol for AppleContainerProtocol {
+ fn protocol_name(&self) -> &str {
+ "apple/container"
+ }
+
+ /// `apple/container run` does not accept `--detach`; omit it.
+ fn run_args(&self, spec: &ContainerSpec) -> Vec {
+ let mut args = vec!["run".into()];
+ args.extend(docker_run_flags(spec, false));
+ args.push(spec.image.clone());
+ if let Some(cmd) = &spec.cmd {
+ args.extend(cmd.iter().cloned());
+ }
+ args
+ }
+}
+
+// ─────────────────────────────────────────────────────────────────────────────
+// 4.5 LimaProtocol
+// ─────────────────────────────────────────────────────────────────────────────
+
+/// `CliProtocol` for Lima. Wraps every command with `limactl shell nerdctl`.
+pub struct LimaProtocol {
+ pub instance: String,
+}
+
+impl LimaProtocol {
+    pub fn new(instance: impl Into<String>) -> Self {
+ LimaProtocol {
+ instance: instance.into(),
+ }
+ }
+}
+
+impl CliProtocol for LimaProtocol {
+ fn protocol_name(&self) -> &str {
+ "lima"
+ }
+
+    fn subcommand_prefix(&self) -> Option<Vec<String>> {
+ Some(vec![
+ "shell".into(),
+ self.instance.clone(),
+ "nerdctl".into(),
+ ])
+ }
+ // All other methods inherit Docker-compatible defaults from the trait.
+}
+
+// ─────────────────────────────────────────────────────────────────────────────
+// 4.6 Generic CliBackend
+// ─────────────────────────────────────────────────────────────────────────────
+
+/// Concrete `ContainerBackend` that executes CLI commands via
+/// `tokio::process::Command`. Generic over `P: CliProtocol` — zero vtable
+/// overhead, monomorphised at compile time.
+pub struct CliBackend<P: CliProtocol> {
+    pub bin: PathBuf,
+    pub protocol: P,
+}
+
+/// Type aliases for the common backends.
+pub type DockerBackend = CliBackend<DockerProtocol>;
+pub type AppleBackend = CliBackend<AppleContainerProtocol>;
+pub type LimaBackend = CliBackend<LimaProtocol>;
+
+impl<P: CliProtocol> CliBackend<P> {
+    pub fn new(bin: PathBuf, protocol: P) -> Self {
+        CliBackend { bin, protocol }
+ }
+
+ /// Build the full argument list, prepending the protocol's subcommand
+ /// prefix (e.g. `["shell", "default", "nerdctl"]` for Lima) when present.
+    pub fn full_args(&self, subcommand_args: Vec<String>) -> Vec<String> {
+ match self.protocol.subcommand_prefix() {
+ Some(prefix) => {
+ let mut full = prefix;
+ full.extend(subcommand_args);
+ full
+ }
+ None => subcommand_args,
+ }
+ }
+
+ /// Execute the binary with the given arguments and return the raw output.
+    async fn exec_raw(&self, args: Vec<String>) -> Result<std::process::Output> {
+ let full = self.full_args(args);
+ let output = Command::new(&self.bin)
+ .args(&full)
+ .stdout(Stdio::piped())
+ .stderr(Stdio::piped())
+ .output()
+ .await
+ .map_err(ComposeError::IoError)?;
+ Ok(output)
+ }
+
+ /// Execute and return stdout as a `String`, mapping non-zero exit codes to
+ /// `ComposeError::BackendError`.
+    async fn exec_ok(&self, args: Vec<String>) -> Result<String> {
+ let output = self.exec_raw(args).await?;
+ if output.status.success() {
+ Ok(String::from_utf8_lossy(&output.stdout).to_string())
+ } else {
+ Err(ComposeError::BackendError {
+ code: output.status.code().unwrap_or(-1),
+ message: String::from_utf8_lossy(&output.stderr).to_string(),
+ })
+ }
+ }
+}
+
+// Fix: the impl header and many signatures lost their generic arguments in
+// transit; restored. NOTE(review): the `Send + Sync` bound on `P` is required
+// by `#[async_trait]`'s boxed futures unless `CliProtocol` already implies it
+// — confirm against the trait definition. The integer widths of `timeout`
+// (`u32`) and `tail` (`usize`) are inferred from call sites — confirm against
+// the `CliProtocol` method signatures.
+#[async_trait]
+impl<P: CliProtocol + Send + Sync> ContainerBackend for CliBackend<P> {
+    /// Backend name derived from the binary's file name (e.g. "podman").
+    fn backend_name(&self) -> &str {
+        self.bin
+            .file_name()
+            .and_then(|n| n.to_str())
+            .unwrap_or("unknown")
+    }
+
+    /// Cheap availability probe: `<bin> --version` must exit successfully.
+    async fn check_available(&self) -> Result<()> {
+        let output = Command::new(&self.bin)
+            .arg("--version")
+            .stdout(Stdio::piped())
+            .stderr(Stdio::piped())
+            .output()
+            .await
+            .map_err(ComposeError::IoError)?;
+        if output.status.success() {
+            Ok(())
+        } else {
+            Err(ComposeError::BackendError {
+                code: output.status.code().unwrap_or(-1),
+                message: format!(
+                    "'{}' not available: {}",
+                    self.backend_name(),
+                    String::from_utf8_lossy(&output.stderr)
+                ),
+            })
+        }
+    }
+
+    /// Create + start a container; the handle's name falls back to the id
+    /// when the spec has no explicit name.
+    async fn run(&self, spec: &ContainerSpec) -> Result<ContainerHandle> {
+        let args = self.protocol.run_args(spec);
+        let stdout = self.exec_ok(args).await?;
+        let id = self.protocol.parse_container_id(&stdout);
+        let name = spec.name.clone().or_else(|| Some(id.clone()));
+        Ok(ContainerHandle { id, name })
+    }
+
+    /// Create a container without starting it.
+    async fn create(&self, spec: &ContainerSpec) -> Result<ContainerHandle> {
+        let args = self.protocol.create_args(spec);
+        let stdout = self.exec_ok(args).await?;
+        let id = self.protocol.parse_container_id(&stdout);
+        let name = spec.name.clone().or_else(|| Some(id.clone()));
+        Ok(ContainerHandle { id, name })
+    }
+
+    async fn start(&self, id: &str) -> Result<()> {
+        self.exec_ok(self.protocol.start_args(id)).await?;
+        Ok(())
+    }
+
+    /// Stop a container, optionally with a grace-period in seconds.
+    async fn stop(&self, id: &str, timeout: Option<u32>) -> Result<()> {
+        self.exec_ok(self.protocol.stop_args(id, timeout)).await?;
+        Ok(())
+    }
+
+    async fn remove(&self, id: &str, force: bool) -> Result<()> {
+        self.exec_ok(self.protocol.remove_args(id, force)).await?;
+        Ok(())
+    }
+
+    async fn list(&self, all: bool) -> Result<Vec<ContainerInfo>> {
+        let stdout = self.exec_ok(self.protocol.list_args(all)).await?;
+        Ok(self.protocol.parse_list_output(&stdout))
+    }
+
+    /// Inspect a container; a "not found" style stderr is mapped to
+    /// `ComposeError::NotFound` so callers can distinguish absence from
+    /// backend failure.
+    async fn inspect(&self, id: &str) -> Result<ContainerInfo> {
+        let output = self.exec_raw(self.protocol.inspect_args(id)).await?;
+        if !output.status.success() {
+            let stderr = String::from_utf8_lossy(&output.stderr);
+            if is_not_found(&stderr) {
+                return Err(ComposeError::NotFound(id.to_string()));
+            }
+            return Err(ComposeError::BackendError {
+                code: output.status.code().unwrap_or(-1),
+                message: stderr.to_string(),
+            });
+        }
+        let stdout = String::from_utf8_lossy(&output.stdout);
+        self.protocol
+            .parse_inspect_output(id, &stdout)
+            .ok_or_else(|| ComposeError::NotFound(id.to_string()))
+    }
+
+    /// Fetch logs; exit status is intentionally ignored — partial logs are
+    /// still returned to the caller.
+    async fn logs(&self, id: &str, tail: Option<usize>) -> Result<ContainerLogs> {
+        let output = self.exec_raw(self.protocol.logs_args(id, tail)).await?;
+        Ok(ContainerLogs {
+            stdout: String::from_utf8_lossy(&output.stdout).to_string(),
+            stderr: String::from_utf8_lossy(&output.stderr).to_string(),
+        })
+    }
+
+    /// Execute a command inside a running container; like `logs`, the exit
+    /// status is not checked — both streams are returned as-is.
+    async fn exec(
+        &self,
+        id: &str,
+        cmd: &[String],
+        env: Option<&HashMap<String, String>>,
+        workdir: Option<&str>,
+    ) -> Result<ContainerLogs> {
+        let output = self
+            .exec_raw(self.protocol.exec_args(id, cmd, env, workdir))
+            .await?;
+        Ok(ContainerLogs {
+            stdout: String::from_utf8_lossy(&output.stdout).to_string(),
+            stderr: String::from_utf8_lossy(&output.stderr).to_string(),
+        })
+    }
+
+    async fn pull_image(&self, reference: &str) -> Result<()> {
+        self.exec_ok(self.protocol.pull_image_args(reference)).await?;
+        Ok(())
+    }
+
+    async fn list_images(&self) -> Result<Vec<ImageInfo>> {
+        let stdout = self.exec_ok(self.protocol.list_images_args()).await?;
+        Ok(self.protocol.parse_list_images_output(&stdout))
+    }
+
+    async fn remove_image(&self, reference: &str, force: bool) -> Result<()> {
+        self.exec_ok(self.protocol.remove_image_args(reference, force))
+            .await?;
+        Ok(())
+    }
+
+    async fn create_network(&self, name: &str, config: &NetworkConfig) -> Result<()> {
+        self.exec_ok(self.protocol.create_network_args(name, config))
+            .await?;
+        Ok(())
+    }
+
+    /// Remove a network; already-gone networks are treated as success so
+    /// teardown is idempotent.
+    async fn remove_network(&self, name: &str) -> Result<()> {
+        let output = self
+            .exec_raw(self.protocol.remove_network_args(name))
+            .await?;
+        if !output.status.success() {
+            let stderr = String::from_utf8_lossy(&output.stderr);
+            if is_not_found(&stderr) {
+                return Ok(());
+            }
+            return Err(ComposeError::BackendError {
+                code: output.status.code().unwrap_or(-1),
+                message: stderr.to_string(),
+            });
+        }
+        Ok(())
+    }
+
+    async fn create_volume(&self, name: &str, config: &VolumeConfig) -> Result<()> {
+        self.exec_ok(self.protocol.create_volume_args(name, config))
+            .await?;
+        Ok(())
+    }
+
+    /// Remove a volume; like `remove_network`, "not found" is idempotent
+    /// success.
+    async fn remove_volume(&self, name: &str) -> Result<()> {
+        let output = self
+            .exec_raw(self.protocol.remove_volume_args(name))
+            .await?;
+        if !output.status.success() {
+            let stderr = String::from_utf8_lossy(&output.stderr);
+            if is_not_found(&stderr) {
+                return Ok(());
+            }
+            return Err(ComposeError::BackendError {
+                code: output.status.code().unwrap_or(-1),
+                message: stderr.to_string(),
+            });
+        }
+        Ok(())
+    }
+}
+
+// ─────────────────────────────────────────────────────────────────────────────
+// 4.7 detect_backend() and probe_candidate()
+// ─────────────────────────────────────────────────────────────────────────────
+
+/// Upper bound, in seconds, on each backend probe command — keeps
+/// `detect_backend()` responsive when a runtime binary hangs.
+const PROBE_TIMEOUT_SECS: u64 = 2;
+
+/// Platform-ordered list of candidate runtime names to probe.
+///
+/// macOS/iOS prefer native and VM-based runtimes before plain docker;
+/// Linux (and any other platform) probes the daemonless runtimes first.
+fn platform_candidates() -> &'static [&'static str] {
+    if cfg!(any(target_os = "macos", target_os = "ios")) {
+        &[
+            "apple/container",
+            "orbstack",
+            "colima",
+            "rancher-desktop",
+            "podman",
+            "lima",
+            "docker",
+        ]
+    } else {
+        // Linux and every other platform share the same probe order.
+        &["podman", "nerdctl", "docker"]
+    }
+}
+
+/// Run a quick probe command with a timeout and return its stdout.
+///
+/// Errors are returned as human-readable `String`s (stderr text, the I/O
+/// error, or a timeout message) because probe failures feed directly into
+/// user-facing "why this backend was skipped" diagnostics.
+//
+// Fix: the return type's generic arguments (`Result<String, String>`) were
+// stripped in transit; restored.
+async fn probe_run(bin: &str, args: &[&str]) -> std::result::Result<String, String> {
+    use tokio::time::{timeout, Duration};
+    let fut = Command::new(bin)
+        .args(args)
+        .stdout(Stdio::piped())
+        .stderr(Stdio::piped())
+        .output();
+    match timeout(Duration::from_secs(PROBE_TIMEOUT_SECS), fut).await {
+        Ok(Ok(out)) => {
+            if out.status.success() {
+                Ok(String::from_utf8_lossy(&out.stdout).to_string())
+            } else {
+                Err(String::from_utf8_lossy(&out.stderr).to_string())
+            }
+        }
+        Ok(Err(e)) => Err(e.to_string()),
+        Err(_) => Err(format!("probe timed out after {}s", PROBE_TIMEOUT_SECS)),
+    }
+}
+
+/// Probe a single named runtime and return a type-erased
+/// `Box<dyn ContainerBackend>` if it is available, or a human-readable
+/// reason string if it is not.
+//
+// Fix: the return type (`Result<Box<dyn ContainerBackend>, String>`) and two
+// serde_json turbofishes lost their generic arguments in transit; restored.
+pub async fn probe_candidate(
+    name: &str,
+) -> std::result::Result<Box<dyn ContainerBackend>, String> {
+    match name {
+        // ── apple/container ──────────────────────────────────────────────
+        "apple/container" => {
+            let bin = which::which("container")
+                .map_err(|_| "container binary not found on PATH".to_string())?;
+            probe_run(bin.to_str().unwrap_or("container"), &["--version"])
+                .await
+                .map_err(|e| format!("apple/container --version failed: {}", e))?;
+            Ok(Box::new(CliBackend::new(bin, AppleContainerProtocol)))
+        }
+
+        // ── orbstack ─────────────────────────────────────────────────────
+        // Available if either `orb --version` works or the OrbStack docker
+        // socket exists; either way the Docker CLI protocol is used.
+        "orbstack" => {
+            let orb_ok = which::which("orb")
+                .ok()
+                .map(|b| {
+                    let b_str = b.to_string_lossy().to_string();
+                    async move { probe_run(&b_str, &["--version"]).await.is_ok() }
+                });
+            let sock_ok = std::path::Path::new(
+                &shellexpand::tilde("~/.orbstack/run/docker.sock").to_string(),
+            )
+            .exists();
+            let orb_available = match orb_ok {
+                Some(fut) => fut.await,
+                None => false,
+            };
+            if orb_available || sock_ok {
+                let bin = which::which("docker")
+                    .or_else(|_| which::which("orb"))
+                    .map_err(|_| "orbstack: neither docker nor orb found".to_string())?;
+                Ok(Box::new(CliBackend::new(bin, DockerProtocol)))
+            } else {
+                Err("orbstack: neither `orb --version` succeeded nor socket found".into())
+            }
+        }
+
+        // ── colima ───────────────────────────────────────────────────────
+        // colima must report "running"; actual commands go through docker.
+        "colima" => {
+            let bin = which::which("colima")
+                .map_err(|_| "colima not found".to_string())?;
+            let status = probe_run(bin.to_str().unwrap_or("colima"), &["status"])
+                .await
+                .map_err(|e| format!("colima status failed: {}", e))?;
+            if !status.to_lowercase().contains("running") {
+                return Err("colima is installed but not running".into());
+            }
+            let docker_bin = which::which("docker")
+                .map_err(|_| "docker CLI not found (needed for colima)".to_string())?;
+            Ok(Box::new(CliBackend::new(docker_bin, DockerProtocol)))
+        }
+
+        // ── rancher-desktop ──────────────────────────────────────────────
+        "rancher-desktop" => {
+            let bin = which::which("nerdctl")
+                .map_err(|_| "nerdctl not found".to_string())?;
+            probe_run(bin.to_str().unwrap_or("nerdctl"), &["--version"])
+                .await
+                .map_err(|e| format!("nerdctl --version failed: {}", e))?;
+            let sock = std::path::Path::new(
+                &shellexpand::tilde("~/.rd/run/containerd-shim.sock").to_string(),
+            )
+            .exists();
+            if sock {
+                Ok(Box::new(CliBackend::new(bin, DockerProtocol)))
+            } else {
+                Err("rancher-desktop: nerdctl found but containerd socket missing".into())
+            }
+        }
+
+        // ── podman ───────────────────────────────────────────────────────
+        "podman" => {
+            let bin = which::which("podman")
+                .map_err(|_| "podman not found".to_string())?;
+            probe_run(bin.to_str().unwrap_or("podman"), &["--version"])
+                .await
+                .map_err(|e| format!("podman --version failed: {}", e))?;
+
+            // On macOS podman needs a running VM ("machine"); check for one.
+            #[cfg(any(target_os = "macos", target_os = "ios"))]
+            {
+                let machines = probe_run(
+                    bin.to_str().unwrap_or("podman"),
+                    &["machine", "list", "--format", "json"],
+                )
+                .await
+                .unwrap_or_default();
+                let has_running = serde_json::from_str::<Vec<serde_json::Value>>(&machines)
+                    .unwrap_or_default()
+                    .iter()
+                    .any(|m| m.get("Running").and_then(|v| v.as_bool()).unwrap_or(false));
+                if !has_running {
+                    return Err(
+                        "podman: no running machine found (run `podman machine start`)".into(),
+                    );
+                }
+            }
+
+            Ok(Box::new(CliBackend::new(bin, DockerProtocol)))
+        }
+
+        // ── lima ─────────────────────────────────────────────────────────
+        // `limactl list --json` emits one JSON object per line; pick the
+        // first running instance by name.
+        "lima" => {
+            let bin = which::which("limactl")
+                .map_err(|_| "limactl not found".to_string())?;
+            let list_out = probe_run(bin.to_str().unwrap_or("limactl"), &["list", "--json"])
+                .await
+                .map_err(|e| format!("limactl list --json failed: {}", e))?;
+            let instance = list_out
+                .lines()
+                .filter_map(|l| serde_json::from_str::<serde_json::Value>(l).ok())
+                .find(|v| {
+                    v.get("status")
+                        .and_then(|s| s.as_str())
+                        .map(|s| s.eq_ignore_ascii_case("running"))
+                        .unwrap_or(false)
+                })
+                .and_then(|v| v.get("name").and_then(|n| n.as_str()).map(String::from))
+                .ok_or_else(|| "limactl: no running Lima instance found".to_string())?;
+            Ok(Box::new(CliBackend::new(bin, LimaProtocol::new(instance))))
+        }
+
+        // ── nerdctl (standalone) ─────────────────────────────────────────
+        "nerdctl" => {
+            let bin = which::which("nerdctl")
+                .map_err(|_| "nerdctl not found".to_string())?;
+            probe_run(bin.to_str().unwrap_or("nerdctl"), &["--version"])
+                .await
+                .map_err(|e| format!("nerdctl --version failed: {}", e))?;
+            Ok(Box::new(CliBackend::new(bin, DockerProtocol)))
+        }
+
+        // ── docker ───────────────────────────────────────────────────────
+        "docker" => {
+            let bin = which::which("docker")
+                .map_err(|_| "docker not found".to_string())?;
+            probe_run(bin.to_str().unwrap_or("docker"), &["--version"])
+                .await
+                .map_err(|e| format!("docker --version failed: {}", e))?;
+            Ok(Box::new(CliBackend::new(bin, DockerProtocol)))
+        }
+
+        other => Err(format!("unknown runtime '{}'", other)),
+    }
+}
+
+/// Detect the best available container backend for the current platform.
+///
+/// 1. If `PERRY_CONTAINER_BACKEND` is set, use that backend directly.
+/// 2. Otherwise, probe `platform_candidates()` in order with a 2s timeout each.
+/// 3. If no candidate is available, returns `Err(NoBackendFound { probed })`
+///    carrying the per-candidate failure reasons for diagnostics.
+//
+// Fix: the return type (`Result<Box<dyn ContainerBackend>, ComposeError>`)
+// and the `Vec<BackendProbeResult>` annotation were stripped in transit;
+// restored.
+pub async fn detect_backend() -> std::result::Result<Box<dyn ContainerBackend>, ComposeError> {
+    use std::time::Duration;
+
+    // ── Override via env var ──────────────────────────────────────────────
+    if let Ok(override_name) = std::env::var("PERRY_CONTAINER_BACKEND") {
+        let name = override_name.trim().to_string();
+        debug!("PERRY_CONTAINER_BACKEND={}, probing directly", name);
+        return probe_candidate(&name).await.map_err(|reason| {
+            ComposeError::BackendNotAvailable {
+                name: name.clone(),
+                reason,
+            }
+        });
+    }
+
+    // ── Platform probe sequence ───────────────────────────────────────────
+    let mut probed: Vec<BackendProbeResult> = Vec::new();
+
+    for &candidate in platform_candidates() {
+        debug!("probing container backend: {}", candidate);
+        // Outer timeout guards against probes that block before their own
+        // internal `probe_run` timeout kicks in (e.g. `which` on a dead NFS
+        // PATH entry).
+        match tokio::time::timeout(
+            Duration::from_secs(PROBE_TIMEOUT_SECS),
+            probe_candidate(candidate),
+        )
+        .await
+        {
+            Ok(Ok(backend)) => {
+                debug!("selected container backend: {}", candidate);
+                return Ok(backend);
+            }
+            Ok(Err(reason)) => {
+                debug!("backend '{}' not available: {}", candidate, reason);
+                probed.push(BackendProbeResult {
+                    name: candidate.to_string(),
+                    available: false,
+                    reason,
+                });
+            }
+            Err(_) => {
+                debug!("backend '{}' probe timed out", candidate);
+                probed.push(BackendProbeResult {
+                    name: candidate.to_string(),
+                    available: false,
+                    reason: format!("probe timed out after {}s", PROBE_TIMEOUT_SECS),
+                });
+            }
+        }
+    }
+
+    Err(ComposeError::NoBackendFound { probed })
+}
+
+// ─────────────────────────────────────────────────────────────────────────────
+// Legacy compatibility shims
+// ─────────────────────────────────────────────────────────────────────────────
+
+/// Legacy container status enum kept for backward compatibility with `compose.rs`.
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub enum ContainerStatus {
+    /// Container exists and is currently running.
+    Running,
+    /// Container exists but is not running.
+    Stopped,
+    /// No container with the given name/id exists.
+    NotFound,
+}
+
+impl ContainerStatus {
+    /// True only for `Running`.
+    pub fn is_running(&self) -> bool {
+        *self == ContainerStatus::Running
+    }
+
+    /// True for any state except `NotFound`.
+    pub fn exists(&self) -> bool {
+        *self != ContainerStatus::NotFound
+    }
+}
+
+/// Legacy exec result kept for backward compatibility.
+#[derive(Debug, Clone)]
+pub struct ExecResult {
+    /// Captured standard output of the executed command.
+    pub stdout: String,
+    /// Captured standard error of the executed command.
+    pub stderr: String,
+    /// Process exit code as reported by the backend.
+    pub exit_code: i32,
+}
+
+/// Legacy `Backend` trait kept for backward compatibility with `compose.rs`.
+/// New code should use `ContainerBackend` + `CliBackend` instead.
+//
+// Fix: generic arguments were stripped throughout (`HashMap`, bare `Result`,
+// `Option`); restored. NOTE(review): the return types of `inspect`
+// (`ContainerStatus`), `list` (`Vec<String>` of container names), `logs`
+// (`String`) and `exec` (`ExecResult`), and the `usize` width of `tail`,
+// are inferred from the legacy types above and the ContainerBackend trait —
+// confirm against the implementations in `compose.rs`.
+#[async_trait]
+pub trait Backend: Send + Sync {
+    fn name(&self) -> &'static str;
+
+    async fn build(
+        &self,
+        context: &str,
+        dockerfile: Option<&str>,
+        tag: &str,
+        args: Option<&HashMap<String, String>>,
+        target: Option<&str>,
+        network: Option<&str>,
+    ) -> Result<()>;
+
+    async fn run(
+        &self,
+        image: &str,
+        name: &str,
+        ports: Option<&[String]>,
+        env: Option<&HashMap<String, String>>,
+        volumes: Option<&[String]>,
+        labels: Option<&HashMap<String, String>>,
+        cmd: Option<&[String]>,
+        detach: bool,
+    ) -> Result<()>;
+
+    async fn start(&self, name: &str) -> Result<()>;
+    async fn stop(&self, name: &str) -> Result<()>;
+    async fn remove(&self, name: &str, force: bool) -> Result<()>;
+    async fn inspect(&self, name: &str) -> Result<ContainerStatus>;
+    async fn list(&self, label_filter: Option<&str>) -> Result<Vec<String>>;
+    async fn logs(&self, name: &str, tail: Option<usize>, follow: bool) -> Result<String>;
+    async fn exec(
+        &self,
+        name: &str,
+        cmd: &[String],
+        user: Option<&str>,
+        workdir: Option<&str>,
+        env: Option<&HashMap<String, String>>,
+    ) -> Result<ExecResult>;
+    async fn create_network(
+        &self,
+        name: &str,
+        driver: Option<&str>,
+        labels: Option<&HashMap<String, String>>,
+    ) -> Result<()>;
+    async fn remove_network(&self, name: &str) -> Result<()>;
+    async fn create_volume(
+        &self,
+        name: &str,
+        driver: Option<&str>,
+        labels: Option<&HashMap<String, String>>,
+    ) -> Result<()>;
+    async fn remove_volume(&self, name: &str) -> Result<()>;
+}
+
+/// Synchronous best-effort backend selector for legacy callers.
+/// Always fails with `BackendNotAvailable`; prefer `detect_backend().await`
+/// in async contexts.
+//
+// Fix: return type generics (`Result<Box<dyn Backend>>`) were stripped in
+// transit; restored.
+pub fn get_backend() -> Result<Box<dyn Backend>> {
+    Err(ComposeError::BackendNotAvailable {
+        name: "legacy".into(),
+        reason: "use detect_backend() instead".into(),
+    })
+}
+
+/// Synchronous best-effort `ContainerBackend` selector for legacy callers.
+/// Always fails with `BackendNotAvailable`; prefer `detect_backend().await`
+/// in async contexts.
+//
+// Fix: return type generics (`Result<Box<dyn ContainerBackend>>`) were
+// stripped in transit; restored.
+pub fn get_container_backend() -> Result<Box<dyn ContainerBackend>> {
+    Err(ComposeError::BackendNotAvailable {
+        name: "legacy".into(),
+        reason: "use detect_backend() instead".into(),
+    })
+}
+
+// ─────────────────────────────────────────────────────────────────────────────
+// Tests
+// ─────────────────────────────────────────────────────────────────────────────
+
+/// Unit tests for protocol argument construction, output parsing, config
+/// conversions, and the generic `CliBackend` plumbing. All tests are pure —
+/// no subprocesses are spawned.
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    // Fully-populated spec fixture exercising every optional field.
+    fn dummy_spec(name: Option<&str>) -> ContainerSpec {
+        ContainerSpec {
+            image: "alpine:latest".into(),
+            name: name.map(String::from),
+            ports: Some(vec!["8080:80".into()]),
+            volumes: Some(vec!["/tmp:/data".into()]),
+            env: Some({
+                let mut m = HashMap::new();
+                m.insert("FOO".into(), "bar".into());
+                m
+            }),
+            cmd: Some(vec!["sh".into(), "-c".into(), "echo hi".into()]),
+            entrypoint: None,
+            network: Some("mynet".into()),
+            rm: Some(true),
+        }
+    }
+
+    // ── DockerProtocol ────────────────────────────────────────────────────
+
+    #[test]
+    fn docker_run_args_contains_expected_flags() {
+        let p = DockerProtocol;
+        let spec = dummy_spec(Some("mycontainer"));
+        let args = p.run_args(&spec);
+        assert!(args.contains(&"run".into()));
+        assert!(args.contains(&"--rm".into()));
+        assert!(args.contains(&"--detach".into()));
+        assert!(args.contains(&"--name".into()));
+        assert!(args.contains(&"mycontainer".into()));
+        assert!(args.contains(&"-p".into()));
+        assert!(args.contains(&"8080:80".into()));
+        assert!(args.contains(&"-v".into()));
+        assert!(args.contains(&"/tmp:/data".into()));
+        assert!(args.contains(&"-e".into()));
+        assert!(args.contains(&"FOO=bar".into()));
+        assert!(args.contains(&"--network".into()));
+        assert!(args.contains(&"mynet".into()));
+        assert!(args.contains(&"alpine:latest".into()));
+    }
+
+    #[test]
+    fn docker_stop_args_with_timeout() {
+        let p = DockerProtocol;
+        let args = p.stop_args("abc123", Some(10));
+        assert_eq!(args, vec!["stop", "-t", "10", "abc123"]);
+    }
+
+    #[test]
+    fn docker_stop_args_no_timeout() {
+        let p = DockerProtocol;
+        let args = p.stop_args("abc123", None);
+        assert_eq!(args, vec!["stop", "abc123"]);
+    }
+
+    #[test]
+    fn docker_remove_args_force() {
+        let p = DockerProtocol;
+        assert_eq!(p.remove_args("c1", true), vec!["rm", "-f", "c1"]);
+        assert_eq!(p.remove_args("c1", false), vec!["rm", "c1"]);
+    }
+
+    #[test]
+    fn docker_list_args() {
+        let p = DockerProtocol;
+        assert!(p.list_args(true).contains(&"--all".into()));
+        assert!(!p.list_args(false).contains(&"--all".into()));
+    }
+
+    // parse_list_output accepts both a single JSON array…
+    #[test]
+    fn docker_parse_list_output_array() {
+        let p = DockerProtocol;
+        let json = r#"[{"ID":"abc","Names":["/myapp"],"Image":"nginx","Status":"running","Ports":["80/tcp"],"Created":"2024-01-01"}]"#;
+        let infos = p.parse_list_output(json);
+        assert_eq!(infos.len(), 1);
+        assert_eq!(infos[0].id, "abc");
+        assert_eq!(infos[0].name, "myapp");
+    }
+
+    // …and newline-delimited JSON (one object per line).
+    #[test]
+    fn docker_parse_list_output_ndjson() {
+        let p = DockerProtocol;
+        let json = "{\"ID\":\"abc\",\"Names\":[\"/myapp\"],\"Image\":\"nginx\",\"Status\":\"running\",\"Ports\":[],\"Created\":\"2024-01-01\"}\n{\"ID\":\"def\",\"Names\":[\"/other\"],\"Image\":\"redis\",\"Status\":\"stopped\",\"Ports\":[],\"Created\":\"2024-01-02\"}";
+        let infos = p.parse_list_output(json);
+        assert_eq!(infos.len(), 2);
+    }
+
+    #[test]
+    fn docker_parse_inspect_output() {
+        let p = DockerProtocol;
+        let json = r#"[{"Id":"abc123","Name":"/myapp","Image":"nginx","State":{"Running":true,"Status":"running"},"Created":"2024-01-01"}]"#;
+        let info = p.parse_inspect_output("abc123", json).unwrap();
+        assert_eq!(info.status, "running");
+        assert_eq!(info.name, "myapp");
+    }
+
+    #[test]
+    fn docker_parse_images_output() {
+        let p = DockerProtocol;
+        let json = r#"[{"ID":"sha256:abc","Repository":"nginx","Tag":"latest","Size":50000000,"Created":"2024-01-01"}]"#;
+        let images = p.parse_list_images_output(json);
+        assert_eq!(images.len(), 1);
+        assert_eq!(images[0].repository, "nginx");
+        assert_eq!(images[0].size, 50_000_000);
+    }
+
+    // ── NetworkConfig / VolumeConfig args ─────────────────────────────────
+
+    #[test]
+    fn create_network_args_with_config() {
+        let p = DockerProtocol;
+        let mut labels = HashMap::new();
+        labels.insert("env".into(), "prod".into());
+        let config = NetworkConfig {
+            driver: Some("bridge".into()),
+            labels,
+            internal: true,
+            enable_ipv6: false,
+        };
+        let args = p.create_network_args("mynet", &config);
+        assert!(args.contains(&"network".into()));
+        assert!(args.contains(&"create".into()));
+        assert!(args.contains(&"--driver".into()));
+        assert!(args.contains(&"bridge".into()));
+        assert!(args.contains(&"--label".into()));
+        assert!(args.contains(&"env=prod".into()));
+        assert!(args.contains(&"--internal".into()));
+        assert!(!args.contains(&"--ipv6".into()));
+        // The network name must be the final positional argument.
+        assert!(args.last() == Some(&"mynet".into()));
+    }
+
+    #[test]
+    fn create_volume_args_with_config() {
+        let p = DockerProtocol;
+        let config = VolumeConfig {
+            driver: Some("local".into()),
+            labels: HashMap::new(),
+        };
+        let args = p.create_volume_args("myvol", &config);
+        assert!(args.contains(&"volume".into()));
+        assert!(args.contains(&"create".into()));
+        assert!(args.contains(&"--driver".into()));
+        assert!(args.contains(&"local".into()));
+        assert!(args.last() == Some(&"myvol".into()));
+    }
+
+    // ── From conversions ──────────────────────────────────────────────────
+
+    #[test]
+    fn network_config_from_compose_network() {
+        use crate::types::ListOrDict;
+        let mut cn = ComposeNetwork::default();
+        cn.driver = Some("overlay".into());
+        cn.internal = Some(true);
+        cn.enable_ipv6 = Some(true);
+        cn.labels = Some(ListOrDict::List(vec!["foo=bar".into()]));
+        let nc = NetworkConfig::from(&cn);
+        assert_eq!(nc.driver, Some("overlay".into()));
+        assert!(nc.internal);
+        assert!(nc.enable_ipv6);
+        // List-form labels ("k=v") must be split into map entries.
+        assert_eq!(nc.labels.get("foo"), Some(&"bar".into()));
+    }
+
+    #[test]
+    fn volume_config_from_compose_volume() {
+        use crate::types::ListOrDict;
+        let mut cv = ComposeVolume::default();
+        cv.driver = Some("nfs".into());
+        cv.labels = Some(ListOrDict::List(vec!["tier=data".into()]));
+        let vc = VolumeConfig::from(&cv);
+        assert_eq!(vc.driver, Some("nfs".into()));
+        assert_eq!(vc.labels.get("tier"), Some(&"data".into()));
+    }
+
+    // ── AppleContainerProtocol ────────────────────────────────────────────
+
+    // apple/container differs from docker by not emitting `--detach`.
+    #[test]
+    fn apple_run_args_no_detach() {
+        let p = AppleContainerProtocol;
+        let spec = dummy_spec(Some("mycontainer"));
+        let args = p.run_args(&spec);
+        assert!(!args.contains(&"--detach".into()));
+        assert!(args.contains(&"--rm".into()));
+        assert!(args.contains(&"--name".into()));
+    }
+
+    #[test]
+    fn apple_protocol_name() {
+        let p = AppleContainerProtocol;
+        assert_eq!(p.protocol_name(), "apple/container");
+    }
+
+    // ── LimaProtocol ─────────────────────────────────────────────────────
+
+    #[test]
+    fn lima_subcommand_prefix() {
+        let p = LimaProtocol::new("default");
+        let prefix = p.subcommand_prefix().unwrap();
+        assert_eq!(prefix, vec!["shell", "default", "nerdctl"]);
+    }
+
+    // Lima inherits the Docker-compatible trait defaults for arg building.
+    #[test]
+    fn lima_run_args_delegates_to_docker_defaults() {
+        let lima = LimaProtocol::new("default");
+        let docker = DockerProtocol;
+        let spec = dummy_spec(None);
+        assert_eq!(lima.run_args(&spec), docker.run_args(&spec));
+    }
+
+    #[test]
+    fn lima_protocol_name() {
+        let p = LimaProtocol::new("myvm");
+        assert_eq!(p.protocol_name(), "lima");
+    }
+
+    // ── CliBackend full_args ───────────────────────────────────────────
+
+    #[test]
+    fn cli_backend_full_args_no_prefix() {
+        let backend = CliBackend::new(PathBuf::from("docker"), DockerProtocol);
+        let result = backend.full_args(vec!["ps".into(), "--all".into()]);
+        assert_eq!(result, vec!["ps", "--all"]);
+    }
+
+    #[test]
+    fn cli_backend_full_args_with_lima_prefix() {
+        let backend = CliBackend::new(PathBuf::from("limactl"), LimaProtocol::new("default"));
+        let result = backend.full_args(vec!["ps".into(), "--all".into()]);
+        assert_eq!(result, vec!["shell", "default", "nerdctl", "ps", "--all"]);
+    }
+
+    #[test]
+    fn backend_name_from_path() {
+        let backend = CliBackend::new(PathBuf::from("/usr/bin/podman"), DockerProtocol);
+        assert_eq!(backend.backend_name(), "podman");
+    }
+
+    // ── Type aliases ──────────────────────────────────────────────────────
+
+    #[test]
+    fn type_aliases_compile() {
+        let _: DockerBackend = CliBackend::new(PathBuf::from("docker"), DockerProtocol);
+        let _: AppleBackend = CliBackend::new(PathBuf::from("container"), AppleContainerProtocol);
+        let _: LimaBackend =
+            CliBackend::new(PathBuf::from("limactl"), LimaProtocol::new("default"));
+    }
+
+    // ── BackendProbeResult serialization ─────────────────────────────────
+
+    #[test]
+    fn probe_result_round_trip() {
+        let r = BackendProbeResult {
+            name: "podman".into(),
+            available: false,
+            reason: "not found".into(),
+        };
+        let json = serde_json::to_string(&r).unwrap();
+        let r2: BackendProbeResult = serde_json::from_str(&json).unwrap();
+        assert_eq!(r2.name, "podman");
+        assert!(!r2.available);
+    }
+}
diff --git a/crates/perry-container-compose/src/cli.rs b/crates/perry-container-compose/src/cli.rs
new file mode 100644
index 00000000..608856cc
--- /dev/null
+++ b/crates/perry-container-compose/src/cli.rs
@@ -0,0 +1,263 @@
+//! CLI entry point for `perry-compose` binary.
+//!
+//! clap-based CLI with all subcommands.
+
+use crate::compose::ComposeEngine;
+use crate::error::Result;
+use crate::project::ComposeProject;
+use clap::{Args, Parser, Subcommand};
+use std::path::PathBuf;
+use std::sync::Arc;
+
+/// perry-compose: Docker Compose-like experience for Apple Container / Podman
+//
+// Fix: field type generics (`Vec<PathBuf>`, `Option<String>`) were stripped
+// in transit; restored (`PathBuf` is already imported at the top of the file).
+#[derive(Parser, Debug)]
+#[command(
+    name = "perry-compose",
+    version,
+    about = "Docker Compose-like CLI for container backends, powered by Perry",
+    long_about = None
+)]
+pub struct Cli {
+    /// Path to compose file(s)
+    #[arg(short = 'f', long = "file", value_name = "FILE", global = true)]
+    pub files: Vec<PathBuf>,
+
+    /// Project name (default: directory name)
+    #[arg(short = 'p', long = "project-name", global = true)]
+    pub project_name: Option<String>,
+
+    /// Environment file(s)
+    #[arg(long = "env-file", value_name = "FILE", global = true)]
+    pub env_files: Vec<PathBuf>,
+
+    #[command(subcommand)]
+    pub command: Commands,
+}
+
+/// Top-level subcommands; mirrors the familiar docker-compose surface.
+#[derive(Subcommand, Debug)]
+pub enum Commands {
+    /// Start services
+    Up(UpArgs),
+    /// Stop and remove services
+    Down(DownArgs),
+    /// Start existing stopped services
+    Start(ServiceArgs),
+    /// Stop running services
+    Stop(ServiceArgs),
+    /// Restart services
+    Restart(ServiceArgs),
+    /// List service status
+    Ps(PsArgs),
+    /// View output from containers
+    Logs(LogsArgs),
+    /// Execute a command in a running service
+    Exec(ExecArgs),
+    /// Validate and view the Compose file
+    Config(ConfigArgs),
+}
+
+/// Arguments for `perry-compose up`.
+//
+// Fix: `services: Vec<String>` lost its generic argument in transit; restored.
+#[derive(Args, Debug)]
+pub struct UpArgs {
+    /// Run containers in the background
+    #[arg(short = 'd', long = "detach")]
+    pub detach: bool,
+    /// Build images before starting containers
+    #[arg(long = "build")]
+    pub build: bool,
+    /// Remove containers for services not defined in the compose file
+    #[arg(long = "remove-orphans")]
+    pub remove_orphans: bool,
+    /// Optional subset of services to bring up (all when empty)
+    pub services: Vec<String>,
+}
+
+/// Arguments for `perry-compose down`.
+//
+// Fix: `services: Vec<String>` lost its generic argument in transit; restored.
+#[derive(Args, Debug)]
+pub struct DownArgs {
+    /// Also remove named volumes declared in the compose file
+    #[arg(short = 'v', long = "volumes")]
+    pub volumes: bool,
+    /// Remove containers for services not defined in the compose file
+    #[arg(long = "remove-orphans")]
+    pub remove_orphans: bool,
+    /// Optional subset of services to take down (all when empty)
+    pub services: Vec<String>,
+}
+
+/// Shared positional argument list for start/stop/restart.
+//
+// Fix: `services: Vec<String>` lost its generic argument in transit; restored.
+#[derive(Args, Debug)]
+pub struct ServiceArgs {
+    /// Optional subset of services to act on (all when empty)
+    pub services: Vec<String>,
+}
+
+/// Arguments for `perry-compose ps`.
+//
+// Fix: `services: Vec<String>` lost its generic argument in transit; restored.
+#[derive(Args, Debug)]
+pub struct PsArgs {
+    /// Show all containers (including stopped ones)
+    #[arg(short = 'a', long = "all")]
+    pub all: bool,
+    /// Optional subset of services to show (all when empty)
+    pub services: Vec<String>,
+}
+
+/// Arguments for `perry-compose logs`.
+//
+// Fix: `tail: Option<usize>` and `services: Vec<String>` lost their generic
+// arguments in transit; restored. NOTE(review): `usize` for `tail` is
+// inferred from `ContainerBackend::logs` — confirm the width.
+#[derive(Args, Debug)]
+pub struct LogsArgs {
+    /// Follow log output
+    #[arg(short = 'f', long = "follow")]
+    pub follow: bool,
+    /// Number of lines to show from the end of the logs
+    #[arg(long = "tail")]
+    pub tail: Option<usize>,
+    /// Show timestamps
+    #[arg(short = 't', long = "timestamps")]
+    pub timestamps: bool,
+    /// Optional subset of services to show logs for (all when empty)
+    pub services: Vec<String>,
+}
+
+/// Arguments for `perry-compose exec`.
+//
+// Fix: `Vec<String>` / `Option<String>` field generics were stripped in
+// transit; restored.
+#[derive(Args, Debug)]
+pub struct ExecArgs {
+    /// Service whose container the command runs in
+    pub service: String,
+    /// Command and its arguments
+    pub cmd: Vec<String>,
+    /// Run as this user
+    #[arg(short = 'u', long = "user")]
+    pub user: Option<String>,
+    /// Working directory inside the container
+    #[arg(short = 'w', long = "workdir")]
+    pub workdir: Option<String>,
+    /// Extra environment variables in KEY=VALUE form
+    #[arg(short = 'e', long = "env")]
+    pub env: Vec<String>,
+}
+
+/// Arguments for `perry-compose config`.
+#[derive(Args, Debug)]
+pub struct ConfigArgs {
+    // Output format: "yaml" (default) or "json".
+    #[arg(long = "format", default_value = "yaml")]
+    pub format: String,
+    // Accepted for docker-compose CLI compatibility; see dispatch for usage.
+    #[arg(long = "resolve-image-digests")]
+    pub resolve: bool,
+}
+
+// ============ Command dispatch ============
+
+/// Load the project, detect a backend, build the engine, and dispatch the
+/// requested subcommand.
+///
+/// # Errors
+/// Propagates project-loading, backend-detection, and engine errors as
+/// `ComposeError` via the crate `Result`.
+//
+// Fix: two stripped generic annotations restored — `Arc<dyn ContainerBackend>`
+// for the detected backend and `HashMap<String, String>` for the exec env.
+pub async fn run(cli: Cli) -> Result<()> {
+    let config = crate::config::ProjectConfig::new(
+        cli.files.clone(),
+        cli.project_name.clone(),
+        cli.env_files.clone(),
+    );
+    let project = ComposeProject::load(&config)?;
+    // `detect_backend` yields a Box; Arc::from re-wraps it for shared use.
+    let backend: Arc<dyn ContainerBackend> =
+        Arc::from(crate::backend::detect_backend().await?);
+    let engine = Arc::new(ComposeEngine::new(
+        project.spec.clone(),
+        project.project_name.clone(),
+        backend,
+    ));
+
+    match cli.command {
+        Commands::Up(args) => {
+            engine
+                .up(&args.services, args.detach, args.build, args.remove_orphans)
+                .await?;
+        }
+
+        Commands::Down(args) => {
+            engine.down(args.volumes, args.remove_orphans).await?;
+        }
+
+        Commands::Start(args) => {
+            engine.start(&args.services).await?;
+        }
+
+        Commands::Stop(args) => {
+            engine.stop(&args.services).await?;
+        }
+
+        Commands::Restart(args) => {
+            engine.restart(&args.services).await?;
+        }
+
+        Commands::Ps(_args) => {
+            let infos = engine.ps().await?;
+            print_ps_table(&infos);
+        }
+
+        Commands::Logs(args) => {
+            // Only the first named service is honoured; None means all.
+            let service = args.services.first().map(|s| s.as_str());
+            let logs = engine.logs(service, args.tail).await?;
+            if !logs.stdout.is_empty() {
+                print!("{}", logs.stdout);
+            }
+            if !logs.stderr.is_empty() {
+                eprint!("{}", logs.stderr);
+            }
+        }
+
+        Commands::Exec(args) => {
+            // Parse KEY=VALUE pairs; a pair without '=' is silently dropped,
+            // a bare "KEY=" yields an empty value.
+            let env: std::collections::HashMap<String, String> = args
+                .env
+                .iter()
+                .filter_map(|e| {
+                    let mut parts = e.splitn(2, '=');
+                    let k = parts.next()?.to_owned();
+                    let v = parts.next().unwrap_or("").to_owned();
+                    Some((k, v))
+                })
+                .collect();
+
+            let cmd = args.cmd.clone();
+            if args.user.is_some() || args.workdir.is_some() || !env.is_empty() {
+                // Use backend directly for user/workdir/env support
+                let svc = engine
+                    .spec
+                    .services
+                    .get(&args.service)
+                    .ok_or_else(|| crate::error::ComposeError::NotFound(args.service.clone()))?;
+                let container_name =
+                    crate::service::service_container_name(svc, &args.service);
+
+                let result = engine
+                    .backend
+                    .exec(
+                        &container_name,
+                        &cmd,
+                        if env.is_empty() { None } else { Some(&env) },
+                        args.workdir.as_deref(),
+                    )
+                    .await?;
+
+                print!("{}", result.stdout);
+                eprint!("{}", result.stderr);
+            } else {
+                let result = engine.exec(&args.service, &cmd).await?;
+                print!("{}", result.stdout);
+                eprint!("{}", result.stderr);
+            }
+        }
+
+        Commands::Config(args) => {
+            let yaml = engine.config()?;
+            if args.format == "json" {
+                // Round-trip through serde_yaml::Value to re-emit as JSON.
+                let value: serde_yaml::Value = serde_yaml::from_str(&yaml)?;
+                let json = serde_json::to_string_pretty(&value)?;
+                println!("{}", json);
+            } else {
+                println!("{}", yaml);
+            }
+        }
+    }
+
+    Ok(())
+}
+
+fn print_ps_table(infos: &[crate::types::ContainerInfo]) {
+ let col_w_svc = 24usize;
+ let col_w_status = 12usize;
+ let col_w_container = 36usize;
+
+ println!(
+ "{:>>,
+> = once_cell::sync::Lazy::new(|| std::sync::Mutex::new(IndexMap::new()));
+
+/// Next available stack ID.
+static NEXT_STACK_ID: AtomicU64 = AtomicU64::new(1);
+
+/// The compose orchestration engine.
+//
+// Fix: `backend: Arc<dyn ContainerBackend>` lost its generic argument in
+// transit; restored.
+pub struct ComposeEngine {
+    /// Parsed and merged compose specification.
+    pub spec: ComposeSpec,
+    /// Project name used to derive container/network/volume names.
+    pub project_name: String,
+    /// Type-erased container runtime backend shared across tasks.
+    pub backend: Arc<dyn ContainerBackend>,
+}
+
+impl ComposeEngine {
+ // ── 8.2 Constructor ──────────────────────────────────────────────────
+
+    /// Create a new `ComposeEngine` from a parsed spec, project name, and a
+    /// shared backend.
+    //
+    // Fix: `backend: Arc<dyn ContainerBackend>` lost its generic argument in
+    // transit; restored.
+    pub fn new(
+        spec: ComposeSpec,
+        project_name: String,
+        backend: Arc<dyn ContainerBackend>,
+    ) -> Self {
+        ComposeEngine {
+            spec,
+            project_name,
+            backend,
+        }
+    }
+
+    /// Register this engine in the global registry and return a handle
+    /// carrying the freshly-allocated stack id and the service names.
+    //
+    // Fix: `self: &Arc<Self>` and `Vec<String>` lost their generic arguments
+    // in transit; restored.
+    fn register(self: &Arc<Self>) -> ComposeHandle {
+        let stack_id = NEXT_STACK_ID.fetch_add(1, Ordering::SeqCst);
+        let services: Vec<String> = self.spec.services.keys().cloned().collect();
+        let handle = ComposeHandle {
+            stack_id,
+            project_name: self.project_name.clone(),
+            services,
+        };
+        COMPOSE_ENGINES
+            .lock()
+            .unwrap()
+            .insert(stack_id, Arc::clone(self));
+        handle
+    }
+
+    /// Look up an engine by stack ID; `None` when the id is unknown.
+    //
+    // Fix: the return type (`Option<Arc<ComposeEngine>>`) lost its generic
+    // arguments in transit; restored.
+    pub fn get_engine(stack_id: u64) -> Option<Arc<ComposeEngine>> {
+        COMPOSE_ENGINES.lock().unwrap().get(&stack_id).cloned()
+    }
+
+    /// Remove an engine from the registry (no-op when the id is absent).
+    pub fn unregister(stack_id: u64) {
+        let mut engines = COMPOSE_ENGINES.lock().unwrap();
+        engines.shift_remove(&stack_id);
+    }
+
+    // ── 8.3 up ───────────────────────────────────────────────────────────
+
+    /// Bring up services in dependency order.
+    ///
+    /// 1. Creates all networks (skipping external ones).
+    /// 2. Creates all named volumes (skipping external ones).
+    /// 3. Starts services in `resolve_startup_order()` order.
+    /// 4. On any failure: rolls back all previously started containers in
+    ///    reverse order, removes created networks and volumes, then returns
+    ///    `ComposeError::ServiceStartupFailed`.
+    ///
+    /// On success the engine is registered in the global registry and the
+    /// resulting handle is returned. `_detach` and `_remove_orphans` are
+    /// accepted for CLI parity but are currently unused.
+    pub async fn up(
+        self: &Arc,
+        services: &[String],
+        _detach: bool,
+        build: bool,
+        _remove_orphans: bool,
+    ) -> Result {
+        let order = resolve_startup_order(&self.spec)?;
+
+        // Filter to target services (preserve dependency order)
+        // NOTE(review): explicitly-named services are NOT expanded to include
+        // their dependencies, and unknown names are silently dropped by the
+        // filter — confirm both behaviors are intended.
+        let target: Vec = if services.is_empty() {
+            order.clone()
+        } else {
+            order
+                .into_iter()
+                .filter(|s| services.contains(s))
+                .collect()
+        };
+
+        // ── 1. Create networks ────────────────────────────────────────────
+        let mut created_networks: Vec = Vec::new();
+        if let Some(networks) = &self.spec.networks {
+            for (net_name, net_config_opt) in networks {
+                // External networks are managed outside this stack — skip.
+                let external = net_config_opt
+                    .as_ref()
+                    .map_or(false, |c| c.external.unwrap_or(false));
+                if external {
+                    continue;
+                }
+                // An explicit `name:` overrides the map key.
+                let resolved_name = net_config_opt
+                    .as_ref()
+                    .and_then(|c| c.name.as_deref())
+                    .unwrap_or(net_name.as_str())
+                    .to_string();
+                let config = net_config_opt
+                    .as_ref()
+                    .map(NetworkConfig::from)
+                    .unwrap_or_default();
+                tracing::info!("Creating network '{}'…", resolved_name);
+                if let Err(e) = self.backend.create_network(&resolved_name, &config).await {
+                    // Undo networks created so far before bailing out.
+                    for n in created_networks.iter().rev() {
+                        let _ = self.backend.remove_network(n).await;
+                    }
+                    return Err(ComposeError::ServiceStartupFailed {
+                        service: format!("network/{}", net_name),
+                        message: e.to_string(),
+                    });
+                }
+                created_networks.push(resolved_name);
+            }
+        }
+
+        // ── 2. Create volumes ─────────────────────────────────────────────
+        let mut created_volumes: Vec = Vec::new();
+        if let Some(volumes) = &self.spec.volumes {
+            for (vol_name, vol_config_opt) in volumes {
+                // External volumes are managed outside this stack — skip.
+                let external = vol_config_opt
+                    .as_ref()
+                    .map_or(false, |c| c.external.unwrap_or(false));
+                if external {
+                    continue;
+                }
+                let resolved_name = vol_config_opt
+                    .as_ref()
+                    .and_then(|c| c.name.as_deref())
+                    .unwrap_or(vol_name.as_str())
+                    .to_string();
+                let config = vol_config_opt
+                    .as_ref()
+                    .map(VolumeConfig::from)
+                    .unwrap_or_default();
+                tracing::info!("Creating volume '{}'…", resolved_name);
+                if let Err(e) = self.backend.create_volume(&resolved_name, &config).await {
+                    // Undo volumes AND networks created so far.
+                    for v in created_volumes.iter().rev() {
+                        let _ = self.backend.remove_volume(v).await;
+                    }
+                    for n in created_networks.iter().rev() {
+                        let _ = self.backend.remove_network(n).await;
+                    }
+                    return Err(ComposeError::ServiceStartupFailed {
+                        service: format!("volume/{}", vol_name),
+                        message: e.to_string(),
+                    });
+                }
+                created_volumes.push(resolved_name);
+            }
+        }
+
+        // ── 3. Start services in dependency order ─────────────────────────
+        let mut started_containers: Vec = Vec::new();
+
+        for svc_name in &target {
+            let svc = self
+                .spec
+                .services
+                .get(svc_name)
+                .ok_or_else(|| ComposeError::NotFound(svc_name.clone()))?;
+
+            let container_name = service::service_container_name(svc, svc_name);
+
+            // Idempotency: reuse an existing container where possible.
+            match self.backend.inspect(&container_name).await {
+                Ok(info) if info.status.to_lowercase().contains("running") => {
+                    tracing::debug!("Service '{}' already running", svc_name);
+                    continue;
+                }
+                Ok(_) => {
+                    // Exists but stopped — start it
+                    tracing::info!("Starting existing container for '{}'…", svc_name);
+                    if let Err(e) = self.backend.start(&container_name).await {
+                        self.rollback_startup(
+                            &started_containers,
+                            &created_networks,
+                            &created_volumes,
+                        )
+                        .await;
+                        return Err(ComposeError::ServiceStartupFailed {
+                            service: svc_name.clone(),
+                            message: e.to_string(),
+                        });
+                    }
+                    started_containers.push(container_name);
+                    continue;
+                }
+                Err(ComposeError::NotFound(_)) => {
+                    // Container doesn't exist — fall through to create it
+                }
+                Err(e) => {
+                    // Any other inspect failure aborts and rolls back.
+                    self.rollback_startup(
+                        &started_containers,
+                        &created_networks,
+                        &created_volumes,
+                    )
+                    .await;
+                    return Err(ComposeError::ServiceStartupFailed {
+                        service: svc_name.clone(),
+                        message: e.to_string(),
+                    });
+                }
+            }
+
+            // Optionally pull/build image
+            // NOTE(review): only `pull_image` is called here and failures are
+            // downgraded to warnings — a `build:` section is not actually
+            // built. Confirm this is the intended `--build` behavior.
+            if build && svc.needs_build() {
+                let tag = svc.image_ref(svc_name);
+                tracing::info!("Pulling/building image '{}'…", tag);
+                if let Err(e) = self.backend.pull_image(&tag).await {
+                    tracing::warn!("Could not pull '{}': {}", tag, e);
+                }
+            }
+
+            // Build ContainerSpec from ComposeService
+            let image = svc.image_ref(svc_name);
+            let env = svc.resolved_env();
+            let ports = svc.port_strings();
+            let vols = svc.volume_strings();
+            let cmd = svc.command_list();
+
+            // Only the first declared network is attached at creation time.
+            let network = svc
+                .networks
+                .as_ref()
+                .and_then(|n| n.names().into_iter().next());
+
+            let spec = crate::types::ContainerSpec {
+                image,
+                name: Some(container_name.clone()),
+                ports: if ports.is_empty() { None } else { Some(ports) },
+                volumes: if vols.is_empty() { None } else { Some(vols) },
+                env: if env.is_empty() { None } else { Some(env) },
+                cmd,
+                entrypoint: None,
+                network,
+                rm: Some(false),
+            };
+
+            tracing::info!("Starting service '{}'…", svc_name);
+            if let Err(e) = self.backend.run(&spec).await {
+                self.rollback_startup(
+                    &started_containers,
+                    &created_networks,
+                    &created_volumes,
+                )
+                .await;
+                return Err(ComposeError::ServiceStartupFailed {
+                    service: svc_name.clone(),
+                    message: e.to_string(),
+                });
+            }
+            started_containers.push(container_name);
+        }
+
+        Ok(self.register())
+    }
+
+ /// Roll back a failed `up()` by stopping/removing started containers,
+ /// then removing created networks and volumes.
+ async fn rollback_startup(
+ &self,
+ started_containers: &[String],
+ created_networks: &[String],
+ created_volumes: &[String],
+ ) {
+ for container in started_containers.iter().rev() {
+ let _ = self.backend.stop(container, None).await;
+ let _ = self.backend.remove(container, true).await;
+ }
+ for net in created_networks.iter().rev() {
+ let _ = self.backend.remove_network(net).await;
+ }
+ for vol in created_volumes.iter().rev() {
+ let _ = self.backend.remove_volume(vol).await;
+ }
+ }
+
+    // ── 8.4 down ─────────────────────────────────────────────────────────
+
+    /// Stop and remove all service containers; remove networks; optionally
+    /// remove named volumes.
+    ///
+    /// Teardown is best-effort: individual stop/remove failures are ignored
+    /// (or logged) so every resource is visited. Only a failure to resolve
+    /// the dependency order aborts. `_remove_orphans` is accepted for CLI
+    /// parity but currently unused.
+    pub async fn down(&self, volumes: bool, _remove_orphans: bool) -> Result<()> {
+        let mut order = resolve_startup_order(&self.spec)?;
+        order.reverse(); // Tear down in reverse dependency order
+
+        // 1. Stop and remove containers
+        for svc_name in &order {
+            let svc = match self.spec.services.get(svc_name) {
+                Some(s) => s,
+                None => continue,
+            };
+            let container_name = service::service_container_name(svc, svc_name);
+
+            match self.backend.inspect(&container_name).await {
+                Ok(info) => {
+                    // Only stop containers that are actually running.
+                    if info.status.to_lowercase().contains("running") {
+                        let _ = self.backend.stop(&container_name, None).await;
+                    }
+                    let _ = self.backend.remove(&container_name, true).await;
+                }
+                Err(ComposeError::NotFound(_)) => {}
+                Err(e) => {
+                    tracing::warn!("Error inspecting '{}' during down: {}", container_name, e);
+                }
+            }
+        }
+
+        // 2. Remove networks (non-external, idempotent)
+        if let Some(networks) = &self.spec.networks {
+            for (net_name, net_config_opt) in networks {
+                let external = net_config_opt
+                    .as_ref()
+                    .map_or(false, |c| c.external.unwrap_or(false));
+                if external {
+                    continue;
+                }
+                // An explicit `name:` overrides the map key.
+                let resolved_name = net_config_opt
+                    .as_ref()
+                    .and_then(|c| c.name.as_deref())
+                    .unwrap_or(net_name.as_str());
+                let _ = self.backend.remove_network(resolved_name).await;
+            }
+        }
+
+        // 3. Remove volumes (if requested, non-external)
+        if volumes {
+            if let Some(vols) = &self.spec.volumes {
+                for (vol_name, vol_config_opt) in vols {
+                    let external = vol_config_opt
+                        .as_ref()
+                        .map_or(false, |c| c.external.unwrap_or(false));
+                    if external {
+                        continue;
+                    }
+                    let resolved_name = vol_config_opt
+                        .as_ref()
+                        .and_then(|c| c.name.as_deref())
+                        .unwrap_or(vol_name.as_str());
+                    let _ = self.backend.remove_volume(resolved_name).await;
+                }
+            }
+        }
+
+        Ok(())
+    }
+
+    // ── 8.5 ps / logs / exec ─────────────────────────────────────────────
+
+    /// List the status of all service containers.
+    ///
+    /// Services whose container does not exist are reported with a synthetic
+    /// entry (status `"not found"`) instead of an error; any other backend
+    /// failure aborts. Results are sorted by container name for stable
+    /// output.
+    pub async fn ps(&self) -> Result> {
+        let mut results = Vec::new();
+
+        for (svc_name, svc) in &self.spec.services {
+            let container_name = service::service_container_name(svc, svc_name);
+            match self.backend.inspect(&container_name).await {
+                Ok(info) => results.push(info),
+                Err(ComposeError::NotFound(_)) => {
+                    // Synthesize a placeholder row so the service still shows
+                    // up in the table.
+                    results.push(ContainerInfo {
+                        id: container_name.clone(),
+                        name: container_name,
+                        image: svc.image_ref(svc_name),
+                        status: "not found".to_string(),
+                        ports: svc.port_strings(),
+                        created: String::new(),
+                    });
+                }
+                Err(e) => return Err(e),
+            }
+        }
+
+        results.sort_by(|a, b| a.name.cmp(&b.name));
+        Ok(results)
+    }
+
+    /// Get logs from a service (or all services if `service` is `None`).
+    ///
+    /// When more than one service is selected, every line is prefixed with
+    /// `"<service> | "`; with exactly one, the raw streams are returned
+    /// unprefixed.
+    /// NOTE(review): the `tail` parameter's type argument was lost in
+    /// extraction (presumably a line count) — confirm against VCS.
+    pub async fn logs(
+        &self,
+        service: Option<&str>,
+        tail: Option,
+    ) -> Result {
+        let service_names: Vec = match service {
+            Some(s) => vec![s.to_string()],
+            None => self.spec.services.keys().cloned().collect(),
+        };
+
+        let mut combined_stdout = String::new();
+        let mut combined_stderr = String::new();
+        let multi = service_names.len() > 1;
+
+        for svc_name in &service_names {
+            let svc = self
+                .spec
+                .services
+                .get(svc_name)
+                .ok_or_else(|| ComposeError::NotFound(svc_name.clone()))?;
+            let container_name = service::service_container_name(svc, svc_name);
+            let logs = self.backend.logs(&container_name, tail).await?;
+            if multi {
+                for line in logs.stdout.lines() {
+                    combined_stdout.push_str(&format!("{} | {}\n", svc_name, line));
+                }
+                for line in logs.stderr.lines() {
+                    combined_stderr.push_str(&format!("{} | {}\n", svc_name, line));
+                }
+            } else {
+                // Single-service case: the loop runs exactly once, so plain
+                // assignment keeps the raw streams intact.
+                combined_stdout = logs.stdout;
+                combined_stderr = logs.stderr;
+            }
+        }
+
+        Ok(ContainerLogs {
+            stdout: combined_stdout,
+            stderr: combined_stderr,
+        })
+    }
+
+    /// Execute a command in a running service container.
+    ///
+    /// Fails with `ServiceStartupFailed` when the container exists but is
+    /// not running, and with `NotFound` when the service or its container is
+    /// missing; any other inspect error is propagated unchanged.
+    pub async fn exec(&self, service: &str, cmd: &[String]) -> Result {
+        let svc = self
+            .spec
+            .services
+            .get(service)
+            .ok_or_else(|| ComposeError::NotFound(service.to_owned()))?;
+
+        let container_name = service::service_container_name(svc, service);
+
+        match self.backend.inspect(&container_name).await {
+            Ok(info) if !info.status.to_lowercase().contains("running") => {
+                return Err(ComposeError::ServiceStartupFailed {
+                    service: service.to_owned(),
+                    message: format!("container '{}' is not running", container_name),
+                });
+            }
+            Err(ComposeError::NotFound(_)) => {
+                return Err(ComposeError::NotFound(format!(
+                    "service '{}' container not found",
+                    service
+                )));
+            }
+            Err(e) => return Err(e),
+            // A running container falls through to the exec call below.
+            Ok(_) => {}
+        }
+
+        self.backend.exec(&container_name, cmd, None, None).await
+    }
+
+ // ── 8.6 start / stop / restart ───────────────────────────────────────
+
+ /// Start existing stopped service containers.
+ pub async fn start(&self, services: &[String]) -> Result<()> {
+ let target: Vec = if services.is_empty() {
+ self.spec.services.keys().cloned().collect()
+ } else {
+ services.to_vec()
+ };
+
+ for svc_name in &target {
+ let svc = self
+ .spec
+ .services
+ .get(svc_name)
+ .ok_or_else(|| ComposeError::NotFound(svc_name.clone()))?;
+ let container_name = service::service_container_name(svc, svc_name);
+ self.backend.start(&container_name).await?;
+ }
+
+ Ok(())
+ }
+
+ /// Stop running service containers.
+ pub async fn stop(&self, services: &[String]) -> Result<()> {
+ let target: Vec = if services.is_empty() {
+ self.spec.services.keys().cloned().collect()
+ } else {
+ services.to_vec()
+ };
+
+ for svc_name in &target {
+ let svc = self
+ .spec
+ .services
+ .get(svc_name)
+ .ok_or_else(|| ComposeError::NotFound(svc_name.clone()))?;
+ let container_name = service::service_container_name(svc, svc_name);
+ self.backend.stop(&container_name, None).await?;
+ }
+
+ Ok(())
+ }
+
+ /// Restart service containers (stop then start).
+ pub async fn restart(&self, services: &[String]) -> Result<()> {
+ self.stop(services).await?;
+ self.start(services).await
+ }
+
+ /// Validate and return the resolved compose configuration as YAML.
+ pub fn config(&self) -> Result {
+ self.spec.to_yaml()
+ }
+}
+
+// ── 8.1 Dependency resolution (Kahn's algorithm) ─────────────────────────────
+
+/// Resolve the startup order of services using Kahn's algorithm (BFS topological sort).
+///
+/// Returns services in dependency order (dependencies first). If a cycle is
+/// detected, returns `ComposeError::DependencyCycle` listing all services in
+/// the cycle. Zero-in-degree services are sorted alphabetically for determinism.
+pub fn resolve_startup_order(spec: &ComposeSpec) -> Result> {
+ // Edge direction: if A depends_on B, then B → A (B must start before A).
+ // in_degree[A] = number of services A depends on.
+ let mut in_degree: IndexMap = IndexMap::new();
+ // dependents[B] = list of services that must start after B
+ let mut dependents: IndexMap> = IndexMap::new();
+
+ for name in spec.services.keys() {
+ in_degree.insert(name.clone(), 0);
+ dependents.insert(name.clone(), Vec::new());
+ }
+
+ for (name, service) in &spec.services {
+ if let Some(deps) = &service.depends_on {
+ for dep in deps.service_names() {
+ if !spec.services.contains_key(&dep) {
+ return Err(ComposeError::ValidationError {
+ message: format!(
+ "Service '{}' depends on '{}' which is not defined",
+ name, dep
+ ),
+ });
+ }
+ // A depends on dep → in_degree[A] += 1, dependents[dep] gets A
+ *in_degree.get_mut(name).unwrap() += 1;
+ dependents.get_mut(&dep).unwrap().push(name.clone());
+ }
+ }
+ }
+
+ // Seed BFS queue with zero-in-degree services (sorted for determinism)
+ let mut queue: std::collections::BTreeSet = in_degree
+ .iter()
+ .filter(|(_, °)| deg == 0)
+ .map(|(name, _)| name.clone())
+ .collect();
+
+ let mut order: Vec = Vec::with_capacity(spec.services.len());
+ while let Some(service) = queue.pop_first() {
+ order.push(service.clone());
+ for dependent in dependents.get(&service).unwrap_or(&Vec::new()).clone() {
+ let deg = in_degree.get_mut(&dependent).unwrap();
+ *deg -= 1;
+ if *deg == 0 {
+ queue.insert(dependent);
+ }
+ }
+ }
+
+ if order.len() != spec.services.len() {
+ let cycle_services: Vec = in_degree
+ .iter()
+ .filter(|(_, °)| deg > 0)
+ .map(|(name, _)| name.clone())
+ .collect();
+ return Err(ComposeError::DependencyCycle {
+ services: cycle_services,
+ });
+ }
+
+ Ok(order)
+}
+
+// ── Tests ─────────────────────────────────────────────────────────────────────
+
#[cfg(test)]
mod tests {
    use super::*;
    use crate::types::ComposeService;

    /// Build a `ComposeSpec` from `(service, dependencies)` pairs.
    fn make_compose(edges: &[(&str, &[&str])]) -> ComposeSpec {
        let mut services = IndexMap::new();
        for (name, deps) in edges {
            let mut svc = ComposeService::default();
            if !deps.is_empty() {
                svc.depends_on = Some(crate::types::DependsOnSpec::List(
                    deps.iter().map(|s| s.to_string()).collect(),
                ));
            }
            services.insert(name.to_string(), svc);
        }
        ComposeSpec {
            services,
            ..Default::default()
        }
    }

    #[test]
    fn test_simple_chain() {
        let compose = make_compose(&[("web", &["db"]), ("db", &[]), ("proxy", &["web"])]);
        let order = resolve_startup_order(&compose).unwrap();
        let pos = |name: &str| order.iter().position(|s| s == name).unwrap();
        assert!(pos("db") < pos("web"), "db must precede web");
        assert!(pos("web") < pos("proxy"), "web must precede proxy");
    }

    #[test]
    fn test_no_deps() {
        let compose = make_compose(&[("a", &[]), ("b", &[]), ("c", &[])]);
        let order = resolve_startup_order(&compose).unwrap();
        assert_eq!(order.len(), 3);
    }

    #[test]
    fn test_diamond_dependency() {
        let compose = make_compose(&[
            ("a", &[]),
            ("b", &["a"]),
            ("c", &["a"]),
            ("d", &["b", "c"]),
        ]);
        let order = resolve_startup_order(&compose).unwrap();
        let pos = |name: &str| order.iter().position(|s| s == name).unwrap();
        assert!(pos("a") < pos("b"));
        assert!(pos("a") < pos("c"));
        assert!(pos("b") < pos("d"));
        assert!(pos("c") < pos("d"));
    }

    #[test]
    fn test_cycle_detected() {
        let compose = make_compose(&[("a", &["b"]), ("b", &["a"])]);
        let result = resolve_startup_order(&compose);
        assert!(result.is_err());
        assert!(matches!(
            result.unwrap_err(),
            ComposeError::DependencyCycle { .. }
        ));
    }

    #[test]
    fn test_cycle_lists_all_services() {
        // a -> b -> c -> a (3-node cycle)
        let compose = make_compose(&[("a", &["c"]), ("b", &["a"]), ("c", &["b"])]);
        let result = resolve_startup_order(&compose);
        assert!(result.is_err());
        // The previous `if let` silently passed when a different error
        // variant came back; fail loudly instead.
        match result.unwrap_err() {
            ComposeError::DependencyCycle { services } => {
                assert_eq!(services.len(), 3);
                assert!(services.contains(&"a".to_string()));
                assert!(services.contains(&"b".to_string()));
                assert!(services.contains(&"c".to_string()));
            }
            other => panic!("expected DependencyCycle, got {other:?}"),
        }
    }

    #[test]
    fn test_invalid_dependency() {
        let compose = make_compose(&[("web", &["nonexistent"])]);
        let result = resolve_startup_order(&compose);
        assert!(result.is_err());
        assert!(matches!(
            result.unwrap_err(),
            ComposeError::ValidationError { .. }
        ));
    }

    #[test]
    fn test_deterministic_order() {
        // Services with no deps should be sorted alphabetically
        let compose = make_compose(&[("c", &[]), ("a", &[]), ("b", &[])]);
        let order = resolve_startup_order(&compose).unwrap();
        assert_eq!(order, vec!["a", "b", "c"]);
    }

    #[test]
    fn test_isolated_nodes() {
        // Mix of isolated and chained services: 'a' and 'z' start with zero
        // in-degree; 'm' depends on 'a'. Kahn + sorted queue releases 'a'
        // first, which frees 'm' (m < z), then 'z'.
        let compose = make_compose(&[
            ("z", &[]),
            ("a", &[]),
            ("m", &["a"]),
        ]);
        let order = resolve_startup_order(&compose).unwrap();
        // The previous assertion (`pos(a) < pos(z) || pos(z) < pos(m)`) was
        // vacuously true; pin the exact deterministic order instead.
        assert_eq!(order, vec!["a", "m", "z"]);
    }
}
diff --git a/crates/perry-container-compose/src/config.rs b/crates/perry-container-compose/src/config.rs
new file mode 100644
index 00000000..d5e3857c
--- /dev/null
+++ b/crates/perry-container-compose/src/config.rs
@@ -0,0 +1,266 @@
+//! Project configuration and environment variable resolution.
+//!
+//! Implements the priority chain for compose file discovery and project naming
+//! as defined in the compose-spec and requirements 9.1–9.8.
+
+use crate::error::{ComposeError, Result};
+use std::path::{Path, PathBuf};
+
+/// Default compose file names to search for, in priority order (req 9.6).
+///
+/// Order matters: `find_default_compose_file` returns the first entry that
+/// exists, so `compose.yaml` wins over the legacy `docker-compose.*` names.
+pub const DEFAULT_COMPOSE_FILES: &[&str] = &[
+    "compose.yaml",
+    "compose.yml",
+    "docker-compose.yaml",
+    "docker-compose.yml",
+];
+
/// Project-level configuration holding raw CLI inputs for file paths, project name, and env files.
///
/// This is the *project-level* config struct — distinct from the compose-spec
/// `ComposeConfig` type in `types.rs` which describes a top-level `configs:` entry.
///
/// Use [`ProjectConfig::new`] to construct from CLI args, then pass to
/// [`crate::project::ComposeProject::load`] which runs the full resolution chain.
///
/// NOTE(review): the field generics (`Vec<PathBuf>`, `Option<String>`) were
/// stripped in extraction and are restored here — confirm against VCS.
#[derive(Debug, Clone)]
pub struct ProjectConfig {
    /// Compose file paths from `-f` flags (empty = use env var / default discovery).
    pub compose_files: Vec<PathBuf>,
    /// Project name from `-p` flag (`None` = use env var / directory name).
    pub project_name: Option<String>,
    /// Extra environment file paths from `--env-file` flags.
    pub env_files: Vec<PathBuf>,
}

impl ProjectConfig {
    /// Create a `ProjectConfig` from raw CLI inputs.
    ///
    /// This is a plain data carrier: no resolution is performed here. Call
    /// [`crate::project::ComposeProject::load`] to run the full priority
    /// chain (req 9.1–9.8).
    pub fn new(
        compose_files: Vec<PathBuf>,
        project_name: Option<String>,
        env_files: Vec<PathBuf>,
    ) -> Self {
        Self {
            compose_files,
            project_name,
            env_files,
        }
    }
}
+
/// Resolve the project name.
///
/// Priority (req 9.3, 9.4, 9.7):
/// 1. CLI `-p` / `--project-name` flag
/// 2. `COMPOSE_PROJECT_NAME` environment variable
/// 3. Directory name of the directory containing the primary compose file
///
/// Empty strings at each level fall through to the next source; when even
/// the directory has no final name component, `"project"` is used.
pub fn resolve_project_name(cli_name: Option<&str>, project_dir: &Path) -> String {
    // 1. An explicit, non-empty CLI flag always wins.
    match cli_name {
        Some(name) if !name.is_empty() => return name.to_string(),
        _ => {}
    }

    // 2. Environment variable, when set and non-empty.
    match std::env::var("COMPOSE_PROJECT_NAME") {
        Ok(name) if !name.is_empty() => return name,
        _ => {}
    }

    // 3. Fall back to the directory name (req 9.7).
    match project_dir.file_name() {
        Some(component) => component.to_string_lossy().into_owned(),
        None => "project".to_string(),
    }
}
+
+/// Resolve compose file paths.
+///
+/// Priority (req 9.1, 9.5, 9.6):
+/// 1. CLI `-f` / `--file` flags — returned as-is; missing files produce an error (req 9.8)
+/// 2. `COMPOSE_FILE` environment variable — colon-separated list of paths; missing files error
+/// 3. Default file search in CWD: `compose.yaml`, `compose.yml`, `docker-compose.yaml`,
+/// `docker-compose.yml` (in that order)
+pub fn resolve_compose_files(cli_files: &[PathBuf]) -> Result> {
+ if !cli_files.is_empty() {
+ // Validate every explicitly-specified file exists (req 9.8).
+ for path in cli_files {
+ if !path.exists() {
+ return Err(ComposeError::FileNotFound {
+ path: path.display().to_string(),
+ });
+ }
+ }
+ return Ok(cli_files.to_vec());
+ }
+
+ if let Ok(compose_file_env) = std::env::var("COMPOSE_FILE") {
+ if !compose_file_env.is_empty() {
+ // The compose-spec uses `:` on POSIX and `;` on Windows (req 9.5).
+ #[cfg(target_os = "windows")]
+ let separator = ";";
+ #[cfg(not(target_os = "windows"))]
+ let separator = ":";
+
+ let paths: Vec = compose_file_env
+ .split(separator)
+ .filter(|s| !s.is_empty())
+ .map(PathBuf::from)
+ .collect();
+
+ // Validate every path from the env var (req 9.8).
+ for path in &paths {
+ if !path.exists() {
+ return Err(ComposeError::FileNotFound {
+ path: path.display().to_string(),
+ });
+ }
+ }
+
+ if !paths.is_empty() {
+ return Ok(paths);
+ }
+ }
+ }
+
+ // Fall back to searching CWD for a default compose file (req 9.6).
+ let cwd = std::env::current_dir()?;
+ find_default_compose_file(&cwd)
+}
+
+/// Search `dir` for the first default compose file that exists (req 9.6).
+///
+/// Returns `Err(ComposeError::FileNotFound)` if none are found.
+pub fn find_default_compose_file(dir: &Path) -> Result> {
+ for name in DEFAULT_COMPOSE_FILES {
+ let candidate = dir.join(name);
+ if candidate.exists() {
+ return Ok(vec![candidate]);
+ }
+ }
+ Err(ComposeError::FileNotFound {
+ path: format!(
+ "No compose file found in '{}' (tried: {})",
+ dir.display(),
+ DEFAULT_COMPOSE_FILES.join(", ")
+ ),
+ })
+}
+
+#[cfg(test)]
+mod tests {
+    // NOTE(review): several tests below mutate process-wide environment
+    // variables (COMPOSE_PROJECT_NAME). Under the default multi-threaded
+    // test harness they can race with each other — consider serializing
+    // these tests or isolating the env access.
+    use super::*;
+    use std::fs;
+
+    // Creates (and reuses) a per-test temp directory; never cleaned up,
+    // which is acceptable for short-lived test runs under the OS temp dir.
+    fn make_temp_dir(suffix: &str) -> PathBuf {
+        let dir = std::env::temp_dir().join(format!("perry-config-test-{suffix}"));
+        fs::create_dir_all(&dir).expect("create temp dir");
+        dir
+    }
+
+    // ── resolve_project_name ──────────────────────────────────────────────────
+
+    #[test]
+    fn test_project_name_cli_takes_priority() {
+        let dir = make_temp_dir("cli-priority");
+        let name = resolve_project_name(Some("explicit-name"), &dir);
+        assert_eq!(name, "explicit-name");
+    }
+
+    #[test]
+    fn test_project_name_env_var_fallback() {
+        let dir = make_temp_dir("env-fallback");
+        // Temporarily set the env var; restore afterwards.
+        std::env::set_var("COMPOSE_PROJECT_NAME", "env-project");
+        let name = resolve_project_name(None, &dir);
+        std::env::remove_var("COMPOSE_PROJECT_NAME");
+        assert_eq!(name, "env-project");
+    }
+
+    #[test]
+    fn test_project_name_dir_fallback() {
+        // Ensure env var is not set for this test.
+        std::env::remove_var("COMPOSE_PROJECT_NAME");
+        let dir = make_temp_dir("dir-fallback");
+        let name = resolve_project_name(None, &dir);
+        assert_eq!(name, "perry-config-test-dir-fallback");
+    }
+
+    #[test]
+    fn test_project_name_empty_cli_falls_through_to_env() {
+        let dir = make_temp_dir("empty-cli");
+        std::env::set_var("COMPOSE_PROJECT_NAME", "from-env");
+        let name = resolve_project_name(Some(""), &dir);
+        std::env::remove_var("COMPOSE_PROJECT_NAME");
+        assert_eq!(name, "from-env");
+    }
+
+    // ── resolve_compose_files ─────────────────────────────────────────────────
+
+    #[test]
+    fn test_cli_files_returned_directly() {
+        let dir = make_temp_dir("cli-files");
+        let file = dir.join("compose.yaml");
+        fs::write(&file, "services: {}").unwrap();
+
+        let result = resolve_compose_files(&[file.clone()]).unwrap();
+        assert_eq!(result, vec![file]);
+    }
+
+    #[test]
+    fn test_cli_file_missing_returns_error() {
+        let missing = PathBuf::from("/nonexistent/path/compose.yaml");
+        let err = resolve_compose_files(&[missing.clone()]).unwrap_err();
+        match err {
+            ComposeError::FileNotFound { path } => {
+                assert!(path.contains("nonexistent"));
+            }
+            other => panic!("expected FileNotFound, got {other:?}"),
+        }
+    }
+
+    #[test]
+    fn test_default_file_discovery_compose_yaml() {
+        let dir = make_temp_dir("default-discovery");
+        let file = dir.join("compose.yaml");
+        fs::write(&file, "services: {}").unwrap();
+
+        // Use find_default_compose_file directly to avoid set_current_dir races.
+        let result = find_default_compose_file(&dir).unwrap();
+        assert_eq!(result.len(), 1);
+        assert_eq!(result[0].file_name().unwrap(), "compose.yaml");
+    }
+
+    #[test]
+    fn test_default_file_discovery_docker_compose_yml_fallback() {
+        let dir = make_temp_dir("docker-compose-fallback");
+        let file = dir.join("docker-compose.yml");
+        fs::write(&file, "services: {}").unwrap();
+
+        let result = find_default_compose_file(&dir).unwrap();
+        assert_eq!(result.len(), 1);
+        assert_eq!(result[0].file_name().unwrap(), "docker-compose.yml");
+    }
+
+    #[test]
+    fn test_no_compose_file_returns_error() {
+        let dir = make_temp_dir("no-file");
+        let result = find_default_compose_file(&dir);
+        assert!(matches!(result, Err(ComposeError::FileNotFound { .. })));
+    }
+
+    // ── ProjectConfig::new ────────────────────────────────────────────────────
+
+    #[test]
+    fn test_project_config_new_stores_raw_inputs() {
+        let dir = make_temp_dir("project-config");
+        let file = dir.join("compose.yaml");
+        fs::write(&file, "services: {}").unwrap();
+
+        let cfg = ProjectConfig::new(vec![file.clone()], Some("my-project".into()), vec![]);
+        assert_eq!(cfg.project_name, Some("my-project".to_string()));
+        assert_eq!(cfg.compose_files, vec![file]);
+        assert!(cfg.env_files.is_empty());
+    }
+}
diff --git a/crates/perry-container-compose/src/error.rs b/crates/perry-container-compose/src/error.rs
new file mode 100644
index 00000000..121f3c13
--- /dev/null
+++ b/crates/perry-container-compose/src/error.rs
@@ -0,0 +1,129 @@
+//! Error types for perry-container-compose.
+//!
+//! Defines the canonical `ComposeError` enum and FFI error mapping.
+
+use serde::{Deserialize, Serialize};
+use thiserror::Error;
+
+/// Result of probing a single container backend candidate.
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct BackendProbeResult {
+    /// Name of the probed backend (e.g. "docker", "podman").
+    pub name: String,
+    /// Whether the backend was usable at probe time.
+    pub available: bool,
+    /// Human-readable explanation of the probe outcome.
+    pub reason: String,
+}
+
+/// Top-level crate error
+#[derive(Debug, Error)]
+pub enum ComposeError {
+    /// The `depends_on` graph contains a cycle; lists the services involved.
+    #[error("Dependency cycle detected in services: {services:?}")]
+    DependencyCycle { services: Vec },
+
+    /// A service (or a network/volume it needs) failed to start.
+    #[error("Service '{service}' failed to start: {message}")]
+    ServiceStartupFailed { service: String, message: String },
+
+    /// The container backend reported a failure with an exit code.
+    #[error("Backend error (exit {code}): {message}")]
+    BackendError { code: i32, message: String },
+
+    /// A named entity (service, container, engine, …) does not exist.
+    #[error("Not found: {0}")]
+    NotFound(String),
+
+    /// YAML (de)serialization failure.
+    #[error("Parse error: {0}")]
+    ParseError(#[from] serde_yaml::Error),
+
+    /// JSON (de)serialization failure.
+    #[error("JSON error: {0}")]
+    JsonError(#[from] serde_json::Error),
+
+    /// Underlying I/O failure.
+    #[error("I/O error: {0}")]
+    IoError(#[from] std::io::Error),
+
+    /// Semantic validation of the compose configuration failed.
+    #[error("Validation error: {message}")]
+    ValidationError { message: String },
+
+    /// Verification of the named image failed.
+    #[error("Image verification failed for '{image}': {reason}")]
+    VerificationFailed { image: String, reason: String },
+
+    /// A required file (e.g. a compose file) was not found.
+    #[error("File not found: {path}")]
+    FileNotFound { path: String },
+
+    /// No usable container backend after probing every candidate.
+    #[error("No container backend found. Probed: {probed:?}")]
+    NoBackendFound { probed: Vec },
+
+    /// A specific backend exists but is not currently usable.
+    #[error("Backend '{name}' is not available: {reason}")]
+    BackendNotAvailable { name: String, reason: String },
+}
+
+impl ComposeError {
+ pub fn validation(msg: impl Into) -> Self {
+ ComposeError::ValidationError {
+ message: msg.into(),
+ }
+ }
+}
+
+pub type Result = std::result::Result;
+
+/// Convert a `ComposeError` to a JSON string `{ "message": "...", "code": N }`
+/// suitable for passing across the FFI boundary.
+pub fn compose_error_to_js(e: &ComposeError) -> String {
+ let code = match e {
+ ComposeError::NotFound(_) => 404,
+ ComposeError::BackendError { code, .. } => *code,
+ ComposeError::DependencyCycle { .. } => 422,
+ ComposeError::ValidationError { .. } => 400,
+ ComposeError::VerificationFailed { .. } => 403,
+ ComposeError::NoBackendFound { .. } => 503,
+ ComposeError::BackendNotAvailable { .. } => 503,
+ _ => 500,
+ };
+ serde_json::json!({
+ "message": e.to_string(),
+ "code": code
+ })
+ .to_string()
+}
+
#[cfg(test)]
mod tests {
    use super::*;

    /// Every error variant maps to the documented FFI status code.
    /// (Replaces the `assert_eq!(…, true)` anti-idiom with plain `assert!`,
    /// and restores the `::<serde_yaml::Value>` turbofish that was stripped
    /// in extraction.)
    #[test]
    fn test_error_codes() {
        let err = ComposeError::NotFound("foo".into());
        assert!(compose_error_to_js(&err).contains("\"code\":404"));

        let err = ComposeError::DependencyCycle {
            services: vec!["a".into()],
        };
        assert!(compose_error_to_js(&err).contains("\"code\":422"));

        let err = ComposeError::ValidationError {
            message: "bad".into(),
        };
        assert!(compose_error_to_js(&err).contains("\"code\":400"));

        let err = ComposeError::VerificationFailed {
            image: "img".into(),
            reason: "fail".into(),
        };
        assert!(compose_error_to_js(&err).contains("\"code\":403"));

        // Parse errors fall through to the catch-all 500.
        let yaml_err = serde_yaml::from_str::<serde_yaml::Value>("bad: [1,2").unwrap_err();
        let err = ComposeError::ParseError(yaml_err);
        assert!(compose_error_to_js(&err).contains("\"code\":500"));

        let err = ComposeError::NoBackendFound {
            probed: vec![BackendProbeResult {
                name: "docker".into(),
                available: false,
                reason: "not found".into(),
            }],
        };
        assert!(compose_error_to_js(&err).contains("\"code\":503"));

        let err = ComposeError::BackendNotAvailable {
            name: "podman".into(),
            reason: "machine not running".into(),
        };
        assert!(compose_error_to_js(&err).contains("\"code\":503"));
    }
}
diff --git a/crates/perry-container-compose/src/ffi.rs b/crates/perry-container-compose/src/ffi.rs
new file mode 100644
index 00000000..4f92968f
--- /dev/null
+++ b/crates/perry-container-compose/src/ffi.rs
@@ -0,0 +1,200 @@
+//! FFI exports for Perry TypeScript integration.
+//!
+//! Each function follows the Perry FFI convention:
+//! - String arguments arrive as `*const StringHeader` (Perry runtime layout)
+//! - Results are serialised to JSON strings before being handed back to JS
+
+use crate::compose::ComposeEngine;
+use std::path::PathBuf;
+use std::sync::Arc;
+
+// ──────────────────────────────────────────────────────────────
+// Minimal re-implementation of the Perry runtime string types
+// ──────────────────────────────────────────────────────────────
+
+#[repr(C)]
+pub struct StringHeader {
+ pub length: u32,
+}
+
+unsafe fn string_from_header(ptr: *const StringHeader) -> Option<String> {
+ if ptr.is_null() || (ptr as usize) < 0x1000 {
+ return None;
+ }
+ let len = (*ptr).length as usize;
+ let data_ptr = (ptr as *const u8).add(std::mem::size_of::<StringHeader>());
+ let bytes = std::slice::from_raw_parts(data_ptr, len);
+ Some(String::from_utf8_lossy(bytes).into_owned())
+}
+
+// ──────────────────────────────────────────────────────────────
+// Helpers
+// ──────────────────────────────────────────────────────────────
+
+fn json_ok(value: &str) -> *const StringHeader {
+ let payload = format!("{{\"ok\":true,\"result\":{}}}", value);
+ heap_string(payload)
+}
+
+fn json_err(message: &str) -> *const StringHeader {
+ let escaped = message.replace('"', "\\\"");
+ let payload = format!("{{\"ok\":false,\"error\":\"{}\"}}", escaped);
+ heap_string(payload)
+}
+
+fn heap_string(s: String) -> *const StringHeader {
+ let bytes = s.into_bytes();
+ let total = std::mem::size_of::<StringHeader>() + bytes.len();
+ let layout = std::alloc::Layout::from_size_align(total, std::mem::align_of::<StringHeader>())
+ .expect("layout");
+ unsafe {
+ let ptr = std::alloc::alloc(layout) as *mut StringHeader;
+ (*ptr).length = bytes.len() as u32;
+ let data_ptr = (ptr as *mut u8).add(std::mem::size_of::<StringHeader>());
+ std::ptr::copy_nonoverlapping(bytes.as_ptr(), data_ptr, bytes.len());
+ ptr as *const StringHeader
+ }
+}
+
+fn block<F: std::future::Future<Output = T>, T>(fut: F) -> T {
+ tokio::runtime::Builder::new_current_thread()
+ .enable_all()
+ .build()
+ .expect("tokio runtime")
+ .block_on(fut)
+}
+
+fn parse_compose_file(file_ptr: *const StringHeader) -> Option<PathBuf> {
+ unsafe { string_from_header(file_ptr) }.map(PathBuf::from)
+}
+
+fn make_engine(files: Vec<PathBuf>) -> Result<Arc<ComposeEngine>, String> {
+ let proj = crate::project::ComposeProject::load_from_files(&files, None, &[])
+ .map_err(|e| e.to_string())?;
+ let backend: Arc<dyn crate::backend::ContainerBackend> = block(crate::backend::detect_backend())
+ .map(Arc::from)
+ .map_err(|e| e.to_string())?;
+ Ok(Arc::new(ComposeEngine::new(proj.spec, proj.project_name, backend)))
+}
+
+// ──────────────────────────────────────────────────────────────
+// Exported FFI functions
+// ──────────────────────────────────────────────────────────────
+
+#[no_mangle]
+pub unsafe extern "C" fn js_compose_start(file_ptr: *const StringHeader) -> *const StringHeader {
+ let files: Vec<PathBuf> = parse_compose_file(file_ptr).into_iter().collect();
+ match make_engine(files) {
+ Err(e) => json_err(&e),
+ Ok(engine) => match block(engine.up(&[], true, false, false)) {
+ Ok(_) => json_ok("null"),
+ Err(e) => json_err(&e.to_string()),
+ },
+ }
+}
+
+#[no_mangle]
+pub unsafe extern "C" fn js_compose_stop(file_ptr: *const StringHeader) -> *const StringHeader {
+ let files: Vec<PathBuf> = parse_compose_file(file_ptr).into_iter().collect();
+ match make_engine(files) {
+ Err(e) => json_err(&e),
+ Ok(engine) => match block(engine.down(false, false)) {
+ Ok(_) => json_ok("null"),
+ Err(e) => json_err(&e.to_string()),
+ },
+ }
+}
+
+#[no_mangle]
+pub unsafe extern "C" fn js_compose_ps(file_ptr: *const StringHeader) -> *const StringHeader {
+ let files: Vec<PathBuf> = parse_compose_file(file_ptr).into_iter().collect();
+ match make_engine(files) {
+ Err(e) => json_err(&e),
+ Ok(engine) => match block(engine.ps()) {
+ Err(e) => json_err(&e.to_string()),
+ Ok(infos) => {
+ let items: Vec<String> = infos
+ .iter()
+ .map(|i| {
+ format!(
+ "{{\"service\":\"{}\",\"container\":\"{}\",\"status\":\"{}\"}}",
+ i.name, i.id, i.status
+ )
+ })
+ .collect();
+ let array = format!("[{}]", items.join(","));
+ json_ok(&array)
+ }
+ },
+ }
+}
+
+#[no_mangle]
+pub unsafe extern "C" fn js_compose_logs(
+ file_ptr: *const StringHeader,
+ services_ptr: *const StringHeader,
+ _follow: bool,
+) -> *const StringHeader {
+ let files: Vec<PathBuf> = parse_compose_file(file_ptr).into_iter().collect();
+ let service: Option<String> = string_from_header(services_ptr)
+ .and_then(|s| serde_json::from_str::<Vec<String>>(&s).ok())
+ .and_then(|v| v.into_iter().next());
+
+ match make_engine(files) {
+ Err(e) => json_err(&e),
+ Ok(engine) => match block(engine.logs(service.as_deref(), None)) {
+ Err(e) => json_err(&e.to_string()),
+ Ok(logs) => {
+ let stdout = logs.stdout.replace('"', "\\\"").replace('\n', "\\n");
+ let stderr = logs.stderr.replace('"', "\\\"").replace('\n', "\\n");
+ let payload = format!("{{\"stdout\":\"{}\",\"stderr\":\"{}\"}}", stdout, stderr);
+ json_ok(&payload)
+ }
+ },
+ }
+}
+
+#[no_mangle]
+pub unsafe extern "C" fn js_compose_exec(
+ file_ptr: *const StringHeader,
+ service_ptr: *const StringHeader,
+ cmd_ptr: *const StringHeader,
+) -> *const StringHeader {
+ let files: Vec<PathBuf> = parse_compose_file(file_ptr).into_iter().collect();
+ let service = match string_from_header(service_ptr) {
+ Some(s) => s,
+ None => return json_err("service name is required"),
+ };
+ let cmd: Vec<String> = string_from_header(cmd_ptr)
+ .and_then(|s| serde_json::from_str::<Vec<String>>(&s).ok())
+ .unwrap_or_default();
+
+ match make_engine(files) {
+ Err(e) => json_err(&e),
+ Ok(engine) => match block(engine.exec(&service, &cmd)) {
+ Err(e) => json_err(&e.to_string()),
+ Ok(result) => {
+ let stdout = result.stdout.replace('"', "\\\"").replace('\n', "\\n");
+ let stderr = result.stderr.replace('"', "\\\"").replace('\n', "\\n");
+ let payload = format!(
+ "{{\"stdout\":\"{}\",\"stderr\":\"{}\"}}",
+ stdout, stderr
+ );
+ json_ok(&payload)
+ }
+ },
+ }
+}
+
+#[no_mangle]
+pub unsafe extern "C" fn js_compose_config(file_ptr: *const StringHeader) -> *const StringHeader {
+ let files: Vec<PathBuf> = parse_compose_file(file_ptr).into_iter().collect();
+ match crate::project::ComposeProject::load_from_files(&files, None, &[]) {
+ Err(e) => json_err(&e.to_string()),
+ Ok(proj) => {
+ let yaml = proj.spec.to_yaml().unwrap_or_default();
+ let escaped = yaml.replace('"', "\\\"").replace('\n', "\\n");
+ json_ok(&format!("\"{}\"", escaped))
+ }
+ }
+}
diff --git a/crates/perry-container-compose/src/lib.rs b/crates/perry-container-compose/src/lib.rs
new file mode 100644
index 00000000..f7a568bb
--- /dev/null
+++ b/crates/perry-container-compose/src/lib.rs
@@ -0,0 +1,35 @@
+//! `perry-container-compose` — Docker Compose-like experience for Apple Container / Podman.
+//!
+//! Can be used:
+//!
+//! 1. As a standalone CLI binary (`perry-compose`)
+//! 2. As a library imported from Perry TypeScript applications
+//! 3. Via FFI from compiled Perry TypeScript code (requires `ffi` feature)
+
+pub mod backend;
+pub mod cli;
+pub mod compose;
+pub mod config;
+pub mod error;
+pub mod project;
+pub mod service;
+pub mod types;
+pub mod yaml;
+
+// FFI exports (Perry TypeScript integration)
+#[cfg(feature = "ffi")]
+pub mod ffi;
+
+// Re-exports
+pub use error::{ComposeError, Result};
+pub use types::{ComposeHandle, ComposeService, ComposeSpec};
+pub use compose::ComposeEngine;
+pub use project::ComposeProject;
+pub use backend::{
+ ContainerBackend, CliBackend, CliProtocol, DockerProtocol, AppleContainerProtocol,
+ LimaProtocol, detect_backend,
+ // Legacy shims kept for backward compatibility
+ Backend, ContainerStatus, ExecResult, get_backend, get_container_backend,
+ NetworkConfig, VolumeConfig,
+};
+pub use error::BackendProbeResult;
diff --git a/crates/perry-container-compose/src/main.rs b/crates/perry-container-compose/src/main.rs
new file mode 100644
index 00000000..73e014c7
--- /dev/null
+++ b/crates/perry-container-compose/src/main.rs
@@ -0,0 +1,21 @@
+//! CLI entry point for `perry-compose` binary.
+
+use clap::Parser;
+use perry_container_compose::cli::{run, Cli};
+use tracing_subscriber::{fmt, EnvFilter};
+
+#[tokio::main]
+async fn main() {
+ // Initialise tracing (RUST_LOG env controls verbosity)
+ fmt()
+ .with_env_filter(EnvFilter::from_default_env())
+ .with_target(false)
+ .init();
+
+ let cli = Cli::parse();
+
+ if let Err(e) = run(cli).await {
+ eprintln!("Error: {}", e);
+ std::process::exit(1);
+ }
+}
diff --git a/crates/perry-container-compose/src/project.rs b/crates/perry-container-compose/src/project.rs
new file mode 100644
index 00000000..3096e313
--- /dev/null
+++ b/crates/perry-container-compose/src/project.rs
@@ -0,0 +1,72 @@
+//! `ComposeProject` — project loading and file discovery.
+
+use crate::config::{self, ProjectConfig};
+use crate::error::Result;
+use crate::types::ComposeSpec;
+use crate::yaml;
+use std::path::{Path, PathBuf};
+
+/// A loaded and resolved compose project.
+pub struct ComposeProject {
+ /// Project name
+ pub project_name: String,
+ /// Working directory
+ pub project_dir: PathBuf,
+ /// Compose file paths
+ pub compose_files: Vec<PathBuf>,
+ /// Merged and interpolated compose spec
+ pub spec: ComposeSpec,
+ /// Resolved environment variables
+ pub env: std::collections::HashMap<String, String>,
+}
+
+impl ComposeProject {
+ /// Convenience: load from raw file paths, project name, and env files.
+ pub fn load_from_files(
+ files: &[PathBuf],
+ project_name: Option<&str>,
+ env_files: &[PathBuf],
+ ) -> Result<Self> {
+ let config = ProjectConfig::new(
+ files.to_vec(),
+ project_name.map(String::from),
+ env_files.to_vec(),
+ );
+ Self::load(&config)
+ }
+
+ /// Load a project from configuration.
+ pub fn load(config: &ProjectConfig) -> Result<Self> {
+ // Resolve compose file paths
+ let files = if config.compose_files.is_empty() {
+ config::resolve_compose_files(&[])? // Use default lookup
+ } else {
+ config.compose_files.clone()
+ };
+
+ let working_dir = files[0]
+ .parent()
+ .unwrap_or(Path::new("."))
+ .to_path_buf();
+
+ // Load environment
+ let env = yaml::load_env(&working_dir, &config.env_files);
+
+ // Parse and merge compose files
+ let spec = yaml::parse_and_merge_files(&files, &env)?;
+
+ // Determine project name
+ let name = config::resolve_project_name(
+ config.project_name.as_deref(),
+ &working_dir,
+ );
+
+ Ok(ComposeProject {
+ project_name: name,
+ project_dir: working_dir,
+ compose_files: files,
+ spec,
+ env,
+ })
+ }
+}
diff --git a/crates/perry-container-compose/src/service.rs b/crates/perry-container-compose/src/service.rs
new file mode 100644
index 00000000..03df03fd
--- /dev/null
+++ b/crates/perry-container-compose/src/service.rs
@@ -0,0 +1,120 @@
+//! Service runtime state and name generation.
+
+use crate::backend::ContainerBackend;
+use crate::types::ComposeService;
+use md5::{Digest, Md5};
+use std::sync::Arc;
+
+/// Generate a unique container name for a service.
+///
+/// Format: `{service_name}-{md5_prefix_8}-{random_hex_8}`
+/// e.g. `web-a1b2c3d4-f0e1d2c3`
+pub fn generate_name(image: &str, service_name: &str) -> String {
+ // MD5 hash of the image name for a stable prefix
+ let mut hasher = Md5::new();
+ hasher.update(image.as_bytes());
+ let hash = hasher.finalize();
+ let hash_str = hex::encode(hash);
+ let short_hash = &hash_str[..8];
+
+ // Random suffix for uniqueness across multiple instances of the same image
+ let random_suffix: u32 = rand::random();
+
+ // Sanitize service name: replace non-alphanumeric (except hyphen) with underscore
+ let safe_name: String = service_name
+ .chars()
+ .map(|c| if c.is_alphanumeric() || c == '-' { c } else { '_' })
+ .collect();
+
+ format!("{}-{}-{:08x}", safe_name, short_hash, random_suffix)
+}
+
+/// Service runtime state tracking.
+pub struct ServiceState {
+ /// Container ID
+ pub container_id: String,
+ /// Container name
+ pub container_name: String,
+ /// Whether the service container is running
+ pub running: bool,
+}
+
+impl ServiceState {
+ /// Create a service state from an explicit container name.
+ pub fn new(container_id: String, container_name: String, running: bool) -> Self {
+ ServiceState {
+ container_id,
+ container_name,
+ running,
+ }
+ }
+
+ /// Check whether the container exists in the backend.
+ ///
+ /// Returns `true` if the container can be inspected (regardless of running state).
+ pub async fn exists(&self, backend: &Arc<dyn ContainerBackend>) -> bool {
+ backend.inspect(&self.container_id).await.is_ok()
+ }
+
+ /// Check whether the container is currently running in the backend.
+ ///
+ /// Queries the backend's inspect output and checks the status field.
+ pub async fn is_running(&self, backend: &Arc<dyn ContainerBackend>) -> bool {
+ match backend.inspect(&self.container_id).await {
+ Ok(info) => {
+ let status = info.status.to_lowercase();
+ status.contains("running") || status.contains("up")
+ }
+ Err(_) => false,
+ }
+ }
+}
+
+/// Generate a container name for a service, using explicit name if set.
+pub fn service_container_name(svc: &ComposeService, service_name: &str) -> String {
+ if let Some(explicit) = svc.explicit_name() {
+ return explicit.to_string();
+ }
+
+ let image = svc.image.as_deref().unwrap_or(service_name);
+ generate_name(image, service_name)
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_generate_name_format() {
+ let name = generate_name("nginx:latest", "web");
+ // Format: {safe_name}-{hash_8}-{random_8}
+ let parts: Vec<&str> = name.split('-').collect();
+ assert_eq!(parts[0], "web");
+ assert_eq!(parts[1].len(), 8);
+ assert_eq!(parts[2].len(), 8);
+ }
+
+ #[test]
+ fn test_same_image_same_hash_prefix() {
+ let name1 = generate_name("nginx:latest", "web");
+ let name2 = generate_name("nginx:latest", "api");
+ // Same image → same hash prefix
+ let hash1 = &name1[name1.find('-').unwrap() + 1..name1.find('-').unwrap() + 9];
+ let hash2 = &name2[name2.find('-').unwrap() + 1..name2.find('-').unwrap() + 9];
+ assert_eq!(hash1, hash2, "same image must produce same hash prefix");
+ }
+
+ #[test]
+ fn test_explicit_name() {
+ let mut svc = ComposeService::default();
+ svc.container_name = Some("my-container".to_string());
+ let name = service_container_name(&svc, "web");
+ assert_eq!(name, "my-container");
+ }
+
+ #[test]
+ fn test_sanitize_service_name() {
+ let name = generate_name("img", "my.service");
+ assert!(name.starts_with("my_service-"), "dots should be replaced");
+ }
+}
diff --git a/crates/perry-container-compose/src/types.rs b/crates/perry-container-compose/src/types.rs
new file mode 100644
index 00000000..0c902d47
--- /dev/null
+++ b/crates/perry-container-compose/src/types.rs
@@ -0,0 +1,724 @@
+//! All compose-spec Rust types.
+//!
+//! This module contains every struct and enum needed to represent a
+//! compose-spec YAML document, plus the opaque `ComposeHandle` returned by
+//! `ComposeEngine::up()`.
+
+use indexmap::IndexMap;
+use serde::{Deserialize, Serialize};
+
+/// Convert a `serde_yaml::Value` to a string representation.
+fn yaml_value_to_str(v: &serde_yaml::Value) -> String {
+ match v {
+ serde_yaml::Value::String(s) => s.clone(),
+ serde_yaml::Value::Number(n) => n.to_string(),
+ serde_yaml::Value::Bool(b) => b.to_string(),
+ serde_yaml::Value::Null => String::new(),
+ _ => format!("{}", serde_yaml::to_string(v).unwrap_or_default()).trim().to_owned(),
+ }
+}
+
+// ============ ListOrDict ============
+
+/// compose-spec `list_or_dict` pattern.
+/// Used for environment, labels, extra_hosts, sysctls, etc.
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(untagged)]
+pub enum ListOrDict {
+ Dict(IndexMap<String, Option<serde_yaml::Value>>),
+ List(Vec<String>),
+}
+
+impl ListOrDict {
+ /// Convert to a flat `HashMap`.
+ /// Dict values are stringified; List entries are split on `=`.
+ pub fn to_map(&self) -> std::collections::HashMap<String, String> {
+ match self {
+ ListOrDict::Dict(map) => map
+ .iter()
+ .map(|(k, v)| {
+ let val = match v {
+ Some(serde_yaml::Value::String(s)) => s.clone(),
+ Some(serde_yaml::Value::Number(n)) => n.to_string(),
+ Some(serde_yaml::Value::Bool(b)) => b.to_string(),
+ Some(serde_yaml::Value::Null) | None => String::new(),
+ Some(other) => {
+ match other {
+ serde_yaml::Value::String(s) => s.clone(),
+ _ => serde_yaml::to_string(other).unwrap_or_else(|_| "{}".to_string()),
+ }
+ }
+ };
+ (k.clone(), val)
+ })
+ .collect(),
+ ListOrDict::List(list) => list
+ .iter()
+ .filter_map(|entry| {
+ let mut parts = entry.splitn(2, '=');
+ let key = parts.next()?.to_owned();
+ let val = parts.next().unwrap_or("").to_owned();
+ Some((key, val))
+ })
+ .collect(),
+ }
+ }
+}
+
+// ============ StringOrList ============
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(untagged)]
+pub enum StringOrList {
+ String(String),
+ List(Vec<String>),
+}
+
+impl StringOrList {
+ pub fn to_list(&self) -> Vec<String> {
+ match self {
+ StringOrList::String(s) => vec![s.clone()],
+ StringOrList::List(l) => l.clone(),
+ }
+ }
+}
+
+// ============ DependsOn ============
+
+/// `depends_on` condition values (compose-spec §service.depends_on)
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
+#[serde(rename_all = "snake_case")]
+pub enum DependsOnCondition {
+ ServiceStarted,
+ ServiceHealthy,
+ ServiceCompletedSuccessfully,
+}
+
+/// Per-dependency entry in the object form of depends_on
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct ComposeDependsOn {
+ pub condition: Option<DependsOnCondition>,
+ #[serde(default)]
+ pub required: Option<bool>,
+ #[serde(default)]
+ pub restart: Option<bool>,
+}
+
+/// `depends_on` can be a list of service names or a map with conditions
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(untagged)]
+pub enum DependsOnSpec {
+ List(Vec<String>),
+ Map(IndexMap<String, ComposeDependsOn>),
+}
+
+impl DependsOnSpec {
+ /// Return all dependency service names.
+ pub fn service_names(&self) -> Vec<String> {
+ match self {
+ DependsOnSpec::List(names) => names.clone(),
+ DependsOnSpec::Map(map) => map.keys().cloned().collect(),
+ }
+ }
+}
+
+// ============ Volume ============
+
+/// Volume mount type (compose-spec §service.volumes[].type)
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
+#[serde(rename_all = "snake_case")]
+pub enum VolumeType {
+ Bind,
+ Volume,
+ Tmpfs,
+ Cluster,
+ Npipe,
+ Image,
+}
+
+/// Long-form volume mount (compose-spec §service.volumes[])
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct ComposeServiceVolume {
+ #[serde(rename = "type")]
+ pub volume_type: VolumeType,
+ pub source: Option<String>,
+ pub target: Option<String>,
+ pub read_only: Option<bool>,
+ pub consistency: Option<String>,
+ pub bind: Option<ComposeServiceVolumeBind>,
+ pub volume: Option<ComposeServiceVolumeOpts>,
+ pub tmpfs: Option<ComposeServiceVolumeTmpfs>,
+ pub image: Option<ComposeServiceVolumeImage>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct ComposeServiceVolumeBind {
+ pub propagation: Option,
+ pub create_host_path: Option,
+ #[serde(rename = "recursive")]
+ pub recursive_opt: Option,
+ pub selinux: Option,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct ComposeServiceVolumeOpts {
+ pub labels: Option,
+ pub nocopy: Option,
+ pub subpath: Option,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct ComposeServiceVolumeTmpfs {
+ pub size: Option,
+ pub mode: Option,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct ComposeServiceVolumeImage {
+ pub subpath: Option,
+}
+
+/// Short or long volume form
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(untagged)]
+pub enum VolumeEntry {
+ Short(String),
+ Long(ComposeServiceVolume),
+}
+
+impl VolumeEntry {
+ /// Convert to "source:target[:ro]" string form for backend CLI args.
+ pub fn to_string_form(&self) -> String {
+ match self {
+ VolumeEntry::Short(s) => s.clone(),
+ VolumeEntry::Long(v) => {
+ let src = v.source.as_deref().unwrap_or("");
+ let tgt = v.target.as_deref().unwrap_or("");
+ if v.read_only.unwrap_or(false) {
+ format!("{}:{}:ro", src, tgt)
+ } else {
+ format!("{}:{}", src, tgt)
+ }
+ }
+ }
+ }
+}
+
+// ============ Port ============
+
+/// Port mapping (long form, compose-spec §service.ports[])
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct ComposeServicePort {
+ pub name: Option<String>,
+ pub mode: Option<String>,
+ pub host_ip: Option<String>,
+ pub target: serde_yaml::Value,
+ pub published: Option<serde_yaml::Value>,
+ pub protocol: Option<String>,
+ pub app_protocol: Option<String>,
+}
+
+/// Port can be a short string/number or a long-form object
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(untagged)]
+pub enum PortSpec {
+ Short(serde_yaml::Value),
+ Long(ComposeServicePort),
+}
+
+impl PortSpec {
+ /// Convert to "host:container" string form for backend CLI args.
+ pub fn to_string_form(&self) -> String {
+ match self {
+ PortSpec::Short(v) => yaml_value_to_str(v),
+ PortSpec::Long(p) => {
+ let container = yaml_value_to_str(&p.target);
+ match &p.published {
+ Some(pub_) => {
+ let host = yaml_value_to_str(pub_);
+ format!("{}:{}", host, container)
+ }
+ None => container,
+ }
+ }
+ }
+ }
+}
+
+// ============ Networks on service ============
+
+/// Service network attachment config
+#[derive(Debug, Clone, Serialize, Deserialize, Default)]
+pub struct ComposeServiceNetworkConfig {
+ pub aliases: Option<Vec<String>>,
+ pub ipv4_address: Option<String>,
+ pub ipv6_address: Option<String>,
+ pub priority: Option<i64>,
+}
+
+/// `networks` field on a service: list or map
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(untagged)]
+pub enum ServiceNetworks {
+ List(Vec<String>),
+ Map(IndexMap<String, Option<ComposeServiceNetworkConfig>>),
+}
+
+impl ServiceNetworks {
+ pub fn names(&self) -> Vec<String> {
+ match self {
+ ServiceNetworks::List(v) => v.clone(),
+ ServiceNetworks::Map(m) => m.keys().cloned().collect(),
+ }
+ }
+}
+
+// ============ Build ============
+
+/// Build configuration (string shorthand or full object)
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(untagged)]
+pub enum BuildSpec {
+ Context(String),
+ Config(ComposeServiceBuild),
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize, Default)]
+pub struct ComposeServiceBuild {
+ pub context: Option,
+ pub dockerfile: Option,
+ pub dockerfile_inline: Option,
+ pub args: Option,
+ pub ssh: Option,
+ pub labels: Option,
+ pub cache_from: Option>,
+ pub cache_to: Option>,
+ pub no_cache: Option,
+ pub additional_contexts: Option>,
+ pub network: Option,
+ pub provenance: Option,
+ pub sbom: Option,
+ pub pull: Option,
+ pub target: Option,
+ pub shm_size: Option,
+ pub extra_hosts: Option,
+ pub isolation: Option,
+ pub privileged: Option,
+ pub secrets: Option>,
+ pub tags: Option>,
+ pub ulimits: Option,
+ pub platforms: Option>,
+ pub entitlements: Option>,
+}
+
+impl BuildSpec {
+ pub fn context(&self) -> Option<&str> {
+ match self {
+ BuildSpec::Context(s) => Some(s.as_str()),
+ BuildSpec::Config(b) => b.context.as_deref(),
+ }
+ }
+
+ pub fn as_build(&self) -> ComposeServiceBuild {
+ match self {
+ BuildSpec::Context(ctx) => ComposeServiceBuild {
+ context: Some(ctx.clone()),
+ ..Default::default()
+ },
+ BuildSpec::Config(b) => b.clone(),
+ }
+ }
+}
+
+// ============ Healthcheck ============
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct ComposeHealthcheck {
+ pub test: serde_yaml::Value,
+ pub interval: Option,
+ pub timeout: Option,
+ pub retries: Option,
+ pub start_period: Option,
+ pub start_interval: Option,
+ pub disable: Option