CliBackend {
+ pub fn new(bin: PathBuf, protocol: P) -> Self { Self { bin, protocol } }
+
+ async fn exec_raw(&self, subcommand_args: Vec) -> Result {
+ let mut cmd = Command::new(&self.bin);
+ if let Some(prefix) = self.protocol.subcommand_prefix() {
+ cmd.args(prefix);
+ }
+ cmd.args(subcommand_args);
+ cmd.stdout(Stdio::piped()).stderr(Stdio::piped());
+ cmd.output().await.map_err(ComposeError::IoError)
+ }
+
+ async fn exec_ok(&self, args: Vec) -> Result {
+ let output = self.exec_raw(args).await?;
+ if output.status.success() {
+ Ok(String::from_utf8_lossy(&output.stdout).to_string())
+ } else {
+ let code = output.status.code().unwrap_or(-1);
+ let stderr = String::from_utf8_lossy(&output.stderr).to_string();
+ Err(ComposeError::BackendError { code, message: stderr })
+ }
+ }
+}
+
+#[async_trait]
+impl ContainerBackend for CliBackend {
+ fn backend_name(&self) -> &str {
+ self.bin.file_name().and_then(|n| n.to_str()).unwrap_or("unknown")
+ }
+ async fn check_available(&self) -> Result<()> {
+ let mut args = self.protocol.subcommand_prefix().unwrap_or_default();
+ args.push("--version".into());
+ let mut cmd = Command::new(&self.bin);
+ cmd.args(args).stdout(Stdio::piped()).stderr(Stdio::piped());
+ let status = cmd.status().await.map_err(ComposeError::IoError)?;
+ if status.success() { Ok(()) } else {
+ Err(ComposeError::BackendNotAvailable {
+ name: self.backend_name().to_string(),
+ reason: "version check failed".to_string(),
+ })
+ }
+ }
+ async fn run(&self, spec: &ContainerSpec) -> Result {
+ let stdout = self.exec_ok(self.protocol.run_args(spec)).await?;
+ let id = self.protocol.parse_container_id(&stdout);
+ Ok(ContainerHandle { id, name: spec.name.clone() })
+ }
+ async fn create(&self, spec: &ContainerSpec) -> Result {
+ let stdout = self.exec_ok(self.protocol.create_args(spec)).await?;
+ let id = self.protocol.parse_container_id(&stdout);
+ Ok(ContainerHandle { id, name: spec.name.clone() })
+ }
+ async fn start(&self, id: &str) -> Result<()> { self.exec_ok(self.protocol.start_args(id)).await?; Ok(()) }
+ async fn stop(&self, id: &str, timeout: Option) -> Result<()> { self.exec_ok(self.protocol.stop_args(id, timeout)).await?; Ok(()) }
+ async fn remove(&self, id: &str, force: bool) -> Result<()> { self.exec_ok(self.protocol.remove_args(id, force)).await?; Ok(()) }
+ async fn list(&self, all: bool) -> Result> {
+ let stdout = self.exec_ok(self.protocol.list_args(all)).await?;
+ Ok(self.protocol.parse_list_output(&stdout))
+ }
+ async fn inspect(&self, id: &str) -> Result {
+ let stdout = self.exec_ok(self.protocol.inspect_args(id)).await?;
+ self.protocol.parse_inspect_output(id, &stdout).ok_or_else(|| ComposeError::NotFound(id.to_string()))
+ }
+ async fn logs(&self, id: &str, tail: Option) -> Result {
+ let stdout = self.exec_ok(self.protocol.logs_args(id, tail)).await?;
+ Ok(ContainerLogs { stdout, stderr: String::new(), exit_code: 0 })
+ }
+ async fn exec(&self, id: &str, cmd: &[String], env: Option<&HashMap>, workdir: Option<&str>) -> Result {
+ let output = self.exec_raw(self.protocol.exec_args(id, cmd, env, workdir)).await?;
+ Ok(ContainerLogs {
+ stdout: String::from_utf8_lossy(&output.stdout).to_string(),
+ stderr: String::from_utf8_lossy(&output.stderr).to_string(),
+ exit_code: output.status.code().unwrap_or(0),
+ })
+ }
+ async fn pull_image(&self, reference: &str) -> Result<()> { self.exec_ok(self.protocol.pull_image_args(reference)).await?; Ok(()) }
+ async fn list_images(&self) -> Result> {
+ let stdout = self.exec_ok(self.protocol.list_images_args()).await?;
+ Ok(self.protocol.parse_list_images_output(&stdout))
+ }
+ async fn remove_image(&self, reference: &str, force: bool) -> Result<()> { self.exec_ok(self.protocol.remove_image_args(reference, force)).await?; Ok(()) }
+ async fn create_network(&self, name: &str, config: &NetworkConfig) -> Result<()> { self.exec_ok(self.protocol.create_network_args(name, config)).await?; Ok(()) }
+ async fn remove_network(&self, name: &str) -> Result<()> { self.exec_ok(self.protocol.remove_network_args(name)).await?; Ok(()) }
+ async fn create_volume(&self, name: &str, config: &VolumeConfig) -> Result<()> { self.exec_ok(self.protocol.create_volume_args(name, config)).await?; Ok(()) }
+ async fn remove_volume(&self, name: &str) -> Result<()> { self.exec_ok(self.protocol.remove_volume_args(name)).await?; Ok(()) }
+}
+
+pub async fn detect_backend() -> Result> {
+ let mut probed = Vec::new();
+ if let Ok(name) = std::env::var("PERRY_CONTAINER_BACKEND") {
+ match probe_candidate(&name).await {
+ Ok(b) => return Ok(b),
+ Err(reason) => {
+ probed.push(BackendProbeResult { name: name.clone(), available: false, reason: reason.to_string() });
+ return Err(ComposeError::NoBackendFound { probed });
+ }
+ }
+ }
+
+ let candidates = if cfg!(target_os = "macos") {
+ vec!["apple/container", "orbstack", "colima", "rancher-desktop", "podman", "lima", "docker"]
+ } else {
+ vec!["podman", "nerdctl", "docker"]
+ };
+
+ for name in candidates {
+ match tokio::time::timeout(Duration::from_secs(2), probe_candidate(name)).await {
+ Ok(Ok(b)) => return Ok(b),
+ Ok(Err(reason)) => probed.push(BackendProbeResult { name: name.to_string(), available: false, reason: reason.to_string() }),
+ Err(_) => probed.push(BackendProbeResult { name: name.to_string(), available: false, reason: "timeout".into() }),
+ }
+ }
+ Err(ComposeError::NoBackendFound { probed })
+}
+
+pub async fn detect_backend_info() -> Result> {
+ let candidates = if cfg!(target_os = "macos") {
+ vec!["apple/container", "orbstack", "colima", "rancher-desktop", "podman", "lima", "docker"]
+ } else {
+ vec!["podman", "nerdctl", "docker"]
+ };
+
+ let mut results = Vec::new();
+ for name in candidates {
+ match tokio::time::timeout(Duration::from_secs(2), probe_candidate(name)).await {
+ Ok(Ok(_)) => results.push(BackendProbeResult { name: name.to_string(), available: true, reason: String::new() }),
+ Ok(Err(reason)) => results.push(BackendProbeResult { name: name.to_string(), available: false, reason: reason.to_string() }),
+ Err(_) => results.push(BackendProbeResult { name: name.to_string(), available: false, reason: "timeout".into() }),
+ }
+ }
+ Ok(results)
+}
+
+async fn probe_candidate(name: &str) -> Result> {
+ match name {
+ "apple/container" => {
+ let bin = which::which("container").map_err(|e| ComposeError::from(e.to_string()))?;
+ Ok(Box::new(CliBackend::new(bin, AppleContainerProtocol)))
+ }
+ "podman" => {
+ let bin = which::which("podman").map_err(|e| ComposeError::from(e.to_string()))?;
+ if cfg!(target_os = "macos") {
+ let output = Command::new(&bin).args(["machine", "list", "--format", "json"]).output().await.map_err(|e| ComposeError::from(e.to_string()))?;
+ let stdout = String::from_utf8_lossy(&output.stdout);
+ if !stdout.contains("\"Running\":true") && !stdout.contains("\"Running\": true") {
+ return Err(ComposeError::BackendNotAvailable { name: "podman".into(), reason: "no running podman machine found".into() });
+ }
+ }
+ Ok(Box::new(CliBackend::new(bin, DockerProtocol)))
+ }
+ "docker" => {
+ let bin = which::which("docker").map_err(|e| ComposeError::from(e.to_string()))?;
+ Ok(Box::new(CliBackend::new(bin, DockerProtocol)))
+ }
+ "orbstack" => {
+ let bin = which::which("orb").map_err(|e| ComposeError::from(e.to_string()))?;
+ // OrbStack also checks for socket at ~/.orbstack/run/docker.sock or orb --version
+ Ok(Box::new(CliBackend::new(bin, DockerProtocol)))
+ }
+ "colima" => {
+ let bin = which::which("colima").map_err(|e| ComposeError::from(e.to_string()))?;
+ let output = Command::new(&bin).arg("status").output().await.map_err(|e| ComposeError::from(e.to_string()))?;
+ let stdout = String::from_utf8_lossy(&output.stdout);
+ if !stdout.contains("running") {
+ return Err(ComposeError::BackendNotAvailable { name: "colima".into(), reason: "colima is not running".into() });
+ }
+ Ok(Box::new(CliBackend::new(bin, DockerProtocol)))
+ }
+ "lima" => {
+ let bin = which::which("limactl").map_err(|e| ComposeError::from(e.to_string()))?;
+ let output = Command::new(&bin).args(["list", "--json"]).output().await.map_err(|e| ComposeError::from(e.to_string()))?;
+ let stdout = String::from_utf8_lossy(&output.stdout);
+ if !stdout.contains("\"Running\"") {
+ return Err(ComposeError::BackendNotAvailable { name: "lima".into(), reason: "no running lima instance found".into() });
+ }
+ Ok(Box::new(CliBackend::new(bin, LimaProtocol { instance: "default".into() })))
+ }
+ "nerdctl" => {
+ let bin = which::which("nerdctl").map_err(|e| ComposeError::from(e.to_string()))?;
+ Ok(Box::new(CliBackend::new(bin, DockerProtocol)))
+ }
+ "rancher-desktop" => {
+ let bin = which::which("nerdctl").map_err(|e| ComposeError::from(e.to_string()))?;
+ Ok(Box::new(CliBackend::new(bin, DockerProtocol)))
+ }
+ _ => Err(ComposeError::Generic("unknown backend".to_string())),
+ }
+}
+
+pub async fn get_backend() -> Result> {
+ detect_backend().await
+}
diff --git a/crates/perry-container-compose/src/cli.rs b/crates/perry-container-compose/src/cli.rs
new file mode 100644
index 000000000..519fd12e2
--- /dev/null
+++ b/crates/perry-container-compose/src/cli.rs
@@ -0,0 +1,258 @@
+//! CLI entry point for `perry-compose` binary.
+//!
+//! clap-based CLI with all subcommands.
+
+use crate::compose::ComposeEngine;
+use crate::error::Result;
+use crate::project::ComposeProject;
+use clap::{Args, Parser, Subcommand};
+use std::path::PathBuf;
+use std::sync::Arc;
+
+/// perry-compose: Docker Compose-like experience for Apple Container / Podman
+#[derive(Parser, Debug)]
+#[command(
+ name = "perry-compose",
+ version,
+ about = "Docker Compose-like CLI for container backends, powered by Perry",
+ long_about = None
+)]
+pub struct Cli {
+ /// Path to compose file(s)
+ #[arg(short = 'f', long = "file", value_name = "FILE", global = true)]
+ pub files: Vec,
+
+ /// Project name (default: directory name)
+ #[arg(short = 'p', long = "project-name", global = true)]
+ pub project_name: Option,
+
+ /// Environment file(s)
+ #[arg(long = "env-file", value_name = "FILE", global = true)]
+ pub env_files: Vec,
+
+ #[command(subcommand)]
+ pub command: Commands,
+}
+
+#[derive(Subcommand, Debug)]
+pub enum Commands {
+ /// Start services
+ Up(UpArgs),
+ /// Stop and remove services
+ Down(DownArgs),
+ /// Start existing stopped services
+ Start(ServiceArgs),
+ /// Stop running services
+ Stop(ServiceArgs),
+ /// Restart services
+ Restart(ServiceArgs),
+ /// List service status
+ Ps(PsArgs),
+ /// View output from containers
+ Logs(LogsArgs),
+ /// Execute a command in a running service
+ Exec(ExecArgs),
+ /// Validate and view the Compose file
+ Config(ConfigArgs),
+}
+
+#[derive(Args, Debug)]
+pub struct UpArgs {
+ #[arg(short = 'd', long = "detach")]
+ pub detach: bool,
+ #[arg(long = "build")]
+ pub build: bool,
+ #[arg(long = "remove-orphans")]
+ pub remove_orphans: bool,
+ pub services: Vec,
+}
+
+#[derive(Args, Debug)]
+pub struct DownArgs {
+ #[arg(short = 'v', long = "volumes")]
+ pub volumes: bool,
+ #[arg(long = "remove-orphans")]
+ pub remove_orphans: bool,
+ pub services: Vec,
+}
+
+#[derive(Args, Debug)]
+pub struct ServiceArgs {
+ pub services: Vec,
+}
+
+#[derive(Args, Debug)]
+pub struct PsArgs {
+ #[arg(short = 'a', long = "all")]
+ pub all: bool,
+ pub services: Vec,
+}
+
+#[derive(Args, Debug)]
+pub struct LogsArgs {
+ #[arg(short = 'f', long = "follow")]
+ pub follow: bool,
+ #[arg(long = "tail")]
+ pub tail: Option,
+ #[arg(short = 't', long = "timestamps")]
+ pub timestamps: bool,
+ pub services: Vec,
+}
+
+#[derive(Args, Debug)]
+pub struct ExecArgs {
+ pub service: String,
+ pub cmd: Vec,
+ #[arg(short = 'u', long = "user")]
+ pub user: Option,
+ #[arg(short = 'w', long = "workdir")]
+ pub workdir: Option,
+ #[arg(short = 'e', long = "env")]
+ pub env: Vec,
+}
+
+#[derive(Args, Debug)]
+pub struct ConfigArgs {
+ #[arg(long = "format", default_value = "yaml")]
+ pub format: String,
+ #[arg(long = "resolve-image-digests")]
+ pub resolve: bool,
+}
+
+// ============ Command dispatch ============
+
+pub async fn run(cli: Cli) -> Result<()> {
+ let config = crate::config::ProjectConfig::new(
+ cli.files.clone(),
+ cli.project_name.clone(),
+ cli.env_files.clone(),
+ );
+ let project = ComposeProject::load(&config)?;
+ let backend = Arc::from(crate::backend::get_backend().await?);
+ let engine = Arc::new(ComposeEngine::new(project.spec.clone(), project.project_name.clone(), backend));
+
+ match cli.command {
+ Commands::Up(args) => {
+ engine.clone()
+ .up(&args.services, args.detach, args.build, args.remove_orphans)
+ .await?;
+ }
+
+ Commands::Down(args) => {
+ engine
+ .down(&args.services, args.remove_orphans, args.volumes)
+ .await?;
+ }
+
+ Commands::Start(args) => {
+ engine.start(&args.services).await?;
+ }
+
+ Commands::Stop(args) => {
+ engine.stop(&args.services).await?;
+ }
+
+ Commands::Restart(args) => {
+ engine.restart(&args.services).await?;
+ }
+
+ Commands::Ps(_args) => {
+ let infos = engine.ps().await?;
+ print_ps_table(&infos);
+ }
+
+ Commands::Logs(args) => {
+ let logs_map = engine.logs(&args.services, args.tail).await?;
+
+ let mut names: Vec<&String> = logs_map.keys().collect();
+ names.sort();
+ for name in names {
+ let log = &logs_map[name];
+ if !log.is_empty() {
+ for line in log.lines() {
+ println!("{} | {}", name, line);
+ }
+ }
+ }
+ }
+
+ Commands::Exec(args) => {
+ let env: std::collections::HashMap = args
+ .env
+ .iter()
+ .filter_map(|e| {
+ let mut parts = e.splitn(2, '=');
+ let k = parts.next()?.to_owned();
+ let v = parts.next().unwrap_or("").to_owned();
+ Some((k, v))
+ })
+ .collect();
+
+ let cmd = args.cmd.clone();
+ if args.user.is_some() || args.workdir.is_some() || !env.is_empty() {
+ // Use backend directly for user/workdir/env support
+ let svc = engine
+ .spec
+ .services
+ .get(&args.service)
+ .ok_or_else(|| crate::error::ComposeError::NotFound(args.service.clone()))?;
+ let container_name =
+ crate::service::service_container_name(svc, &args.service);
+
+ let result = engine
+ .backend
+ .exec(
+ &container_name,
+ &cmd,
+ if env.is_empty() { None } else { Some(&env) },
+ args.workdir.as_deref(),
+ )
+ .await?;
+
+ print!("{}", result.stdout);
+ eprint!("{}", result.stderr);
+ } else {
+ let result = engine.exec(&args.service, &cmd).await?;
+ print!("{}", result.stdout);
+ eprint!("{}", result.stderr);
+ }
+ }
+
+ Commands::Config(_args) => {
+ let yaml = engine.config()?;
+ println!("{}", yaml);
+ }
+ }
+
+ Ok(())
+}
+
+fn print_ps_table(infos: &[crate::types::ContainerInfo]) {
+ let col_w_svc = 24usize;
+ let col_w_status = 12usize;
+ let col_w_container = 36usize;
+
+ println!(
+ "{:>>> =
+ once_cell::sync::Lazy::new(|| std::sync::Mutex::new(IndexMap::new()));
+
+static NEXT_STACK_ID: AtomicU64 = AtomicU64::new(1);
+
+pub struct ComposeEngine {
+ pub spec: ComposeSpec,
+ pub project_name: String,
+ pub backend: Arc,
+ started_containers: std::sync::Mutex>,
+}
+
+impl ComposeEngine {
+ pub fn new(
+ spec: ComposeSpec,
+ project_name: String,
+ backend: Arc,
+ ) -> Self {
+ ComposeEngine {
+ spec,
+ project_name,
+ backend,
+ started_containers: std::sync::Mutex::new(Vec::new()),
+ }
+ }
+
+ fn register(self: Arc) -> ComposeHandle {
+ let stack_id = NEXT_STACK_ID.fetch_add(1, Ordering::SeqCst);
+ let services: Vec = self.spec.services.keys().cloned().collect();
+ let handle = ComposeHandle {
+ stack_id,
+ project_name: self.project_name.clone(),
+ services,
+ };
+ COMPOSE_ENGINES.lock().unwrap().insert(stack_id, self);
+ handle
+ }
+
+ pub async fn up(
+ self: Arc,
+ services: &[String],
+ _detach: bool,
+ _build: bool,
+ _remove_orphans: bool,
+ ) -> Result {
+ let order = resolve_startup_order(&self.spec)?;
+ let target: Vec<&String> = if services.is_empty() {
+ order.iter().collect()
+ } else {
+ order.iter().filter(|s| services.contains(s)).collect()
+ };
+
+ if let Some(networks) = &self.spec.networks {
+ for (net_name, net_config_opt) in networks {
+ let external = net_config_opt.as_ref().map_or(false, |c| c.external.unwrap_or(false));
+ if external { continue; }
+ let resolved_name = net_config_opt.as_ref()
+ .and_then(|c| c.name.as_deref())
+ .unwrap_or(net_name.as_str());
+ let labels = net_config_opt.as_ref()
+ .and_then(|c| c.labels.as_ref())
+ .map(|l| l.to_map())
+ .unwrap_or_default();
+
+ let config = NetworkConfig {
+ driver: net_config_opt.as_ref().and_then(|c| c.driver.clone()),
+ labels,
+ internal: net_config_opt.as_ref().map_or(false, |c| c.internal.unwrap_or(false)),
+ enable_ipv6: net_config_opt.as_ref().map_or(false, |c| c.enable_ipv6.unwrap_or(false)),
+ };
+ self.backend.create_network(resolved_name, &config).await?;
+ }
+ }
+
+ if let Some(volumes) = &self.spec.volumes {
+ for (vol_name, vol_config_opt) in volumes {
+ let external = vol_config_opt.as_ref().map_or(false, |c| c.external.unwrap_or(false));
+ if external { continue; }
+ let resolved_name = vol_config_opt.as_ref()
+ .and_then(|c| c.name.as_deref())
+ .unwrap_or(vol_name.as_str());
+ let labels = vol_config_opt.as_ref()
+ .and_then(|c| c.labels.as_ref())
+ .map(|l| l.to_map())
+ .unwrap_or_default();
+
+ let config = VolumeConfig {
+ driver: vol_config_opt.as_ref().and_then(|c| c.driver.clone()),
+ labels,
+ };
+ self.backend.create_volume(resolved_name, &config).await?;
+ }
+ }
+
+ for svc_name in target {
+ let svc = self.spec.services.get(svc_name).ok_or_else(|| ComposeError::NotFound(svc_name.clone()))?;
+ let container_spec = svc.to_container_spec(svc_name);
+ match self.backend.run(&container_spec).await {
+ Ok(handle) => {
+ self.started_containers.lock().unwrap().push(handle.id);
+ }
+ Err(e) => {
+ // Rollback: stop and remove all started containers
+ let _ = self.down(&[], false, false).await;
+ return Err(e);
+ }
+ }
+ }
+
+ Ok(self.register())
+ }
+
+ pub async fn down(&self, _services: &[String], _remove_orphans: bool, _remove_volumes: bool) -> Result<()> {
+ let containers_to_stop: Vec = {
+ let containers = self.started_containers.lock().unwrap();
+ containers.iter().cloned().rev().collect()
+ };
+
+ for id in containers_to_stop {
+ let _ = self.backend.stop(&id, None).await;
+ let _ = self.backend.remove(&id, true).await;
+ }
+
+ let mut containers = self.started_containers.lock().unwrap();
+ containers.clear();
+ Ok(())
+ }
+
+ pub async fn start(&self, _services: &[String]) -> Result<()> {
+ let containers = self.started_containers.lock().unwrap();
+ for id in &*containers {
+ self.backend.start(id).await?;
+ }
+ Ok(())
+ }
+
+ pub async fn stop(&self, _services: &[String]) -> Result<()> {
+ let containers = self.started_containers.lock().unwrap();
+ for id in &*containers {
+ self.backend.stop(id, None).await?;
+ }
+ Ok(())
+ }
+
+ pub async fn restart(&self, services: &[String]) -> Result<()> {
+ self.stop(services).await?;
+ self.start(services).await?;
+ Ok(())
+ }
+
+ pub async fn ps(&self) -> Result> {
+ self.backend.list(true).await
+ }
+
+ pub async fn logs(&self, _services: &[String], tail: Option) -> Result> {
+ let mut logs = HashMap::new();
+ let containers = self.started_containers.lock().unwrap();
+ for id in &*containers {
+ let log = self.backend.logs(id, tail).await?;
+ logs.insert(id.clone(), log.stdout + &log.stderr);
+ }
+ Ok(logs)
+ }
+
+ pub async fn exec(&self, service: &str, cmd: &[String]) -> Result {
+ self.backend.exec(service, cmd, None, None).await
+ }
+
+ pub fn config(&self) -> Result {
+ self.spec.to_yaml()
+ }
+}
+
+pub fn resolve_startup_order(spec: &ComposeSpec) -> Result> {
+ let mut in_degree: IndexMap = IndexMap::new();
+ let mut dependents: IndexMap> = IndexMap::new();
+
+ for name in spec.services.keys() {
+ in_degree.insert(name.clone(), 0);
+ dependents.insert(name.clone(), Vec::new());
+ }
+
+ for (name, service) in &spec.services {
+ if let Some(deps) = &service.depends_on {
+ for dep in deps.service_names() {
+ if !spec.services.contains_key(&dep) {
+ return Err(ComposeError::validation(format!("Service '{}' depends on '{}' which is not defined", name, dep)));
+ }
+ *in_degree.get_mut(name).unwrap() += 1;
+ dependents.get_mut(&dep).unwrap().push(name.clone());
+ }
+ }
+ }
+
+ let mut queue: std::collections::BTreeSet = in_degree
+ .iter()
+ .filter(|(_, °)| deg == 0)
+ .map(|(name, _)| name.clone())
+ .collect();
+
+ let mut order: Vec = Vec::new();
+ while let Some(service) = queue.pop_first() {
+ order.push(service.clone());
+ for dependent in dependents.get(&service).unwrap_or(&Vec::new()).clone() {
+ let deg = in_degree.get_mut(&dependent).unwrap();
+ *deg -= 1;
+ if *deg == 0 {
+ queue.insert(dependent);
+ }
+ }
+ }
+
+ if order.len() != spec.services.len() {
+ let cycle_services: Vec = in_degree
+ .iter()
+ .filter(|(_, °)| deg > 0)
+ .map(|(name, _)| name.clone())
+ .collect();
+ return Err(ComposeError::DependencyCycle {
+ services: cycle_services,
+ });
+ }
+
+ Ok(order)
+}
diff --git a/crates/perry-container-compose/src/config.rs b/crates/perry-container-compose/src/config.rs
new file mode 100644
index 000000000..7925db0a4
--- /dev/null
+++ b/crates/perry-container-compose/src/config.rs
@@ -0,0 +1,128 @@
+//! Project configuration and environment variable resolution.
+
+use crate::error::{ComposeError, Result};
+use std::path::{Path, PathBuf};
+
/// Default compose file names to search for (in priority order)
pub const DEFAULT_COMPOSE_FILES: &[&str] = &[
    "compose.yaml",
    "compose.yml",
    "docker-compose.yaml",
    "docker-compose.yml",
];
+
/// Project-level configuration.
pub struct ProjectConfig {
    /// Compose file paths
    pub compose_files: Vec<PathBuf>,
    /// Project name (from -p flag or COMPOSE_PROJECT_NAME or directory name)
    pub project_name: Option<String>,
    /// Extra environment file paths (from --env-file flags)
    pub env_files: Vec<PathBuf>,
}

impl ProjectConfig {
    /// Create a new project config from CLI options.
    pub fn new(
        compose_files: Vec<PathBuf>,
        project_name: Option<String>,
        env_files: Vec<PathBuf>,
    ) -> Self {
        ProjectConfig {
            compose_files,
            project_name,
            env_files,
        }
    }
}
+
/// Resolve project name.
///
/// Priority: CLI `-p` flag > `COMPOSE_PROJECT_NAME` env var > directory name.
/// A path with no final component (e.g. `/`) falls back to the empty string.
pub fn resolve_project_name(
    cli_name: Option<&str>,
    project_dir: &Path,
) -> String {
    if let Some(name) = cli_name {
        return name.to_string();
    }

    if let Ok(name) = std::env::var("COMPOSE_PROJECT_NAME") {
        return name;
    }

    project_dir
        .file_name()
        .unwrap_or_default()
        .to_string_lossy()
        .to_string()
}
+
+/// Resolve compose file paths.
+///
+/// Priority: CLI `-f` flags > `COMPOSE_FILE` env var (pathsep-separated) > default file search
+pub fn resolve_compose_files(cli_files: &[PathBuf]) -> Result> {
+ if !cli_files.is_empty() {
+ return Ok(cli_files.to_vec());
+ }
+
+ if let Ok(compose_file_env) = std::env::var("COMPOSE_FILE") {
+ #[cfg(target_os = "windows")]
+ let separator = ";";
+ #[cfg(not(target_os = "windows"))]
+ let separator = ":";
+
+ let files: Vec = compose_file_env
+ .split(separator)
+ .map(PathBuf::from)
+ .filter(|p| p.exists())
+ .collect();
+
+ if !files.is_empty() {
+ return Ok(files);
+ }
+ }
+
+ let cwd = std::env::current_dir()?;
+ find_default_compose_file(&cwd)
+}
+
+/// Find the default compose file in a directory.
+pub fn find_default_compose_file(dir: &Path) -> Result> {
+ for name in DEFAULT_COMPOSE_FILES {
+ let candidate = dir.join(name);
+ if candidate.exists() {
+ return Ok(vec![candidate]);
+ }
+ }
+ Err(ComposeError::FileNotFound {
+ path: format!(
+ "No compose file found in {} (tried: {})",
+ dir.display(),
+ DEFAULT_COMPOSE_FILES.join(", ")
+ ),
+ })
+}
+
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_resolve_project_name_cli_priority() {
        let tmp = std::env::temp_dir().join("perry-test-project");
        std::fs::create_dir_all(&tmp).ok();

        // CLI flag wins over both env var and directory name.
        let name = resolve_project_name(Some("my-project"), &tmp);
        assert_eq!(name, "my-project");
    }

    #[test]
    fn test_resolve_project_name_dir_fallback() {
        let tmp = std::env::temp_dir().join("perry-test-project-2");
        std::fs::create_dir_all(&tmp).ok();

        // Clear the env override so the directory-name fallback is exercised
        // deterministically even when the host shell sets COMPOSE_PROJECT_NAME.
        std::env::remove_var("COMPOSE_PROJECT_NAME");
        let name = resolve_project_name(None, &tmp);
        assert_eq!(name, "perry-test-project-2");
    }
}
diff --git a/crates/perry-container-compose/src/error.rs b/crates/perry-container-compose/src/error.rs
new file mode 100644
index 000000000..03897bb1c
--- /dev/null
+++ b/crates/perry-container-compose/src/error.rs
@@ -0,0 +1,96 @@
+//! Error types for perry-container-compose.
+//!
+//! Defines the canonical `ComposeError` enum and FFI error mapping.
+
+use serde::{Serialize, Deserialize};
+use thiserror::Error;
+
+/// Result of probing a container backend candidate.
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct BackendProbeResult {
+ pub name: String,
+ pub available: bool,
+ pub reason: String,
+}
+
+/// Top-level crate error
+#[derive(Debug, Error)]
+pub enum ComposeError {
+ #[error("Dependency cycle detected in services: {services:?}")]
+ DependencyCycle { services: Vec },
+
+ #[error("Service '{service}' failed to start: {message}")]
+ ServiceStartupFailed { service: String, message: String },
+
+ #[error("Backend error (exit {code}): {message}")]
+ BackendError { code: i32, message: String },
+
+ #[error("Not found: {0}")]
+ NotFound(String),
+
+ #[error("Parse error: {0}")]
+ ParseError(#[from] serde_yaml::Error),
+
+ #[error("JSON error: {0}")]
+ JsonError(#[from] serde_json::Error),
+
+ #[error("I/O error: {0}")]
+ IoError(#[from] std::io::Error),
+
+ #[error("Validation error: {message}")]
+ ValidationError { message: String },
+
+ #[error("Image verification failed for '{image}': {reason}")]
+ VerificationFailed { image: String, reason: String },
+
+ #[error("File not found: {path}")]
+ FileNotFound { path: String },
+
+ #[error("No container backend found. Probed: {probed:?}")]
+ NoBackendFound { probed: Vec },
+
+ #[error("Specified backend '{name}' is not available: {reason}")]
+ BackendNotAvailable { name: String, reason: String },
+
+ #[error("Generic error: {0}")]
+ Generic(String),
+}
+
+impl From for ComposeError {
+ fn from(s: String) -> Self {
+ ComposeError::Generic(s)
+ }
+}
+
+impl ComposeError {
+ pub fn validation(msg: impl Into) -> Self {
+ ComposeError::ValidationError {
+ message: msg.into(),
+ }
+ }
+}
+
+pub type Result = std::result::Result;
+
+/// Convert a `ComposeError` to a JSON string `{ "message": "...", "code": N }`
+/// suitable for passing across the FFI boundary.
+pub fn compose_error_to_js(e: &ComposeError) -> String {
+ let code = match e {
+ ComposeError::NotFound(_) => 404,
+ ComposeError::FileNotFound { .. } => 404,
+ ComposeError::BackendError { code, .. } => *code,
+ ComposeError::DependencyCycle { .. } => 422,
+ ComposeError::ValidationError { .. } => 400,
+ ComposeError::ParseError(_) => 400,
+ ComposeError::JsonError(_) => 400,
+ ComposeError::VerificationFailed { .. } => 403,
+ ComposeError::NoBackendFound { .. } => 503,
+ ComposeError::BackendNotAvailable { .. } => 503,
+ _ => 500,
+ };
+ serde_json::json!({
+ "message": e.to_string(),
+ "code": code
+ })
+ .to_string()
+}
diff --git a/crates/perry-container-compose/src/ffi.rs b/crates/perry-container-compose/src/ffi.rs
new file mode 100644
index 000000000..bef3eb1d0
--- /dev/null
+++ b/crates/perry-container-compose/src/ffi.rs
@@ -0,0 +1,235 @@
+//! FFI exports for Perry TypeScript integration.
+//!
+//! Each function follows the Perry FFI convention:
+//! - String arguments arrive as `*const StringHeader` (Perry runtime layout)
+//! - Async operations return `*mut Promise` which is resolved/rejected on the tokio runtime
+//! - Results are serialised to JSON strings before being handed back to JS
+
+use crate::compose::ComposeEngine;
+use std::path::PathBuf;
+
+// ──────────────────────────────────────────────────────────────
+// Minimal re-implementation of the Perry runtime string types
+// ──────────────────────────────────────────────────────────────
+
/// Perry runtime string layout: a 4-byte length header immediately followed by
/// `length` bytes of UTF-8 data.
#[repr(C)]
pub struct StringHeader {
    pub length: u32,
}

/// Read a Perry runtime string into an owned Rust `String`.
///
/// Returns `None` for null or implausibly low (first-page) pointers.
///
/// SAFETY: caller must pass either null or a pointer to a valid header with
/// `length` readable bytes following it; invalid UTF-8 is replaced lossily.
unsafe fn string_from_header(ptr: *const StringHeader) -> Option<String> {
    if ptr.is_null() || (ptr as usize) < 0x1000 {
        return None;
    }
    let len = (*ptr).length as usize;
    let data_ptr = (ptr as *const u8).add(std::mem::size_of::<StringHeader>());
    let bytes = std::slice::from_raw_parts(data_ptr, len);
    Some(String::from_utf8_lossy(bytes).into_owned())
}
+
+// ──────────────────────────────────────────────────────────────
+// Helpers
+// ──────────────────────────────────────────────────────────────
+
+fn json_ok(value: &str) -> *const StringHeader {
+ let payload = format!("{{\"ok\":true,\"result\":{}}}", value);
+ heap_string(payload)
+}
+
+fn json_err(message: &str) -> *const StringHeader {
+ let escaped = message.replace('"', "\\\"");
+ let payload = format!("{{\"ok\":false,\"error\":\"{}\"}}", escaped);
+ heap_string(payload)
+}
+
+fn heap_string(s: String) -> *const StringHeader {
+ let bytes = s.into_bytes();
+ let total = std::mem::size_of::() + bytes.len();
+ let layout = std::alloc::Layout::from_size_align(total, std::mem::align_of::())
+ .expect("layout");
+ unsafe {
+ let ptr = std::alloc::alloc(layout) as *mut StringHeader;
+ (*ptr).length = bytes.len() as u32;
+ let data_ptr = (ptr as *mut u8).add(std::mem::size_of::());
+ std::ptr::copy_nonoverlapping(bytes.as_ptr(), data_ptr, bytes.len());
+ ptr as *const StringHeader
+ }
+}
+
+fn block, T>(fut: F) -> T {
+ tokio::runtime::Builder::new_current_thread()
+ .enable_all()
+ .build()
+ .expect("tokio runtime")
+ .block_on(fut)
+}
+
+fn parse_compose_file(file_ptr: *const StringHeader) -> Option {
+ unsafe { string_from_header(file_ptr) }.map(PathBuf::from)
+}
+
+// ──────────────────────────────────────────────────────────────
+// Exported FFI functions
+// ──────────────────────────────────────────────────────────────
+
+#[no_mangle]
+pub unsafe extern "C" fn js_compose_start(file_ptr: *const StringHeader) -> *const StringHeader {
+ let files: Vec = parse_compose_file(file_ptr).into_iter().collect();
+
+ match crate::project::ComposeProject::load_from_files(&files, None, &[]) {
+ Err(e) => json_err(&e.to_string()),
+ Ok(proj) => {
+ let backend = match crate::backend::get_backend() {
+ Ok(b) => std::sync::Arc::from(b),
+ Err(e) => return json_err(&e.to_string()),
+ };
+ let engine = ComposeEngine::new(proj.spec, proj.project_name, backend);
+ match block(engine.up(&[], true, false, false)) {
+ Ok(_) => json_ok("null"),
+ Err(e) => json_err(&e.to_string()),
+ }
+ }
+ }
+}
+
+#[no_mangle]
+pub unsafe extern "C" fn js_compose_stop(file_ptr: *const StringHeader) -> *const StringHeader {
+ let files: Vec = parse_compose_file(file_ptr).into_iter().collect();
+
+ match crate::project::ComposeProject::load_from_files(&files, None, &[]) {
+ Err(e) => json_err(&e.to_string()),
+ Ok(proj) => {
+ let backend = match crate::backend::get_backend() {
+ Ok(b) => std::sync::Arc::from(b),
+ Err(e) => return json_err(&e.to_string()),
+ };
+ let engine = ComposeEngine::new(proj.spec, proj.project_name, backend);
+ match block(engine.down(&[], false, false)) {
+ Ok(_) => json_ok("null"),
+ Err(e) => json_err(&e.to_string()),
+ }
+ }
+ }
+}
+
+#[no_mangle]
+pub unsafe extern "C" fn js_compose_ps(file_ptr: *const StringHeader) -> *const StringHeader {
+ let files: Vec = parse_compose_file(file_ptr).into_iter().collect();
+
+ match crate::project::ComposeProject::load_from_files(&files, None, &[]) {
+ Err(e) => json_err(&e.to_string()),
+ Ok(proj) => {
+ let backend = match crate::backend::get_backend() {
+ Ok(b) => std::sync::Arc::from(b),
+ Err(e) => return json_err(&e.to_string()),
+ };
+ let engine = ComposeEngine::new(proj.spec, proj.project_name, backend);
+ match block(engine.ps()) {
+ Err(e) => json_err(&e.to_string()),
+ Ok(infos) => {
+ let items: Vec = infos
+ .iter()
+ .map(|i| {
+ format!(
+ "{{\"service\":\"{}\",\"container\":\"{}\",\"status\":\"{}\"}}",
+ i.name, i.id, i.status
+ )
+ })
+ .collect();
+ let array = format!("[{}]", items.join(","));
+ json_ok(&array)
+ }
+ }
+ }
+ }
+}
+
+#[no_mangle]
+pub unsafe extern "C" fn js_compose_logs(
+ file_ptr: *const StringHeader,
+ services_ptr: *const StringHeader,
+ follow: bool,
+) -> *const StringHeader {
+ let files: Vec = parse_compose_file(file_ptr).into_iter().collect();
+ let services: Vec = string_from_header(services_ptr)
+ .and_then(|s| serde_json::from_str::>(&s).ok())
+ .unwrap_or_default();
+
+ match crate::project::ComposeProject::load_from_files(&files, None, &[]) {
+ Err(e) => json_err(&e.to_string()),
+ Ok(proj) => {
+ let backend = match crate::backend::get_backend() {
+ Ok(b) => std::sync::Arc::from(b),
+ Err(e) => return json_err(&e.to_string()),
+ };
+ let engine = ComposeEngine::new(proj.spec, proj.project_name, backend);
+ match block(engine.logs(&services, None)) {
+ Err(e) => json_err(&e.to_string()),
+ Ok(logs_map) => {
+ let pairs: Vec = logs_map
+ .iter()
+ .map(|(k, v)| {
+ let escaped = v.replace('"', "\\\"").replace('\n', "\\n");
+ format!("\"{}\":\"{}\"", k, escaped)
+ })
+ .collect();
+ let obj = format!("{{{}}}", pairs.join(","));
+ json_ok(&obj)
+ }
+ }
+ }
+ }
+}
+
+#[no_mangle]
+pub unsafe extern "C" fn js_compose_exec(
+ file_ptr: *const StringHeader,
+ service_ptr: *const StringHeader,
+ cmd_ptr: *const StringHeader,
+) -> *const StringHeader {
+ let files: Vec = parse_compose_file(file_ptr).into_iter().collect();
+ let service = match string_from_header(service_ptr) {
+ Some(s) => s,
+ None => return json_err("service name is required"),
+ };
+ let cmd: Vec = string_from_header(cmd_ptr)
+ .and_then(|s| serde_json::from_str::>(&s).ok())
+ .unwrap_or_default();
+
+ match crate::project::ComposeProject::load_from_files(&files, None, &[]) {
+ Err(e) => json_err(&e.to_string()),
+ Ok(proj) => {
+ let backend = match crate::backend::get_backend() {
+ Ok(b) => std::sync::Arc::from(b),
+ Err(e) => return json_err(&e.to_string()),
+ };
+ let engine = ComposeEngine::new(proj.spec, proj.project_name, backend);
+ match block(engine.exec(&service, &cmd)) {
+ Err(e) => json_err(&e.to_string()),
+ Ok(result) => {
+ let stdout = result.stdout.replace('"', "\\\"").replace('\n', "\\n");
+ let stderr = result.stderr.replace('"', "\\\"").replace('\n', "\\n");
+ let payload = format!(
+ "{{\"stdout\":\"{}\",\"stderr\":\"{}\",\"exitCode\":{}}}",
+ stdout, stderr, result.exit_code
+ );
+ json_ok(&payload)
+ }
+ }
+ }
+ }
+}
+
+#[no_mangle]
+pub unsafe extern "C" fn js_compose_config(file_ptr: *const StringHeader) -> *const StringHeader {
+ let files: Vec = parse_compose_file(file_ptr).into_iter().collect();
+
+ match crate::project::ComposeProject::load_from_files(&files, None, &[]) {
+ Err(e) => json_err(&e.to_string()),
+ Ok(proj) => {
+ let yaml = proj.spec.to_yaml().unwrap_or_default();
+ let escaped = yaml.replace('"', "\\\"").replace('\n', "\\n");
+ json_ok(&format!("\"{}\"", escaped))
+ }
+ }
+}
diff --git a/crates/perry-container-compose/src/lib.rs b/crates/perry-container-compose/src/lib.rs
new file mode 100644
index 000000000..f8f73c3fc
--- /dev/null
+++ b/crates/perry-container-compose/src/lib.rs
@@ -0,0 +1,28 @@
+//! `perry-container-compose` — Docker Compose-like experience for Apple Container / Podman.
+//!
+//! Can be used:
+//!
+//! 1. As a standalone CLI binary (`perry-compose`)
+//! 2. As a library imported from Perry TypeScript applications
+//! 3. Via FFI from compiled Perry TypeScript code (requires `ffi` feature)
+
+pub mod backend;
+pub mod cli;
+pub mod compose;
+pub mod config;
+pub mod error;
+pub mod project;
+pub mod service;
+pub mod types;
+pub mod yaml;
+
+// FFI exports (Perry TypeScript integration)
+#[cfg(feature = "ffi")]
+pub mod ffi;
+
+// Re-exports
+pub use error::{ComposeError, Result, BackendProbeResult};
+pub use types::{ComposeHandle, ComposeService, ComposeSpec};
+pub use compose::ComposeEngine;
+pub use project::ComposeProject;
+pub use backend::{ContainerBackend, get_backend, detect_backend};
diff --git a/crates/perry-container-compose/src/main.rs b/crates/perry-container-compose/src/main.rs
new file mode 100644
index 000000000..73e014c72
--- /dev/null
+++ b/crates/perry-container-compose/src/main.rs
@@ -0,0 +1,21 @@
+//! CLI entry point for `perry-compose` binary.
+
+use clap::Parser;
+use perry_container_compose::cli::{run, Cli};
+use tracing_subscriber::{fmt, EnvFilter};
+
+#[tokio::main]
+async fn main() {
+    // Initialise the fmt tracing subscriber; RUST_LOG controls verbosity.
+    fmt()
+        .with_env_filter(EnvFilter::from_default_env())
+        .with_target(false)
+        .init();
+
+    // Parse CLI args and dispatch; any error exits with status 1.
+    match run(Cli::parse()).await {
+        Ok(()) => {}
+        Err(e) => {
+            eprintln!("Error: {}", e);
+            std::process::exit(1);
+        }
+    }
+}
diff --git a/crates/perry-container-compose/src/project.rs b/crates/perry-container-compose/src/project.rs
new file mode 100644
index 000000000..3096e313e
--- /dev/null
+++ b/crates/perry-container-compose/src/project.rs
@@ -0,0 +1,72 @@
+//! `ComposeProject` — project loading and file discovery.
+
+use crate::config::{self, ProjectConfig};
+use crate::error::Result;
+use crate::types::ComposeSpec;
+use crate::yaml;
+use std::path::{Path, PathBuf};
+
+/// A loaded and resolved compose project: the file paths it came from, the
+/// merged spec, and the environment used during loading.
+pub struct ComposeProject {
+ /// Project name (explicit, or derived from the working directory)
+ pub project_name: String,
+ /// Working directory (parent of the first compose file)
+ pub project_dir: PathBuf,
+ /// Compose file paths, in the order they are merged
+ pub compose_files: Vec,
+ /// Merged and interpolated compose spec
+ pub spec: ComposeSpec,
+ /// Environment variables resolved from env files
+ pub env: std::collections::HashMap,
+}
+
+impl ComposeProject {
+    /// Convenience: load from raw file paths, project name, and env files.
+    pub fn load_from_files(
+        files: &[PathBuf],
+        project_name: Option<&str>,
+        env_files: &[PathBuf],
+    ) -> Result<Self> {
+        let config = ProjectConfig::new(
+            files.to_vec(),
+            project_name.map(String::from),
+            env_files.to_vec(),
+        );
+        Self::load(&config)
+    }
+
+    /// Load a project from configuration.
+    ///
+    /// Resolves compose file paths (falling back to the default lookup when
+    /// none are configured), loads env files relative to the working
+    /// directory, merges the compose YAML with that environment, and derives
+    /// the project name.
+    pub fn load(config: &ProjectConfig) -> Result<Self> {
+        // Resolve compose file paths
+        let files = if config.compose_files.is_empty() {
+            config::resolve_compose_files(&[])? // Use default lookup
+        } else {
+            config.compose_files.clone()
+        };
+
+        // Working dir = parent of the first compose file. Use `first()` so an
+        // empty resolution falls back to "." instead of panicking on files[0].
+        let working_dir = files
+            .first()
+            .and_then(|f| f.parent())
+            .unwrap_or(Path::new("."))
+            .to_path_buf();
+
+        // Load environment (env files resolved against the working dir)
+        let env = yaml::load_env(&working_dir, &config.env_files);
+
+        // Parse and merge compose files using the loaded environment
+        let spec = yaml::parse_and_merge_files(&files, &env)?;
+
+        // Determine project name (see config::resolve_project_name)
+        let name = config::resolve_project_name(
+            config.project_name.as_deref(),
+            &working_dir,
+        );
+
+        Ok(ComposeProject {
+            project_name: name,
+            project_dir: working_dir,
+            compose_files: files,
+            spec,
+            env,
+        })
+    }
+}
diff --git a/crates/perry-container-compose/src/service.rs b/crates/perry-container-compose/src/service.rs
new file mode 100644
index 000000000..b16ef59e5
--- /dev/null
+++ b/crates/perry-container-compose/src/service.rs
@@ -0,0 +1,45 @@
+//! Service runtime state and name generation.
+
+use crate::types::{ComposeService, ContainerSpec};
+use md5::{Digest, Md5};
+
+/// Build a container name of the form
+/// `<sanitized-service>-<md5-prefix>-<random-hex>`.
+pub fn generate_name(image: &str, service_name: &str) -> String {
+    // Hash the image reference; the first 8 hex chars tie the name to it.
+    let mut hasher = Md5::new();
+    hasher.update(image.as_bytes());
+    let digest_hex = hex::encode(hasher.finalize());
+
+    // Random suffix keeps names unique across invocations.
+    let suffix: u32 = rand::random();
+
+    // Replace anything that is not alphanumeric or '-' with '_'.
+    let sanitized: String = service_name
+        .chars()
+        .map(|c| match c {
+            c if c.is_alphanumeric() || c == '-' => c,
+            _ => '_',
+        })
+        .collect();
+
+    format!("{}-{}-{:08x}", sanitized, &digest_hex[..8], suffix)
+}
+
+/// Container name for a service: the explicitly configured name when set,
+/// otherwise a generated unique name based on the image reference.
+pub fn service_container_name(svc: &ComposeService, service_name: &str) -> String {
+    match svc.explicit_name() {
+        Some(name) => name.to_string(),
+        None => generate_name(&svc.image_ref(service_name), service_name),
+    }
+}
+
+impl ComposeService {
+ /// Translate this compose service definition into a backend-level
+ /// `ContainerSpec` for `service_name`.
+ ///
+ /// Image, ports, volumes, env, and command come from the service's own
+ /// helper methods; entrypoint and network are left unset here.
+ pub fn to_container_spec(&self, service_name: &str) -> ContainerSpec {
+ ContainerSpec {
+ image: self.image_ref(service_name),
+ // Explicit container_name wins; otherwise a generated unique name.
+ name: Some(service_container_name(self, service_name)),
+ ports: Some(self.port_strings()),
+ volumes: Some(self.volume_strings()),
+ env: Some(self.resolved_env()),
+ cmd: self.command_list(),
+ entrypoint: None,
+ network: None,
+ // NOTE(review): rm=true presumably maps to `--rm` (auto-remove on
+ // exit) in the backend arg builder — confirm against the backend.
+ rm: Some(true),
+ }
+ }
+}
diff --git a/crates/perry-container-compose/src/types.rs b/crates/perry-container-compose/src/types.rs
new file mode 100644
index 000000000..ab3045b59
--- /dev/null
+++ b/crates/perry-container-compose/src/types.rs
@@ -0,0 +1,492 @@
+//! Root types for perry-container-compose.
+
+use indexmap::IndexMap;
+use serde::{Deserialize, Serialize};
+
+// ============ compose-spec §list_or_dict ============
+
+/// compose-spec `list_or_dict`: a value that may be written either as a list
+/// of `KEY=VALUE` strings or as a mapping (e.g. environment-style fields).
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(untagged)]
+pub enum ListOrDict {
+ List(Vec),
+ Dict(IndexMap),
+}
+
+impl Default for ListOrDict {
+ // Default to the (empty) list form.
+ fn default() -> Self {
+ ListOrDict::List(Vec::new())
+ }
+}
+
+impl ListOrDict {
+ /// Normalise either representation into a plain `HashMap`.
+ ///
+ /// List entries are split on the first `=`; an entry without `=` maps to
+ /// an empty-string value. For duplicate keys the last occurrence wins.
+ pub fn to_map(&self) -> std::collections::HashMap {
+ match self {
+ ListOrDict::Dict(m) => m.iter().map(|(k, v)| (k.clone(), v.clone())).collect(),
+ ListOrDict::List(v) => v
+ .iter()
+ .filter_map(|s| {
+ // splitn(2, '=') keeps any further '=' inside the value.
+ let mut parts = s.splitn(2, '=');
+ let k = parts.next()?.to_owned();
+ let v = parts.next().unwrap_or("").to_owned();
+ Some((k, v))
+ })
+ .collect(),
+ }
+ }
+}
+
+// ============ compose-spec §depends_on ============
+
+/// compose-spec `depends_on` startup condition for a dependency.
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "snake_case")]
+pub enum DependsOnCondition {
+ ServiceStarted,
+ ServiceHealthy,
+ ServiceCompletedSuccessfully,
+}
+
+/// Long-form `depends_on` entry: just the condition to wait for.
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct ComposeDependsOn {
+ pub condition: DependsOnCondition,
+}
+
+/// `depends_on` in either short form (a list of service names) or long form
+/// (a map from service name to per-dependency config).
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(untagged)]
+pub enum DependsOnSpec {
+ List(Vec),
+ Dict(IndexMap),
+}
+
+impl DependsOnSpec {
+ /// Names of the services depended upon, regardless of which form was used.
+ pub fn service_names(&self) -> Vec {
+ match self {
+ DependsOnSpec::List(v) => v.clone(),
+ DependsOnSpec::Dict(m) => m.keys().cloned().collect(),
+ }
+ }
+}
+
+// ============ compose-spec §build ============
+
+/// compose-spec `build` section (supported subset).
+#[derive(Debug, Clone, Serialize, Deserialize, Default)]
+pub struct BuildSpec {
+ pub context: Option,
+ pub dockerfile: Option,
+ pub args: Option,
+ pub target: Option,
+}
+
+// ============ compose-spec §healthcheck ============
+
+/// compose-spec `healthcheck` section.
+#[derive(Debug, Clone, Serialize, Deserialize, Default)]
+pub struct ComposeHealthcheck {
+ pub test: Option,
+ pub interval: Option,
+ pub timeout: Option,
+ pub retries: Option,
+ pub start_period: Option,
+}
+
+// ============ compose-spec §deploy ============
+
+/// compose-spec `deploy` section (supported subset).
+#[derive(Debug, Clone, Serialize, Deserialize, Default)]
+pub struct ComposeDeployment {
+ pub resources: Option,
+ pub replicas: Option,
+ pub restart_policy: Option,
+}
+
+/// `deploy.resources`: limits and reservations.
+#[derive(Debug, Clone, Serialize, Deserialize, Default)]
+pub struct ComposeDeploymentResources {
+ pub limits: Option,
+ pub reservations: Option,
+}
+
+/// A single resource specification (cpus / memory).
+#[derive(Debug, Clone, Serialize, Deserialize, Default)]
+pub struct ComposeResourceSpec {
+ pub cpus: Option,
+ pub memory: Option,
+}
+
+// ============ compose-spec §logging ============
+
+/// compose-spec `logging` section: driver name plus driver options.
+#[derive(Debug, Clone, Serialize, Deserialize, Default)]
+pub struct ComposeLogging {
+ pub driver: Option,
+ pub options: Option>,
+}
+
+// ============ Ports ============
+
+/// A port mapping: either docker-style short string syntax or the long form.
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(untagged)]
+pub enum PortSpec {
+ Short(String),
+ Long(ComposeServicePort),
+}
+
+/// Long-form port mapping (compose-spec `ports`).
+#[derive(Debug, Clone, Serialize, Deserialize, Default)]
+pub struct ComposeServicePort {
+ /// Container-side port
+ pub target: u32,
+ /// Host-side port; when absent the rendered mapping omits the host port
+ pub published: Option,
+ /// Protocol name; rendering defaults to "tcp" when absent
+ pub protocol: Option,
+ pub mode: Option,
+}
+
+impl PortSpec {
+    /// Render the mapping in docker short syntax: `published:target/proto`
+    /// when a host port is set, otherwise `target/proto`. The protocol
+    /// defaults to "tcp". Short-form entries pass through unchanged.
+    pub fn to_string_form(&self) -> String {
+        match self {
+            PortSpec::Short(s) => s.clone(),
+            PortSpec::Long(p) => {
+                let proto = p.protocol.as_deref().unwrap_or("tcp");
+                match p.published {
+                    Some(published) => format!("{}:{}/{}", published, p.target, proto),
+                    None => format!("{}/{}", p.target, proto),
+                }
+            }
+        }
+    }
+}
+
+// ============ Networks ============
+
+/// Per-service network attachments: keys are network names, values are
+/// optional per-network settings (flattened in the YAML).
+#[derive(Debug, Clone, Serialize, Deserialize, Default)]
+pub struct ServiceNetworks {
+ #[serde(flatten)]
+ pub networks: IndexMap>,
+}
+
+/// Per-network settings for a service attachment.
+#[derive(Debug, Clone, Serialize, Deserialize, Default)]
+pub struct ComposeServiceNetworkConfig {
+ pub aliases: Option>,
+ pub ipv4_address: Option,
+ pub ipv6_address: Option,
+}
+
+/// Top-level network definition (compose-spec `networks` map).
+#[derive(Debug, Clone, Serialize, Deserialize, Default)]
+pub struct ComposeNetwork {
+ pub name: Option,
+ pub driver: Option,
+ pub driver_opts: Option>,
+ pub external: Option,
+ pub internal: Option,
+ pub enable_ipv6: Option,
+ pub labels: Option,
+}
+
+// ============ Volumes ============
+
+/// Mount type for a long-form volume entry (serialized in snake_case).
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
+#[serde(rename_all = "snake_case")]
+pub enum VolumeType {
+ Bind,
+ Volume,
+ Tmpfs,
+ Cluster,
+ Npipe,
+ Image,
+}
+
+/// A service volume entry: docker-style short string or the long form.
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(untagged)]
+pub enum VolumeEntry {
+ Short(String),
+ Long(ComposeServiceVolume),
+}
+
+impl VolumeEntry {
+    /// Render the entry in short `source:target:mode` syntax. Mode is "ro"
+    /// only when `read_only` is explicitly true, otherwise "rw"; a missing
+    /// source renders as an empty string. Short-form entries pass through.
+    pub fn to_string_form(&self) -> String {
+        match self {
+            VolumeEntry::Short(s) => s.clone(),
+            VolumeEntry::Long(v) => {
+                let source = v.source.as_deref().unwrap_or("");
+                let mode = match v.read_only {
+                    Some(true) => "ro",
+                    _ => "rw",
+                };
+                format!("{}:{}:{}", source, v.target, mode)
+            }
+        }
+    }
+}
+
+/// Long-form service volume entry (compose-spec `volumes` long syntax).
+#[derive(Debug, Clone, Serialize, Deserialize, Default)]
+pub struct ComposeServiceVolume {
+ /// Mount type; serialized as `type` in YAML
+ #[serde(rename = "type")]
+ pub volume_type: Option,
+ pub source: Option,
+ pub target: String,
+ pub read_only: Option,
+ pub bind: Option,
+ pub volume: Option,
+ pub tmpfs: Option,
+}
+
+/// Bind-mount specific options.
+#[derive(Debug, Clone, Serialize, Deserialize, Default)]
+pub struct ComposeServiceVolumeBind {
+ pub propagation: Option,
+ pub create_host_path: Option,
+ pub selinux: Option,
+}
+
+/// Named-volume specific options.
+#[derive(Debug, Clone, Serialize, Deserialize, Default)]
+pub struct ComposeServiceVolumeOpts {
+ pub nocopy: Option,
+}
+
+/// Tmpfs-mount specific options.
+#[derive(Debug, Clone, Serialize, Deserialize, Default)]
+pub struct ComposeServiceVolumeTmpfs {
+ pub size: Option,
+ pub mode: Option,
+}
+
+/// Top-level named volume definition (compose-spec `volumes` map).
+#[derive(Debug, Clone, Serialize, Deserialize, Default)]
+pub struct ComposeVolume {
+ pub name: Option,
+ pub driver: Option,
+ pub driver_opts: Option>,
+ pub external: Option,
+ pub labels: Option,
+}
+
+// ============ Secret ============
+
+/// Top-level secret definition (compose-spec `secrets` map).
+#[derive(Debug, Clone, Serialize, Deserialize, Default)]
+pub struct ComposeSecret {
+ pub name: Option,
+ pub environment: Option,
+ pub file: Option,
+ pub external: Option,
+ pub labels: Option,
+ pub driver: Option,
+ pub driver_opts: Option>,
+ pub template_driver: Option,
+}
+
+// ============ Config ============
+
+/// Top-level config definition (compose-spec `configs` map).
+#[derive(Debug, Clone, Serialize, Deserialize, Default)]
+pub struct ComposeConfigObj {
+ pub name: Option,
+ pub content: Option,
+ pub environment: Option,
+ pub file: Option,
+ pub external: Option,
+ pub labels: Option,
+ pub template_driver: Option,
+}
+
+// ============ ComposeService ============
+
+#[derive(Debug, Clone, Serialize, Deserialize, Default)]
+pub struct ComposeService {
+ pub image: Option,
+ pub build: Option,
+ pub command: Option,
+ pub entrypoint: Option,
+ pub environment: Option,
+ pub env_file: Option,
+ pub ports: Option>,
+ pub volumes: Option>,
+ pub networks: Option,
+ pub depends_on: Option,
+ pub restart: Option,
+ pub healthcheck: Option,
+ pub container_name: Option