Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
18 changes: 10 additions & 8 deletions crates/tower-cmd/src/run.rs
Original file line number Diff line number Diff line change
Expand Up @@ -18,9 +18,8 @@ use tokio::sync::{
Mutex,
};
use tokio::time::{sleep, timeout, Duration};
use tower_runtime::execution::ExecutionHandle;
use tower_runtime::execution::{
CacheBackend, CacheConfig, CacheIsolation, ExecutionBackend, ExecutionSpec, ResourceLimits,
App as _, Backend, CacheBackend, CacheConfig, CacheIsolation, ExecutionSpec, ResourceLimits,
RuntimeConfig as ExecRuntimeConfig,
};
use tower_runtime::subprocess::SubprocessBackend;
Expand Down Expand Up @@ -204,7 +203,7 @@ where

// Monitor app status concurrently
let handle = Arc::new(Mutex::new(handle));
let status_task = tokio::spawn(monitor_cli_status(Arc::clone(&handle)));
let status_task = tokio::spawn(monitor_app_status(Arc::clone(&handle)));

// Wait for app to complete or SIGTERM
let status_result = tokio::select! {
Expand All @@ -223,6 +222,7 @@ where
// And if we crashed, err out
match status_result {
Status::Exited => output::success("Your local run exited cleanly."),
Status::Cancelled => output::success("Your local run was cancelled."),
Status::Crashed { code } => {
output::error(&format!("Your local run crashed with exit code: {}", code));
return Err(Error::AppCrashed);
Expand Down Expand Up @@ -654,12 +654,10 @@ async fn monitor_output(mut output: OutputReceiver) {
}
}

/// monitor_local_status is a helper function that will monitor the status of a given app and waits for
/// monitor_app_status is a helper function that will monitor the status of a given app and waits for
/// it to progress to a terminal state.
async fn monitor_cli_status(
handle: Arc<Mutex<tower_runtime::subprocess::SubprocessHandle>>,
) -> Status {
use tower_runtime::execution::ExecutionHandle as _;
async fn monitor_app_status(handle: Arc<Mutex<tower_runtime::local::LocalApp>>) -> Status {
use tower_runtime::execution::App as _;

debug!("Starting status monitoring for CLI execution");
let mut check_count = 0;
Expand All @@ -683,6 +681,10 @@ async fn monitor_cli_status(
debug!("Run exited cleanly, stopping status monitoring");
return status;
}
Status::Cancelled => {
debug!("Run was cancelled, stopping status monitoring");
return status;
}
Status::Crashed { .. } => {
debug!("Run crashed, stopping status monitoring");
return status;
Expand Down
3 changes: 3 additions & 0 deletions crates/tower-runtime/src/errors.rs
Original file line number Diff line number Diff line change
Expand Up @@ -76,6 +76,9 @@ pub enum Error {

#[snafu(display("dependency installation failed"))]
DependencyInstallationFailed,

#[snafu(display("failed to wait for process: {message}"))]
ProcessWaitFailed { message: String },
}

impl From<std::io::Error> for Error {
Expand Down
54 changes: 37 additions & 17 deletions crates/tower-runtime/src/execution.rs
Original file line number Diff line number Diff line change
Expand Up @@ -149,17 +149,19 @@ pub struct NetworkingSpec {
}

// ============================================================================
// Execution Backend Trait
// Backend Trait
// ============================================================================

/// ExecutionBackend abstracts the compute substrate
/// Backend creates App instances for a specific compute substrate.
///
/// Implementations: SubprocessBackend (subprocess), K8sBackend (Kubernetes)
#[async_trait]
pub trait ExecutionBackend: Send + Sync {
/// The handle type this backend returns
type Handle: ExecutionHandle;
pub trait Backend: Send + Sync {
/// The App type this backend creates
type App: App;

/// Create a new execution environment
async fn create(&self, spec: ExecutionSpec) -> Result<Self::Handle, Error>;
/// Create a new app execution
async fn create(&self, spec: ExecutionSpec) -> Result<Self::App, Error>;

/// Get backend capabilities
fn capabilities(&self) -> BackendCapabilities;
Expand Down Expand Up @@ -195,13 +197,15 @@ pub struct BackendCapabilities {
}

// ============================================================================
// Execution Handle Trait
// App Trait
// ============================================================================

/// ExecutionHandle represents a running execution
/// App represents a running Tower application instance.
///
/// Implementations: LocalApp (subprocess), K8sApp (Kubernetes pod)
#[async_trait]
pub trait ExecutionHandle: Send + Sync {
/// Get a unique identifier for this execution
pub trait App: Send + Sync {
Copy link
Contributor

@sammuti sammuti Jan 22, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Note this would require a change on the runner side for the k8s impl. I propose just keeping the new name rather than renaming, since I believe it's a better name than "App"

/// Unique identifier for this execution
fn id(&self) -> &str;

/// Get current execution status
Expand All @@ -210,20 +214,26 @@ pub trait ExecutionHandle: Send + Sync {
/// Subscribe to log stream
async fn logs(&self) -> Result<OutputReceiver, Error>;

/// Terminate execution gracefully
/// Terminate execution gracefully (SIGTERM equivalent)
async fn terminate(&mut self) -> Result<(), Error>;

/// Force kill execution
async fn kill(&mut self) -> Result<(), Error>;
/// Force kill execution (SIGKILL equivalent)
async fn kill(&mut self) -> Result<(), Error> {
self.terminate().await // default: same as terminate
}

/// Wait for execution to complete
async fn wait_for_completion(&self) -> Result<Status, Error>;

/// Get service endpoint
async fn service_endpoint(&self) -> Result<Option<ServiceEndpoint>, Error>;
/// Get service endpoint (for long-running apps)
async fn service_endpoint(&self) -> Result<Option<ServiceEndpoint>, Error> {
Ok(None) // default: no endpoint
}

/// Cleanup resources
async fn cleanup(&mut self) -> Result<(), Error>;
async fn cleanup(&mut self) -> Result<(), Error> {
self.terminate().await // default: just terminate
}
}

/// ServiceEndpoint describes how to reach a running service
Expand All @@ -241,3 +251,13 @@ pub struct ServiceEndpoint {
/// Full URL if applicable (e.g., "http://app-run-123.default.svc.cluster.local:8080")
pub url: Option<String>,
}

// ============================================================================
// Deprecated Aliases (for backwards compatibility with tower-runner)
// ============================================================================

#[deprecated(note = "use `Backend` instead")]
pub use self::Backend as ExecutionBackend;

#[deprecated(note = "use `App` instead")]
pub use self::App as ExecutionHandle;
8 changes: 5 additions & 3 deletions crates/tower-runtime/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,9 @@ pub mod execution;
pub mod local;
pub mod subprocess;

// Re-export SubprocessBackend from subprocess module
pub use subprocess::SubprocessBackend;

use errors::Error;

#[derive(Copy, Clone)]
Expand Down Expand Up @@ -41,23 +44,22 @@ pub enum Status {
None,
Running,
Exited,
Cancelled,
Crashed { code: i32 },
}

pub type OutputReceiver = UnboundedReceiver<Output>;

pub type OutputSender = UnboundedSender<Output>;

#[deprecated(note = "use `execution::App` instead")]
pub trait App: Send + Sync {
// start will start the process
fn start(opts: StartOptions) -> impl Future<Output = Result<Self, Error>> + Send
where
Self: Sized;

// terminate will terminate the subprocess
fn terminate(&mut self) -> impl Future<Output = Result<(), Error>> + Send;

// status checks the status of an app
fn status(&self) -> impl Future<Output = Result<Status, Error>> + Send;
}

Expand Down
Loading