diff --git a/.gitignore b/.gitignore
index 05923927..c59e2de8 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,2 +1,3 @@
 /target
 .DS_Store
+.codspeed
diff --git a/Cargo.lock b/Cargo.lock
index fbb557bf..81bd1994 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -494,7 +494,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "117725a109d387c937a1533ce01b450cbde6b88abceea8473c4d7a85853cda3c"
 dependencies = [
  "lazy_static",
- "windows-sys 0.59.0",
+ "windows-sys 0.48.0",
 ]
 
 [[package]]
@@ -2594,7 +2594,7 @@ dependencies = [
  "errno",
  "libc",
  "linux-raw-sys 0.4.15",
- "windows-sys 0.59.0",
+ "windows-sys 0.52.0",
 ]
 
 [[package]]
diff --git a/crates/exec-harness/src/analysis.rs b/crates/exec-harness/src/analysis.rs
index 9ad46571..4469380c 100644
--- a/crates/exec-harness/src/analysis.rs
+++ b/crates/exec-harness/src/analysis.rs
@@ -1,24 +1,29 @@
 use crate::prelude::*;
-use crate::uri::NameAndUri;
+use crate::BenchmarkCommand;
+use crate::uri;
 use codspeed::instrument_hooks::InstrumentHooks;
 use std::process::Command;
 
-pub fn perform(name_and_uri: NameAndUri, command: Vec<String>) -> Result<()> {
+pub fn perform(commands: Vec<BenchmarkCommand>) -> Result<()> {
     let hooks = InstrumentHooks::instance();
 
-    let mut cmd = Command::new(&command[0]);
-    cmd.args(&command[1..]);
-    hooks.start_benchmark().unwrap();
-    let status = cmd.status();
-    hooks.stop_benchmark().unwrap();
-    let status = status.context("Failed to execute command")?;
+    for benchmark_cmd in commands {
+        let name_and_uri = uri::generate_name_and_uri(&benchmark_cmd.name, &benchmark_cmd.command);
 
-    if !status.success() {
-        bail!("Command exited with non-zero status: {status}");
-    }
+        let mut cmd = Command::new(&benchmark_cmd.command[0]);
+        cmd.args(&benchmark_cmd.command[1..]);
+        hooks.start_benchmark().unwrap();
+        let status = cmd.status();
+        hooks.stop_benchmark().unwrap();
+        let status = status.context("Failed to execute command")?;
+
+        if !status.success() {
+            bail!("Command exited with non-zero status: {status}");
+        }
 
-    hooks.set_executed_benchmark(&name_and_uri.uri).unwrap();
+        hooks.set_executed_benchmark(&name_and_uri.uri).unwrap();
+    }
 
     Ok(())
 }
diff --git a/crates/exec-harness/src/lib.rs b/crates/exec-harness/src/lib.rs
index 2bfb757b..8fea15ee 100644
--- a/crates/exec-harness/src/lib.rs
+++ b/crates/exec-harness/src/lib.rs
@@ -1,15 +1,83 @@
 use clap::ValueEnum;
+use prelude::*;
 use serde::{Deserialize, Serialize};
+use std::io::{self, BufRead};
 
 pub mod analysis;
 pub mod prelude;
-pub mod uri;
+mod uri;
 pub mod walltime;
 
-#[derive(ValueEnum, Clone, Debug, Serialize, Deserialize, PartialEq)]
+#[derive(ValueEnum, Clone, Copy, Debug, Serialize, Deserialize, PartialEq)]
 #[serde(rename_all = "lowercase")]
 pub enum MeasurementMode {
     Walltime,
     Memory,
     Simulation,
 }
+
+/// A single benchmark command for stdin mode input.
+///
+/// This struct defines the JSON format for passing benchmark commands to exec-harness
+/// via stdin (when invoked with `-`). The runner uses this same struct to serialize
+/// targets from codspeed.yaml.
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct BenchmarkCommand {
+    /// The command and arguments to execute
+    pub command: Vec<String>,
+
+    /// Optional benchmark name
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub name: Option<String>,
+
+    /// Walltime execution options (flattened into the JSON object)
+    #[serde(default)]
+    pub walltime_args: walltime::WalltimeExecutionArgs,
+}
+
+/// Read and parse benchmark commands from stdin as JSON
+pub fn read_commands_from_stdin() -> Result<Vec<BenchmarkCommand>> {
+    let stdin = io::stdin();
+    let mut input = String::new();
+
+    for line in stdin.lock().lines() {
+        let line = line.context("Failed to read line from stdin")?;
+        input.push_str(&line);
+        input.push('\n');
+    }
+
+    let commands: Vec<BenchmarkCommand> =
+        serde_json::from_str(&input).context("Failed to parse JSON from stdin")?;
+
+    if commands.is_empty() {
+        bail!("No commands provided in stdin input");
+    }
+
+    for cmd in &commands {
+        if cmd.command.is_empty() {
+            bail!("Empty command in stdin input");
+        }
+    }
+
+    Ok(commands)
+}
+
+/// Execute benchmark commands
+pub fn execute_benchmarks(
+    commands: Vec<BenchmarkCommand>,
+    measurement_mode: Option<MeasurementMode>,
+) -> Result<()> {
+    match measurement_mode {
+        Some(MeasurementMode::Walltime) | None => {
+            walltime::perform(commands)?;
+        }
+        Some(MeasurementMode::Memory) => {
+            analysis::perform(commands)?;
+        }
+        Some(MeasurementMode::Simulation) => {
+            bail!("Simulation measurement mode is not yet supported by exec-harness");
+        }
+    }
+
+    Ok(())
+}
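For reference, the stdin payload parsed by `read_commands_from_stdin` is a JSON array of `BenchmarkCommand` objects. The sketch below follows the struct definition above; the commands and names are illustrative, and `walltime_args` is omitted so it falls back to its `Default` value:

```json
[
  { "command": ["cargo", "build", "--release"], "name": "release-build" },
  { "command": ["./scripts/bench.sh"] }
]
```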
diff --git a/crates/exec-harness/src/main.rs b/crates/exec-harness/src/main.rs
index 1417f345..df5f203b 100644
--- a/crates/exec-harness/src/main.rs
+++ b/crates/exec-harness/src/main.rs
@@ -1,9 +1,9 @@
 use clap::Parser;
-use exec_harness::MeasurementMode;
-use exec_harness::analysis;
 use exec_harness::prelude::*;
-use exec_harness::uri;
-use exec_harness::walltime;
+use exec_harness::walltime::WalltimeExecutionArgs;
+use exec_harness::{
+    BenchmarkCommand, MeasurementMode, execute_benchmarks, read_commands_from_stdin,
+};
 
 #[derive(Parser, Debug)]
 #[command(name = "exec-harness")]
@@ -21,9 +21,10 @@ struct Args {
     measurement_mode: Option<MeasurementMode>,
 
     #[command(flatten)]
-    execution_args: walltime::WalltimeExecutionArgs,
+    walltime_args: WalltimeExecutionArgs,
 
-    /// The command and arguments to execute
+    /// The command and arguments to execute.
+    /// Use "-" as the only argument to read a JSON payload from stdin.
     #[arg(trailing_var_arg = true, allow_hyphen_values = true)]
     command: Vec<String>,
 }
@@ -37,26 +38,20 @@ fn main() -> Result<()> {
     debug!("Starting exec-harness with pid {}", std::process::id());
 
     let args = Args::parse();
-
-    if args.command.is_empty() {
-        bail!("Error: No command provided");
-    }
-
-    let bench_name_and_uri = uri::generate_name_and_uri(&args.name, &args.command);
-
-    match args.measurement_mode {
-        Some(MeasurementMode::Walltime) | None => {
-            let execution_options: walltime::ExecutionOptions = args.execution_args.try_into()?;
-
-            walltime::perform(bench_name_and_uri, args.command, &execution_options)?;
-        }
-        Some(MeasurementMode::Memory) => {
-            analysis::perform(bench_name_and_uri, args.command)?;
-        }
-        Some(MeasurementMode::Simulation) => {
-            bail!("Simulation measurement mode is not yet supported by exec-harness");
-        }
-    }
+    let measurement_mode = args.measurement_mode;
+
+    // Determine if we're in stdin mode or CLI mode
+    let commands = match args.command.as_slice() {
+        [single] if single == "-" => read_commands_from_stdin()?,
+        [] => bail!("No command provided"),
+        _ => vec![BenchmarkCommand {
+            command: args.command,
+            name: args.name,
+            walltime_args: args.walltime_args,
+        }],
+    };
+
+    execute_benchmarks(commands, measurement_mode)?;
 
     Ok(())
 }
diff --git a/crates/exec-harness/src/walltime/config.rs b/crates/exec-harness/src/walltime/config.rs
index ccb87001..d36fa569 100644
--- a/crates/exec-harness/src/walltime/config.rs
+++ b/crates/exec-harness/src/walltime/config.rs
@@ -1,4 +1,5 @@
 use crate::prelude::*;
+use serde::{Deserialize, Serialize};
 use std::time::Duration;
 
 const DEFAULT_WARMUP_TIME_NS: u64 = 1_000_000_000; // 1 second
@@ -27,7 +28,7 @@ fn parse_duration_to_ns(s: &str) -> Result {
 ///
 /// ⚠️ Make sure to update WalltimeExecutionArgs::to_cli_args() when fields change, else the runner
 /// will not properly forward arguments
-#[derive(Debug, Clone, Default, clap::Args)]
+#[derive(Debug, Clone, Default, clap::Args, Serialize, Deserialize)]
 pub struct WalltimeExecutionArgs {
     /// Duration of the warmup phase before measurement starts.
     /// During warmup, the benchmark runs to stabilize performance (e.g., JIT compilation, cache warming).
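A minimal sketch of a caller driving the new stdin mode, assuming an `exec-harness` binary on `PATH`; the payload is the illustrative one shown above. In the real flow the runner builds this invocation through `build_pipe_command` (added below in `src/exec/multi_targets.rs`) rather than spawning the harness directly:

```rust
use std::io::Write;
use std::process::{Command, Stdio};

fn main() -> std::io::Result<()> {
    // Illustrative payload; see the BenchmarkCommand JSON shape above.
    let payload = r#"[{"command": ["cargo", "build", "--release"], "name": "release-build"}]"#;

    // "-" as the only trailing argument switches exec-harness into stdin mode.
    let mut child = Command::new("exec-harness")
        .arg("-")
        .stdin(Stdio::piped())
        .spawn()?;

    // Write the JSON array and drop the handle so read_commands_from_stdin sees EOF.
    child
        .stdin
        .take()
        .expect("stdin was piped")
        .write_all(payload.as_bytes())?;

    let status = child.wait()?;
    assert!(status.success(), "exec-harness exited with {status}");
    Ok(())
}
```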
diff --git a/crates/exec-harness/src/walltime/mod.rs b/crates/exec-harness/src/walltime/mod.rs
index 7fadc1f9..c32a52c5 100644
--- a/crates/exec-harness/src/walltime/mod.rs
+++ b/crates/exec-harness/src/walltime/mod.rs
@@ -6,35 +6,42 @@ pub use config::WalltimeExecutionArgs;
 use runner_shared::walltime_results::WalltimeBenchmark;
 pub use runner_shared::walltime_results::WalltimeResults;
 
+use crate::BenchmarkCommand;
 use crate::prelude::*;
 use crate::uri::NameAndUri;
+use crate::uri::generate_name_and_uri;
 use codspeed::instrument_hooks::InstrumentHooks;
 use std::process::Command;
 
-pub fn perform(
-    name_and_uri: NameAndUri,
-    command: Vec<String>,
-    execution_options: &ExecutionOptions,
-) -> Result<()> {
-    let NameAndUri {
-        name: bench_name,
-        uri: bench_uri,
-    } = name_and_uri;
-
-    let times_per_round_ns = run_rounds(bench_uri.clone(), command, execution_options)?;
-
-    // Collect walltime results
-    let max_time_ns = times_per_round_ns.iter().copied().max();
-
-    let walltime_benchmark = WalltimeBenchmark::from_runtime_data(
-        bench_name.clone(),
-        bench_uri.clone(),
-        vec![1; times_per_round_ns.len()],
-        times_per_round_ns,
-        max_time_ns,
-    );
-
-    let walltime_results = WalltimeResults::from_benchmarks(vec![walltime_benchmark])
+pub fn perform(commands: Vec<BenchmarkCommand>) -> Result<()> {
+    let mut walltime_benchmarks = Vec::with_capacity(commands.len());
+
+    for cmd in commands {
+        let name_and_uri = generate_name_and_uri(&cmd.name, &cmd.command);
+        let execution_options: ExecutionOptions = cmd.walltime_args.try_into()?;
+
+        let NameAndUri {
+            name: bench_name,
+            uri: bench_uri,
+        } = name_and_uri;
+
+        let times_per_round_ns = run_rounds(bench_uri.clone(), cmd.command, &execution_options)?;
+
+        // Collect walltime results
+        let max_time_ns = times_per_round_ns.iter().copied().max();
+
+        let walltime_benchmark = WalltimeBenchmark::from_runtime_data(
+            bench_name.clone(),
+            bench_uri.clone(),
+            vec![1; times_per_round_ns.len()],
+            times_per_round_ns,
+            max_time_ns,
+        );
+
+        walltime_benchmarks.push(walltime_benchmark);
+    }
+
+    let walltime_results = WalltimeResults::from_benchmarks(walltime_benchmarks)
         .expect("Failed to create walltime results");
 
     walltime_results
@@ -56,7 +63,7 @@ fn run_rounds(
     let warmup_time_ns = config.warmup_time_ns;
     let hooks = InstrumentHooks::instance();
 
-    let do_one_round = |times_per_round_ns: &mut Vec<u128>| {
+    let do_one_round = |times_per_round_ns: &mut Vec<u128>, add_markers: bool| {
         let mut child = Command::new(&command[0])
             .args(&command[1..])
             .spawn()
            .context("Failed to spawn command")?;
@@ -67,27 +74,28 @@
             .context("Failed to wait for command to finish")?;
         let bench_round_end_ts_ns = InstrumentHooks::current_timestamp();
 
-        hooks.add_benchmark_timestamps(bench_round_start_ts_ns, bench_round_end_ts_ns);
+
+        if add_markers {
+            hooks.add_benchmark_timestamps(bench_round_start_ts_ns, bench_round_end_ts_ns);
+        }
+
+        times_per_round_ns.push((bench_round_end_ts_ns - bench_round_start_ts_ns) as u128);
 
         if !status.success() {
             bail!("Command exited with non-zero status: {status}");
         }
 
-        times_per_round_ns.push((bench_round_end_ts_ns - bench_round_start_ts_ns) as u128);
-
         Ok(())
     };
 
     // Compute the number of rounds to perform, either from warmup or directly from config
+    hooks.start_benchmark().unwrap();
     let rounds_to_perform = if warmup_time_ns > 0 {
         let mut warmup_times_ns = Vec::new();
 
         let warmup_start_ts_ns = InstrumentHooks::current_timestamp();
-        hooks.start_benchmark().unwrap();
         while InstrumentHooks::current_timestamp() < warmup_start_ts_ns + warmup_time_ns {
-            do_one_round(&mut warmup_times_ns)?;
+            do_one_round(&mut warmup_times_ns, false)?;
         }
-        hooks.stop_benchmark().unwrap();
         let warmup_end_ts_ns = InstrumentHooks::current_timestamp();
 
         if let [single_warmup_round_duration_ns] = warmup_times_ns.as_slice() {
@@ -98,6 +106,8 @@
                     "Warmup duration ({single_warmup_round_duration_ns} ns) exceeded or met max_time ({time_ns} ns). No more rounds will be performed."
                 );
                 // Mark benchmark as executed for the runner to register
+                hooks.add_benchmark_timestamps(warmup_start_ts_ns, warmup_end_ts_ns);
+                hooks.stop_benchmark().unwrap();
                 hooks.set_executed_benchmark(&bench_uri).unwrap();
                 return Ok(warmup_times_ns);
             }
@@ -170,9 +180,8 @@
     let round_start_ts_ns = InstrumentHooks::current_timestamp();
 
     let mut times_per_round_ns = Vec::with_capacity(rounds_to_perform.try_into().unwrap());
-    hooks.start_benchmark().unwrap();
     for round in 0..rounds_to_perform {
-        do_one_round(&mut times_per_round_ns)?;
+        do_one_round(&mut times_per_round_ns, true)?;
 
         // Check if we've exceeded max time
         let max_time_ns = match &config.max {
diff --git a/src/exec/mod.rs b/src/exec/mod.rs
index 36599193..8a3989de 100644
--- a/src/exec/mod.rs
+++ b/src/exec/mod.rs
@@ -9,6 +9,7 @@ use crate::run::uploader::UploadResult;
 use clap::Args;
 use std::path::Path;
 
+pub mod multi_targets;
 mod poll_results;
 
 /// We temporarily force this name for all exec runs
@@ -78,8 +79,8 @@ pub async fn run(
     setup_cache_dir: Option<&Path>,
 ) -> Result<()> {
     let merged_args = args.merge_with_project_config(project_config);
-
     let config = crate::executor::Config::try_from(merged_args)?;
+
     let mut execution_context = executor::ExecutionContext::try_from((config, codspeed_config))?;
     debug!("config: {:#?}", execution_context.config);
     let executor = executor::get_executor_from_mode(
diff --git a/src/exec/multi_targets.rs b/src/exec/multi_targets.rs
new file mode 100644
index 00000000..b7c25c62
--- /dev/null
+++ b/src/exec/multi_targets.rs
@@ -0,0 +1,84 @@
+use super::EXEC_HARNESS_COMMAND;
+use crate::prelude::*;
+use crate::project_config::Target;
+use crate::project_config::WalltimeOptions;
+use exec_harness::BenchmarkCommand;
+
+/// Convert targets from project config to exec-harness JSON input format
+pub fn targets_to_exec_harness_json(
+    targets: &[Target],
+    default_walltime: Option<&WalltimeOptions>,
+) -> Result<String> {
+    let inputs: Vec<BenchmarkCommand> = targets
+        .iter()
+        .map(|target| {
+            // Parse the exec string into command parts
+            let command = shell_words::split(&target.exec)
+                .with_context(|| format!("Failed to parse command: {}", target.exec))?;
+
+            // Merge target-specific walltime options with defaults
+            let target_walltime = target.options.as_ref().and_then(|o| o.walltime.as_ref());
+            let walltime_args = merge_walltime_options(default_walltime, target_walltime);
+
+            Ok(BenchmarkCommand {
+                command,
+                name: target.name.clone(),
+                walltime_args,
+            })
+        })
+        .collect::<Result<Vec<_>>>()?;
+
+    serde_json::to_string(&inputs).context("Failed to serialize targets to JSON")
+}
+
+/// Merge default walltime options with target-specific overrides
+fn merge_walltime_options(
+    default: Option<&WalltimeOptions>,
+    target: Option<&WalltimeOptions>,
+) -> exec_harness::walltime::WalltimeExecutionArgs {
+    let default_args = default.map(walltime_options_to_args);
+    let target_args = target.map(walltime_options_to_args);
+
+    match (default_args, target_args) {
+        (None, None) => exec_harness::walltime::WalltimeExecutionArgs::default(),
+        (Some(d), None) => d,
+        (None, Some(t)) => t,
+        (Some(d), Some(t)) => exec_harness::walltime::WalltimeExecutionArgs {
+            warmup_time: t.warmup_time.or(d.warmup_time),
+            max_time: t.max_time.or(d.max_time),
+            min_time: t.min_time.or(d.min_time),
+            max_rounds: t.max_rounds.or(d.max_rounds),
+            min_rounds: t.min_rounds.or(d.min_rounds),
+        },
+    }
+}
+
+/// Convert project config WalltimeOptions to exec-harness WalltimeExecutionArgs
+fn walltime_options_to_args(
+    opts: &WalltimeOptions,
+) -> exec_harness::walltime::WalltimeExecutionArgs {
+    exec_harness::walltime::WalltimeExecutionArgs {
+        warmup_time: opts.warmup_time.clone(),
+        max_time: opts.max_time.clone(),
+        min_time: opts.min_time.clone(),
+        max_rounds: opts.max_rounds,
+        min_rounds: opts.min_rounds,
+    }
+}
+
+/// Build a command that pipes targets JSON to exec-harness via stdin
+pub fn build_pipe_command(
+    targets: &[Target],
+    default_walltime: Option<&WalltimeOptions>,
+) -> Result<Vec<String>> {
+    let json = targets_to_exec_harness_json(targets, default_walltime)?;
+    // Use a heredoc to safely pass the JSON to exec-harness
+    Ok(vec![
+        EXEC_HARNESS_COMMAND.to_owned(),
+        "-".to_owned(),
+        "<<".to_owned(),
+        "'CODSPEED_EOF'\n".to_owned(),
+        json,
+        "\nCODSPEED_EOF".to_owned(),
+    ])
+}
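A sketch of how the new module could be exercised from a unit test in the same crate; the target values are invented, and the assertions just restate the argv layout produced by `build_pipe_command` above (the executor is assumed to join those pieces into a single shell command line):

```rust
#[cfg(test)]
mod multi_targets_sketch {
    use super::*;

    #[test]
    fn builds_stdin_pipe_command_for_one_target() -> Result<()> {
        let targets = vec![Target {
            name: Some("build".to_string()),
            exec: "cargo build --release".to_string(),
            options: None,
        }];

        // No root-level walltime defaults, so WalltimeExecutionArgs::default() applies.
        let argv = build_pipe_command(&targets, None)?;

        // exec-harness runs in stdin mode, with the serialized targets wrapped
        // in a CODSPEED_EOF heredoc.
        assert_eq!(argv[0], EXEC_HARNESS_COMMAND);
        assert_eq!(argv[1], "-");
        assert_eq!(argv[2], "<<");
        assert!(argv[4].contains("\"cargo\""));
        Ok(())
    }
}
```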
diff --git a/src/executor/config.rs b/src/executor/config.rs
index 362a6c1f..e0673aa8 100644
--- a/src/executor/config.rs
+++ b/src/executor/config.rs
@@ -125,9 +125,9 @@ impl TryFrom for Config {
     }
 }
 
-impl TryFrom<crate::exec::ExecArgs> for Config {
-    type Error = Error;
-    fn try_from(args: crate::exec::ExecArgs) -> Result<Self> {
+impl Config {
+    /// Create a Config from ExecArgs with a custom command (used for targets mode)
+    pub fn try_from_with_command(args: crate::exec::ExecArgs, command: String) -> Result<Self> {
         let raw_upload_url = args
             .shared
             .upload_url
@@ -135,8 +135,6 @@
         let upload_url = Url::parse(&raw_upload_url)
             .map_err(|e| anyhow!("Invalid upload URL: {raw_upload_url}, {e}"))?;
 
-        let wrapped_command = wrap_with_exec_harness(&args.walltime_args, &args.command);
-
         Ok(Self {
             upload_url,
             token: args.shared.token,
@@ -150,7 +148,7 @@
             instruments: Instruments { mongodb: None }, // exec doesn't support MongoDB
             perf_unwinding_mode: args.shared.perf_run_args.perf_unwinding_mode,
             enable_perf: args.shared.perf_run_args.enable_perf,
-            command: wrapped_command,
+            command,
             profile_folder: args.shared.profile_folder,
             skip_upload: args.shared.skip_upload,
             skip_run: args.shared.skip_run,
@@ -160,6 +158,14 @@
     }
 }
 
+impl TryFrom<crate::exec::ExecArgs> for Config {
+    type Error = Error;
+    fn try_from(args: crate::exec::ExecArgs) -> Result<Self> {
+        let wrapped_command = wrap_with_exec_harness(&args.walltime_args, &args.command);
+        Self::try_from_with_command(args, wrapped_command)
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use crate::instruments::MongoDBConfig;
diff --git a/src/executor/shared/fifo.rs b/src/executor/shared/fifo.rs
index 60b5825b..b9e5ac04 100644
--- a/src/executor/shared/fifo.rs
+++ b/src/executor/shared/fifo.rs
@@ -156,7 +156,7 @@ impl RunnerFifo {
             }
             Err(_) => continue,
         };
-        trace!("Received command: {cmd:?}");
+        debug!("Received command: {cmd:?}");
 
         match &cmd {
             FifoCommand::CurrentBenchmark { pid, uri } => {
diff --git a/src/project_config/interfaces.rs b/src/project_config/interfaces.rs
new file mode 100644
index 00000000..9fe720b9
--- /dev/null
+++ b/src/project_config/interfaces.rs
@@ -0,0 +1,60 @@
+use crate::runner_mode::RunnerMode;
+use serde::{Deserialize, Serialize};
+
+/// Project-level configuration from codspeed.yaml file
+///
+/// This configuration provides default options for the run and exec commands.
+/// CLI arguments always take precedence over config file values.
+#[derive(Debug, Deserialize, Serialize, PartialEq)]
+#[serde(rename_all = "kebab-case")]
+pub struct ProjectConfig {
+    /// Default options to apply to all benchmark runs
+    pub options: Option<ProjectOptions>,
+    /// List of benchmark targets to execute
+    pub targets: Option<Vec<Target>>,
+}
+
+/// A benchmark target to execute
+#[derive(Debug, Deserialize, Serialize, PartialEq)]
+#[serde(rename_all = "kebab-case")]
+pub struct Target {
+    /// Optional name for this target
+    pub name: Option<String>,
+    /// Command to execute
+    pub exec: String,
+    /// Target-specific options
+    pub options: Option<TargetOptions>,
+}
+
+#[derive(Debug, Deserialize, Serialize, PartialEq)]
+pub struct TargetOptions {
+    pub walltime: Option<WalltimeOptions>,
+}
+
+/// Root-level options that apply to all benchmark runs unless overridden by CLI
+#[derive(Debug, Deserialize, Serialize, PartialEq)]
+#[serde(rename_all = "kebab-case")]
+pub struct ProjectOptions {
+    /// Walltime execution configuration
+    pub walltime: Option<WalltimeOptions>,
+    /// Working directory where commands will be executed (relative to config file)
+    pub working_directory: Option<String>,
+    /// Runner mode (walltime, memory, or simulation)
+    pub mode: Option<RunnerMode>,
+}
+
+/// Walltime execution options matching WalltimeExecutionArgs structure
+#[derive(Debug, Deserialize, Serialize, PartialEq)]
+#[serde(rename_all = "kebab-case")]
+pub struct WalltimeOptions {
+    /// Duration of warmup phase (e.g., "1s", "500ms")
+    pub warmup_time: Option<String>,
+    /// Maximum total execution time
+    pub max_time: Option<String>,
+    /// Minimum total execution time
+    pub min_time: Option<String>,
+    /// Maximum number of rounds
+    pub max_rounds: Option,
+    /// Minimum number of rounds
+    pub min_rounds: Option,
+}
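An illustrative `codspeed.yaml` matching the structures above (kebab-case keys per the serde attributes; commands, names, and durations are made up, and `mode: walltime` assumes `RunnerMode` serializes to the lowercase names its doc comment lists):

```yaml
options:
  mode: walltime
  working-directory: ./benches
  walltime:
    warmup-time: 1s
    max-time: 30s

targets:
  - name: sort-large
    exec: ./target/release/bench sort --size 100000
    options:
      walltime:
        max-rounds: 20
  - exec: ./target/release/bench hash
```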
diff --git a/src/project_config/mod.rs b/src/project_config/mod.rs
index b009c649..93978b50 100644
--- a/src/project_config/mod.rs
+++ b/src/project_config/mod.rs
@@ -1,49 +1,11 @@
 use crate::prelude::*;
-use crate::runner_mode::RunnerMode;
-use serde::{Deserialize, Serialize};
 use std::fs;
 use std::path::{Path, PathBuf};
 
+mod interfaces;
 pub mod merger;
 
-/// Project-level configuration from codspeed.yaml file
-///
-/// This configuration provides default options for the run and exec commands.
-/// CLI arguments always take precedence over config file values.
-#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)]
-#[serde(rename_all = "kebab-case")]
-pub struct ProjectConfig {
-    /// Default options to apply to all benchmark runs
-    pub options: Option<ProjectOptions>,
-}
-
-/// Root-level options that apply to all benchmark runs unless overridden by CLI
-#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)]
-#[serde(rename_all = "kebab-case")]
-pub struct ProjectOptions {
-    /// Walltime execution configuration
-    pub walltime: Option<WalltimeOptions>,
-    /// Working directory where commands will be executed (relative to config file)
-    pub working_directory: Option<String>,
-    /// Runner mode (walltime, memory, or simulation)
-    pub mode: Option<RunnerMode>,
-}
-
-/// Walltime execution options matching WalltimeExecutionArgs structure
-#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)]
-#[serde(rename_all = "kebab-case")]
-pub struct WalltimeOptions {
-    /// Duration of warmup phase (e.g., "1s", "500ms")
-    pub warmup_time: Option<String>,
-    /// Maximum total execution time
-    pub max_time: Option<String>,
-    /// Minimum total execution time
-    pub min_time: Option<String>,
-    /// Maximum number of rounds
-    pub max_rounds: Option,
-    /// Minimum number of rounds
-    pub min_rounds: Option,
-}
+pub use interfaces::*;
 
 /// Config file names in priority order
 const CONFIG_FILENAMES: &[&str] = &[
@@ -211,6 +173,7 @@ impl ProjectConfig {
 #[cfg(test)]
 mod tests {
     use super::*;
+    use crate::runner_mode::RunnerMode;
     use tempfile::TempDir;
 
     #[test]
@@ -277,6 +240,7 @@ options:
                 working_directory: None,
                 mode: None,
             }),
+            targets: None,
         };
 
         let result = config.validate();
@@ -303,6 +267,7 @@ options:
                 working_directory: None,
                 mode: None,
             }),
+            targets: None,
         };
 
         let result = config.validate();
@@ -329,6 +294,7 @@ options:
                 working_directory: Some("./bench".to_string()),
                 mode: Some(RunnerMode::Walltime),
             }),
+            targets: None,
         };
 
         assert!(config.validate().is_ok());
diff --git a/src/run/mod.rs b/src/run/mod.rs
index 2b65ea96..6997e788 100644
--- a/src/run/mod.rs
+++ b/src/run/mod.rs
@@ -194,6 +194,21 @@ impl RunArgs {
     }
 }
 
+use crate::project_config::Target;
+use crate::project_config::WalltimeOptions;
+/// Determines the execution mode based on CLI args and project config
+enum RunTarget<'a> {
+    /// Single command from CLI args
+    SingleCommand(RunArgs),
+    /// Multiple targets from project config
+    /// Note: for now, only `codspeed exec` targets are supported in the project config
+    ConfigTargets {
+        args: RunArgs,
+        targets: &'a [Target],
+        default_walltime: Option<&'a WalltimeOptions>,
+    },
+}
+
 pub async fn run(
     args: RunArgs,
     api_client: &CodSpeedAPIClient,
@@ -203,9 +218,44 @@
 ) -> Result<()> {
     let output_json = args.message_format == Some(MessageFormat::Json);
 
-    let merged_args = args.merge_with_project_config(project_config);
+    let args = args.merge_with_project_config(project_config);
+
+    let run_target = if args.command.is_empty() {
+        // No command provided - check for targets in project config
+        let targets = project_config
+            .and_then(|c| c.targets.as_ref())
+            .filter(|t| !t.is_empty())
+            .ok_or_else(|| {
+                anyhow!("No command provided and no targets defined in codspeed.yaml")
+            })?;
+
+        let default_walltime = project_config
+            .and_then(|c| c.options.as_ref())
+            .and_then(|o| o.walltime.as_ref());
+
+        RunTarget::ConfigTargets {
+            args,
+            targets,
+            default_walltime,
+        }
+    } else {
+        RunTarget::SingleCommand(args)
+    };
+
+    let config = match run_target {
+        RunTarget::SingleCommand(args) => Config::try_from(args)?,
+
+        RunTarget::ConfigTargets {
+            mut args,
+            targets,
+            default_walltime,
+        } => {
+            args.command =
+                crate::exec::multi_targets::build_pipe_command(targets, default_walltime)?;
-    let config = Config::try_from(merged_args)?;
+            Config::try_from(args)?
+        }
+    };
 
     // Create execution context
     let mut execution_context = executor::ExecutionContext::try_from((config, codspeed_config))?;