From 1d09c8fb5ef0680da503e2c80e8aac15d77e019e Mon Sep 17 00:00:00 2001 From: Ryan Gonzalez Date: Mon, 15 Sep 2025 15:44:28 -0500 Subject: [PATCH 1/5] artifacts: Add Artifact[Reader/Writer]::new(AsyncFile) There are cases where we get the files from places that are irrelevant to the shared artifacts code, so those callers need to be able to still place the AsyncFiles inside the wrappers. --- obo-core/src/artifacts.rs | 12 ++++++++++-- obs-gitlab-runner/src/handler.rs | 2 +- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/obo-core/src/artifacts.rs b/obo-core/src/artifacts.rs index 325b45f..473d421 100644 --- a/obo-core/src/artifacts.rs +++ b/obo-core/src/artifacts.rs @@ -40,8 +40,12 @@ pub struct ArtifactWriter { } impl ArtifactWriter { + pub fn new(inner: AsyncFile) -> Self { + Self { inner } + } + #[instrument] - pub async fn new() -> Result { + pub async fn new_anon() -> Result { let file = tokio::task::spawn_blocking(tempfile::tempfile).await??; Ok(Self { inner: AsyncFile::from_std(file), @@ -112,6 +116,10 @@ pub struct ArtifactReader { } impl ArtifactReader { + pub fn new(inner: AsyncFile) -> Self { + Self { inner } + } + pub async fn from_async_file(file: &AsyncFile) -> Result { let inner = AsyncFile::options() .read(true) @@ -234,7 +242,7 @@ pub mod test_support { F: for<'a> SaveCallback<'a, Ret, Err> + Send, P: AsRef + Send, { - let mut writer = ArtifactWriter::new().await?; + let mut writer = ArtifactWriter::new_anon().await?; let ret = func(&mut writer).await?; self.artifacts .insert(path.as_ref().to_owned(), writer.into_reader().await?); diff --git a/obs-gitlab-runner/src/handler.rs b/obs-gitlab-runner/src/handler.rs index ae825c0..b307b91 100644 --- a/obs-gitlab-runner/src/handler.rs +++ b/obs-gitlab-runner/src/handler.rs @@ -153,7 +153,7 @@ impl ArtifactDirectory for GitLabArtifacts<'_> { F: for<'a> SaveCallback<'a, Ret, Err> + Send, P: AsRef + Send, { - let mut writer = ArtifactWriter::new().await?; + let mut writer = 
ArtifactWriter::new_anon().await?; let ret = func(&mut writer).await?; self.artifacts .insert(path.as_ref().to_owned(), writer.into_reader().await?); From cd7f7ce89d745602717ab735fe5f5b598b0b3c02 Mon Sep 17 00:00:00 2001 From: Ryan Gonzalez Date: Wed, 6 Aug 2025 15:48:45 -0500 Subject: [PATCH 2/5] Add a standalone CLI This adds a new crate `obo-cli` that behaves much like the commands already available to the runner, but as a separate entity. As replacement for child pipeline generation, the `generate-monitor` command will create a JSON table containing the commands needed to run monitoring and download binaries for a given repo/arch combination. --- .github/workflows/ci.yml | 7 +- Cargo.lock | 38 +++ Cargo.toml | 1 + Dockerfile.obo-cli | 18 ++ Dockerfile => Dockerfile.obs-gitlab-runner | 2 +- obo-cli/Cargo.toml | 37 +++ obo-cli/src/lib.rs | 234 ++++++++++++++ obo-cli/src/main.rs | 120 ++++++++ obo-cli/tests/test_cli.rs | 335 +++++++++++++++++++++ 9 files changed, 789 insertions(+), 3 deletions(-) create mode 100644 Dockerfile.obo-cli rename Dockerfile => Dockerfile.obs-gitlab-runner (92%) create mode 100644 obo-cli/Cargo.toml create mode 100644 obo-cli/src/lib.rs create mode 100644 obo-cli/src/main.rs create mode 100644 obo-cli/tests/test_cli.rs diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 195cc42..1091693 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -8,7 +8,6 @@ on: env: REGISTRY: ghcr.io - IMAGE_NAME: ${{ github.repository }} jobs: check: @@ -87,6 +86,9 @@ jobs: name: Docker image build runs-on: ubuntu-latest needs: [check, fmt, test, clippy] + strategy: + matrix: + target: [obo-cli, obs-gitlab-runner] permissions: contents: read @@ -102,12 +104,13 @@ jobs: - id: meta uses: docker/metadata-action@v5 with: - images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} + images: ${{ env.REGISTRY }}/${{ github.repository_owner }}/${{ matrix.target }} - name: Build and push Docker image uses: docker/build-push-action@v6 if: 
github.event_name != 'pull_request' with: context: . + file: Dockerfile.${{ matrix.target }} push: true tags: ${{ steps.meta.outputs.tags }} labels: ${{ steps.meta.outputs.labels }} diff --git a/Cargo.lock b/Cargo.lock index 8308f3e..638c411 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1493,6 +1493,33 @@ dependencies = [ "memchr", ] +[[package]] +name = "obo-cli" +version = "0.1.8" +dependencies = [ + "async-trait", + "camino", + "claims", + "clap", + "color-eyre", + "gitlab-runner-mock", + "obo-core", + "obo-test-support", + "obo-tests", + "open-build-service-api", + "open-build-service-mock", + "rstest", + "serde", + "serde_json", + "tempfile", + "tokio", + "tokio-stream", + "tracing", + "tracing-error", + "tracing-subscriber", + "url", +] + [[package]] name = "obo-core" version = "0.1.0" @@ -2582,6 +2609,17 @@ dependencies = [ "tokio", ] +[[package]] +name = "tokio-stream" +version = "0.1.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047" +dependencies = [ + "futures-core", + "pin-project-lite", + "tokio", +] + [[package]] name = "tokio-util" version = "0.7.16" diff --git a/Cargo.toml b/Cargo.toml index 176b3e2..f37861b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,7 @@ [workspace] resolver = "3" members = [ + "obo-cli", "obo-core", "obo-tests", "obo-test-support", diff --git a/Dockerfile.obo-cli b/Dockerfile.obo-cli new file mode 100644 index 0000000..dddc252 --- /dev/null +++ b/Dockerfile.obo-cli @@ -0,0 +1,18 @@ +FROM rust:1.88.0-slim-bookworm AS build +ARG DEBIAN_FRONTEND=noninteractive + +ADD . 
/app +WORKDIR /app +RUN apt-get update \ + && apt-get install -y pkg-config libssl-dev \ + && cargo build -p obo-cli --release + +FROM debian:bookworm-slim +ARG DEBIAN_FRONTEND=noninteractive + +RUN apt-get update \ + && apt-get install -y libssl3 ca-certificates \ + && rm -rf /var/lib/apt/lists/ +COPY --from=build /app/target/release/obo /usr/local/bin/ + +ENTRYPOINT ["/usr/local/bin/obo"] diff --git a/Dockerfile b/Dockerfile.obs-gitlab-runner similarity index 92% rename from Dockerfile rename to Dockerfile.obs-gitlab-runner index 392bc22..b5ca074 100644 --- a/Dockerfile +++ b/Dockerfile.obs-gitlab-runner @@ -5,7 +5,7 @@ ADD . /app WORKDIR /app RUN apt-get update \ && apt-get install -y pkg-config libssl-dev \ - && cargo build --release + && cargo build -p obs-gitlab-runner --release FROM debian:bookworm-slim ARG DEBIAN_FRONTEND=noninteractive diff --git a/obo-cli/Cargo.toml b/obo-cli/Cargo.toml new file mode 100644 index 0000000..827057d --- /dev/null +++ b/obo-cli/Cargo.toml @@ -0,0 +1,37 @@ +[package] +name = "obo-cli" +description = "OBS Build Orchestrator — command-line frontend" +version = "0.1.8" +edition = "2024" +license = "MIT OR Apache-2.0" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[[bin]] +name = "obo" +path = "src/main.rs" + +[dependencies] +async-trait.workspace = true +camino.workspace = true +clap.workspace = true +color-eyre.workspace = true +obo-core = { path = "../obo-core" } +open-build-service-api.workspace = true +serde.workspace = true +serde_json.workspace = true +tempfile.workspace = true +tokio.workspace = true +tracing.workspace = true +tracing-error = "0.2" +tracing-subscriber = { version = "0.3", features = ["default", "json"] } +url = "2.5" + +[dev-dependencies] +claims.workspace = true +gitlab-runner-mock = "0.2.1" +obo-test-support = { path = "../obo-test-support" } +obo-tests = { path = "../obo-tests" } +open-build-service-mock.workspace = true +rstest.workspace = true 
+tokio-stream = { version = "0.1.17", features = ["io-util"] } diff --git a/obo-cli/src/lib.rs b/obo-cli/src/lib.rs new file mode 100644 index 0000000..57b800d --- /dev/null +++ b/obo-cli/src/lib.rs @@ -0,0 +1,234 @@ +use std::time::Duration; + +use async_trait::async_trait; +use camino::{Utf8Path, Utf8PathBuf}; +use clap::{Args, Subcommand}; +use color_eyre::eyre::{Context, Report, Result, bail, eyre}; +use obo_core::{ + actions::{ + Actions, DEFAULT_BUILD_INFO, DEFAULT_BUILD_LOG, DownloadBinariesAction, DputAction, + LOG_TAIL_2MB, MonitorAction, ObsBuildInfo, PruneAction, + }, + artifacts::{ArtifactDirectory, ArtifactReader, ArtifactWriter, MissingArtifact, SaveCallback}, + build_meta::RepoArch, + monitor::PackageMonitoringOptions, + outputln, +}; +use open_build_service_api as obs; +use serde::{Deserialize, Serialize}; +use tempfile::NamedTempFile; +use tokio::{ + fs::File as AsyncFile, + io::{AsyncBufReadExt, AsyncWriteExt, BufReader}, +}; + +pub const DEFAULT_MONITOR_TABLE: &str = "obs-monitor.json"; + +#[derive(Debug, Deserialize, Serialize)] +pub struct MonitorCommands { + pub monitor: String, + pub download_binaries: Option, +} + +#[derive(Debug, Deserialize, Serialize)] +pub struct MonitorEntry { + #[serde(flatten)] + pub repo_arch: RepoArch, + pub commands: MonitorCommands, +} + +#[derive(Debug, Deserialize, Serialize)] +pub struct MonitorTable { + pub entries: Vec, +} + +#[derive(Args)] +pub struct GenerateMonitorAction { + #[clap(long, default_value_t = DEFAULT_BUILD_INFO.to_owned())] + build_info: String, + #[clap(long, default_value_t = DEFAULT_MONITOR_TABLE.to_owned())] + monitor_out: String, + #[clap(long, default_value_t = DEFAULT_BUILD_LOG.into())] + build_log_out: String, + #[clap(long = "download-build-results-to")] + build_results_dir: Option, +} + +#[derive(Subcommand)] +pub enum CliAction { + Dput(DputAction), + Monitor { + #[clap(flatten)] + args: MonitorAction, + + // These are needed by the integration tests. 
+ #[clap(long, hide = true, env = "OBO_TEST_LOG_TAIL", default_value_t = LOG_TAIL_2MB)] + log_tail: u64, + #[clap(long, hide = true, env = "OBO_TEST_SLEEP_ON_BUILDING_MS")] + sleep_on_building_ms: Option, + #[clap(long, hide = true, env = "OBO_TEST_SLEEP_ON_OLD_STATUS_MS")] + sleep_on_old_status_ms: Option, + }, + GenerateMonitor(GenerateMonitorAction), + DownloadBinaries(DownloadBinariesAction), + Prune(PruneAction), +} + +#[derive(Default)] +pub struct LocalFsArtifacts(pub Utf8PathBuf); + +#[async_trait] +impl ArtifactDirectory for LocalFsArtifacts { + async fn open(&self, path: impl AsRef + Send) -> Result { + let path = self.0.join(path.as_ref()); + AsyncFile::open(&path) + .await + .map(ArtifactReader::new) + .map_err(|e| { + if e.kind() == std::io::ErrorKind::NotFound { + eyre!(MissingArtifact(path)) + } else { + eyre!(e) + } + }) + } + + async fn save_with(&mut self, path: P, func: F) -> Result + where + Report: From, + Ret: Send, + Err: Send, + F: for<'a> SaveCallback<'a, Ret, Err> + Send, + P: AsRef + Send, + { + let path = self.0.join(path.as_ref()); + let parent = path.parent().unwrap_or_else(|| Utf8Path::new(".")); + tokio::fs::create_dir_all(&parent) + .await + .wrap_err_with(|| format!("Failed to create parents of '{path}'"))?; + + let Some(basename) = path.file_name() else { + bail!("Invalid path: {path}"); + }; + let temp = NamedTempFile::with_prefix_in(basename, parent) + .wrap_err("Failed to create temporary file")?; + + let mut writer = ArtifactWriter::new(AsyncFile::from_std(temp.as_file().try_clone()?)); + let ret = func(&mut writer).await?; + + writer.flush().await?; + temp.persist(&path)?; + Ok(ret) + } +} + +pub struct Handler { + actions: Actions, + artifacts: LocalFsArtifacts, +} + +impl Handler { + pub fn new(client: obs::Client, artifacts_dir: Utf8PathBuf) -> Self { + Self { + actions: Actions { client }, + artifacts: LocalFsArtifacts(artifacts_dir), + } + } + + async fn generate_monitor(&mut self, args: GenerateMonitorAction) -> 
Result<()> { + let build_info_data = self.artifacts.read_string(&args.build_info).await?; + let build_info: ObsBuildInfo = serde_json::from_str(&build_info_data) + .wrap_err("Failed to parse provided build info file")?; + + let rev = build_info + .rev + .ok_or_else(|| eyre!("Build revision was not set"))?; + let srcmd5 = build_info + .srcmd5 + .ok_or_else(|| eyre!("Build srcmd5 was not set"))?; + + let mut table = MonitorTable { entries: vec![] }; + for enabled_repo in build_info.enabled_repos { + table.entries.push(MonitorEntry { + repo_arch: enabled_repo.repo_arch.clone(), + commands: MonitorCommands { + monitor: MonitorAction { + project: build_info.project.clone(), + package: build_info.package.clone(), + repository: enabled_repo.repo_arch.repo.clone(), + arch: enabled_repo.repo_arch.arch.clone(), + rev: rev.clone(), + srcmd5: srcmd5.clone(), + prev_endtime_for_commit: enabled_repo.prev_endtime_for_commit, + build_log_out: args.build_log_out.clone(), + } + .generate_command(), + download_binaries: args.build_results_dir.clone().map(|build_results_dir| { + DownloadBinariesAction { + project: build_info.project.clone(), + package: build_info.package.clone(), + repository: enabled_repo.repo_arch.repo, + arch: enabled_repo.repo_arch.arch, + build_results_dir, + } + .generate_command() + }), + }, + }); + } + + let data = serde_json::to_string(&table).wrap_err("Failed to serialize data")?; + + self.artifacts + .write(&args.monitor_out, data.as_bytes()) + .await?; + outputln!("Wrote monitor file '{}'.", args.monitor_out); + + Ok(()) + } + + pub async fn run(&mut self, action: CliAction) -> Result<()> { + match action { + CliAction::Dput(args) => self.actions.dput(args, &mut self.artifacts).await?, + CliAction::Monitor { + log_tail, + sleep_on_building_ms, + sleep_on_old_status_ms, + args, + } => { + let mut options = PackageMonitoringOptions::default(); + if let Some(value) = sleep_on_building_ms { + options.sleep_on_building = Duration::from_millis(value); + } + if 
let Some(value) = sleep_on_old_status_ms { + options.sleep_on_old_status = Duration::from_millis(value); + } + + self.actions + .monitor( + args, + options, + |file| async { + let mut lines = BufReader::new(file).lines(); + while let Some(line) = lines.next_line().await? { + eprintln!("{line}"); + } + Ok(()) + }, + log_tail, + &mut self.artifacts, + ) + .await? + } + CliAction::GenerateMonitor(args) => self.generate_monitor(args).await?, + CliAction::DownloadBinaries(args) => { + self.actions + .download_binaries(args, &mut self.artifacts) + .await? + } + CliAction::Prune(args) => self.actions.prune(args, &self.artifacts).await?, + } + + Ok(()) + } +} diff --git a/obo-cli/src/main.rs b/obo-cli/src/main.rs new file mode 100644 index 0000000..dbb0445 --- /dev/null +++ b/obo-cli/src/main.rs @@ -0,0 +1,120 @@ +use std::{fmt, str::FromStr}; + +use camino::Utf8PathBuf; +use clap::Parser; +use color_eyre::eyre::Result; +use obo_cli::{CliAction, Handler}; +use obo_core::logging::{ + get_event_message, is_output_field_in_metadata, is_output_field_set_in_event, +}; +use open_build_service_api as obs; +use tracing::{Event, Subscriber}; +use tracing_subscriber::{ + filter::Targets, + fmt::{FmtContext, FormatEvent, FormatFields, format}, + layer::{self, Filter}, + prelude::*, + registry::LookupSpan, +}; +use url::Url; + +#[derive(Debug, Clone)] +struct TargetsArg { + targets: Targets, + parsed_from: String, +} + +impl FromStr for TargetsArg { + type Err = ::Err; + + fn from_str(s: &str) -> Result { + Targets::from_str(s).map(|targets| TargetsArg { + targets, + parsed_from: s.to_owned(), + }) + } +} + +impl Default for TargetsArg { + fn default() -> Self { + "".parse().unwrap() + } +} + +impl fmt::Display for TargetsArg { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.parsed_from) + } +} + +struct OutputFilter; + +impl Filter for OutputFilter { + fn enabled(&self, meta: &tracing::Metadata<'_>, _cx: &layer::Context<'_, S>) -> bool { + 
is_output_field_in_metadata(meta) + } + + fn event_enabled(&self, event: &Event<'_>, _cx: &layer::Context<'_, S>) -> bool { + is_output_field_set_in_event(event) + } +} + +struct OutputFormatter; + +impl FormatEvent for OutputFormatter +where + S: Subscriber + for<'a> LookupSpan<'a>, + N: for<'a> FormatFields<'a> + 'static, +{ + fn format_event( + &self, + _ctx: &FmtContext<'_, S, N>, + mut writer: format::Writer<'_>, + event: &Event<'_>, + ) -> fmt::Result { + let Some(message) = get_event_message(event) else { + return Ok(()); + }; + writeln!(writer, "{message}") + } +} + +#[derive(Parser)] +struct Args { + #[clap(long, env = "OBO_LOG", default_value_t = TargetsArg::default())] + log: TargetsArg, + + #[clap(long, env = "OBS_SERVER")] + obs_server: Url, + #[clap(long, env = "OBS_USER")] + obs_user: String, + #[clap(long, env = "OBS_PASSWORD")] + obs_password: String, + + #[clap(subcommand)] + action: CliAction, +} + +#[tokio::main] +async fn main() -> Result<()> { + let args = Args::parse(); + + tracing_subscriber::registry() + .with(tracing_error::ErrorLayer::default()) + .with(tracing_subscriber::fmt::layer().with_filter(args.log.targets)) + .with( + tracing_subscriber::fmt::layer() + .event_format(OutputFormatter) + .with_filter(OutputFilter), + ) + .init(); + + color_eyre::install().unwrap(); + + let client = obs::Client::new(args.obs_server, args.obs_user, args.obs_password); + Handler::new(client, Utf8PathBuf::new()) + .run(args.action) + .await?; + + Ok(()) +} diff --git a/obo-cli/tests/test_cli.rs b/obo-cli/tests/test_cli.rs new file mode 100644 index 0000000..e9a9e0f --- /dev/null +++ b/obo-cli/tests/test_cli.rs @@ -0,0 +1,335 @@ +use std::{ + collections::HashMap, + path::Path, + process::{ExitStatus, Stdio}, + sync::Arc, + time::Duration, +}; + +use async_trait::async_trait; +use camino::Utf8Path; +use claims::*; +use obo_cli::{DEFAULT_MONITOR_TABLE, MonitorTable}; +use obo_core::actions::ObsBuildInfo; +use obo_test_support::*; +use obo_tests::*; +use 
rstest::rstest; +use tempfile::TempDir; +use tokio::{ + io::{AsyncBufReadExt, BufReader}, + process::Command, +}; +use tokio_stream::{StreamExt, wrappers::LinesStream}; + +#[derive(Debug, Clone)] +struct CliArtifactsHandle(Arc); + +impl ArtifactsHandle for CliArtifactsHandle {} + +#[derive(Clone, Debug)] +struct CliExecutionResult { + status: ExitStatus, + out: String, + artifacts: Arc, +} + +impl ExecutionResult for CliExecutionResult { + type Artifacts = CliArtifactsHandle; + + fn ok(&self) -> bool { + self.status.success() + } + + fn log(&self) -> String { + self.out.clone() + } + + fn artifacts(&self) -> Self::Artifacts { + CliArtifactsHandle(self.artifacts.clone()) + } +} + +struct CliRunBuilder { + obs_server: String, + script: Vec, + dependencies: Vec>, + timeout: Duration, +} + +#[async_trait] +impl RunBuilder<'_> for CliRunBuilder { + type ArtifactsHandle = CliArtifactsHandle; + type ExecutionResult = CliExecutionResult; + + fn script(mut self, script: &[String]) -> Self { + for line in script { + self.script + .push(format!("{} {line}", env!("CARGO_BIN_EXE_obo"))); + } + self + } + + fn artifacts(mut self, artifacts: Self::ArtifactsHandle) -> Self { + self.dependencies.push(artifacts.0); + self + } + + fn timeout(mut self, timeout: Duration) -> Self { + self.timeout = timeout; + self + } + + // TODO: timeouts!! + async fn go(self) -> Self::ExecutionResult { + let temp = TempDir::new().unwrap(); + + // Symlink all the dependency artifacts into the cwd, and clean them up + // at the end. 
+ let mut dep_files = vec![]; + for dep in &self.dependencies { + let mut reader = tokio::fs::read_dir(dep.path()).await.unwrap(); + while let Some(entry) = reader.next_entry().await.unwrap() { + let dest = temp.path().join(entry.file_name()); + tokio::fs::symlink(entry.path(), &dest).await.unwrap(); + dep_files.push(dest); + } + } + + let mut child = Command::new("sh") + .arg("-exc") + .arg(self.script.join("\n")) + .kill_on_drop(true) + .env("OBS_SERVER", self.obs_server) + .env("OBS_USER", TEST_USER) + .env("OBS_PASSWORD", TEST_PASS) + .env("OBO_TEST_LOG_TAIL", MONITOR_TEST_LOG_TAIL.to_string()) + .env("OBO_TEST_SLEEP_ON_BUILDING_MS", "0") + .env( + "OBO_TEST_SLEEP_ON_OLD_STATUS_MS", + MONITOR_TEST_OLD_STATUS_SLEEP_DURATION + .as_millis() + .to_string(), + ) + .stdin(Stdio::null()) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .current_dir(temp.path()) + .spawn() + .unwrap(); + + let (status, lines) = tokio::time::timeout(self.timeout, async move { + let stdout = BufReader::new(child.stdout.take().unwrap()); + let stderr = BufReader::new(child.stderr.take().unwrap()); + + let mut output = + LinesStream::new(stdout.lines()).merge(LinesStream::new(stderr.lines())); + let mut lines = vec![]; + + while let Some(line) = output.try_next().await.unwrap() { + // Forward the lines back to the output. 
+ eprintln!("{line}"); + lines.push(line); + } + + let status = child.wait().await.unwrap(); + (status, lines) + }) + .await + .unwrap(); + + for file in dep_files { + tokio::fs::remove_file(&file).await.unwrap(); + } + + Self::ExecutionResult { + status, + out: lines.join("\n"), + artifacts: Arc::new(temp), + } + } +} + +struct CliTestContext { + obs: ObsContext, +} + +fn collect_artifacts_from_dir(out: &mut HashMap>, root: &Path, subdir: &Utf8Path) { + for entry in std::fs::read_dir(root.join(subdir.as_std_path())).unwrap() { + let entry = entry.unwrap(); + let name = entry.file_name().into_string().unwrap(); + let ft = entry.file_type().unwrap(); + if ft.is_dir() { + collect_artifacts_from_dir(out, root, &subdir.join(name)); + } else if ft.is_file() { + let contents = std::fs::read(entry.path()) + .unwrap_or_else(|err| panic!("Fetching {}: {err}", entry.path().display())); + out.insert(subdir.join(name).into_string(), contents); + } + } +} + +#[async_trait] +impl TestContext for CliTestContext { + type ArtifactsHandle = CliArtifactsHandle; + type ExecutionResult = CliExecutionResult; + type RunBuilder<'context> = CliRunBuilder; + + fn obs(&self) -> &ObsContext { + &self.obs + } + + async fn inject_artifacts( + &mut self, + artifacts: HashMap>, + ) -> Self::ArtifactsHandle { + let temp = TempDir::new().unwrap(); + + for (name, contents) in artifacts { + tokio::fs::write(temp.path().join(name), contents) + .await + .unwrap(); + } + + CliArtifactsHandle(Arc::new(temp)) + } + + async fn fetch_artifacts(&self, handle: &Self::ArtifactsHandle) -> HashMap> { + let mut ret = HashMap::new(); + collect_artifacts_from_dir(&mut ret, handle.0.path(), Utf8Path::new("")); + ret + } + + fn run(&mut self) -> Self::RunBuilder<'_> { + CliRunBuilder { + obs_server: self.obs.client.url().to_string(), + script: vec![], + dependencies: vec![], + timeout: EXECUTION_DEFAULT_TIMEOUT, + } + } +} + +async fn with_context(func: impl AsyncFnOnce(CliTestContext) -> T) -> T { + let obs_mock = 
create_default_mock().await; + let obs_client = create_default_client(&obs_mock); + + let ctx = CliTestContext { + obs: ObsContext { + client: obs_client, + mock: obs_mock, + }, + }; + + func(ctx).await +} + +async fn test_monitor_table( + context: &mut CliTestContext, + dput: CliArtifactsHandle, + build_info: &ObsBuildInfo, + success: bool, + dput_test: DputTest, + log_test: MonitorLogTest, + download_binaries: bool, +) { + let mut generate_command = "generate-monitor".to_owned(); + if download_binaries { + generate_command += + &format!(" --download-build-results-to {MONITOR_TEST_BUILD_RESULTS_DIR}"); + } + + let generate = context + .run() + .command(generate_command) + .artifacts(dput.clone()) + .go() + .await; + assert!(generate.ok()); + + let results = context.fetch_artifacts(&generate.artifacts()).await; + let table: MonitorTable = assert_ok!(serde_json::from_slice( + results.get(DEFAULT_MONITOR_TABLE).unwrap() + )); + + for enabled in &build_info.enabled_repos { + let entry = table + .entries + .iter() + .find(|m| m.repo_arch == enabled.repo_arch) + .unwrap(); + + let mut script = vec![entry.commands.monitor.clone()]; + if download_binaries { + script.push(entry.commands.download_binaries.clone().unwrap()); + } else { + assert_none!(&entry.commands.download_binaries); + } + + test_monitoring( + context, + dput.clone(), + build_info, + &enabled.repo_arch, + &script, + success, + dput_test, + log_test, + download_binaries, + ) + .await; + } +} + +async fn test_prune( + context: &mut CliTestContext, + dput: CliArtifactsHandle, + build_info: &ObsBuildInfo, +) { + test_prune_missing_build_info(context).await; + + let prune = context + .run() + .command("prune") + .artifacts(dput.clone()) + .go() + .await; + test_prune_deleted_package_1_if_branched(context, build_info, &prune).await; +} + +#[rstest] +#[tokio::test] +async fn test_cli_flow( + #[values( + DputTest::Basic, + DputTest::Rebuild, + DputTest::ReusePreviousBuild, + DputTest::Branch + )] + dput_test: 
DputTest, + #[values(true, false)] build_success: bool, + #[values( + MonitorLogTest::Long, + MonitorLogTest::Short, + MonitorLogTest::Unavailable + )] + log_test: MonitorLogTest, + #[values(true, false)] download_binaries: bool, +) { + with_context(async |mut context| { + let (dput, build_info) = test_dput(&mut context, dput_test).await; + + test_monitor_table( + &mut context, + dput.clone(), + &build_info, + build_success, + dput_test, + log_test, + download_binaries, + ) + .await; + + test_prune(&mut context, dput.clone(), &build_info).await; + }) + .await; +} From af889471d418b29bda4f6a6a5d240bc131f63e8d Mon Sep 17 00:00:00 2001 From: Ryan Gonzalez Date: Tue, 7 Oct 2025 11:19:30 -0500 Subject: [PATCH 3/5] Fix outdated OBS docs link --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index cb35248..171b27b 100644 --- a/README.md +++ b/README.md @@ -101,7 +101,7 @@ be cleaned up. ##### `--branch-to BRANCHED_PROJECT` Before starting an upload, -[branch](https://openbuildservice.org/help/manuals/obs-user-guide/art.obs.bg.html#sec.obsbg.uc.branchprj) +[branch](https://openbuildservice.org/help/manuals/obs-user-guide/art-obs-bg#sec-obsbg-uc-branchprj) the package to a new project, named with the value passed to the argument. Any uploads will now go to the branched project, and `generate-monitor` / `prune` will both used the branched project / package. This is particularly useful to run From 24baca06f45bc2e7db124377e1af4f4594ff2efd Mon Sep 17 00:00:00 2001 From: Ryan Gonzalez Date: Tue, 7 Oct 2025 14:36:25 -0500 Subject: [PATCH 4/5] Split-up the README and update for obo-cli This splits the single readme into three, a top-level one with all the useful shared info, and much smaller project-specific ones with the relevant setup. 
--- README.md | 233 ++++++++++++------------------------ obo-cli/README.md | 20 ++++ obs-gitlab-runner/README.md | 150 +++++++++++++++++++++++ 3 files changed, 248 insertions(+), 155 deletions(-) create mode 100644 obo-cli/README.md create mode 100644 obs-gitlab-runner/README.md diff --git a/README.md b/README.md index 171b27b..4c2f621 100644 --- a/README.md +++ b/README.md @@ -1,72 +1,18 @@ -# OBS GitLab Runner +# OBS Build Orchestrator -This is a custom [GitLab Runner](https://docs.gitlab.com/runner/) implementation -exposing a custom command language for starting, monitoring, and cleaning up -builds on [OBS](https://build.opensuse.org/), specifically targeting Debian -packages. +This repo contains tools for starting, monitoring, and cleaning up builds on +[OBS](https://build.opensuse.org/), specifically targeting Debian packages. +It's usable in two forms: -## Usage - -### Sending Jobs - -In order to send commands in a job to the runner, set the job's `tags:` to -include the tag you used during [the deployment](#deployment), e.g.: - -```yaml -my-job: - tags: - - obs-runner # <-- set to run on runners tagged "obs-runner" - stage: some-stage - script: - # [...] -``` - -This will run all the commands inside `before_script`, `script`, and -`after_script` with the runner. - -### Supported Syntax - -A subset of shell syntax is supported for commands: - -- Commands are split at spaces, but parts can be quoted, just like in the shell: - ```bash - some-command this-is-argument-1 "this entire string is argument 2" - ``` -- Variable substitution is supported, as well as the `${VARIABLE:-DEFAULT}` - syntax to use the value `DEFAULT` if `$VARIABLE` is unset: - ```bash - some-command $a_variable ${a_variable_with_default:-this is used if unset} - ``` - The variables are sourced from the pipeline-wide and job-specific variables - set. 
- - A significant departure from shell argument parsing is that **variable - contents are auto-quoted, thus spaces inside a variable do not split - arguments**. For example, given `MYVAR=a b c`, this: - ```bash - some-command $MYVAR - ``` - is interpreted as: - ```bash - some-command 'a b c' - ``` - *not*: - ```bash - some-command a b c - ``` - There is no way to use a variable without auto-quoting its contents. - -#### Flags +- [obo-cli](obo-cli/README.md), a standalone CLI. +- [obs-gitlab-runner](obs-gitlab-runner/README.md), a custom [GitLab + Runner](https://docs.gitlab.com/runner/). -Any flag arguments shown below can also explicitly take a true/false value, e.g. -`--rebuild-if-unchanged`, `--rebuild-if-unchanged=true`, and -`--rebuild-if-unchanged=false`. This is primarily useful to conditionally set -the value for a flag; you can set `SOME_VARIABLE=true/false` in your GitLab -pipeline, then use that variable in a flag value as `--flag=$SOME_VARIABLE`. +## Usage ### Required Environment -In order to connect to OBS, three variables must be set (generally within the -"CI/CD" section of the settings): +In order to connect to OBS, three environment variables must be set: - `OBS_SERVER`: The URL of the OBS instance, e.g. `https://obs.somewhere.com/`. - `OBS_USER`: The username used to authenticate with OBS (any commits created @@ -75,6 +21,24 @@ In order to connect to OBS, three variables must be set (generally within the there are no places where this value should be logged, **for safety purposes, it is highly recommended to mark this variable as *Masked*.**. +For obs-gitlab-runner, these should generally be configured within the "CI/CD" +section of the repository / group settings. **For safety purposes, it is highly +recommended to mark the `OBS_PASSWORD` variable as *Masked*.**. (It should not +be logged anywhere regardless, but that will provide insulation against +mistakes.) 
+ +For obo-cli, you can additionally use the `--obs-server`, `--obs-user`, and +`--obs-password` options, but care should be taken to avoid accidentally saving +the values into shell history or other tenuous locations. + +### Flag syntax + +Any flag arguments shown below can also explicitly take a true/false value, e.g. +`--rebuild-if-unchanged`, `--rebuild-if-unchanged=true`, and +`--rebuild-if-unchanged=false`. This is primarily useful to conditionally set +the value for a flag; you can set `SOME_VARIABLE=true/false` in your CI +pipeline, then use that variable in a flag value as `--flag=$SOME_VARIABLE`. + ### Commands #### `dput` @@ -124,7 +88,7 @@ Note that, if `--branch-to` was specified, this will, in practice, never be triggered: due to the way metadata files are handled, right after a branching operation, there will *always* be a change to upload. -#### `generate-monitor` +#### `generate-monitor` (*obs-gitlab-runner version*) ```bash generate-monitor RUNNER_TAG @@ -229,6 +193,56 @@ Changes the expiration of the build results & logs. Changes the filename each monitoring job will save the build log into. +#### `generate-monitor` (*obo-cli version*) + +```bash +generate-monitor + [--download-build-results-to BUILD_RESULTS_DIR] + [--build-info BUILD_INFO_FILE=build-info.json] + [--monitor-out MONITOR_OUT=obs-monitor.json] + [--build-log-out BUILD_LOG_FILE=build.log] +``` + +Generates a JSON file `MONITOR_OUT` structured as: + +```json5 +{ + "entries": [ + { + "repo": "REPO", + "arch": "ARCH", + "commands": { + "monitor": "monitor [...]", + "download-binaries": "download-binaries [...]", + } + }, + // ... + ] +} +``` + +`entries` contains a list of OBS repository + architecture combinations, along +with the subcommands to run to monitor a build and download the results (to be +used as `obo THE_SUBCOMMAND`). 
+ +##### `--download-build-results-to BUILD_RESULTS_DIR` + +Fills in `entries[*].commands.download-binaries` with a command that will +download the build results from OBS to the given `BUILD_RESULTS_DIR`. If this +option is not given, `commands.download-binaries` will be `null`. + +##### `--build-info BUILD_INFO_FILE=build-info.json` + +Specifies the name of the build info file to read. In particular, if a different +build info filename was used with `dput` via +[`--build-info-out`](#--build-info-out), then `--build-info` should be used here +to specify the same filename. + +##### `--build-log-out BUILD_LOG_FILE=build.log` + +Changes the filename each subcommand in `entries[*].commands.monitor` will save +the build log into. + #### `prune` ```bash @@ -261,94 +275,3 @@ is written. Only run the prune if a previous command in the same job failed. This is primarily useful if `prune` is used inside of `after_script`, to only remove the branched project/package if e.g. the upload failed. - -## Deployment - -### Registering the Runner - -In order to use the runner, you must first register it with your GitLab -instance. This requires the use of a registration token, which can be obtained -via the following steps: - -- Enter the GitLab admin area. -- Navigate to Overview -> Runners. -- Click "Register an instance runner". -- Copy the registration token within. - -(Per-group/-project registration tokens can also be retrieved from the CI/CD -settings of the group or project.) - -With this token, you can now register the runner via the [GitLab -API](https://docs.gitlab.com/ee/api/runners.html#register-a-new-runner). 
- -Example using curl: - -```bash -curl --request POST "https://$GITLAB_SERVER_URL/api/v4/runners" \ - --form description='OBS runner' \ - --form run_untagged=false \ - --form tag_list=obs-runner \ - --form token="$REGISTRATION_TOKEN" -``` - -httpie: - -```bash -http --form POST "https://$GITLAB_SERVER_URL/api/v4/runners" \ - description='OBS runner' \ - run_untagged=false \ - tag_list=obs-runner \ - token="$REGISTRATION_TOKEN" -``` - -**It is critical that you set `run_untagged=false`,** otherwise this runner -will be used for *all* jobs that don't explicitly set a tag, rather than just -the jobs explicitly targeting the runner. - -This API call will return a JSON object containing a `token` key, whose value -is a _runner token_ that is used by the runner to connect to GitLab. - -### Docker - -Docker images are built on every commit, available at -`ghcr.io/collabora/obs-gitlab-runner:main`. The entry point takes two arguments: - -- The GitLab server URL. -- The runner token acquired previously. 
-
-Simple example usage via the Docker CLI:
-
-```bash
-$ docker run --rm -it ghcr.io/collabora/obs-gitlab-runner:main \
-    "$GITLAB_SERVER_URL" "$GITLAB_RUNNER_TOKEN"
-```
-
-In addition, you can instead opt to set the `GITLAB_URL` and `GITLAB_TOKEN`
-environment variables:
-
-```bash
-$ docker run --rm -it \
-    -e GITLAB_URL="$GITLAB_SERVER_URL" \
-    -e GITLAB_TOKEN="$GITLAB_RUNNER_TOKEN" \
-    ghcr.io/collabora/obs-gitlab-runner:main
-```
-
-### Kubernetes
-
-A [Helm](https://helm.sh/) chart has been provided in the `chart/` directory,
-installable via:
-
-```bash
-$ helm install \
-    --set-string gitlab.url="$GITLAB_SERVER_URL" \
-    --set-string gitlab.token="$GITLAB_RUNNER_TOKEN" \
-    obs-gitlab-runner chart
-```
-
-Upgrades can skip setting `gitlab.token` to re-use the previously set value:
-
-```bash
-$ helm upgrade \
-    --set-string gitlab.url="$GITLAB_SERVER_URL" \
-    obs-gitlab-runner chart
-```
diff --git a/obo-cli/README.md b/obo-cli/README.md
new file mode 100644
index 0000000..ba22e53
--- /dev/null
+++ b/obo-cli/README.md
@@ -0,0 +1,20 @@
+# obo-cli
+
+This is a CLI for starting, monitoring, and cleaning up builds on
+[OBS](https://build.opensuse.org/), specifically targeting Debian packages.
+
+## Usage
+
+For information on OBS authentication and the commands supported, see [the
+project-wide README](../README.md).
+
+Docker images are built on every commit, available at
+`ghcr.io/collabora/obo-cli:main`. The entry point directly takes the
+subcommands, e.g.:
+
+```
+docker run --rm -it -v $PWD:/work -w /work ghcr.io/collabora/obo-cli:main prune
+```
+
+will mount the current directory as `/work` and then run the `prune` command
+from within.
diff --git a/obs-gitlab-runner/README.md b/obs-gitlab-runner/README.md new file mode 100644 index 0000000..5881b3f --- /dev/null +++ b/obs-gitlab-runner/README.md @@ -0,0 +1,150 @@ +# obs-gitlab-runner + +This is a custom [GitLab Runner](https://docs.gitlab.com/runner/) implementation +providing a shell-like command language for starting, monitoring, and cleaning up +builds on [OBS](https://build.opensuse.org/), specifically targeting Debian +packages. + +## Usage + +For information on OBS authentication and the commands supported, see [the +project-wide README](../README.md). + +### Sending Jobs + +In order to send commands in a job to the runner, set the job's `tags:` to +include the tag you used during [the deployment](#deployment), e.g.: + +```yaml +my-job: + tags: + - obs-runner # <-- set to run on runners tagged "obs-runner" + stage: some-stage + script: + # [...] +``` + +This will run all the commands inside `before_script`, `script`, and +`after_script` with the runner. + +### Supported Syntax + +A subset of shell syntax is supported for commands: + +- Commands are split at spaces, but parts can be quoted, just like in the shell: + ```bash + some-command this-is-argument-1 "this entire string is argument 2" + ``` +- Variable substitution is supported, as well as the `${VARIABLE:-DEFAULT}` + syntax to use the value `DEFAULT` if `$VARIABLE` is unset: + ```bash + some-command $a_variable ${a_variable_with_default:-this is used if unset} + ``` + The variables are sourced from the pipeline-wide and job-specific variables + set. + - A significant departure from shell argument parsing is that **variable + contents are auto-quoted, thus spaces inside a variable do not split + arguments**. For example, given `MYVAR=a b c`, this: + ```bash + some-command $MYVAR + ``` + is interpreted as: + ```bash + some-command 'a b c' + ``` + *not*: + ```bash + some-command a b c + ``` + There is no way to use a variable without auto-quoting its contents. 
+ +## Deployment + +### Registering the Runner + +In order to use the runner, you must first register it with your GitLab +instance. This requires the use of a registration token, which can be obtained +via the following steps: + +- Enter the GitLab admin area. +- Navigate to Overview -> Runners. +- Click "Register an instance runner". +- Copy the registration token within. + +(Per-group/-project registration tokens can also be retrieved from the CI/CD +settings of the group or project.) + +With this token, you can now register the runner via the [GitLab +API](https://docs.gitlab.com/ee/api/runners.html#register-a-new-runner). + +Example using curl: + +```bash +curl --request POST "https://$GITLAB_SERVER_URL/api/v4/runners" \ + --form description='OBS runner' \ + --form run_untagged=false \ + --form tag_list=obs-runner \ + --form token="$REGISTRATION_TOKEN" +``` + +httpie: + +```bash +http --form POST "https://$GITLAB_SERVER_URL/api/v4/runners" \ + description='OBS runner' \ + run_untagged=false \ + tag_list=obs-runner \ + token="$REGISTRATION_TOKEN" +``` + +**It is critical that you set `run_untagged=false`,** otherwise this runner +will be used for *all* jobs that don't explicitly set a tag, rather than just +the jobs explicitly targeting the runner. + +This API call will return a JSON object containing a `token` key, whose value +is a _runner token_ that is used by the runner to connect to GitLab. + +### Docker + +Docker images are built on every commit, available at +`ghcr.io/collabora/obs-gitlab-runner:main`. The entry point takes two arguments: + +- The GitLab server URL. +- The runner token acquired previously. 
+ +Simple example usage via the Docker CLI: + +```bash +$ docker run --rm -it ghcr.io/collabora/obs-gitlab-runner:main \ + "$GITLAB_SERVER_URL" "$GITLAB_RUNNER_TOKEN" +``` + +In addition, you can instead opt to set the `GITLAB_URL` and `GITLAB_TOKEN` +environment variables: + +```bash +$ docker run --rm -it \ + -e GITLAB_URL="$GITLAB_SERVER_URL" \ + -e GITLAB_TOKEN="$GITLAB_RUNNER_TOKEN" \ + ghcr.io/collabora/obs-gitlab-runner:main +``` + +### Kubernetes + +A [Helm](https://helm.sh/) chart has been provided in the `chart/` directory, +installable via: + +```bash +$ helm install \ + --set-string gitlab.url="$GITLAB_SERVER_URL" \ + --set-string gitlab.token="$GITLAB_RUNNER_TOKEN" \ + obs-gitlab-runner chart +``` + +Upgrades can skip setting `gitlab.token` to re-use the previously set value: + +```bash +$ helm upgrade \ + --set-string gitlab.url="$GITLAB_SERVER_URL" \ + obs-gitlab-runner chart +``` From 318b0bf234827960c516220894c672f142a33ea8 Mon Sep 17 00:00:00 2001 From: Ryan Gonzalez Date: Fri, 13 Feb 2026 15:07:30 -0600 Subject: [PATCH 5/5] Silence some collapsible_if warnings from let-chains --- obo-core/src/build_meta.rs | 8 +++----- obo-core/src/prune.rs | 16 ++++++++-------- obs-gitlab-runner/src/handler.rs | 13 ++++++------- 3 files changed, 17 insertions(+), 20 deletions(-) diff --git a/obo-core/src/build_meta.rs b/obo-core/src/build_meta.rs index 41ebad7..ba7142b 100644 --- a/obo-core/src/build_meta.rs +++ b/obo-core/src/build_meta.rs @@ -125,10 +125,9 @@ impl BuildMeta { for cause in e.chain() { if let Some(obs::Error::ApiError(obs::ApiError { code, .. 
})) = cause.downcast_ref::() + && code == "unknown_package" { - if code == "unknown_package" { - return Ok(None); - } + return Ok(None); } } @@ -247,8 +246,7 @@ impl BuildMeta { prev_endtime_for_commit: jobhist .jobhist .iter() - .filter(|e| e.srcmd5 == srcmd5) - .next_back() + .rfind(|e| e.srcmd5 == srcmd5) .map(|e| e.endtime), }) .collect() diff --git a/obo-core/src/prune.rs b/obo-core/src/prune.rs index 9178328..52bf994 100644 --- a/obo-core/src/prune.rs +++ b/obo-core/src/prune.rs @@ -45,14 +45,14 @@ pub async fn prune_branch( .wrap_err("Failed to list package") )?; - if let Some(expected_rev) = expected_rev { - if dir.rev.as_deref() != Some(expected_rev) { - outputln!( - "Latest revision is {}, skipping prune", - dir.rev.as_deref().unwrap_or("[unknown]") - ); - return Ok(()); - } + if let Some(expected_rev) = expected_rev + && dir.rev.as_deref() != Some(expected_rev) + { + outputln!( + "Latest revision is {}, skipping prune", + dir.rev.as_deref().unwrap_or("[unknown]") + ); + return Ok(()); } retry_request!( diff --git a/obs-gitlab-runner/src/handler.rs b/obs-gitlab-runner/src/handler.rs index b307b91..a506e66 100644 --- a/obs-gitlab-runner/src/handler.rs +++ b/obs-gitlab-runner/src/handler.rs @@ -413,8 +413,8 @@ async fn check_for_artifact( let path = path.components().collect::(); // TODO: not spawn a sync environment for *every single artifact* - if let Some(mut artifact) = dep.download().await? { - if let Some(file) = tokio::task::spawn_blocking(move || { + if let Some(mut artifact) = dep.download().await? + && let Some(file) = tokio::task::spawn_blocking(move || { artifact .file(path.as_str()) .map(|mut file| { @@ -426,11 +426,10 @@ async fn check_for_artifact( .transpose() }) .await?? - { - return Ok(Some( - ArtifactReader::from_async_file(&AsyncFile::from_std(file)).await?, - )); - } + { + return Ok(Some( + ArtifactReader::from_async_file(&AsyncFile::from_std(file)).await?, + )); } Ok(None)