From c0aac9131ec0b29302973d81fad66c2656a02639 Mon Sep 17 00:00:00 2001 From: Ryan Gonzalez Date: Thu, 17 Jul 2025 15:27:36 -0500 Subject: [PATCH 1/7] Refactor out the OBS interaction code into a new 'obo-core' crate This is largely just a rename, as well as the slightly-silly `obo-test-support` crate that exists to be depended on by `obo-core` *and* `obs-gitlab-runner`. --- Cargo.lock | 46 ++++++++++++-- Cargo.toml | 50 +++++---------- obo-core/Cargo.toml | 37 +++++++++++ {src => obo-core/src}/actions.rs | 52 ++++++++++++++++ {src => obo-core/src}/artifacts.rs | 0 {src => obo-core/src}/binaries.rs | 3 +- {src => obo-core/src}/build_meta.rs | 3 +- {src => obo-core/src}/dsc.rs | 0 obo-core/src/lib.rs | 10 +++ obo-core/src/logging.rs | 57 +++++++++++++++++ {src => obo-core/src}/monitor.rs | 3 +- {src => obo-core/src}/prune.rs | 3 +- {src => obo-core/src}/retry.rs | 3 +- {src => obo-core/src}/upload.rs | 3 +- obo-test-support/Cargo.toml | 9 +++ .../src/lib.rs | 0 obs-gitlab-runner/Cargo.toml | 42 +++++++++++++ .../chart}/.helmignore | 0 {chart => obs-gitlab-runner/chart}/Chart.yaml | 0 .../chart}/templates/_helpers.tpl | 0 .../chart}/templates/deployment.yaml | 0 .../chart}/templates/gitlab-token-secret.yaml | 0 .../chart}/templates/hpa.yaml | 0 .../chart}/values.yaml | 0 {src => obs-gitlab-runner/src}/handler.rs | 30 ++++----- {src => obs-gitlab-runner/src}/logging.rs | 51 +++------------ {src => obs-gitlab-runner/src}/main.rs | 12 ---- {src => obs-gitlab-runner/src}/pipeline.rs | 62 ++++++++----------- 28 files changed, 322 insertions(+), 154 deletions(-) create mode 100644 obo-core/Cargo.toml rename {src => obo-core/src}/actions.rs (89%) rename {src => obo-core/src}/artifacts.rs (100%) rename {src => obo-core/src}/binaries.rs (97%) rename {src => obo-core/src}/build_meta.rs (99%) rename {src => obo-core/src}/dsc.rs (100%) create mode 100644 obo-core/src/lib.rs create mode 100644 obo-core/src/logging.rs rename {src => obo-core/src}/monitor.rs (99%) rename {src => 
obo-core/src}/prune.rs (99%) rename {src => obo-core/src}/retry.rs (99%) rename {src => obo-core/src}/upload.rs (99%) create mode 100644 obo-test-support/Cargo.toml rename src/test_support.rs => obo-test-support/src/lib.rs (100%) create mode 100644 obs-gitlab-runner/Cargo.toml rename {chart => obs-gitlab-runner/chart}/.helmignore (100%) rename {chart => obs-gitlab-runner/chart}/Chart.yaml (100%) rename {chart => obs-gitlab-runner/chart}/templates/_helpers.tpl (100%) rename {chart => obs-gitlab-runner/chart}/templates/deployment.yaml (100%) rename {chart => obs-gitlab-runner/chart}/templates/gitlab-token-secret.yaml (100%) rename {chart => obs-gitlab-runner/chart}/templates/hpa.yaml (100%) rename {chart => obs-gitlab-runner/chart}/values.yaml (100%) rename {src => obs-gitlab-runner/src}/handler.rs (99%) rename {src => obs-gitlab-runner/src}/logging.rs (77%) rename {src => obs-gitlab-runner/src}/main.rs (96%) rename {src => obs-gitlab-runner/src}/pipeline.rs (68%) diff --git a/Cargo.lock b/Cargo.lock index 6971eb0..97810fc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1494,8 +1494,8 @@ dependencies = [ ] [[package]] -name = "obs-gitlab-runner" -version = "0.1.8" +name = "obo-core" +version = "0.1.0" dependencies = [ "async-trait", "base16ct", @@ -1506,9 +1506,8 @@ dependencies = [ "color-eyre", "derivative", "futures-util", - "gitlab-runner", - "gitlab-runner-mock", "md-5", + "obo-test-support", "open-build-service-api", "open-build-service-mock", "reqwest", @@ -1517,12 +1516,49 @@ dependencies = [ "serde", "serde_yaml", "shell-words", + "tempfile", + "thiserror 2.0.16", + "tokio", + "tokio-retry2 0.6.0", + "tokio-util", + "tracing", + "wiremock", +] + +[[package]] +name = "obo-test-support" +version = "0.1.0" +dependencies = [ + "open-build-service-api", + "open-build-service-mock", +] + +[[package]] +name = "obs-gitlab-runner" +version = "0.1.8" +dependencies = [ + "async-trait", + "camino", + "claims", + "clap", + "color-eyre", + "derivative", + "futures-util", + 
"gitlab-runner", + "gitlab-runner-mock", + "obo-core", + "obo-test-support", + "open-build-service-api", + "open-build-service-mock", + "rstest", + "serde", + "serde_yaml", + "shell-words", "shellexpand", "strum", "tempfile", "thiserror 2.0.16", "tokio", - "tokio-retry2 0.6.0", "tokio-util", "tracing", "tracing-error", diff --git a/Cargo.toml b/Cargo.toml index 5a66c3e..6f8e8bd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,50 +1,30 @@ -[package] -name = "obs-gitlab-runner" -version = "0.1.8" -edition = "2024" -license = "MIT OR Apache-2.0" +[workspace] +resolver = "3" +members = [ + "obo-core", + "obo-test-support", + "obs-gitlab-runner" +] -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] +[workspace.dependencies] async-trait = "0.1" -base16ct = { version = "0.2", features = ["std"] } -bytes = "1.10" camino = "1.1" +claims = "0.8" clap = { version = "4.5", features = ["default", "derive", "env"] } color-eyre = "0.6" derivative = "2.2" futures-util = "0.3" -md-5 = "0.10" -reqwest = "0.12" -rfc822-like = "0.2" +open-build-service-api = { git = "https://github.com/collabora/open-build-service-rs" } +# open-build-service-api = { path = "../open-build-service-rs/open-build-service-api" } +open-build-service-mock = { git = "https://github.com/collabora/open-build-service-rs" } +# open-build-service-mock = { path = "../open-build-service-rs/open-build-service-api" } +rstest = "0.26" serde = "1.0" serde_yaml = "0.9" -shellexpand = "3.1" shell-words = "1.1" -strum = { version = "0.27", features = ["derive"] } tempfile = "3.20" +thiserror = "2.0" tokio = { version = "1.45", features = ["full"] } -tokio-retry2 = { version = "0.6.0", features = ["jitter"] } tokio-util = { version = "0.7", features = ["full"] } tracing = "0.1" -tracing-error = "0.2" -tracing-subscriber = { version = "0.3", features = ["default", "json"] } -url = "2.5" - -gitlab-runner = "0.3.0-rc1" -# gitlab-runner = { path = 
"../gitlab-runner-rs/gitlab-runner" } -open-build-service-api = { git = "https://github.com/collabora/open-build-service-rs" } -thiserror = "2.0.12" -# open-build-service-api = { path = "../open-build-service-rs/open-build-service-api" } - -[dev-dependencies] -claims = "0.8" -rstest = "0.26" wiremock = "0.6" -zip = "5.1" - -gitlab-runner-mock = "0.2.1" -# gitlab-runner-mock = { path = "../gitlab-runner-rs/gitlab-runner-mock" } -open-build-service-mock = { git = "https://github.com/collabora/open-build-service-rs" } -# open-build-service-mock = { path = "../open-build-service-rs/open-build-service-mock" } diff --git a/obo-core/Cargo.toml b/obo-core/Cargo.toml new file mode 100644 index 0000000..146640a --- /dev/null +++ b/obo-core/Cargo.toml @@ -0,0 +1,37 @@ +[package] +name = "obo-core" +description = "OBS Build Orchestrator — core" +version = "0.1.0" +edition = "2024" +license = "MIT OR Apache-2.0" + +[dependencies] +async-trait.workspace = true +base16ct = { version = "0.2", features = ["std"] } +bytes = "1.10" +camino.workspace = true +clap.workspace = true +color-eyre.workspace = true +derivative.workspace = true +futures-util.workspace = true +md-5 = "0.10" +obo-test-support = { path = "../obo-test-support" } +open-build-service-api.workspace = true +reqwest = "0.12" +rfc822-like = "0.2" +serde.workspace = true +serde_yaml.workspace = true +shell-words.workspace = true +tempfile.workspace = true +thiserror.workspace = true +tokio.workspace = true +tokio-retry2 = { version = "0.6.0", features = ["jitter"] } +tokio-util.workspace = true +tracing.workspace = true + +[dev-dependencies] +claims.workspace = true +rstest.workspace = true +open-build-service-mock.workspace = true +wiremock.workspace = true + diff --git a/src/actions.rs b/obo-core/src/actions.rs similarity index 89% rename from src/actions.rs rename to obo-core/src/actions.rs index 140f8e0..1dfee2c 100644 --- a/src/actions.rs +++ b/obo-core/src/actions.rs @@ -42,6 +42,27 @@ impl 
FlagSupportingExplicitValue for clap::Arg { } } +#[derive(Debug)] +struct CommandBuilder { + args: Vec, +} + +impl CommandBuilder { + fn new(name: String) -> Self { + Self { args: vec![name] } + } + + fn add(&mut self, arg: &str, value: &str) -> &mut Self { + self.args + .push(format!("--{arg}={}", shell_words::quote(value))); + self + } + + fn build(self) -> String { + self.args.join(" ") + } +} + #[derive(Parser, Debug)] pub struct DputAction { pub project: String, @@ -74,6 +95,24 @@ pub struct MonitorAction { pub build_log_out: String, } +impl MonitorAction { + pub fn generate_command(&self) -> String { + let mut builder = CommandBuilder::new("monitor".to_owned()); + builder + .add("project", &self.project) + .add("package", &self.package) + .add("rev", &self.rev) + .add("srcmd5", &self.srcmd5) + .add("repository", &self.repository) + .add("arch", &self.arch) + .add("build-log-out", &self.build_log_out); + if let Some(endtime) = &self.prev_endtime_for_commit { + builder.add("prev-endtime-for-commit", &endtime.to_string()); + } + builder.build() + } +} + #[derive(Parser, Debug)] pub struct DownloadBinariesAction { #[clap(long)] @@ -88,6 +127,19 @@ pub struct DownloadBinariesAction { pub build_results_dir: Utf8PathBuf, } +impl DownloadBinariesAction { + pub fn generate_command(&self) -> String { + let mut builder = CommandBuilder::new("download-binaries".to_owned()); + builder + .add("project", &self.project) + .add("package", &self.package) + .add("repository", &self.repository) + .add("arch", &self.arch) + .add("build-results-dir", self.build_results_dir.as_str()); + builder.build() + } +} + #[derive(Parser, Debug)] pub struct PruneAction { #[clap(long, default_value_t = DEFAULT_BUILD_INFO.to_owned())] diff --git a/src/artifacts.rs b/obo-core/src/artifacts.rs similarity index 100% rename from src/artifacts.rs rename to obo-core/src/artifacts.rs diff --git a/src/binaries.rs b/obo-core/src/binaries.rs similarity index 97% rename from src/binaries.rs rename to 
obo-core/src/binaries.rs index a010537..86b0cb5 100644 --- a/src/binaries.rs +++ b/obo-core/src/binaries.rs @@ -80,9 +80,10 @@ mod tests { use std::time::SystemTime; use claims::*; + use obo_test_support::*; use open_build_service_mock::*; - use crate::{artifacts::test_support::MockArtifactDirectory, test_support::*}; + use crate::artifacts::test_support::MockArtifactDirectory; use super::*; diff --git a/src/build_meta.rs b/obo-core/src/build_meta.rs similarity index 99% rename from src/build_meta.rs rename to obo-core/src/build_meta.rs index 3588110..c2330b6 100644 --- a/src/build_meta.rs +++ b/obo-core/src/build_meta.rs @@ -265,11 +265,10 @@ mod tests { use std::time::{Duration, SystemTime}; use claims::*; + use obo_test_support::*; use open_build_service_mock::*; use rstest::rstest; - use crate::test_support::*; - use super::*; #[tokio::test] diff --git a/src/dsc.rs b/obo-core/src/dsc.rs similarity index 100% rename from src/dsc.rs rename to obo-core/src/dsc.rs diff --git a/obo-core/src/lib.rs b/obo-core/src/lib.rs new file mode 100644 index 0000000..9e5a517 --- /dev/null +++ b/obo-core/src/lib.rs @@ -0,0 +1,10 @@ +pub mod actions; +pub mod artifacts; +pub mod binaries; +pub mod build_meta; +pub mod dsc; +pub mod logging; +pub mod monitor; +pub mod prune; +pub mod retry; +pub mod upload; diff --git a/obo-core/src/logging.rs b/obo-core/src/logging.rs new file mode 100644 index 0000000..ae94cfc --- /dev/null +++ b/obo-core/src/logging.rs @@ -0,0 +1,57 @@ +use tracing::{ + Event, Metadata, + field::{self, Field}, +}; + +pub const TRACING_FIELD: &str = "obo_core.output"; + +#[macro_export] +macro_rules! 
outputln { + ($($args:tt)*) => { + ::tracing::trace!(obo_core.output = true, $($args)*) + }; +} + +struct OutputTester(bool); + +impl field::Visit for OutputTester { + fn record_bool(&mut self, field: &Field, value: bool) { + if field.name() == TRACING_FIELD { + self.0 = value; + } + } + + fn record_debug(&mut self, _field: &Field, _value: &dyn std::fmt::Debug) {} +} + +pub fn is_output_field_set_in_event(event: &Event<'_>) -> bool { + let mut visitor = OutputTester(false); + event.record(&mut visitor); + visitor.0 +} + +pub fn is_output_field_in_metadata(metadata: &Metadata<'_>) -> bool { + metadata.fields().iter().any(|f| f.name() == TRACING_FIELD) +} + +struct MessageExtractor(Option); + +impl field::Visit for MessageExtractor { + fn record_str(&mut self, field: &Field, value: &str) { + if field.name() == "message" { + self.0 = Some(value.to_owned()); + } + } + + fn record_debug(&mut self, field: &Field, value: &dyn std::fmt::Debug) { + if field.name() == "message" { + self.0 = Some(format!("{value:?}")); + } + } +} + +pub fn get_event_message(event: &Event) -> Option { + let mut visitor = MessageExtractor(None); + event.record(&mut visitor); + visitor.0 +} diff --git a/src/monitor.rs b/obo-core/src/monitor.rs similarity index 99% rename from src/monitor.rs rename to obo-core/src/monitor.rs index 95c6c7c..ecf8b81 100644 --- a/src/monitor.rs +++ b/obo-core/src/monitor.rs @@ -287,10 +287,11 @@ mod tests { use std::{collections::HashMap, time::SystemTime}; use claims::*; + use obo_test_support::*; use obs::PackageCode; use open_build_service_mock::*; - use crate::{artifacts::test_support::MockArtifactDirectory, test_support::*}; + use crate::artifacts::test_support::MockArtifactDirectory; use super::*; diff --git a/src/prune.rs b/obo-core/src/prune.rs similarity index 99% rename from src/prune.rs rename to obo-core/src/prune.rs index 088f871..9178328 100644 --- a/src/prune.rs +++ b/obo-core/src/prune.rs @@ -95,10 +95,9 @@ mod tests { use std::time::SystemTime; use 
claims::*; + use obo_test_support::*; use open_build_service_mock::*; - use crate::test_support::*; - use super::*; #[tokio::test] diff --git a/src/retry.rs b/obo-core/src/retry.rs similarity index 99% rename from src/retry.rs rename to obo-core/src/retry.rs index 857a6d8..6b90f39 100644 --- a/src/retry.rs +++ b/obo-core/src/retry.rs @@ -79,6 +79,7 @@ macro_rules! retry_request { mod tests { use claims::*; use futures_util::Future; + use obo_test_support::*; use open_build_service_api as obs; use rstest::*; use wiremock::{ @@ -86,8 +87,6 @@ mod tests { matchers::{method, path_regex}, }; - use crate::test_support::*; - use super::*; #[fixture] diff --git a/src/upload.rs b/obo-core/src/upload.rs similarity index 99% rename from src/upload.rs rename to obo-core/src/upload.rs index 844698b..a2dbd17 100644 --- a/src/upload.rs +++ b/obo-core/src/upload.rs @@ -328,9 +328,10 @@ mod tests { use std::time::SystemTime; use claims::*; + use obo_test_support::*; use open_build_service_mock::*; - use crate::{artifacts::test_support::MockArtifactDirectory, test_support::*}; + use crate::artifacts::test_support::MockArtifactDirectory; use super::*; diff --git a/obo-test-support/Cargo.toml b/obo-test-support/Cargo.toml new file mode 100644 index 0000000..e6c281c --- /dev/null +++ b/obo-test-support/Cargo.toml @@ -0,0 +1,9 @@ +[package] +name = "obo-test-support" +version = "0.1.0" +edition = "2024" +license = "MIT OR Apache-2.0" + +[dependencies] +open-build-service-api.workspace = true +open-build-service-mock.workspace = true diff --git a/src/test_support.rs b/obo-test-support/src/lib.rs similarity index 100% rename from src/test_support.rs rename to obo-test-support/src/lib.rs diff --git a/obs-gitlab-runner/Cargo.toml b/obs-gitlab-runner/Cargo.toml new file mode 100644 index 0000000..4e0c2cb --- /dev/null +++ b/obs-gitlab-runner/Cargo.toml @@ -0,0 +1,42 @@ +[package] +name = "obs-gitlab-runner" +version = "0.1.8" +edition = "2024" +license = "MIT OR Apache-2.0" + +# See more 
keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +async-trait.workspace = true +camino.workspace = true +clap.workspace = true +color-eyre.workspace = true +derivative.workspace = true +futures-util.workspace = true +gitlab-runner = "0.3.0-rc1" +# gitlab-runner = { path = "../gitlab-runner-rs/gitlab-runner" } +obo-core = { path = "../obo-core" } +obo-test-support = { path = "../obo-test-support" } +open-build-service-api.workspace = true +serde.workspace = true +serde_yaml.workspace = true +shellexpand = "3.1" +shell-words.workspace = true +strum = { version = "0.27", features = ["derive"] } +tempfile.workspace = true +thiserror.workspace = true +tokio.workspace = true +tokio-util.workspace = true +tracing.workspace = true +tracing-error = "0.2" +tracing-subscriber = { version = "0.3", features = ["default", "json"] } +url = "2.5" + +[dev-dependencies] +claims.workspace = true +gitlab-runner-mock = "0.2.1" +# gitlab-runner-mock = { path = "../gitlab-runner-rs/gitlab-runner-mock" } +open-build-service-mock.workspace = true +rstest.workspace = true +wiremock.workspace = true +zip = "5.1" diff --git a/chart/.helmignore b/obs-gitlab-runner/chart/.helmignore similarity index 100% rename from chart/.helmignore rename to obs-gitlab-runner/chart/.helmignore diff --git a/chart/Chart.yaml b/obs-gitlab-runner/chart/Chart.yaml similarity index 100% rename from chart/Chart.yaml rename to obs-gitlab-runner/chart/Chart.yaml diff --git a/chart/templates/_helpers.tpl b/obs-gitlab-runner/chart/templates/_helpers.tpl similarity index 100% rename from chart/templates/_helpers.tpl rename to obs-gitlab-runner/chart/templates/_helpers.tpl diff --git a/chart/templates/deployment.yaml b/obs-gitlab-runner/chart/templates/deployment.yaml similarity index 100% rename from chart/templates/deployment.yaml rename to obs-gitlab-runner/chart/templates/deployment.yaml diff --git a/chart/templates/gitlab-token-secret.yaml 
b/obs-gitlab-runner/chart/templates/gitlab-token-secret.yaml similarity index 100% rename from chart/templates/gitlab-token-secret.yaml rename to obs-gitlab-runner/chart/templates/gitlab-token-secret.yaml diff --git a/chart/templates/hpa.yaml b/obs-gitlab-runner/chart/templates/hpa.yaml similarity index 100% rename from chart/templates/hpa.yaml rename to obs-gitlab-runner/chart/templates/hpa.yaml diff --git a/chart/values.yaml b/obs-gitlab-runner/chart/values.yaml similarity index 100% rename from chart/values.yaml rename to obs-gitlab-runner/chart/values.yaml diff --git a/src/handler.rs b/obs-gitlab-runner/src/handler.rs similarity index 99% rename from src/handler.rs rename to obs-gitlab-runner/src/handler.rs index 6b761e6..8b7751e 100644 --- a/src/handler.rs +++ b/obs-gitlab-runner/src/handler.rs @@ -14,15 +14,7 @@ use gitlab_runner::{ JobHandler, JobResult, Phase, UploadableFile, job::{Dependency, Job, Variable}, }; -use open_build_service_api as obs; -use tokio::fs::File as AsyncFile; -use tokio_util::{ - compat::{Compat, TokioAsyncReadCompatExt}, - io::ReaderStream, -}; -use tracing::{error, instrument, warn}; - -use crate::{ +use obo_core::{ actions::{ Actions, DEFAULT_BUILD_INFO, DEFAULT_BUILD_LOG, DownloadBinariesAction, DputAction, FailedBuild, FlagSupportingExplicitValue, LOG_TAIL_2MB, MonitorAction, ObsBuildInfo, @@ -31,7 +23,17 @@ use crate::{ artifacts::{ArtifactDirectory, ArtifactReader, ArtifactWriter, MissingArtifact, SaveCallback}, monitor::PackageMonitoringOptions, outputln, - pipeline::{GeneratePipelineOptions, PipelineDownloadBinaries, generate_monitor_pipeline}, +}; +use open_build_service_api as obs; +use tokio::fs::File as AsyncFile; +use tokio_util::{ + compat::{Compat, TokioAsyncReadCompatExt}, + io::ReaderStream, +}; +use tracing::{error, instrument, warn}; + +use crate::pipeline::{ + GeneratePipelineOptions, PipelineDownloadBinaries, generate_monitor_pipeline, }; const DEFAULT_MONITOR_PIPELINE: &str = "obs.yml"; @@ -447,6 +449,8 @@ mod 
tests { use claims::*; use gitlab_runner::{GitlabLayer, Runner, RunnerBuilder}; use gitlab_runner_mock::*; + use obo_core::{build_meta::RepoArch, upload::compute_md5}; + use obo_test_support::*; use open_build_service_mock::*; use rstest::rstest; use tempfile::TempDir; @@ -454,9 +458,7 @@ mod tests { use tracing_subscriber::{Layer, Registry, filter::Targets, prelude::*}; use zip::ZipArchive; - use crate::{ - build_meta::RepoArch, logging::GitLabForwarder, test_support::*, upload::compute_md5, - }; + use crate::logging::GitLabForwarder; use super::*; @@ -1567,7 +1569,7 @@ mod tests { )] test: Option, ) { - use crate::build_meta::CommitBuildInfo; + use obo_core::build_meta::CommitBuildInfo; const TEST_MONITOR_TIMEOUT: &str = "10 minutes"; diff --git a/src/logging.rs b/obs-gitlab-runner/src/logging.rs similarity index 77% rename from src/logging.rs rename to obs-gitlab-runner/src/logging.rs index 138c9ab..41e11b9 100644 --- a/src/logging.rs +++ b/obs-gitlab-runner/src/logging.rs @@ -1,7 +1,8 @@ use gitlab_runner::GitlabLayer; +use obo_core::logging::{get_event_message, is_output_field_set_in_event}; use tracing::{ Event, Level, Metadata, Subscriber, - field::{self, Field, FieldSet}, + field::FieldSet, span::{Attributes, Id}, subscriber::Interest, }; @@ -12,37 +13,9 @@ use tracing_subscriber::{ registry::LookupSpan, }; -struct OutputTester(bool); - -impl field::Visit for OutputTester { - fn record_bool(&mut self, field: &field::Field, value: bool) { - if field.name() == "obs_gitlab_runner.output" { - self.0 = value - } - } - - fn record_debug(&mut self, _field: &field::Field, _value: &dyn std::fmt::Debug) {} -} - -struct MessageExtractor(Option); - -impl field::Visit for MessageExtractor { - fn record_str(&mut self, field: &Field, value: &str) { - if field.name() == "message" { - self.0 = Some(value.to_owned()); - } - } - - fn record_debug(&mut self, field: &Field, value: &dyn std::fmt::Debug) { - if field.name() == "message" { - self.0 = Some(format!("{value:?}")); - 
} - } -} - // This mostly wraps a standard GitlabLayer, but it bypasses the filter to pass -// through any events with `obs_gitlab_runner.output` set, rewriting them to -// instead use `gitlab.output`. +// through any events with TRACING_FIELD set, rewriting them to instead use +// `gitlab.output`. pub struct GitLabForwarder>(Filtered); impl LookupSpan<'span>, F: Filter + 'static> @@ -50,6 +23,7 @@ impl LookupSpan<'span>, F: Fi { pub fn new(inner: Filtered) -> Filtered { GitLabForwarder(inner).with_filter(Targets::new().with_targets([ + ("obo_core", Level::TRACE), ("obs_gitlab_runner", Level::TRACE), // This target is used to inject the current job ID, which // gitlab-runner needs to actually send the logs out. @@ -99,17 +73,13 @@ impl LookupSpan<'span>, F: Fi } fn on_event(&self, event: &Event<'_>, ctx: Context<'_, S>) { - let mut visitor = OutputTester(false); - event.record(&mut visitor); - if !visitor.0 { + if !is_output_field_set_in_event(event) { // No special behavior needed, so just forward it as-is. self.0.on_event(event, ctx); return; } - let mut visitor = MessageExtractor(None); - event.record(&mut visitor); - let Some(message) = visitor.0 else { + let Some(message) = get_event_message(event) else { return; }; @@ -157,10 +127,3 @@ impl LookupSpan<'span>, F: Fi } self.0.on_id_change(old, new, ctx); } } - -#[macro_export] -macro_rules! 
outputln { - ($($args:tt)*) => { - ::tracing::trace!(obs_gitlab_runner.output = true, $($args)*) - }; -} diff --git a/src/main.rs b/obs-gitlab-runner/src/main.rs similarity index 96% rename from src/main.rs rename to obs-gitlab-runner/src/main.rs index 5e03b55..60a79db 100644 --- a/src/main.rs +++ b/obs-gitlab-runner/src/main.rs @@ -18,21 +18,9 @@ use url::Url; use crate::handler::{HandlerOptions, ObsJobHandler}; -mod actions; -mod artifacts; -mod binaries; -mod build_meta; -mod dsc; mod handler; mod logging; -mod monitor; mod pipeline; -mod prune; -mod retry; -mod upload; - -#[cfg(test)] -mod test_support; #[derive(Debug, Clone)] struct TargetsArg { diff --git a/src/pipeline.rs b/obs-gitlab-runner/src/pipeline.rs similarity index 68% rename from src/pipeline.rs rename to obs-gitlab-runner/src/pipeline.rs index 6936603..375bb19 100644 --- a/src/pipeline.rs +++ b/obs-gitlab-runner/src/pipeline.rs @@ -1,11 +1,13 @@ use std::collections::HashMap; use color_eyre::eyre::{Context, Result}; +use obo_core::{ + actions::{DownloadBinariesAction, MonitorAction}, + build_meta::{CommitBuildInfo, RepoArch}, +}; use serde::Serialize; use tracing::instrument; -use crate::build_meta::{CommitBuildInfo, RepoArch}; - #[derive(Clone, Debug, PartialEq, Eq)] pub enum PipelineDownloadBinaries { OnSuccess { build_results_dir: String }, @@ -43,16 +45,6 @@ struct JobSpec { rules: Option, } -fn generate_command(command_name: String, args: &[(&str, String)]) -> String { - let mut command = vec![command_name]; - - for (arg, value) in args { - command.extend_from_slice(&[format!("--{arg}"), shell_words::quote(value).into_owned()]); - } - - command.join(" ") -} - #[instrument] pub fn generate_monitor_pipeline( project: &str, @@ -74,34 +66,34 @@ pub fn generate_monitor_pipeline( let mut script = vec![]; let mut artifact_paths = vec![]; - let common_args = vec![ - ("project", project.to_owned()), - ("package", package.to_owned()), - ("repository", repo.to_owned()), - ("arch", arch.to_owned()), - ]; 
- - let mut monitor_args = vec![ - ("rev", rev.to_owned()), - ("srcmd5", srcmd5.to_owned()), - ("build-log-out", options.build_log_out.clone()), - ]; - if let Some(endtime) = &info.prev_endtime_for_commit { - monitor_args.push(("prev-endtime-for-commit", endtime.to_string())); - } - monitor_args.extend_from_slice(&common_args); - script.push(generate_command("monitor".to_owned(), &monitor_args)); + script.push( + MonitorAction { + project: project.to_owned(), + package: package.to_owned(), + repository: repo.to_owned(), + arch: arch.to_owned(), + rev: rev.to_owned(), + srcmd5: srcmd5.to_owned(), + build_log_out: options.build_log_out.clone(), + prev_endtime_for_commit: info.prev_endtime_for_commit, + } + .generate_command(), + ); artifact_paths.push(options.build_log_out.clone()); if let PipelineDownloadBinaries::OnSuccess { build_results_dir } = &options.download_binaries { - let mut download_args = vec![("build-results-dir", build_results_dir.clone())]; - download_args.extend_from_slice(&common_args); - script.push(generate_command( - "download-binaries".to_owned(), - &download_args, - )); + script.push( + DownloadBinariesAction { + project: project.to_owned(), + package: package.to_owned(), + repository: repo.to_owned(), + arch: arch.to_owned(), + build_results_dir: build_results_dir.into(), + } + .generate_command(), + ); artifact_paths.push(build_results_dir.clone()); } From 9d78d16c6bb0c8f5f404e20b8dfd84bfe54d297f Mon Sep 17 00:00:00 2001 From: Ryan Gonzalez Date: Fri, 25 Jul 2025 17:13:38 -0500 Subject: [PATCH 2/7] Refactor integration test core into a new 'obo-tests' crate Most of this logic is rather intricate and shareable between multiple implementations, so it makes sense for them to be in a separate crate. This necessitates quite a bit of refactoring of the test structure so that the shared test code can actually run commands and inspect the logs + artifacts. Since there are various somewhat strange requirements around command execution (i.e. 
needing to use timeouts but still get the result), that API was redone around a builder trait, which also makes it easy for implementation-specific tests to have their own methods on it. --- Cargo.lock | 16 + Cargo.toml | 1 + obo-tests/Cargo.toml | 17 + obo-tests/src/lib.rs | 598 +++++++++++++++ obs-gitlab-runner/Cargo.toml | 1 + obs-gitlab-runner/src/handler.rs | 1229 ++++++++++-------------------- 6 files changed, 1018 insertions(+), 844 deletions(-) create mode 100644 obo-tests/Cargo.toml create mode 100644 obo-tests/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 97810fc..04f3085 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1533,6 +1533,21 @@ dependencies = [ "open-build-service-mock", ] +[[package]] +name = "obo-tests" +version = "0.1.0" +dependencies = [ + "async-trait", + "camino", + "claims", + "obo-core", + "obo-test-support", + "open-build-service-api", + "open-build-service-mock", + "serde_yaml", + "tokio", +] + [[package]] name = "obs-gitlab-runner" version = "0.1.8" @@ -1548,6 +1563,7 @@ dependencies = [ "gitlab-runner-mock", "obo-core", "obo-test-support", + "obo-tests", "open-build-service-api", "open-build-service-mock", "rstest", diff --git a/Cargo.toml b/Cargo.toml index 6f8e8bd..7116534 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -2,6 +2,7 @@ resolver = "3" members = [ "obo-core", + "obo-tests", "obo-test-support", "obs-gitlab-runner" ] diff --git a/obo-tests/Cargo.toml b/obo-tests/Cargo.toml new file mode 100644 index 0000000..4b938f7 --- /dev/null +++ b/obo-tests/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "obo-tests" +description = "OBS Build Orchestrator — shared tests for different frontends" +version = "0.1.0" +edition = "2024" +license = "MIT OR Apache-2.0" + +[dependencies] +async-trait.workspace = true +camino.workspace = true +claims.workspace = true +obo-core = { path = "../obo-core" } +obo-test-support = { path = "../obo-test-support" } +open-build-service-api.workspace = true +open-build-service-mock.workspace = true 
+serde_yaml.workspace = true +tokio.workspace = true diff --git a/obo-tests/src/lib.rs b/obo-tests/src/lib.rs new file mode 100644 index 0000000..0dfe12c --- /dev/null +++ b/obo-tests/src/lib.rs @@ -0,0 +1,598 @@ +use std::{ + cmp::Ordering, + collections::HashMap, + fmt, + time::{Duration, SystemTime}, +}; + +use async_trait::async_trait; +use camino::Utf8Path; +use claims::*; +use obo_core::{ + actions::{DEFAULT_BUILD_INFO, DEFAULT_BUILD_LOG, ObsBuildInfo}, + build_meta::RepoArch, + upload::compute_md5, +}; +use obo_test_support::*; +use open_build_service_api as obs; +use open_build_service_mock::*; + +#[derive(Clone)] +pub struct ObsContext { + pub client: obs::Client, + pub mock: ObsMock, +} + +// A handle to some set of artifacts, either artificially "injected" (i.e. +// custom artifacts manually set up to be read by some command) or the outputs +// of executing a command. +pub trait ArtifactsHandle: Send + Sync + Clone + fmt::Debug {} + +// The result of a command, letting you easily determine if it succeeded and +// access the output logs / artifacts. +pub trait ExecutionResult: Send + Sync + Clone + fmt::Debug { + type Artifacts: ArtifactsHandle; + + fn ok(&self) -> bool; + fn log(&self) -> String; + fn artifacts(&self) -> Self::Artifacts; +} + +pub const EXECUTION_DEFAULT_TIMEOUT: Duration = Duration::from_secs(30); + +// Builds a set of commands to run, along with the ability to determine its +// input artifacts and timeout. 
+#[async_trait] +pub trait RunBuilder<'context>: Send + Sync + Sized { + type ArtifactsHandle: ArtifactsHandle; + type ExecutionResult: ExecutionResult; + + fn command(self, cmd: impl Into) -> Self { + self.script(&[cmd.into()]) + } + + fn script(self, cmd: &[String]) -> Self; + fn artifacts(self, artifacts: Self::ArtifactsHandle) -> Self; + fn timeout(self, timeout: Duration) -> Self; + async fn go(self) -> Self::ExecutionResult; +} + +// A generic "context", implemented by specific obo frontends, that allows the +// tests to manipulate artifacts and run commands in a generalized way. +#[async_trait] +pub trait TestContext: Send + Sync { + type ArtifactsHandle: ArtifactsHandle; + type ExecutionResult: ExecutionResult; + type RunBuilder<'context>: RunBuilder< + 'context, + ArtifactsHandle = Self::ArtifactsHandle, + ExecutionResult = Self::ExecutionResult, + > + 'context + where + Self: 'context; + + fn obs(&self) -> &ObsContext; + + async fn inject_artifacts( + &mut self, + artifacts: HashMap>, + ) -> Self::ArtifactsHandle; + + async fn fetch_artifacts(&self, handle: &Self::ArtifactsHandle) -> HashMap>; + + fn run(&mut self) -> Self::RunBuilder<'_>; +} + +#[derive(Debug, PartialEq, Eq, Clone, Copy)] +pub enum DputTest { + Basic, + Rebuild, + ReusePreviousBuild, + Branch, +} + +pub async fn test_dput( + context: &mut C, + test: DputTest, +) -> (C::ArtifactsHandle, ObsBuildInfo) { + let test1_file = "test1"; + let test1_contents = b"123"; + let test1_md5 = compute_md5(test1_contents); + + let dsc1_file = "test1.dsc"; + let dsc1_contents = format!( + "Source: {}\nFiles:\n {} {} {}", + TEST_PACKAGE_1, + test1_md5.clone(), + test1_contents.len(), + test1_file + ); + let dsc1_md5 = compute_md5(dsc1_contents.as_bytes()); + + let dsc1_bad_file = "test1-bad.dsc"; + let dsc1_bad_contents = + dsc1_contents.replace(test1_file, &(test1_file.to_owned() + ".missing")); + + context.obs().mock.add_project(TEST_PROJECT.to_owned()); + + context.obs().mock.add_or_update_repository( + 
TEST_PROJECT, + TEST_REPO.to_owned(), + TEST_ARCH_1.to_owned(), + MockRepositoryCode::Finished, + ); + context.obs().mock.add_or_update_repository( + TEST_PROJECT, + TEST_REPO.to_owned(), + TEST_ARCH_2.to_owned(), + MockRepositoryCode::Finished, + ); + + if test == DputTest::Rebuild { + // We also test excluded repos on rebuilds; this test makes it easier, + // because it's not testing creating a new package, so we can create it + // ourselves first with the desired metadata. + context.obs().mock.add_new_package( + TEST_PROJECT, + TEST_PACKAGE_1.to_owned(), + MockPackageOptions::default(), + ); + context.obs().mock.set_package_build_status( + TEST_PROJECT, + TEST_REPO, + TEST_ARCH_2, + TEST_PACKAGE_1.to_owned(), + MockBuildStatus::new(MockPackageCode::Disabled), + ); + } + + let artifacts = context + .inject_artifacts( + [ + (dsc1_file.to_owned(), dsc1_contents.as_bytes().to_vec()), + ( + dsc1_bad_file.to_owned(), + dsc1_bad_contents.as_bytes().to_vec(), + ), + (test1_file.to_owned(), test1_contents.to_vec()), + ] + .into(), + ) + .await; + + let mut dput_command = format!("dput {TEST_PROJECT} {dsc1_file}"); + let mut created_project = TEST_PROJECT.to_owned(); + + if test == DputTest::Branch { + created_project += ":branched"; + dput_command += &format!(" --branch-to {created_project}"); + } + + let dput = context + .run() + .command(dput_command.replace(dsc1_file, dsc1_bad_file)) + .artifacts(artifacts.clone()) + .go() + .await; + + assert!(!dput.ok()); + + let results = context.fetch_artifacts(&dput.artifacts()).await; + let build_info: ObsBuildInfo = + serde_yaml::from_slice(results.get(DEFAULT_BUILD_INFO).unwrap()).unwrap(); + + assert_eq!(build_info.project, created_project); + assert_eq!(build_info.package, TEST_PACKAGE_1); + assert_none!(build_info.rev); + assert_eq!(build_info.is_branched, test == DputTest::Branch); + + let mut dput = context + .run() + .command(&dput_command) + .artifacts(artifacts.clone()) + .go() + .await; + assert!(dput.ok()); + + if 
test == DputTest::Rebuild || test == DputTest::ReusePreviousBuild { + context.obs().mock.add_or_update_repository( + &created_project, + TEST_REPO.to_owned(), + TEST_ARCH_1.to_owned(), + MockRepositoryCode::Building, + ); + // Also test endtimes, since we now have an existing package to modify + // the metadata of. + let dir = assert_ok!( + context + .obs() + .client + .project(TEST_PROJECT.to_owned()) + .package(TEST_PACKAGE_1.to_owned()) + .list(None) + .await + ); + // Testing of reused builds never had the second arch disabled, so also + // add that build history. + if test == DputTest::ReusePreviousBuild { + context.obs().mock.add_job_history( + TEST_PROJECT, + TEST_REPO, + TEST_ARCH_2, + MockJobHistoryEntry { + package: TEST_PACKAGE_1.to_owned(), + rev: dir.rev.clone().unwrap(), + srcmd5: dir.srcmd5.clone(), + ..Default::default() + }, + ); + } + context.obs().mock.add_job_history( + TEST_PROJECT, + TEST_REPO, + TEST_ARCH_1, + MockJobHistoryEntry { + package: TEST_PACKAGE_1.to_owned(), + rev: dir.rev.unwrap(), + srcmd5: dir.srcmd5, + ..Default::default() + }, + ); + + context.obs().mock.set_package_build_status_for_rebuilds( + &created_project, + MockBuildStatus::new(MockPackageCode::Broken), + ); + context.obs().mock.set_package_build_status( + &created_project, + TEST_REPO, + TEST_ARCH_1, + TEST_PACKAGE_1.to_owned(), + MockBuildStatus::new(MockPackageCode::Failed), + ); + + let status = assert_ok!( + context + .obs() + .client + .project(created_project.clone()) + .package(TEST_PACKAGE_1.to_owned()) + .status(TEST_REPO, TEST_ARCH_1) + .await + ); + assert_eq!(status.code, obs::PackageCode::Failed); + + dput = context + .run() + .command(&dput_command) + .artifacts(artifacts.clone()) + .go() + .await; + assert!(dput.ok()); + + assert!(dput.log().contains("unchanged")); + + let status = assert_ok!( + context + .obs() + .client + .project(created_project.clone()) + .package(TEST_PACKAGE_1.to_owned()) + .status(TEST_REPO, TEST_ARCH_1) + .await + ); + 
assert_eq!(status.code, obs::PackageCode::Failed); + + if test == DputTest::Rebuild { + dput = context + .run() + .command(format!("{dput_command} --rebuild-if-unchanged")) + .artifacts(artifacts.clone()) + .go() + .await; + assert!(dput.ok()); + + let status = assert_ok!( + context + .obs() + .client + .project(created_project.clone()) + .package(TEST_PACKAGE_1.to_owned()) + .status(TEST_REPO, TEST_ARCH_1) + .await + ); + assert_eq!(status.code, obs::PackageCode::Broken); + + assert!(dput.log().contains("unchanged")); + } + } + + let results = context.fetch_artifacts(&dput.artifacts()).await; + let build_info: ObsBuildInfo = + serde_yaml::from_slice(results.get(DEFAULT_BUILD_INFO).unwrap()).unwrap(); + + assert_eq!(build_info.project, created_project); + assert_eq!(build_info.package, TEST_PACKAGE_1); + assert_some!(build_info.rev.as_deref()); + assert_eq!(build_info.is_branched, test == DputTest::Branch); + + assert_eq!( + build_info.enabled_repos.len(), + if test == DputTest::Rebuild { 1 } else { 2 } + ); + + let arch_1 = build_info + .enabled_repos + .get(&RepoArch { + repo: TEST_REPO.to_owned(), + arch: TEST_ARCH_1.to_owned(), + }) + .unwrap(); + + if test == DputTest::Rebuild { + assert_some!(arch_1.prev_endtime_for_commit); + } else { + assert_none!(arch_1.prev_endtime_for_commit); + + let arch_2 = build_info + .enabled_repos + .get(&RepoArch { + repo: TEST_REPO.to_owned(), + arch: TEST_ARCH_2.to_owned(), + }) + .unwrap(); + assert_none!(arch_2.prev_endtime_for_commit); + } + + let mut dir = assert_ok!( + context + .obs() + .client + .project(created_project.clone()) + .package(TEST_PACKAGE_1.to_owned()) + .list(None) + .await + ); + + assert_eq!(dir.entries.len(), 3); + dir.entries.sort_by(|a, b| a.name.cmp(&b.name)); + assert_eq!(dir.entries[0].name, "_meta"); + assert_eq!(dir.entries[1].name, test1_file); + assert_eq!(dir.entries[1].size, test1_contents.len() as u64); + assert_eq!(dir.entries[1].md5, test1_md5); + assert_eq!(dir.entries[2].name, 
dsc1_file); + assert_eq!(dir.entries[2].size, dsc1_contents.len() as u64); + assert_eq!(dir.entries[2].md5, dsc1_md5); + + (dput.artifacts(), build_info) +} + +pub const MONITOR_TEST_BUILD_RESULTS_DIR: &str = "results"; +pub const MONITOR_TEST_BUILD_RESULT: &str = "test-build-result"; +pub const MONITOR_TEST_BUILD_RESULT_CONTENTS: &[u8] = b"abcdef"; +pub const MONITOR_TEST_LOG_TAIL: u64 = 50; +pub const MONITOR_TEST_OLD_STATUS_SLEEP_DURATION: Duration = Duration::from_millis(100); + +#[derive(Debug, PartialEq, Eq, Clone, Copy)] +pub enum MonitorLogTest { + // Test that long logs are truncated. + Long, + // Test that short logs are fully shown. + Short, + // Test that revision mismatches result in unavailable logs. + Unavailable, +} + +#[allow(clippy::too_many_arguments)] +pub async fn test_monitoring( + context: &mut C, + dput: C::ArtifactsHandle, + build_info: &ObsBuildInfo, + repo: &RepoArch, + script: &[String], + success: bool, + dput_test: DputTest, + log_test: MonitorLogTest, + download_binaries: bool, +) { + let srcmd5_prefix = format!( + "srcmd5 '{}' ", + if log_test == MonitorLogTest::Unavailable { + ZERO_REV_SRCMD5.to_owned() + } else { + let dir = assert_ok!( + context + .obs() + .client + .project(build_info.project.to_owned()) + .package(TEST_PACKAGE_1.to_owned()) + .list(None) + .await + ); + + dir.srcmd5 + } + ); + + let (log_contents, log_vs_limit) = if log_test == MonitorLogTest::Short { + (srcmd5_prefix + "short", Ordering::Less) + } else { + ( + srcmd5_prefix + "this is a long log that will need to be trimmed when printed", + Ordering::Greater, + ) + }; + + assert_eq!( + log_contents.len().cmp(&(MONITOR_TEST_LOG_TAIL as usize)), + log_vs_limit + ); + + // Sanity check this, even though test_dput should have already checked it. 
+ assert_eq!(repo.repo, TEST_REPO); + assert!( + repo.arch == TEST_ARCH_1 || repo.arch == TEST_ARCH_2, + "unexpected arch '{}'", + repo.arch + ); + + context.obs().mock.set_package_build_status( + &build_info.project, + &repo.repo, + &repo.arch, + TEST_PACKAGE_1.to_owned(), + MockBuildStatus::new(if success { + MockPackageCode::Succeeded + } else { + MockPackageCode::Failed + }), + ); + context.obs().mock.add_completed_build_log( + &build_info.project, + TEST_REPO, + &repo.arch, + TEST_PACKAGE_1.to_owned(), + MockBuildLog::new(log_contents.to_owned()), + success, + ); + context.obs().mock.set_package_binaries( + &build_info.project, + TEST_REPO, + &repo.arch, + TEST_PACKAGE_1.to_owned(), + [( + MONITOR_TEST_BUILD_RESULT.to_owned(), + MockBinary { + contents: MONITOR_TEST_BUILD_RESULT_CONTENTS.to_vec(), + mtime: SystemTime::now(), + }, + )] + .into(), + ); + + if dput_test != DputTest::ReusePreviousBuild { + // Update the endtime in the background, otherwise the monitor will hang + // forever waiting. + let mock = context.obs().mock.clone(); + let build_info_2 = build_info.clone(); + let repo_2 = repo.clone(); + tokio::spawn(async move { + tokio::time::sleep(MONITOR_TEST_OLD_STATUS_SLEEP_DURATION * 10).await; + mock.add_job_history( + &build_info_2.project, + &repo_2.repo, + &repo_2.arch, + MockJobHistoryEntry { + package: build_info_2.package, + endtime: SystemTime::UNIX_EPOCH + Duration::from_secs(999), + srcmd5: build_info_2.srcmd5.unwrap(), + ..Default::default() + }, + ); + }); + } + + let monitor = context + .run() + .script(script) + .artifacts(dput.clone()) + .timeout(MONITOR_TEST_OLD_STATUS_SLEEP_DURATION * 20) + .go() + .await; + assert_eq!( + monitor.ok(), + success && log_test != MonitorLogTest::Unavailable + ); + + let log = monitor.log(); + + assert_eq!( + log.contains("unavailable"), + log_test == MonitorLogTest::Unavailable + ); + + // If we reused a previous build, we're not waiting for a new build, so + // don't check for an old build status. 
+ let build_actually_occurred = dput_test != DputTest::ReusePreviousBuild; + assert_eq!( + log.contains("Waiting for build status"), + build_actually_occurred + ); + + assert_eq!( + log.contains(&log_contents), + !success && log_test == MonitorLogTest::Short + ); + + if !success && log_test == MonitorLogTest::Long { + let log_bytes = log_contents.as_bytes(); + let truncated_log_bytes = &log_bytes[log_bytes.len() - (MONITOR_TEST_LOG_TAIL as usize)..]; + assert!(log.contains(String::from_utf8_lossy(truncated_log_bytes).as_ref())); + } + + let results = context.fetch_artifacts(&monitor.artifacts()).await; + let build_result_path = Utf8Path::new(MONITOR_TEST_BUILD_RESULTS_DIR) + .join(MONITOR_TEST_BUILD_RESULT) + .into_string(); + let mut has_built_result = false; + + if log_test != MonitorLogTest::Unavailable { + let full_log = results.get(DEFAULT_BUILD_LOG).unwrap(); + assert_eq!(log_contents, String::from_utf8_lossy(full_log)); + + if success && download_binaries { + let build_result = results.get(&build_result_path).unwrap(); + assert_eq!(MONITOR_TEST_BUILD_RESULT_CONTENTS, &build_result[..]); + + has_built_result = true; + } + } + + if !has_built_result { + assert_none!(results.get(&build_result_path)); + } +} + +pub async fn test_prune_missing_build_info(context: &mut C) { + let prune = context.run().command("prune").go().await; + assert!(!prune.ok()); + + let prune = context + .run() + .command("prune --ignore-missing-build-info") + .go() + .await; + assert!(prune.ok()); + + assert!(prune.log().contains("Skipping prune")); +} + +pub async fn test_prune_deleted_package_1_if_branched( + context: &C, + build_info: &ObsBuildInfo, + prune: &C::ExecutionResult, +) { + if build_info.is_branched { + assert_err!( + context + .obs() + .client + .project(build_info.project.clone()) + .package(TEST_PACKAGE_1.to_owned()) + .list(None) + .await + ); + } else { + assert!(prune.log().contains("package was not branched")); + + assert_ok!( + context + .obs() + .client + 
.project(build_info.project.clone()) + .package(TEST_PACKAGE_1.to_owned()) + .list(None) + .await + ); + } +} diff --git a/obs-gitlab-runner/Cargo.toml b/obs-gitlab-runner/Cargo.toml index 4e0c2cb..ac14003 100644 --- a/obs-gitlab-runner/Cargo.toml +++ b/obs-gitlab-runner/Cargo.toml @@ -36,6 +36,7 @@ url = "2.5" claims.workspace = true gitlab-runner-mock = "0.2.1" # gitlab-runner-mock = { path = "../gitlab-runner-rs/gitlab-runner-mock" } +obo-tests = { path = "../obo-tests" } open-build-service-mock.workspace = true rstest.workspace = true wiremock.workspace = true diff --git a/obs-gitlab-runner/src/handler.rs b/obs-gitlab-runner/src/handler.rs index 8b7751e..7276334 100644 --- a/obs-gitlab-runner/src/handler.rs +++ b/obs-gitlab-runner/src/handler.rs @@ -439,19 +439,18 @@ async fn check_for_artifact( #[cfg(test)] mod tests { use std::{ - cmp::Ordering, io::{Cursor, Read}, + marker::PhantomData, sync::{Arc, Once}, - time::{Duration, SystemTime}, + time::Duration, }; - use camino::Utf8Path; use claims::*; use gitlab_runner::{GitlabLayer, Runner, RunnerBuilder}; use gitlab_runner_mock::*; - use obo_core::{build_meta::RepoArch, upload::compute_md5}; + use obo_core::build_meta::{CommitBuildInfo, RepoArch}; use obo_test_support::*; - use open_build_service_mock::*; + use obo_tests::*; use rstest::rstest; use tempfile::TempDir; use tracing::{Level, instrument::WithSubscriber}; @@ -462,151 +461,6 @@ mod tests { use super::*; - const JOB_TIMEOUT: u64 = 3600; - const TEST_LOG_TAIL: u64 = 50; - const OLD_STATUS_SLEEP_DURATION: Duration = Duration::from_millis(100); - - const DEFAULT_HANDLER_OPTIONS: HandlerOptions = HandlerOptions { - default_monitor_job_timeout: None, - log_tail: TEST_LOG_TAIL, - monitor: PackageMonitoringOptions { - sleep_on_building: Duration::ZERO, - sleep_on_old_status: OLD_STATUS_SLEEP_DURATION, - // High limit, since we don't really test that - // functionality in the handler tests. 
- max_old_status_retries: 99, - }, - }; - - static COLOR_EYRE_INSTALL: Once = Once::new(); - - struct TestContext { - _runner_dir: TempDir, - gitlab_mock: GitlabRunnerMock, - runner: Runner, - - obs_mock: ObsMock, - obs_client: obs::Client, - } - - async fn with_context(func: impl AsyncFnOnce(TestContext) -> T) -> T { - COLOR_EYRE_INSTALL.call_once(|| color_eyre::install().unwrap()); - - let runner_dir = tempfile::tempdir().unwrap(); - let gitlab_mock = GitlabRunnerMock::start().await; - let (layer, jobs) = GitlabLayer::new(); - let runner = RunnerBuilder::new( - gitlab_mock.uri(), - gitlab_mock.runner_token().to_owned(), - runner_dir.path().to_owned(), - jobs, - ) - .build() - .await; - - let obs_mock = create_default_mock().await; - let obs_client = create_default_client(&obs_mock); - - let ctx = TestContext { - _runner_dir: runner_dir, - gitlab_mock, - runner, - obs_mock, - obs_client, - }; - - func(ctx) - .with_subscriber( - Registry::default() - .with( - tracing_subscriber::fmt::layer() - .with_test_writer() - .with_filter( - Targets::new().with_target("obs_gitlab_runner", Level::TRACE), - ), - ) - .with(tracing_error::ErrorLayer::default()) - .with(GitLabForwarder::new(layer)), - ) - .await - } - - #[derive(Default)] - struct JobSpec { - name: String, - dependencies: Vec, - variables: HashMap, - script: Vec, - after_script: Vec, - } - - fn enqueue_job(context: &TestContext, spec: JobSpec) -> MockJob { - let mut builder = context.gitlab_mock.job_builder(spec.name); - - builder.add_step( - MockJobStepName::Script, - spec.script, - JOB_TIMEOUT, - MockJobStepWhen::OnSuccess, - false, - ); - - if !spec.after_script.is_empty() { - builder.add_step( - MockJobStepName::AfterScript, - spec.after_script, - JOB_TIMEOUT, - MockJobStepWhen::OnSuccess, - false, - ); - } - - builder.add_artifact( - None, - false, - vec!["*".to_owned()], - Some(MockJobArtifactWhen::Always), - "archive".to_owned(), - Some("zip".to_owned()), - None, - ); - - for dependency in spec.dependencies 
{ - builder.dependency(dependency); - } - for (key, value) in spec.variables { - builder.add_variable(key, value, true, false); - } - - builder.add_variable( - "OBS_SERVER".to_owned(), - context.obs_client.url().to_string(), - false, - true, - ); - builder.add_variable("OBS_USER".to_owned(), TEST_USER.to_owned(), false, true); - builder.add_variable("OBS_PASSWORD".to_owned(), TEST_PASS.to_owned(), false, true); - - let job = builder.build(); - context.gitlab_mock.enqueue_job(job.clone()); - job - } - - async fn run_handler(context: &mut TestContext, handler_func: Func) - where - U: UploadableFile + Send + 'static, - H: JobHandler + Send + 'static, - Func: (FnOnce(Job) -> H) + Send + Sync + 'static, - { - let got_job = context - .runner - .request_job(move |job| futures_util::future::ready(Ok(handler_func(job)))) - .await - .unwrap(); - assert!(got_job); - context.runner.wait_for_space(1).await; - } - struct PutArtifactsHandler { artifacts: Arc>>, } @@ -651,369 +505,334 @@ mod tests { } } - async fn put_artifacts( - context: &mut TestContext, - artifacts: HashMap>, - ) -> MockJob { - let artifacts_job = enqueue_job( - context, - JobSpec { - name: "artifacts".to_owned(), - script: vec!["dummy".to_owned()], - ..Default::default() - }, - ); - run_handler(context, |_| PutArtifactsHandler { - artifacts: Arc::new(artifacts), - }) - .await; - artifacts_job - } + #[derive(Clone, Debug)] + struct GitLabArtifactsHandle(MockJob); - fn get_job_artifacts(job: &MockJob) -> HashMap> { - let Some(artifact) = job.uploaded_artifacts().next() else { - return Default::default(); - }; + impl ArtifactsHandle for GitLabArtifactsHandle {} - let data = (*artifact.data).clone(); - assert!(!data.is_empty()); + #[derive(Clone, Debug)] + struct GitLabExecutionResult(MockJob); - let cursor = Cursor::new(data); - let mut zip = ZipArchive::new(cursor).unwrap(); + impl ExecutionResult for GitLabExecutionResult { + type Artifacts = GitLabArtifactsHandle; - (0..zip.len()) - .map(|i| { - let mut file 
= zip.by_index(i).unwrap(); + fn ok(&self) -> bool { + self.0.state() == MockJobState::Success + } - let mut contents = vec![]; - file.read_to_end(&mut contents).unwrap(); + fn log(&self) -> String { + String::from_utf8_lossy(&self.0.log()).into_owned() + } - (file.name().to_owned(), contents) - }) - .collect() + fn artifacts(&self) -> Self::Artifacts { + GitLabArtifactsHandle(self.0.clone()) + } } - async fn run_obs_handler_with_options(context: &mut TestContext, options: HandlerOptions) { - run_handler(context, move |job| { - assert_ok!(ObsJobHandler::from_obs_config_in_job(job, options)) - }) - .await; + // All the GitLabRunHandlerWrapper* boilerplate is basically a trick to be + // able to type-erase the generics of Runner::request_job(), so that you can + // pass a job handler to the RunBuilder without having to then propagate the + // handler's type everywhere. Ideally we could just use a lambda, but that + // causes a variety of lifetime issues due to the various constraints around + // async function types. 
+ #[async_trait] + trait GitLabRunHandlerWrapper: Send + Sync { + async fn request_job(&mut self, runner: &mut Runner); } - async fn run_obs_handler(context: &mut TestContext) { - run_obs_handler_with_options(context, DEFAULT_HANDLER_OPTIONS).await; + struct GitLabRunHandlerWrapperImpl< + U: UploadableFile + Send + Sync + 'static, + H: JobHandler + Send + Sync + 'static, + Func: (FnOnce(Job) -> H) + Send + Sync + 'static, + > { + func: Option, + _phantom: PhantomData<(U, H)>, } - #[derive(Debug, PartialEq, Eq, Clone, Copy)] - enum DputTest { - Basic, - Rebuild, - ReusePreviousBuild, - Branch, + #[async_trait] + impl< + U: UploadableFile + Send + Sync + 'static, + H: JobHandler + Send + Sync + 'static, + Func: (FnOnce(Job) -> H) + Send + Sync + 'static, + > GitLabRunHandlerWrapper for GitLabRunHandlerWrapperImpl + { + async fn request_job(&mut self, runner: &mut Runner) { + let handler = self + .func + .take() + .expect("request_job can only be called once"); + let got_job = runner + .request_job(move |job| futures_util::future::ready(Ok(handler(job)))) + .await + .unwrap(); + assert!(got_job); + } } - async fn test_dput(context: &mut TestContext, test: DputTest) -> (MockJob, ObsBuildInfo) { - let test1_file = "test1"; - let test1_contents = b"123"; - let test1_md5 = compute_md5(test1_contents); - - let dsc1_file = "test1.dsc"; - let dsc1_contents = format!( - "Source: {}\nFiles:\n {} {} {}", - TEST_PACKAGE_1, - test1_md5.clone(), - test1_contents.len(), - test1_file - ); - let dsc1_md5 = compute_md5(dsc1_contents.as_bytes()); + struct GitLabRunBuilder<'context> { + context: &'context mut GitLabTestContext, + script: Vec, + after_script: Vec, + dependencies: Vec, + variables: HashMap, + handler: Box, + timeout: Duration, + } - let dsc1_bad_file = "test1-bad.dsc"; - let dsc1_bad_contents = - dsc1_contents.replace(test1_file, &(test1_file.to_owned() + ".missing")); + fn create_obs_job_handler_factory( + options: HandlerOptions, + ) -> impl FnOnce(Job) -> ObsJobHandler 
{ + move |job| assert_ok!(ObsJobHandler::from_obs_config_in_job(job, options)) + } - context.obs_mock.add_project(TEST_PROJECT.to_owned()); + impl GitLabRunBuilder<'_> { + fn after_command(self, cmd: impl Into) -> Self { + self.after_script(&[cmd.into()]) + } - context.obs_mock.add_or_update_repository( - TEST_PROJECT, - TEST_REPO.to_owned(), - TEST_ARCH_1.to_owned(), - MockRepositoryCode::Finished, - ); - context.obs_mock.add_or_update_repository( - TEST_PROJECT, - TEST_REPO.to_owned(), - TEST_ARCH_2.to_owned(), - MockRepositoryCode::Finished, - ); + fn after_script(mut self, script: &[String]) -> Self { + self.after_script.extend_from_slice(script); + self + } - if test == DputTest::Rebuild { - // We also test excluded repos on rebuilds; this test makes it - // easier, because it's not testing creating a new package, so we - // can create it ourselves first with the desired metadata. - context.obs_mock.add_new_package( - TEST_PROJECT, - TEST_PACKAGE_1.to_owned(), - MockPackageOptions::default(), - ); - context.obs_mock.set_package_build_status( - TEST_PROJECT, - TEST_REPO, - TEST_ARCH_2, - TEST_PACKAGE_1.to_owned(), - MockBuildStatus::new(MockPackageCode::Disabled), - ); + fn variable(mut self, name: impl Into, value: impl Into) -> Self { + self.variables.insert(name.into(), value.into()); + self } - let artifacts = put_artifacts( - context, - [ - (dsc1_file.to_owned(), dsc1_contents.as_bytes().to_vec()), - ( - dsc1_bad_file.to_owned(), - dsc1_bad_contents.as_bytes().to_vec(), - ), - (test1_file.to_owned(), test1_contents.to_vec()), - ] - .into(), - ) - .await; + fn job_handler_factory< + U: UploadableFile + Send + Sync + 'static, + H: JobHandler + Send + Sync + 'static, + Func: (FnOnce(Job) -> H) + Send + Sync + 'static, + >( + mut self, + handler_func: Func, + ) -> Self { + self.handler = Box::new(GitLabRunHandlerWrapperImpl { + func: Some(handler_func), + _phantom: PhantomData, + }); + self + } - let mut dput_command = format!("dput {TEST_PROJECT} 
{dsc1_file}"); - let mut created_project = TEST_PROJECT.to_owned(); + fn obs_job_handler(self, options: HandlerOptions) -> Self { + self.job_handler_factory(create_obs_job_handler_factory(options)) + } + } - if test == DputTest::Branch { - created_project += ":branched"; - dput_command += &format!(" --branch-to {created_project}"); + #[async_trait] + impl<'context> RunBuilder<'context> for GitLabRunBuilder<'context> { + type ArtifactsHandle = GitLabArtifactsHandle; + type ExecutionResult = GitLabExecutionResult; + + fn script(mut self, script: &[String]) -> Self { + self.script.extend_from_slice(script); + self } - let dput = enqueue_job( - context, - JobSpec { - name: "dput".to_owned(), - dependencies: vec![artifacts.clone()], - script: vec![dput_command.replace(dsc1_file, dsc1_bad_file)], - ..Default::default() - }, - ); + fn artifacts(mut self, artifacts: Self::ArtifactsHandle) -> Self { + self.dependencies.push(artifacts.0); + self + } - run_obs_handler(context).await; - assert_eq!(MockJobState::Failed, dput.state()); - - let results = get_job_artifacts(&dput); - let build_info: ObsBuildInfo = - serde_yaml::from_slice(results.get(DEFAULT_BUILD_INFO).unwrap()).unwrap(); - - assert_eq!(build_info.project, created_project); - assert_eq!(build_info.package, TEST_PACKAGE_1); - assert_none!(build_info.rev); - assert_eq!(build_info.is_branched, test == DputTest::Branch); - - let mut dput = enqueue_job( - context, - JobSpec { - name: "dput".to_owned(), - dependencies: vec![artifacts.clone()], - script: vec![dput_command.clone()], - ..Default::default() - }, - ); + fn timeout(mut self, timeout: Duration) -> Self { + self.timeout = timeout; + self + } - run_obs_handler(context).await; - assert_eq!(MockJobState::Success, dput.state()); + async fn go(mut self) -> Self::ExecutionResult { + const JOB_TIMEOUT: u64 = 3600; - if test == DputTest::Rebuild || test == DputTest::ReusePreviousBuild { - context.obs_mock.add_or_update_repository( - &created_project, - 
TEST_REPO.to_owned(), - TEST_ARCH_1.to_owned(), - MockRepositoryCode::Building, - ); - // Also test endtimes, since we now have an existing package to - // modify the metadata of. - let dir = assert_ok!( - context - .obs_client - .project(TEST_PROJECT.to_owned()) - .package(TEST_PACKAGE_1.to_owned()) - .list(None) - .await + let mut builder = self.context.gitlab_mock.job_builder("test".to_owned()); + + builder.add_step( + MockJobStepName::Script, + self.script, + JOB_TIMEOUT, + MockJobStepWhen::OnSuccess, + false, ); - // Testing of reused builds never had the second arch disabled, so - // also add that build history. - if test == DputTest::ReusePreviousBuild { - context.obs_mock.add_job_history( - TEST_PROJECT, - TEST_REPO, - TEST_ARCH_2, - MockJobHistoryEntry { - package: TEST_PACKAGE_1.to_owned(), - rev: dir.rev.clone().unwrap(), - srcmd5: dir.srcmd5.clone(), - ..Default::default() - }, + + if !self.after_script.is_empty() { + builder.add_step( + MockJobStepName::AfterScript, + self.after_script, + JOB_TIMEOUT, + MockJobStepWhen::OnSuccess, + false, ); } - context.obs_mock.add_job_history( - TEST_PROJECT, - TEST_REPO, - TEST_ARCH_1, - MockJobHistoryEntry { - package: TEST_PACKAGE_1.to_owned(), - rev: dir.rev.unwrap(), - srcmd5: dir.srcmd5, - ..Default::default() - }, - ); - context.obs_mock.set_package_build_status_for_rebuilds( - &created_project, - MockBuildStatus::new(MockPackageCode::Broken), - ); - context.obs_mock.set_package_build_status( - &created_project, - TEST_REPO, - TEST_ARCH_1, - TEST_PACKAGE_1.to_owned(), - MockBuildStatus::new(MockPackageCode::Failed), + builder.add_artifact( + None, + false, + vec!["*".to_owned()], + Some(MockJobArtifactWhen::Always), + "archive".to_owned(), + Some("zip".to_owned()), + None, ); - let status = assert_ok!( - context - .obs_client - .project(created_project.clone()) - .package(TEST_PACKAGE_1.to_owned()) - .status(TEST_REPO, TEST_ARCH_1) - .await - ); - assert_eq!(status.code, obs::PackageCode::Failed); + for 
dependency in self.dependencies { + builder.dependency(dependency); + } + for (key, value) in self.variables { + builder.add_variable(key, value, true, false); + } - dput = enqueue_job( - context, - JobSpec { - name: "dput".to_owned(), - dependencies: vec![artifacts.clone()], - script: vec![dput_command.clone()], - ..Default::default() - }, + builder.add_variable( + "OBS_SERVER".to_owned(), + self.context.obs.client.url().to_string(), + false, + true, ); + builder.add_variable("OBS_USER".to_owned(), TEST_USER.to_owned(), false, true); + builder.add_variable("OBS_PASSWORD".to_owned(), TEST_PASS.to_owned(), false, true); - run_obs_handler(context).await; - assert_eq!(MockJobState::Success, dput.state()); + let job = builder.build(); + self.context.gitlab_mock.enqueue_job(job.clone()); - let job_log = String::from_utf8_lossy(&dput.log()).into_owned(); - assert!(job_log.contains("unchanged")); + self.handler.request_job(&mut self.context.runner).await; + self.context.runner.wait_for_space(1).await; - let status = assert_ok!( - context - .obs_client - .project(created_project.clone()) - .package(TEST_PACKAGE_1.to_owned()) - .status(TEST_REPO, TEST_ARCH_1) - .await - ); - assert_eq!(status.code, obs::PackageCode::Failed); - - if test == DputTest::Rebuild { - dput = enqueue_job( - context, - JobSpec { - name: "dput".to_owned(), - dependencies: vec![artifacts.clone()], - script: vec![format!("{} --rebuild-if-unchanged", dput_command)], - ..Default::default() - }, - ); + GitLabExecutionResult(job) + } + } + + const DEFAULT_HANDLER_OPTIONS: HandlerOptions = HandlerOptions { + default_monitor_job_timeout: None, + log_tail: MONITOR_TEST_LOG_TAIL, + monitor: PackageMonitoringOptions { + sleep_on_building: Duration::ZERO, + sleep_on_old_status: MONITOR_TEST_OLD_STATUS_SLEEP_DURATION, + // High limit, since we don't really test that + // functionality in the handler tests. 
+ max_old_status_retries: 99, + }, + }; - run_obs_handler(context).await; - assert_eq!(MockJobState::Success, dput.state()); + struct GitLabTestContext { + _runner_dir: TempDir, + gitlab_mock: GitlabRunnerMock, + runner: Runner, + obs: ObsContext, + } - let status = assert_ok!( - context - .obs_client - .project(created_project.clone()) - .package(TEST_PACKAGE_1.to_owned()) - .status(TEST_REPO, TEST_ARCH_1) - .await - ); - assert_eq!(status.code, obs::PackageCode::Broken); + #[async_trait] + impl TestContext for GitLabTestContext { + type ArtifactsHandle = GitLabArtifactsHandle; + type ExecutionResult = GitLabExecutionResult; + type RunBuilder<'context> = GitLabRunBuilder<'context>; - let job_log = String::from_utf8_lossy(&dput.log()).into_owned(); - assert!(job_log.contains("unchanged")); - } + fn obs(&self) -> &ObsContext { + &self.obs } - let results = get_job_artifacts(&dput); - let build_info: ObsBuildInfo = - serde_yaml::from_slice(results.get(DEFAULT_BUILD_INFO).unwrap()).unwrap(); + async fn inject_artifacts( + &mut self, + artifacts: HashMap>, + ) -> Self::ArtifactsHandle { + self.run() + .job_handler_factory(|_| PutArtifactsHandler { + artifacts: Arc::new(artifacts), + }) + .command("dummy") + .go() + .await + .artifacts() + } - assert_eq!(build_info.project, created_project); - assert_eq!(build_info.package, TEST_PACKAGE_1); - assert_some!(build_info.rev.as_deref()); - assert_eq!(build_info.is_branched, test == DputTest::Branch); + async fn fetch_artifacts( + &self, + handle: &Self::ArtifactsHandle, + ) -> HashMap> { + let Some(artifact) = handle.0.uploaded_artifacts().next() else { + return Default::default(); + }; - assert_eq!( - build_info.enabled_repos.len(), - if test == DputTest::Rebuild { 1 } else { 2 } - ); + let data = (*artifact.data).clone(); + assert!(!data.is_empty()); - let arch_1 = build_info - .enabled_repos - .get(&RepoArch { - repo: TEST_REPO.to_owned(), - arch: TEST_ARCH_1.to_owned(), - }) - .unwrap(); + let cursor = Cursor::new(data); 
+ let mut zip = ZipArchive::new(cursor).unwrap(); - if test == DputTest::Rebuild { - assert_some!(arch_1.prev_endtime_for_commit); - } else { - assert_none!(arch_1.prev_endtime_for_commit); + (0..zip.len()) + .map(|i| { + let mut file = zip.by_index(i).unwrap(); + + let mut contents = vec![]; + file.read_to_end(&mut contents).unwrap(); - let arch_2 = build_info - .enabled_repos - .get(&RepoArch { - repo: TEST_REPO.to_owned(), - arch: TEST_ARCH_2.to_owned(), + (file.name().to_owned(), contents) }) - .unwrap(); - assert_none!(arch_2.prev_endtime_for_commit); + .collect() } - let mut dir = assert_ok!( - context - .obs_client - .project(created_project.clone()) - .package(TEST_PACKAGE_1.to_owned()) - .list(None) - .await - ); - - assert_eq!(dir.entries.len(), 3); - dir.entries.sort_by(|a, b| a.name.cmp(&b.name)); - assert_eq!(dir.entries[0].name, "_meta"); - assert_eq!(dir.entries[1].name, test1_file); - assert_eq!(dir.entries[1].size, test1_contents.len() as u64); - assert_eq!(dir.entries[1].md5, test1_md5); - assert_eq!(dir.entries[2].name, dsc1_file); - assert_eq!(dir.entries[2].size, dsc1_contents.len() as u64); - assert_eq!(dir.entries[2].md5, dsc1_md5); - - (dput, build_info) + fn run(&mut self) -> Self::RunBuilder<'_> { + GitLabRunBuilder { + context: self, + script: vec![], + after_script: vec![], + dependencies: vec![], + variables: Default::default(), + handler: Box::new(GitLabRunHandlerWrapperImpl { + func: Some(create_obs_job_handler_factory(DEFAULT_HANDLER_OPTIONS)), + _phantom: PhantomData, + }), + timeout: EXECUTION_DEFAULT_TIMEOUT, + } + } } - #[derive(Debug, PartialEq, Eq)] - enum MonitorLogTest { - // Test that long logs are truncated. - Long, - // Test that short logs are fully shown. - Short, - // Test that revision mismatches result in unavailable logs. 
- Unavailable, + static COLOR_EYRE_INSTALL: Once = Once::new(); + + async fn with_context(func: impl AsyncFnOnce(GitLabTestContext) -> T) -> T { + COLOR_EYRE_INSTALL.call_once(|| color_eyre::install().unwrap()); + + let runner_dir = tempfile::tempdir().unwrap(); + let gitlab_mock = GitlabRunnerMock::start().await; + let (layer, jobs) = GitlabLayer::new(); + let runner = RunnerBuilder::new( + gitlab_mock.uri(), + gitlab_mock.runner_token().to_owned(), + runner_dir.path().to_owned(), + jobs, + ) + .build() + .await; + + let obs_mock = create_default_mock().await; + let obs_client = create_default_client(&obs_mock); + + let ctx = GitLabTestContext { + _runner_dir: runner_dir, + gitlab_mock, + runner, + obs: ObsContext { + client: obs_client, + mock: obs_mock, + }, + }; + + func(ctx) + .with_subscriber( + Registry::default() + .with( + tracing_subscriber::fmt::layer() + .with_test_writer() + .with_filter( + Targets::new().with_target("obs_gitlab_runner", Level::TRACE), + ), + ) + .with(tracing_error::ErrorLayer::default()) + .with(GitLabForwarder::new(layer)), + ) + .await } - async fn test_monitoring( - context: &mut TestContext, - dput: MockJob, + async fn test_monitoring_with_generation( + context: &mut GitLabTestContext, + dput: GitLabArtifactsHandle, build_info: &ObsBuildInfo, success: bool, dput_test: DputTest, @@ -1022,41 +841,6 @@ mod tests { ) { const TEST_JOB_RUNNER_TAG: &str = "test-tag"; const TEST_MONITOR_TIMEOUT: &str = "1 day"; - const TEST_BUILD_RESULTS_DIR: &str = "results"; - const TEST_BUILD_RESULT: &str = "test-build-result"; - const TEST_BUILD_RESULT_CONTENTS: &[u8] = b"abcdef"; - - let srcmd5_prefix = format!( - "srcmd5 '{}' ", - if log_test == MonitorLogTest::Unavailable { - ZERO_REV_SRCMD5.to_owned() - } else { - let dir = assert_ok!( - context - .obs_client - .project(build_info.project.to_owned()) - .package(TEST_PACKAGE_1.to_owned()) - .list(None) - .await - ); - - dir.srcmd5 - } - ); - - let (log_contents, log_vs_limit) = if log_test == 
MonitorLogTest::Short { - (srcmd5_prefix + "short", Ordering::Less) - } else { - ( - srcmd5_prefix + "this is a long log that will need to be trimmed when printed", - Ordering::Greater, - ) - }; - - assert_eq!( - log_contents.len().cmp(&(TEST_LOG_TAIL as usize)), - log_vs_limit - ); let mut generate_command = format!( "generate-monitor {TEST_JOB_RUNNER_TAG} \ @@ -1064,22 +848,19 @@ mod tests { --rules '[{{a: 1}}, {{b: 2}}]'" ); if download_binaries { - generate_command += &format!(" --download-build-results-to {TEST_BUILD_RESULTS_DIR}"); + generate_command += + &format!(" --download-build-results-to {MONITOR_TEST_BUILD_RESULTS_DIR}"); } - let generate = enqueue_job( - context, - JobSpec { - name: "generate".to_owned(), - dependencies: vec![dput.clone()], - script: vec![generate_command], - ..Default::default() - }, - ); - run_obs_handler(context).await; - assert_eq!(generate.state(), MockJobState::Success); + let generate = context + .run() + .command(generate_command) + .artifacts(dput.clone()) + .go() + .await; + assert!(generate.ok()); - let results = get_job_artifacts(&generate); + let results = context.fetch_artifacts(&generate.artifacts()).await; let pipeline_yaml: serde_yaml::Value = assert_ok!(serde_yaml::from_slice( results.get(DEFAULT_MONITOR_PIPELINE).unwrap() )); @@ -1088,49 +869,6 @@ mod tests { assert_eq!(pipeline_map.len(), build_info.enabled_repos.len()); for repo in build_info.enabled_repos.keys() { - // Sanity check this, even though test_dput should have already - // checked it. 
- assert_eq!(repo.repo, TEST_REPO); - assert!( - repo.arch == TEST_ARCH_1 || repo.arch == TEST_ARCH_2, - "unexpected arch '{}'", - repo.arch - ); - - context.obs_mock.set_package_build_status( - &build_info.project, - &repo.repo, - &repo.arch, - TEST_PACKAGE_1.to_owned(), - MockBuildStatus::new(if success { - MockPackageCode::Succeeded - } else { - MockPackageCode::Failed - }), - ); - context.obs_mock.add_completed_build_log( - &build_info.project, - TEST_REPO, - &repo.arch, - TEST_PACKAGE_1.to_owned(), - MockBuildLog::new(log_contents.to_owned()), - success, - ); - context.obs_mock.set_package_binaries( - &build_info.project, - TEST_REPO, - &repo.arch, - TEST_PACKAGE_1.to_owned(), - [( - TEST_BUILD_RESULT.to_owned(), - MockBinary { - contents: TEST_BUILD_RESULT_CONTENTS.to_vec(), - mtime: SystemTime::now(), - }, - )] - .into(), - ); - let monitor_job_name = format!( "{}-{}-{}", DEFAULT_PIPELINE_JOB_PREFIX, TEST_REPO, &repo.arch @@ -1163,7 +901,7 @@ mod tests { if download_binaries { assert_eq!( &artifact_paths, - &[DEFAULT_BUILD_LOG, TEST_BUILD_RESULTS_DIR] + &[DEFAULT_BUILD_LOG, MONITOR_TEST_BUILD_RESULTS_DIR] ); } else { assert_eq!(&artifact_paths, &[DEFAULT_BUILD_LOG]); @@ -1203,214 +941,68 @@ mod tests { .map(|v| v.as_str().unwrap().to_owned()) .collect::>(); - let monitor = enqueue_job( + test_monitoring( context, - JobSpec { - name: monitor_job_name.clone(), - dependencies: vec![dput.clone()], - script: script.clone(), - ..Default::default() - }, - ); - - if dput_test != DputTest::ReusePreviousBuild { - // Update the endtime in the background, otherwise the monitor - // will hang forever waiting. 
- let mock = context.obs_mock.clone(); - let build_info_2 = build_info.clone(); - let repo_2 = repo.clone(); - tokio::spawn(async move { - tokio::time::sleep(OLD_STATUS_SLEEP_DURATION * 10).await; - mock.add_job_history( - &build_info_2.project, - &repo_2.repo, - &repo_2.arch, - MockJobHistoryEntry { - package: build_info_2.package, - endtime: SystemTime::UNIX_EPOCH + Duration::from_secs(999), - srcmd5: build_info_2.srcmd5.unwrap(), - ..Default::default() - }, - ); - }); - } - - assert_ok!( - tokio::time::timeout(OLD_STATUS_SLEEP_DURATION * 20, run_obs_handler(context)) - .await - ); - assert_eq!( - monitor.state(), - if success && log_test != MonitorLogTest::Unavailable { - MockJobState::Success - } else { - MockJobState::Failed - } - ); - - let job_log = String::from_utf8_lossy(&monitor.log()).into_owned(); - - assert_eq!( - job_log.contains("unavailable"), - log_test == MonitorLogTest::Unavailable - ); - - // If we reused a previous build, we're not waiting for a new build, - // so don't check for an old build status. 
- let build_actually_occurred = dput_test != DputTest::ReusePreviousBuild; - assert_eq!( - job_log.contains("Waiting for build status"), - build_actually_occurred - ); - - assert_eq!( - job_log.contains(&log_contents), - !success && log_test == MonitorLogTest::Short - ); - - if !success && log_test == MonitorLogTest::Long { - let log_bytes = log_contents.as_bytes(); - let truncated_log_bytes = &log_bytes[log_bytes.len() - (TEST_LOG_TAIL as usize)..]; - assert!(job_log.contains(String::from_utf8_lossy(truncated_log_bytes).as_ref())); - } - - let results = get_job_artifacts(&monitor); - let build_result_path = Utf8Path::new(TEST_BUILD_RESULTS_DIR) - .join(TEST_BUILD_RESULT) - .into_string(); - let mut has_built_result = false; - - if log_test != MonitorLogTest::Unavailable { - let full_log = results.get(DEFAULT_BUILD_LOG).unwrap(); - assert_eq!(log_contents, String::from_utf8_lossy(full_log)); - - if success && download_binaries { - let build_result = results.get(&build_result_path).unwrap(); - assert_eq!(TEST_BUILD_RESULT_CONTENTS, &build_result[..]); - - has_built_result = true; - } - } - - if !has_built_result { - assert_none!(results.get(&build_result_path)); - } + dput.clone(), + build_info, + repo, + &script, + success, + dput_test, + log_test, + download_binaries, + ) + .await; } } async fn test_prune( - context: &mut TestContext, - dput: MockJob, + context: &mut GitLabTestContext, + dput: GitLabArtifactsHandle, build_info: &ObsBuildInfo, only_if_job_unsuccessful: bool, ) { - let prune = enqueue_job( - context, - JobSpec { - name: "prune".to_owned(), - script: vec!["prune".to_owned()], - ..Default::default() - }, - ); - - run_obs_handler(context).await; - assert_eq!(MockJobState::Failed, prune.state()); - - let prune = enqueue_job( - context, - JobSpec { - name: "prune".to_owned(), - script: vec!["prune --ignore-missing-build-info".to_owned()], - ..Default::default() - }, - ); - - run_obs_handler(context).await; - assert_eq!(MockJobState::Success, 
prune.state()); - - assert!(String::from_utf8_lossy(&prune.log()).contains("Skipping prune")); + test_prune_missing_build_info(context).await; let prune = if only_if_job_unsuccessful { - let prune = enqueue_job( - context, - JobSpec { - name: "prune".to_owned(), - dependencies: vec![dput.clone()], - script: vec!["echo".to_owned()], - after_script: vec!["prune --only-if-job-unsuccessful".to_owned()], - ..Default::default() - }, - ); - - run_obs_handler(context).await; - assert_eq!(MockJobState::Success, prune.state()); + let prune = context + .run() + .command("echo") + .after_command("prune --only-if-job-unsuccessful") + .artifacts(dput.clone()) + .go() + .await; - assert!(String::from_utf8_lossy(&prune.log()).contains("Skipping prune")); + assert!(prune.ok()); + assert!(prune.log().contains("Skipping prune")); assert_ok!( context - .obs_client + .obs() + .client .project(build_info.project.clone()) .package(TEST_PACKAGE_1.to_owned()) .list(None) .await ); - enqueue_job( - context, - JobSpec { - name: "prune".to_owned(), - - dependencies: vec![dput.clone()], - script: vec!["echo --fail".to_owned()], - after_script: vec!["prune --only-if-job-unsuccessful".to_owned()], - ..Default::default() - }, - ) + context + .run() + .command("echo --fail") + .after_command("prune --only-if-job-unsuccessful") + .artifacts(dput.clone()) + .go() + .await } else { - enqueue_job( - context, - JobSpec { - name: "prune".to_owned(), - - dependencies: vec![dput.clone()], - script: vec!["prune".to_owned()], - ..Default::default() - }, - ) + context + .run() + .command("prune") + .artifacts(dput.clone()) + .go() + .await }; - run_obs_handler(context).await; - assert_eq!( - prune.state(), - if only_if_job_unsuccessful { - MockJobState::Failed - } else { - MockJobState::Success - } - ); - - if build_info.is_branched { - assert_err!( - context - .obs_client - .project(build_info.project.clone()) - .package(TEST_PACKAGE_1.to_owned()) - .list(None) - .await - ); - } else { - 
assert!(String::from_utf8_lossy(&prune.log()).contains("package was not branched")); - - assert_ok!( - context - .obs_client - .project(build_info.project.clone()) - .package(TEST_PACKAGE_1.to_owned()) - .list(None) - .await - ); - } + test_prune_deleted_package_1_if_branched(context, build_info, &prune).await; } #[rstest] @@ -1436,7 +1028,7 @@ mod tests { with_context(async |mut context| { let (dput, build_info) = test_dput(&mut context, dput_test).await; - test_monitoring( + test_monitoring_with_generation( &mut context, dput.clone(), &build_info, @@ -1449,7 +1041,7 @@ mod tests { test_prune( &mut context, - dput, + dput.clone(), &build_info, prune_only_if_job_unsuccessful, ) @@ -1462,27 +1054,18 @@ mod tests { #[tokio::test] async fn test_variable_expansion() { with_context(async |mut context| { - let job = enqueue_job( - &context, - JobSpec { - name: "expansion".to_owned(), - variables: [ - ("ESCAPED".to_owned(), "this should not appear".to_owned()), - ("QUOTED".to_owned(), "spaces should be preserved".to_owned()), - ("RECURSIVE".to_owned(), "recursion($RECURSIVE)".to_owned()), - ] - .into(), - script: vec!["echo --sep ; $MISSING $$ESCAPED $QUOTED $RECURSIVE".to_owned()], - ..Default::default() - }, - ); - - run_obs_handler(&mut context).await; - assert_eq!(job.state(), MockJobState::Success); + let expansion = context + .run() + .variable("ESCAPED", "this should not appear") + .variable("QUOTED", "spaces should be preserved") + .variable("RECURSIVE", "recursion($RECURSIVE)") + .command("echo --sep ; $MISSING $$ESCAPED $QUOTED $RECURSIVE") + .go() + .await; + assert!(expansion.ok()); - let job_log = String::from_utf8_lossy(&job.log()).into_owned(); assert_eq!( - job_log.lines().last().unwrap(), + expansion.log().lines().last().unwrap(), ";$ESCAPED;spaces should be preserved;recursion()" ); }) @@ -1493,62 +1076,31 @@ mod tests { #[tokio::test] async fn test_flag_parsing() { with_context(async |mut context| { - let job = enqueue_job( - &context, - JobSpec { - 
name: "flag".to_owned(), - script: vec!["echo --uppercase false".to_owned()], - ..Default::default() - }, - ); - - run_obs_handler(&mut context).await; - assert_eq!(job.state(), MockJobState::Success); - - let job_log = String::from_utf8_lossy(&job.log()).into_owned(); - assert_eq!(job_log.lines().last().unwrap(), "FALSE"); - - let job = enqueue_job( - &context, - JobSpec { - name: "flag".to_owned(), - script: vec!["echo --uppercase=false true".to_owned()], - ..Default::default() - }, - ); + let echo = context.run().command("echo --uppercase false").go().await; + assert!(echo.ok()); - run_obs_handler(&mut context).await; - assert_eq!(job.state(), MockJobState::Success); + assert_eq!(echo.log().lines().last().unwrap(), "FALSE"); - let job_log = String::from_utf8_lossy(&job.log()).into_owned(); - assert_eq!(job_log.lines().last().unwrap(), "true"); + let echo = context + .run() + .command("echo --uppercase=false true") + .go() + .await; + assert!(echo.ok()); - let job = enqueue_job( - &context, - JobSpec { - name: "flag".to_owned(), - script: vec!["echo --uppercase=true false".to_owned()], - ..Default::default() - }, - ); + assert_eq!(echo.log().lines().last().unwrap(), "true"); - run_obs_handler(&mut context).await; - assert_eq!(job.state(), MockJobState::Success); + let echo = context + .run() + .command("echo --uppercase=true false") + .go() + .await; + assert!(echo.ok()); - let job_log = String::from_utf8_lossy(&job.log()).into_owned(); - assert_eq!(job_log.lines().last().unwrap(), "FALSE"); + assert_eq!(echo.log().lines().last().unwrap(), "FALSE"); - let job = enqueue_job( - &context, - JobSpec { - name: "flag".to_owned(), - script: vec!["echo --uppercase=X false".to_owned()], - ..Default::default() - }, - ); - - run_obs_handler(&mut context).await; - assert_eq!(job.state(), MockJobState::Failed); + let echo = context.run().command("echo --uppercase=X false").go().await; + assert!(!echo.ok()); }) .await; } @@ -1569,8 +1121,6 @@ mod tests { )] test: Option, ) { - 
use obo_core::build_meta::CommitBuildInfo; - const TEST_MONITOR_TIMEOUT: &str = "10 minutes"; with_context(async |mut context| { @@ -1592,49 +1142,40 @@ mod tests { .into(), }; - let build_info = put_artifacts( - &mut context, - [( - DEFAULT_BUILD_INFO.to_owned(), - serde_yaml::to_string(&build_info).unwrap().into_bytes(), - )] - .into(), - ) - .await; - - let mut generate_spec = JobSpec { - name: "generate".to_owned(), - script: vec!["generate-monitor tag".to_owned()], - dependencies: vec![build_info], - ..Default::default() - }; - - if test == Some(GenerateMonitorTimeoutLocation::Argument) { - use std::fmt::Write; - write!( - &mut generate_spec.script[0], - " --job-timeout '{TEST_MONITOR_TIMEOUT}'" + let build_info = context + .inject_artifacts( + [( + DEFAULT_BUILD_INFO.to_owned(), + serde_yaml::to_string(&build_info).unwrap().into_bytes(), + )] + .into(), ) - .unwrap(); - } + .await; - let generate = enqueue_job(&context, generate_spec); + let generate_builder = if test == Some(GenerateMonitorTimeoutLocation::Argument) { + context.run().command(format!( + "generate-monitor tag --job-timeout '{TEST_MONITOR_TIMEOUT}'" + )) + } else { + context.run().command("generate-monitor tag") + } + .artifacts(build_info); - if test == Some(GenerateMonitorTimeoutLocation::HandlerOption) { - run_obs_handler_with_options( - &mut context, - HandlerOptions { + let generate = if test == Some(GenerateMonitorTimeoutLocation::HandlerOption) { + generate_builder + .obs_job_handler(HandlerOptions { default_monitor_job_timeout: Some(TEST_MONITOR_TIMEOUT.to_owned()), ..DEFAULT_HANDLER_OPTIONS - }, - ) - .await; + }) + .go() + .await } else { - run_obs_handler(&mut context).await; - } - assert_eq!(generate.state(), MockJobState::Success); + generate_builder.go().await + }; + + assert!(generate.ok()); - let results = get_job_artifacts(&generate); + let results = context.fetch_artifacts(&generate.artifacts()).await; let pipeline_yaml: serde_yaml::Value = assert_ok!(serde_yaml::from_slice( 
results.get(DEFAULT_MONITOR_PIPELINE).unwrap() )); From 1b177cb6142f8f37bb24dce8459b318d5c0482e7 Mon Sep 17 00:00:00 2001 From: Ryan Gonzalez Date: Wed, 6 Aug 2025 15:48:45 -0500 Subject: [PATCH 3/7] Rework the build-info format to be JSON External services like GH Actions can read JSON but *not* YAML, and the previous format couldn't simply be written "as JSON" because it relied on having structs as keys. This tweaks the format to be JSON-friendly and uses actual JSON files. --- Cargo.lock | 3 +- Cargo.toml | 1 + README.md | 14 ++++----- obo-core/Cargo.toml | 2 +- obo-core/src/actions.rs | 17 +++++------ obo-core/src/build_meta.rs | 51 ++++++++++++++----------------- obo-tests/src/lib.rs | 12 +++----- obs-gitlab-runner/Cargo.toml | 1 + obs-gitlab-runner/src/handler.rs | 24 +++++++-------- obs-gitlab-runner/src/pipeline.rs | 10 +++--- 10 files changed, 63 insertions(+), 72 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 04f3085..8308f3e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1514,7 +1514,7 @@ dependencies = [ "rfc822-like", "rstest", "serde", - "serde_yaml", + "serde_json", "shell-words", "tempfile", "thiserror 2.0.16", @@ -1568,6 +1568,7 @@ dependencies = [ "open-build-service-mock", "rstest", "serde", + "serde_json", "serde_yaml", "shell-words", "shellexpand", diff --git a/Cargo.toml b/Cargo.toml index 7116534..176b3e2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,6 +21,7 @@ open-build-service-mock = { git = "https://github.com/collabora/open-build-servi # open-build-service-mock = { path = "../open-build-service-rs/open-build-service-api" } rstest = "0.26" serde = "1.0" +serde_json = "1.0.140" serde_yaml = "0.9" shell-words = "1.1" tempfile = "3.20" diff --git a/README.md b/README.md index 6ff07e5..cb35248 100644 --- a/README.md +++ b/README.md @@ -82,7 +82,7 @@ In order to connect to OBS, three variables must be set (generally within the ```bash dput PROJECT DSC_FILE [--branch-to BRANCHED_PROJECT] - [--build-info-out 
BUILD_INFO_FILE=build-info.yml] + [--build-info-out BUILD_INFO_FILE=build-info.json] [--rebuild-if-unchanged] ``` @@ -92,7 +92,7 @@ within, will be removed. Metadata information on the uploaded revision, such as the revision number, project name, and package name, will be saved into the file specified by -`--build-info-out` (default is `build-info.yml`). This file is **required** by +`--build-info-out` (default is `build-info.json`). This file is **required** by the `generate-monitor` and `prune` steps. Do note that, if `--branch-to` is given, the file will be written *immediately* after the branch takes place (i.e. before the upload); that way, if the upload fails, the branched project can still @@ -109,7 +109,7 @@ testing builds on MRs; you can create an OBS branch named after the MR's Git branch, and then builds can take place there without interfering with your main projects. -##### `--build-info-out BUILD_INFO_FILE=build-info.yml` +##### `--build-info-out BUILD_INFO_FILE=build-info.json` Changes the filename that the build info will be written to. @@ -130,7 +130,7 @@ operation, there will *always* be a change to upload. generate-monitor RUNNER_TAG [--rules RULES] [--download-build-results-to BUILD_RESULTS_DIR] - [--build-info BUILD_INFO_FILE=build-info.yml] + [--build-info BUILD_INFO_FILE=build-info.json] [--pipeline-out PIPELINE_FILE=obs.yml] [--job-prefix MONITOR_JOB_PREFIX=obs] [--job-timeout MONITOR_JOB_TIMEOUT] @@ -199,7 +199,7 @@ dput-and-generate: After a monitoring job completes, download the build results from OBS to the given `BUILD_RESULTS_DIR`, and upload it as a GitLab build artifact.. -##### `--build-info BUILD_INFO_FILE=build-info.yml` +##### `--build-info BUILD_INFO_FILE=build-info.json` Specifies the name of the build info file to read. In particular, if a different build info filename was used with `dput` via @@ -233,7 +233,7 @@ Changes the filename each monitoring job will save the build log into. 
```bash prune - [--build-info BUILD_INFO_FILE=build-info.yml] + [--build-info BUILD_INFO_FILE=build-info.json] [--ignore-missing-build-info] [--only-if-job-unsuccessful] ``` @@ -242,7 +242,7 @@ If a branch occurred, deletes the branched package and, if now empty, project, using the information from the build info file. (If no branching occurred, this does nothing.) -##### `--build-info BUILD_INFO_FILE=build-info.yml` +##### `--build-info BUILD_INFO_FILE=build-info.json` Specifies the name of the build info file to read. In particular, if a different build info filename was used with `dput` via diff --git a/obo-core/Cargo.toml b/obo-core/Cargo.toml index 146640a..da839db 100644 --- a/obo-core/Cargo.toml +++ b/obo-core/Cargo.toml @@ -20,7 +20,7 @@ open-build-service-api.workspace = true reqwest = "0.12" rfc822-like = "0.2" serde.workspace = true -serde_yaml.workspace = true +serde_json.workspace = true shell-words.workspace = true tempfile.workspace = true thiserror.workspace = true diff --git a/obo-core/src/actions.rs b/obo-core/src/actions.rs index 1dfee2c..11244d3 100644 --- a/obo-core/src/actions.rs +++ b/obo-core/src/actions.rs @@ -1,4 +1,4 @@ -use std::{collections::HashMap, io::SeekFrom}; +use std::io::SeekFrom; use camino::{Utf8Path, Utf8PathBuf}; use clap::{ArgAction, Parser}; @@ -11,10 +11,7 @@ use tracing::{debug, instrument}; use crate::{ artifacts::{ArtifactDirectory, ArtifactReader, ArtifactWriter, MissingArtifactToNone}, binaries::download_binaries, - build_meta::{ - BuildHistoryRetrieval, BuildMeta, BuildMetaOptions, CommitBuildInfo, DisabledRepos, - RepoArch, - }, + build_meta::{BuildHistoryRetrieval, BuildMeta, BuildMetaOptions, DisabledRepos, EnabledRepo}, monitor::{MonitoredPackage, ObsMonitor, PackageCompletion, PackageMonitoringOptions}, outputln, prune::prune_branch, @@ -22,7 +19,7 @@ use crate::{ upload::ObsDscUploader, }; -pub const DEFAULT_BUILD_INFO: &str = "build-info.yml"; +pub const DEFAULT_BUILD_INFO: &str = "build-info.json"; pub const 
DEFAULT_BUILD_LOG: &str = "build.log"; // Our flags can all take explicit values, because it makes it easier to @@ -155,7 +152,7 @@ pub struct ObsBuildInfo { pub rev: Option, pub srcmd5: Option, pub is_branched: bool, - pub enabled_repos: HashMap, + pub enabled_repos: Vec, } impl ObsBuildInfo { @@ -164,7 +161,7 @@ impl ObsBuildInfo { artifacts .save_with(path, async |file: &mut ArtifactWriter| { let data = - serde_yaml::to_string(&self).wrap_err("Failed to serialize build info")?; + serde_json::to_string(&self).wrap_err("Failed to serialize build info")?; file.write_all(data.as_bytes()) .await .wrap_err("Failed to write build info file")?; @@ -217,7 +214,7 @@ impl Actions { rev: None, srcmd5: None, is_branched, - enabled_repos: HashMap::new(), + enabled_repos: vec![], }; debug!("Saving initial build info: {:?}", build_info); build_info @@ -414,7 +411,7 @@ impl Actions { artifacts.read_string(&args.build_info).await? }; - let build_info: ObsBuildInfo = serde_yaml::from_str(&build_info_data) + let build_info: ObsBuildInfo = serde_json::from_str(&build_info_data) .wrap_err("Failed to parse provided build info file")?; if build_info.is_branched { diff --git a/obo-core/src/build_meta.rs b/obo-core/src/build_meta.rs index c2330b6..41ebad7 100644 --- a/obo-core/src/build_meta.rs +++ b/obo-core/src/build_meta.rs @@ -14,7 +14,9 @@ pub struct RepoArch { } #[derive(Deserialize, Serialize, Debug, Clone)] -pub struct CommitBuildInfo { +pub struct EnabledRepo { + #[serde(flatten)] + pub repo_arch: RepoArch, pub prev_endtime_for_commit: Option, } @@ -237,26 +239,19 @@ impl BuildMeta { Ok(()) } - pub fn get_commit_build_info(&self, srcmd5: &str) -> HashMap { - let mut repos = HashMap::new(); - - for (repo, jobhist) in &self.repos { - let prev_endtime_for_commit = jobhist - .jobhist - .iter() - .filter(|e| e.srcmd5 == srcmd5) - .next_back() - .map(|e| e.endtime); - - repos.insert( - repo.clone(), - CommitBuildInfo { - prev_endtime_for_commit, - }, - ); - } - - repos + pub fn 
get_commit_build_info(&self, srcmd5: &str) -> Vec { + self.repos + .iter() + .map(|(repo, jobhist)| EnabledRepo { + repo_arch: repo.clone(), + prev_endtime_for_commit: jobhist + .jobhist + .iter() + .filter(|e| e.srcmd5 == srcmd5) + .next_back() + .map(|e| e.endtime), + }) + .collect() } } @@ -360,10 +355,10 @@ mod tests { let build_info = meta.get_commit_build_info(&srcmd5_1); - let arch_1 = assert_some!(build_info.get(&repo_arch_1)); + let arch_1 = assert_some!(build_info.iter().find(|e| e.repo_arch == repo_arch_1)); assert_some_eq!(arch_1.prev_endtime_for_commit, endtime_1); - let arch_2 = assert_some!(build_info.get(&repo_arch_2)); + let arch_2 = assert_some!(build_info.iter().find(|e| e.repo_arch == repo_arch_2)); assert_none!(arch_2.prev_endtime_for_commit); let meta = assert_ok!( @@ -385,9 +380,9 @@ mod tests { let build_info = meta.get_commit_build_info(&srcmd5_1); - let arch_1 = assert_some!(build_info.get(&repo_arch_1)); + let arch_1 = assert_some!(build_info.iter().find(|e| e.repo_arch == repo_arch_1)); assert_none!(arch_1.prev_endtime_for_commit); - let arch_2 = assert_some!(build_info.get(&repo_arch_2)); + let arch_2 = assert_some!(build_info.iter().find(|e| e.repo_arch == repo_arch_2)); assert_none!(arch_2.prev_endtime_for_commit); assert!(meta.repos.contains_key(&repo_arch_2)); @@ -431,7 +426,7 @@ mod tests { let build_info = meta.get_commit_build_info(&srcmd5_2); - let arch_1 = assert_some!(build_info.get(&repo_arch_2)); + let arch_1 = assert_some!(build_info.iter().find(|e| e.repo_arch == repo_arch_2)); assert_some_eq!(arch_1.prev_endtime_for_commit, endtime_2); mock.add_job_history( @@ -465,13 +460,13 @@ mod tests { let build_info = meta.get_commit_build_info(&srcmd5_1); - let arch_2 = assert_some!(build_info.get(&repo_arch_2)); + let arch_2 = assert_some!(build_info.iter().find(|e| e.repo_arch == repo_arch_2)); assert_some_eq!(arch_2.prev_endtime_for_commit, endtime_1); meta.clear_stored_history(); let build_info = 
meta.get_commit_build_info(&srcmd5_1); - let arch_2 = assert_some!(build_info.get(&repo_arch_2)); + let arch_2 = assert_some!(build_info.iter().find(|e| e.repo_arch == repo_arch_2)); assert_none!(arch_2.prev_endtime_for_commit); mock.set_package_build_status( diff --git a/obo-tests/src/lib.rs b/obo-tests/src/lib.rs index 0dfe12c..0561dc9 100644 --- a/obo-tests/src/lib.rs +++ b/obo-tests/src/lib.rs @@ -323,10 +323,8 @@ pub async fn test_dput( let arch_1 = build_info .enabled_repos - .get(&RepoArch { - repo: TEST_REPO.to_owned(), - arch: TEST_ARCH_1.to_owned(), - }) + .iter() + .find(|e| e.repo_arch.repo == TEST_REPO && e.repo_arch.arch == TEST_ARCH_1) .unwrap(); if test == DputTest::Rebuild { @@ -336,10 +334,8 @@ pub async fn test_dput( let arch_2 = build_info .enabled_repos - .get(&RepoArch { - repo: TEST_REPO.to_owned(), - arch: TEST_ARCH_2.to_owned(), - }) + .iter() + .find(|e| e.repo_arch.repo == TEST_REPO && e.repo_arch.arch == TEST_ARCH_2) .unwrap(); assert_none!(arch_2.prev_endtime_for_commit); } diff --git a/obs-gitlab-runner/Cargo.toml b/obs-gitlab-runner/Cargo.toml index ac14003..7b3e203 100644 --- a/obs-gitlab-runner/Cargo.toml +++ b/obs-gitlab-runner/Cargo.toml @@ -19,6 +19,7 @@ obo-core = { path = "../obo-core" } obo-test-support = { path = "../obo-test-support" } open-build-service-api.workspace = true serde.workspace = true +serde_json.workspace = true serde_yaml.workspace = true shellexpand = "3.1" shell-words.workspace = true diff --git a/obs-gitlab-runner/src/handler.rs b/obs-gitlab-runner/src/handler.rs index 7276334..ae825c0 100644 --- a/obs-gitlab-runner/src/handler.rs +++ b/obs-gitlab-runner/src/handler.rs @@ -228,7 +228,7 @@ impl ObsJobHandler { }; let build_info_data = artifacts.read_string(&args.build_info).await?; - let build_info: ObsBuildInfo = serde_yaml::from_str(&build_info_data) + let build_info: ObsBuildInfo = serde_json::from_str(&build_info_data) .wrap_err("Failed to parse provided build info file")?; let pipeline = 
generate_monitor_pipeline( @@ -448,7 +448,7 @@ mod tests { use claims::*; use gitlab_runner::{GitlabLayer, Runner, RunnerBuilder}; use gitlab_runner_mock::*; - use obo_core::build_meta::{CommitBuildInfo, RepoArch}; + use obo_core::build_meta::{EnabledRepo, RepoArch}; use obo_test_support::*; use obo_tests::*; use rstest::rstest; @@ -868,10 +868,10 @@ mod tests { assert_eq!(pipeline_map.len(), build_info.enabled_repos.len()); - for repo in build_info.enabled_repos.keys() { + for enabled in &build_info.enabled_repos { let monitor_job_name = format!( "{}-{}-{}", - DEFAULT_PIPELINE_JOB_PREFIX, TEST_REPO, &repo.arch + DEFAULT_PIPELINE_JOB_PREFIX, TEST_REPO, &enabled.repo_arch.arch ); let monitor_map = pipeline_yaml @@ -945,7 +945,7 @@ mod tests { context, dput.clone(), build_info, - repo, + &enabled.repo_arch, &script, success, dput_test, @@ -1130,23 +1130,21 @@ mod tests { rev: Some("1".to_owned()), srcmd5: Some("abc".to_owned()), is_branched: false, - enabled_repos: [( - RepoArch { + enabled_repos: vec![EnabledRepo { + repo_arch: RepoArch { repo: TEST_REPO.to_owned(), arch: TEST_ARCH_1.to_owned(), }, - CommitBuildInfo { - prev_endtime_for_commit: None, - }, - )] - .into(), + + prev_endtime_for_commit: None, + }], }; let build_info = context .inject_artifacts( [( DEFAULT_BUILD_INFO.to_owned(), - serde_yaml::to_string(&build_info).unwrap().into_bytes(), + serde_json::to_string(&build_info).unwrap().into_bytes(), )] .into(), ) diff --git a/obs-gitlab-runner/src/pipeline.rs b/obs-gitlab-runner/src/pipeline.rs index 375bb19..a0295fa 100644 --- a/obs-gitlab-runner/src/pipeline.rs +++ b/obs-gitlab-runner/src/pipeline.rs @@ -3,7 +3,7 @@ use std::collections::HashMap; use color_eyre::eyre::{Context, Result}; use obo_core::{ actions::{DownloadBinariesAction, MonitorAction}, - build_meta::{CommitBuildInfo, RepoArch}, + build_meta::{EnabledRepo, RepoArch}, }; use serde::Serialize; use tracing::instrument; @@ -51,7 +51,7 @@ pub fn generate_monitor_pipeline( package: &str, rev: 
&str, srcmd5: &str, - enabled_repos: &HashMap, + enabled_repos: &[EnabledRepo], options: GeneratePipelineOptions, ) -> Result { let rules: Option = options @@ -62,7 +62,9 @@ pub fn generate_monitor_pipeline( .wrap_err("Failed to parse provided rules")?; let mut jobs = HashMap::new(); - for (RepoArch { repo, arch }, info) in enabled_repos { + for enabled in enabled_repos { + let RepoArch { repo, arch } = &enabled.repo_arch; + let mut script = vec![]; let mut artifact_paths = vec![]; @@ -75,7 +77,7 @@ pub fn generate_monitor_pipeline( rev: rev.to_owned(), srcmd5: srcmd5.to_owned(), build_log_out: options.build_log_out.clone(), - prev_endtime_for_commit: info.prev_endtime_for_commit, + prev_endtime_for_commit: enabled.prev_endtime_for_commit, } .generate_command(), ); From 049a11274033a0696109d48952a16d2fd0861552 Mon Sep 17 00:00:00 2001 From: Ryan Gonzalez Date: Mon, 15 Sep 2025 15:44:28 -0500 Subject: [PATCH 4/7] artifacts: Add Artifact[Reader/Writer]::new(AsyncFile) There are cases where we get the files from places that are irrelevant to the shared artifacts code, so those callers need to be able to still place the AsyncFiles inside the wrappers. 
--- obo-core/src/artifacts.rs | 12 ++++++++++-- obs-gitlab-runner/src/handler.rs | 2 +- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/obo-core/src/artifacts.rs b/obo-core/src/artifacts.rs index 325b45f..473d421 100644 --- a/obo-core/src/artifacts.rs +++ b/obo-core/src/artifacts.rs @@ -40,8 +40,12 @@ pub struct ArtifactWriter { } impl ArtifactWriter { + pub fn new(inner: AsyncFile) -> Self { + Self { inner } + } + #[instrument] - pub async fn new() -> Result { + pub async fn new_anon() -> Result { let file = tokio::task::spawn_blocking(tempfile::tempfile).await??; Ok(Self { inner: AsyncFile::from_std(file), @@ -112,6 +116,10 @@ pub struct ArtifactReader { } impl ArtifactReader { + pub fn new(inner: AsyncFile) -> Self { + Self { inner } + } + pub async fn from_async_file(file: &AsyncFile) -> Result { let inner = AsyncFile::options() .read(true) @@ -234,7 +242,7 @@ pub mod test_support { F: for<'a> SaveCallback<'a, Ret, Err> + Send, P: AsRef + Send, { - let mut writer = ArtifactWriter::new().await?; + let mut writer = ArtifactWriter::new_anon().await?; let ret = func(&mut writer).await?; self.artifacts .insert(path.as_ref().to_owned(), writer.into_reader().await?); diff --git a/obs-gitlab-runner/src/handler.rs b/obs-gitlab-runner/src/handler.rs index ae825c0..b307b91 100644 --- a/obs-gitlab-runner/src/handler.rs +++ b/obs-gitlab-runner/src/handler.rs @@ -153,7 +153,7 @@ impl ArtifactDirectory for GitLabArtifacts<'_> { F: for<'a> SaveCallback<'a, Ret, Err> + Send, P: AsRef + Send, { - let mut writer = ArtifactWriter::new().await?; + let mut writer = ArtifactWriter::new_anon().await?; let ret = func(&mut writer).await?; self.artifacts .insert(path.as_ref().to_owned(), writer.into_reader().await?); From df1bb350052c3c2c274b5d42609d3c1a3f584cb6 Mon Sep 17 00:00:00 2001 From: Ryan Gonzalez Date: Wed, 6 Aug 2025 15:48:45 -0500 Subject: [PATCH 5/7] Add a standalone CLI This adds a new crate `obo-cli` that behaves much like the commands already available to 
the runner, but as a separate entity. As replacement for child pipeline generation, the `generate-monitor` command will create a JSON table containing the commands needed to run monitoring and download binaries for a given repo/arch combination. --- .github/workflows/ci.yml | 7 +- Cargo.lock | 38 +++ Cargo.toml | 1 + Dockerfile.obo-cli | 18 ++ Dockerfile => Dockerfile.obs-gitlab-runner | 2 +- obo-cli/Cargo.toml | 37 +++ obo-cli/src/lib.rs | 234 ++++++++++++++ obo-cli/src/main.rs | 120 ++++++++ obo-cli/tests/test_cli.rs | 335 +++++++++++++++++++++ 9 files changed, 789 insertions(+), 3 deletions(-) create mode 100644 Dockerfile.obo-cli rename Dockerfile => Dockerfile.obs-gitlab-runner (92%) create mode 100644 obo-cli/Cargo.toml create mode 100644 obo-cli/src/lib.rs create mode 100644 obo-cli/src/main.rs create mode 100644 obo-cli/tests/test_cli.rs diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 195cc42..1091693 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -8,7 +8,6 @@ on: env: REGISTRY: ghcr.io - IMAGE_NAME: ${{ github.repository }} jobs: check: @@ -87,6 +86,9 @@ jobs: name: Docker image build runs-on: ubuntu-latest needs: [check, fmt, test, clippy] + strategy: + matrix: + target: [obo-cli, obs-gitlab-runner] permissions: contents: read @@ -102,12 +104,13 @@ jobs: - id: meta uses: docker/metadata-action@v5 with: - images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} + images: ${{ env.REGISTRY }}/${{ github.repository_owner }}/${{ matrix.target }} - name: Build and push Docker image uses: docker/build-push-action@v6 if: github.event_name != 'pull_request' with: context: . 
+ file: Dockerfile.${{ matrix.target }} push: true tags: ${{ steps.meta.outputs.tags }} labels: ${{ steps.meta.outputs.labels }} diff --git a/Cargo.lock b/Cargo.lock index 8308f3e..638c411 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1493,6 +1493,33 @@ dependencies = [ "memchr", ] +[[package]] +name = "obo-cli" +version = "0.1.8" +dependencies = [ + "async-trait", + "camino", + "claims", + "clap", + "color-eyre", + "gitlab-runner-mock", + "obo-core", + "obo-test-support", + "obo-tests", + "open-build-service-api", + "open-build-service-mock", + "rstest", + "serde", + "serde_json", + "tempfile", + "tokio", + "tokio-stream", + "tracing", + "tracing-error", + "tracing-subscriber", + "url", +] + [[package]] name = "obo-core" version = "0.1.0" @@ -2582,6 +2609,17 @@ dependencies = [ "tokio", ] +[[package]] +name = "tokio-stream" +version = "0.1.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047" +dependencies = [ + "futures-core", + "pin-project-lite", + "tokio", +] + [[package]] name = "tokio-util" version = "0.7.16" diff --git a/Cargo.toml b/Cargo.toml index 176b3e2..f37861b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,7 @@ [workspace] resolver = "3" members = [ + "obo-cli", "obo-core", "obo-tests", "obo-test-support", diff --git a/Dockerfile.obo-cli b/Dockerfile.obo-cli new file mode 100644 index 0000000..dddc252 --- /dev/null +++ b/Dockerfile.obo-cli @@ -0,0 +1,18 @@ +FROM rust:1.88.0-slim-bookworm AS build +ARG DEBIAN_FRONTEND=noninteractive + +ADD . 
/app +WORKDIR /app +RUN apt-get update \ + && apt-get install -y pkg-config libssl-dev \ + && cargo build -p obo-cli --release + +FROM debian:bookworm-slim +ARG DEBIAN_FRONTEND=noninteractive + +RUN apt-get update \ + && apt-get install -y libssl3 ca-certificates \ + && rm -rf /var/lib/apt/lists/ +COPY --from=build /app/target/release/obo /usr/local/bin/ + +ENTRYPOINT ["/usr/local/bin/obo"] diff --git a/Dockerfile b/Dockerfile.obs-gitlab-runner similarity index 92% rename from Dockerfile rename to Dockerfile.obs-gitlab-runner index 392bc22..b5ca074 100644 --- a/Dockerfile +++ b/Dockerfile.obs-gitlab-runner @@ -5,7 +5,7 @@ ADD . /app WORKDIR /app RUN apt-get update \ && apt-get install -y pkg-config libssl-dev \ - && cargo build --release + && cargo build -p obs-gitlab-runner --release FROM debian:bookworm-slim ARG DEBIAN_FRONTEND=noninteractive diff --git a/obo-cli/Cargo.toml b/obo-cli/Cargo.toml new file mode 100644 index 0000000..b74d12e --- /dev/null +++ b/obo-cli/Cargo.toml @@ -0,0 +1,37 @@ +[package] +name = "obo-cli" +description = "OBS Build Orchestrator — command-line frontend" +version = "0.1.8" +edition = "2024" +license = "MIT OR Apache-2.0" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[[bin]] +name = "obo" +path = "src/main.rs" + +[dependencies] +async-trait.workspace = true +camino.workspace = true +clap.workspace = true +color-eyre.workspace = true +obo-core = { path = "../obo-core" } +obo-test-support = { path = "../obo-test-support" } +open-build-service-api.workspace = true +serde.workspace = true +serde_json.workspace = true +tempfile.workspace = true +tokio.workspace = true +tracing.workspace = true +tracing-error = "0.2" +tracing-subscriber = { version = "0.3", features = ["default", "json"] } +url = "2.5" + +[dev-dependencies] +claims.workspace = true +gitlab-runner-mock = "0.2.1" +obo-tests = { path = "../obo-tests" } +open-build-service-mock.workspace = true +rstest.workspace = true 
+tokio-stream = { version = "0.1.17", features = ["io-util"] } diff --git a/obo-cli/src/lib.rs b/obo-cli/src/lib.rs new file mode 100644 index 0000000..57b800d --- /dev/null +++ b/obo-cli/src/lib.rs @@ -0,0 +1,234 @@ +use std::time::Duration; + +use async_trait::async_trait; +use camino::{Utf8Path, Utf8PathBuf}; +use clap::{Args, Subcommand}; +use color_eyre::eyre::{Context, Report, Result, bail, eyre}; +use obo_core::{ + actions::{ + Actions, DEFAULT_BUILD_INFO, DEFAULT_BUILD_LOG, DownloadBinariesAction, DputAction, + LOG_TAIL_2MB, MonitorAction, ObsBuildInfo, PruneAction, + }, + artifacts::{ArtifactDirectory, ArtifactReader, ArtifactWriter, MissingArtifact, SaveCallback}, + build_meta::RepoArch, + monitor::PackageMonitoringOptions, + outputln, +}; +use open_build_service_api as obs; +use serde::{Deserialize, Serialize}; +use tempfile::NamedTempFile; +use tokio::{ + fs::File as AsyncFile, + io::{AsyncBufReadExt, AsyncWriteExt, BufReader}, +}; + +pub const DEFAULT_MONITOR_TABLE: &str = "obs-monitor.json"; + +#[derive(Debug, Deserialize, Serialize)] +pub struct MonitorCommands { + pub monitor: String, + pub download_binaries: Option, +} + +#[derive(Debug, Deserialize, Serialize)] +pub struct MonitorEntry { + #[serde(flatten)] + pub repo_arch: RepoArch, + pub commands: MonitorCommands, +} + +#[derive(Debug, Deserialize, Serialize)] +pub struct MonitorTable { + pub entries: Vec, +} + +#[derive(Args)] +pub struct GenerateMonitorAction { + #[clap(long, default_value_t = DEFAULT_BUILD_INFO.to_owned())] + build_info: String, + #[clap(long, default_value_t = DEFAULT_MONITOR_TABLE.to_owned())] + monitor_out: String, + #[clap(long, default_value_t = DEFAULT_BUILD_LOG.into())] + build_log_out: String, + #[clap(long = "download-build-results-to")] + build_results_dir: Option, +} + +#[derive(Subcommand)] +pub enum CliAction { + Dput(DputAction), + Monitor { + #[clap(flatten)] + args: MonitorAction, + + // These are needed by the integration tests. 
+ #[clap(long, hide = true, env = "OBO_TEST_LOG_TAIL", default_value_t = LOG_TAIL_2MB)] + log_tail: u64, + #[clap(long, hide = true, env = "OBO_TEST_SLEEP_ON_BUILDING_MS")] + sleep_on_building_ms: Option, + #[clap(long, hide = true, env = "OBO_TEST_SLEEP_ON_OLD_STATUS_MS")] + sleep_on_old_status_ms: Option, + }, + GenerateMonitor(GenerateMonitorAction), + DownloadBinaries(DownloadBinariesAction), + Prune(PruneAction), +} + +#[derive(Default)] +pub struct LocalFsArtifacts(pub Utf8PathBuf); + +#[async_trait] +impl ArtifactDirectory for LocalFsArtifacts { + async fn open(&self, path: impl AsRef + Send) -> Result { + let path = self.0.join(path.as_ref()); + AsyncFile::open(&path) + .await + .map(ArtifactReader::new) + .map_err(|e| { + if e.kind() == std::io::ErrorKind::NotFound { + eyre!(MissingArtifact(path)) + } else { + eyre!(e) + } + }) + } + + async fn save_with(&mut self, path: P, func: F) -> Result + where + Report: From, + Ret: Send, + Err: Send, + F: for<'a> SaveCallback<'a, Ret, Err> + Send, + P: AsRef + Send, + { + let path = self.0.join(path.as_ref()); + let parent = path.parent().unwrap_or_else(|| Utf8Path::new(".")); + tokio::fs::create_dir_all(&parent) + .await + .wrap_err_with(|| format!("Failed to create parents of '{path}'"))?; + + let Some(basename) = path.file_name() else { + bail!("Invalid path: {path}"); + }; + let temp = NamedTempFile::with_prefix_in(basename, parent) + .wrap_err("Failed to create temporary file")?; + + let mut writer = ArtifactWriter::new(AsyncFile::from_std(temp.as_file().try_clone()?)); + let ret = func(&mut writer).await?; + + writer.flush().await?; + temp.persist(&path)?; + Ok(ret) + } +} + +pub struct Handler { + actions: Actions, + artifacts: LocalFsArtifacts, +} + +impl Handler { + pub fn new(client: obs::Client, artifacts_dir: Utf8PathBuf) -> Self { + Self { + actions: Actions { client }, + artifacts: LocalFsArtifacts(artifacts_dir), + } + } + + async fn generate_monitor(&mut self, args: GenerateMonitorAction) -> 
Result<()> { + let build_info_data = self.artifacts.read_string(&args.build_info).await?; + let build_info: ObsBuildInfo = serde_json::from_str(&build_info_data) + .wrap_err("Failed to parse provided build info file")?; + + let rev = build_info + .rev + .ok_or_else(|| eyre!("Build revision was not set"))?; + let srcmd5 = build_info + .srcmd5 + .ok_or_else(|| eyre!("Build srcmd5 was not set"))?; + + let mut table = MonitorTable { entries: vec![] }; + for enabled_repo in build_info.enabled_repos { + table.entries.push(MonitorEntry { + repo_arch: enabled_repo.repo_arch.clone(), + commands: MonitorCommands { + monitor: MonitorAction { + project: build_info.project.clone(), + package: build_info.package.clone(), + repository: enabled_repo.repo_arch.repo.clone(), + arch: enabled_repo.repo_arch.arch.clone(), + rev: rev.clone(), + srcmd5: srcmd5.clone(), + prev_endtime_for_commit: enabled_repo.prev_endtime_for_commit, + build_log_out: args.build_log_out.clone(), + } + .generate_command(), + download_binaries: args.build_results_dir.clone().map(|build_results_dir| { + DownloadBinariesAction { + project: build_info.project.clone(), + package: build_info.package.clone(), + repository: enabled_repo.repo_arch.repo, + arch: enabled_repo.repo_arch.arch, + build_results_dir, + } + .generate_command() + }), + }, + }); + } + + let data = serde_json::to_string(&table).wrap_err("Failed to serialize data")?; + + self.artifacts + .write(&args.monitor_out, data.as_bytes()) + .await?; + outputln!("Wrote monitor file '{}'.", args.monitor_out); + + Ok(()) + } + + pub async fn run(&mut self, action: CliAction) -> Result<()> { + match action { + CliAction::Dput(args) => self.actions.dput(args, &mut self.artifacts).await?, + CliAction::Monitor { + log_tail, + sleep_on_building_ms, + sleep_on_old_status_ms, + args, + } => { + let mut options = PackageMonitoringOptions::default(); + if let Some(value) = sleep_on_building_ms { + options.sleep_on_building = Duration::from_millis(value); + } + if 
let Some(value) = sleep_on_old_status_ms { + options.sleep_on_old_status = Duration::from_millis(value); + } + + self.actions + .monitor( + args, + options, + |file| async { + let mut lines = BufReader::new(file).lines(); + while let Some(line) = lines.next_line().await? { + eprintln!("{line}"); + } + Ok(()) + }, + log_tail, + &mut self.artifacts, + ) + .await? + } + CliAction::GenerateMonitor(args) => self.generate_monitor(args).await?, + CliAction::DownloadBinaries(args) => { + self.actions + .download_binaries(args, &mut self.artifacts) + .await? + } + CliAction::Prune(args) => self.actions.prune(args, &self.artifacts).await?, + } + + Ok(()) + } +} diff --git a/obo-cli/src/main.rs b/obo-cli/src/main.rs new file mode 100644 index 0000000..dbb0445 --- /dev/null +++ b/obo-cli/src/main.rs @@ -0,0 +1,120 @@ +use std::{fmt, str::FromStr}; + +use camino::Utf8PathBuf; +use clap::Parser; +use color_eyre::eyre::Result; +use obo_cli::{CliAction, Handler}; +use obo_core::logging::{ + get_event_message, is_output_field_in_metadata, is_output_field_set_in_event, +}; +use open_build_service_api as obs; +use tracing::{Event, Subscriber}; +use tracing_subscriber::{ + filter::Targets, + fmt::{FmtContext, FormatEvent, FormatFields, format}, + layer::{self, Filter}, + prelude::*, + registry::LookupSpan, +}; +use url::Url; + +#[derive(Debug, Clone)] +struct TargetsArg { + targets: Targets, + parsed_from: String, +} + +impl FromStr for TargetsArg { + type Err = ::Err; + + fn from_str(s: &str) -> Result { + Targets::from_str(s).map(|targets| TargetsArg { + targets, + parsed_from: s.to_owned(), + }) + } +} + +impl Default for TargetsArg { + fn default() -> Self { + "".parse().unwrap() + } +} + +impl fmt::Display for TargetsArg { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.parsed_from) + } +} + +struct OutputFilter; + +impl Filter for OutputFilter { + fn enabled(&self, meta: &tracing::Metadata<'_>, _cx: &layer::Context<'_, S>) -> bool { + 
is_output_field_in_metadata(meta) + } + + fn event_enabled(&self, event: &Event<'_>, _cx: &layer::Context<'_, S>) -> bool { + is_output_field_set_in_event(event) + } +} + +struct OutputFormatter; + +impl FormatEvent for OutputFormatter +where + S: Subscriber + for<'a> LookupSpan<'a>, + N: for<'a> FormatFields<'a> + 'static, +{ + fn format_event( + &self, + _ctx: &FmtContext<'_, S, N>, + mut writer: format::Writer<'_>, + event: &Event<'_>, + ) -> fmt::Result { + let Some(message) = get_event_message(event) else { + return Ok(()); + }; + writeln!(writer, "{message}") + } +} + +#[derive(Parser)] +struct Args { + #[clap(long, env = "OBO_LOG", default_value_t = TargetsArg::default())] + log: TargetsArg, + + #[clap(long, env = "OBS_SERVER")] + obs_server: Url, + #[clap(long, env = "OBS_USER")] + obs_user: String, + #[clap(long, env = "OBS_PASSWORD")] + obs_password: String, + + #[clap(subcommand)] + action: CliAction, +} + +#[tokio::main] +async fn main() -> Result<()> { + let args = Args::parse(); + + tracing_subscriber::registry() + .with(tracing_error::ErrorLayer::default()) + .with(tracing_subscriber::fmt::layer().with_filter(args.log.targets)) + .with( + tracing_subscriber::fmt::layer() + .event_format(OutputFormatter) + .with_filter(OutputFilter), + ) + .init(); + + color_eyre::install().unwrap(); + + let client = obs::Client::new(args.obs_server, args.obs_user, args.obs_password); + Handler::new(client, Utf8PathBuf::new()) + .run(args.action) + .await?; + + Ok(()) +} diff --git a/obo-cli/tests/test_cli.rs b/obo-cli/tests/test_cli.rs new file mode 100644 index 0000000..e9a9e0f --- /dev/null +++ b/obo-cli/tests/test_cli.rs @@ -0,0 +1,335 @@ +use std::{ + collections::HashMap, + path::Path, + process::{ExitStatus, Stdio}, + sync::Arc, + time::Duration, +}; + +use async_trait::async_trait; +use camino::Utf8Path; +use claims::*; +use obo_cli::{DEFAULT_MONITOR_TABLE, MonitorTable}; +use obo_core::actions::ObsBuildInfo; +use obo_test_support::*; +use obo_tests::*; +use 
rstest::rstest; +use tempfile::TempDir; +use tokio::{ + io::{AsyncBufReadExt, BufReader}, + process::Command, +}; +use tokio_stream::{StreamExt, wrappers::LinesStream}; + +#[derive(Debug, Clone)] +struct CliArtifactsHandle(Arc); + +impl ArtifactsHandle for CliArtifactsHandle {} + +#[derive(Clone, Debug)] +struct CliExecutionResult { + status: ExitStatus, + out: String, + artifacts: Arc, +} + +impl ExecutionResult for CliExecutionResult { + type Artifacts = CliArtifactsHandle; + + fn ok(&self) -> bool { + self.status.success() + } + + fn log(&self) -> String { + self.out.clone() + } + + fn artifacts(&self) -> Self::Artifacts { + CliArtifactsHandle(self.artifacts.clone()) + } +} + +struct CliRunBuilder { + obs_server: String, + script: Vec, + dependencies: Vec>, + timeout: Duration, +} + +#[async_trait] +impl RunBuilder<'_> for CliRunBuilder { + type ArtifactsHandle = CliArtifactsHandle; + type ExecutionResult = CliExecutionResult; + + fn script(mut self, script: &[String]) -> Self { + for line in script { + self.script + .push(format!("{} {line}", env!("CARGO_BIN_EXE_obo"))); + } + self + } + + fn artifacts(mut self, artifacts: Self::ArtifactsHandle) -> Self { + self.dependencies.push(artifacts.0); + self + } + + fn timeout(mut self, timeout: Duration) -> Self { + self.timeout = timeout; + self + } + + // TODO: timeouts!! + async fn go(self) -> Self::ExecutionResult { + let temp = TempDir::new().unwrap(); + + // Symlink all the dependency artifacts into the cwd, and clean them up + // at the end. 
+ let mut dep_files = vec![]; + for dep in &self.dependencies { + let mut reader = tokio::fs::read_dir(dep.path()).await.unwrap(); + while let Some(entry) = reader.next_entry().await.unwrap() { + let dest = temp.path().join(entry.file_name()); + tokio::fs::symlink(entry.path(), &dest).await.unwrap(); + dep_files.push(dest); + } + } + + let mut child = Command::new("sh") + .arg("-exc") + .arg(self.script.join("\n")) + .kill_on_drop(true) + .env("OBS_SERVER", self.obs_server) + .env("OBS_USER", TEST_USER) + .env("OBS_PASSWORD", TEST_PASS) + .env("OBO_TEST_LOG_TAIL", MONITOR_TEST_LOG_TAIL.to_string()) + .env("OBO_TEST_SLEEP_ON_BUILDING_MS", "0") + .env( + "OBO_TEST_SLEEP_ON_OLD_STATUS_MS", + MONITOR_TEST_OLD_STATUS_SLEEP_DURATION + .as_millis() + .to_string(), + ) + .stdin(Stdio::null()) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .current_dir(temp.path()) + .spawn() + .unwrap(); + + let (status, lines) = tokio::time::timeout(self.timeout, async move { + let stdout = BufReader::new(child.stdout.take().unwrap()); + let stderr = BufReader::new(child.stderr.take().unwrap()); + + let mut output = + LinesStream::new(stdout.lines()).merge(LinesStream::new(stderr.lines())); + let mut lines = vec![]; + + while let Some(line) = output.try_next().await.unwrap() { + // Forward the lines back to the output. 
+ eprintln!("{line}"); + lines.push(line); + } + + let status = child.wait().await.unwrap(); + (status, lines) + }) + .await + .unwrap(); + + for file in dep_files { + tokio::fs::remove_file(&file).await.unwrap(); + } + + Self::ExecutionResult { + status, + out: lines.join("\n"), + artifacts: Arc::new(temp), + } + } +} + +struct CliTestContext { + obs: ObsContext, +} + +fn collect_artifacts_from_dir(out: &mut HashMap>, root: &Path, subdir: &Utf8Path) { + for entry in std::fs::read_dir(root.join(subdir.as_std_path())).unwrap() { + let entry = entry.unwrap(); + let name = entry.file_name().into_string().unwrap(); + let ft = entry.file_type().unwrap(); + if ft.is_dir() { + collect_artifacts_from_dir(out, root, &subdir.join(name)); + } else if ft.is_file() { + let contents = std::fs::read(entry.path()) + .unwrap_or_else(|err| panic!("Fetching {}: {err}", entry.path().display())); + out.insert(subdir.join(name).into_string(), contents); + } + } +} + +#[async_trait] +impl TestContext for CliTestContext { + type ArtifactsHandle = CliArtifactsHandle; + type ExecutionResult = CliExecutionResult; + type RunBuilder<'context> = CliRunBuilder; + + fn obs(&self) -> &ObsContext { + &self.obs + } + + async fn inject_artifacts( + &mut self, + artifacts: HashMap>, + ) -> Self::ArtifactsHandle { + let temp = TempDir::new().unwrap(); + + for (name, contents) in artifacts { + tokio::fs::write(temp.path().join(name), contents) + .await + .unwrap(); + } + + CliArtifactsHandle(Arc::new(temp)) + } + + async fn fetch_artifacts(&self, handle: &Self::ArtifactsHandle) -> HashMap> { + let mut ret = HashMap::new(); + collect_artifacts_from_dir(&mut ret, handle.0.path(), Utf8Path::new("")); + ret + } + + fn run(&mut self) -> Self::RunBuilder<'_> { + CliRunBuilder { + obs_server: self.obs.client.url().to_string(), + script: vec![], + dependencies: vec![], + timeout: EXECUTION_DEFAULT_TIMEOUT, + } + } +} + +async fn with_context(func: impl AsyncFnOnce(CliTestContext) -> T) -> T { + let obs_mock = 
create_default_mock().await; + let obs_client = create_default_client(&obs_mock); + + let ctx = CliTestContext { + obs: ObsContext { + client: obs_client, + mock: obs_mock, + }, + }; + + func(ctx).await +} + +async fn test_monitor_table( + context: &mut CliTestContext, + dput: CliArtifactsHandle, + build_info: &ObsBuildInfo, + success: bool, + dput_test: DputTest, + log_test: MonitorLogTest, + download_binaries: bool, +) { + let mut generate_command = "generate-monitor".to_owned(); + if download_binaries { + generate_command += + &format!(" --download-build-results-to {MONITOR_TEST_BUILD_RESULTS_DIR}"); + } + + let generate = context + .run() + .command(generate_command) + .artifacts(dput.clone()) + .go() + .await; + assert!(generate.ok()); + + let results = context.fetch_artifacts(&generate.artifacts()).await; + let table: MonitorTable = assert_ok!(serde_json::from_slice( + results.get(DEFAULT_MONITOR_TABLE).unwrap() + )); + + for enabled in &build_info.enabled_repos { + let entry = table + .entries + .iter() + .find(|m| m.repo_arch == enabled.repo_arch) + .unwrap(); + + let mut script = vec![entry.commands.monitor.clone()]; + if download_binaries { + script.push(entry.commands.download_binaries.clone().unwrap()); + } else { + assert_none!(&entry.commands.download_binaries); + } + + test_monitoring( + context, + dput.clone(), + build_info, + &enabled.repo_arch, + &script, + success, + dput_test, + log_test, + download_binaries, + ) + .await; + } +} + +async fn test_prune( + context: &mut CliTestContext, + dput: CliArtifactsHandle, + build_info: &ObsBuildInfo, +) { + test_prune_missing_build_info(context).await; + + let prune = context + .run() + .command("prune") + .artifacts(dput.clone()) + .go() + .await; + test_prune_deleted_package_1_if_branched(context, build_info, &prune).await; +} + +#[rstest] +#[tokio::test] +async fn test_cli_flow( + #[values( + DputTest::Basic, + DputTest::Rebuild, + DputTest::ReusePreviousBuild, + DputTest::Branch + )] + dput_test: 
DputTest, + #[values(true, false)] build_success: bool, + #[values( + MonitorLogTest::Long, + MonitorLogTest::Short, + MonitorLogTest::Unavailable + )] + log_test: MonitorLogTest, + #[values(true, false)] download_binaries: bool, +) { + with_context(async |mut context| { + let (dput, build_info) = test_dput(&mut context, dput_test).await; + + test_monitor_table( + &mut context, + dput.clone(), + &build_info, + build_success, + dput_test, + log_test, + download_binaries, + ) + .await; + + test_prune(&mut context, dput.clone(), &build_info).await; + }) + .await; +} From 307b9928b403f3b0533d1fc216e9d961d8e01400 Mon Sep 17 00:00:00 2001 From: Ryan Gonzalez Date: Tue, 7 Oct 2025 11:19:30 -0500 Subject: [PATCH 6/7] Fix outdated OBS docs link --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index cb35248..171b27b 100644 --- a/README.md +++ b/README.md @@ -101,7 +101,7 @@ be cleaned up. ##### `--branch-to BRANCHED_PROJECT` Before starting an upload, -[branch](https://openbuildservice.org/help/manuals/obs-user-guide/art.obs.bg.html#sec.obsbg.uc.branchprj) +[branch](https://openbuildservice.org/help/manuals/obs-user-guide/art-obs-bg#sec-obsbg-uc-branchprj) the package to a new project, named with the value passed to the argument. Any uploads will now go to the branched project, and `generate-monitor` / `prune` will both used the branched project / package. This is particularly useful to run From 6d5da1504482db960c943de9f95e2d629c053b4a Mon Sep 17 00:00:00 2001 From: Ryan Gonzalez Date: Tue, 7 Oct 2025 14:36:25 -0500 Subject: [PATCH 7/7] Split-up the README and update for obo-cli This splits the single readme into three, a top-level one with all the useful shared info, and much smaller project-specific ones with the relevant setup. 
--- README.md | 233 ++++++++++++------------------ obo-cli/README.md | 20 ++++ obs-gitlab-runner/README.md | 150 +++++++++++++++++++++ 3 files changed, 248 insertions(+), 155 deletions(-) create mode 100644 obo-cli/README.md create mode 100644 obs-gitlab-runner/README.md diff --git a/README.md b/README.md index 171b27b..cd204b4 100644 --- a/README.md +++ b/README.md @@ -1,72 +1,18 @@ -# OBS GitLab Runner +# OBS Build Orchestrator -This is a custom [GitLab Runner](https://docs.gitlab.com/runner/) implementation -exposing a custom command language for starting, monitoring, and cleaning up -builds on [OBS](https://build.opensuse.org/), specifically targeting Debian -packages. +This repo contains tools for starting, monitoring, and cleaning up builds on +[OBS](https://build.opensuse.org/), specifically targeting Debian packages. +It's usable in two forms: -## Usage - -### Sending Jobs - -In order to send commands in a job to the runner, set the job's `tags:` to -include the tag you used during [the deployment](#deployment), e.g.: - -```yaml -my-job: - tags: - - obs-runner # <-- set to run on runners tagged "obs-runner" - stage: some-stage - script: - # [...] -``` - -This will run all the commands inside `before_script`, `script`, and -`after_script` with the runner. - -### Supported Syntax - -A subset of shell syntax is supported for commands: - -- Commands are split at spaces, but parts can be quoted, just like in the shell: - ```bash - some-command this-is-argument-1 "this entire string is argument 2" - ``` -- Variable substitution is supported, as well as the `${VARIABLE:-DEFAULT}` - syntax to use the value `DEFAULT` if `$VARIABLE` is unset: - ```bash - some-command $a_variable ${a_variable_with_default:-this is used if unset} - ``` - The variables are sourced from the pipeline-wide and job-specific variables - set. 
- - A significant departure from shell argument parsing is that **variable - contents are auto-quoted, thus spaces inside a variable do not split - arguments**. For example, given `MYVAR=a b c`, this: - ```bash - some-command $MYVAR - ``` - is interpreted as: - ```bash - some-command 'a b c' - ``` - *not*: - ```bash - some-command a b c - ``` - There is no way to use a variable without auto-quoting its contents. - -#### Flags +- [obo-cli](obo-cli/README.md), a standalone CLI. +- [obs-gitlab-runner](obs-gitlab-runner/README.md), a custom [GitLab + Runner](https://docs.gitlab.com/runner/). -Any flag arguments shown below can also explicitly take a true/false value, e.g. -`--rebuild-if-unchanged`, `--rebuild-if-unchanged=true`, and -`--rebuild-if-unchanged=false`. This is primarily useful to conditionally set -the value for a flag; you can set `SOME_VARIABLE=true/false` in your GitLab -pipeline, then use that variable in a flag value as `--flag=$SOME_VARIABLE`. +## Usage ### Required Environment -In order to connect to OBS, three variables must be set (generally within the -"CI/CD" section of the settings): +In order to connect to OBS, three environment variables must be set: - `OBS_SERVER`: The URL of the OBS instance, e.g. `https://obs.somewhere.com/`. - `OBS_USER`: The username used to authenticate with OBS (any commits created @@ -75,6 +21,24 @@ In order to connect to OBS, three variables must be set (generally within the there are no places where this value should be logged, **for safety purposes, it is highly recommended to mark this variable as *Masked*.**. +For obs-gitlab-runner, these should generally be configured within the "CI/CD" +section of the repository / group settings. **For safety purposes, it is highly +recommended to mark the `OBS_PASSWORD` variable as *Masked*.** (It should not +be logged anywhere regardless, but that will provide insulation against +mistakes.) 
+ +For obo-cli, you can additionally use the `--obs-server`, `--obs-user`, and +`--obs-password` options, but care should be taken to avoid accidentally saving +the values into shell history or other tenuous locations. + +### Flag syntax + +Any flag arguments shown below can also explicitly take a true/false value, e.g. +`--rebuild-if-unchanged`, `--rebuild-if-unchanged=true`, and +`--rebuild-if-unchanged=false`. This is primarily useful to conditionally set +the value for a flag; you can set `SOME_VARIABLE=true/false` in your CI +pipeline, then use that variable in a flag value as `--flag=$SOME_VARIABLE`. + ### Commands #### `dput` @@ -124,7 +88,7 @@ Note that, if `--branch-to` was specified, this will, in practice, never be triggered: due to the way metadata files are handled, right after a branching operation, there will *always* be a change to upload. -#### `generate-monitor` +#### `generate-monitor` (*obs-gitlab-runner version*) ```bash generate-monitor RUNNER_TAG @@ -229,6 +193,56 @@ Changes the expiration of the build results & logs. Changes the filename each monitoring job will save the build log into. +#### `generate-monitor` (*obo-cli version*) + +```bash +generate-monitor + [--download-build-results-to BUILD_RESULTS_DIR] + [--build-info BUILD_INFO_FILE=build-info.json] + [--monitor-out MONITOR_OUT=obs-monitor.json] + [--build-log-out BUILD_LOG_FILE=build.log] +``` + +Generates a JSON file `MONITOR_OUT` structured as: + +```json5 +{ + "entries": [ + { + "repo": "REPO", + "arch": "ARCH", + "commands": { + "monitor": "monitor [...]", + "download-binaries": "download-binaries [...]", + } + }, + // ... + ] +} +``` + +`entries` contains a list of OBS repository + architecture combinations, along +with the subcommands to run to monitor a build and download the results (to be +used as `obo THE_SUBCOMMAND`). 
+ +##### `--download-build-results-to BUILD_RESULTS_DIR` + +Fills in `entries[*].commands.download-binaries` with a command that will +download the build results from OBS to the given `BUILD_RESULTS_DIR`. If this +option is not given, `commands.download-binaries` will be `null`. + +##### `--build-info BUILD_INFO_FILE=build-info.json` + +Specifies the name of the build info file to read. In particular, if a different +build info filename was used with `dput` via +[`--build-info-out`](#--build-info-out), then `--build-info` should be used here +to specify the same filename. + +##### `--build-log-out BUILD_LOG_FILE=build.log` + +Changes the filename each subcommand in `entries[*].commands.monitor` will save +the build log into. + #### `prune` ```bash @@ -261,94 +275,3 @@ is written. Only run the prune if a previous command in the same job failed. This is primarily useful if `prune` is used inside of `after_script`, to only remove the branched project/package if e.g. the upload failed. - -## Deployment - -### Registering the Runner - -In order to use the runner, you must first register it with your GitLab -instance. This requires the use of a registration token, which can be obtained -via the following steps: - -- Enter the GitLab admin area. -- Navigate to Overview -> Runners. -- Click "Register an instance runner". -- Copy the registration token within. - -(Per-group/-project registration tokens can also be retrieved from the CI/CD -settings of the group or project.) - -With this token, you can now register the runner via the [GitLab -API](https://docs.gitlab.com/ee/api/runners.html#register-a-new-runner). 
- -Example using curl: - -```bash -curl --request POST "https://$GITLAB_SERVER_URL/api/v4/runners" \ - --form description='OBS runner' \ - --form run_untagged=false \ - --form tag_list=obs-runner \ - --form token="$REGISTRATION_TOKEN" -``` - -httpie: - -```bash -http --form POST "https://$GITLAB_SERVER_URL/api/v4/runners" \ - description='OBS runner' \ - run_untagged=false \ - tag_list=obs-runner \ - token="$REGISTRATION_TOKEN" -``` - -**It is critical that you set `run_untagged=false`,** otherwise this runner -will be used for *all* jobs that don't explicitly set a tag, rather than just -the jobs explicitly targeting the runner. - -This API call will return a JSON object containing a `token` key, whose value -is a _runner token_ that is used by the runner to connect to GitLab. - -### Docker - -Docker images are built on every commit, available at -`ghcr.io/collabora/obs-gitlab-runner:main`. The entry point takes two arguments: - -- The GitLab server URL. -- The runner token acquired previously. 
- -Simple example usage via the Docker CLI: - -```bash -$ docker run --rm -it ghcr.io/collabora/obs-gitlab-runner:main \ - "$GITLAB_SERVER_URL" "$GITLAB_RUNNER_TOKEN" -``` - -In addition, you can instead opt to set the `GITLAB_URL` and `GITLAB_TOKEN` -environment variables: - -```bash -$ docker run --rm -it \ - -e GITLAB_URL="$GITLAB_SERVER_URL" \ - -e GITLAB_TOKEN="$GITLAB_RUNNER_TOKEN" \ - ghcr.io/collabora/obs-gitlab-runner:main -``` - -### Kubernetes - -A [Helm](https://helm.sh/) chart has been provided in the `chart/` directory, -installable via: - -```bash -$ helm install \ - --set-string gitlab.url="$GITLAB_SERVER_URL" \ - --set-string gitlab.token="$GITLAB_RUNNER_TOKEN" \ - obs-gitlab-runner chart -``` - -Upgrades can skip setting `gitlab.token` to re-use the previously set value: - -```bash -$ helm upgrade \ - --set-string gitlab.url="$GITLAB_SERVER_URL" \ - obs-gitlab-runner chart -``` diff --git a/obo-cli/README.md b/obo-cli/README.md new file mode 100644 index 0000000..ba22e53 --- /dev/null +++ b/obo-cli/README.md @@ -0,0 +1,20 @@ +# obo-cli + +This is a CLI for starting, monitoring, and cleaning up builds on +[OBS](https://build.opensuse.org/), specifically targeting Debian packages. + +## Usage + +For information on OBS authentication and the commands supported, see [the +project-wide README](../README.md). + +Docker images are built on every commit, available at +`ghcr.io/collabora/obo-cli:main`. The entry point directly takes the +subcommands, e.g.: + +``` +docker run --rm -it -v $PWD:/work -w /work ghcr.io/collabora/obo-cli:main prune +``` + +will mount the current directory as `/work` and then run the `prune` command +from within. 
diff --git a/obs-gitlab-runner/README.md b/obs-gitlab-runner/README.md new file mode 100644 index 0000000..5881b3f --- /dev/null +++ b/obs-gitlab-runner/README.md @@ -0,0 +1,150 @@ +# obs-gitlab-runner + +This is a custom [GitLab Runner](https://docs.gitlab.com/runner/) implementation +providing a shell-like command language for starting, monitoring, and cleaning up +builds on [OBS](https://build.opensuse.org/), specifically targeting Debian +packages. + +## Usage + +For information on OBS authentication and the commands supported, see [the +project-wide README](../README.md). + +### Sending Jobs + +In order to send commands in a job to the runner, set the job's `tags:` to +include the tag you used during [the deployment](#deployment), e.g.: + +```yaml +my-job: + tags: + - obs-runner # <-- set to run on runners tagged "obs-runner" + stage: some-stage + script: + # [...] +``` + +This will run all the commands inside `before_script`, `script`, and +`after_script` with the runner. + +### Supported Syntax + +A subset of shell syntax is supported for commands: + +- Commands are split at spaces, but parts can be quoted, just like in the shell: + ```bash + some-command this-is-argument-1 "this entire string is argument 2" + ``` +- Variable substitution is supported, as well as the `${VARIABLE:-DEFAULT}` + syntax to use the value `DEFAULT` if `$VARIABLE` is unset: + ```bash + some-command $a_variable ${a_variable_with_default:-this is used if unset} + ``` + The variables are sourced from the pipeline-wide and job-specific variables + set. + - A significant departure from shell argument parsing is that **variable + contents are auto-quoted, thus spaces inside a variable do not split + arguments**. For example, given `MYVAR=a b c`, this: + ```bash + some-command $MYVAR + ``` + is interpreted as: + ```bash + some-command 'a b c' + ``` + *not*: + ```bash + some-command a b c + ``` + There is no way to use a variable without auto-quoting its contents. 
+ +## Deployment + +### Registering the Runner + +In order to use the runner, you must first register it with your GitLab +instance. This requires the use of a registration token, which can be obtained +via the following steps: + +- Enter the GitLab admin area. +- Navigate to Overview -> Runners. +- Click "Register an instance runner". +- Copy the registration token within. + +(Per-group/-project registration tokens can also be retrieved from the CI/CD +settings of the group or project.) + +With this token, you can now register the runner via the [GitLab +API](https://docs.gitlab.com/ee/api/runners.html#register-a-new-runner). + +Example using curl: + +```bash +curl --request POST "https://$GITLAB_SERVER_URL/api/v4/runners" \ + --form description='OBS runner' \ + --form run_untagged=false \ + --form tag_list=obs-runner \ + --form token="$REGISTRATION_TOKEN" +``` + +httpie: + +```bash +http --form POST "https://$GITLAB_SERVER_URL/api/v4/runners" \ + description='OBS runner' \ + run_untagged=false \ + tag_list=obs-runner \ + token="$REGISTRATION_TOKEN" +``` + +**It is critical that you set `run_untagged=false`,** otherwise this runner +will be used for *all* jobs that don't explicitly set a tag, rather than just +the jobs explicitly targeting the runner. + +This API call will return a JSON object containing a `token` key, whose value +is a _runner token_ that is used by the runner to connect to GitLab. + +### Docker + +Docker images are built on every commit, available at +`ghcr.io/collabora/obs-gitlab-runner:main`. The entry point takes two arguments: + +- The GitLab server URL. +- The runner token acquired previously. 
+ +Simple example usage via the Docker CLI: + +```bash +$ docker run --rm -it ghcr.io/collabora/obs-gitlab-runner:main \ + "$GITLAB_SERVER_URL" "$GITLAB_RUNNER_TOKEN" +``` + +In addition, you can instead opt to set the `GITLAB_URL` and `GITLAB_TOKEN` +environment variables: + +```bash +$ docker run --rm -it \ + -e GITLAB_URL="$GITLAB_SERVER_URL" \ + -e GITLAB_TOKEN="$GITLAB_RUNNER_TOKEN" \ + ghcr.io/collabora/obs-gitlab-runner:main +``` + +### Kubernetes + +A [Helm](https://helm.sh/) chart has been provided in the `chart/` directory, +installable via: + +```bash +$ helm install \ + --set-string gitlab.url="$GITLAB_SERVER_URL" \ + --set-string gitlab.token="$GITLAB_RUNNER_TOKEN" \ + obs-gitlab-runner chart +``` + +Upgrades can skip setting `gitlab.token` to re-use the previously set value: + +```bash +$ helm upgrade \ + --set-string gitlab.url="$GITLAB_SERVER_URL" \ + obs-gitlab-runner chart +```