diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 195cc42..1091693 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -8,7 +8,6 @@ on:
 env:
   REGISTRY: ghcr.io
-  IMAGE_NAME: ${{ github.repository }}
 
 jobs:
   check:
@@ -87,6 +86,9 @@ jobs:
     name: Docker image build
     runs-on: ubuntu-latest
     needs: [check, fmt, test, clippy]
+    strategy:
+      matrix:
+        target: [obo-cli, obs-gitlab-runner]
 
     permissions:
       contents: read
@@ -102,12 +104,13 @@ jobs:
       - id: meta
         uses: docker/metadata-action@v5
         with:
-          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
+          images: ${{ env.REGISTRY }}/${{ github.repository_owner }}/${{ matrix.target }}
       - name: Build and push Docker image
         uses: docker/build-push-action@v6
         if: github.event_name != 'pull_request'
         with:
           context: .
+          file: Dockerfile.${{ matrix.target }}
           push: true
           tags: ${{ steps.meta.outputs.tags }}
           labels: ${{ steps.meta.outputs.labels }}
diff --git a/Cargo.lock b/Cargo.lock
index 6971eb0..638c411 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1494,8 +1494,35 @@ dependencies = [
 ]
 
 [[package]]
-name = "obs-gitlab-runner"
+name = "obo-cli"
 version = "0.1.8"
+dependencies = [
+ "async-trait",
+ "camino",
+ "claims",
+ "clap",
+ "color-eyre",
+ "gitlab-runner-mock",
+ "obo-core",
+ "obo-test-support",
+ "obo-tests",
+ "open-build-service-api",
+ "open-build-service-mock",
+ "rstest",
+ "serde",
+ "serde_json",
+ "tempfile",
+ "tokio",
+ "tokio-stream",
+ "tracing",
+ "tracing-error",
+ "tracing-subscriber",
+ "url",
+]
+
+[[package]]
+name = "obo-core"
+version = "0.1.0"
 dependencies = [
  "async-trait",
  "base16ct",
@@ -1506,15 +1533,69 @@ dependencies = [
  "color-eyre",
  "derivative",
  "futures-util",
- "gitlab-runner",
- "gitlab-runner-mock",
  "md-5",
+ "obo-test-support",
  "open-build-service-api",
  "open-build-service-mock",
  "reqwest",
  "rfc822-like",
  "rstest",
  "serde",
+ "serde_json",
+ "shell-words",
+ "tempfile",
+ "thiserror 2.0.16",
+ "tokio",
+ "tokio-retry2 0.6.0",
+ "tokio-util",
+ "tracing",
+ "wiremock",
+]
+
+[[package]]
+name = "obo-test-support"
+version = "0.1.0"
+dependencies = [
+ "open-build-service-api",
+ "open-build-service-mock",
+]
+
+[[package]]
+name = "obo-tests"
+version = "0.1.0"
+dependencies = [
+ "async-trait",
+ "camino",
+ "claims",
+ "obo-core",
+ "obo-test-support",
+ "open-build-service-api",
+ "open-build-service-mock",
+ "serde_yaml",
+ "tokio",
+]
+
+[[package]]
+name = "obs-gitlab-runner"
+version = "0.1.8"
+dependencies = [
+ "async-trait",
+ "camino",
+ "claims",
+ "clap",
+ "color-eyre",
+ "derivative",
+ "futures-util",
+ "gitlab-runner",
+ "gitlab-runner-mock",
+ "obo-core",
+ "obo-test-support",
+ "obo-tests",
+ "open-build-service-api",
+ "open-build-service-mock",
+ "rstest",
+ "serde",
+ "serde_json",
  "serde_yaml",
  "shell-words",
  "shellexpand",
@@ -1522,7 +1603,6 @@ dependencies = [
  "tempfile",
  "thiserror 2.0.16",
  "tokio",
- "tokio-retry2 0.6.0",
  "tokio-util",
  "tracing",
  "tracing-error",
@@ -2529,6 +2609,17 @@ dependencies = [
  "tokio",
 ]
 
+[[package]]
+name = "tokio-stream"
+version = "0.1.17"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047"
+dependencies = [
+ "futures-core",
+ "pin-project-lite",
+ "tokio",
+]
+
 [[package]]
 name = "tokio-util"
 version = "0.7.16"
diff --git a/Cargo.toml b/Cargo.toml
index 5a66c3e..f37861b 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1,50 +1,33 @@
-[package]
-name = "obs-gitlab-runner"
-version = "0.1.8"
-edition = "2024"
-license = "MIT OR Apache-2.0"
+[workspace]
+resolver = "3" +members = [ + "obo-cli", + "obo-core", + "obo-tests", + "obo-test-support", + "obs-gitlab-runner" +] -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] +[workspace.dependencies] async-trait = "0.1" -base16ct = { version = "0.2", features = ["std"] } -bytes = "1.10" camino = "1.1" +claims = "0.8" clap = { version = "4.5", features = ["default", "derive", "env"] } color-eyre = "0.6" derivative = "2.2" futures-util = "0.3" -md-5 = "0.10" -reqwest = "0.12" -rfc822-like = "0.2" +open-build-service-api = { git = "https://github.com/collabora/open-build-service-rs" } +# open-build-service-api = { path = "../open-build-service-rs/open-build-service-api" } +open-build-service-mock = { git = "https://github.com/collabora/open-build-service-rs" } +# open-build-service-mock = { path = "../open-build-service-rs/open-build-service-api" } +rstest = "0.26" serde = "1.0" +serde_json = "1.0.140" serde_yaml = "0.9" -shellexpand = "3.1" shell-words = "1.1" -strum = { version = "0.27", features = ["derive"] } tempfile = "3.20" +thiserror = "2.0" tokio = { version = "1.45", features = ["full"] } -tokio-retry2 = { version = "0.6.0", features = ["jitter"] } tokio-util = { version = "0.7", features = ["full"] } tracing = "0.1" -tracing-error = "0.2" -tracing-subscriber = { version = "0.3", features = ["default", "json"] } -url = "2.5" - -gitlab-runner = "0.3.0-rc1" -# gitlab-runner = { path = "../gitlab-runner-rs/gitlab-runner" } -open-build-service-api = { git = "https://github.com/collabora/open-build-service-rs" } -thiserror = "2.0.12" -# open-build-service-api = { path = "../open-build-service-rs/open-build-service-api" } - -[dev-dependencies] -claims = "0.8" -rstest = "0.26" wiremock = "0.6" -zip = "5.1" - -gitlab-runner-mock = "0.2.1" -# gitlab-runner-mock = { path = "../gitlab-runner-rs/gitlab-runner-mock" } -open-build-service-mock = { git = "https://github.com/collabora/open-build-service-rs" } -# open-build-service-mock = { path = "../open-build-service-rs/open-build-service-mock" } diff --git a/Dockerfile.obo-cli b/Dockerfile.obo-cli new file mode 100644 index 0000000..dddc252 --- /dev/null +++ b/Dockerfile.obo-cli @@ -0,0 +1,18 @@ +FROM rust:1.88.0-slim-bookworm AS build +ARG DEBIAN_FRONTEND=noninteractive + +ADD . /app +WORKDIR /app +RUN apt-get update \ + && apt-get install -y pkg-config libssl-dev \ + && cargo build -p obo-cli --release + +FROM debian:bookworm-slim +ARG DEBIAN_FRONTEND=noninteractive + +RUN apt-get update \ + && apt-get install -y libssl3 ca-certificates \ + && rm -rf /var/lib/apt/lists/ +COPY --from=build /app/target/release/obo /usr/local/bin/ + +ENTRYPOINT ["/usr/local/bin/obo"] diff --git a/Dockerfile b/Dockerfile.obs-gitlab-runner similarity index 92% rename from Dockerfile rename to Dockerfile.obs-gitlab-runner index 392bc22..b5ca074 100644 --- a/Dockerfile +++ b/Dockerfile.obs-gitlab-runner @@ -5,7 +5,7 @@ ADD . 
 WORKDIR /app
 RUN apt-get update \
     && apt-get install -y pkg-config libssl-dev \
-    && cargo build --release
+    && cargo build -p obs-gitlab-runner --release
 
 FROM debian:bookworm-slim
 ARG DEBIAN_FRONTEND=noninteractive
diff --git a/README.md b/README.md
index 6ff07e5..cd204b4 100644
--- a/README.md
+++ b/README.md
@@ -1,72 +1,18 @@
-# OBS GitLab Runner
+# OBS Build Orchestrator
 
-This is a custom [GitLab Runner](https://docs.gitlab.com/runner/) implementation
-exposing a custom command language for starting, monitoring, and cleaning up
-builds on [OBS](https://build.opensuse.org/), specifically targeting Debian
-packages.
+This repo contains tools for starting, monitoring, and cleaning up builds on
+[OBS](https://build.opensuse.org/), specifically targeting Debian packages.
+It's usable in two forms:
 
-## Usage
-
-### Sending Jobs
-
-In order to send commands in a job to the runner, set the job's `tags:` to
-include the tag you used during [the deployment](#deployment), e.g.:
-
-```yaml
-my-job:
-  tags:
-    - obs-runner # <-- set to run on runners tagged "obs-runner"
-  stage: some-stage
-  script:
-    # [...]
-```
-
-This will run all the commands inside `before_script`, `script`, and
-`after_script` with the runner.
-
-### Supported Syntax
-
-A subset of shell syntax is supported for commands:
-
-- Commands are split at spaces, but parts can be quoted, just like in the shell:
-  ```bash
-  some-command this-is-argument-1 "this entire string is argument 2"
-  ```
-- Variable substitution is supported, as well as the `${VARIABLE:-DEFAULT}`
-  syntax to use the value `DEFAULT` if `$VARIABLE` is unset:
-  ```bash
-  some-command $a_variable ${a_variable_with_default:-this is used if unset}
-  ```
-  The variables are sourced from the pipeline-wide and job-specific variables
-  set.
-
-  A significant departure from shell argument parsing is that **variable
-  contents are auto-quoted, thus spaces inside a variable do not split
-  arguments**. For example, given `MYVAR=a b c`, this:
-  ```bash
-  some-command $MYVAR
-  ```
-  is interpreted as:
-  ```bash
-  some-command 'a b c'
-  ```
-  *not*:
-  ```bash
-  some-command a b c
-  ```
-  There is no way to use a variable without auto-quoting its contents.
-
-#### Flags
+- [obo-cli](obo-cli/README.md), a standalone CLI.
+- [obs-gitlab-runner](obs-gitlab-runner/README.md), a custom [GitLab
+  Runner](https://docs.gitlab.com/runner/).
 
-Any flag arguments shown below can also explicitly take a true/false value, e.g.
-`--rebuild-if-unchanged`, `--rebuild-if-unchanged=true`, and
-`--rebuild-if-unchanged=false`. This is primarily useful to conditionally set
-the value for a flag; you can set `SOME_VARIABLE=true/false` in your GitLab
-pipeline, then use that variable in a flag value as `--flag=$SOME_VARIABLE`.
+## Usage
 
 ### Required Environment
 
-In order to connect to OBS, three variables must be set (generally within the
-"CI/CD" section of the settings):
+In order to connect to OBS, three environment variables must be set:
 
 - `OBS_SERVER`: The URL of the OBS instance, e.g. `https://obs.somewhere.com/`.
 - `OBS_USER`: The username used to authenticate with OBS (any commits created
@@ -75,6 +21,24 @@ In order to connect to OBS, three variables must be set (generally within the
   there are no places where this value should be logged, **for safety purposes,
   it is highly recommended to mark this variable as *Masked*.**
 
+For obs-gitlab-runner, these should generally be configured within the "CI/CD"
+section of the repository / group settings. **For safety purposes, it is highly
+recommended to mark the `OBS_PASSWORD` variable as *Masked*.** (It should not
+be logged anywhere regardless, but that will provide insulation against
+mistakes.)
+
+For obo-cli, you can additionally use the `--obs-server`, `--obs-user`, and
+`--obs-password` options, but care should be taken to avoid accidentally saving
+the values into shell history or other insecure locations.
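+
+For example, a local obo-cli invocation might look like the following (a sketch
+only; the server URL, credentials, project, and file names are all placeholder
+values):
+
+```bash
+export OBS_SERVER=https://obs.example.com/  # placeholder instance URL
+export OBS_USER=ci-bot                      # placeholder username
+export OBS_PASSWORD=some-secret             # placeholder; prefer a secret store
+obo dput some:project hello_1.0-1.dsc       # any obo command picks these up
+```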
+
+### Flag syntax
+
+Any flag arguments shown below can also explicitly take a true/false value, e.g.
+`--rebuild-if-unchanged`, `--rebuild-if-unchanged=true`, and
+`--rebuild-if-unchanged=false`. This is primarily useful to conditionally set
+the value for a flag; you can set `SOME_VARIABLE=true/false` in your CI
+pipeline, then use that variable in a flag value as `--flag=$SOME_VARIABLE`.
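+
+For example, with a pipeline variable `FORCE_REBUILD` set to `true` or `false`
+(a hypothetical variable name):
+
+```bash
+# FORCE_REBUILD is assumed to be provided by the CI pipeline configuration;
+# the project and .dsc file below are placeholders.
+dput some:project hello_1.0-1.dsc --rebuild-if-unchanged=$FORCE_REBUILD
+```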
+
 ### Commands
 
 #### `dput`
 
@@ -82,7 +46,7 @@
 ```bash
 dput PROJECT DSC_FILE
   [--branch-to BRANCHED_PROJECT]
-  [--build-info-out BUILD_INFO_FILE=build-info.yml]
+  [--build-info-out BUILD_INFO_FILE=build-info.json]
   [--rebuild-if-unchanged]
 ```
 
@@ -92,7 +56,7 @@ within, will be removed.
 
 Metadata information on the uploaded revision, such as the revision number,
 project name, and package name, will be saved into the file specified by
-`--build-info-out` (default is `build-info.yml`). This file is **required** by
+`--build-info-out` (default is `build-info.json`). This file is **required** by
 the `generate-monitor` and `prune` steps. Do note that, if `--branch-to` is
 given, the file will be written *immediately* after the branch takes place (i.e.
 before the upload); that way, if the upload fails, the branched project can still
 be cleaned up.
 
 ##### `--branch-to BRANCHED_PROJECT`
 
 Before starting an upload,
-[branch](https://openbuildservice.org/help/manuals/obs-user-guide/art.obs.bg.html#sec.obsbg.uc.branchprj)
+[branch](https://openbuildservice.org/help/manuals/obs-user-guide/art-obs-bg#sec-obsbg-uc-branchprj)
 the package to a new project, named with the value passed to the argument. Any
 uploads will now go to the branched project, and `generate-monitor` / `prune`
 will both use the branched project / package. This is particularly useful to run
@@ -109,7 +73,7 @@ testing builds on MRs; you can create an OBS branch named after the MR's Git
 branch, and then builds can take place there without interfering with your main
 projects.
 
-##### `--build-info-out BUILD_INFO_FILE=build-info.yml`
+##### `--build-info-out BUILD_INFO_FILE=build-info.json`
 
 Changes the filename that the build info will be written to.
 
@@ -124,13 +88,13 @@ Note that, if `--branch-to` was specified, this will, in practice, never be
 triggered: due to the way metadata files are handled, right after a branching
 operation, there will *always* be a change to upload.
 
-#### `generate-monitor`
+#### `generate-monitor` (*obs-gitlab-runner version*)
 
 ```bash
 generate-monitor RUNNER_TAG
   [--rules RULES]
   [--download-build-results-to BUILD_RESULTS_DIR]
-  [--build-info BUILD_INFO_FILE=build-info.yml]
+  [--build-info BUILD_INFO_FILE=build-info.json]
   [--pipeline-out PIPELINE_FILE=obs.yml]
   [--job-prefix MONITOR_JOB_PREFIX=obs]
   [--job-timeout MONITOR_JOB_TIMEOUT]
@@ -199,7 +163,7 @@ dput-and-generate:
 After a monitoring job completes, download the build results from OBS to the
 given `BUILD_RESULTS_DIR`, and upload it as a GitLab build artifact.
 
-##### `--build-info BUILD_INFO_FILE=build-info.yml`
+##### `--build-info BUILD_INFO_FILE=build-info.json`
 
 Specifies the name of the build info file to read. In particular, if a different
 build info filename was used with `dput` via
 [`--build-info-out`](#--build-info-out), then `--build-info` should be used here
 to specify the same filename.
 
@@ -229,11 +193,61 @@ Changes the expiration of the build results & logs.
 
 Changes the filename each monitoring job will save the build log into.
 
+#### `generate-monitor` (*obo-cli version*)
+
+```bash
+generate-monitor
+  [--download-build-results-to BUILD_RESULTS_DIR]
+  [--build-info BUILD_INFO_FILE=build-info.json]
+  [--monitor-out MONITOR_OUT=obs-monitor.json]
+  [--build-log-out BUILD_LOG_FILE=build.log]
+```
+
+Generates a JSON file `MONITOR_OUT` structured as:
+
+```json5
+{
+  "entries": [
+    {
+      "repo": "REPO",
+      "arch": "ARCH",
+      "commands": {
+        "monitor": "monitor [...]",
+        "download-binaries": "download-binaries [...]",
+      }
+    },
+    // ...
+  ]
+}
+```
+
+`entries` contains a list of OBS repository + architecture combinations, along
+with the subcommands to run to monitor a build and download the results (to be
+used as `obo THE_SUBCOMMAND`).
+
+##### `--download-build-results-to BUILD_RESULTS_DIR`
+
+Fills in `entries[*].commands.download-binaries` with a command that will
+download the build results from OBS to the given `BUILD_RESULTS_DIR`. If this
+option is not given, `commands.download-binaries` will be `null`.
+
+##### `--build-info BUILD_INFO_FILE=build-info.json`
+
+Specifies the name of the build info file to read. In particular, if a different
+build info filename was used with `dput` via
+[`--build-info-out`](#--build-info-out), then `--build-info` should be used here
+to specify the same filename.
+
+##### `--monitor-out MONITOR_OUT=obs-monitor.json`
+
+Changes the filename that the monitor table will be written to.
+
+##### `--build-log-out BUILD_LOG_FILE=build.log`
+
+Changes the filename each subcommand in `entries[*].commands.monitor` will save
+the build log into.
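+
+For scripted use, the generated table can be fed back into `obo`; for example,
+a sketch assuming `jq` is available (the generated commands are already
+shell-quoted, hence the `eval`):
+
+```bash
+# Run every monitor subcommand listed in the table (sketch only).
+jq -r '.entries[].commands.monitor' obs-monitor.json | while IFS= read -r cmd; do
+    eval "obo $cmd"
+done
+```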
 
 #### `prune`
 
 ```bash
 prune
-  [--build-info BUILD_INFO_FILE=build-info.yml]
+  [--build-info BUILD_INFO_FILE=build-info.json]
   [--ignore-missing-build-info]
   [--only-if-job-unsuccessful]
 ```
 
@@ -242,7 +256,7 @@ If a branch occurred, deletes the branched package and, if now empty, project,
 using the information from the build info file. (If no branching occurred, this
 does nothing.)
 
-##### `--build-info BUILD_INFO_FILE=build-info.yml`
+##### `--build-info BUILD_INFO_FILE=build-info.json`
 
 Specifies the name of the build info file to read. In particular, if a different
 build info filename was used with `dput` via
@@ -261,94 +275,3 @@ is written.
 
 Only run the prune if a previous command in the same job failed. This is
 primarily useful if `prune` is used inside of `after_script`, to only remove
 the branched project/package if e.g. the upload failed.
-
-## Deployment
-
-### Registering the Runner
-
-In order to use the runner, you must first register it with your GitLab
-instance. This requires the use of a registration token, which can be obtained
-via the following steps:
-
-- Enter the GitLab admin area.
-- Navigate to Overview -> Runners.
-- Click "Register an instance runner".
-- Copy the registration token within.
-
-(Per-group/-project registration tokens can also be retrieved from the CI/CD
-settings of the group or project.)
-
-With this token, you can now register the runner via the [GitLab
-API](https://docs.gitlab.com/ee/api/runners.html#register-a-new-runner).
-
-Example using curl:
-
-```bash
-curl --request POST "https://$GITLAB_SERVER_URL/api/v4/runners" \
-    --form description='OBS runner' \
-    --form run_untagged=false \
-    --form tag_list=obs-runner \
-    --form token="$REGISTRATION_TOKEN"
-```
-
-httpie:
-
-```bash
-http --form POST "https://$GITLAB_SERVER_URL/api/v4/runners" \
-    description='OBS runner' \
-    run_untagged=false \
-    tag_list=obs-runner \
-    token="$REGISTRATION_TOKEN"
-```
-
-**It is critical that you set `run_untagged=false`,** otherwise this runner
-will be used for *all* jobs that don't explicitly set a tag, rather than just
-the jobs explicitly targeting the runner.
-
-This API call will return a JSON object containing a `token` key, whose value
-is a _runner token_ that is used by the runner to connect to GitLab.
-
-### Docker
-
-Docker images are built on every commit, available at
-`ghcr.io/collabora/obs-gitlab-runner:main`. The entry point takes two arguments:
-
-- The GitLab server URL.
-- The runner token acquired previously.
-
-Simple example usage via the Docker CLI:
-
-```bash
-$ docker run --rm -it ghcr.io/collabora/obs-gitlab-runner:main \
-    "$GITLAB_SERVER_URL" "$GITLAB_RUNNER_TOKEN"
-```
-
-In addition, you can instead opt to set the `GITLAB_URL` and `GITLAB_TOKEN`
-environment variables:
-
-```bash
-$ docker run --rm -it \
-    -e GITLAB_URL="$GITLAB_SERVER_URL" \
-    -e GITLAB_TOKEN="$GITLAB_RUNNER_TOKEN" \
-    ghcr.io/collabora/obs-gitlab-runner:main
-```
-
-### Kubernetes
-
-A [Helm](https://helm.sh/) chart has been provided in the `chart/` directory,
-installable via:
-
-```bash
-$ helm install \
-    --set-string gitlab.url="$GITLAB_SERVER_URL" \
-    --set-string gitlab.token="$GITLAB_RUNNER_TOKEN" \
-    obs-gitlab-runner chart
-```
-
-Upgrades can skip setting `gitlab.token` to re-use the previously set value:
-
-```bash
-$ helm upgrade \
-    --set-string gitlab.url="$GITLAB_SERVER_URL" \
-    obs-gitlab-runner chart
-```
diff --git a/obo-cli/Cargo.toml b/obo-cli/Cargo.toml
new file mode 100644
index 0000000..b74d12e
--- /dev/null
+++ b/obo-cli/Cargo.toml
@@ -0,0 +1,37 @@
+[package]
+name = "obo-cli"
+description = "OBS Build Orchestrator — command-line frontend"
+version = "0.1.8"
+edition = "2024"
+license = "MIT OR Apache-2.0"
+
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[[bin]]
+name = "obo"
+path = "src/main.rs"
+
+[dependencies]
+async-trait.workspace = true
+camino.workspace = true
+clap.workspace = true
+color-eyre.workspace = true
+obo-core = { path = "../obo-core" }
+obo-test-support = { path = "../obo-test-support" }
+open-build-service-api.workspace = true
+serde.workspace = true
+serde_json.workspace = true
+tempfile.workspace = true
+tokio.workspace = true
+tracing.workspace = true
+tracing-error = "0.2"
+tracing-subscriber = { version = "0.3", features = ["default", "json"] }
+url = "2.5"
+
+[dev-dependencies]
+claims.workspace = true
+gitlab-runner-mock = "0.2.1"
+obo-tests = { path = "../obo-tests" }
+open-build-service-mock.workspace = true
+rstest.workspace = true
+tokio-stream = { version = "0.1.17", features = ["io-util"] }
diff --git a/obo-cli/README.md b/obo-cli/README.md
new file mode 100644
index 0000000..ba22e53
--- /dev/null
+++ b/obo-cli/README.md
@@ -0,0 +1,20 @@
+# obo-cli
+
+This is a CLI for starting, monitoring, and cleaning up builds on
+[OBS](https://build.opensuse.org/), specifically targeting Debian packages.
+
+## Usage
+
+For information on OBS authentication and the commands supported, see [the
+project-wide README](../README.md).
+
+Docker images are built on every commit, available at
+`ghcr.io/collabora/obo-cli:main`. The entry point directly takes the
+subcommands, e.g.:
+
+```
+docker run --rm -it -v $PWD:/work -w /work ghcr.io/collabora/obo-cli:main prune
+```
+
+will mount the current directory as `/work` and then run the `prune` command
+from within.
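+
+Since the container has no OBS credentials of its own, the usual pattern is to
+pass the required variables through, e.g. (assuming `OBS_SERVER`, `OBS_USER`,
+and `OBS_PASSWORD` are already set in the calling environment):
+
+```bash
+# -e VAR with no value forwards the variable from the host environment.
+docker run --rm -it -v $PWD:/work -w /work \
+    -e OBS_SERVER -e OBS_USER -e OBS_PASSWORD \
+    ghcr.io/collabora/obo-cli:main prune
+```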
diff --git a/obo-cli/src/lib.rs b/obo-cli/src/lib.rs
new file mode 100644
index 0000000..57b800d
--- /dev/null
+++ b/obo-cli/src/lib.rs
@@ -0,0 +1,234 @@
+use std::time::Duration;
+
+use async_trait::async_trait;
+use camino::{Utf8Path, Utf8PathBuf};
+use clap::{Args, Subcommand};
+use color_eyre::eyre::{Context, Report, Result, bail, eyre};
+use obo_core::{
+    actions::{
+        Actions, DEFAULT_BUILD_INFO, DEFAULT_BUILD_LOG, DownloadBinariesAction, DputAction,
+        LOG_TAIL_2MB, MonitorAction, ObsBuildInfo, PruneAction,
+    },
+    artifacts::{ArtifactDirectory, ArtifactReader, ArtifactWriter, MissingArtifact, SaveCallback},
+    build_meta::RepoArch,
+    monitor::PackageMonitoringOptions,
+    outputln,
+};
+use open_build_service_api as obs;
+use serde::{Deserialize, Serialize};
+use tempfile::NamedTempFile;
+use tokio::{
+    fs::File as AsyncFile,
+    io::{AsyncBufReadExt, AsyncWriteExt, BufReader},
+};
+
+pub const DEFAULT_MONITOR_TABLE: &str = "obs-monitor.json";
+
+#[derive(Debug, Deserialize, Serialize)]
+pub struct MonitorCommands {
+    pub monitor: String,
+    pub download_binaries: Option<String>,
+}
+
+#[derive(Debug, Deserialize, Serialize)]
+pub struct MonitorEntry {
+    #[serde(flatten)]
+    pub repo_arch: RepoArch,
+    pub commands: MonitorCommands,
+}
+
+#[derive(Debug, Deserialize, Serialize)]
+pub struct MonitorTable {
+    pub entries: Vec<MonitorEntry>,
+}
+
+#[derive(Args)]
+pub struct GenerateMonitorAction {
+    #[clap(long, default_value_t = DEFAULT_BUILD_INFO.to_owned())]
+    build_info: String,
+    #[clap(long, default_value_t = DEFAULT_MONITOR_TABLE.to_owned())]
+    monitor_out: String,
+    #[clap(long, default_value_t = DEFAULT_BUILD_LOG.into())]
+    build_log_out: String,
+    #[clap(long = "download-build-results-to")]
+    build_results_dir: Option<Utf8PathBuf>,
+}
+
+#[derive(Subcommand)]
+pub enum CliAction {
+    Dput(DputAction),
+    Monitor {
+        #[clap(flatten)]
+        args: MonitorAction,
+
+        // These are needed by the integration tests.
+        #[clap(long, hide = true, env = "OBO_TEST_LOG_TAIL", default_value_t = LOG_TAIL_2MB)]
+        log_tail: u64,
+        #[clap(long, hide = true, env = "OBO_TEST_SLEEP_ON_BUILDING_MS")]
+        sleep_on_building_ms: Option<u64>,
+        #[clap(long, hide = true, env = "OBO_TEST_SLEEP_ON_OLD_STATUS_MS")]
+        sleep_on_old_status_ms: Option<u64>,
+    },
+    GenerateMonitor(GenerateMonitorAction),
+    DownloadBinaries(DownloadBinariesAction),
+    Prune(PruneAction),
+}
+
+#[derive(Default)]
+pub struct LocalFsArtifacts(pub Utf8PathBuf);
+
+#[async_trait]
+impl ArtifactDirectory for LocalFsArtifacts {
+    async fn open(&self, path: impl AsRef<Utf8Path> + Send) -> Result<ArtifactReader> {
+        let path = self.0.join(path.as_ref());
+        AsyncFile::open(&path)
+            .await
+            .map(ArtifactReader::new)
+            .map_err(|e| {
+                if e.kind() == std::io::ErrorKind::NotFound {
+                    eyre!(MissingArtifact(path))
+                } else {
+                    eyre!(e)
+                }
+            })
+    }
+
+    async fn save_with<P, F, Ret, Err>(&mut self, path: P, func: F) -> Result<Ret>
+    where
+        Report: From<Err>,
+        Ret: Send,
+        Err: Send,
+        F: for<'a> SaveCallback<'a, Ret, Err> + Send,
+        P: AsRef<Utf8Path> + Send,
+    {
+        let path = self.0.join(path.as_ref());
+        let parent = path.parent().unwrap_or_else(|| Utf8Path::new("."));
+        tokio::fs::create_dir_all(&parent)
+            .await
+            .wrap_err_with(|| format!("Failed to create parents of '{path}'"))?;
+
+        let Some(basename) = path.file_name() else {
+            bail!("Invalid path: {path}");
+        };
+        let temp = NamedTempFile::with_prefix_in(basename, parent)
+            .wrap_err("Failed to create temporary file")?;
+
+        let mut writer = ArtifactWriter::new(AsyncFile::from_std(temp.as_file().try_clone()?));
+        let ret = func(&mut writer).await?;
+
+        writer.flush().await?;
+        temp.persist(&path)?;
+        Ok(ret)
+    }
+}
+
+pub struct Handler {
+    actions: Actions,
+    artifacts: LocalFsArtifacts,
+}
+
+impl Handler {
+    pub fn new(client: obs::Client, artifacts_dir: Utf8PathBuf) -> Self {
+        Self {
+            actions: Actions { client },
+            artifacts: LocalFsArtifacts(artifacts_dir),
+        }
+    }
+
+    async fn generate_monitor(&mut self, args: GenerateMonitorAction) -> Result<()> {
+        let build_info_data = self.artifacts.read_string(&args.build_info).await?;
+        let build_info: ObsBuildInfo = serde_json::from_str(&build_info_data)
+            .wrap_err("Failed to parse provided build info file")?;
+
+        let rev = build_info
+            .rev
+            .ok_or_else(|| eyre!("Build revision was not set"))?;
+        let srcmd5 = build_info
+            .srcmd5
+            .ok_or_else(|| eyre!("Build srcmd5 was not set"))?;
+
+        let mut table = MonitorTable { entries: vec![] };
+        for enabled_repo in build_info.enabled_repos {
+            table.entries.push(MonitorEntry {
+                repo_arch: enabled_repo.repo_arch.clone(),
+                commands: MonitorCommands {
+                    monitor: MonitorAction {
+                        project: build_info.project.clone(),
+                        package: build_info.package.clone(),
+                        repository: enabled_repo.repo_arch.repo.clone(),
+                        arch: enabled_repo.repo_arch.arch.clone(),
+                        rev: rev.clone(),
+                        srcmd5: srcmd5.clone(),
+                        prev_endtime_for_commit: enabled_repo.prev_endtime_for_commit,
+                        build_log_out: args.build_log_out.clone(),
+                    }
+                    .generate_command(),
+                    download_binaries: args.build_results_dir.clone().map(|build_results_dir| {
+                        DownloadBinariesAction {
+                            project: build_info.project.clone(),
+                            package: build_info.package.clone(),
+                            repository: enabled_repo.repo_arch.repo,
+                            arch: enabled_repo.repo_arch.arch,
+                            build_results_dir,
+                        }
+                        .generate_command()
+                    }),
+                },
+            });
+        }
+
+        let data = serde_json::to_string(&table).wrap_err("Failed to serialize data")?;
+
+        self.artifacts
+            .write(&args.monitor_out, data.as_bytes())
+            .await?;
+        outputln!("Wrote monitor file '{}'.", args.monitor_out);
+
+        Ok(())
+    }
+
+    pub async fn run(&mut self, action: CliAction) -> Result<()> {
+        match action {
+            CliAction::Dput(args) => self.actions.dput(args, &mut self.artifacts).await?,
+            CliAction::Monitor {
+                log_tail,
+                sleep_on_building_ms,
+                sleep_on_old_status_ms,
+                args,
+            } => {
+                let mut options = PackageMonitoringOptions::default();
+                if let Some(value) = sleep_on_building_ms {
+                    options.sleep_on_building = Duration::from_millis(value);
+                }
+                if let Some(value) = sleep_on_old_status_ms {
+                    options.sleep_on_old_status = Duration::from_millis(value);
+                }
+
+                self.actions
+                    .monitor(
+                        args,
+                        options,
+                        |file| async {
+                            let mut lines = BufReader::new(file).lines();
+                            while let Some(line) = lines.next_line().await? {
+                                eprintln!("{line}");
+                            }
+                            Ok(())
+                        },
+                        log_tail,
+                        &mut self.artifacts,
+                    )
+                    .await?
+            }
+            CliAction::GenerateMonitor(args) => self.generate_monitor(args).await?,
+            CliAction::DownloadBinaries(args) => {
+                self.actions
+                    .download_binaries(args, &mut self.artifacts)
+                    .await?
+            }
+            CliAction::Prune(args) => self.actions.prune(args, &self.artifacts).await?,
+        }
+
+        Ok(())
+    }
+}
diff --git a/obo-cli/src/main.rs b/obo-cli/src/main.rs
new file mode 100644
index 0000000..dbb0445
--- /dev/null
+++ b/obo-cli/src/main.rs
@@ -0,0 +1,120 @@
+use std::{fmt, str::FromStr};
+
+use camino::Utf8PathBuf;
+use clap::Parser;
+use color_eyre::eyre::Result;
+use obo_cli::{CliAction, Handler};
+use obo_core::logging::{
+    get_event_message, is_output_field_in_metadata, is_output_field_set_in_event,
+};
+use open_build_service_api as obs;
+use tracing::{Event, Subscriber};
+use tracing_subscriber::{
+    filter::Targets,
+    fmt::{FmtContext, FormatEvent, FormatFields, format},
+    layer::{self, Filter},
+    prelude::*,
+    registry::LookupSpan,
+};
+use url::Url;
+
+#[derive(Debug, Clone)]
+struct TargetsArg {
+    targets: Targets,
+    parsed_from: String,
+}
+
+impl FromStr for TargetsArg {
+    type Err = <Targets as FromStr>::Err;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        Targets::from_str(s).map(|targets| TargetsArg {
+            targets,
+            parsed_from: s.to_owned(),
+        })
+    }
+}
+
+impl Default for TargetsArg {
+    fn default() -> Self {
+        "".parse().unwrap()
+    }
+}
+
+impl fmt::Display for TargetsArg {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "{}", self.parsed_from)
+    }
+}
+
+struct OutputFilter;
+
+impl<S> Filter<S> for OutputFilter {
+    fn enabled(&self, meta: &tracing::Metadata<'_>, _cx: &layer::Context<'_, S>) -> bool {
+        is_output_field_in_metadata(meta)
+    }
+
+    fn event_enabled(&self, event: &Event<'_>, _cx: &layer::Context<'_, S>) -> bool {
+        is_output_field_set_in_event(event)
+    }
+}
+
+struct OutputFormatter;
+
+impl<S, N> FormatEvent<S, N> for OutputFormatter
+where
+    S: Subscriber + for<'a> LookupSpan<'a>,
+    N: for<'a> FormatFields<'a> + 'static,
+{
+    fn format_event(
+        &self,
+        _ctx: &FmtContext<'_, S, N>,
+        mut writer: format::Writer<'_>,
+        event: &Event<'_>,
+    ) -> fmt::Result {
+        let Some(message) = get_event_message(event) else {
+            return Ok(());
+        };
+        writeln!(writer, "{message}")
+    }
+}
+
+#[derive(Parser)]
+struct Args {
+    #[clap(long, env = "OBO_LOG", default_value_t = TargetsArg::default())]
+    log: TargetsArg,
+
+    #[clap(long, env = "OBS_SERVER")]
+    obs_server: Url,
+    #[clap(long, env = "OBS_USER")]
+    obs_user: String,
+    #[clap(long, env = "OBS_PASSWORD")]
+    obs_password: String,
+
+    #[clap(subcommand)]
+    action: CliAction,
+}
+
+#[tokio::main]
+async fn main() -> Result<()> {
+    let args = Args::parse();
+
+    tracing_subscriber::registry()
+        .with(tracing_error::ErrorLayer::default())
+        .with(tracing_subscriber::fmt::layer().with_filter(args.log.targets))
+        .with(
+            tracing_subscriber::fmt::layer()
+                .event_format(OutputFormatter)
+                .with_filter(OutputFilter),
+        )
+        .init();
+
+    color_eyre::install().unwrap();
+
+    let client = obs::Client::new(args.obs_server, args.obs_user, args.obs_password);
+    Handler::new(client, Utf8PathBuf::new())
+        .run(args.action)
+        .await?;
+
+    Ok(())
+}
diff --git a/obo-cli/tests/test_cli.rs b/obo-cli/tests/test_cli.rs
new file mode 100644
index 0000000..e9a9e0f
--- /dev/null
+++ b/obo-cli/tests/test_cli.rs
@@ -0,0 +1,335 @@
+use std::{
+    collections::HashMap,
+    path::Path,
+    process::{ExitStatus, Stdio},
+    sync::Arc,
+    time::Duration,
+};
+
+use async_trait::async_trait;
+use camino::Utf8Path;
+use claims::*;
+use obo_cli::{DEFAULT_MONITOR_TABLE, MonitorTable};
+use obo_core::actions::ObsBuildInfo;
+use obo_test_support::*;
+use obo_tests::*;
+use rstest::rstest;
+use tempfile::TempDir;
+use tokio::{
+    io::{AsyncBufReadExt, BufReader},
+    process::Command,
+};
+use tokio_stream::{StreamExt, wrappers::LinesStream};
+
+#[derive(Debug, Clone)]
+struct CliArtifactsHandle(Arc<TempDir>);
+
+impl ArtifactsHandle for CliArtifactsHandle {}
+
+#[derive(Clone, Debug)]
+struct CliExecutionResult {
+    status: ExitStatus,
+    out: String,
+    artifacts: Arc<TempDir>,
+}
+
+impl ExecutionResult for CliExecutionResult {
+    type Artifacts = CliArtifactsHandle;
+
+    fn ok(&self) -> bool {
+        self.status.success()
+    }
+
+    fn log(&self) -> String {
+        self.out.clone()
+    }
+
+    fn artifacts(&self) -> Self::Artifacts {
+        CliArtifactsHandle(self.artifacts.clone())
+    }
+}
+
+struct CliRunBuilder {
+    obs_server: String,
+    script: Vec<String>,
+    dependencies: Vec<Arc<TempDir>>,
+    timeout: Duration,
+}
+
+#[async_trait]
+impl RunBuilder<'_> for CliRunBuilder {
+    type ArtifactsHandle = CliArtifactsHandle;
+    type ExecutionResult = CliExecutionResult;
+
+    fn script(mut self, script: &[String]) -> Self {
+        for line in script {
+            self.script
+                .push(format!("{} {line}", env!("CARGO_BIN_EXE_obo")));
+        }
+        self
+    }
+
+    fn artifacts(mut self, artifacts: Self::ArtifactsHandle) -> Self {
+        self.dependencies.push(artifacts.0);
+        self
+    }
+
+    fn timeout(mut self, timeout: Duration) -> Self {
+        self.timeout = timeout;
+        self
+    }
+
+    // TODO: timeouts!!
+    async fn go(self) -> Self::ExecutionResult {
+        let temp = TempDir::new().unwrap();
+
+        // Symlink all the dependency artifacts into the cwd, and clean them up
+        // at the end.
+        let mut dep_files = vec![];
+        for dep in &self.dependencies {
+            let mut reader = tokio::fs::read_dir(dep.path()).await.unwrap();
+            while let Some(entry) = reader.next_entry().await.unwrap() {
+                let dest = temp.path().join(entry.file_name());
+                tokio::fs::symlink(entry.path(), &dest).await.unwrap();
+                dep_files.push(dest);
+            }
+        }
+
+        let mut child = Command::new("sh")
+            .arg("-exc")
+            .arg(self.script.join("\n"))
+            .kill_on_drop(true)
+            .env("OBS_SERVER", self.obs_server)
+            .env("OBS_USER", TEST_USER)
+            .env("OBS_PASSWORD", TEST_PASS)
+            .env("OBO_TEST_LOG_TAIL", MONITOR_TEST_LOG_TAIL.to_string())
+            .env("OBO_TEST_SLEEP_ON_BUILDING_MS", "0")
+            .env(
+                "OBO_TEST_SLEEP_ON_OLD_STATUS_MS",
+                MONITOR_TEST_OLD_STATUS_SLEEP_DURATION
+                    .as_millis()
+                    .to_string(),
+            )
+            .stdin(Stdio::null())
+            .stdout(Stdio::piped())
+            .stderr(Stdio::piped())
+            .current_dir(temp.path())
+            .spawn()
+            .unwrap();
+
+        let (status, lines) = tokio::time::timeout(self.timeout, async move {
+            let stdout = BufReader::new(child.stdout.take().unwrap());
+            let stderr = BufReader::new(child.stderr.take().unwrap());
+
+            let mut output =
+                LinesStream::new(stdout.lines()).merge(LinesStream::new(stderr.lines()));
+            let mut lines = vec![];
+
+            while let Some(line) = output.try_next().await.unwrap() {
+                // Forward the lines back to the output.
+                eprintln!("{line}");
+                lines.push(line);
+            }
+
+            let status = child.wait().await.unwrap();
+            (status, lines)
+        })
+        .await
+        .unwrap();
+
+        for file in dep_files {
+            tokio::fs::remove_file(&file).await.unwrap();
+        }
+
+        Self::ExecutionResult {
+            status,
+            out: lines.join("\n"),
+            artifacts: Arc::new(temp),
+        }
+    }
+}
+
+struct CliTestContext {
+    obs: ObsContext,
+}
+
+fn collect_artifacts_from_dir(out: &mut HashMap<String, Vec<u8>>, root: &Path, subdir: &Utf8Path) {
+    for entry in std::fs::read_dir(root.join(subdir.as_std_path())).unwrap() {
+        let entry = entry.unwrap();
+        let name = entry.file_name().into_string().unwrap();
+        let ft = entry.file_type().unwrap();
+        if ft.is_dir() {
+            collect_artifacts_from_dir(out, root, &subdir.join(name));
+        } else if ft.is_file() {
+            let contents = std::fs::read(entry.path())
+                .unwrap_or_else(|err| panic!("Fetching {}: {err}", entry.path().display()));
+            out.insert(subdir.join(name).into_string(), contents);
+        }
+    }
+}
+
+#[async_trait]
+impl TestContext for CliTestContext {
+    type ArtifactsHandle = CliArtifactsHandle;
+    type ExecutionResult = CliExecutionResult;
+    type RunBuilder<'context> = CliRunBuilder;
+
+    fn obs(&self) -> &ObsContext {
+        &self.obs
+    }
+
+    async fn inject_artifacts(
+        &mut self,
+        artifacts: HashMap<String, Vec<u8>>,
+    ) -> Self::ArtifactsHandle {
+        let temp = TempDir::new().unwrap();
+
+        for (name, contents) in artifacts {
+            tokio::fs::write(temp.path().join(name), contents)
+                .await
+                .unwrap();
+        }
+
+        CliArtifactsHandle(Arc::new(temp))
+    }
+
+    async fn fetch_artifacts(&self, handle: &Self::ArtifactsHandle) -> HashMap<String, Vec<u8>> {
+        let mut ret = HashMap::new();
+        collect_artifacts_from_dir(&mut ret, handle.0.path(), Utf8Path::new(""));
+        ret
+    }
+
+    fn run(&mut self) -> Self::RunBuilder<'_> {
+        CliRunBuilder {
+            obs_server: self.obs.client.url().to_string(),
+            script: vec![],
+            dependencies: vec![],
+            timeout: EXECUTION_DEFAULT_TIMEOUT,
+        }
+    }
+}
+
+async fn with_context<T>(func: impl AsyncFnOnce(CliTestContext) -> T) -> T {
+    let obs_mock = create_default_mock().await;
+    let obs_client = create_default_client(&obs_mock);
+
+    let ctx = CliTestContext {
+        obs: ObsContext {
+            client: obs_client,
+            mock: obs_mock,
+        },
+    };
+
+    func(ctx).await
+}
+
+async fn test_monitor_table(
+    context: &mut CliTestContext,
+    dput: CliArtifactsHandle,
+    build_info: &ObsBuildInfo,
+    success: bool,
+    dput_test: DputTest,
+    log_test: MonitorLogTest,
+    download_binaries: bool,
+) {
+    let mut generate_command = "generate-monitor".to_owned();
+    if download_binaries {
+        generate_command +=
+            &format!(" --download-build-results-to {MONITOR_TEST_BUILD_RESULTS_DIR}");
+    }
+
+    let generate = context
+        .run()
+        .command(generate_command)
+        .artifacts(dput.clone())
+        .go()
+        .await;
+    assert!(generate.ok());
+
+    let results = context.fetch_artifacts(&generate.artifacts()).await;
+    let table: MonitorTable = assert_ok!(serde_json::from_slice(
+        results.get(DEFAULT_MONITOR_TABLE).unwrap()
+    ));
+
+    for enabled in &build_info.enabled_repos {
+        let entry = table
+            .entries
+            .iter()
+            .find(|m| m.repo_arch == enabled.repo_arch)
+            .unwrap();
+
+        let mut script = vec![entry.commands.monitor.clone()];
+        if download_binaries {
+            script.push(entry.commands.download_binaries.clone().unwrap());
+        } else {
+            assert_none!(&entry.commands.download_binaries);
+        }
+
+        test_monitoring(
+            context,
+            dput.clone(),
+            build_info,
+            &enabled.repo_arch,
+            &script,
+            success,
+            dput_test,
+            log_test,
+            download_binaries,
+        )
+        .await;
+    }
+}
+
+async fn test_prune(
+    context: &mut CliTestContext,
+    dput: CliArtifactsHandle,
+    build_info: &ObsBuildInfo,
+) {
+    test_prune_missing_build_info(context).await;
+
+    let prune = context
+        .run()
+        .command("prune")
+        .artifacts(dput.clone())
+        .go()
+        .await;
+    test_prune_deleted_package_1_if_branched(context, build_info, &prune).await;
+}
+
+#[rstest]
+#[tokio::test]
+async fn test_cli_flow(
+    #[values(
+        DputTest::Basic,
+        DputTest::Rebuild,
+        DputTest::ReusePreviousBuild,
+        DputTest::Branch
+    )]
+    dput_test: DputTest,
+    #[values(true, false)] build_success: bool,
+    #[values(
+        MonitorLogTest::Long,
+        MonitorLogTest::Short,
+        MonitorLogTest::Unavailable
+    )]
+    log_test: MonitorLogTest,
+    #[values(true, false)] download_binaries: bool,
+) {
+    with_context(async |mut context| {
+        let (dput, build_info) = test_dput(&mut context, dput_test).await;
+
+        test_monitor_table(
+            &mut context,
+            dput.clone(),
+            &build_info,
+            build_success,
+            dput_test,
+            log_test,
+            download_binaries,
+        )
+        .await;
+
+        test_prune(&mut context, dput.clone(), &build_info).await;
+    })
+    .await;
+}
diff --git a/obo-core/Cargo.toml b/obo-core/Cargo.toml
new file mode 100644
index 0000000..da839db
--- /dev/null
+++ b/obo-core/Cargo.toml
@@ -0,0 +1,37 @@
+[package]
+name = "obo-core"
+description = "OBS Build Orchestrator — core"
+version = "0.1.0"
+edition = "2024"
+license = "MIT OR Apache-2.0"
+
+[dependencies]
+async-trait.workspace = true
+base16ct = { version = "0.2", features = ["std"] }
+bytes = "1.10"
+camino.workspace = true
+clap.workspace = true
+color-eyre.workspace = true
+derivative.workspace = true
+futures-util.workspace = true
+md-5 = "0.10"
+obo-test-support = { path = "../obo-test-support" }
+open-build-service-api.workspace = true
+reqwest = "0.12"
+rfc822-like = "0.2"
+serde.workspace = true
+serde_json.workspace = true
+shell-words.workspace = true
+tempfile.workspace = true
+thiserror.workspace = true
+tokio.workspace = true
+tokio-retry2 = { version = "0.6.0", features = ["jitter"] }
+tokio-util.workspace = true
+tracing.workspace = true
+
+[dev-dependencies]
+claims.workspace = true
+rstest.workspace = true
+open-build-service-mock.workspace = true
+wiremock.workspace = true
diff --git a/src/actions.rs b/obo-core/src/actions.rs
similarity index 86%
rename from src/actions.rs
rename to obo-core/src/actions.rs
index 140f8e0..11244d3 100644
--- a/src/actions.rs
+++ b/obo-core/src/actions.rs
@@ -1,4 +1,4 @@
-use std::{collections::HashMap, io::SeekFrom};
+use std::io::SeekFrom;
 
 use camino::{Utf8Path, Utf8PathBuf};
 use clap::{ArgAction, Parser};
@@ -11,10 +11,7 @@ use tracing::{debug, instrument};
 use crate::{
     artifacts::{ArtifactDirectory, ArtifactReader, ArtifactWriter, MissingArtifactToNone},
     binaries::download_binaries,
-    build_meta::{
-        BuildHistoryRetrieval, BuildMeta, BuildMetaOptions, CommitBuildInfo, DisabledRepos,
-        RepoArch,
-    },
+    build_meta::{BuildHistoryRetrieval, BuildMeta, BuildMetaOptions, DisabledRepos, EnabledRepo},
     monitor::{MonitoredPackage, ObsMonitor, PackageCompletion, PackageMonitoringOptions},
     outputln,
     prune::prune_branch,
@@ -22,7 +19,7 @@ use crate::{
     upload::ObsDscUploader,
 };
 
-pub const DEFAULT_BUILD_INFO: &str = "build-info.yml";
+pub const DEFAULT_BUILD_INFO: &str = "build-info.json";
 pub const DEFAULT_BUILD_LOG: &str = "build.log";
 
 // Our flags can all take explicit values, because it makes it easier to
@@ -42,6 +39,27 @@ impl FlagSupportingExplicitValue for clap::Arg {
     }
 }
 
+#[derive(Debug)]
+struct CommandBuilder {
+    args: Vec<String>,
+}
+
+impl CommandBuilder {
+    fn new(name: String) -> Self {
+        Self { args: vec![name] }
+    }
+
+    fn add(&mut self, arg: &str, value: &str) -> &mut Self {
+        self.args
+            .push(format!("--{arg}={}", shell_words::quote(value)));
+        self
+    }
+
+    fn build(self) -> String {
+        self.args.join(" ")
+    }
+}
+
 #[derive(Parser, Debug)]
 pub struct DputAction {
     pub project: String,
@@ -74,6 +92,24 @@ pub struct MonitorAction {
     pub build_log_out: String,
 }
 
+impl MonitorAction {
+    pub fn generate_command(&self) -> String {
+        let mut builder = CommandBuilder::new("monitor".to_owned());
+        builder
+            .add("project", &self.project)
+            .add("package", &self.package)
+            .add("rev", &self.rev)
+            .add("srcmd5", &self.srcmd5)
+            .add("repository", &self.repository)
+            .add("arch", &self.arch)
+            .add("build-log-out", &self.build_log_out);
+        if let Some(endtime) = &self.prev_endtime_for_commit {
+            builder.add("prev-endtime-for-commit", &endtime.to_string());
+        }
+        builder.build()
+    }
+}
+
 #[derive(Parser, Debug)]
 pub struct DownloadBinariesAction {
     #[clap(long)]
@@ -88,6 +124,19 @@ pub struct DownloadBinariesAction {
     pub build_results_dir: Utf8PathBuf,
 }
 
+impl DownloadBinariesAction {
+    pub fn generate_command(&self) -> String {
+        let mut builder = CommandBuilder::new("download-binaries".to_owned());
+        builder
+            .add("project", &self.project)
+            .add("package", &self.package)
+            .add("repository", &self.repository)
+            .add("arch", &self.arch)
+            .add("build-results-dir", self.build_results_dir.as_str());
+        builder.build()
+    }
+}
+
 #[derive(Parser, Debug)]
 pub struct PruneAction {
     #[clap(long, default_value_t = DEFAULT_BUILD_INFO.to_owned())]
@@ -103,7 +152,7 @@ pub struct ObsBuildInfo {
     pub rev: Option<String>,
     pub srcmd5: Option<String>,
     pub is_branched: bool,
-    pub enabled_repos: HashMap<RepoArch, CommitBuildInfo>,
+    pub enabled_repos: Vec<EnabledRepo>,
 }
 
 impl ObsBuildInfo {
@@ -112,7 +161,7 @@ impl ObsBuildInfo {
         artifacts
             .save_with(path, async |file: &mut ArtifactWriter| {
                 let data =
-                    serde_yaml::to_string(&self).wrap_err("Failed to serialize build info")?;
+                    serde_json::to_string(&self).wrap_err("Failed to serialize build info")?;
                 file.write_all(data.as_bytes())
                     .await
                     .wrap_err("Failed to write build info file")?;
@@ -165,7 +214,7 @@ impl Actions {
             rev: None,
             srcmd5: None,
             is_branched,
-            enabled_repos: HashMap::new(),
+            enabled_repos: vec![],
         };
 
         debug!("Saving initial build info: {:?}", build_info);
         build_info
@@ -362,7 +411,7 @@ impl Actions {
             artifacts.read_string(&args.build_info).await?
         };
 
-        let build_info: ObsBuildInfo = serde_yaml::from_str(&build_info_data)
+        let build_info: ObsBuildInfo = serde_json::from_str(&build_info_data)
            .wrap_err("Failed to parse provided build info file")?;
 
         if build_info.is_branched {
diff --git a/src/artifacts.rs b/obo-core/src/artifacts.rs
similarity index 96%
rename from src/artifacts.rs
rename to obo-core/src/artifacts.rs
index 325b45f..473d421 100644
--- a/src/artifacts.rs
+++ b/obo-core/src/artifacts.rs
@@ -40,8 +40,12 @@ pub struct ArtifactWriter {
 }
 
 impl ArtifactWriter {
+    pub fn new(inner: AsyncFile) -> Self {
+        Self { inner }
+    }
+
     #[instrument]
-    pub async fn new() -> Result<Self> {
+    pub async fn new_anon() -> Result<Self> {
         let file = tokio::task::spawn_blocking(tempfile::tempfile).await??;
         Ok(Self {
             inner: AsyncFile::from_std(file),
@@ -112,6 +116,10 @@ pub struct ArtifactReader {
 }
 
 impl ArtifactReader {
+    pub fn new(inner: AsyncFile) -> Self {
+        Self { inner }
+    }
+
     pub async fn from_async_file(file: &AsyncFile) -> Result<Self> {
         let inner = AsyncFile::options()
             .read(true)
@@ -234,7 +242,7 @@ pub mod test_support {
             F: for<'a> SaveCallback<'a, Ret, Err> + Send,
             P: AsRef<Utf8Path> + Send,
         {
-            let mut writer = ArtifactWriter::new().await?;
+            let mut writer = ArtifactWriter::new_anon().await?;
             let ret = func(&mut writer).await?;
             self.artifacts
                 .insert(path.as_ref().to_owned(), writer.into_reader().await?);
diff --git a/src/binaries.rs b/obo-core/src/binaries.rs
similarity index 97%
rename from src/binaries.rs
rename to obo-core/src/binaries.rs
index a010537..86b0cb5 100644
--- a/src/binaries.rs
+++ b/obo-core/src/binaries.rs
@@ -80,9 +80,10 @@ mod tests {
     use std::time::SystemTime;
 
     use claims::*;
+    use obo_test_support::*;
     use open_build_service_mock::*;
 
-    use crate::{artifacts::test_support::MockArtifactDirectory, test_support::*};
+    use crate::artifacts::test_support::MockArtifactDirectory;
 
     use super::*;
diff --git a/src/build_meta.rs b/obo-core/src/build_meta.rs
similarity index 93%
rename from src/build_meta.rs
rename to obo-core/src/build_meta.rs
index 3588110..41ebad7 100644
--- a/src/build_meta.rs
+++ b/obo-core/src/build_meta.rs
@@ -14,7 +14,9 @@ pub struct RepoArch {
 }
 
 #[derive(Deserialize, Serialize, Debug, Clone)]
-pub struct CommitBuildInfo {
+pub struct EnabledRepo {
+    #[serde(flatten)]
+    pub repo_arch: RepoArch,
     pub prev_endtime_for_commit: Option<u64>,
 }
 
@@ -237,26 +239,19 @@ impl BuildMeta {
         Ok(())
     }
 
-    pub fn get_commit_build_info(&self, srcmd5: &str) -> HashMap<RepoArch, CommitBuildInfo> {
-        let mut repos = HashMap::new();
-
-        for (repo, jobhist) in &self.repos {
-            let prev_endtime_for_commit = jobhist
-                .jobhist
-                .iter()
-                .filter(|e| e.srcmd5 == srcmd5)
-                .next_back()
-                .map(|e| e.endtime);
-
-            repos.insert(
-                repo.clone(),
-                CommitBuildInfo {
-                    prev_endtime_for_commit,
-                },
-            );
-        }
-
-        repos
+    pub fn get_commit_build_info(&self, srcmd5: &str) -> Vec<EnabledRepo> {
+        self.repos
+            .iter()
+            .map(|(repo, jobhist)| EnabledRepo {
+                repo_arch: repo.clone(),
+                prev_endtime_for_commit: jobhist
+                    .jobhist
+                    .iter()
+                    .filter(|e| e.srcmd5 == srcmd5)
+                    .next_back()
+                    .map(|e| e.endtime),
+            })
+            .collect()
     }
 }
 
@@ -265,11 +260,10 @@ mod tests {
     use std::time::{Duration, SystemTime};
 
     use claims::*;
+    use obo_test_support::*;
     use open_build_service_mock::*;
     use rstest::rstest;
 
-    use crate::test_support::*;
-
     use super::*;
 
     #[tokio::test]
@@ -361,10 +355,10 @@ mod tests {
 
         let build_info = meta.get_commit_build_info(&srcmd5_1);
 
-        let arch_1 = assert_some!(build_info.get(&repo_arch_1));
+        let arch_1 = assert_some!(build_info.iter().find(|e| e.repo_arch == repo_arch_1));
         assert_some_eq!(arch_1.prev_endtime_for_commit, endtime_1);
 
-        let arch_2 = assert_some!(build_info.get(&repo_arch_2));
+        let arch_2 = assert_some!(build_info.iter().find(|e| e.repo_arch == repo_arch_2));
         assert_none!(arch_2.prev_endtime_for_commit);
 
         let meta = assert_ok!(
@@ -386,9 +380,9 @@ mod tests {
 
         let build_info = meta.get_commit_build_info(&srcmd5_1);
 
-        let arch_1 = assert_some!(build_info.get(&repo_arch_1));
+        let arch_1 = assert_some!(build_info.iter().find(|e| e.repo_arch == repo_arch_1));
         assert_none!(arch_1.prev_endtime_for_commit);
-        let arch_2 = assert_some!(build_info.get(&repo_arch_2));
+        let arch_2 = assert_some!(build_info.iter().find(|e| e.repo_arch == repo_arch_2));
         assert_none!(arch_2.prev_endtime_for_commit);
 
         assert!(meta.repos.contains_key(&repo_arch_2));
@@ -432,7 +426,7 @@ mod tests {
 
         let build_info = meta.get_commit_build_info(&srcmd5_2);
 
-        let arch_1 = assert_some!(build_info.get(&repo_arch_2));
+        let arch_1 = assert_some!(build_info.iter().find(|e| e.repo_arch == repo_arch_2));
         assert_some_eq!(arch_1.prev_endtime_for_commit, endtime_2);
 
         mock.add_job_history(
@@ -466,13 +460,13 @@ mod tests {
 
         let build_info = meta.get_commit_build_info(&srcmd5_1);
 
-        let arch_2 = assert_some!(build_info.get(&repo_arch_2));
+        let arch_2 = assert_some!(build_info.iter().find(|e| e.repo_arch == repo_arch_2));
         assert_some_eq!(arch_2.prev_endtime_for_commit, endtime_1);
 
         meta.clear_stored_history();
 
         let build_info = meta.get_commit_build_info(&srcmd5_1);
 
-        let arch_2 = assert_some!(build_info.get(&repo_arch_2));
+        let arch_2 = assert_some!(build_info.iter().find(|e| e.repo_arch == repo_arch_2));
         assert_none!(arch_2.prev_endtime_for_commit);
 
         mock.set_package_build_status(
diff --git a/src/dsc.rs b/obo-core/src/dsc.rs
similarity index 100%
rename from src/dsc.rs
rename to obo-core/src/dsc.rs
diff --git a/obo-core/src/lib.rs b/obo-core/src/lib.rs
new file mode 100644
index 0000000..9e5a517
--- /dev/null
+++ b/obo-core/src/lib.rs
@@ -0,0 +1,10 @@
+pub mod actions;
+pub mod artifacts;
+pub mod binaries;
+pub mod build_meta;
+pub mod dsc;
+pub mod logging;
+pub mod monitor;
+pub mod prune;
+pub mod retry;
+pub mod upload;
diff --git a/obo-core/src/logging.rs b/obo-core/src/logging.rs
new file mode 100644
index 0000000..ae94cfc
--- /dev/null
+++ b/obo-core/src/logging.rs
@@ -0,0 +1,57 @@
+use tracing::{
+    Event, Metadata,
+    field::{self, Field},
+};
+
+pub const TRACING_FIELD: &str = "obo_core.output";
+
+#[macro_export]
+macro_rules! outputln {
+    ($($args:tt)*) => {
+        ::tracing::trace!(obo_core.output = true, $($args)*)
+    };
+}
+
+struct OutputTester(bool);
+
+impl field::Visit for OutputTester {
+    fn record_bool(&mut self, field: &Field, value: bool) {
+        if field.name() == TRACING_FIELD {
+            self.0 = value;
+        }
+    }
+
+    fn record_debug(&mut self, _field: &Field, _value: &dyn std::fmt::Debug) {}
+}
+
+pub fn is_output_field_set_in_event(event: &Event<'_>) -> bool {
+    let mut visitor = OutputTester(false);
+    event.record(&mut visitor);
+    visitor.0
+}
+
+pub fn is_output_field_in_metadata(metadata: &Metadata<'_>) -> bool {
+    metadata.fields().iter().any(|f| f.name() == TRACING_FIELD)
+}
+
+struct MessageExtractor(Option<String>);
+
+impl field::Visit for MessageExtractor {
+    fn record_str(&mut self, field: &Field, value: &str) {
+        if field.name() == "message" {
+            self.0 = Some(value.to_owned());
+        }
+    }
+
+    fn record_debug(&mut self, field: &Field, value: &dyn std::fmt::Debug) {
+        if field.name() == "message" {
+            self.0 = Some(format!("{value:?}"));
+        }
+    }
+}
+
+pub fn get_event_message(event: &Event) -> Option<String> {
+    let mut visitor = MessageExtractor(None);
+    event.record(&mut visitor);
+    visitor.0
+}
diff --git a/src/monitor.rs b/obo-core/src/monitor.rs
similarity index 99%
rename from src/monitor.rs
rename to obo-core/src/monitor.rs
index 95c6c7c..ecf8b81 100644
--- a/src/monitor.rs
+++ b/obo-core/src/monitor.rs
@@ -287,10 +287,11 @@ mod tests {
     use std::{collections::HashMap, time::SystemTime};
 
     use claims::*;
+    use obo_test_support::*;
     use obs::PackageCode;
     use open_build_service_mock::*;
 
-    use crate::{artifacts::test_support::MockArtifactDirectory, test_support::*};
+    use crate::artifacts::test_support::MockArtifactDirectory;
 
     use super::*;
diff --git a/src/prune.rs b/obo-core/src/prune.rs
similarity index 99%
rename from src/prune.rs
rename to obo-core/src/prune.rs
index 088f871..9178328 100644
--- a/src/prune.rs
+++ b/obo-core/src/prune.rs
@@ -95,10 +95,9 @@ mod tests {
     use std::time::SystemTime;
 
     use claims::*;
+    use obo_test_support::*;
     use open_build_service_mock::*;
 
-    use crate::test_support::*;
-
     use super::*;
 
     #[tokio::test]
diff --git a/src/retry.rs b/obo-core/src/retry.rs
similarity index 99%
rename from src/retry.rs
rename to obo-core/src/retry.rs
index 857a6d8..6b90f39 100644
--- a/src/retry.rs
+++ b/obo-core/src/retry.rs
retry_request { mod tests { use claims::*; use futures_util::Future; + use obo_test_support::*; use open_build_service_api as obs; use rstest::*; use wiremock::{ @@ -86,8 +87,6 @@ mod tests { matchers::{method, path_regex}, }; - use crate::test_support::*; - use super::*; #[fixture] diff --git a/src/upload.rs b/obo-core/src/upload.rs similarity index 99% rename from src/upload.rs rename to obo-core/src/upload.rs index 844698b..a2dbd17 100644 --- a/src/upload.rs +++ b/obo-core/src/upload.rs @@ -328,9 +328,10 @@ mod tests { use std::time::SystemTime; use claims::*; + use obo_test_support::*; use open_build_service_mock::*; - use crate::{artifacts::test_support::MockArtifactDirectory, test_support::*}; + use crate::artifacts::test_support::MockArtifactDirectory; use super::*; diff --git a/obo-test-support/Cargo.toml b/obo-test-support/Cargo.toml new file mode 100644 index 0000000..e6c281c --- /dev/null +++ b/obo-test-support/Cargo.toml @@ -0,0 +1,9 @@ +[package] +name = "obo-test-support" +version = "0.1.0" +edition = "2024" +license = "MIT OR Apache-2.0" + +[dependencies] +open-build-service-api.workspace = true +open-build-service-mock.workspace = true diff --git a/src/test_support.rs b/obo-test-support/src/lib.rs similarity index 100% rename from src/test_support.rs rename to obo-test-support/src/lib.rs diff --git a/obo-tests/Cargo.toml b/obo-tests/Cargo.toml new file mode 100644 index 0000000..4b938f7 --- /dev/null +++ b/obo-tests/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "obo-tests" +description = "OBS Build Orchestrator — shared tests for different frontends" +version = "0.1.0" +edition = "2024" +license = "MIT OR Apache-2.0" + +[dependencies] +async-trait.workspace = true +camino.workspace = true +claims.workspace = true +obo-core = { path = "../obo-core" } +obo-test-support = { path = "../obo-test-support" } +open-build-service-api.workspace = true +open-build-service-mock.workspace = true +serde_yaml.workspace = true +tokio.workspace = true diff --git a/obo-tests/src/lib.rs b/obo-tests/src/lib.rs new file mode 100644 index 0000000..0561dc9 --- /dev/null +++ b/obo-tests/src/lib.rs @@ -0,0 +1,594 @@ +use std::{ + cmp::Ordering, + collections::HashMap, + fmt, + time::{Duration, SystemTime}, +}; + +use async_trait::async_trait; +use camino::Utf8Path; +use claims::*; +use obo_core::{ + actions::{DEFAULT_BUILD_INFO, DEFAULT_BUILD_LOG, ObsBuildInfo}, + build_meta::RepoArch, + upload::compute_md5, +}; +use obo_test_support::*; +use open_build_service_api as obs; +use open_build_service_mock::*; + +#[derive(Clone)] +pub struct ObsContext { + pub client: obs::Client, + pub mock: ObsMock, +} + +// A handle to some set of artifacts, either artificially "injected" (i.e. +// custom artifacts manually set up to be read by some command) or the outputs +// of executing a command. +pub trait ArtifactsHandle: Send + Sync + Clone + fmt::Debug {} + +// The result of a command, letting you easily deterimine if it succeeded and +// access the output logs / artifacts. +pub trait ExecutionResult: Send + Sync + Clone + fmt::Debug { + type Artifacts: ArtifactsHandle; + + fn ok(&self) -> bool; + fn log(&self) -> String; + fn artifacts(&self) -> Self::Artifacts; +} + +pub const EXECUTION_DEFAULT_TIMEOUT: Duration = Duration::from_secs(30); + +// Builds a set of commands to run, along with the ability to determine its +// input artifacts and timeout. 
+#[async_trait]
+pub trait RunBuilder<'context>: Send + Sync + Sized {
+    type ArtifactsHandle: ArtifactsHandle;
+    type ExecutionResult: ExecutionResult;
+
+    fn command(self, cmd: impl Into<String>) -> Self {
+        self.script(&[cmd.into()])
+    }
+
+    fn script(self, cmd: &[String]) -> Self;
+    fn artifacts(self, artifacts: Self::ArtifactsHandle) -> Self;
+    fn timeout(self, timeout: Duration) -> Self;
+    async fn go(self) -> Self::ExecutionResult;
+}
+
+// A generic "context", implemented by specific obo frontends, that allows the
+// tests to manipulate artifacts and run commands in a generalized way.
+#[async_trait]
+pub trait TestContext: Send + Sync {
+    type ArtifactsHandle: ArtifactsHandle;
+    type ExecutionResult: ExecutionResult;
+    type RunBuilder<'context>: RunBuilder<
+            'context,
+            ArtifactsHandle = Self::ArtifactsHandle,
+            ExecutionResult = Self::ExecutionResult,
+        > + 'context
+    where
+        Self: 'context;
+
+    fn obs(&self) -> &ObsContext;
+
+    async fn inject_artifacts(
+        &mut self,
+        artifacts: HashMap<String, Vec<u8>>,
+    ) -> Self::ArtifactsHandle;
+
+    async fn fetch_artifacts(&self, handle: &Self::ArtifactsHandle) -> HashMap<String, Vec<u8>>;
+
+    fn run(&mut self) -> Self::RunBuilder<'_>;
+}
+
+#[derive(Debug, PartialEq, Eq, Clone, Copy)]
+pub enum DputTest {
+    Basic,
+    Rebuild,
+    ReusePreviousBuild,
+    Branch,
+}
+
+pub async fn test_dput<C: TestContext>(
+    context: &mut C,
+    test: DputTest,
+) -> (C::ArtifactsHandle, ObsBuildInfo) {
+    let test1_file = "test1";
+    let test1_contents = b"123";
+    let test1_md5 = compute_md5(test1_contents);
+
+    let dsc1_file = "test1.dsc";
+    let dsc1_contents = format!(
+        "Source: {}\nFiles:\n {} {} {}",
+        TEST_PACKAGE_1,
+        test1_md5.clone(),
+        test1_contents.len(),
+        test1_file
+    );
+    let dsc1_md5 = compute_md5(dsc1_contents.as_bytes());
+
+    let dsc1_bad_file = "test1-bad.dsc";
+    let dsc1_bad_contents =
+        dsc1_contents.replace(test1_file, &(test1_file.to_owned() + ".missing"));
+
+    context.obs().mock.add_project(TEST_PROJECT.to_owned());
+
+    context.obs().mock.add_or_update_repository(
+        TEST_PROJECT,
+        TEST_REPO.to_owned(),
+        TEST_ARCH_1.to_owned(),
+        MockRepositoryCode::Finished,
+    );
+    context.obs().mock.add_or_update_repository(
+        TEST_PROJECT,
+        TEST_REPO.to_owned(),
+        TEST_ARCH_2.to_owned(),
+        MockRepositoryCode::Finished,
+    );
+
+    if test == DputTest::Rebuild {
+        // We also test excluded repos on rebuilds; this test makes it easier,
+        // because it's not testing creating a new package, so we can create it
+        // ourselves first with the desired metadata.
+ context.obs().mock.add_new_package( + TEST_PROJECT, + TEST_PACKAGE_1.to_owned(), + MockPackageOptions::default(), + ); + context.obs().mock.set_package_build_status( + TEST_PROJECT, + TEST_REPO, + TEST_ARCH_2, + TEST_PACKAGE_1.to_owned(), + MockBuildStatus::new(MockPackageCode::Disabled), + ); + } + + let artifacts = context + .inject_artifacts( + [ + (dsc1_file.to_owned(), dsc1_contents.as_bytes().to_vec()), + ( + dsc1_bad_file.to_owned(), + dsc1_bad_contents.as_bytes().to_vec(), + ), + (test1_file.to_owned(), test1_contents.to_vec()), + ] + .into(), + ) + .await; + + let mut dput_command = format!("dput {TEST_PROJECT} {dsc1_file}"); + let mut created_project = TEST_PROJECT.to_owned(); + + if test == DputTest::Branch { + created_project += ":branched"; + dput_command += &format!(" --branch-to {created_project}"); + } + + let dput = context + .run() + .command(dput_command.replace(dsc1_file, dsc1_bad_file)) + .artifacts(artifacts.clone()) + .go() + .await; + + assert!(!dput.ok()); + + let results = context.fetch_artifacts(&dput.artifacts()).await; + let build_info: ObsBuildInfo = + serde_yaml::from_slice(results.get(DEFAULT_BUILD_INFO).unwrap()).unwrap(); + + assert_eq!(build_info.project, created_project); + assert_eq!(build_info.package, TEST_PACKAGE_1); + assert_none!(build_info.rev); + assert_eq!(build_info.is_branched, test == DputTest::Branch); + + let mut dput = context + .run() + .command(&dput_command) + .artifacts(artifacts.clone()) + .go() + .await; + assert!(dput.ok()); + + if test == DputTest::Rebuild || test == DputTest::ReusePreviousBuild { + context.obs().mock.add_or_update_repository( + &created_project, + TEST_REPO.to_owned(), + TEST_ARCH_1.to_owned(), + MockRepositoryCode::Building, + ); + // Also test endtimes, since we now have an existing package to modify + // the metadata of. + let dir = assert_ok!( + context + .obs() + .client + .project(TEST_PROJECT.to_owned()) + .package(TEST_PACKAGE_1.to_owned()) + .list(None) + .await + ); + // Testing of reused builds never had the second arch disabled, so also + // add that build history. 
+ if test == DputTest::ReusePreviousBuild { + context.obs().mock.add_job_history( + TEST_PROJECT, + TEST_REPO, + TEST_ARCH_2, + MockJobHistoryEntry { + package: TEST_PACKAGE_1.to_owned(), + rev: dir.rev.clone().unwrap(), + srcmd5: dir.srcmd5.clone(), + ..Default::default() + }, + ); + } + context.obs().mock.add_job_history( + TEST_PROJECT, + TEST_REPO, + TEST_ARCH_1, + MockJobHistoryEntry { + package: TEST_PACKAGE_1.to_owned(), + rev: dir.rev.unwrap(), + srcmd5: dir.srcmd5, + ..Default::default() + }, + ); + + context.obs().mock.set_package_build_status_for_rebuilds( + &created_project, + MockBuildStatus::new(MockPackageCode::Broken), + ); + context.obs().mock.set_package_build_status( + &created_project, + TEST_REPO, + TEST_ARCH_1, + TEST_PACKAGE_1.to_owned(), + MockBuildStatus::new(MockPackageCode::Failed), + ); + + let status = assert_ok!( + context + .obs() + .client + .project(created_project.clone()) + .package(TEST_PACKAGE_1.to_owned()) + .status(TEST_REPO, TEST_ARCH_1) + .await + ); + assert_eq!(status.code, obs::PackageCode::Failed); + + dput = context + .run() + .command(&dput_command) + .artifacts(artifacts.clone()) + .go() + .await; + assert!(dput.ok()); + + assert!(dput.log().contains("unchanged")); + + let status = assert_ok!( + context + .obs() + .client + .project(created_project.clone()) + .package(TEST_PACKAGE_1.to_owned()) + .status(TEST_REPO, TEST_ARCH_1) + .await + ); + assert_eq!(status.code, obs::PackageCode::Failed); + + if test == DputTest::Rebuild { + dput = context + .run() + .command(format!("{dput_command} --rebuild-if-unchanged")) + .artifacts(artifacts.clone()) + .go() + .await; + assert!(dput.ok()); + + let status = assert_ok!( + context + .obs() + .client + .project(created_project.clone()) + .package(TEST_PACKAGE_1.to_owned()) + .status(TEST_REPO, TEST_ARCH_1) + .await + ); + assert_eq!(status.code, obs::PackageCode::Broken); + + assert!(dput.log().contains("unchanged")); + } + } + + let results = context.fetch_artifacts(&dput.artifacts()).await; + let build_info: ObsBuildInfo = + serde_yaml::from_slice(results.get(DEFAULT_BUILD_INFO).unwrap()).unwrap(); + + assert_eq!(build_info.project, created_project); + assert_eq!(build_info.package, TEST_PACKAGE_1); + assert_some!(build_info.rev.as_deref()); + assert_eq!(build_info.is_branched, test == DputTest::Branch); + + assert_eq!( + build_info.enabled_repos.len(), + if test == DputTest::Rebuild { 1 } else { 2 } + ); + + let arch_1 = build_info + .enabled_repos + .iter() + .find(|e| e.repo_arch.repo == TEST_REPO && e.repo_arch.arch == TEST_ARCH_1) + .unwrap(); + + if test == DputTest::Rebuild { + assert_some!(arch_1.prev_endtime_for_commit); + } else { + assert_none!(arch_1.prev_endtime_for_commit); + + let arch_2 = build_info + .enabled_repos + .iter() + .find(|e| e.repo_arch.repo == TEST_REPO && e.repo_arch.arch == TEST_ARCH_2) + .unwrap(); + assert_none!(arch_2.prev_endtime_for_commit); + } + + let mut dir = assert_ok!( + context + .obs() + .client + .project(created_project.clone()) + .package(TEST_PACKAGE_1.to_owned()) + .list(None) + .await + ); + + assert_eq!(dir.entries.len(), 3); + dir.entries.sort_by(|a, b| a.name.cmp(&b.name)); + assert_eq!(dir.entries[0].name, "_meta"); + assert_eq!(dir.entries[1].name, test1_file); + assert_eq!(dir.entries[1].size, test1_contents.len() as u64); + assert_eq!(dir.entries[1].md5, test1_md5); + assert_eq!(dir.entries[2].name, dsc1_file); + assert_eq!(dir.entries[2].size, dsc1_contents.len() as u64); + assert_eq!(dir.entries[2].md5, dsc1_md5); + + (dput.artifacts(), 
build_info) +} + +pub const MONITOR_TEST_BUILD_RESULTS_DIR: &str = "results"; +pub const MONITOR_TEST_BUILD_RESULT: &str = "test-build-result"; +pub const MONITOR_TEST_BUILD_RESULT_CONTENTS: &[u8] = b"abcdef"; +pub const MONITOR_TEST_LOG_TAIL: u64 = 50; +pub const MONITOR_TEST_OLD_STATUS_SLEEP_DURATION: Duration = Duration::from_millis(100); + +#[derive(Debug, PartialEq, Eq, Clone, Copy)] +pub enum MonitorLogTest { + // Test that long logs are truncated. + Long, + // Test that short logs are fully shown. + Short, + // Test that revision mismatches result in unavailable logs. + Unavailable, +} + +#[allow(clippy::too_many_arguments)] +pub async fn test_monitoring( + context: &mut C, + dput: C::ArtifactsHandle, + build_info: &ObsBuildInfo, + repo: &RepoArch, + script: &[String], + success: bool, + dput_test: DputTest, + log_test: MonitorLogTest, + download_binaries: bool, +) { + let srcmd5_prefix = format!( + "srcmd5 '{}' ", + if log_test == MonitorLogTest::Unavailable { + ZERO_REV_SRCMD5.to_owned() + } else { + let dir = assert_ok!( + context + .obs() + .client + .project(build_info.project.to_owned()) + .package(TEST_PACKAGE_1.to_owned()) + .list(None) + .await + ); + + dir.srcmd5 + } + ); + + let (log_contents, log_vs_limit) = if log_test == MonitorLogTest::Short { + (srcmd5_prefix + "short", Ordering::Less) + } else { + ( + srcmd5_prefix + "this is a long log that will need to be trimmed when printed", + Ordering::Greater, + ) + }; + + assert_eq!( + log_contents.len().cmp(&(MONITOR_TEST_LOG_TAIL as usize)), + log_vs_limit + ); + + // Sanity check this, even though test_dput should have already checked it. + assert_eq!(repo.repo, TEST_REPO); + assert!( + repo.arch == TEST_ARCH_1 || repo.arch == TEST_ARCH_2, + "unexpected arch '{}'", + repo.arch + ); + + context.obs().mock.set_package_build_status( + &build_info.project, + &repo.repo, + &repo.arch, + TEST_PACKAGE_1.to_owned(), + MockBuildStatus::new(if success { + MockPackageCode::Succeeded + } else { + MockPackageCode::Failed + }), + ); + context.obs().mock.add_completed_build_log( + &build_info.project, + TEST_REPO, + &repo.arch, + TEST_PACKAGE_1.to_owned(), + MockBuildLog::new(log_contents.to_owned()), + success, + ); + context.obs().mock.set_package_binaries( + &build_info.project, + TEST_REPO, + &repo.arch, + TEST_PACKAGE_1.to_owned(), + [( + MONITOR_TEST_BUILD_RESULT.to_owned(), + MockBinary { + contents: MONITOR_TEST_BUILD_RESULT_CONTENTS.to_vec(), + mtime: SystemTime::now(), + }, + )] + .into(), + ); + + if dput_test != DputTest::ReusePreviousBuild { + // Update the endtime in the background, otherwise the monitor will hang + // forever waiting. 
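+ // The task first sleeps for ten polling intervals (sleep_on_old_status), + // so the monitor sees the stale status before the updated endtime appears.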
+ let mock = context.obs().mock.clone(); + let build_info_2 = build_info.clone(); + let repo_2 = repo.clone(); + tokio::spawn(async move { + tokio::time::sleep(MONITOR_TEST_OLD_STATUS_SLEEP_DURATION * 10).await; + mock.add_job_history( + &build_info_2.project, + &repo_2.repo, + &repo_2.arch, + MockJobHistoryEntry { + package: build_info_2.package, + endtime: SystemTime::UNIX_EPOCH + Duration::from_secs(999), + srcmd5: build_info_2.srcmd5.unwrap(), + ..Default::default() + }, + ); + }); + } + + let monitor = context + .run() + .script(script) + .artifacts(dput.clone()) + .timeout(MONITOR_TEST_OLD_STATUS_SLEEP_DURATION * 20) + .go() + .await; + assert_eq!( + monitor.ok(), + success && log_test != MonitorLogTest::Unavailable + ); + + let log = monitor.log(); + + assert_eq!( + log.contains("unavailable"), + log_test == MonitorLogTest::Unavailable + ); + + // If we reused a previous build, we're not waiting for a new build, so + // don't check for an old build status. + let build_actually_occurred = dput_test != DputTest::ReusePreviousBuild; + assert_eq!( + log.contains("Waiting for build status"), + build_actually_occurred + ); + + assert_eq!( + log.contains(&log_contents), + !success && log_test == MonitorLogTest::Short + ); + + if !success && log_test == MonitorLogTest::Long { + let log_bytes = log_contents.as_bytes(); + let truncated_log_bytes = &log_bytes[log_bytes.len() - (MONITOR_TEST_LOG_TAIL as usize)..]; + assert!(log.contains(String::from_utf8_lossy(truncated_log_bytes).as_ref())); + } + + let results = context.fetch_artifacts(&monitor.artifacts()).await; + let build_result_path = Utf8Path::new(MONITOR_TEST_BUILD_RESULTS_DIR) + .join(MONITOR_TEST_BUILD_RESULT) + .into_string(); + let mut has_built_result = false; + + if log_test != MonitorLogTest::Unavailable { + let full_log = results.get(DEFAULT_BUILD_LOG).unwrap(); + assert_eq!(log_contents, String::from_utf8_lossy(full_log)); + + if success && download_binaries { + let build_result = results.get(&build_result_path).unwrap(); + assert_eq!(MONITOR_TEST_BUILD_RESULT_CONTENTS, &build_result[..]); + + has_built_result = true; + } + } + + if !has_built_result { + assert_none!(results.get(&build_result_path)); + } +} + +pub async fn test_prune_missing_build_info<C: TestContext>(context: &mut C) { + let prune = context.run().command("prune").go().await; + assert!(!prune.ok()); + + let prune = context + .run() + .command("prune --ignore-missing-build-info") + .go() + .await; + assert!(prune.ok()); + + assert!(prune.log().contains("Skipping prune")); +} + +pub async fn test_prune_deleted_package_1_if_branched<C: TestContext>( + context: &C, + build_info: &ObsBuildInfo, + prune: &C::ExecutionResult, +) { + if build_info.is_branched { + assert_err!( + context + .obs() + .client + .project(build_info.project.clone()) + .package(TEST_PACKAGE_1.to_owned()) + .list(None) + .await + ); + } else { + assert!(prune.log().contains("package was not branched")); + + assert_ok!( + context + .obs() + .client + .project(build_info.project.clone()) + .package(TEST_PACKAGE_1.to_owned()) + .list(None) + .await + ); + } +} diff --git a/obs-gitlab-runner/Cargo.toml b/obs-gitlab-runner/Cargo.toml new file mode 100644 index 0000000..7b3e203 --- /dev/null +++ b/obs-gitlab-runner/Cargo.toml @@ -0,0 +1,44 @@ +[package] +name = "obs-gitlab-runner" +version = "0.1.8" +edition = "2024" +license = "MIT OR Apache-2.0" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +async-trait.workspace = true +camino.workspace = true
+clap.workspace = true +color-eyre.workspace = true +derivative.workspace = true +futures-util.workspace = true +gitlab-runner = "0.3.0-rc1" +# gitlab-runner = { path = "../gitlab-runner-rs/gitlab-runner" } +obo-core = { path = "../obo-core" } +obo-test-support = { path = "../obo-test-support" } +open-build-service-api.workspace = true +serde.workspace = true +serde_json.workspace = true +serde_yaml.workspace = true +shellexpand = "3.1" +shell-words.workspace = true +strum = { version = "0.27", features = ["derive"] } +tempfile.workspace = true +thiserror.workspace = true +tokio.workspace = true +tokio-util.workspace = true +tracing.workspace = true +tracing-error = "0.2" +tracing-subscriber = { version = "0.3", features = ["default", "json"] } +url = "2.5" + +[dev-dependencies] +claims.workspace = true +gitlab-runner-mock = "0.2.1" +# gitlab-runner-mock = { path = "../gitlab-runner-rs/gitlab-runner-mock" } +obo-tests = { path = "../obo-tests" } +open-build-service-mock.workspace = true +rstest.workspace = true +wiremock.workspace = true +zip = "5.1" diff --git a/obs-gitlab-runner/README.md b/obs-gitlab-runner/README.md new file mode 100644 index 0000000..5881b3f --- /dev/null +++ b/obs-gitlab-runner/README.md @@ -0,0 +1,150 @@ +# obs-gitlab-runner + +This is a custom [GitLab Runner](https://docs.gitlab.com/runner/) implementation +providing a shell-like command language for starting, monitoring, and cleaning up +builds on [OBS](https://build.opensuse.org/), specifically targeting Debian +packages. + +## Usage + +For information on OBS authentication and the commands supported, see [the +project-wide README](../README.md). + +### Sending Jobs + +In order to send commands in a job to the runner, set the job's `tags:` to +include the tag you used during [the deployment](#deployment), e.g.: + +```yaml +my-job: + tags: + - obs-runner # <-- set to run on runners tagged "obs-runner" + stage: some-stage + script: + # [...] +``` + +This will run all the commands inside `before_script`, `script`, and +`after_script` with the runner. + +### Supported Syntax + +A subset of shell syntax is supported for commands: + +- Commands are split at spaces, but parts can be quoted, just like in the shell: + ```bash + some-command this-is-argument-1 "this entire string is argument 2" + ``` +- Variable substitution is supported, as well as the `${VARIABLE:-DEFAULT}` + syntax to use the value `DEFAULT` if `$VARIABLE` is unset: + ```bash + some-command $a_variable ${a_variable_with_default:-this is used if unset} + ``` + The variables are sourced from the pipeline-wide and job-specific variables + set. + - A significant departure from shell argument parsing is that **variable + contents are auto-quoted, thus spaces inside a variable do not split + arguments**. For example, given `MYVAR=a b c`, this: + ```bash + some-command $MYVAR + ``` + is interpreted as: + ```bash + some-command 'a b c' + ``` + *not*: + ```bash + some-command a b c + ``` + There is no way to use a variable without auto-quoting its contents. + +## Deployment + +### Registering the Runner + +In order to use the runner, you must first register it with your GitLab +instance. This requires the use of a registration token, which can be obtained +via the following steps: + +- Enter the GitLab admin area. +- Navigate to Overview -> Runners. +- Click "Register an instance runner". +- Copy the registration token within. + +(Per-group/-project registration tokens can also be retrieved from the CI/CD +settings of the group or project.) 
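+ +For example, export the values used by the registration examples below (placeholder values; substitute your own instance and token): + +```bash +export GITLAB_SERVER_URL='gitlab.example.com' +export REGISTRATION_TOKEN='<token copied above>' +```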
+ +With this token, you can now register the runner via the [GitLab +API](https://docs.gitlab.com/ee/api/runners.html#register-a-new-runner). + +Example using curl: + +```bash +curl --request POST "https://$GITLAB_SERVER_URL/api/v4/runners" \ + --form description='OBS runner' \ + --form run_untagged=false \ + --form tag_list=obs-runner \ + --form token="$REGISTRATION_TOKEN" +``` + +httpie: + +```bash +http --form POST "https://$GITLAB_SERVER_URL/api/v4/runners" \ + description='OBS runner' \ + run_untagged=false \ + tag_list=obs-runner \ + token="$REGISTRATION_TOKEN" +``` + +**It is critical that you set `run_untagged=false`,** otherwise this runner +will be used for *all* jobs that don't explicitly set a tag, rather than just +the jobs explicitly targeting the runner. + +This API call will return a JSON object containing a `token` key, whose value +is a _runner token_ that is used by the runner to connect to GitLab. + +### Docker + +Docker images are built on every commit, available at +`ghcr.io/collabora/obs-gitlab-runner:main`. The entry point takes two arguments: + +- The GitLab server URL. +- The runner token acquired previously. + +Simple example usage via the Docker CLI: + +```bash +$ docker run --rm -it ghcr.io/collabora/obs-gitlab-runner:main \ + "$GITLAB_SERVER_URL" "$GITLAB_RUNNER_TOKEN" +``` + +In addition, you can instead opt to set the `GITLAB_URL` and `GITLAB_TOKEN` +environment variables: + +```bash +$ docker run --rm -it \ + -e GITLAB_URL="$GITLAB_SERVER_URL" \ + -e GITLAB_TOKEN="$GITLAB_RUNNER_TOKEN" \ + ghcr.io/collabora/obs-gitlab-runner:main +``` + +### Kubernetes + +A [Helm](https://helm.sh/) chart has been provided in the `chart/` directory, +installable via: + +```bash +$ helm install \ + --set-string gitlab.url="$GITLAB_SERVER_URL" \ + --set-string gitlab.token="$GITLAB_RUNNER_TOKEN" \ + obs-gitlab-runner chart +``` + +Upgrades can skip setting `gitlab.token` to re-use the previously set value: + +```bash +$ helm upgrade \ + --set-string gitlab.url="$GITLAB_SERVER_URL" \ + obs-gitlab-runner chart +``` diff --git a/chart/.helmignore b/obs-gitlab-runner/chart/.helmignore similarity index 100% rename from chart/.helmignore rename to obs-gitlab-runner/chart/.helmignore diff --git a/chart/Chart.yaml b/obs-gitlab-runner/chart/Chart.yaml similarity index 100% rename from chart/Chart.yaml rename to obs-gitlab-runner/chart/Chart.yaml diff --git a/chart/templates/_helpers.tpl b/obs-gitlab-runner/chart/templates/_helpers.tpl similarity index 100% rename from chart/templates/_helpers.tpl rename to obs-gitlab-runner/chart/templates/_helpers.tpl diff --git a/chart/templates/deployment.yaml b/obs-gitlab-runner/chart/templates/deployment.yaml similarity index 100% rename from chart/templates/deployment.yaml rename to obs-gitlab-runner/chart/templates/deployment.yaml diff --git a/chart/templates/gitlab-token-secret.yaml b/obs-gitlab-runner/chart/templates/gitlab-token-secret.yaml similarity index 100% rename from chart/templates/gitlab-token-secret.yaml rename to obs-gitlab-runner/chart/templates/gitlab-token-secret.yaml diff --git a/chart/templates/hpa.yaml b/obs-gitlab-runner/chart/templates/hpa.yaml similarity index 100% rename from chart/templates/hpa.yaml rename to obs-gitlab-runner/chart/templates/hpa.yaml diff --git a/chart/values.yaml b/obs-gitlab-runner/chart/values.yaml similarity index 100% rename from chart/values.yaml rename to obs-gitlab-runner/chart/values.yaml diff --git a/obs-gitlab-runner/src/handler.rs b/obs-gitlab-runner/src/handler.rs new file mode 100644 index 
0000000..b307b91 --- /dev/null +++ b/obs-gitlab-runner/src/handler.rs @@ -0,0 +1,1202 @@ +use std::{ + borrow::Cow, + collections::{HashMap, HashSet}, + io::Seek, +}; + +use async_trait::async_trait; +use camino::{Utf8Path, Utf8PathBuf}; +use clap::{Parser, Subcommand}; +use color_eyre::eyre::{Context, Report, Result, eyre}; +use derivative::*; +use futures_util::StreamExt; +use gitlab_runner::{ + JobHandler, JobResult, Phase, UploadableFile, + job::{Dependency, Job, Variable}, +}; +use obo_core::{ + actions::{ + Actions, DEFAULT_BUILD_INFO, DEFAULT_BUILD_LOG, DownloadBinariesAction, DputAction, + FailedBuild, FlagSupportingExplicitValue, LOG_TAIL_2MB, MonitorAction, ObsBuildInfo, + PruneAction, + }, + artifacts::{ArtifactDirectory, ArtifactReader, ArtifactWriter, MissingArtifact, SaveCallback}, + monitor::PackageMonitoringOptions, + outputln, +}; + use open_build_service_api as obs; +use tokio::fs::File as AsyncFile; +use tokio_util::{ + compat::{Compat, TokioAsyncReadCompatExt}, + io::ReaderStream, +}; +use tracing::{error, instrument, warn}; + +use crate::pipeline::{ + GeneratePipelineOptions, PipelineDownloadBinaries, generate_monitor_pipeline, +}; + +const DEFAULT_MONITOR_PIPELINE: &str = "obs.yml"; +const DEFAULT_PIPELINE_JOB_PREFIX: &str = "obs"; +const DEFAULT_ARTIFACT_EXPIRATION: &str = "3 days"; + +#[derive(Parser, Debug)] +struct GenerateMonitorAction { + tag: String, + #[clap(long)] + rules: Option<String>, + #[clap(long = "download-build-results-to")] + build_results_dir: Option<Utf8PathBuf>, + #[clap(long, default_value_t = DEFAULT_BUILD_INFO.to_owned())] + build_info: String, + #[clap(long, default_value_t = DEFAULT_MONITOR_PIPELINE.to_owned())] + pipeline_out: String, + #[clap(long, default_value_t = DEFAULT_PIPELINE_JOB_PREFIX.to_owned())] + job_prefix: String, + #[clap(long)] + job_timeout: Option<String>, + #[clap(long, default_value_t = DEFAULT_ARTIFACT_EXPIRATION.to_owned())] + artifact_expiration: String, + #[clap(long, default_value_t = DEFAULT_BUILD_LOG.into())] + build_log_out: String, +} + +#[derive(Parser, Debug)] +struct EchoAction { + args: Vec<String>, + #[clap(long, flag_supporting_explicit_value())] + fail: bool, + #[clap(long, flag_supporting_explicit_value())] + uppercase: bool, + #[clap(long, default_value = " ")] + sep: String, +} + +#[derive(Subcommand)] +enum CommonAction { + Dput(DputAction), + Monitor(MonitorAction), + DownloadBinaries(DownloadBinariesAction), + Prune { + #[clap(long, flag_supporting_explicit_value())] + only_if_job_unsuccessful: bool, + #[clap(flatten)] + args: PruneAction, + }, +} + +#[derive(Subcommand)] +enum JobAction { + #[clap(flatten)] + Common(CommonAction), + GenerateMonitor(GenerateMonitorAction), + #[cfg(test)] + Echo(EchoAction), +} + +#[derive(Parser)] +#[clap(bin_name = "obs-gitlab-runner")] +#[clap(no_binary_name = true)] +struct Command { + #[clap(subcommand)] + action: JobAction, +} + +fn get_job_variable<'job>(job: &'job Job, key: &str) -> Result<Variable<'job>> { + job.variable(key) + .ok_or_else(|| eyre!("Failed to get variable ${}", key)) +} + +#[derive(Debug, Derivative)] +#[derivative(Default)] +pub struct HandlerOptions { + pub monitor: PackageMonitoringOptions, + pub default_monitor_job_timeout: Option<String>, + #[derivative(Default(value = "LOG_TAIL_2MB"))] + pub log_tail: u64, +} + +struct GitLabArtifacts<'a> { + job: &'a Job, + artifacts: &'a mut HashMap<Utf8PathBuf, ArtifactReader>, +} + +#[async_trait] +impl ArtifactDirectory for GitLabArtifacts<'_> { + #[instrument(skip(self, path), path = path.as_ref())] + async fn open(&self, path: impl AsRef<Utf8Path> + Send) -> Result<ArtifactReader> { + let path =
path.as_ref(); + + if let Some(file) = self.artifacts.get(path) { + let file = file + .try_clone() + .await + .wrap_err("Failed to reopen artifact")?; + return Ok(file); + } + + for dep in self.job.dependencies() { + if let Some(file) = check_for_artifact(dep, path).await? { + return Ok(file); + } + } + + Err(MissingArtifact(path.to_owned()).into()) + } + + #[tracing::instrument(skip(self, path, func), path = path.as_ref())] + async fn save_with(&mut self, path: P, func: F) -> Result + where + Report: From, + Ret: Send, + Err: Send, + F: for<'a> SaveCallback<'a, Ret, Err> + Send, + P: AsRef + Send, + { + let mut writer = ArtifactWriter::new_anon().await?; + let ret = func(&mut writer).await?; + self.artifacts + .insert(path.as_ref().to_owned(), writer.into_reader().await?); + Ok(ret) + } +} + +pub struct ObsJobHandler { + job: Job, + options: HandlerOptions, + + actions: Actions, + script_failed: bool, + artifacts: HashMap, +} + +impl ObsJobHandler { + pub fn new(job: Job, client: obs::Client, options: HandlerOptions) -> Self { + ObsJobHandler { + job, + options, + actions: Actions { client }, + script_failed: false, + artifacts: HashMap::new(), + } + } + + #[instrument(skip_all, fields(job = job.id()))] + pub fn from_obs_config_in_job(job: Job, options: HandlerOptions) -> Result { + let obs_server = get_job_variable(&job, "OBS_SERVER")?; + let obs_user = get_job_variable(&job, "OBS_USER")?; + let obs_password = get_job_variable(&job, "OBS_PASSWORD")?; + + let client = obs::Client::new( + obs_server.value().try_into().wrap_err("Invalid URL")?, + obs_user.value().to_owned(), + obs_password.value().to_owned(), + ); + Ok(ObsJobHandler::new(job, client, options)) + } + + fn expand_vars<'s>( + &self, + s: &'s str, + quote: bool, + expanding: &mut HashSet, + ) -> Cow<'s, str> { + shellexpand::env_with_context_no_errors(s, |var| { + if !expanding.insert(var.to_owned()) { + return Some("".to_string()); + } + + let value = self.job.variable(var).map_or("", |v| v.value()); + let expanded = self.expand_vars(value, false, expanding); + expanding.remove(var); + Some( + if quote { + shell_words::quote(expanded.as_ref()) + } else { + expanded + } + .into_owned(), + ) + }) + } + + #[instrument(skip(self))] + async fn generate_monitor(&mut self, args: GenerateMonitorAction) -> Result<()> { + let mut artifacts = GitLabArtifacts { + job: &self.job, + artifacts: &mut self.artifacts, + }; + + let build_info_data = artifacts.read_string(&args.build_info).await?; + let build_info: ObsBuildInfo = serde_json::from_str(&build_info_data) + .wrap_err("Failed to parse provided build info file")?; + + let pipeline = generate_monitor_pipeline( + &build_info.project, + &build_info.package, + &build_info + .rev + .ok_or_else(|| eyre!("Build revision was not set"))?, + &build_info + .srcmd5 + .ok_or_else(|| eyre!("Build srcmd5 was not set"))?, + &build_info.enabled_repos, + GeneratePipelineOptions { + tags: vec![args.tag], + artifact_expiration: args.artifact_expiration, + prefix: args.job_prefix, + timeout: args + .job_timeout + .or_else(|| self.options.default_monitor_job_timeout.clone()), + rules: args.rules, + download_binaries: if let Some(build_results_dir) = args.build_results_dir { + PipelineDownloadBinaries::OnSuccess { + build_results_dir: build_results_dir.into_string(), + } + } else { + PipelineDownloadBinaries::Never + }, + build_log_out: args.build_log_out.to_string(), + }, + )?; + + artifacts + .write(&args.pipeline_out, pipeline.as_bytes()) + .await?; + outputln!("Wrote pipeline file '{}'.", 
args.pipeline_out); + + Ok(()) + } + + #[instrument(skip(self))] + async fn command(&mut self, cmdline: &str) -> Result<()> { + // TODO: inject user? + let cmdline = self.expand_vars(cmdline, true, &mut HashSet::new()); + + outputln!("> {}", cmdline); + + let args = shell_words::split(&cmdline).wrap_err("Invalid command line")?; + let command = Command::try_parse_from(args)?; + + match command.action { + JobAction::Common(action) => { + let mut artifacts = GitLabArtifacts { + job: &self.job, + artifacts: &mut self.artifacts, + }; + + match action { + CommonAction::Dput(args) => self.actions.dput(args, &mut artifacts).await?, + CommonAction::Monitor(args) => { + self.actions + .monitor( + args, + self.options.monitor.clone(), + async |file| { + let mut log_stream = ReaderStream::new(file); + while let Some(bytes) = log_stream.next().await { + let bytes = bytes.wrap_err("Failed to stream log bytes")?; + self.job.trace(String::from_utf8_lossy(&bytes).as_ref()); + } + Ok(()) + }, + self.options.log_tail, + &mut artifacts, + ) + .await? + } + CommonAction::DownloadBinaries(args) => { + self.actions.download_binaries(args, &mut artifacts).await? + } + CommonAction::Prune { + only_if_job_unsuccessful: true, + .. + } if !self.script_failed => { + outputln!("Skipping prune: main script was successful.") + } + CommonAction::Prune { args, .. } => { + self.actions.prune(args, &artifacts).await? + } + } + } + JobAction::GenerateMonitor(args) => self.generate_monitor(args).await?, + #[cfg(test)] + JobAction::Echo(args) => { + use color_eyre::eyre::ensure; + + let mut output = args.args.join(&args.sep); + if args.uppercase { + output = output.to_uppercase(); + } + + outputln!("{}", output); + ensure!(!args.fail, "Failed"); + } + } + + Ok(()) + } +} + +pub struct UploadableArtifact { + path: Utf8PathBuf, + file: ArtifactReader, +} + +#[async_trait] +impl UploadableFile for UploadableArtifact { + type Data<'a> = Compat; + + fn get_path(&self) -> Cow<'_, str> { + Cow::Borrowed(self.path.as_str()) + } + + async fn get_data(&self) -> Result, ()> { + self.file + .try_clone() + .await + .map(TokioAsyncReadCompatExt::compat) + .map_err(|e| { + warn!("Failed to clone {}: {e}", self.path); + }) + } +} + +#[async_trait] +impl JobHandler for ObsJobHandler { + async fn step(&mut self, script: &[String], _phase: Phase) -> JobResult { + for command in script { + if let Err(err) = self.command(command).await { + // Failed builds would already have information on them printed + // above, so don't print anything on them again. + if !err.is::() { + error!(gitlab.output = true, "Error running command: {:?}", err); + } + + self.script_failed = true; + return Err(()); + } + } + + Ok(()) + } + + async fn get_uploadable_files( + &mut self, + ) -> Result + Send>, ()> { + let mut files = vec![]; + for (path, file) in &mut self.artifacts { + match file.try_clone().await { + Ok(file) => files.push(UploadableArtifact { + path: path.clone(), + file, + }), + Err(err) => error!( + gitlab.output = true, + "Failed to prepare to upload {path}: {err:?}" + ), + } + } + + Ok(Box::new(files.into_iter())) + } +} + +#[instrument(skip(dep), fields(dep_id = dep.id(), dep_name = dep.name()))] +async fn check_for_artifact( + dep: Dependency<'_>, + path: &Utf8Path, +) -> Result> { + // This needs to be an owned type, because captured by spawn_blocking must + // have a 'static lifetime, so we also take the opportunity to normalize the + // path from extra trailing slashes and similar. 
+ let path = path.components().collect::(); + + // TODO: not spawn a sync environment for *every single artifact* + if let Some(mut artifact) = dep.download().await? { + if let Some(file) = tokio::task::spawn_blocking(move || { + artifact + .file(path.as_str()) + .map(|mut file| { + let mut temp = tempfile::tempfile()?; + std::io::copy(&mut file, &mut temp)?; + temp.rewind()?; + Ok::<_, Report>(temp) + }) + .transpose() + }) + .await?? + { + return Ok(Some( + ArtifactReader::from_async_file(&AsyncFile::from_std(file)).await?, + )); + } + } + + Ok(None) +} + +#[cfg(test)] +mod tests { + use std::{ + io::{Cursor, Read}, + marker::PhantomData, + sync::{Arc, Once}, + time::Duration, + }; + + use claims::*; + use gitlab_runner::{GitlabLayer, Runner, RunnerBuilder}; + use gitlab_runner_mock::*; + use obo_core::build_meta::{EnabledRepo, RepoArch}; + use obo_test_support::*; + use obo_tests::*; + use rstest::rstest; + use tempfile::TempDir; + use tracing::{Level, instrument::WithSubscriber}; + use tracing_subscriber::{Layer, Registry, filter::Targets, prelude::*}; + use zip::ZipArchive; + + use crate::logging::GitLabForwarder; + + use super::*; + + struct PutArtifactsHandler { + artifacts: Arc>>, + } + + struct PutArtifactsFile { + artifacts: Arc>>, + path: String, + } + + #[async_trait] + impl UploadableFile for PutArtifactsFile { + type Data<'a> = Compat>>; + + fn get_path(&self) -> Cow<'_, str> { + Cow::Borrowed(&self.path) + } + + async fn get_data(&self) -> Result, ()> { + Ok(Cursor::new(self.artifacts.get(&self.path).unwrap()).compat()) + } + } + + #[async_trait] + impl JobHandler for PutArtifactsHandler { + async fn step(&mut self, _script: &[String], _phase: Phase) -> JobResult { + Ok(()) + } + + async fn get_uploadable_files( + &mut self, + ) -> Result + Send>, ()> { + Ok(Box::new( + self.artifacts + .keys() + .map(|k| PutArtifactsFile { + artifacts: self.artifacts.clone(), + path: k.to_owned(), + }) + .collect::>() + .into_iter(), + )) + } + } + + #[derive(Clone, Debug)] + struct GitLabArtifactsHandle(MockJob); + + impl ArtifactsHandle for GitLabArtifactsHandle {} + + #[derive(Clone, Debug)] + struct GitLabExecutionResult(MockJob); + + impl ExecutionResult for GitLabExecutionResult { + type Artifacts = GitLabArtifactsHandle; + + fn ok(&self) -> bool { + self.0.state() == MockJobState::Success + } + + fn log(&self) -> String { + String::from_utf8_lossy(&self.0.log()).into_owned() + } + + fn artifacts(&self) -> Self::Artifacts { + GitLabArtifactsHandle(self.0.clone()) + } + } + + // All the GitLabRunHandlerWrapper* boilerplate is basically a trick to be + // able to type-erase the generics of Runner::request_job(), so that you can + // pass a job handler to the RunBuilder without having to then propagate the + // handler's type everywhere. Ideally we could just use a lambda, but that + // causes a variety of lifetime issues due to the various constraints around + // async function types. 
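+ // Holding the FnOnce in an Option and take()-ing it inside request_job() is + // what lets a one-shot closure be called through a &mut trait object.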
+ #[async_trait] + trait GitLabRunHandlerWrapper: Send + Sync { + async fn request_job(&mut self, runner: &mut Runner); + } + + struct GitLabRunHandlerWrapperImpl< + U: UploadableFile + Send + Sync + 'static, + H: JobHandler + Send + Sync + 'static, + Func: (FnOnce(Job) -> H) + Send + Sync + 'static, + > { + func: Option, + _phantom: PhantomData<(U, H)>, + } + + #[async_trait] + impl< + U: UploadableFile + Send + Sync + 'static, + H: JobHandler + Send + Sync + 'static, + Func: (FnOnce(Job) -> H) + Send + Sync + 'static, + > GitLabRunHandlerWrapper for GitLabRunHandlerWrapperImpl + { + async fn request_job(&mut self, runner: &mut Runner) { + let handler = self + .func + .take() + .expect("request_job can only be called once"); + let got_job = runner + .request_job(move |job| futures_util::future::ready(Ok(handler(job)))) + .await + .unwrap(); + assert!(got_job); + } + } + + struct GitLabRunBuilder<'context> { + context: &'context mut GitLabTestContext, + script: Vec, + after_script: Vec, + dependencies: Vec, + variables: HashMap, + handler: Box, + timeout: Duration, + } + + fn create_obs_job_handler_factory( + options: HandlerOptions, + ) -> impl FnOnce(Job) -> ObsJobHandler { + move |job| assert_ok!(ObsJobHandler::from_obs_config_in_job(job, options)) + } + + impl GitLabRunBuilder<'_> { + fn after_command(self, cmd: impl Into) -> Self { + self.after_script(&[cmd.into()]) + } + + fn after_script(mut self, script: &[String]) -> Self { + self.after_script.extend_from_slice(script); + self + } + + fn variable(mut self, name: impl Into, value: impl Into) -> Self { + self.variables.insert(name.into(), value.into()); + self + } + + fn job_handler_factory< + U: UploadableFile + Send + Sync + 'static, + H: JobHandler + Send + Sync + 'static, + Func: (FnOnce(Job) -> H) + Send + Sync + 'static, + >( + mut self, + handler_func: Func, + ) -> Self { + self.handler = Box::new(GitLabRunHandlerWrapperImpl { + func: Some(handler_func), + _phantom: PhantomData, + }); + self + } + + fn obs_job_handler(self, options: HandlerOptions) -> Self { + self.job_handler_factory(create_obs_job_handler_factory(options)) + } + } + + #[async_trait] + impl<'context> RunBuilder<'context> for GitLabRunBuilder<'context> { + type ArtifactsHandle = GitLabArtifactsHandle; + type ExecutionResult = GitLabExecutionResult; + + fn script(mut self, script: &[String]) -> Self { + self.script.extend_from_slice(script); + self + } + + fn artifacts(mut self, artifacts: Self::ArtifactsHandle) -> Self { + self.dependencies.push(artifacts.0); + self + } + + fn timeout(mut self, timeout: Duration) -> Self { + self.timeout = timeout; + self + } + + async fn go(mut self) -> Self::ExecutionResult { + const JOB_TIMEOUT: u64 = 3600; + + let mut builder = self.context.gitlab_mock.job_builder("test".to_owned()); + + builder.add_step( + MockJobStepName::Script, + self.script, + JOB_TIMEOUT, + MockJobStepWhen::OnSuccess, + false, + ); + + if !self.after_script.is_empty() { + builder.add_step( + MockJobStepName::AfterScript, + self.after_script, + JOB_TIMEOUT, + MockJobStepWhen::OnSuccess, + false, + ); + } + + builder.add_artifact( + None, + false, + vec!["*".to_owned()], + Some(MockJobArtifactWhen::Always), + "archive".to_owned(), + Some("zip".to_owned()), + None, + ); + + for dependency in self.dependencies { + builder.dependency(dependency); + } + for (key, value) in self.variables { + builder.add_variable(key, value, true, false); + } + + builder.add_variable( + "OBS_SERVER".to_owned(), + self.context.obs.client.url().to_string(), + false, + true, 
+ ); + builder.add_variable("OBS_USER".to_owned(), TEST_USER.to_owned(), false, true); + builder.add_variable("OBS_PASSWORD".to_owned(), TEST_PASS.to_owned(), false, true); + + let job = builder.build(); + self.context.gitlab_mock.enqueue_job(job.clone()); + + self.handler.request_job(&mut self.context.runner).await; + self.context.runner.wait_for_space(1).await; + + GitLabExecutionResult(job) + } + } + + const DEFAULT_HANDLER_OPTIONS: HandlerOptions = HandlerOptions { + default_monitor_job_timeout: None, + log_tail: MONITOR_TEST_LOG_TAIL, + monitor: PackageMonitoringOptions { + sleep_on_building: Duration::ZERO, + sleep_on_old_status: MONITOR_TEST_OLD_STATUS_SLEEP_DURATION, + // High limit, since we don't really test that + // functionality in the handler tests. + max_old_status_retries: 99, + }, + }; + + struct GitLabTestContext { + _runner_dir: TempDir, + gitlab_mock: GitlabRunnerMock, + runner: Runner, + obs: ObsContext, + } + + #[async_trait] + impl TestContext for GitLabTestContext { + type ArtifactsHandle = GitLabArtifactsHandle; + type ExecutionResult = GitLabExecutionResult; + type RunBuilder<'context> = GitLabRunBuilder<'context>; + + fn obs(&self) -> &ObsContext { + &self.obs + } + + async fn inject_artifacts( + &mut self, + artifacts: HashMap>, + ) -> Self::ArtifactsHandle { + self.run() + .job_handler_factory(|_| PutArtifactsHandler { + artifacts: Arc::new(artifacts), + }) + .command("dummy") + .go() + .await + .artifacts() + } + + async fn fetch_artifacts( + &self, + handle: &Self::ArtifactsHandle, + ) -> HashMap> { + let Some(artifact) = handle.0.uploaded_artifacts().next() else { + return Default::default(); + }; + + let data = (*artifact.data).clone(); + assert!(!data.is_empty()); + + let cursor = Cursor::new(data); + let mut zip = ZipArchive::new(cursor).unwrap(); + + (0..zip.len()) + .map(|i| { + let mut file = zip.by_index(i).unwrap(); + + let mut contents = vec![]; + file.read_to_end(&mut contents).unwrap(); + + (file.name().to_owned(), contents) + }) + .collect() + } + + fn run(&mut self) -> Self::RunBuilder<'_> { + GitLabRunBuilder { + context: self, + script: vec![], + after_script: vec![], + dependencies: vec![], + variables: Default::default(), + handler: Box::new(GitLabRunHandlerWrapperImpl { + func: Some(create_obs_job_handler_factory(DEFAULT_HANDLER_OPTIONS)), + _phantom: PhantomData, + }), + timeout: EXECUTION_DEFAULT_TIMEOUT, + } + } + } + + static COLOR_EYRE_INSTALL: Once = Once::new(); + + async fn with_context(func: impl AsyncFnOnce(GitLabTestContext) -> T) -> T { + COLOR_EYRE_INSTALL.call_once(|| color_eyre::install().unwrap()); + + let runner_dir = tempfile::tempdir().unwrap(); + let gitlab_mock = GitlabRunnerMock::start().await; + let (layer, jobs) = GitlabLayer::new(); + let runner = RunnerBuilder::new( + gitlab_mock.uri(), + gitlab_mock.runner_token().to_owned(), + runner_dir.path().to_owned(), + jobs, + ) + .build() + .await; + + let obs_mock = create_default_mock().await; + let obs_client = create_default_client(&obs_mock); + + let ctx = GitLabTestContext { + _runner_dir: runner_dir, + gitlab_mock, + runner, + obs: ObsContext { + client: obs_client, + mock: obs_mock, + }, + }; + + func(ctx) + .with_subscriber( + Registry::default() + .with( + tracing_subscriber::fmt::layer() + .with_test_writer() + .with_filter( + Targets::new().with_target("obs_gitlab_runner", Level::TRACE), + ), + ) + .with(tracing_error::ErrorLayer::default()) + .with(GitLabForwarder::new(layer)), + ) + .await + } + + async fn test_monitoring_with_generation( + context: &mut 
GitLabTestContext, + dput: GitLabArtifactsHandle, + build_info: &ObsBuildInfo, + success: bool, + dput_test: DputTest, + log_test: MonitorLogTest, + download_binaries: bool, + ) { + const TEST_JOB_RUNNER_TAG: &str = "test-tag"; + const TEST_MONITOR_TIMEOUT: &str = "1 day"; + + let mut generate_command = format!( + "generate-monitor {TEST_JOB_RUNNER_TAG} \ + --job-timeout '{TEST_MONITOR_TIMEOUT}' \ + --rules '[{{a: 1}}, {{b: 2}}]'" + ); + if download_binaries { + generate_command += + &format!(" --download-build-results-to {MONITOR_TEST_BUILD_RESULTS_DIR}"); + } + + let generate = context + .run() + .command(generate_command) + .artifacts(dput.clone()) + .go() + .await; + assert!(generate.ok()); + + let results = context.fetch_artifacts(&generate.artifacts()).await; + let pipeline_yaml: serde_yaml::Value = assert_ok!(serde_yaml::from_slice( + results.get(DEFAULT_MONITOR_PIPELINE).unwrap() + )); + let pipeline_map = pipeline_yaml.as_mapping().unwrap(); + + assert_eq!(pipeline_map.len(), build_info.enabled_repos.len()); + + for enabled in &build_info.enabled_repos { + let monitor_job_name = format!( + "{}-{}-{}", + DEFAULT_PIPELINE_JOB_PREFIX, TEST_REPO, &enabled.repo_arch.arch + ); + + let monitor_map = pipeline_yaml + .as_mapping() + .unwrap() + .get(monitor_job_name.as_str()) + .unwrap() + .as_mapping() + .unwrap(); + + let artifacts = monitor_map.get("artifacts").unwrap().as_mapping().unwrap(); + assert_eq!( + artifacts.get("expire_in").unwrap().as_str().unwrap(), + DEFAULT_ARTIFACT_EXPIRATION + ); + + let mut artifact_paths: Vec<_> = artifacts + .get("paths") + .unwrap() + .as_sequence() + .unwrap() + .iter() + .map(|item| item.as_str().unwrap()) + .collect(); + artifact_paths.sort(); + + if download_binaries { + assert_eq!( + &artifact_paths, + &[DEFAULT_BUILD_LOG, MONITOR_TEST_BUILD_RESULTS_DIR] + ); + } else { + assert_eq!(&artifact_paths, &[DEFAULT_BUILD_LOG]); + } + + let tags = monitor_map.get("tags").unwrap().as_sequence().unwrap(); + assert_eq!(tags.len(), 1); + assert_eq!(tags[0].as_str().unwrap(), TEST_JOB_RUNNER_TAG); + + let timeout = monitor_map.get("timeout").unwrap().as_str().unwrap(); + assert_eq!(timeout, TEST_MONITOR_TIMEOUT); + + let rules: Vec<_> = monitor_map + .get("rules") + .unwrap() + .as_sequence() + .unwrap() + .iter() + .map(|v| v.as_mapping().unwrap()) + .collect(); + assert_eq!(rules.len(), 2); + + assert_eq!(rules[0].get("a").unwrap().as_i64().unwrap(), 1); + assert_eq!(rules[1].get("b").unwrap().as_i64().unwrap(), 2); + + for script_key in ["before_script", "after_script"] { + let script = monitor_map.get(script_key).unwrap().as_sequence().unwrap(); + assert_eq!(script.len(), 0); + } + + let script = monitor_map + .get("script") + .unwrap() + .as_sequence() + .unwrap() + .iter() + .map(|v| v.as_str().unwrap().to_owned()) + .collect::>(); + + test_monitoring( + context, + dput.clone(), + build_info, + &enabled.repo_arch, + &script, + success, + dput_test, + log_test, + download_binaries, + ) + .await; + } + } + + async fn test_prune( + context: &mut GitLabTestContext, + dput: GitLabArtifactsHandle, + build_info: &ObsBuildInfo, + only_if_job_unsuccessful: bool, + ) { + test_prune_missing_build_info(context).await; + + let prune = if only_if_job_unsuccessful { + let prune = context + .run() + .command("echo") + .after_command("prune --only-if-job-unsuccessful") + .artifacts(dput.clone()) + .go() + .await; + + assert!(prune.ok()); + assert!(prune.log().contains("Skipping prune")); + + assert_ok!( + context + .obs() + .client + 
.project(build_info.project.clone()) + .package(TEST_PACKAGE_1.to_owned()) + .list(None) + .await + ); + + context + .run() + .command("echo --fail") + .after_command("prune --only-if-job-unsuccessful") + .artifacts(dput.clone()) + .go() + .await + } else { + context + .run() + .command("prune") + .artifacts(dput.clone()) + .go() + .await + }; + + test_prune_deleted_package_1_if_branched(context, build_info, &prune).await; + } + + #[rstest] + #[tokio::test] + async fn test_handler_flow( + #[values( + DputTest::Basic, + DputTest::Rebuild, + DputTest::ReusePreviousBuild, + DputTest::Branch + )] + dput_test: DputTest, + #[values(true, false)] build_success: bool, + #[values( + MonitorLogTest::Long, + MonitorLogTest::Short, + MonitorLogTest::Unavailable + )] + log_test: MonitorLogTest, + #[values(true, false)] download_binaries: bool, + #[values(true, false)] prune_only_if_job_unsuccessful: bool, + ) { + with_context(async |mut context| { + let (dput, build_info) = test_dput(&mut context, dput_test).await; + + test_monitoring_with_generation( + &mut context, + dput.clone(), + &build_info, + build_success, + dput_test, + log_test, + download_binaries, + ) + .await; + + test_prune( + &mut context, + dput.clone(), + &build_info, + prune_only_if_job_unsuccessful, + ) + .await; + }) + .await; + } + + #[rstest] + #[tokio::test] + async fn test_variable_expansion() { + with_context(async |mut context| { + let expansion = context + .run() + .variable("ESCAPED", "this should not appear") + .variable("QUOTED", "spaces should be preserved") + .variable("RECURSIVE", "recursion($RECURSIVE)") + .command("echo --sep ; $MISSING $$ESCAPED $QUOTED $RECURSIVE") + .go() + .await; + assert!(expansion.ok()); + + assert_eq!( + expansion.log().lines().last().unwrap(), + ";$ESCAPED;spaces should be preserved;recursion()" + ); + }) + .await; + } + + #[rstest] + #[tokio::test] + async fn test_flag_parsing() { + with_context(async |mut context| { + let echo = context.run().command("echo --uppercase false").go().await; + assert!(echo.ok()); + + assert_eq!(echo.log().lines().last().unwrap(), "FALSE"); + + let echo = context + .run() + .command("echo --uppercase=false true") + .go() + .await; + assert!(echo.ok()); + + assert_eq!(echo.log().lines().last().unwrap(), "true"); + + let echo = context + .run() + .command("echo --uppercase=true false") + .go() + .await; + assert!(echo.ok()); + + assert_eq!(echo.log().lines().last().unwrap(), "FALSE"); + + let echo = context.run().command("echo --uppercase=X false").go().await; + assert!(!echo.ok()); + }) + .await; + } + + #[derive(Debug, PartialEq, Eq)] + enum GenerateMonitorTimeoutLocation { + HandlerOption, + Argument, + } + + #[rstest] + #[tokio::test] + async fn test_generate_monitor_timeouts( + #[values( + None, + Some(GenerateMonitorTimeoutLocation::HandlerOption), + Some(GenerateMonitorTimeoutLocation::Argument) + )] + test: Option, + ) { + const TEST_MONITOR_TIMEOUT: &str = "10 minutes"; + + with_context(async |mut context| { + let build_info = ObsBuildInfo { + project: TEST_PROJECT.to_owned(), + package: TEST_PACKAGE_1.to_owned(), + rev: Some("1".to_owned()), + srcmd5: Some("abc".to_owned()), + is_branched: false, + enabled_repos: vec![EnabledRepo { + repo_arch: RepoArch { + repo: TEST_REPO.to_owned(), + arch: TEST_ARCH_1.to_owned(), + }, + + prev_endtime_for_commit: None, + }], + }; + + let build_info = context + .inject_artifacts( + [( + DEFAULT_BUILD_INFO.to_owned(), + serde_json::to_string(&build_info).unwrap().into_bytes(), + )] + .into(), + ) + .await; + + let 
generate_builder = if test == Some(GenerateMonitorTimeoutLocation::Argument) { + context.run().command(format!( + "generate-monitor tag --job-timeout '{TEST_MONITOR_TIMEOUT}'" + )) + } else { + context.run().command("generate-monitor tag") + } + .artifacts(build_info); + + let generate = if test == Some(GenerateMonitorTimeoutLocation::HandlerOption) { + generate_builder + .obs_job_handler(HandlerOptions { + default_monitor_job_timeout: Some(TEST_MONITOR_TIMEOUT.to_owned()), + ..DEFAULT_HANDLER_OPTIONS + }) + .go() + .await + } else { + generate_builder.go().await + }; + + assert!(generate.ok()); + + let results = context.fetch_artifacts(&generate.artifacts()).await; + let pipeline_yaml: serde_yaml::Value = assert_ok!(serde_yaml::from_slice( + results.get(DEFAULT_MONITOR_PIPELINE).unwrap() + )); + let pipeline_map = pipeline_yaml.as_mapping().unwrap(); + + let monitor_map = pipeline_map + .into_iter() + .next() + .unwrap() + .1 + .as_mapping() + .unwrap(); + + let timeout_yaml = monitor_map.get("timeout"); + if test.is_some() { + assert_eq!( + timeout_yaml.unwrap().as_str().unwrap(), + TEST_MONITOR_TIMEOUT + ); + } else { + assert_none!(timeout_yaml); + } + }) + .await; + } +} diff --git a/src/logging.rs b/obs-gitlab-runner/src/logging.rs similarity index 77% rename from src/logging.rs rename to obs-gitlab-runner/src/logging.rs index 138c9ab..41e11b9 100644 --- a/src/logging.rs +++ b/obs-gitlab-runner/src/logging.rs @@ -1,7 +1,8 @@ use gitlab_runner::GitlabLayer; +use obo_core::logging::{get_event_message, is_output_field_set_in_event}; use tracing::{ Event, Level, Metadata, Subscriber, - field::{self, Field, FieldSet}, + field::FieldSet, span::{Attributes, Id}, subscriber::Interest, }; @@ -12,37 +13,9 @@ use tracing_subscriber::{ registry::LookupSpan, }; -struct OutputTester(bool); - -impl field::Visit for OutputTester { - fn record_bool(&mut self, field: &field::Field, value: bool) { - if field.name() == "obs_gitlab_runner.output" { - self.0 = value - } - } - - fn record_debug(&mut self, _field: &field::Field, _value: &dyn std::fmt::Debug) {} -} - -struct MessageExtractor(Option); - -impl field::Visit for MessageExtractor { - fn record_str(&mut self, field: &Field, value: &str) { - if field.name() == "message" { - self.0 = Some(value.to_owned()); - } - } - - fn record_debug(&mut self, field: &Field, value: &dyn std::fmt::Debug) { - if field.name() == "message" { - self.0 = Some(format!("{value:?}")); - } - } -} - // This mostly wraps a standard GitlabLayer, but it bypasses the filter to pass -// through any events with `obs_gitlab_runner.output` set, rewriting them to -// instead use `gitlab.output`. +// through any events with TRACING_FIELD set set, rewriting them to instead use +// `gitlab.output`. pub struct GitLabForwarder>(Filtered); impl LookupSpan<'span>, F: Filter + 'static> @@ -50,6 +23,7 @@ impl LookupSpan<'span>, F: Fi { pub fn new(inner: Filtered) -> Filtered { GitLabForwarder(inner).with_filter(Targets::new().with_targets([ + ("obo_core", Level::TRACE), ("obs_gitlab_runner", Level::TRACE), // This target is used to inject the current job ID, which // gitlab-runner needs to actually send the logs out. @@ -99,17 +73,13 @@ impl LookupSpan<'span>, F: Fi } fn on_event(&self, event: &Event<'_>, ctx: Context<'_, S>) { - let mut visitor = OutputTester(false); - event.record(&mut visitor); - if !visitor.0 { + if !is_output_field_set_in_event(event) { // No special behavior needed, so just forward it as-is. 
self.0.on_event(event, ctx); return; } - let mut visitor = MessageExtractor(None); - event.record(&mut visitor); - let Some(message) = visitor.0 else { + let Some(message) = get_event_message(event) else { return; }; @@ -157,10 +127,3 @@ impl LookupSpan<'span>, F: Fi self.0.on_id_change(old, new, ctx); } } - -#[macro_export] -macro_rules! outputln { - ($($args:tt)*) => { - ::tracing::trace!(obs_gitlab_runner.output = true, $($args)*) - }; -} diff --git a/src/main.rs b/obs-gitlab-runner/src/main.rs similarity index 96% rename from src/main.rs rename to obs-gitlab-runner/src/main.rs index 5e03b55..60a79db 100644 --- a/src/main.rs +++ b/obs-gitlab-runner/src/main.rs @@ -18,21 +18,9 @@ use url::Url; use crate::handler::{HandlerOptions, ObsJobHandler}; -mod actions; -mod artifacts; -mod binaries; -mod build_meta; -mod dsc; mod handler; mod logging; -mod monitor; mod pipeline; -mod prune; -mod retry; -mod upload; - -#[cfg(test)] -mod test_support; #[derive(Debug, Clone)] struct TargetsArg { diff --git a/src/pipeline.rs b/obs-gitlab-runner/src/pipeline.rs similarity index 66% rename from src/pipeline.rs rename to obs-gitlab-runner/src/pipeline.rs index 6936603..a0295fa 100644 --- a/src/pipeline.rs +++ b/obs-gitlab-runner/src/pipeline.rs @@ -1,11 +1,13 @@ use std::collections::HashMap; use color_eyre::eyre::{Context, Result}; +use obo_core::{ + actions::{DownloadBinariesAction, MonitorAction}, + build_meta::{EnabledRepo, RepoArch}, +}; use serde::Serialize; use tracing::instrument; -use crate::build_meta::{CommitBuildInfo, RepoArch}; - #[derive(Clone, Debug, PartialEq, Eq)] pub enum PipelineDownloadBinaries { OnSuccess { build_results_dir: String }, @@ -43,23 +45,13 @@ struct JobSpec { rules: Option, } -fn generate_command(command_name: String, args: &[(&str, String)]) -> String { - let mut command = vec![command_name]; - - for (arg, value) in args { - command.extend_from_slice(&[format!("--{arg}"), shell_words::quote(value).into_owned()]); - } - - command.join(" ") -} - #[instrument] pub fn generate_monitor_pipeline( project: &str, package: &str, rev: &str, srcmd5: &str, - enabled_repos: &HashMap, + enabled_repos: &[EnabledRepo], options: GeneratePipelineOptions, ) -> Result { let rules: Option = options @@ -70,38 +62,40 @@ pub fn generate_monitor_pipeline( .wrap_err("Failed to parse provided rules")?; let mut jobs = HashMap::new(); - for (RepoArch { repo, arch }, info) in enabled_repos { + for enabled in enabled_repos { + let RepoArch { repo, arch } = &enabled.repo_arch; + let mut script = vec![]; let mut artifact_paths = vec![]; - let common_args = vec![ - ("project", project.to_owned()), - ("package", package.to_owned()), - ("repository", repo.to_owned()), - ("arch", arch.to_owned()), - ]; - - let mut monitor_args = vec![ - ("rev", rev.to_owned()), - ("srcmd5", srcmd5.to_owned()), - ("build-log-out", options.build_log_out.clone()), - ]; - if let Some(endtime) = &info.prev_endtime_for_commit { - monitor_args.push(("prev-endtime-for-commit", endtime.to_string())); - } - monitor_args.extend_from_slice(&common_args); - script.push(generate_command("monitor".to_owned(), &monitor_args)); + script.push( + MonitorAction { + project: project.to_owned(), + package: package.to_owned(), + repository: repo.to_owned(), + arch: arch.to_owned(), + rev: rev.to_owned(), + srcmd5: srcmd5.to_owned(), + build_log_out: options.build_log_out.clone(), + prev_endtime_for_commit: enabled.prev_endtime_for_commit, + } + .generate_command(), + ); artifact_paths.push(options.build_log_out.clone()); if let 
PipelineDownloadBinaries::OnSuccess { build_results_dir } = &options.download_binaries { - let mut download_args = vec![("build-results-dir", build_results_dir.clone())]; - download_args.extend_from_slice(&common_args); - script.push(generate_command( - "download-binaries".to_owned(), - &download_args, - )); + script.push( + DownloadBinariesAction { + project: project.to_owned(), + package: package.to_owned(), + repository: repo.to_owned(), + arch: arch.to_owned(), + build_results_dir: build_results_dir.into(), + } + .generate_command(), + ); artifact_paths.push(build_results_dir.clone()); } diff --git a/src/handler.rs b/src/handler.rs deleted file mode 100644 index 6b761e6..0000000 --- a/src/handler.rs +++ /dev/null @@ -1,1661 +0,0 @@ -use std::{ - borrow::Cow, - collections::{HashMap, HashSet}, - io::Seek, -}; - -use async_trait::async_trait; -use camino::{Utf8Path, Utf8PathBuf}; -use clap::{Parser, Subcommand}; -use color_eyre::eyre::{Context, Report, Result, eyre}; -use derivative::*; -use futures_util::StreamExt; -use gitlab_runner::{ - JobHandler, JobResult, Phase, UploadableFile, - job::{Dependency, Job, Variable}, -}; -use open_build_service_api as obs; -use tokio::fs::File as AsyncFile; -use tokio_util::{ - compat::{Compat, TokioAsyncReadCompatExt}, - io::ReaderStream, -}; -use tracing::{error, instrument, warn}; - -use crate::{ - actions::{ - Actions, DEFAULT_BUILD_INFO, DEFAULT_BUILD_LOG, DownloadBinariesAction, DputAction, - FailedBuild, FlagSupportingExplicitValue, LOG_TAIL_2MB, MonitorAction, ObsBuildInfo, - PruneAction, - }, - artifacts::{ArtifactDirectory, ArtifactReader, ArtifactWriter, MissingArtifact, SaveCallback}, - monitor::PackageMonitoringOptions, - outputln, - pipeline::{GeneratePipelineOptions, PipelineDownloadBinaries, generate_monitor_pipeline}, -}; - -const DEFAULT_MONITOR_PIPELINE: &str = "obs.yml"; -const DEFAULT_PIPELINE_JOB_PREFIX: &str = "obs"; -const DEFAULT_ARTIFACT_EXPIRATION: &str = "3 days"; - -#[derive(Parser, Debug)] -struct GenerateMonitorAction { - tag: String, - #[clap(long)] - rules: Option, - #[clap(long = "download-build-results-to")] - build_results_dir: Option, - #[clap(long, default_value_t = DEFAULT_BUILD_INFO.to_owned())] - build_info: String, - #[clap(long, default_value_t = DEFAULT_MONITOR_PIPELINE.to_owned())] - pipeline_out: String, - #[clap(long, default_value_t = DEFAULT_PIPELINE_JOB_PREFIX.to_owned())] - job_prefix: String, - #[clap(long)] - job_timeout: Option, - #[clap(long, default_value_t = DEFAULT_ARTIFACT_EXPIRATION.to_owned())] - artifact_expiration: String, - #[clap(long, default_value_t = DEFAULT_BUILD_LOG.into())] - build_log_out: String, -} - -#[derive(Parser, Debug)] -struct EchoAction { - args: Vec, - #[clap(long, flag_supporting_explicit_value())] - fail: bool, - #[clap(long, flag_supporting_explicit_value())] - uppercase: bool, - #[clap(long, default_value = " ")] - sep: String, -} - -#[derive(Subcommand)] -enum CommonAction { - Dput(DputAction), - Monitor(MonitorAction), - DownloadBinaries(DownloadBinariesAction), - Prune { - #[clap(long, flag_supporting_explicit_value())] - only_if_job_unsuccessful: bool, - #[clap(flatten)] - args: PruneAction, - }, -} - -#[derive(Subcommand)] -enum JobAction { - #[clap(flatten)] - Common(CommonAction), - GenerateMonitor(GenerateMonitorAction), - #[cfg(test)] - Echo(EchoAction), -} - -#[derive(Parser)] -#[clap(bin_name = "obs-gitlab-runner")] -#[clap(no_binary_name = true)] -struct Command { - #[clap(subcommand)] - action: JobAction, -} - -fn get_job_variable<'job>(job: &'job Job, 
key: &str) -> Result<Variable<'job>> { - job.variable(key) - .ok_or_else(|| eyre!("Failed to get variable ${}", key)) -} - -#[derive(Debug, Derivative)] -#[derivative(Default)] -pub struct HandlerOptions { - pub monitor: PackageMonitoringOptions, - pub default_monitor_job_timeout: Option<String>, - #[derivative(Default(value = "LOG_TAIL_2MB"))] - pub log_tail: u64, -} - -struct GitLabArtifacts<'a> { - job: &'a Job, - artifacts: &'a mut HashMap<Utf8PathBuf, ArtifactReader>, -} - -#[async_trait] -impl ArtifactDirectory for GitLabArtifacts<'_> { - #[instrument(skip(self, path), fields(path = %path.as_ref()))] - async fn open(&self, path: impl AsRef<Utf8Path> + Send) -> Result<ArtifactReader> { - let path = path.as_ref(); - - if let Some(file) = self.artifacts.get(path) { - let file = file - .try_clone() - .await - .wrap_err("Failed to reopen artifact")?; - return Ok(file); - } - - for dep in self.job.dependencies() { - if let Some(file) = check_for_artifact(dep, path).await? { - return Ok(file); - } - } - - Err(MissingArtifact(path.to_owned()).into()) - } - - #[tracing::instrument(skip(self, path, func), fields(path = %path.as_ref()))] - async fn save_with<P, F, Ret, Err>(&mut self, path: P, func: F) -> Result<Ret> - where - Report: From<Err>, - Ret: Send, - Err: Send, - F: for<'a> SaveCallback<'a, Ret, Err> + Send, - P: AsRef<Utf8Path> + Send, - { - let mut writer = ArtifactWriter::new().await?; - let ret = func(&mut writer).await?; - self.artifacts - .insert(path.as_ref().to_owned(), writer.into_reader().await?); - Ok(ret) - } -} - -pub struct ObsJobHandler { - job: Job, - options: HandlerOptions, - - actions: Actions, - script_failed: bool, - artifacts: HashMap<Utf8PathBuf, ArtifactReader>, -} - -impl ObsJobHandler { - pub fn new(job: Job, client: obs::Client, options: HandlerOptions) -> Self { - ObsJobHandler { - job, - options, - actions: Actions { client }, - script_failed: false, - artifacts: HashMap::new(), - } - } - - #[instrument(skip_all, fields(job = job.id()))] - pub fn from_obs_config_in_job(job: Job, options: HandlerOptions) -> Result<Self> { - let obs_server = get_job_variable(&job, "OBS_SERVER")?; - let obs_user = get_job_variable(&job, "OBS_USER")?; - let obs_password = get_job_variable(&job, "OBS_PASSWORD")?; - - let client = obs::Client::new( - obs_server.value().try_into().wrap_err("Invalid URL")?, - obs_user.value().to_owned(), - obs_password.value().to_owned(), - ); - Ok(ObsJobHandler::new(job, client, options)) - } - - fn expand_vars<'s>( - &self, - s: &'s str, - quote: bool, - expanding: &mut HashSet<String>, - ) -> Cow<'s, str> { - shellexpand::env_with_context_no_errors(s, |var| { - if !expanding.insert(var.to_owned()) { - return Some("".to_string()); - } - - let value = self.job.variable(var).map_or("", |v| v.value()); - let expanded = self.expand_vars(value, false, expanding); - expanding.remove(var); - Some( - if quote { - shell_words::quote(expanded.as_ref()) - } else { - expanded - } - .into_owned(), - ) - }) - } - - #[instrument(skip(self))] - async fn generate_monitor(&mut self, args: GenerateMonitorAction) -> Result<()> { - let mut artifacts = GitLabArtifacts { - job: &self.job, - artifacts: &mut self.artifacts, - }; - - let build_info_data = artifacts.read_string(&args.build_info).await?; - let build_info: ObsBuildInfo = serde_yaml::from_str(&build_info_data) - .wrap_err("Failed to parse provided build info file")?; - - let pipeline = generate_monitor_pipeline( - &build_info.project, - &build_info.package, - &build_info - .rev - .ok_or_else(|| eyre!("Build revision was not set"))?, - &build_info - .srcmd5 - .ok_or_else(|| eyre!("Build srcmd5 was not set"))?, - &build_info.enabled_repos, - GeneratePipelineOptions { - tags:
vec![args.tag], - artifact_expiration: args.artifact_expiration, - prefix: args.job_prefix, - timeout: args - .job_timeout - .or_else(|| self.options.default_monitor_job_timeout.clone()), - rules: args.rules, - download_binaries: if let Some(build_results_dir) = args.build_results_dir { - PipelineDownloadBinaries::OnSuccess { - build_results_dir: build_results_dir.into_string(), - } - } else { - PipelineDownloadBinaries::Never - }, - build_log_out: args.build_log_out.to_string(), - }, - )?; - - artifacts - .write(&args.pipeline_out, pipeline.as_bytes()) - .await?; - outputln!("Wrote pipeline file '{}'.", args.pipeline_out); - - Ok(()) - } - - #[instrument(skip(self))] - async fn command(&mut self, cmdline: &str) -> Result<()> { - // TODO: inject user? - let cmdline = self.expand_vars(cmdline, true, &mut HashSet::new()); - - outputln!("> {}", cmdline); - - let args = shell_words::split(&cmdline).wrap_err("Invalid command line")?; - let command = Command::try_parse_from(args)?; - - match command.action { - JobAction::Common(action) => { - let mut artifacts = GitLabArtifacts { - job: &self.job, - artifacts: &mut self.artifacts, - }; - - match action { - CommonAction::Dput(args) => self.actions.dput(args, &mut artifacts).await?, - CommonAction::Monitor(args) => { - self.actions - .monitor( - args, - self.options.monitor.clone(), - async |file| { - let mut log_stream = ReaderStream::new(file); - while let Some(bytes) = log_stream.next().await { - let bytes = bytes.wrap_err("Failed to stream log bytes")?; - self.job.trace(String::from_utf8_lossy(&bytes).as_ref()); - } - Ok(()) - }, - self.options.log_tail, - &mut artifacts, - ) - .await? - } - CommonAction::DownloadBinaries(args) => { - self.actions.download_binaries(args, &mut artifacts).await? - } - CommonAction::Prune { - only_if_job_unsuccessful: true, - .. - } if !self.script_failed => { - outputln!("Skipping prune: main script was successful.") - } - CommonAction::Prune { args, .. } => { - self.actions.prune(args, &artifacts).await? - } - } - } - JobAction::GenerateMonitor(args) => self.generate_monitor(args).await?, - #[cfg(test)] - JobAction::Echo(args) => { - use color_eyre::eyre::ensure; - - let mut output = args.args.join(&args.sep); - if args.uppercase { - output = output.to_uppercase(); - } - - outputln!("{}", output); - ensure!(!args.fail, "Failed"); - } - } - - Ok(()) - } -} - -pub struct UploadableArtifact { - path: Utf8PathBuf, - file: ArtifactReader, -} - -#[async_trait] -impl UploadableFile for UploadableArtifact { - type Data<'a> = Compat<ArtifactReader>; - - fn get_path(&self) -> Cow<'_, str> { - Cow::Borrowed(self.path.as_str()) - } - - async fn get_data(&self) -> Result<Self::Data<'_>, ()> { - self.file - .try_clone() - .await - .map(TokioAsyncReadCompatExt::compat) - .map_err(|e| { - warn!("Failed to clone {}: {e}", self.path); - }) - } -} - -#[async_trait] -impl JobHandler<UploadableArtifact> for ObsJobHandler { - async fn step(&mut self, script: &[String], _phase: Phase) -> JobResult { - for command in script { - if let Err(err) = self.command(command).await { - // Failed builds would already have information on them printed - // above, so don't print anything on them again.
- if !err.is::<FailedBuild>() { - error!(gitlab.output = true, "Error running command: {:?}", err); - } - - self.script_failed = true; - return Err(()); - } - } - - Ok(()) - } - - async fn get_uploadable_files( - &mut self, - ) -> Result<Box<dyn Iterator<Item = UploadableArtifact> + Send>, ()> { - let mut files = vec![]; - for (path, file) in &mut self.artifacts { - match file.try_clone().await { - Ok(file) => files.push(UploadableArtifact { - path: path.clone(), - file, - }), - Err(err) => error!( - gitlab.output = true, - "Failed to prepare to upload {path}: {err:?}" - ), - } - } - - Ok(Box::new(files.into_iter())) - } -} - -#[instrument(skip(dep), fields(dep_id = dep.id(), dep_name = dep.name()))] -async fn check_for_artifact( - dep: Dependency<'_>, - path: &Utf8Path, -) -> Result<Option<ArtifactReader>> { - // This needs to be an owned type, because values captured by spawn_blocking must - // have a 'static lifetime, so we also take the opportunity to normalize the - // path from extra trailing slashes and similar. - let path = path.components().collect::<Utf8PathBuf>(); - - // TODO: not spawn a sync environment for *every single artifact* - if let Some(mut artifact) = dep.download().await? { - if let Some(file) = tokio::task::spawn_blocking(move || { - artifact - .file(path.as_str()) - .map(|mut file| { - let mut temp = tempfile::tempfile()?; - std::io::copy(&mut file, &mut temp)?; - temp.rewind()?; - Ok::<_, Report>(temp) - }) - .transpose() - }) - .await?? - { - return Ok(Some( - ArtifactReader::from_async_file(&AsyncFile::from_std(file)).await?, - )); - } - } - - Ok(None) -} - -#[cfg(test)] -mod tests { - use std::{ - cmp::Ordering, - io::{Cursor, Read}, - sync::{Arc, Once}, - time::{Duration, SystemTime}, - }; - - use camino::Utf8Path; - use claims::*; - use gitlab_runner::{GitlabLayer, Runner, RunnerBuilder}; - use gitlab_runner_mock::*; - use open_build_service_mock::*; - use rstest::rstest; - use tempfile::TempDir; - use tracing::{Level, instrument::WithSubscriber}; - use tracing_subscriber::{Layer, Registry, filter::Targets, prelude::*}; - use zip::ZipArchive; - - use crate::{ - build_meta::RepoArch, logging::GitLabForwarder, test_support::*, upload::compute_md5, - }; - - use super::*; - - const JOB_TIMEOUT: u64 = 3600; - const TEST_LOG_TAIL: u64 = 50; - const OLD_STATUS_SLEEP_DURATION: Duration = Duration::from_millis(100); - - const DEFAULT_HANDLER_OPTIONS: HandlerOptions = HandlerOptions { - default_monitor_job_timeout: None, - log_tail: TEST_LOG_TAIL, - monitor: PackageMonitoringOptions { - sleep_on_building: Duration::ZERO, - sleep_on_old_status: OLD_STATUS_SLEEP_DURATION, - // High limit, since we don't really test that - // functionality in the handler tests.
- max_old_status_retries: 99, - }, - }; - - static COLOR_EYRE_INSTALL: Once = Once::new(); - - struct TestContext { - _runner_dir: TempDir, - gitlab_mock: GitlabRunnerMock, - runner: Runner, - - obs_mock: ObsMock, - obs_client: obs::Client, - } - - async fn with_context<T>(func: impl AsyncFnOnce(TestContext) -> T) -> T { - COLOR_EYRE_INSTALL.call_once(|| color_eyre::install().unwrap()); - - let runner_dir = tempfile::tempdir().unwrap(); - let gitlab_mock = GitlabRunnerMock::start().await; - let (layer, jobs) = GitlabLayer::new(); - let runner = RunnerBuilder::new( - gitlab_mock.uri(), - gitlab_mock.runner_token().to_owned(), - runner_dir.path().to_owned(), - jobs, - ) - .build() - .await; - - let obs_mock = create_default_mock().await; - let obs_client = create_default_client(&obs_mock); - - let ctx = TestContext { - _runner_dir: runner_dir, - gitlab_mock, - runner, - obs_mock, - obs_client, - }; - - func(ctx) - .with_subscriber( - Registry::default() - .with( - tracing_subscriber::fmt::layer() - .with_test_writer() - .with_filter( - Targets::new().with_target("obs_gitlab_runner", Level::TRACE), - ), - ) - .with(tracing_error::ErrorLayer::default()) - .with(GitLabForwarder::new(layer)), - ) - .await - } - - #[derive(Default)] - struct JobSpec { - name: String, - dependencies: Vec<MockJob>, - variables: HashMap<String, String>, - script: Vec<String>, - after_script: Vec<String>, - } - - fn enqueue_job(context: &TestContext, spec: JobSpec) -> MockJob { - let mut builder = context.gitlab_mock.job_builder(spec.name); - - builder.add_step( - MockJobStepName::Script, - spec.script, - JOB_TIMEOUT, - MockJobStepWhen::OnSuccess, - false, - ); - - if !spec.after_script.is_empty() { - builder.add_step( - MockJobStepName::AfterScript, - spec.after_script, - JOB_TIMEOUT, - MockJobStepWhen::OnSuccess, - false, - ); - } - - builder.add_artifact( - None, - false, - vec!["*".to_owned()], - Some(MockJobArtifactWhen::Always), - "archive".to_owned(), - Some("zip".to_owned()), - None, - ); - - for dependency in spec.dependencies { - builder.dependency(dependency); - } - for (key, value) in spec.variables { - builder.add_variable(key, value, true, false); - } - - builder.add_variable( - "OBS_SERVER".to_owned(), - context.obs_client.url().to_string(), - false, - true, - ); - builder.add_variable("OBS_USER".to_owned(), TEST_USER.to_owned(), false, true); - builder.add_variable("OBS_PASSWORD".to_owned(), TEST_PASS.to_owned(), false, true); - - let job = builder.build(); - context.gitlab_mock.enqueue_job(job.clone()); - job - } - - async fn run_handler<U, H, Func>(context: &mut TestContext, handler_func: Func) - where - U: UploadableFile + Send + 'static, - H: JobHandler<U> + Send + 'static, - Func: (FnOnce(Job) -> H) + Send + Sync + 'static, - { - let got_job = context - .runner - .request_job(move |job| futures_util::future::ready(Ok(handler_func(job)))) - .await - .unwrap(); - assert!(got_job); - context.runner.wait_for_space(1).await; - } - - struct PutArtifactsHandler { - artifacts: Arc<HashMap<String, Vec<u8>>>, - } - - struct PutArtifactsFile { - artifacts: Arc<HashMap<String, Vec<u8>>>, - path: String, - } - - #[async_trait] - impl UploadableFile for PutArtifactsFile { - type Data<'a> = Compat<Cursor<&'a Vec<u8>>>; - - fn get_path(&self) -> Cow<'_, str> { - Cow::Borrowed(&self.path) - } - - async fn get_data(&self) -> Result<Self::Data<'_>, ()> { - Ok(Cursor::new(self.artifacts.get(&self.path).unwrap()).compat()) - } - } - - #[async_trait] - impl JobHandler<PutArtifactsFile> for PutArtifactsHandler { - async fn step(&mut self, _script: &[String], _phase: Phase) -> JobResult { - Ok(()) - } - - async fn get_uploadable_files( - &mut self, - ) -> Result<Box<dyn Iterator<Item = PutArtifactsFile> + Send>, ()>
{ - Ok(Box::new( - self.artifacts - .keys() - .map(|k| PutArtifactsFile { - artifacts: self.artifacts.clone(), - path: k.to_owned(), - }) - .collect::<Vec<_>>() - .into_iter(), - )) - } - } - - async fn put_artifacts( - context: &mut TestContext, - artifacts: HashMap<String, Vec<u8>>, - ) -> MockJob { - let artifacts_job = enqueue_job( - context, - JobSpec { - name: "artifacts".to_owned(), - script: vec!["dummy".to_owned()], - ..Default::default() - }, - ); - run_handler(context, |_| PutArtifactsHandler { - artifacts: Arc::new(artifacts), - }) - .await; - artifacts_job - } - - fn get_job_artifacts(job: &MockJob) -> HashMap<String, Vec<u8>> { - let Some(artifact) = job.uploaded_artifacts().next() else { - return Default::default(); - }; - - let data = (*artifact.data).clone(); - assert!(!data.is_empty()); - - let cursor = Cursor::new(data); - let mut zip = ZipArchive::new(cursor).unwrap(); - - (0..zip.len()) - .map(|i| { - let mut file = zip.by_index(i).unwrap(); - - let mut contents = vec![]; - file.read_to_end(&mut contents).unwrap(); - - (file.name().to_owned(), contents) - }) - .collect() - } - - async fn run_obs_handler_with_options(context: &mut TestContext, options: HandlerOptions) { - run_handler(context, move |job| { - assert_ok!(ObsJobHandler::from_obs_config_in_job(job, options)) - }) - .await; - } - - async fn run_obs_handler(context: &mut TestContext) { - run_obs_handler_with_options(context, DEFAULT_HANDLER_OPTIONS).await; - } - - #[derive(Debug, PartialEq, Eq, Clone, Copy)] - enum DputTest { - Basic, - Rebuild, - ReusePreviousBuild, - Branch, - } - - async fn test_dput(context: &mut TestContext, test: DputTest) -> (MockJob, ObsBuildInfo) { - let test1_file = "test1"; - let test1_contents = b"123"; - let test1_md5 = compute_md5(test1_contents); - - let dsc1_file = "test1.dsc"; - let dsc1_contents = format!( - "Source: {}\nFiles:\n {} {} {}", - TEST_PACKAGE_1, - test1_md5.clone(), - test1_contents.len(), - test1_file - ); - let dsc1_md5 = compute_md5(dsc1_contents.as_bytes()); - - let dsc1_bad_file = "test1-bad.dsc"; - let dsc1_bad_contents = - dsc1_contents.replace(test1_file, &(test1_file.to_owned() + ".missing")); - - context.obs_mock.add_project(TEST_PROJECT.to_owned()); - - context.obs_mock.add_or_update_repository( - TEST_PROJECT, - TEST_REPO.to_owned(), - TEST_ARCH_1.to_owned(), - MockRepositoryCode::Finished, - ); - context.obs_mock.add_or_update_repository( - TEST_PROJECT, - TEST_REPO.to_owned(), - TEST_ARCH_2.to_owned(), - MockRepositoryCode::Finished, - ); - - if test == DputTest::Rebuild { - // We also test excluded repos on rebuilds; this test makes it - // easier, because it's not testing creating a new package, so we - // can create it ourselves first with the desired metadata.
- context.obs_mock.add_new_package( - TEST_PROJECT, - TEST_PACKAGE_1.to_owned(), - MockPackageOptions::default(), - ); - context.obs_mock.set_package_build_status( - TEST_PROJECT, - TEST_REPO, - TEST_ARCH_2, - TEST_PACKAGE_1.to_owned(), - MockBuildStatus::new(MockPackageCode::Disabled), - ); - } - - let artifacts = put_artifacts( - context, - [ - (dsc1_file.to_owned(), dsc1_contents.as_bytes().to_vec()), - ( - dsc1_bad_file.to_owned(), - dsc1_bad_contents.as_bytes().to_vec(), - ), - (test1_file.to_owned(), test1_contents.to_vec()), - ] - .into(), - ) - .await; - - let mut dput_command = format!("dput {TEST_PROJECT} {dsc1_file}"); - let mut created_project = TEST_PROJECT.to_owned(); - - if test == DputTest::Branch { - created_project += ":branched"; - dput_command += &format!(" --branch-to {created_project}"); - } - - let dput = enqueue_job( - context, - JobSpec { - name: "dput".to_owned(), - dependencies: vec![artifacts.clone()], - script: vec![dput_command.replace(dsc1_file, dsc1_bad_file)], - ..Default::default() - }, - ); - - run_obs_handler(context).await; - assert_eq!(MockJobState::Failed, dput.state()); - - let results = get_job_artifacts(&dput); - let build_info: ObsBuildInfo = - serde_yaml::from_slice(results.get(DEFAULT_BUILD_INFO).unwrap()).unwrap(); - - assert_eq!(build_info.project, created_project); - assert_eq!(build_info.package, TEST_PACKAGE_1); - assert_none!(build_info.rev); - assert_eq!(build_info.is_branched, test == DputTest::Branch); - - let mut dput = enqueue_job( - context, - JobSpec { - name: "dput".to_owned(), - dependencies: vec![artifacts.clone()], - script: vec![dput_command.clone()], - ..Default::default() - }, - ); - - run_obs_handler(context).await; - assert_eq!(MockJobState::Success, dput.state()); - - if test == DputTest::Rebuild || test == DputTest::ReusePreviousBuild { - context.obs_mock.add_or_update_repository( - &created_project, - TEST_REPO.to_owned(), - TEST_ARCH_1.to_owned(), - MockRepositoryCode::Building, - ); - // Also test endtimes, since we now have an existing package to - // modify the metadata of. - let dir = assert_ok!( - context - .obs_client - .project(TEST_PROJECT.to_owned()) - .package(TEST_PACKAGE_1.to_owned()) - .list(None) - .await - ); - // Testing of reused builds never had the second arch disabled, so - // also add that build history. 
- if test == DputTest::ReusePreviousBuild { - context.obs_mock.add_job_history( - TEST_PROJECT, - TEST_REPO, - TEST_ARCH_2, - MockJobHistoryEntry { - package: TEST_PACKAGE_1.to_owned(), - rev: dir.rev.clone().unwrap(), - srcmd5: dir.srcmd5.clone(), - ..Default::default() - }, - ); - } - context.obs_mock.add_job_history( - TEST_PROJECT, - TEST_REPO, - TEST_ARCH_1, - MockJobHistoryEntry { - package: TEST_PACKAGE_1.to_owned(), - rev: dir.rev.unwrap(), - srcmd5: dir.srcmd5, - ..Default::default() - }, - ); - - context.obs_mock.set_package_build_status_for_rebuilds( - &created_project, - MockBuildStatus::new(MockPackageCode::Broken), - ); - context.obs_mock.set_package_build_status( - &created_project, - TEST_REPO, - TEST_ARCH_1, - TEST_PACKAGE_1.to_owned(), - MockBuildStatus::new(MockPackageCode::Failed), - ); - - let status = assert_ok!( - context - .obs_client - .project(created_project.clone()) - .package(TEST_PACKAGE_1.to_owned()) - .status(TEST_REPO, TEST_ARCH_1) - .await - ); - assert_eq!(status.code, obs::PackageCode::Failed); - - dput = enqueue_job( - context, - JobSpec { - name: "dput".to_owned(), - dependencies: vec![artifacts.clone()], - script: vec![dput_command.clone()], - ..Default::default() - }, - ); - - run_obs_handler(context).await; - assert_eq!(MockJobState::Success, dput.state()); - - let job_log = String::from_utf8_lossy(&dput.log()).into_owned(); - assert!(job_log.contains("unchanged")); - - let status = assert_ok!( - context - .obs_client - .project(created_project.clone()) - .package(TEST_PACKAGE_1.to_owned()) - .status(TEST_REPO, TEST_ARCH_1) - .await - ); - assert_eq!(status.code, obs::PackageCode::Failed); - - if test == DputTest::Rebuild { - dput = enqueue_job( - context, - JobSpec { - name: "dput".to_owned(), - dependencies: vec![artifacts.clone()], - script: vec![format!("{} --rebuild-if-unchanged", dput_command)], - ..Default::default() - }, - ); - - run_obs_handler(context).await; - assert_eq!(MockJobState::Success, dput.state()); - - let status = assert_ok!( - context - .obs_client - .project(created_project.clone()) - .package(TEST_PACKAGE_1.to_owned()) - .status(TEST_REPO, TEST_ARCH_1) - .await - ); - assert_eq!(status.code, obs::PackageCode::Broken); - - let job_log = String::from_utf8_lossy(&dput.log()).into_owned(); - assert!(job_log.contains("unchanged")); - } - } - - let results = get_job_artifacts(&dput); - let build_info: ObsBuildInfo = - serde_yaml::from_slice(results.get(DEFAULT_BUILD_INFO).unwrap()).unwrap(); - - assert_eq!(build_info.project, created_project); - assert_eq!(build_info.package, TEST_PACKAGE_1); - assert_some!(build_info.rev.as_deref()); - assert_eq!(build_info.is_branched, test == DputTest::Branch); - - assert_eq!( - build_info.enabled_repos.len(), - if test == DputTest::Rebuild { 1 } else { 2 } - ); - - let arch_1 = build_info - .enabled_repos - .get(&RepoArch { - repo: TEST_REPO.to_owned(), - arch: TEST_ARCH_1.to_owned(), - }) - .unwrap(); - - if test == DputTest::Rebuild { - assert_some!(arch_1.prev_endtime_for_commit); - } else { - assert_none!(arch_1.prev_endtime_for_commit); - - let arch_2 = build_info - .enabled_repos - .get(&RepoArch { - repo: TEST_REPO.to_owned(), - arch: TEST_ARCH_2.to_owned(), - }) - .unwrap(); - assert_none!(arch_2.prev_endtime_for_commit); - } - - let mut dir = assert_ok!( - context - .obs_client - .project(created_project.clone()) - .package(TEST_PACKAGE_1.to_owned()) - .list(None) - .await - ); - - assert_eq!(dir.entries.len(), 3); - dir.entries.sort_by(|a, b| a.name.cmp(&b.name)); - 
assert_eq!(dir.entries[0].name, "_meta"); - assert_eq!(dir.entries[1].name, test1_file); - assert_eq!(dir.entries[1].size, test1_contents.len() as u64); - assert_eq!(dir.entries[1].md5, test1_md5); - assert_eq!(dir.entries[2].name, dsc1_file); - assert_eq!(dir.entries[2].size, dsc1_contents.len() as u64); - assert_eq!(dir.entries[2].md5, dsc1_md5); - - (dput, build_info) - } - - #[derive(Debug, PartialEq, Eq)] - enum MonitorLogTest { - // Test that long logs are truncated. - Long, - // Test that short logs are fully shown. - Short, - // Test that revision mismatches result in unavailable logs. - Unavailable, - } - - async fn test_monitoring( - context: &mut TestContext, - dput: MockJob, - build_info: &ObsBuildInfo, - success: bool, - dput_test: DputTest, - log_test: MonitorLogTest, - download_binaries: bool, - ) { - const TEST_JOB_RUNNER_TAG: &str = "test-tag"; - const TEST_MONITOR_TIMEOUT: &str = "1 day"; - const TEST_BUILD_RESULTS_DIR: &str = "results"; - const TEST_BUILD_RESULT: &str = "test-build-result"; - const TEST_BUILD_RESULT_CONTENTS: &[u8] = b"abcdef"; - - let srcmd5_prefix = format!( - "srcmd5 '{}' ", - if log_test == MonitorLogTest::Unavailable { - ZERO_REV_SRCMD5.to_owned() - } else { - let dir = assert_ok!( - context - .obs_client - .project(build_info.project.to_owned()) - .package(TEST_PACKAGE_1.to_owned()) - .list(None) - .await - ); - - dir.srcmd5 - } - ); - - let (log_contents, log_vs_limit) = if log_test == MonitorLogTest::Short { - (srcmd5_prefix + "short", Ordering::Less) - } else { - ( - srcmd5_prefix + "this is a long log that will need to be trimmed when printed", - Ordering::Greater, - ) - }; - - assert_eq!( - log_contents.len().cmp(&(TEST_LOG_TAIL as usize)), - log_vs_limit - ); - - let mut generate_command = format!( - "generate-monitor {TEST_JOB_RUNNER_TAG} \ - --job-timeout '{TEST_MONITOR_TIMEOUT}' \ - --rules '[{{a: 1}}, {{b: 2}}]'" - ); - if download_binaries { - generate_command += &format!(" --download-build-results-to {TEST_BUILD_RESULTS_DIR}"); - } - let generate = enqueue_job( - context, - JobSpec { - name: "generate".to_owned(), - dependencies: vec![dput.clone()], - script: vec![generate_command], - ..Default::default() - }, - ); - - run_obs_handler(context).await; - assert_eq!(generate.state(), MockJobState::Success); - - let results = get_job_artifacts(&generate); - let pipeline_yaml: serde_yaml::Value = assert_ok!(serde_yaml::from_slice( - results.get(DEFAULT_MONITOR_PIPELINE).unwrap() - )); - let pipeline_map = pipeline_yaml.as_mapping().unwrap(); - - assert_eq!(pipeline_map.len(), build_info.enabled_repos.len()); - - for repo in build_info.enabled_repos.keys() { - // Sanity check this, even though test_dput should have already - // checked it. 
- assert_eq!(repo.repo, TEST_REPO); - assert!( - repo.arch == TEST_ARCH_1 || repo.arch == TEST_ARCH_2, - "unexpected arch '{}'", - repo.arch - ); - - context.obs_mock.set_package_build_status( - &build_info.project, - &repo.repo, - &repo.arch, - TEST_PACKAGE_1.to_owned(), - MockBuildStatus::new(if success { - MockPackageCode::Succeeded - } else { - MockPackageCode::Failed - }), - ); - context.obs_mock.add_completed_build_log( - &build_info.project, - TEST_REPO, - &repo.arch, - TEST_PACKAGE_1.to_owned(), - MockBuildLog::new(log_contents.to_owned()), - success, - ); - context.obs_mock.set_package_binaries( - &build_info.project, - TEST_REPO, - &repo.arch, - TEST_PACKAGE_1.to_owned(), - [( - TEST_BUILD_RESULT.to_owned(), - MockBinary { - contents: TEST_BUILD_RESULT_CONTENTS.to_vec(), - mtime: SystemTime::now(), - }, - )] - .into(), - ); - - let monitor_job_name = format!( - "{}-{}-{}", - DEFAULT_PIPELINE_JOB_PREFIX, TEST_REPO, &repo.arch - ); - - let monitor_map = pipeline_yaml - .as_mapping() - .unwrap() - .get(monitor_job_name.as_str()) - .unwrap() - .as_mapping() - .unwrap(); - - let artifacts = monitor_map.get("artifacts").unwrap().as_mapping().unwrap(); - assert_eq!( - artifacts.get("expire_in").unwrap().as_str().unwrap(), - DEFAULT_ARTIFACT_EXPIRATION - ); - - let mut artifact_paths: Vec<_> = artifacts - .get("paths") - .unwrap() - .as_sequence() - .unwrap() - .iter() - .map(|item| item.as_str().unwrap()) - .collect(); - artifact_paths.sort(); - - if download_binaries { - assert_eq!( - &artifact_paths, - &[DEFAULT_BUILD_LOG, TEST_BUILD_RESULTS_DIR] - ); - } else { - assert_eq!(&artifact_paths, &[DEFAULT_BUILD_LOG]); - } - - let tags = monitor_map.get("tags").unwrap().as_sequence().unwrap(); - assert_eq!(tags.len(), 1); - assert_eq!(tags[0].as_str().unwrap(), TEST_JOB_RUNNER_TAG); - - let timeout = monitor_map.get("timeout").unwrap().as_str().unwrap(); - assert_eq!(timeout, TEST_MONITOR_TIMEOUT); - - let rules: Vec<_> = monitor_map - .get("rules") - .unwrap() - .as_sequence() - .unwrap() - .iter() - .map(|v| v.as_mapping().unwrap()) - .collect(); - assert_eq!(rules.len(), 2); - - assert_eq!(rules[0].get("a").unwrap().as_i64().unwrap(), 1); - assert_eq!(rules[1].get("b").unwrap().as_i64().unwrap(), 2); - - for script_key in ["before_script", "after_script"] { - let script = monitor_map.get(script_key).unwrap().as_sequence().unwrap(); - assert_eq!(script.len(), 0); - } - - let script = monitor_map - .get("script") - .unwrap() - .as_sequence() - .unwrap() - .iter() - .map(|v| v.as_str().unwrap().to_owned()) - .collect::<Vec<_>>(); - - let monitor = enqueue_job( - context, - JobSpec { - name: monitor_job_name.clone(), - dependencies: vec![dput.clone()], - script: script.clone(), - ..Default::default() - }, - ); - - if dput_test != DputTest::ReusePreviousBuild { - // Update the endtime in the background, otherwise the monitor - // will hang forever waiting.
- let mock = context.obs_mock.clone(); - let build_info_2 = build_info.clone(); - let repo_2 = repo.clone(); - tokio::spawn(async move { - tokio::time::sleep(OLD_STATUS_SLEEP_DURATION * 10).await; - mock.add_job_history( - &build_info_2.project, - &repo_2.repo, - &repo_2.arch, - MockJobHistoryEntry { - package: build_info_2.package, - endtime: SystemTime::UNIX_EPOCH + Duration::from_secs(999), - srcmd5: build_info_2.srcmd5.unwrap(), - ..Default::default() - }, - ); - }); - } - - assert_ok!( - tokio::time::timeout(OLD_STATUS_SLEEP_DURATION * 20, run_obs_handler(context)) - .await - ); - assert_eq!( - monitor.state(), - if success && log_test != MonitorLogTest::Unavailable { - MockJobState::Success - } else { - MockJobState::Failed - } - ); - - let job_log = String::from_utf8_lossy(&monitor.log()).into_owned(); - - assert_eq!( - job_log.contains("unavailable"), - log_test == MonitorLogTest::Unavailable - ); - - // If we reused a previous build, we're not waiting for a new build, - // so don't check for an old build status. - let build_actually_occurred = dput_test != DputTest::ReusePreviousBuild; - assert_eq!( - job_log.contains("Waiting for build status"), - build_actually_occurred - ); - - assert_eq!( - job_log.contains(&log_contents), - !success && log_test == MonitorLogTest::Short - ); - - if !success && log_test == MonitorLogTest::Long { - let log_bytes = log_contents.as_bytes(); - let truncated_log_bytes = &log_bytes[log_bytes.len() - (TEST_LOG_TAIL as usize)..]; - assert!(job_log.contains(String::from_utf8_lossy(truncated_log_bytes).as_ref())); - } - - let results = get_job_artifacts(&monitor); - let build_result_path = Utf8Path::new(TEST_BUILD_RESULTS_DIR) - .join(TEST_BUILD_RESULT) - .into_string(); - let mut has_built_result = false; - - if log_test != MonitorLogTest::Unavailable { - let full_log = results.get(DEFAULT_BUILD_LOG).unwrap(); - assert_eq!(log_contents, String::from_utf8_lossy(full_log)); - - if success && download_binaries { - let build_result = results.get(&build_result_path).unwrap(); - assert_eq!(TEST_BUILD_RESULT_CONTENTS, &build_result[..]); - - has_built_result = true; - } - } - - if !has_built_result { - assert_none!(results.get(&build_result_path)); - } - } - } - - async fn test_prune( - context: &mut TestContext, - dput: MockJob, - build_info: &ObsBuildInfo, - only_if_job_unsuccessful: bool, - ) { - let prune = enqueue_job( - context, - JobSpec { - name: "prune".to_owned(), - script: vec!["prune".to_owned()], - ..Default::default() - }, - ); - - run_obs_handler(context).await; - assert_eq!(MockJobState::Failed, prune.state()); - - let prune = enqueue_job( - context, - JobSpec { - name: "prune".to_owned(), - script: vec!["prune --ignore-missing-build-info".to_owned()], - ..Default::default() - }, - ); - - run_obs_handler(context).await; - assert_eq!(MockJobState::Success, prune.state()); - - assert!(String::from_utf8_lossy(&prune.log()).contains("Skipping prune")); - - let prune = if only_if_job_unsuccessful { - let prune = enqueue_job( - context, - JobSpec { - name: "prune".to_owned(), - dependencies: vec![dput.clone()], - script: vec!["echo".to_owned()], - after_script: vec!["prune --only-if-job-unsuccessful".to_owned()], - ..Default::default() - }, - ); - - run_obs_handler(context).await; - assert_eq!(MockJobState::Success, prune.state()); - - assert!(String::from_utf8_lossy(&prune.log()).contains("Skipping prune")); - - assert_ok!( - context - .obs_client - .project(build_info.project.clone()) - .package(TEST_PACKAGE_1.to_owned()) - .list(None) - .await - 
); - - enqueue_job( - context, - JobSpec { - name: "prune".to_owned(), - - dependencies: vec![dput.clone()], - script: vec!["echo --fail".to_owned()], - after_script: vec!["prune --only-if-job-unsuccessful".to_owned()], - ..Default::default() - }, - ) - } else { - enqueue_job( - context, - JobSpec { - name: "prune".to_owned(), - - dependencies: vec![dput.clone()], - script: vec!["prune".to_owned()], - ..Default::default() - }, - ) - }; - - run_obs_handler(context).await; - assert_eq!( - prune.state(), - if only_if_job_unsuccessful { - MockJobState::Failed - } else { - MockJobState::Success - } - ); - - if build_info.is_branched { - assert_err!( - context - .obs_client - .project(build_info.project.clone()) - .package(TEST_PACKAGE_1.to_owned()) - .list(None) - .await - ); - } else { - assert!(String::from_utf8_lossy(&prune.log()).contains("package was not branched")); - - assert_ok!( - context - .obs_client - .project(build_info.project.clone()) - .package(TEST_PACKAGE_1.to_owned()) - .list(None) - .await - ); - } - } - - #[rstest] - #[tokio::test] - async fn test_handler_flow( - #[values( - DputTest::Basic, - DputTest::Rebuild, - DputTest::ReusePreviousBuild, - DputTest::Branch - )] - dput_test: DputTest, - #[values(true, false)] build_success: bool, - #[values( - MonitorLogTest::Long, - MonitorLogTest::Short, - MonitorLogTest::Unavailable - )] - log_test: MonitorLogTest, - #[values(true, false)] download_binaries: bool, - #[values(true, false)] prune_only_if_job_unsuccessful: bool, - ) { - with_context(async |mut context| { - let (dput, build_info) = test_dput(&mut context, dput_test).await; - - test_monitoring( - &mut context, - dput.clone(), - &build_info, - build_success, - dput_test, - log_test, - download_binaries, - ) - .await; - - test_prune( - &mut context, - dput, - &build_info, - prune_only_if_job_unsuccessful, - ) - .await; - }) - .await; - } - - #[rstest] - #[tokio::test] - async fn test_variable_expansion() { - with_context(async |mut context| { - let job = enqueue_job( - &context, - JobSpec { - name: "expansion".to_owned(), - variables: [ - ("ESCAPED".to_owned(), "this should not appear".to_owned()), - ("QUOTED".to_owned(), "spaces should be preserved".to_owned()), - ("RECURSIVE".to_owned(), "recursion($RECURSIVE)".to_owned()), - ] - .into(), - script: vec!["echo --sep ; $MISSING $$ESCAPED $QUOTED $RECURSIVE".to_owned()], - ..Default::default() - }, - ); - - run_obs_handler(&mut context).await; - assert_eq!(job.state(), MockJobState::Success); - - let job_log = String::from_utf8_lossy(&job.log()).into_owned(); - assert_eq!( - job_log.lines().last().unwrap(), - ";$ESCAPED;spaces should be preserved;recursion()" - ); - }) - .await; - } - - #[rstest] - #[tokio::test] - async fn test_flag_parsing() { - with_context(async |mut context| { - let job = enqueue_job( - &context, - JobSpec { - name: "flag".to_owned(), - script: vec!["echo --uppercase false".to_owned()], - ..Default::default() - }, - ); - - run_obs_handler(&mut context).await; - assert_eq!(job.state(), MockJobState::Success); - - let job_log = String::from_utf8_lossy(&job.log()).into_owned(); - assert_eq!(job_log.lines().last().unwrap(), "FALSE"); - - let job = enqueue_job( - &context, - JobSpec { - name: "flag".to_owned(), - script: vec!["echo --uppercase=false true".to_owned()], - ..Default::default() - }, - ); - - run_obs_handler(&mut context).await; - assert_eq!(job.state(), MockJobState::Success); - - let job_log = String::from_utf8_lossy(&job.log()).into_owned(); - assert_eq!(job_log.lines().last().unwrap(), 
"true"); - - let job = enqueue_job( - &context, - JobSpec { - name: "flag".to_owned(), - script: vec!["echo --uppercase=true false".to_owned()], - ..Default::default() - }, - ); - - run_obs_handler(&mut context).await; - assert_eq!(job.state(), MockJobState::Success); - - let job_log = String::from_utf8_lossy(&job.log()).into_owned(); - assert_eq!(job_log.lines().last().unwrap(), "FALSE"); - - let job = enqueue_job( - &context, - JobSpec { - name: "flag".to_owned(), - script: vec!["echo --uppercase=X false".to_owned()], - ..Default::default() - }, - ); - - run_obs_handler(&mut context).await; - assert_eq!(job.state(), MockJobState::Failed); - }) - .await; - } - - #[derive(Debug, PartialEq, Eq)] - enum GenerateMonitorTimeoutLocation { - HandlerOption, - Argument, - } - - #[rstest] - #[tokio::test] - async fn test_generate_monitor_timeouts( - #[values( - None, - Some(GenerateMonitorTimeoutLocation::HandlerOption), - Some(GenerateMonitorTimeoutLocation::Argument) - )] - test: Option, - ) { - use crate::build_meta::CommitBuildInfo; - - const TEST_MONITOR_TIMEOUT: &str = "10 minutes"; - - with_context(async |mut context| { - let build_info = ObsBuildInfo { - project: TEST_PROJECT.to_owned(), - package: TEST_PACKAGE_1.to_owned(), - rev: Some("1".to_owned()), - srcmd5: Some("abc".to_owned()), - is_branched: false, - enabled_repos: [( - RepoArch { - repo: TEST_REPO.to_owned(), - arch: TEST_ARCH_1.to_owned(), - }, - CommitBuildInfo { - prev_endtime_for_commit: None, - }, - )] - .into(), - }; - - let build_info = put_artifacts( - &mut context, - [( - DEFAULT_BUILD_INFO.to_owned(), - serde_yaml::to_string(&build_info).unwrap().into_bytes(), - )] - .into(), - ) - .await; - - let mut generate_spec = JobSpec { - name: "generate".to_owned(), - script: vec!["generate-monitor tag".to_owned()], - dependencies: vec![build_info], - ..Default::default() - }; - - if test == Some(GenerateMonitorTimeoutLocation::Argument) { - use std::fmt::Write; - write!( - &mut generate_spec.script[0], - " --job-timeout '{TEST_MONITOR_TIMEOUT}'" - ) - .unwrap(); - } - - let generate = enqueue_job(&context, generate_spec); - - if test == Some(GenerateMonitorTimeoutLocation::HandlerOption) { - run_obs_handler_with_options( - &mut context, - HandlerOptions { - default_monitor_job_timeout: Some(TEST_MONITOR_TIMEOUT.to_owned()), - ..DEFAULT_HANDLER_OPTIONS - }, - ) - .await; - } else { - run_obs_handler(&mut context).await; - } - assert_eq!(generate.state(), MockJobState::Success); - - let results = get_job_artifacts(&generate); - let pipeline_yaml: serde_yaml::Value = assert_ok!(serde_yaml::from_slice( - results.get(DEFAULT_MONITOR_PIPELINE).unwrap() - )); - let pipeline_map = pipeline_yaml.as_mapping().unwrap(); - - let monitor_map = pipeline_map - .into_iter() - .next() - .unwrap() - .1 - .as_mapping() - .unwrap(); - - let timeout_yaml = monitor_map.get("timeout"); - if test.is_some() { - assert_eq!( - timeout_yaml.unwrap().as_str().unwrap(), - TEST_MONITOR_TIMEOUT - ); - } else { - assert_none!(timeout_yaml); - } - }) - .await; - } -}