diff --git a/.github/workflows/python-ci.yml b/.github/workflows/python-ci.yml new file mode 100644 index 0000000000..7e89c429e1 --- /dev/null +++ b/.github/workflows/python-ci.yml @@ -0,0 +1,145 @@ +name: Python CI and Docker Release + +on: + push: + branches: + - main + - lab3 + tags: + - "v*.*.*" + pull_request: + branches: + - main + workflow_dispatch: + +permissions: + contents: read + +concurrency: + group: python-ci-${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + test: + name: Lint and Test + runs-on: ubuntu-latest + timeout-minutes: 15 + strategy: + fail-fast: true + matrix: + python-version: ["3.11", "3.12"] + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Set up Python + id: setup-python + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + cache: pip + cache-dependency-path: app_python/requirements.txt + + - name: Install dependencies + id: deps + run: | + START=$(date +%s) + python -m pip install --upgrade pip + pip install -r app_python/requirements.txt + pip install ruff + END=$(date +%s) + echo "install_seconds=$((END-START))" >> "$GITHUB_OUTPUT" + + - name: Lint (ruff) + run: ruff check app_python + + - name: Run unit tests + run: python -m unittest discover -s app_python/tests -v + + - name: Dependency cache report + if: always() + run: | + echo "### Dependency install metrics (Python ${{ matrix.python-version }})" >> "$GITHUB_STEP_SUMMARY" + echo "- cache-hit: \`${{ steps.setup-python.outputs.cache-hit }}\`" >> "$GITHUB_STEP_SUMMARY" + echo "- install-seconds: \`${{ steps.deps.outputs.install_seconds }}\`" >> "$GITHUB_STEP_SUMMARY" + + security: + name: Snyk Dependency Scan + runs-on: ubuntu-latest + needs: test + timeout-minutes: 15 + env: + SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }} + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: "3.11" + cache: pip + cache-dependency-path: 
app_python/requirements.txt + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install -r app_python/requirements.txt + + - name: Set up Snyk CLI + if: ${{ env.SNYK_TOKEN != '' }} + uses: snyk/actions/setup@master + + - name: Run Snyk scan (high and critical) + if: ${{ env.SNYK_TOKEN != '' }} + continue-on-error: true + env: + SNYK_TOKEN: ${{ env.SNYK_TOKEN }} + run: snyk test \ + --org=sofiakulagina \ + --file=app_python/requirements.txt \ + --severity-threshold=high + + + - name: Snyk token reminder + if: ${{ env.SNYK_TOKEN == '' }} + run: echo "SNYK_TOKEN secret is not configured; Snyk scan skipped." + + docker: + name: Build and Push Docker Image + runs-on: ubuntu-latest + needs: [test, security] + if: startsWith(github.ref, 'refs/tags/v') + timeout-minutes: 20 + env: + IMAGE_NAME: ${{ secrets.DOCKERHUB_USERNAME }}/devops-info-service + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Log in to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Docker metadata + id: meta + uses: docker/metadata-action@v5 + with: + images: ${{ env.IMAGE_NAME }} + tags: | + type=semver,pattern={{version}} + type=semver,pattern={{major}}.{{minor}} + type=raw,value=latest + + - name: Build and push + uses: docker/build-push-action@v6 + with: + context: app_python + file: app_python/Dockerfile + push: true + cache-from: type=gha + cache-to: type=gha,mode=max + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} diff --git a/README.md b/README.md index 371d51f456..a66ee3dc20 100644 --- a/README.md +++ b/README.md @@ -39,7 +39,7 @@ Master **production-grade DevOps practices** through hands-on labs. 
Build, conta | 16 | 16 | Cluster Monitoring | Kube-Prometheus, Init Containers | | — | **Exam Alternative Labs** | | | | 17 | 17 | Edge Deployment | Fly.io, Global Distribution | -| 18 | 18 | Decentralized Storage | 4EVERLAND, IPFS, Web3 | +| 18 | 18 | Reproducible Builds | Nix, Deterministic Builds, Flakes | --- @@ -61,7 +61,7 @@ Don't want to take the exam? Complete **both** bonus labs: | Lab | Topic | Points | |-----|-------|--------| | **Lab 17** | Fly.io Edge Deployment | 20 pts | -| **Lab 18** | 4EVERLAND & IPFS | 20 pts | +| **Lab 18** | Reproducible Builds with Nix | 20 pts | **Requirements:** - Complete both labs (17 + 18 = 40 pts, replaces exam) @@ -142,7 +142,7 @@ Each lab is worth **10 points** (main tasks) + **2.5 points** (bonus). - StatefulSets, Monitoring **Exam Alternative (Labs 17-18)** -- Fly.io, 4EVERLAND/IPFS +- Fly.io, Nix Reproducible Builds diff --git a/app_python/README.md b/app_python/README.md index 25a9880577..7ac6e79cfd 100644 --- a/app_python/README.md +++ b/app_python/README.md @@ -1,5 +1,7 @@ # DevOps Info Service (Lab 1) +[![Python CI and Docker Release](https://github.com/sofiakulagina/DevOps-Core-Course/actions/workflows/python-ci.yml/badge.svg?branch=main)](https://github.com/sofiakulagina/DevOps-Core-Course/actions/workflows/python-ci.yml) + ## Overview This project implements a simple **DevOps info service** written in Python using **Flask**. The service exposes HTTP endpoints that return detailed information about the application, the underlying system, and its runtime health. It is the base for later labs (Docker, CI/CD, monitoring, persistence, etc.). 
@@ -14,9 +16,9 @@ This project implements a simple **DevOps info service** written in Python using ```bash python -m venv venv -source venv/bin/activate # On Windows: venv\Scripts\activate +source venv/bin/activate pip install -r requirements.txt -cp .env_example .env # create local env file from example +cp .env_example .env ``` ## Running the Application @@ -54,6 +56,57 @@ Configuration is done via environment variables: All configuration is read in `app.py` at startup, so restart the application after changing environment variables. +## Unit Testing + +### Framework Choice + +For this lab, the project uses Python `unittest`. + +Short comparison: +- `pytest`: concise syntax and rich plugin ecosystem, but adds an external dependency. +- `unittest`: part of the Python standard library, no additional package required. + +Why `unittest` was chosen: +- Works out of the box in minimal lab environments. +- Keeps dependencies small and predictable. +- Supports fixtures (`setUpClass`) and mocking (`unittest.mock`) needed for endpoint testing. + +### Test Structure + +Tests are located in `tests/test_app.py` and cover: +- `GET /` success response: + - expected top-level JSON fields, + - required nested fields and data types, + - request metadata (client IP and user-agent handling). +- `GET /health` success response: + - status, timestamp, uptime checks. +- Error responses: + - `404` JSON error for unknown route, + - simulated internal failures for `/` and `/health` returning JSON `500`. 
+ +### Run Tests Locally + +```bash +python3 -m venv .venv +source .venv/bin/activate +pip install -r requirements.txt +python -m unittest discover -s tests -v +``` + +Optional coverage (standard library): + +```bash +python -m trace --count --summary -m unittest discover -s tests -v +``` + +### Example Passing Output + +```text +Ran 6 tests in 0.018s + +OK +``` + ## Docker How to use the containerized application (patterns): @@ -67,3 +120,139 @@ Notes: - The container exposes port `5002` by default (see `app.py`). - The image runs as a non-root user for improved security. +## CI Workflow (GitHub Actions) + +### Workflow Overview + +Workflow file: `.github/workflows/python-ci.yml` + +It runs on: +- `push` to `main` and `lab3`, and `pull_request` into `main` for lint + tests. +- `push` of SemVer git tags (`vX.Y.Z`) for Docker build and push. +- manual run via `workflow_dispatch`. + +### Versioning Strategy + +Chosen strategy: **Semantic Versioning (SemVer)**. + +Why SemVer: +- Clear signal for breaking vs backward-compatible changes. +- Common convention for releases and container tags. + +Docker tags produced on `vX.Y.Z`: +- `X.Y.Z` (full version) +- `X.Y` (rolling minor) +- `latest` + +Example: +- `username/devops-info-service:1.2.3` +- `username/devops-info-service:1.2` +- `username/devops-info-service:latest` + +### Secrets Required + +Add these GitHub repository secrets: +- `DOCKERHUB_USERNAME` +- `DOCKERHUB_TOKEN` (Docker Hub access token) + +### Release Flow + +```bash +git tag v1.0.0 +git push origin v1.0.0 +``` + +The Docker job runs only on SemVer tags and pushes images with the tags above. + +## CI Best Practices and Security (Task 3) + +### Status Badge + +The README includes a GitHub Actions badge for `.github/workflows/python-ci.yml` showing pass/fail status for `main`. 
+ +Badge and workflow link: +- Badge: `https://github.com/sofiakulagina/DevOps-Core-Course/actions/workflows/python-ci.yml/badge.svg?branch=main` +- Workflow runs: `https://github.com/sofiakulagina/DevOps-Core-Course/actions/workflows/python-ci.yml` + +### Dependency Caching + +Implemented in workflow via `actions/setup-python@v5`: +- `cache: pip` +- `cache-dependency-path: app_python/requirements.txt` + +The workflow also writes install metrics into the Job Summary for each Python version: +- `cache-hit` (`true` or `false`) +- `install-seconds` (dependency installation time) + +Measured baseline from workflow summary: +- Python 3.11: `cache-hit=false`, `install-seconds=5` +- Python 3.12: `cache-hit=false`, `install-seconds=3` + +How speed improvement is measured: +1. Run workflow once after dependency change (cache miss baseline). +2. Run workflow again without changing `app_python/requirements.txt` (expected cache hit). +3. Compare `install-seconds` from Job Summary: + `improvement_percent = ((miss_seconds - hit_seconds) / miss_seconds) * 100` + +Current status: +- Baseline (miss) is recorded. +- Next run is needed to capture hit values and final percentage. + +Metrics screenshot: +- Link: `docs/screenshots/metrics_lab3.png` + +![Dependency metrics screenshot](docs/screenshots/metrics_lab3.png) + +### Snyk Security Scanning + +Integrated with `snyk/actions/setup@master` and `snyk test` CLI command in a dedicated `security` job. + +Configuration: +- Secret required: `SNYK_TOKEN` +- Scan target: `app_python/requirements.txt` +- Threshold: `high` (`--severity-threshold=high`) +- Mode: non-blocking (`continue-on-error: true`) to keep visibility without blocking delivery during lab work. + +If `SNYK_TOKEN` is missing, workflow prints a clear skip message. 
+ +Security results documentation: +- Latest scan status: `Succeeded` +- Scan output: `Tested 7 dependencies for known issues, no vulnerable paths found.` +- Vulnerability count: `0` (for threshold `high`) +- Vulnerability handling policy: upgrade direct dependencies first; if no fix exists, track risk in lab notes and keep non-blocking scan mode. + +Snyk screenshot: +- Link: `docs/screenshots/snyk_lab3.png` + +![Snyk scan screenshot](docs/screenshots/snyk_lab3.png) + +How to get `SNYK_TOKEN`: +1. Open `https://app.snyk.io` +2. Go to `Account Settings` -> `API Token` +3. Copy token and add GitHub secret: + `Repository Settings` -> `Secrets and variables` -> `Actions` -> `New repository secret` +4. Secret name must be `SNYK_TOKEN` + +### Additional CI Best Practices Applied + +Implemented practices: +- **Concurrency control:** cancels outdated runs for same ref (`cancel-in-progress: true`). +- **Least-privilege permissions:** workflow-level `permissions: contents: read`. +- **Matrix testing:** tests run on Python `3.11` and `3.12`. +- **Fail-fast matrix:** stops quickly when one matrix leg fails. +- **Job dependencies:** Docker job requires successful `test` and `security` jobs. +- **Docker layer cache:** `cache-from/cache-to type=gha` for faster image builds. +- **Manual trigger:** `workflow_dispatch` for controlled reruns. +- **Timeouts:** explicit `timeout-minutes` per job to avoid stuck pipelines. 
+ +### Docker Build Evidence + +From `Build and Push Docker Image` summary: +- Build status: `completed` +- Build duration: `17s` +- Docker build cache usage in that run: `0%` + +Final CI/CD execution screenshot: +- Link: `docs/screenshots/artifacts_lab3.png` + +![Final CI/CD screenshot](docs/screenshots/artifacts_lab3.png) diff --git a/app_python/docs/screenshots/artifacts_lab3.png b/app_python/docs/screenshots/artifacts_lab3.png new file mode 100644 index 0000000000..47e8eea188 Binary files /dev/null and b/app_python/docs/screenshots/artifacts_lab3.png differ diff --git a/app_python/docs/screenshots/metrics_lab3.png b/app_python/docs/screenshots/metrics_lab3.png new file mode 100644 index 0000000000..f778be014b Binary files /dev/null and b/app_python/docs/screenshots/metrics_lab3.png differ diff --git a/app_python/docs/screenshots/snyk_lab3.png b/app_python/docs/screenshots/snyk_lab3.png new file mode 100644 index 0000000000..f71cf425ea Binary files /dev/null and b/app_python/docs/screenshots/snyk_lab3.png differ diff --git a/app_python/tests/test_app.py b/app_python/tests/test_app.py index 222f4ab610..3eced19bbc 100644 --- a/app_python/tests/test_app.py +++ b/app_python/tests/test_app.py @@ -1,6 +1,8 @@ import os import sys + import unittest +from unittest.mock import patch # Allow importing app_python/app.py as a module named "app" @@ -8,79 +10,124 @@ APP_DIR = os.path.dirname(TESTS_DIR) sys.path.insert(0, APP_DIR) -from app import app as flask_app # noqa: E402 +import app as app_module # noqa: E402 class DevOpsInfoServiceTests(unittest.TestCase): @classmethod def setUpClass(cls): - flask_app.testing = True - cls.client = flask_app.test_client() - - def test_root_endpoint_returns_expected_structure(self): - resp = self.client.get("/") - self.assertEqual(resp.status_code, 200) - self.assertTrue(resp.is_json) - - data = resp.get_json() + app_module.app.config.update( + TESTING=False, + PROPAGATE_EXCEPTIONS=False, + ) + cls.client = app_module.app.test_client() + 
+ def test_root_endpoint_returns_expected_structure_and_types(self): + response = self.client.get("/") + self.assertEqual(response.status_code, 200) + self.assertTrue(response.is_json) + + data = response.get_json() self.assertIsInstance(data, dict) - # Top-level keys for key in ("service", "system", "runtime", "request", "endpoints"): self.assertIn(key, data) - # Service self.assertEqual(data["service"]["name"], "devops-info-service") self.assertEqual(data["service"]["version"], "1.0.0") self.assertEqual(data["service"]["framework"], "Flask") + self.assertIsInstance(data["service"]["description"], str) - # System - self.assertIn("hostname", data["system"]) - self.assertIn("platform", data["system"]) - self.assertIn("platform_version", data["system"]) - self.assertIn("architecture", data["system"]) - self.assertIn("cpu_count", data["system"]) - self.assertIn("python_version", data["system"]) + self.assertIsInstance(data["system"]["hostname"], str) + self.assertIsInstance(data["system"]["platform"], str) + self.assertIsInstance(data["system"]["platform_version"], str) + self.assertIsInstance(data["system"]["architecture"], str) + self.assertIsInstance(data["system"]["cpu_count"], int) + self.assertIsInstance(data["system"]["python_version"], str) - # Runtime - self.assertGreaterEqual(int(data["runtime"]["uptime_seconds"]), 0) + self.assertGreaterEqual(data["runtime"]["uptime_seconds"], 0) self.assertIsInstance(data["runtime"]["uptime_human"], str) self.assertIsInstance(data["runtime"]["current_time"], str) self.assertEqual(data["runtime"]["timezone"], "UTC") - # Request self.assertEqual(data["request"]["method"], "GET") self.assertEqual(data["request"]["path"], "/") self.assertIn("client_ip", data["request"]) - self.assertIn("user_agent", data["request"]) - - # Endpoints list - endpoints = data["endpoints"] - self.assertIsInstance(endpoints, list) - paths = {e.get("path") for e in endpoints if isinstance(e, dict)} - self.assertIn("/", paths) - 
self.assertIn("/health", paths) + self.assertIsInstance(data["request"]["user_agent"], str) + + self.assertIsInstance(data["endpoints"], list) + self.assertGreater(len(data["endpoints"]), 0) + for endpoint in data["endpoints"]: + self.assertIn("path", endpoint) + self.assertIn("method", endpoint) + self.assertIn("description", endpoint) + + def test_root_endpoint_extracts_forwarded_ip_and_user_agent(self): + response = self.client.get( + "/", + headers={ + "X-Forwarded-For": "203.0.113.10", + "User-Agent": "unit-test-agent/1.0", + }, + environ_base={"REMOTE_ADDR": "127.0.0.1"}, + ) + self.assertEqual(response.status_code, 200) + data = response.get_json() + + self.assertEqual(data["request"]["client_ip"], "203.0.113.10") + self.assertEqual(data["request"]["user_agent"], "unit-test-agent/1.0") def test_health_endpoint_returns_expected_payload(self): - resp = self.client.get("/health") - self.assertEqual(resp.status_code, 200) - self.assertTrue(resp.is_json) + response = self.client.get("/health") + self.assertEqual(response.status_code, 200) + self.assertTrue(response.is_json) - data = resp.get_json() + data = response.get_json() self.assertEqual(data["status"], "healthy") self.assertIsInstance(data["timestamp"], str) - self.assertGreaterEqual(int(data["uptime_seconds"]), 0) + self.assertGreaterEqual(data["uptime_seconds"], 0) def test_not_found_returns_json_404(self): - resp = self.client.get("/does-not-exist") - self.assertEqual(resp.status_code, 404) - self.assertTrue(resp.is_json) + response = self.client.get("/does-not-exist") + self.assertEqual(response.status_code, 404) + self.assertTrue(response.is_json) - data = resp.get_json() + data = response.get_json() self.assertEqual(data["error"], "Not Found") - self.assertIn("message", data) + self.assertEqual(data["message"], "Endpoint does not exist") + + def test_root_returns_json_500_on_internal_error(self): + with patch.object( + app_module, "get_system_info", side_effect=RuntimeError("simulated failure") + ): 
+ response = self.client.get("/") + + self.assertEqual(response.status_code, 500) + self.assertTrue(response.is_json) + self.assertEqual( + response.get_json(), + { + "error": "Internal Server Error", + "message": "An unexpected error occurred", + }, + ) + + def test_health_returns_json_500_on_internal_error(self): + with patch.object( + app_module, "get_uptime", side_effect=RuntimeError("simulated failure") + ): + response = self.client.get("/health") + + self.assertEqual(response.status_code, 500) + self.assertTrue(response.is_json) + self.assertEqual( + response.get_json(), + { + "error": "Internal Server Error", + "message": "An unexpected error occurred", + }, + ) if __name__ == "__main__": - unittest.main() - + unittest.main(verbosity=2) diff --git a/labs/lab18.md b/labs/lab18.md index 3491394659..864df70baa 100644 --- a/labs/lab18.md +++ b/labs/lab18.md @@ -1,430 +1,1306 @@ -# Lab 18 — Decentralized Hosting with 4EVERLAND & IPFS +# Lab 18 — Reproducible Builds with Nix ![difficulty](https://img.shields.io/badge/difficulty-intermediate-yellow) -![topic](https://img.shields.io/badge/topic-Web3%20Infrastructure-blue) -![points](https://img.shields.io/badge/points-20-orange) -![type](https://img.shields.io/badge/type-Exam%20Alternative-purple) +![topic](https://img.shields.io/badge/topic-Nix%20%26%20Reproducibility-blue) +![points](https://img.shields.io/badge/points-12-orange) -> Deploy content to the decentralized web using IPFS and 4EVERLAND for permanent, censorship-resistant hosting. +> **Goal:** Learn to create truly reproducible builds using Nix, eliminating "works on my machine" problems and achieving bit-for-bit reproducibility. +> **Deliverable:** A PR/MR from `feature/lab18` to the course repo with `labs/submission18.md` containing build artifacts, hash comparisons, Nix expressions, and analysis. Submit the PR/MR link via Moodle. 
-## Overview - -The decentralized web (Web3) offers an alternative to traditional hosting where content is stored across a distributed network rather than centralized servers. IPFS (InterPlanetary File System) is the foundation, and 4EVERLAND provides a user-friendly gateway to this ecosystem. +--- -**This is an Exam Alternative Lab** — Complete both Lab 17 and Lab 18 to replace the final exam. +## Overview -**What You'll Learn:** -- IPFS fundamentals and content addressing -- Decentralized storage concepts -- Pinning services and persistence -- 4EVERLAND hosting platform -- Centralized vs decentralized trade-offs +In this lab you will practice: +- Installing Nix and understanding the Nix philosophy +- Writing Nix derivations to build software reproducibly +- Creating reproducible Docker images using Nix +- Using Nix Flakes for modern, declarative dependency management +- **Comparing Nix with your previous work from Labs 1-2** -**Prerequisites:** Basic understanding of web hosting, completed Docker lab +**Why Nix?** Traditional build tools (Docker, npm, pip, etc.) claim to be reproducible, but they're not: +- `Dockerfile` with `apt-get install nodejs` gets different versions over time +- `pip install -r requirements.txt` without hash pinning can vary +- Docker builds include timestamps and vary across machines -**Tech Stack:** IPFS | 4EVERLAND | Docker | Content Addressing +**Nix solves this:** Every build is isolated in a sandbox with exact dependencies. The same Nix expression produces **identical binaries** on any machine, forever. 
-**Provided Files:** -- `labs/lab18/index.html` — A beautiful course landing page ready to deploy +**Building on Your Work:** Throughout this lab, you'll revisit your DevOps Info Service from Lab 1 and compare: +- **Lab 1**: `requirements.txt` vs Nix derivations for dependency management +- **Lab 2**: Traditional `Dockerfile` vs Nix `dockerTools` for containerization +- **Lab 10** *(bonus task)*: Helm `values.yaml` version pinning vs Nix Flakes locking --- -## Exam Alternative Requirements +## Prerequisites -| Requirement | Details | -|-------------|---------| -| **Deadline** | 1 week before exam date | -| **Minimum Score** | 16/20 points | -| **Must Complete** | Both Lab 17 AND Lab 18 | -| **Total Points** | 40 pts (replaces 40 pt exam) | +- **Required:** Completed Labs 1-16 (all required course labs) +- **Key Labs Referenced:** + - Lab 1: Python DevOps Info Service (you'll rebuild with Nix) + - Lab 2: Docker containerization (you'll compare with Nix dockerTools) + - Lab 10: Helm charts (you'll compare version pinning with Nix Flakes) +- Linux, macOS, or WSL2 +- Basic understanding of package managers +- Your `app_python/` directory from Lab 1-2 available --- ## Tasks -### Task 1 — IPFS Fundamentals (3 pts) +### Task 1 — Build Reproducible Python App (Revisiting Lab 1) (6 pts) + +**Objective:** Use Nix to build your DevOps Info Service from Lab 1 and compare Nix's reproducibility guarantees with traditional `pip install -r requirements.txt`. + +**Why This Matters:** You've already built this app in Lab 1 using `requirements.txt`. Now you'll see how Nix provides **true reproducibility** that `pip` cannot guarantee - the same derivation produces bit-for-bit identical results across different machines and times. 
+ +#### 1.1: Install Nix Package Manager + +> ⚠️ **Important Installation Requirements:** +> - Requires sudo/admin access on your machine +> - Creates `/nix` directory at system root (Linux/macOS) or `C:\nix` (Windows WSL) +> - Modifies shell configuration files (`~/.bashrc`, `~/.zshrc`, etc.) +> - Installation size: ~500MB-1GB for base system +> - **Cannot be installed in home directory only** +> - Uninstallation requires manual cleanup (see [official guide](https://nixos.org/manual/nix/stable/installation/uninstall.html)) + +1. **Install Nix using the Determinate Systems installer (recommended):** + + ```bash + curl --proto '=https' --tlsv1.2 -sSf -L https://install.determinate.systems/nix | sh -s -- install + ``` + + > **Why Determinate Nix?** It enables flakes by default and provides better defaults for modern Nix usage. + +
+ 🐧 Alternative: Official Nix installer + + ```bash + sh <(curl -L https://nixos.org/nix/install) --daemon + ``` + + Then enable flakes by adding to `~/.config/nix/nix.conf`: + ``` + experimental-features = nix-command flakes + ``` + +
+ +2. **Verify Installation:** + + ```bash + nix --version + ``` + + You should see Nix 2.x or higher. + + **Restart your terminal** after installation to load Nix into your PATH. + +3. **Test Basic Nix Usage:** + + ```bash + # Try running a program without installing it + nix run nixpkgs#hello + ``` + + This downloads and runs `hello` without installing it permanently. + +#### 1.2: Prepare Your Python Application + +1. **Copy your Lab 1 app to the lab18 directory:** + + ```bash + mkdir -p labs/lab18/app_python + cp -r app_python/* labs/lab18/app_python/ + cd labs/lab18/app_python + ``` + + You should have: + - `app.py` - Your DevOps Info Service + - `requirements.txt` - Your Python dependencies (Flask/FastAPI) + +2. **Review your traditional workflow (Lab 1):** + + Recall how you built this in Lab 1: + ```bash + python -m venv venv + source venv/bin/activate + pip install -r requirements.txt + python app.py + ``` + + **Problems with this approach:** + - Different Python versions on different machines + - `pip install` without hashes can pull different package versions + - Virtual environment is not portable + - No guarantee of reproducibility over time + +#### 1.3: Write a Nix Derivation for Your Python App + +1. **Create a Nix derivation:** + + Create `default.nix` in `labs/lab18/app_python/`: + +
+ 📚 Where to learn Nix Python derivation syntax + + - [nix.dev - Python](https://nix.dev/tutorials/nixos/building-and-running-python-apps) + - [nixpkgs Python documentation](https://nixos.org/manual/nixpkgs/stable/#python) + - [Nix Pills - Chapter 6: Our First Derivation](https://nixos.org/guides/nix-pills/our-first-derivation.html) + + **Key concepts you need:** + - `python3Packages.buildPythonApplication` - Function to build Python apps + - `propagatedBuildInputs` - Python dependencies (Flask/FastAPI) + - `makeWrapper` - Wraps Python script with interpreter + - `pname` - Package name + - `version` - Package version + - `src` - Source code location (use `./.` for current directory) + - `format = "other"` - For apps without setup.py + + **Translating requirements.txt to Nix:** + Your Lab 1 `requirements.txt` might have: + ``` + Flask==3.1.0 + Werkzeug>=2.0 + click + ``` + + In Nix, you reference packages from nixpkgs (not exact PyPI versions): + - `Flask==3.1.0` → `pkgs.python3Packages.flask` + - `fastapi==0.115.0` → `pkgs.python3Packages.fastapi` + - `uvicorn[standard]` → `pkgs.python3Packages.uvicorn` + + **Note:** Nix uses versions from the pinned nixpkgs, not PyPI directly. This is intentional for reproducibility. + + **Example structure (Flask):** + ```nix + { pkgs ? 
import <nixpkgs> {} }: + + pkgs.python3Packages.buildPythonApplication { + pname = "devops-info-service"; + version = "1.0.0"; + src = ./.; + + format = "other"; + + propagatedBuildInputs = with pkgs.python3Packages; [ + flask + ]; + + nativeBuildInputs = [ pkgs.makeWrapper ]; + + installPhase = '' + mkdir -p $out/bin + cp app.py $out/bin/devops-info-service + + # Wrap with Python interpreter so it can execute + wrapProgram $out/bin/devops-info-service \ + --prefix PYTHONPATH : "$PYTHONPATH" + ''; + } + ``` + + **Example for FastAPI:** + ```nix + propagatedBuildInputs = with pkgs.python3Packages; [ + fastapi + uvicorn + ]; + ``` + + **Hint:** If you get "command not found" errors, make sure you're using `makeWrapper` in the installPhase. + +
+ +2. **Build your application with Nix:** + + ```bash + nix-build + ``` + + This creates a `result` symlink pointing to the Nix store path. + +3. **Run the Nix-built application:** + + ```bash + ./result/bin/devops-info-service + ``` + + Visit `http://localhost:5000` (or your configured port) - it should work identically to your Lab 1 version! + +#### 1.4: Prove Reproducibility (Compare with Lab 1 approach) + +1. **Record the Nix store path:** + + ```bash + readlink result + ``` + + Note the store path (e.g., `/nix/store/abc123-devops-info-service-1.0.0/`) + +2. **Build again and compare:** + + ```bash + rm result + nix-build + readlink result + ``` + + **Observation:** The store path is **identical**! But wait - did Nix rebuild it or reuse it? + + **Answer: Nix reused the cached build!** Same inputs = same hash = reuse existing store path. + +3. **Force an actual rebuild to prove reproducibility:** + + ```bash + # First, find your build's store path + STORE_PATH=$(readlink result) + echo "Original store path: $STORE_PATH" + + # Delete it from the Nix store + nix-store --delete $STORE_PATH + + # Now rebuild (this forces actual compilation) + rm result + nix-build + readlink result + ``` + + **Observation:** Same store path returns! Nix rebuilt it from scratch and got the exact same hash. -**Objective:** Understand IPFS concepts and run a local node. +3. **Compare with traditional pip approach:** -**Requirements:** + **Demonstrate pip's limitations:** -1. **Study IPFS Concepts** - - Content addressing vs location addressing - - CIDs (Content Identifiers) - - Pinning and garbage collection - - IPFS gateways + ```bash + # Test 1: Install without version pins (shows immediate non-reproducibility) + echo "flask" > requirements-unpinned.txt # No version specified -2. 
**Run Local IPFS Node** - - Use Docker to run IPFS node - - Access the Web UI - - Understand node configuration + python -m venv venv1 + source venv1/bin/activate + pip install -r requirements-unpinned.txt + pip freeze | grep -i flask > freeze1.txt + deactivate -3. **Add Content Locally** - - Add a file to your local IPFS node - - Retrieve the CID - - Access via local gateway + # Simulate time passing: clear pip cache + pip cache purge 2>/dev/null || rm -rf ~/.cache/pip + + python -m venv venv2 + source venv2/bin/activate + pip install -r requirements-unpinned.txt + pip freeze | grep -i flask > freeze2.txt + deactivate + + # Compare Flask versions + diff freeze1.txt freeze2.txt + ``` + + **Observation:** + - Without version pins, you get whatever's latest + - **Even with pinned versions** in requirements.txt, you only pin direct dependencies + - Transitive dependencies (dependencies of your dependencies) can still drift + - Over weeks/months, `pip install -r requirements.txt` can produce different environments + + **The fundamental problem:** + ``` + Lab 1 approach: requirements.txt pins what YOU install + Problem: Doesn't pin what FLASK installs (Werkzeug, Click, etc.) + Result: Different machines = different transitive dependency versions + + Nix approach: Pins EVERYTHING in the entire dependency tree + Result: Bit-for-bit identical on all machines, forever + ``` + +4. **Understand Nix's caching behavior:** + + **Key insight:** Nix uses content-addressable storage: + ``` + Store path format: /nix/store/-- + Example: /nix/store/abc123xyz-devops-info-service-1.0.0 + + The is computed from: + - All source code + - All dependencies (transitively!) + - Build instructions + - Compiler flags + - Everything needed to reproduce the build + + Same inputs → Same hash → Reuse existing build (cache hit) + Different inputs → Different hash → New build required + ``` + +5. 
**Nix's guarantee:** + + ```bash + # Hash the entire Nix output + nix-hash --type sha256 result + ``` + + This hash will be **identical** on any machine, any time, forever - if the inputs don't change. + + This is why Nix can safely share binary caches (cache.nixos.org) - the hash proves the content! + +**📊 Comparison Table - Lab 1 vs Lab 18:** + +| Aspect | Lab 1 (pip + venv) | Lab 18 (Nix) | +|--------|-------------------|--------------| +| Python version | System-dependent | Pinned in derivation | +| Dependency resolution | Runtime (`pip install`) | Build-time (pure) | +| Reproducibility | Approximate (with lockfiles) | Bit-for-bit identical | +| Portability | Requires same OS + Python | Works anywhere Nix runs | +| Binary cache | No | Yes (cache.nixos.org) | +| Isolation | Virtual environment | Sandboxed build | +| Store path | N/A | Content-addressable hash | + +#### 1.5: Optional - Go Application (If you completed Lab 1 Bonus)
-💡 Hints +🎁 For students who built the Go version in Lab 1 Bonus -**IPFS Concepts:** -- **Content Addressing:** Files identified by hash of content, not location -- **CID:** Unique identifier derived from content hash (e.g., `QmXxx...` or `bafyxxx...`) -- **Pinning:** Marking content to keep it (prevent garbage collection) -- **Gateway:** HTTP interface to IPFS network +If you implemented the compiled language bonus in Lab 1, you can also build it with Nix: -**Run IPFS with Docker:** -```bash -docker run -d --name ipfs \ - -p 4001:4001 \ - -p 8080:8080 \ - -p 5001:5001 \ - ipfs/kubo:latest - -# Web UI at http://localhost:5001/webui -# Gateway at http://localhost:8080 -``` +1. **Copy your Go app:** + ```bash + mkdir -p labs/lab18/app_go + cp -r app_go/* labs/lab18/app_go/ + cd labs/lab18/app_go + ``` -**Add Content:** -```bash -# Create test file -echo "Hello IPFS from DevOps course!" > hello.txt +2. **Create `default.nix` for Go:** + ```nix + { pkgs ? import <nixpkgs> {} }: -# Add to IPFS -docker exec ipfs ipfs add /hello.txt -# Returns: added QmXxx... hello.txt + pkgs.buildGoModule { + pname = "devops-info-service-go"; + version = "1.0.0"; + src = ./.; -# Access via gateway -curl http://localhost:8080/ipfs/QmXxx... -``` + vendorHash = null; # or use pkgs.lib.fakeHash if you have dependencies + } + ``` -**Resources:** -- [IPFS Docs](https://docs.ipfs.tech/) -- [IPFS Concepts](https://docs.ipfs.tech/concepts/) +3. **Build and compare binary size:** + ```bash + nix-build + ls -lh result/bin/ + ``` + + Compare this with your multi-stage Docker build from Lab 2 Bonus!
+In `labs/submission18.md`, document: +- Installation steps and verification output +- Your `default.nix` file with explanations of each field +- Store path from multiple builds (prove they're identical) +- Comparison table: `pip install` vs Nix derivation +- Why does `requirements.txt` provide weaker guarantees than Nix? +- Screenshots showing your Lab 1 app running from Nix-built version +- Explanation of the Nix store path format and what each part means +- **Reflection:** How would Nix have helped in Lab 1 if you had used it from the start? + --- -### Task 2 — 4EVERLAND Setup (3 pts) +### Task 2 — Reproducible Docker Images (Revisiting Lab 2) (4 pts) + +**Objective:** Use Nix's `dockerTools` to containerize your DevOps Info Service and compare with your traditional Dockerfile from Lab 2. + +**Why This Matters:** In Lab 2, you created a `Dockerfile` that built your Python app. While Docker provides isolation, it's **not reproducible**: +- Build timestamps differ between builds +- Base image tags like `python:3.13-slim` can point to different versions over time +- `apt-get` installs latest packages, which change +- Two builds of the same Dockerfile can produce different image hashes + +Nix's `dockerTools` creates **truly reproducible** container images with content-addressable layers. + +#### 2.1: Review Your Lab 2 Dockerfile + +1. **Find your Dockerfile from Lab 2:** + + ```bash + # From repository root directory + cat app_python/Dockerfile + ``` + + You likely have something like: + ```dockerfile + FROM python:3.13-slim + RUN useradd -m appuser + WORKDIR /app + COPY requirements.txt . + RUN pip install -r requirements.txt + COPY app.py . + USER appuser + EXPOSE 5000 + CMD ["python", "app.py"] + ``` + +
+ 💡 Don't have your Lab 2 Dockerfile? + + If you lost your Lab 2 work, create a minimal Dockerfile now: + + ```dockerfile + FROM python:3.13-slim + WORKDIR /app + COPY requirements.txt app.py ./ + RUN pip install -r requirements.txt + EXPOSE 5000 + CMD ["python", "app.py"] + ``` + + Save as `app_python/Dockerfile`. + +
+ +2. **Test Lab 2 Dockerfile reproducibility:** + + ```bash + # Make sure you're in repository root + cd ~/path/to/DevOps-Core-Course # Adjust to your path + + # Build from app_python directory + docker build -t lab2-app:v1 ./app_python + docker inspect lab2-app:v1 | grep Created + + # Wait a few seconds, then rebuild + sleep 5 + docker build -t lab2-app:v2 ./app_python + docker inspect lab2-app:v2 | grep Created + ``` + + **Observation:** Different creation timestamps! The image hashes are different even though the content is identical. + +#### 2.2: Build Docker Image with Nix + +1. **Create a Nix Docker image using `dockerTools`:** + + Create `labs/lab18/app_python/docker.nix`: + +
+ 📚 Where to learn about dockerTools + + - [nix.dev - Building Docker images](https://nix.dev/tutorials/nixos/building-and-running-docker-images.html) + - [nixpkgs dockerTools documentation](https://ryantm.github.io/nixpkgs/builders/images/dockertools/) + + **Key concepts:** + - `pkgs.dockerTools.buildLayeredImage` - Builds efficient layered images + - `name` - Image name + - `tag` - Image tag (optional, defaults to latest) + - `contents` - Packages/derivations to include in the image + - `config.Cmd` - Default command to run + - `config.ExposedPorts` - Ports to expose + + **Critical for reproducibility:** + - **DO NOT** use `created = "now"` - this breaks reproducibility! + - **DO** use `created = "1970-01-01T00:00:01Z"` for reproducible builds + - **DO** use exact derivations (from Task 1) instead of arbitrary packages + + **Example structure:** + ```nix + { pkgs ? import {} }: + + let + app = import ./default.nix { inherit pkgs; }; + in + pkgs.dockerTools.buildLayeredImage { + name = "devops-info-service-nix"; + tag = "1.0.0"; + + contents = [ app ]; + + config = { + Cmd = [ "${app}/bin/devops-info-service" ]; + ExposedPorts = { + "5000/tcp" = {}; + }; + }; + + created = "1970-01-01T00:00:01Z"; # Reproducible timestamp + } + ``` + +
+ +2. **Build the Nix Docker image:** + + ```bash + cd labs/lab18/app_python + nix-build docker.nix + ``` + + This creates a tarball in `result`. + +3. **Load into Docker:** + + ```bash + docker load < result + ``` + + Output shows the image was loaded with a specific tag. + +4. **Run both containers side-by-side:** + + ```bash + # First, clean up any existing containers to avoid port conflicts + docker stop lab2-container nix-container 2>/dev/null || true + docker rm lab2-container nix-container 2>/dev/null || true + + # Run Lab 2 traditional Docker image on port 5000 + docker run -d -p 5000:5000 --name lab2-container lab2-app:v1 + + # Run Nix-built image on port 5001 (mapped to container's 5000) + docker run -d -p 5001:5000 --name nix-container devops-info-service-nix:1.0.0 + ``` + + Test both: + ```bash + curl http://localhost:5000/health # Lab 2 version + curl http://localhost:5001/health # Nix version + ``` + + Both should work identically! + + **Troubleshooting:** + - If port 5000 is in use: `lsof -i :5000` to find the process + - Container won't start: Check logs with `docker logs lab2-container` + - Permission denied: Make sure Docker daemon is running + +#### 2.3: Compare Reproducibility - Lab 2 vs Lab 18 + +**Test 1: Rebuild Reproducibility** -**Objective:** Set up 4EVERLAND account and explore the platform. +1. **Rebuild Nix image multiple times:** -**Requirements:** + ```bash + rm result + nix-build docker.nix + sha256sum result -1. **Create Account** - - Sign up at [4everland.org](https://www.4everland.org/) - - Connect with GitHub or wallet - - Explore dashboard + rm result + nix-build docker.nix + sha256sum result + ``` -2. **Understand Services** - - Hosting: Deploy websites/apps - - Storage: IPFS pinning - - Gateway: Access IPFS content + **Observation:** Identical SHA256 hashes! The tarball is bit-for-bit identical. -3. **Explore Free Tier** - - Understand limits and capabilities - - Review pricing for reference +2. 
**Compare with Lab 2 Dockerfile:** + + ```bash + # Make sure you're in repository root + # Build Lab 2 Dockerfile twice and compare saved image hashes + + docker build -t lab2-app:test1 ./app_python/ + docker save lab2-app:test1 | sha256sum + + sleep 2 # Wait a moment + + docker build -t lab2-app:test2 ./app_python/ + docker save lab2-app:test2 | sha256sum + ``` + + **Observation:** Different hashes! Even though the Dockerfile and source are identical, Lab 2's approach is not reproducible. + +**Test 2: Image Size Comparison** + +```bash +docker images | grep -E "lab2-app|devops-info-service-nix" +``` + +Create a comparison table: + +| Metric | Lab 2 Dockerfile | Lab 18 Nix dockerTools | +|--------|------------------|------------------------| +| Image size | ~150MB (with python:3.13-slim) | ~50-80MB (minimal closure) | +| Reproducibility | ❌ Different hashes each build | ✅ Identical hashes | +| Build caching | Layer-based (timestamp-dependent) | Content-addressable | +| Base image dependency | Yes (python:3.13-slim) | No base image needed | + +**Test 3: Layer Analysis** + +1. **Examine Lab 2 image layers:** + + ```bash + docker history lab2-app:v1 + ``` + + Note the timestamps in the "CREATED" column - they vary between builds! + +2. **Examine Nix image layers:** + + ```bash + docker history devops-info-service-nix:1.0.0 + ``` + + Nix uses content-addressable layers - same content = same layer hash. + +#### 2.4: Advanced Comparison - Multi-Stage Builds
-💡 Hints +🎁 Optional: Compare with Lab 2 Bonus Multi-Stage Build -**4EVERLAND Services:** -- **Hosting:** Deploy from Git repos, automatic builds -- **Bucket (Storage):** Upload files, get IPFS CIDs -- **Gateway:** Access content via 4everland.link +If you completed the Lab 2 bonus with Go and multi-stage builds, you can compare: -**Dashboard:** -- Projects: Your deployed sites -- Bucket: File storage -- Domains: Custom domain setup +**Your Lab 2 multi-stage Dockerfile:** +```dockerfile +FROM golang:1.22 AS builder +COPY . . +RUN go build -o app main.go -**Free Tier Includes:** -- 100 deployments/month -- 5GB storage -- 100GB bandwidth +FROM alpine:latest +COPY --from=builder /app/app /app +ENTRYPOINT ["/app"] +``` + +**Problems:** +- `golang:1.22` and `alpine:latest` change over time +- Build includes timestamps +- Not reproducible across machines + +**Nix equivalent (fully reproducible):** +```nix +pkgs.dockerTools.buildLayeredImage { + name = "go-app-nix"; + contents = [ goApp ]; # Built in Task 1.5 + config.Cmd = [ "${goApp}/bin/go-app" ]; + created = "1970-01-01T00:00:01Z"; +} +``` -**Resources:** -- [4EVERLAND Docs](https://docs.4everland.org/) +Same result size, but **fully reproducible**!
+**📊 Comprehensive Comparison - Lab 2 vs Lab 18:** + +| Aspect | Lab 2 Traditional Dockerfile | Lab 18 Nix dockerTools | +|--------|------------------------------|------------------------| +| **Base images** | `python:3.13-slim` (changes over time) | No base image (pure derivations) | +| **Timestamps** | Different on each build | Fixed or deterministic | +| **Package installation** | `pip install` at build time | Nix store paths (immutable) | +| **Reproducibility** | ❌ Same Dockerfile → Different images | ✅ Same docker.nix → Identical images | +| **Caching** | Layer-based (breaks on timestamp) | Content-addressable (perfect caching) | +| **Image size** | ~150MB+ with full base image | ~50-80MB with minimal closure | +| **Portability** | Requires Docker | Requires Nix (then loads to Docker) | +| **Security** | Base image vulnerabilities | Minimal dependencies, easier auditing | +| **Lab 2 Learning** | Best practices, non-root user | Build on Lab 2 knowledge | + +In `labs/submission18.md`, document: +- Your `docker.nix` file with explanations of each field +- Side-by-side comparison: Lab 2 Dockerfile vs Nix docker.nix +- SHA256 hash comparison proving Nix reproducibility +- Image size comparison table with analysis +- `docker history` output for both approaches +- Screenshots showing both containers running simultaneously +- **Analysis:** Why can't traditional Dockerfiles achieve bit-for-bit reproducibility? +- **Reflection:** If you could redo Lab 2 with Nix, what would you do differently? +- Practical scenarios where Nix's reproducibility matters (CI/CD, security audits, rollbacks) + --- -### Task 3 — Deploy Static Content (4 pts) +### Bonus Task — Modern Nix with Flakes (Includes Lab 10 Comparison) (2 pts) + +**Objective:** Modernize your Nix expressions using Flakes for better dependency locking and reproducibility. Compare Nix Flakes with Helm's version pinning approach from Lab 10. 
+ +**Why This Matters:** Nix Flakes are the modern standard (2026) for Nix projects. They provide: +- Automatic dependency locking via `flake.lock` +- Standardized project structure +- Better reproducibility across time +- Easier sharing and collaboration + +**Comparison with Lab 10:** In Lab 10 (Helm), you used `values.yaml` to pin image versions. Flakes take this concept further by locking **all** dependencies, not just container images. + +#### Bonus.1: Convert to Flake + +1. **Create a `flake.nix`:** + + Create `labs/lab18/app_python/flake.nix`: + +
+ 📚 Where to learn about Flakes + + - [Zero to Nix - Flakes](https://zero-to-nix.com/concepts/flakes) + - [NixOS Wiki - Flakes](https://wiki.nixos.org/wiki/Flakes) + - [Nix Flakes explained](https://nix.dev/concepts/flakes) + + **Key structure:** + ```nix + { + description = "DevOps Info Service - Reproducible Build"; + + inputs = { + nixpkgs.url = "github:NixOS/nixpkgs/nixos-24.11"; # Pin exact nixpkgs version + }; + + outputs = { self, nixpkgs }: + let + # ⚠️ Architecture note: This example uses x86_64-linux + # - Works on: Linux (x86_64), WSL2 + # - Mac Intel: Change to "x86_64-darwin" + # - Mac M1/M2/M3: Change to "aarch64-darwin" + # - For multi-system support, see: https://github.com/numtide/flake-utils + system = "x86_64-linux"; + pkgs = nixpkgs.legacyPackages.${system}; + in + { + packages.${system} = { + default = import ./default.nix { inherit pkgs; }; + dockerImage = import ./docker.nix { inherit pkgs; }; + }; + + # Development shell with all dependencies + devShells.${system}.default = pkgs.mkShell { + buildInputs = with pkgs; [ + python313 + python313Packages.flask # or fastapi + ]; + }; + }; + } + ``` + + **Platform-specific adjustments:** + - **Linux/WSL2**: Use `system = "x86_64-linux";` (shown above) + - **Mac Intel**: Use `system = "x86_64-darwin";` + - **Mac ARM (M1/M2/M3)**: Use `system = "aarch64-darwin";` + + **Hint:** Use `nix flake init` to generate a template, then modify it. + +
+ +2. **Generate lock file:** + + ```bash + cd labs/lab18/app_python + nix flake update + ``` + + This creates `flake.lock` with pinned dependencies. + +3. **Build using flake:** + + ```bash + nix build # Builds default package + nix build .#dockerImage # Builds Docker image + ./result/bin/devops-info-service # Run the app + ``` + +#### Bonus.2: Compare with Lab 10 Helm Values + +**Lab 10 Helm approach to version pinning:** + +In `k8s/mychart/values.yaml`: +```yaml +image: + repository: yourusername/devops-info-service + tag: "1.0.0" # Pin specific version + pullPolicy: IfNotPresent + +# Environment-specific overrides +# values-prod.yaml: +image: + tag: "1.0.0" # Explicit version for prod +``` + +**Limitations:** +- Only pins the container image tag +- Doesn't lock Python dependencies inside the image +- Doesn't lock Helm chart dependencies +- Image tag `1.0.0` could point to different content if rebuilt + +**Nix Flakes approach:** + +`flake.lock` locks **everything**: +```json +{ + "nodes": { + "nixpkgs": { + "locked": { + "lastModified": 1704321342, + "narHash": "sha256-abc123...", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "52e3e80afff4b16ccb7c52e9f0f5220552f03d04", + "type": "github" + } + } + } +} +``` + +This locks: +- ✅ Exact nixpkgs revision (all 80,000+ packages) +- ✅ Python version and all dependencies +- ✅ Build tools and compilers +- ✅ Everything in the closure + +**Combined Approach:** + +You can use both together! +1. Build reproducible image with Nix: `nix build .#dockerImage` +2. Load to Docker and tag: `docker load < result` +3. Reference in Helm with content hash: `image.tag: "sha256-abc123..."` + +This gives you: +- Helm's declarative Kubernetes deployment +- Nix's perfect reproducibility for the image + +Create a comparison table in your submission. + +#### Bonus.3: Test Cross-Machine Reproducibility + +1. 
**Commit your flake to git:** + + ```bash + git add flake.nix flake.lock default.nix docker.nix + git commit -m "feat: add Nix flake for reproducible builds" + git push + ``` + +2. **Test on another machine or ask a classmate:** + + ```bash + # Build directly from GitHub + nix build github:yourusername/DevOps-Core-Course?dir=labs/lab18/app_python#default + ``` + +3. **Compare store paths:** + + ```bash + readlink result + ``` + + Both machines should get **identical store paths** - same hash, same content! + +#### Bonus.4: Add Development Shell + +1. **Enter the dev shell:** + + ```bash + nix develop + ``` + + This gives you an isolated environment with exact Python version and dependencies. -**Objective:** Deploy a static site to 4EVERLAND. +2. **Compare with Lab 1 virtual environment:** -**Requirements:** + **Lab 1 approach:** + ```bash + python -m venv venv + source venv/bin/activate + pip install -r requirements.txt + ``` -1. **Use the Provided Static Site** - - A course landing page is provided at `labs/lab18/index.html` - - Review the HTML/CSS to understand the structure - - You may customize it or create your own + **Lab 18 Nix approach:** + ```bash + nix develop + # Python and all dependencies instantly available + # Same environment on every machine + ``` -2. **Deploy via 4EVERLAND** - - Connect your GitHub repository - - Configure build settings - - Deploy to IPFS via 4EVERLAND +3. **Try it:** -3. **Verify Deployment** - - Access via 4EVERLAND URL - - Access via IPFS gateway - - Note the CID + ```bash + nix develop + python --version # Exact pinned version + python -c "import flask; print(flask.__version__)" + ``` -4. **Test Permanence** - - Understand that content with same hash = same CID - - Make a change, redeploy, observe new CID + Exit and enter again - same versions, always! 
+ +**📊 Dependency Management Comparison:** + +| Aspect | Lab 1 (venv + requirements.txt) | Lab 10 (Helm values.yaml) | Lab 18 (Nix Flakes) | +|--------|--------------------------------|---------------------------|---------------------| +| **Locks Python version** | ❌ Uses system Python | ❌ Uses image Python | ✅ Pinned in flake | +| **Locks dependencies** | ⚠️ Approximate (versions drift) | ❌ Only image tag | ✅ Exact hashes | +| **Locks build tools** | ❌ No | ❌ No | ✅ Yes | +| **Reproducibility** | ⚠️ Probabilistic | ⚠️ Tag-based | ✅ Cryptographic | +| **Cross-machine** | ❌ Varies | ⚠️ Depends on image | ✅ Identical | +| **Dev environment** | ✅ Yes (venv) | ❌ No | ✅ Yes (nix develop) | +| **Time-stable** | ❌ Packages update | ⚠️ Tags can change | ✅ Locked forever | + +In `labs/submission18.md`, document: +- Your complete `flake.nix` with explanations +- `flake.lock` snippet showing locked dependencies (especially nixpkgs revision) +- Build outputs from `nix build` +- Proof that builds are identical across machines/time +- Dev shell experience: Compare `nix develop` vs Lab 1's `venv` +- Comparison with Lab 10 Helm values.yaml approach (Bonus.2) +- **Reflection:** How do Flakes improve upon traditional dependency management? +- Practical scenarios where flake.lock prevented a "works on my machine" problem + +--- + +## Troubleshooting Common Issues
-💡 Hints - -**Provided Static Site:** -The course provides a beautiful landing page at `labs/lab18/index.html` that you can deploy. It includes: -- Modern responsive design -- Course curriculum overview -- Learning roadmap -- "Deployed on IPFS" badge - -**Deployment Steps:** -1. Go to 4EVERLAND Dashboard → Hosting -2. Click "New Project" -3. Import from GitHub -4. Select your repository and branch -5. Configure: - - Framework: None (static) - - Build command: (leave empty for static) - - Output directory: `labs/lab18` (or root if you moved the file) -6. Deploy - -**Alternative: Create Your Own** -You can also create your own static site. Keep it simple: -```html - - - - My DevOps Portfolio - - -

Welcome to My DevOps Journey

-

Deployed on IPFS via 4EVERLAND

- - +🔧 Python app doesn't run: "command not found" or "No such file or directory" + +**Problem:** Your `app.py` doesn't have a shebang line and isn't being wrapped with Python interpreter. + +**Solution:** Ensure you're using `makeWrapper` in your `default.nix`: + +```nix +nativeBuildInputs = [ pkgs.makeWrapper ]; + +installPhase = '' + mkdir -p $out/bin + cp app.py $out/bin/devops-info-service + + wrapProgram $out/bin/devops-info-service \ + --prefix PYTHONPATH : "$PYTHONPATH" +''; ``` -**Access URLs:** -- 4EVERLAND: `https://your-project.4everland.app` -- IPFS Gateway: `https://ipfs.4everland.link/ipfs/CID` +Alternatively, add a shebang to your `app.py`: +```python +#!/usr/bin/env python3 +```
---- +
+🔧 "error: hash mismatch in fixed-output derivation" + +**Problem:** The hash you specified doesn't match the actual content. + +**Solution:** +1. Use `pkgs.lib.fakeHash` initially to get the correct hash +2. Nix will fail and tell you the expected hash +3. Replace `fakeHash` with the correct hash from the error message + +Example: +```nix +vendorHash = pkgs.lib.fakeHash; # Start with this +# Error will say: "got: sha256-abc123..." +# Then use: vendorHash = "sha256-abc123..."; +``` -### Task 4 — IPFS Pinning (4 pts) +
-**Objective:** Use 4EVERLAND's storage (Bucket) for IPFS pinning. +
+🔧 Docker image doesn't load or fails to run -**Requirements:** +**Common causes:** -1. **Upload Files to Bucket** - - Upload multiple files (images, documents, etc.) - - Get CIDs for each file +1. **Image tarball not built:** Check `result` is a `.tar.gz` file + ```bash + file result + # Should show: gzip compressed data + ``` -2. **Create a Directory Structure** - - Upload a folder with multiple files - - Understand directory CIDs +2. **Wrong Cmd path:** Verify the app path in docker.nix + ```nix + config.Cmd = [ "${app}/bin/devops-info-service" ]; + # Make sure this matches your installPhase output + ``` -3. **Access via Multiple Gateways** - - Access your content via: - - 4EVERLAND gateway - - Public IPFS gateways (ipfs.io, dweb.link) - - Understand gateway differences +3. **Missing dependencies in image:** Add required packages to `contents` + ```nix + contents = [ app pkgs.coreutils ]; # Add tools if needed + ``` -4. **Verify Pinning** - - Confirm content is pinned - - Understand pinning vs local storage +
-💡 Hints +🔧 Port conflicts when running containers -**Bucket Upload:** -1. Dashboard → Bucket -2. Create new bucket -3. Upload files or folders -4. Get CID from file details +**Problem:** Port 5000 or 5001 already in use. -**Multiple Gateways:** +**Solution:** ```bash -# 4EVERLAND -https://ipfs.4everland.link/ipfs/QmXxx... - -# IPFS.io -https://ipfs.io/ipfs/QmXxx... +# Find what's using the port +lsof -i :5000 -# Cloudflare -https://cloudflare-ipfs.com/ipfs/QmXxx... +# Stop old containers +docker stop $(docker ps -aq) 2>/dev/null -# DWeb.link -https://dweb.link/ipfs/QmXxx... +# Or use different ports +docker run -d -p 5002:5000 --name my-container my-image ``` -**Directory Upload:** -- Upload entire folder -- Get directory CID -- Access files: `gateway/ipfs/DirCID/filename` +
+ +
+🔧 Flakes don't work: "experimental features" error -**Pinning Importance:** -- Unpinned content may be garbage collected -- Pinning services keep content available -- Multiple pins = more redundancy +**Problem:** Flakes not enabled in your Nix configuration. + +**Solution:** +```bash +# Check if flakes are enabled +nix flake --help + +# If error, enable flakes: +mkdir -p ~/.config/nix +echo "experimental-features = nix-command flakes" >> ~/.config/nix/nix.conf + +# Restart terminal +```
---- +
+🔧 Build fails on macOS: "unsupported system" -### Task 5 — IPNS & Updates (3 pts) +**Problem:** Flake hardcodes `x86_64-linux` but you're on macOS. -**Objective:** Understand mutable content with IPNS. +**Solution:** Change the system in `flake.nix`: +```nix +# For Mac Intel: +system = "x86_64-darwin"; -**Requirements:** +# For Mac M1/M2/M3: +system = "aarch64-darwin"; +``` -1. **Understand IPNS** - - IPFS = immutable (content changes = new CID) - - IPNS = mutable pointer to IPFS content - - IPNS name stays same, content can change +
-2. **Explore 4EVERLAND Domains** - - Custom domains for your deployment - - How 4EVERLAND handles updates +
+🔧 "cannot build derivation: no builder for this system" + +**Problem:** Trying to build Linux binaries on macOS or vice versa. -3. **Update Deployment** - - Make changes to your static site - - Redeploy - - Observe: same URL, new CID +**Solution:** Either: +1. Match your system architecture in the flake +2. Use Docker builds which work cross-platform +3. Use Nix's cross-compilation features (advanced) + +
-💡 Hints +🔧 Don't have Lab 1/2 artifacts to use + +**No problem!** Create a minimal example: -**IPFS vs IPNS:** -- **IPFS CID:** `QmXxx...` - changes when content changes -- **IPNS Name:** `/ipns/k51xxx...` - stays same, points to current CID +1. **Create simple Flask app:** + ```python + # app.py + from flask import Flask, jsonify + app = Flask(__name__) -**4EVERLAND Handles This:** -- Your project URL stays constant -- Behind scenes, updates the IPNS pointer -- Users always get latest version + @app.route('/health') + def health(): + return jsonify({"status": "healthy"}) -**Domain Configuration:** -1. Dashboard → Hosting → Your Project -2. Settings → Domains -3. Add custom domain or use provided subdomain + if __name__ == '__main__': + app.run(host='0.0.0.0', port=5000) + ``` + +2. **Create requirements.txt:** + ``` + flask + ``` + +3. **Create basic Dockerfile:** + ```dockerfile + FROM python:3.13-slim + WORKDIR /app + COPY requirements.txt app.py ./ + RUN pip install -r requirements.txt + EXPOSE 5000 + CMD ["python", "app.py"] + ``` + +Now you can proceed with the lab using these minimal examples!
--- -### Task 6 — Documentation & Analysis (3 pts) +## How to Submit -**Objective:** Document your work and analyze decentralized hosting. +1. Create a branch for this lab and push it: -**Create `4EVERLAND.md` with:** + ```bash + git switch -c feature/lab18 + # create labs/submission18.md with your findings + git add labs/submission18.md labs/lab18/ + git commit -m "docs: add lab18 submission - Nix reproducible builds" + git push -u origin feature/lab18 + ``` -1. **Deployment Summary** - - What you deployed - - URLs (4EVERLAND and IPFS gateways) - - CIDs obtained +2. **Open a PR (GitHub) or MR (GitLab)** from your fork's `feature/lab18` branch → **course repository's main branch**. -2. **Screenshots** - - 4EVERLAND dashboard - - Deployed site - - Bucket storage - - Multiple gateway access +3. In the PR/MR description, include: -3. **Centralized vs Decentralized Comparison** + ```text + Platform: [GitHub / GitLab] -| Aspect | Traditional Hosting | IPFS/4EVERLAND | -|--------|---------------------|----------------| -| Content addressing | | | -| Single point of failure | | | -| Censorship resistance | | | -| Update mechanism | | | -| Cost model | | | -| Speed/latency | | | -| Best use cases | | | + - [x] Task 1 — Build Reproducible Artifacts from Scratch (6 pts) + - [x] Task 2 — Reproducible Docker Images with Nix (4 pts) + - [ ] Bonus Task — Modern Nix with Flakes (2 pts) [if completed] + ``` -4. **Use Case Analysis** - - When decentralized hosting makes sense - - When traditional hosting is better - - Your recommendations +4. **Copy the PR/MR URL** and submit it via **Moodle before the deadline**. 
--- -## Checklist +## Acceptance Criteria -- [ ] IPFS concepts understood -- [ ] Local IPFS node running -- [ ] Content added to local IPFS -- [ ] 4EVERLAND account created -- [ ] Static site deployed via 4EVERLAND -- [ ] Files uploaded to Bucket -- [ ] Content accessed via multiple gateways -- [ ] IPNS/updates understood -- [ ] `4EVERLAND.md` documentation complete -- [ ] Comparison analysis complete +- ✅ Branch `feature/lab18` exists with commits for each task +- ✅ File `labs/submission18.md` contains required outputs and analysis for all completed tasks +- ✅ Directory `labs/lab18/` contains your application code and Nix expressions +- ✅ Nix derivations successfully build reproducible artifacts +- ✅ Docker image built with Nix and compared to traditional Dockerfile +- ✅ Hash comparisons prove reproducibility +- ✅ **Bonus (if attempted):** `flake.nix` and `flake.lock` present and working +- ✅ PR/MR from `feature/lab18` → **course repo main branch** is open +- ✅ PR/MR link submitted via Moodle before the deadline --- -## Rubric - -| Criteria | Points | -|----------|--------| -| **IPFS Fundamentals** | 3 pts | -| **4EVERLAND Setup** | 3 pts | -| **Static Deployment** | 4 pts | -| **IPFS Pinning** | 4 pts | -| **IPNS & Updates** | 3 pts | -| **Documentation** | 3 pts | -| **Total** | **20 pts** | +## Rubric (12 pts max) -**Grading:** -- **18-20:** Excellent understanding, thorough deployment, insightful analysis -- **16-17:** Working deployment, good documentation -- **14-15:** Basic deployment, incomplete analysis -- **<14:** Incomplete deployment +| Criterion | Points | +| --------------------------------------------------- | -----: | +| Task 1 — Build Reproducible Artifacts from Scratch | **6** | +| Task 2 — Reproducible Docker Images with Nix | **4** | +| Bonus Task — Modern Nix with Flakes | **2** | +| **Total** | **12** | --- -## Resources +## Guidelines + +- Use clear Markdown headers to organize sections in `submission18.md` +- Include command outputs and 
written analysis for each task +- Explain WHY Nix provides better reproducibility than traditional tools +- Compare before/after results when proving reproducibility +- Document challenges encountered and how you solved them +- Include code snippets with explanations, not just paste
-📚 IPFS Documentation +📚 Helpful Resources + +**Official Documentation:** +- [nix.dev - Official tutorials](https://nix.dev/) +- [Zero to Nix - Beginner-friendly guide](https://zero-to-nix.com/) +- [Nix Pills - Deep dive](https://nixos.org/guides/nix-pills/) +- [NixOS Package Search](https://search.nixos.org/) + +**Docker with Nix:** +- [Building Docker images - nix.dev](https://nix.dev/tutorials/nixos/building-and-running-docker-images.html) +- [dockerTools reference](https://ryantm.github.io/nixpkgs/builders/images/dockertools/) + +**Flakes:** +- [Nix Flakes - NixOS Wiki](https://wiki.nixos.org/wiki/Flakes) +- [Flakes - Zero to Nix](https://zero-to-nix.com/concepts/flakes) +- [Practical Nix Flakes](https://serokell.io/blog/practical-nix-flakes) -- [IPFS Docs](https://docs.ipfs.tech/) -- [IPFS Concepts](https://docs.ipfs.tech/concepts/) -- [Content Addressing](https://docs.ipfs.tech/concepts/content-addressing/) -- [IPNS](https://docs.ipfs.tech/concepts/ipns/) +**Community:** +- [awesome-nix - Curated resources](https://github.com/nix-community/awesome-nix) +- [NixOS Discourse](https://discourse.nixos.org/)
-🌐 4EVERLAND +💡 Nix Tips -- [4EVERLAND Docs](https://docs.4everland.org/) -- [Hosting Guide](https://docs.4everland.org/hosting/overview) -- [Bucket (Storage)](https://docs.4everland.org/storage/bucket) +1. **Store paths are content-addressable:** Same inputs = same output hash +2. **Use `nix-shell -p pkg` for quick testing** before adding to derivations +3. **Garbage collect unused builds:** `nix-collect-garbage -d` +4. **Search for packages:** `nix search nixpkgs golang` +5. **Read error messages carefully:** Nix errors are verbose but informative +6. **Use `lib.fakeHash` initially** when you don't know the hash yet +7. **Avoid network access in builds:** Nix sandboxes block network by default +8. **Pin nixpkgs version** for maximum reproducibility
-🔗 Public Gateways +🔧 Troubleshooting + +**If Nix installation fails:** +- Ensure you have multi-user support (daemon mode recommended) +- Check `/nix` directory permissions +- Try the Determinate Systems installer instead of official + +**If builds fail with "hash mismatch":** +- Update the hash in your derivation to match the error message +- Use `lib.fakeHash` to discover the correct hash + +**If Docker load fails:** +- Verify result is a valid tarball: `file result` +- Check Docker daemon is running: `docker info` +- Try `docker load -i result` instead of `docker load < result` + +**If flakes don't work:** +- Ensure experimental features are enabled in `~/.config/nix/nix.conf` +- Run `nix flake check` to validate flake syntax +- Make sure your flake is in a git repository -- [IPFS Gateway Checker](https://ipfs.github.io/public-gateway-checker/) -- [Gateway List](https://docs.ipfs.tech/concepts/ipfs-gateway/#gateway-providers) +**If cross-machine builds differ:** +- Check nixpkgs input is locked in `flake.lock` +- Verify both machines use same Nix version +- Ensure no `created = "now"` or timestamps in image builds
---- +
+🎯 Understanding Reproducibility + +**What makes a build reproducible?** +- ✅ Deterministic inputs (exact versions, hashes) +- ✅ Isolated environment (no system dependencies) +- ✅ No timestamps or random values +- ✅ Same compiler, same flags, same libraries +- ✅ Content-addressable storage + +**Why traditional tools fail:** +```bash +# Docker - timestamps in layers +docker build . # Different timestamp = different image hash + +# npm - lockfiles help but aren't perfect +npm install # Still uses local cache, system libraries + +# apt/yum - version drift +apt-get install nodejs # Gets different version next week +``` -**Good luck!** 🌐 +**How Nix succeeds:** +```bash +# Nix - pure, sandboxed, content-addressed +nix-build # Same inputs = bit-for-bit identical output + # Today, tomorrow, on any machine +``` + +**Real-world impact:** +- **CI/CD:** No more "works on my machine" +- **Security:** Audit exact dependency tree +- **Rollback:** Atomic updates with perfect rollbacks +- **Collaboration:** Everyone gets identical environment + +
+ +
+🌟 Advanced Concepts (Optional Reading) + +**Content-Addressable Store:** +- Every package has a unique hash based on its inputs +- `/nix/store/abc123...` where `abc123` = hash of inputs +- Same inputs = same hash = reuse existing build + +**Sandboxing:** +- Builds run in isolated namespaces +- No network access (except for fixed-output derivations) +- No access to `/home`, `/tmp`, or system paths +- Only declared dependencies are available + +**Lazy Evaluation:** +- Nix expressions are lazily evaluated +- Only builds what's actually needed +- Enables massive codebase (all of nixpkgs) without performance issues + +**Binary Cache:** +- cache.nixos.org provides pre-built binaries +- If your build matches a cached hash, download instead of rebuild +- Set up private caches for your team + +**Cross-Compilation:** +- Nix makes cross-compilation trivial +- `pkgs.pkgsCross.aarch64-multiplatform.hello` +- Same reproducibility guarantees across architectures -> **Remember:** Decentralized hosting trades some convenience for resilience and censorship resistance. Content-addressed storage ensures integrity - the same content always has the same identifier. +
diff --git a/labs/lab18/index.html b/labs/lab18/index.html deleted file mode 100644 index b3de65bc8b..0000000000 --- a/labs/lab18/index.html +++ /dev/null @@ -1,927 +0,0 @@ - - - - - - DevOps Core Course | Production-Grade Practices - - - - - - - -
- -
- -
-
-
-
-
- 2026 Edition — 7th Year — Evolved every semester -
-

Master Production-Grade DevOps Practices

-

16 lectures and hands-on labs covering Kubernetes, GitOps, CI/CD, Monitoring, and beyond. 18 weeks of learning to build real-world skills.

- -
-
-
- -
-
-
-
7
-
Years Running
-
-
-
1000+
-
Students Trained
-
-
-
16
-
Lectures & Labs
-
-
-
18
-
Weeks of Learning
-
-
-
- -
-
-

Why This Course?

-

Build production-ready skills through hands-on practice with tools used by top tech companies worldwide.

-
-
-
-
-

Cloud-Native Architecture

-

Master Kubernetes, Helm, StatefulSets, and container orchestration for scalable deployments.

-
-
-
-

GitOps & Automation

-

Implement ArgoCD, Argo Rollouts, and progressive delivery for safe, automated deployments.

-
-
-
🔒
-

Security & Secrets

-

Learn HashiCorp Vault, Kubernetes Secrets, and secure configuration management practices.

-
-
-
📊
-

Observability

-

Build monitoring stacks with Prometheus, Grafana, Loki, and implement effective alerting.

-
-
-
-

Infrastructure as Code

-

Automate infrastructure with Terraform and Ansible for reproducible environments.

-
-
-
🌐
-

Beyond Kubernetes

-

Explore edge computing with Fly.io and decentralized hosting with IPFS and 4EVERLAND.

-
-
-
- -
-
-

Lectures & Labs

-

16 lectures with corresponding hands-on labs, plus 2 bonus labs as exam alternatives.

-
-
-
-
01
-
-

Web Application Development

-

Python/Go, Best Practices

-
-
-
-
02
-
-

Containerization

-

Docker, Multi-stage Builds

-
-
-
-
03
-
-

Continuous Integration

-

GitHub Actions, Snyk

-
-
-
-
04
-
-

Infrastructure as Code

-

Terraform, Cloud Providers

-
-
-
-
05
-
-

Configuration Management

-

Ansible Basics

-
-
-
-
06
-
-

Continuous Deployment

-

Ansible Advanced

-
-
-
-
07
-
-

Logging

-

Promtail, Loki, Grafana

-
-
-
-
08
-
-

Monitoring

-

Prometheus, Grafana

-
-
-
-
09
-
-

Kubernetes Basics

-

Minikube, Deployments, Services

-
-
-
-
10
-
-

Helm Charts

-

Templating, Hooks

-
-
-
-
11
-
-

Secrets Management

-

K8s Secrets, HashiCorp Vault

-
-
-
-
12
-
-

Configuration & Storage

-

ConfigMaps, PVCs

-
-
-
-
13
-
-

GitOps

-

ArgoCD

-
-
-
-
14
-
-

Progressive Delivery

-

Argo Rollouts

-
-
-
-
15
-
-

StatefulSets

-

Persistent Storage, Headless Services

-
-
-
-
16
-
-

Cluster Monitoring

-

Kube-Prometheus, Init Containers

-
-
-
-
17
-
-

Fly.io Edge Deployment

-

Global Distribution, PaaS

- Exam Alternative -
-
-
-
18
-
-

4EVERLAND & IPFS

-

Decentralized Hosting

- Exam Alternative -
-
-
-
- -
-
-

Learning Roadmap

-

A structured 16-week journey from foundations to advanced production patterns, plus 2 weeks for bonus labs or exam preparation.

-
-
-
-
- Phase - 1 -
-
-

Foundations (Weeks 1-6)

-

Build core skills in containerization, CI/CD, and infrastructure automation.

-
- Docker - GitHub Actions - Terraform - Ansible -
-
-
-
-
- Phase - 2 -
-
-

Observability (Weeks 7-8)

-

Master logging and monitoring for production visibility.

-
- Prometheus - Grafana - Loki - Alerting -
-
-
-
-
- Phase - 3 -
-
-

Kubernetes Core (Weeks 9-12)

-

Deep dive into Kubernetes orchestration and package management.

-
- Kubernetes - Helm - Secrets - ConfigMaps -
-
-
-
-
- Phase - 4 -
-
-

Advanced Patterns (Weeks 13-16)

-

Implement GitOps, progressive delivery, stateful workloads, and production monitoring.

-
- ArgoCD - Argo Rollouts - StatefulSets - Vault -
-
-
-
-
- Bonus - +2 -
-
-

Bonus Labs / Exam Prep (Weeks 17-18)

-

Complete exam alternative labs or prepare for the final exam.

-
- Fly.io - IPFS - 4EVERLAND - Edge Computing -
-
-
-
-
- -
-
-

Ready to Start Your DevOps Journey?

-

Join 1000+ students who have built production-ready skills through this battle-tested curriculum.

- - Get Started Free → - -
-
-
- -
-
-

© 2020–2026 DevOps Core Course. 7 years of continuous improvement. Open source educational content.

- -
-
- -
-
🌐
-
- Deployed on
- IPFS via 4EVERLAND -
-
- -