From e8ea2241a45b7c571bc41c3e88d838281b54efdb Mon Sep 17 00:00:00 2001 From: Annie Tallund Date: Wed, 20 Aug 2025 08:37:10 +0200 Subject: [PATCH 1/5] Update test workflows - Add data directory to .gitignore - Verify fields in _index.md files for LPs - Remove unused workflows and scripts --- .github/workflows/external-links.yml | 39 --------- .github/workflows/maintenance.yml | 108 ------------------------ .github/workflows/profanity.yml | 39 --------- .github/workflows/test-lp.yml | 7 +- .gitignore | 3 +- .profanity_ignore.yml | 12 --- tools/profanity.py | 39 --------- tools/verify_index_fields.py | 121 +++++++++++++++++++++++++++ 8 files changed, 129 insertions(+), 239 deletions(-) delete mode 100644 .github/workflows/external-links.yml delete mode 100644 .github/workflows/maintenance.yml delete mode 100644 .github/workflows/profanity.yml delete mode 100644 .profanity_ignore.yml delete mode 100644 tools/profanity.py create mode 100644 tools/verify_index_fields.py diff --git a/.github/workflows/external-links.yml b/.github/workflows/external-links.yml deleted file mode 100644 index b0601c0f6c..0000000000 --- a/.github/workflows/external-links.yml +++ /dev/null @@ -1,39 +0,0 @@ - -name: external-links - -# Controls when the workflow will run -on: - - # Allows you to run this workflow manually from the Actions tab - workflow_dispatch: - -# A workflow run is made up of one or more jobs that can run sequentially or in parallel -jobs: - # This workflow contains a single job called "build" - check-external-links: - # The type of runner that the job will run on - runs-on: ubuntu-latest - - # Steps represent a sequence of tasks that will be executed as part of the job - steps: - - name: Checkout - uses: actions/checkout@v4 - with: - submodules: true - fetch-depth: 0 - - - name: Set up Python - uses: actions/setup-python@v4 - with: - python-version: 3.11.3 - - - name: Check external HTML links - continue-on-error: true - run: | - pip install linkchecker - linkchecker --no-robots --config .linkcheckerrc --check-extern https://learn.arm.com/learning-paths/laptops-and-desktops/ - linkchecker --no-robots --config .linkcheckerrc --check-extern https://learn.arm.com/learning-paths/servers-and-cloud-computing/ - linkchecker --no-robots --config .linkcheckerrc --check-extern https://learn.arm.com/learning-paths/mobile-graphics-and-gaming/ - linkchecker --no-robots --config .linkcheckerrc --check-extern https://learn.arm.com/learning-paths/embedded-and-microcontrollers/ - linkchecker --no-robots --config .linkcheckerrc --check-extern https://learn.arm.com/learning-paths/iot/ - linkchecker --no-robots --config .linkcheckerrc --check-extern https://learn.arm.com/learning-paths/automotive/ diff --git a/.github/workflows/maintenance.yml b/.github/workflows/maintenance.yml deleted file mode 100644 index 9edac85b41..0000000000 --- a/.github/workflows/maintenance.yml +++ /dev/null @@ -1,108 +0,0 @@ -# This workflow check instructions in articles - -name: Maintenance workflow - -# Controls when the action will run. -on: - # Triggers the workflow on the 1st every month. 
Disable for now - #schedule: - # - cron: '* * 1 * *' - - # Allows you to run this workflow manually from the Actions tab - workflow_dispatch: - -# A workflow run is made up of one or more jobs that can run sequentially or in parallel -jobs: - # Generate report of outdated articles - report: - # The type of runner that the job will run on - runs-on: ubuntu-latest - # Steps represent a sequence of tasks that will be executed as part of the job - steps: - # Check out repo and the whole history - - name: Check out repository code - uses: actions/checkout@v4 - with: - fetch-depth: 0 - - # Report outdated files, store result as artifact and updates stats - - name: Report outdated files of more than 20 days - run: | - pip install -r tools/requirements.txt - python3 tools/maintenance.py -r 20 - - # Upload report as artifact - - name: Upload artifact - uses: actions/upload-artifact@v4 - with: - name: outdated - path: outdated_files.csv - - # Test content of outdated articles - test: - # The type of runner that the job will run on - runs-on: self-hosted - # Depends on report generated in previous step - needs: report - - # Steps represent a sequence of tasks that will be executed as part of the job - steps: - # Check out repo - - name: Check out repository code - uses: actions/checkout@v4 - - # Download list of outdated files to test - - name: Download a single artifact - uses: actions/download-artifact@v4 - with: - name: outdated - - # Run tests for install guides - - name: Test commands and output reports for intall-guides - run: | - for i in $(tree -i -f content/install-guides/ | tail -n +2 | grep ".md$"); do - python3 tools/maintenance.py -i $i -l ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} - done - - # Run tests for learning paths - #- name: Test commands and output reports for learning paths - # run: | - # for i in $(tree -i -d -f content/learning-paths/ | tail -n +2); do - # python3 tools/maintenance.py -i $i -l ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} - # done - - # Move reports to result folder - - name: Move reports - run: | - mkdir junit-reports - for i in $(tree -i -f | grep "_cmd.xml"); do - mv $i junit-reports/ - done - - # Upload test reports as artifact - - name: Upload artifacts - uses: actions/upload-artifact@v4 - with: - name: results - path: junit-reports/*_cmd.xml - - # Publish Junit reports - - name: Publish Test Reports - id: junit - uses: mikepenz/action-junit-report@v3 - if: always() # always run even if the previous step fails - with: - report_paths: '**/junit-reports/*_cmd.xml' - detailed_summary: true - include_passed: true - - # Commit and push changes - - name: Commit test status - run: | - git config user.name github-actions - git config user.email github-actions@github.com - git ls-files --modified | xargs git add - git pull - if git commit -m "Add test status"; then - git push - fi diff --git a/.github/workflows/profanity.yml b/.github/workflows/profanity.yml deleted file mode 100644 index 025b508c77..0000000000 --- a/.github/workflows/profanity.yml +++ /dev/null @@ -1,39 +0,0 @@ -name: profanity - -# Controls when the workflow will run -on: - - workflow_dispatch: - - -# A workflow run is made up of one or more jobs that can run sequentially or in parallel -jobs: - profanity_scan: - runs-on: ubuntu-latest - - steps: - - name: Checkout repository - uses: actions/checkout@v4 - with: - submodules: true # Fetch Hugo Themes - fetch-depth: 0 - - name: Set up Python - uses: 
actions/setup-python@v4 - with: - python-version: 3.11.3 - - - name: Install dependencies - run: | - python -m pip install --upgrade pip - pip install better_profanity - - name: Scan for profanities - run: | - python tools/profanity.py - cat profanity_log.txt - - - name: Export profanities - uses: actions/upload-artifact@v4 - with: - name: profanities - path: profanity_log.txt - retention-days: 5 diff --git a/.github/workflows/test-lp.yml b/.github/workflows/test-lp.yml index fd187b6ef5..ab04f2084b 100644 --- a/.github/workflows/test-lp.yml +++ b/.github/workflows/test-lp.yml @@ -2,7 +2,7 @@ name: Test Learning Path on: pull_request env: HUGO_VERSION: 0.130.0 - + jobs: Test-Pull-Request: runs-on: ubuntu-24.04-arm @@ -58,6 +58,11 @@ jobs: - name: Install dependencies if: steps.changed-markdown-files.outputs.any_changed == 'true' run: pip install -r tools/requirements.txt + - name: Validate _index.md files + if: steps.changed-markdown-files.outputs.any_changed == 'true' + run: | + echo "Checking YAML fields for changed learning paths..." + python3 tools/validate_index_fields.py ${{ steps.changed-markdown-files.outputs.all_changed_files }} - name: Run test suite for all changed .md files id: run-suite if: steps.changed-markdown-files.outputs.any_changed == 'true' diff --git a/.gitignore b/.gitignore index c92386632d..0022932a69 100644 --- a/.gitignore +++ b/.gitignore @@ -8,6 +8,7 @@ package-lock.json .vscode .env startup.sh +data/ # macOS files *.DS_Store @@ -22,4 +23,4 @@ z_local_saved/ *.xml # CTags symbol index -tags +tags \ No newline at end of file diff --git a/.profanity_ignore.yml b/.profanity_ignore.yml deleted file mode 100644 index 8cde2313b4..0000000000 --- a/.profanity_ignore.yml +++ /dev/null @@ -1,12 +0,0 @@ -XX --kill -kill -KVM -#IO_L3N_T0_DQS_AD1N_35 -172.X.X.X -naked -(x=x -**VM -Kill -slave -Slave diff --git a/tools/profanity.py b/tools/profanity.py deleted file mode 100644 index 39c495bee1..0000000000 --- a/tools/profanity.py +++ /dev/null @@ -1,39 +0,0 @@ -import os -from better_profanity import profanity - -def load_excluded_words(file_path): - with open(file_path, 'r') as f: - excluded_words = [word.strip() for word in f.readlines()] - return excluded_words - -def scan_for_profanities(directory, log_file, excluded_words_file=None): - exclude_words = None - if excluded_words_file: - exclude_words = load_excluded_words(excluded_words_file) - - with open(log_file, 'w') as f: - for root, dirs, files in os.walk(directory): - for file in files: - if file.endswith('.md'): # Read only markdown files - file_path = os.path.join(root, file) - with open(file_path, 'r') as code_file: - content = code_file.read() - if exclude_words: - excluded_content = content - for word in exclude_words: - excluded_content = excluded_content.replace(word, '') - if profanity.contains_profanity(excluded_content): - f.write(f"Profanity found in file: {file_path}\n") - f.write("Profanities found: ") - profanities = set(word for word in excluded_content.split() if profanity.contains_profanity(word)) - f.write(", ".join(profanities)) - f.write("\n\n") - else: - if profanity.contains_profanity(content): - f.write(f"Profanity found in file: {file_path}\n") - f.write("Profanities found: ") - profanities = set(word for word in content.split() if profanity.contains_profanity(word)) - f.write(", ".join(profanities)) - f.write("\n\n") - -scan_for_profanities("./content/", "./profanity_log.txt", excluded_words_file="./.profanity_ignore.yml") diff --git a/tools/verify_index_fields.py 
b/tools/verify_index_fields.py new file mode 100644 index 0000000000..58808669a4 --- /dev/null +++ b/tools/verify_index_fields.py @@ -0,0 +1,121 @@ +#!/usr/bin/env python3 + +import sys +import yaml +import os +import re + +ALLOWLIST_FILE = "tools/closed-filters-allow-list.yml" +REQUIRED_FIELDS = [ + "title", "minutes_to_complete", "who_is_this_for", "learning_objectives", + "prerequisites", "author", "skilllevels", "subjects", "armips", "tools_software_languages", + "operatingsystems", "further_reading", "weight", "layout", "learning_path_main_page" +] +VALID_SKILLLEVELS = {"Introductory", "Advanced"} + +def load_allowlist(): + with open(ALLOWLIST_FILE, 'r') as f: + data = yaml.safe_load(f) + + flat_subjects = set() + for _, subjects in data.get('subjects', {}).items(): + flat_subjects.update(subjects) + + return { + "subjects_by_category": data.get("subjects", {}), + "flat_subjects": flat_subjects, + "operatingsystems": set(data.get("operatingsystems", [])), + "cloud_service_providers": set(data.get("cloud_service_providers", [])), + } + +def extract_frontmatter(path): + with open(path, 'r') as f: + content = f.read() + if not content.startswith('---'): + return None + parts = content.split('---', 2) + if len(parts) < 3: + return None + return yaml.safe_load(parts[1]) + +def get_category_from_path(path): + match = re.match(r"content/learning-paths/([^/]+)/", path) + return match.group(1) if match else None + +import os + +def is_valid_index_path(path): + norm_path = os.path.normpath(path) + parts = norm_path.split(os.sep) + + try: + lp_index = parts.index("learning-paths") + # Ensure path is: .../learning-paths///_index.md + return ( + parts[-1] == "_index.md" and + len(parts) == lp_index + 4 # learning-paths + category + tutorial + _index.md + ) + except ValueError: + return False + +def validate_file(path, allowlist): + if not is_valid_index_path(path): + print(f"Skipping {path} as it is not a learning path index file.") + return + + data = extract_frontmatter(path) + if not data: + print(f"❌ Invalid or missing YAML frontmatter: {path}") + return True + + errors = [] + # Check for required fields + for field in REQUIRED_FIELDS: + if field not in data: + errors.append(f"Missing required field: {field}") + + # Validate skilllevels + skill = data.get("skilllevels") + if skill and skill not in VALID_SKILLLEVELS: + errors.append(f"Invalid skilllevels: {skill}. Please choose from {', '.join(VALID_SKILLLEVELS)}") + + # Validate subjects + subject = data.get("subjects") + if subject and subject not in allowlist["flat_subjects"]: + errors.append(f"Invalid subjects: {subject}.") + + # Validate OS if present + osys = data.get("operatingsystems", []) + if isinstance(osys, list): + for os_entry in osys: + if os_entry not in allowlist["operatingsystems"]: + errors.append(f"Invalid operatingsystem: {os_entry}. Please choose from {', '.join(allowlist['operatingsystems'])}") + + # Validate subject/category mapping + category = get_category_from_path(path) + if category and subject: + category_subjects = allowlist["subjects_by_category"].get(category) + if category_subjects and subject not in category_subjects: + errors.append(f"Subject '{subject}' not allowed for category '{category}'. 
Please choose from {', '.join(category_subjects)}") + + if errors: + print(f"❌ Validation errors in {path}:") + for e in errors: + print(f" - {e}") + return True + + print(f"✅ {path} fields are verified.") + return False + +if __name__ == "__main__": + files = sys.argv[1:] + allowlist = load_allowlist() + any_errors = False + + for f in files: + if "content/" in f and f.endswith("_index.md") and os.path.exists(f): + if validate_file(f, allowlist): + any_errors = True + + if any_errors: + sys.exit(1) \ No newline at end of file From e5617e8123b5439bc9a847c316245ba13573584f Mon Sep 17 00:00:00 2001 From: Annie Tallund Date: Thu, 21 Aug 2025 12:54:08 +0200 Subject: [PATCH 2/5] Add profanity scanning to Code Quality checks --- .github/workflows/content-checks.yml | 11 +++ .profanity_ignore.yml | 26 +++++++ profanity_log.txt | 111 +++++++++++++++++++++++++++ tools/profanity.py | 39 ++++++++++ 4 files changed, 187 insertions(+) create mode 100644 .profanity_ignore.yml create mode 100644 profanity_log.txt create mode 100644 tools/profanity.py diff --git a/.github/workflows/content-checks.yml b/.github/workflows/content-checks.yml index 949573cb31..3f87782e75 100644 --- a/.github/workflows/content-checks.yml +++ b/.github/workflows/content-checks.yml @@ -59,6 +59,17 @@ jobs: name: spellcheck-output path: spellcheck-output.txt retention-days: 5 # Default is 90 days + - name: Scan for profanities + run: | + python tools/profanity.py + cat profanity_log.txt + + - name: Export profanities + uses: actions/upload-artifact@v4 + with: + name: profanities + path: profanity_log.txt + retention-days: 5 - name: Scan for malware run: | diff --git a/.profanity_ignore.yml b/.profanity_ignore.yml new file mode 100644 index 0000000000..cd7840829d --- /dev/null +++ b/.profanity_ignore.yml @@ -0,0 +1,26 @@ +XX +-kill +kill +KVM +kvm +X.X.X +.xx. +.xxx. 
+naked +facial +Facial +screw +len +LEN +test +TEST +Test +--strip- +(x=x +**VM +Kill +slave +Slave +A55 +a55 +455 \ No newline at end of file diff --git a/profanity_log.txt b/profanity_log.txt new file mode 100644 index 0000000000..3204da927e --- /dev/null +++ b/profanity_log.txt @@ -0,0 +1,111 @@ +Profanity found in file: ./content/install-guides/perf.md +Profanities found: dummy + +Profanity found in file: ./content/learning-paths/embedded-and-microcontrollers/tfm/tfm.md +Profanities found: TFM_DUMMY_PROVISIONING, TFM_DUMMY_PROVISIONING, dummy + +Profanity found in file: ./content/learning-paths/embedded-and-microcontrollers/advanced_soc/connecting_peripheral.md +Profanities found: #IO_L3N_T0_DQS_AD1N_35 + +Profanity found in file: ./content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/how-to-4.md +Profanities found: vmax=[np.max(l) + +Profanity found in file: ./content/learning-paths/embedded-and-microcontrollers/training-inference-pytorch/fine-tune-2.md +Profanities found: outputs.max(1) + +Profanity found in file: ./content/learning-paths/cross-platform/kleidiai-explainer/page1.md +Profanities found: *z* + +Profanity found in file: ./content/learning-paths/cross-platform/ipexplorer/custom1.md +Profanities found: m4x1_cache_2kb, m4x1_nocache_ws0, m4x1_nocache_ws4, m4x1_cache_64kb + +Profanity found in file: ./content/learning-paths/cross-platform/vectorization-friendly-data-layout/a-more-complex-problem-revisited-sve.md +Profanities found: objects->vz[i], &objects->vz[i]);, box_hi_v);, &objects->vz[i], + +Profanity found in file: ./content/learning-paths/cross-platform/vectorization-friendly-data-layout/a-more-complex-problem-revisited.md +Profanities found: objects->vz[i], vnegq_f32(box_hi_v);, box_hi_v);, vld1q_f32(&objects->vz[i]);, vst1q_f32(&objects->vz[i], + +Profanity found in file: ./content/learning-paths/cross-platform/vectorization-friendly-data-layout/a-more-complex-problem-manual-simd.md +Profanities found: vnegq_f32(box_hi_v);, box_hi_v); + +Profanity found in file: ./content/learning-paths/cross-platform/adler32/neon-run-8.md +Profanities found: Test**: + +Profanity found in file: ./content/learning-paths/cross-platform/psa-tfm/run.md +Profanities found: CVM + +Profanity found in file: ./content/learning-paths/mobile-graphics-and-gaming/kleidiai-on-android-with-mediapipe-and-xnnpack/2-run-gemma-2b.md +Profanities found: --cxxopt=-DABSL_FLAGS_STRIP_NAMES=0 + +Profanity found in file: ./content/learning-paths/mobile-graphics-and-gaming/get-started-with-arm-asr/04-generic_library.md +Profanities found: **1**, + +Profanity found in file: ./content/learning-paths/mobile-graphics-and-gaming/android_opencv_facedetection/background.md +Profanities found: Facial + +Profanity found in file: ./content/learning-paths/mobile-graphics-and-gaming/best-practices-for-hwrt-lumen-performance/8-lumen-general.md +Profanities found: color=#00FF00>**1** + +Profanity found in file: ./content/learning-paths/mobile-graphics-and-gaming/build-android-selfie-app-using-mediapipe-multimodality/6-flow-data-to-view-1.md +Profanities found: Paint.Style.STROKE, LANDMARK_STROKE_WIDTH + +Profanity found in file: ./content/learning-paths/iot/iot-sdk/openiot.md +Profanities found: TFM_DUMMY_PROVISIONING, TFM_DUMMY_PROVISIONING, dummy + +Profanity found in file: ./content/learning-paths/servers-and-cloud-computing/vLLM-quant/3-run-benchmark.md +Profanities found: metrics.py:455] + +Profanity found in file: 
./content/learning-paths/servers-and-cloud-computing/arcee-foundation-model-on-aws/02_setting_up_the_instance.md +Profanities found: G++**: + +Profanity found in file: ./content/learning-paths/servers-and-cloud-computing/from-iot-to-the-cloud-part3/how-to-2.md +Profanities found: **1**, **Dev/Test** + +Profanity found in file: ./content/learning-paths/servers-and-cloud-computing/glibc-with-lse/mongo_benchmark.md +Profanities found: 455 + +Profanity found in file: ./content/learning-paths/servers-and-cloud-computing/triggering-pmu-events/icache.md +Profanities found: 455 + +Profanity found in file: ./content/learning-paths/servers-and-cloud-computing/wordpress/wordpress.md +Profanities found: --strip + +Profanity found in file: ./content/learning-paths/servers-and-cloud-computing/redis/single-node_deployment.md +Profanities found: **-h**, **-p** + +Profanity found in file: ./content/learning-paths/servers-and-cloud-computing/java-gc-tuning/setup.md +Profanities found: 21.0.4+7-LTS,, 21.0.4+7-LTS) + +Profanity found in file: ./content/learning-paths/servers-and-cloud-computing/java-gc-tuning/tuning-parameters.md +Profanities found: 21.0.4+7-LTS,, 21.0.4+7-LTS) + +Profanity found in file: ./content/learning-paths/servers-and-cloud-computing/arm_linux_page_size/centos.md +Profanities found: Strip + +Profanity found in file: ./content/learning-paths/servers-and-cloud-computing/rag/backend.md +Profanities found: "").strip() + +Profanity found in file: ./content/learning-paths/servers-and-cloud-computing/multiarch_ollama_on_gke/0-spin_up_gke_cluster.md +Profanities found: **1**)., **1** + +Profanity found in file: ./content/learning-paths/servers-and-cloud-computing/from-iot-to-the-cloud-part1/how-to-4.md +Profanities found: **vm-arm64**, **vm-arm64**, + +Profanity found in file: ./content/learning-paths/servers-and-cloud-computing/from-iot-to-the-cloud-part1/how-to-2.md +Profanities found: **vm-arm64** + +Profanity found in file: ./content/learning-paths/servers-and-cloud-computing/mysql/install_mysql.md +Profanities found: Manual](https://dev.mysql.com/doc/refman/8.1/en/), options](https://dev.mysql.com/doc/refman/8.1/en/installing.html)., documentation](https://dev.mysql.com/doc/refman/8.1/en/mysqld-server.html),, [instructions](https://dev.mysql.com/doc/refman/8.1/en/mysql.html) + +Profanity found in file: ./content/learning-paths/servers-and-cloud-computing/llama-vision/backend.md +Profanities found: f"<|user|>\n{prompt.strip()}<|end_of_text|>\n", token.strip(): + +Profanity found in file: ./content/learning-paths/servers-and-cloud-computing/llama-vision/frontend.md +Profanities found: data.strip(), line.strip(), user_prompt.strip() + +Profanity found in file: ./content/learning-paths/servers-and-cloud-computing/arm_pmu/perf_event_open.md +Profanities found: configure_event(&pe[3], + +Profanity found in file: ./content/learning-paths/servers-and-cloud-computing/distributed-inference-with-llama-cpp/_index.md +Profanities found: Aryan + diff --git a/tools/profanity.py b/tools/profanity.py new file mode 100644 index 0000000000..f8127d6ffc --- /dev/null +++ b/tools/profanity.py @@ -0,0 +1,39 @@ +import os +from better_profanity import profanity +import argparse +import sys +def load_excluded_words(file_path): + with open(file_path, 'r') as f: + excluded_words = [word.strip() for word in f.readlines()] + return excluded_words + +def scan_for_profanities(directory, log_file, excluded_words_file=None): + exclude_words = None + profanities = None + if excluded_words_file: + exclude_words = 
load_excluded_words(excluded_words_file) + + with open(log_file, 'w') as f: + for root, dirs, files in os.walk(directory): + for file in files: + if file.endswith('.md'): # Read only markdown files + file_path = os.path.join(root, file) + with open(file_path, 'r') as code_file: + profanities = None + content = code_file.read() + if exclude_words: + excluded_content = content + for word in exclude_words: + excluded_content = excluded_content.replace(word, '') + if profanity.contains_profanity(excluded_content): + profanities = set(word for word in excluded_content.split() if profanity.contains_profanity(word)) + else: + if profanity.contains_profanity(content): + profanities = set(word for word in content.split() if profanity.contains_profanity(word)) + if profanities: + f.write(f"Profanity found in file: {file_path}\n") + f.write("Profanities found: ") + f.write(", ".join(profanities)) + f.write("\n\n") + +scan_for_profanities("./content/", "./profanity_log.txt", excluded_words_file="./.profanity_ignore.yml") \ No newline at end of file From 551d195d302be9a3259a962f9d59ac47bbf73454 Mon Sep 17 00:00:00 2001 From: Annie Tallund Date: Thu, 21 Aug 2025 12:55:28 +0200 Subject: [PATCH 3/5] Remove log file --- profanity_log.txt | 111 ---------------------------------------------- 1 file changed, 111 deletions(-) delete mode 100644 profanity_log.txt diff --git a/profanity_log.txt b/profanity_log.txt deleted file mode 100644 index 3204da927e..0000000000 --- a/profanity_log.txt +++ /dev/null @@ -1,111 +0,0 @@ -Profanity found in file: ./content/install-guides/perf.md -Profanities found: dummy - -Profanity found in file: ./content/learning-paths/embedded-and-microcontrollers/tfm/tfm.md -Profanities found: TFM_DUMMY_PROVISIONING, TFM_DUMMY_PROVISIONING, dummy - -Profanity found in file: ./content/learning-paths/embedded-and-microcontrollers/advanced_soc/connecting_peripheral.md -Profanities found: #IO_L3N_T0_DQS_AD1N_35 - -Profanity found in file: ./content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/how-to-4.md -Profanities found: vmax=[np.max(l) - -Profanity found in file: ./content/learning-paths/embedded-and-microcontrollers/training-inference-pytorch/fine-tune-2.md -Profanities found: outputs.max(1) - -Profanity found in file: ./content/learning-paths/cross-platform/kleidiai-explainer/page1.md -Profanities found: *z* - -Profanity found in file: ./content/learning-paths/cross-platform/ipexplorer/custom1.md -Profanities found: m4x1_cache_2kb, m4x1_nocache_ws0, m4x1_nocache_ws4, m4x1_cache_64kb - -Profanity found in file: ./content/learning-paths/cross-platform/vectorization-friendly-data-layout/a-more-complex-problem-revisited-sve.md -Profanities found: objects->vz[i], &objects->vz[i]);, box_hi_v);, &objects->vz[i], - -Profanity found in file: ./content/learning-paths/cross-platform/vectorization-friendly-data-layout/a-more-complex-problem-revisited.md -Profanities found: objects->vz[i], vnegq_f32(box_hi_v);, box_hi_v);, vld1q_f32(&objects->vz[i]);, vst1q_f32(&objects->vz[i], - -Profanity found in file: ./content/learning-paths/cross-platform/vectorization-friendly-data-layout/a-more-complex-problem-manual-simd.md -Profanities found: vnegq_f32(box_hi_v);, box_hi_v); - -Profanity found in file: ./content/learning-paths/cross-platform/adler32/neon-run-8.md -Profanities found: Test**: - -Profanity found in file: ./content/learning-paths/cross-platform/psa-tfm/run.md -Profanities found: CVM - -Profanity found in file: 
./content/learning-paths/mobile-graphics-and-gaming/kleidiai-on-android-with-mediapipe-and-xnnpack/2-run-gemma-2b.md -Profanities found: --cxxopt=-DABSL_FLAGS_STRIP_NAMES=0 - -Profanity found in file: ./content/learning-paths/mobile-graphics-and-gaming/get-started-with-arm-asr/04-generic_library.md -Profanities found: **1**, - -Profanity found in file: ./content/learning-paths/mobile-graphics-and-gaming/android_opencv_facedetection/background.md -Profanities found: Facial - -Profanity found in file: ./content/learning-paths/mobile-graphics-and-gaming/best-practices-for-hwrt-lumen-performance/8-lumen-general.md -Profanities found: color=#00FF00>**1** - -Profanity found in file: ./content/learning-paths/mobile-graphics-and-gaming/build-android-selfie-app-using-mediapipe-multimodality/6-flow-data-to-view-1.md -Profanities found: Paint.Style.STROKE, LANDMARK_STROKE_WIDTH - -Profanity found in file: ./content/learning-paths/iot/iot-sdk/openiot.md -Profanities found: TFM_DUMMY_PROVISIONING, TFM_DUMMY_PROVISIONING, dummy - -Profanity found in file: ./content/learning-paths/servers-and-cloud-computing/vLLM-quant/3-run-benchmark.md -Profanities found: metrics.py:455] - -Profanity found in file: ./content/learning-paths/servers-and-cloud-computing/arcee-foundation-model-on-aws/02_setting_up_the_instance.md -Profanities found: G++**: - -Profanity found in file: ./content/learning-paths/servers-and-cloud-computing/from-iot-to-the-cloud-part3/how-to-2.md -Profanities found: **1**, **Dev/Test** - -Profanity found in file: ./content/learning-paths/servers-and-cloud-computing/glibc-with-lse/mongo_benchmark.md -Profanities found: 455 - -Profanity found in file: ./content/learning-paths/servers-and-cloud-computing/triggering-pmu-events/icache.md -Profanities found: 455 - -Profanity found in file: ./content/learning-paths/servers-and-cloud-computing/wordpress/wordpress.md -Profanities found: --strip - -Profanity found in file: ./content/learning-paths/servers-and-cloud-computing/redis/single-node_deployment.md -Profanities found: **-h**, **-p** - -Profanity found in file: ./content/learning-paths/servers-and-cloud-computing/java-gc-tuning/setup.md -Profanities found: 21.0.4+7-LTS,, 21.0.4+7-LTS) - -Profanity found in file: ./content/learning-paths/servers-and-cloud-computing/java-gc-tuning/tuning-parameters.md -Profanities found: 21.0.4+7-LTS,, 21.0.4+7-LTS) - -Profanity found in file: ./content/learning-paths/servers-and-cloud-computing/arm_linux_page_size/centos.md -Profanities found: Strip - -Profanity found in file: ./content/learning-paths/servers-and-cloud-computing/rag/backend.md -Profanities found: "").strip() - -Profanity found in file: ./content/learning-paths/servers-and-cloud-computing/multiarch_ollama_on_gke/0-spin_up_gke_cluster.md -Profanities found: **1**)., **1** - -Profanity found in file: ./content/learning-paths/servers-and-cloud-computing/from-iot-to-the-cloud-part1/how-to-4.md -Profanities found: **vm-arm64**, **vm-arm64**, - -Profanity found in file: ./content/learning-paths/servers-and-cloud-computing/from-iot-to-the-cloud-part1/how-to-2.md -Profanities found: **vm-arm64** - -Profanity found in file: ./content/learning-paths/servers-and-cloud-computing/mysql/install_mysql.md -Profanities found: Manual](https://dev.mysql.com/doc/refman/8.1/en/), options](https://dev.mysql.com/doc/refman/8.1/en/installing.html)., documentation](https://dev.mysql.com/doc/refman/8.1/en/mysqld-server.html),, [instructions](https://dev.mysql.com/doc/refman/8.1/en/mysql.html) - -Profanity found in file: 
./content/learning-paths/servers-and-cloud-computing/llama-vision/backend.md -Profanities found: f"<|user|>\n{prompt.strip()}<|end_of_text|>\n", token.strip(): - -Profanity found in file: ./content/learning-paths/servers-and-cloud-computing/llama-vision/frontend.md -Profanities found: data.strip(), line.strip(), user_prompt.strip() - -Profanity found in file: ./content/learning-paths/servers-and-cloud-computing/arm_pmu/perf_event_open.md -Profanities found: configure_event(&pe[3], - -Profanity found in file: ./content/learning-paths/servers-and-cloud-computing/distributed-inference-with-llama-cpp/_index.md -Profanities found: Aryan - From 27bfd01f54a654fe0bdf9983c2426cf5f9246925 Mon Sep 17 00:00:00 2001 From: Annie Tallund Date: Thu, 21 Aug 2025 13:11:13 +0200 Subject: [PATCH 4/5] Update dependencies --- .github/workflows/content-checks.yml | 1 + tools/profanity.py | 3 +-- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/content-checks.yml b/.github/workflows/content-checks.yml index 3f87782e75..fcb6c2fef9 100644 --- a/.github/workflows/content-checks.yml +++ b/.github/workflows/content-checks.yml @@ -61,6 +61,7 @@ jobs: retention-days: 5 # Default is 90 days - name: Scan for profanities run: | + pip install better_profanity python tools/profanity.py cat profanity_log.txt diff --git a/tools/profanity.py b/tools/profanity.py index f8127d6ffc..bab7323ec5 100644 --- a/tools/profanity.py +++ b/tools/profanity.py @@ -1,7 +1,6 @@ import os from better_profanity import profanity -import argparse -import sys + def load_excluded_words(file_path): with open(file_path, 'r') as f: excluded_words = [word.strip() for word in f.readlines()] From 15c4acbed3c0a8731172c2316b3f7775cb65fb89 Mon Sep 17 00:00:00 2001 From: Annie <36964858+annietllnd@users.noreply.github.com> Date: Mon, 25 Aug 2025 10:28:19 +0200 Subject: [PATCH 5/5] Update test-lp.yml --- .github/workflows/test-lp.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test-lp.yml b/.github/workflows/test-lp.yml index ab04f2084b..4ed6b1d352 100644 --- a/.github/workflows/test-lp.yml +++ b/.github/workflows/test-lp.yml @@ -62,7 +62,7 @@ jobs: if: steps.changed-markdown-files.outputs.any_changed == 'true' run: | echo "Checking YAML fields for changed learning paths..." - python3 tools/validate_index_fields.py ${{ steps.changed-markdown-files.outputs.all_changed_files }} + python3 tools/verify_index_fields.py ${{ steps.changed-markdown-files.outputs.all_changed_files }} - name: Run test suite for all changed .md files id: run-suite if: steps.changed-markdown-files.outputs.any_changed == 'true'
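
For reference, below is a minimal sketch of the _index.md frontmatter shape that the new tools/verify_index_fields.py check validates. The field names are taken from REQUIRED_FIELDS in the script introduced above; every value shown is an illustrative placeholder, and the accepted subjects, operatingsystems, and cloud_service_providers entries come from tools/closed-filters-allow-list.yml, which is not included in this patch series. The script only checks that the keys exist and that skilllevels, subjects, and operatingsystems hold allowed values, so the nested structures shown are typical shapes rather than enforced formats.

---
# Illustrative placeholder values only; field names mirror REQUIRED_FIELDS
# in tools/verify_index_fields.py, which checks key presence plus the
# skilllevels, subjects, and operatingsystems values.
title: Example Learning Path title
minutes_to_complete: 30
who_is_this_for: Developers who want to try the example workflow.
learning_objectives:
  - Describe what the reader will be able to do.
prerequisites:
  - An Arm-based Linux machine or cloud instance.
author: Jane Doe
skilllevels: Introductory                # must be Introductory or Advanced (VALID_SKILLLEVELS)
subjects: Performance and Architecture   # must appear in the allow list for the path's category
armips:
  - Neoverse
tools_software_languages:
  - Python
operatingsystems:
  - Linux                                # each entry must appear in the allow list
further_reading:
  - resource:
      title: Example reference
      link: https://example.com
      type: documentation
weight: 1
layout: learningpathall                  # placeholder; use the repository's standard layout value
learning_path_main_page: "yes"
---

Running python3 tools/verify_index_fields.py content/learning-paths/<category>/<path>/_index.md locally prints a per-file confirmation or the list of missing and invalid fields, and exits non-zero if any file fails.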