Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions .pre-commit-config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -22,9 +22,11 @@ repos:
entry: uv run ruff check
language: system
types: [python]
exclude: ^skills/
args: [--exit-non-zero-on-fix]
- id: ruff-format
name: ruff-format
entry: uv run ruff format
language: system
types: [python]
exclude: ^skills/
6 changes: 4 additions & 2 deletions contrib/README.md
Original file line number Diff line number Diff line change
@@ -1,8 +1,10 @@
# Contrib

This directory documents contrib-related examples for `bubseek`.
This directory contains plugins for `bubseek`.

`bubseek` does not install contrib from this directory. Contrib packages remain standard Python packages and should be added through normal dependency management in `pyproject.toml`.
Contrib packages remain standard Python packages and should be added through normal dependency management in `pyproject.toml`.

A typical plugin should also work with `bub`.

Typical example:

Expand Down
7 changes: 7 additions & 0 deletions skills/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
# Skills

This directory contains a curated collection of skills designed to address tasks across various fields.

By default, these skills are not included in the bubseek release.

You can install them in your project’s `.agents/skills` directory using `npx skills` or a similar command.
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ uv run scripts/gh_repo_card.py <org>/<repo> [--top-n 5] [--analysis "Your analys
```

The script path is relative to this skill directory:
`src/skills/github-repo-cards/scripts/gh_repo_card.py`
`skills/github-repo-cards/scripts/gh_repo_card.py`

This will:
1. Call `gh` to fetch repo metadata, stargazer counts, commit activity, and top contributors.
Expand All @@ -42,8 +42,7 @@ This will:
uv run scripts/gh_trending_card.py [--language python] [--since daily] [--limit 10] [--output trending.svg]
```

The script path is relative to this skill directory:
`src/skills/github-repo-cards/scripts/gh_trending_card.py`
The script path is relative to this skill directory: `scripts/gh_trending_card.py`

This will:
1. Scrape GitHub trending page (or use `gh api` search with recent star sorting).
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -15,16 +15,63 @@
import base64
import html
import json
import os
import shutil
import subprocess
import sys
import textwrap
import time
import urllib.error
import urllib.request
from datetime import datetime
from http import HTTPStatus
from pathlib import Path
from typing import cast

# ── Data fetching via gh CLI ─────────────────────────────────────────────────
# ── Data fetching via gh CLI / GitHub API ────────────────────────────────────

_GITHUB_API = "https://api.github.com"


def _github_headers(*, accept: str = "application/vnd.github+json") -> dict[str, str]:
headers = {
"Accept": accept,
"User-Agent": "bubseek-github-repo-cards",
"X-GitHub-Api-Version": "2022-11-28",
}
token = os.environ.get("GITHUB_TOKEN") or os.environ.get("GH_TOKEN")
if token:
headers["Authorization"] = f"Bearer {token}"
return headers


def _http_get(url: str, *, accept: str) -> tuple[bytes, str]:
    """Fetch *url* with GitHub API headers and return ``(body, content_type)``.

    Prefers the ``curl`` CLI when it is on PATH (it inherits system proxy/CA
    configuration); otherwise falls back to ``urllib.request``.

    Raises ``subprocess.CalledProcessError`` on the curl path, or
    ``urllib.error.HTTPError``/``URLError`` on the urllib path.
    """
    headers = _github_headers(accept=accept)
    curl = shutil.which("curl")
    if curl:
        # The file already uses function-scope imports; keep this one local.
        import tempfile

        # Stream the body to a temp file so stdout can carry the real
        # Content-Type via --write-out without corrupting binary payloads.
        # Hard-coding "application/octet-stream" here would produce wrong
        # MIME types in the data: URIs built from this result.
        with tempfile.NamedTemporaryFile(delete=False) as tmp:
            tmp_path = Path(tmp.name)
        try:
            command = [
                curl,
                "-fsSL",
                "--compressed",
                "--retry",
                "2",
                "--connect-timeout",
                "20",
                "-o",
                str(tmp_path),
                "-w",
                "%{content_type}",
            ]
            for name, value in headers.items():
                command.extend(["-H", f"{name}: {value}"])
            command.append(url)
            result = subprocess.run(command, capture_output=True, check=True)
            content_type = result.stdout.decode("utf-8", "replace").strip()
            body = tmp_path.read_bytes()
        finally:
            tmp_path.unlink(missing_ok=True)
        return body, content_type or "application/octet-stream"

    request = urllib.request.Request(url, headers=headers)
    with urllib.request.urlopen(request, timeout=20) as response:
        return response.read(), response.headers.get("Content-Type", "application/octet-stream")


def _api_json(url: str, *, accept: str = "application/vnd.github+json") -> dict | list:
    """GET *url* and decode the response body as JSON."""
    body, _content_type = _http_get(url, accept=accept)
    decoded = json.loads(body.decode("utf-8"))
    return cast(dict | list, decoded)


def _api_bytes(url: str, *, accept: str = "application/octet-stream") -> tuple[bytes, str]:
    """GET *url* and return the raw ``(body, content_type)`` pair unmodified."""
    response = _http_get(url, accept=accept)
    return response


def _gh_available() -> bool:
return shutil.which("gh") is not None


def _gh(*args: str) -> str:
Expand All @@ -47,18 +94,47 @@ def _gh_stats_json(endpoint: str, retries: int = 4) -> dict | list:
GitHub stats APIs return ``{}`` while computing data on the first call.
We retry with exponential back-off until an array is returned.
"""
raw: dict | list = {}
for attempt in range(retries):
raw = _gh_json("api", endpoint, "--cache", "0s")
if _gh_available():
raw = _gh_json("api", endpoint, "--cache", "0s")
else:
try:
raw = _api_json(f"{_GITHUB_API}/{endpoint}")
except urllib.error.HTTPError as exc:
if exc.code != HTTPStatus.ACCEPTED:
raise
raw = {}

if isinstance(raw, list):
return raw

delay = 2**attempt
print(f" ⏳ stats computing, retry in {delay}s …", file=sys.stderr)
time.sleep(delay)

return raw


def fetch_repo_info(nwo: str) -> dict:
"""Fetch basic repo metadata."""
if not _gh_available():
raw = _api_json(f"{_GITHUB_API}/repos/{nwo}")
if not isinstance(raw, dict):
raise TypeError(f"Unexpected response for repository {nwo!r}")
return {
"name": raw.get("name"),
"owner": {"login": raw.get("owner", {}).get("login", "")},
"description": raw.get("description"),
"stargazerCount": raw.get("stargazers_count", 0),
"forkCount": raw.get("forks_count", 0),
"primaryLanguage": {"name": raw.get("language") or ""},
"licenseInfo": {"name": (raw.get("license") or {}).get("name", "")},
"updatedAt": raw.get("updated_at"),
"url": raw.get("html_url"),
"homepageUrl": raw.get("homepage"),
}

return cast(
dict,
_gh_json(
Expand Down Expand Up @@ -86,21 +162,28 @@ def fetch_stargazer_counts(nwo: str) -> list[int]:
rough weekly bucketed curve.
"""
try:
raw = _gh(
"api",
f"repos/{nwo}/stargazers?per_page=100",
"-H",
"Accept: application/vnd.github.star+json",
"--cache",
"1h",
)
if not raw:
return []
items = json.loads(raw)
if _gh_available():
raw = _gh(
"api",
f"repos/{nwo}/stargazers?per_page=100",
"-H",
"Accept: application/vnd.github.star+json",
"--cache",
"1h",
)
if not raw:
return []
items = json.loads(raw)
else:
items = _api_json(
f"{_GITHUB_API}/repos/{nwo}/stargazers?per_page=100",
accept="application/vnd.github.star+json",
)

if not isinstance(items, list):
return []

from collections import Counter
from datetime import datetime

weeks: Counter[str] = Counter()
for item in items:
Expand All @@ -125,17 +208,18 @@ def _download_avatar_b64(url: str, size: int = 64) -> str:
"""
fetch_url = f"{url}&s={size}" if "?" in url else f"{url}?s={size}"
try:
with urllib.request.urlopen(fetch_url, timeout=10) as resp: # noqa: S310
data = resp.read()
ct = resp.headers.get("Content-Type", "image/png")
return f"data:{ct};base64,{base64.b64encode(data).decode()}"
data, content_type = _api_bytes(fetch_url)
return f"data:{content_type};base64,{base64.b64encode(data).decode()}"
except Exception:
return ""


def fetch_top_contributors(nwo: str, n: int = 5) -> list[dict]:
"""Return top-N contributors by commit count (with embedded avatar data)."""
raw = _gh_json("api", f"repos/{nwo}/contributors?per_page={n}", "--cache", "1h")
if _gh_available():
raw = _gh_json("api", f"repos/{nwo}/contributors?per_page={n}", "--cache", "1h")
else:
raw = _api_json(f"{_GITHUB_API}/repos/{nwo}/contributors?per_page={n}")
if not isinstance(raw, list):
return []
results = []
Expand All @@ -149,6 +233,31 @@ def fetch_top_contributors(nwo: str, n: int = 5) -> list[dict]:
return results


def build_default_analysis(info: dict) -> str:
    """Generate a concise analysis paragraph from repository metadata.

    Pulls the primary language, license, homepage, and (when parseable)
    last-update date out of *info* and joins them into short sentences.
    Missing values fall back to readable placeholders.
    """

    def _named(key: str, fallback: str) -> str:
        # Nested objects may be absent or None; treat both as "no value".
        return (info.get(key) or {}).get("name", "") or fallback

    parts = [
        f"Primary language: {_named('primaryLanguage', 'Unknown language')}.",
        f"License: {_named('licenseInfo', 'No license metadata')}.",
        f"Homepage: {info.get('homepageUrl') or 'No homepage'}.",
    ]

    stamp = info.get("updatedAt")
    if isinstance(stamp, str) and stamp:
        try:
            # GitHub timestamps use a trailing "Z"; normalize for fromisoformat.
            stamp = datetime.fromisoformat(stamp.replace("Z", "+00:00")).strftime("%Y-%m-%d")
        except ValueError:
            pass  # unparseable: surface the raw string rather than dropping it
        parts.append(f"Last updated: {stamp}.")

    return " ".join(parts)


# ── SVG rendering ────────────────────────────────────────────────────────────

_LANG_COLORS: dict[str, str] = {
Expand Down Expand Up @@ -512,7 +621,8 @@ def main() -> None:
contributors = fetch_top_contributors(nwo, args.top_n)

print("🎨 Rendering SVG …")
svg = render_repo_svg(info, commits, stars, contributors, analysis=args.analysis, top_n=args.top_n)
analysis = args.analysis or build_default_analysis(info)
svg = render_repo_svg(info, commits, stars, contributors, analysis=analysis, top_n=args.top_n)
out.write_text(svg, encoding="utf-8")
print(f" → {out}")

Expand Down
Loading
Loading