Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
21 changes: 20 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ pinned: false

Practice implementing operators and architectures from scratch — the exact skills top ML teams test for.

*Like LeetCode, but for tensors. Self-hosted. Jupyter-based. Instant feedback.*
*An interactive coding platform, but for tensors. Self-hosted. Jupyter-based. Instant feedback.*

[![PyTorch](https://img.shields.io/badge/PyTorch-ee4c2c?style=for-the-badge&logo=pytorch&logoColor=white)](https://pytorch.org)
[![Jupyter](https://img.shields.io/badge/Jupyter-F37626?style=for-the-badge&logo=jupyter&logoColor=white)](https://jupyter.org)
Expand Down Expand Up @@ -100,6 +100,25 @@ make run

Open **<http://localhost:8888>** — that's it. Works with both Docker and Podman (auto-detected).

### Option 3 — Standalone Web UI (Next.js + FastAPI)

For a modern, standalone coding experience with an integrated IDE and dual-pane layout:

1. **Start Backend (FastAPI):**
```bash
pip install -r api/requirements.txt
python -m uvicorn api.main:app --port 8000 --reload
```
2. **Start Frontend (Next.js):**
```bash
cd web
npm install
npm run dev
```
3. Open **<http://localhost:3000>** in your browser.

![TorchCode UI Preview](assets/ui_preview.png)

---

## 📋 Problem Set
Expand Down
61 changes: 61 additions & 0 deletions api/main.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,61 @@
from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
import sys
import os

# Add the root directory to PYTHONPATH so we can import torch_judge
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from torch_judge.tasks import TASKS, get_task
from torch_judge.web_engine import execute_code
from api.parser import get_all_templates

# FastAPI application exposing the task list, task details, and submission endpoints.
app = FastAPI(title="TorchCode UI Backend")

# Allow CORS for local frontend development
# NOTE(review): wildcard origins combined with allow_credentials=True is very
# permissive — fine for localhost development, but tighten the origin list
# before any non-local deployment.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Load templates on startup
# Parsed once at import time from the notebook templates on disk; editing a
# template requires restarting the server to be picked up.
TEMPLATES = get_all_templates()

# Request body schema for POST /api/submit/{task_id}.
# (Comment rather than docstring on purpose: a pydantic model docstring is
# surfaced in the generated OpenAPI schema, which would change API output.)
class SubmitRequest(BaseModel):
    code: str  # user-submitted solution source code to be executed and judged

@app.get("/api/tasks")
def list_tasks():
    """Return a summary (id, title, difficulty) for every registered task.

    Returns:
        list[dict]: one entry per task in ``TASKS``, each with ``id``,
        ``title`` and ``difficulty`` keys.
    """
    # Comprehension instead of the manual append loop (ruff PERF401).
    # "Unknown" default mirrors the fallback used in get_task_details.
    return [
        {
            "id": task_id,
            "title": task_data["title"],
            "difficulty": task_data.get("difficulty", "Unknown"),
        }
        for task_id, task_data in TASKS.items()
    ]

@app.get("/api/tasks/{task_id}")
def get_task_details(task_id: str):
    """Return full details for one task: judge metadata plus parsed template.

    Raises:
        HTTPException: 404 when ``task_id`` is not a known task.
    """
    task_info = get_task(task_id)
    if not task_info:
        raise HTTPException(status_code=404, detail="Task not found")

    # Template data is optional — fall back to placeholder text when the
    # notebook template was not found or failed to parse.
    tpl = TEMPLATES.get(task_id, {})
    details = {
        "id": task_id,
        "title": task_info["title"],
        "difficulty": task_info.get("difficulty", "Unknown"),
        "hint": task_info.get("hint", ""),
    }
    details["description"] = tpl.get("description", "Description not found.")
    details["initial_code"] = tpl.get("initial_code", "# Write your code here.")
    return details

@app.post("/api/submit/{task_id}")
def submit_code(task_id: str, request: SubmitRequest):
    """Run the submitted code through the judge and return its result verbatim.

    NOTE(review): unlike get_task_details there is no 404 for an unknown
    task_id here — presumably execute_code reports that itself; confirm.
    """
    result = execute_code(task_id, request.code)
    return result
78 changes: 78 additions & 0 deletions api/parser.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,78 @@
import json
import os
import glob
import re

def parse_notebook_template(filepath: str) -> dict:
    """Extract the problem description and starter code from a notebook template.

    The first markdown cell (minus any Colab badge lines) becomes the
    description; any code cell containing the implementation placeholder
    becomes the starter code, prefixed with the standard torch imports.

    Returns:
        dict with ``description`` and ``initial_code`` keys; placeholder
        error strings when the file cannot be read or parsed.
    """
    prelude = (
        "import torch\n"
        "import torch.nn as nn\n"
        "import torch.nn.functional as F\n"
        "import math\n\n"
    )
    try:
        with open(filepath, 'r', encoding='utf-8') as fh:
            notebook = json.load(fh)

        description = ""
        initial_code = ""

        for cell in notebook.get("cells", []):
            cell_type = cell["cell_type"]
            lines = cell.get("source", [])

            if cell_type == "markdown" and not description:
                # Only the first markdown block (the problem statement);
                # drop Colab badge lines.
                kept = [ln for ln in lines if "![Open In Colab]" not in ln]
                description = "".join(kept).strip()
            elif cell_type == "code":
                joined = "".join(lines)
                # A cell with the placeholder marker is the starter code.
                if "# ✏️ YOUR IMPLEMENTATION HERE" in joined:
                    initial_code = prelude + joined

        return {
            "description": description,
            "initial_code": initial_code,
        }
    except Exception as e:
        print(f"Error parsing {filepath}: {e}")
        return {
            "description": "Error loading description.",
            "initial_code": "# Error loading template code.",
        }

def get_all_templates(templates_dir: str = "../templates") -> dict:
    """Map each task_id to its parsed template data.

    The task_id is inferred from the notebook filename
    (e.g. ``01_relu.ipynb`` -> ``relu``).

    Args:
        templates_dir: directory to scan, resolved relative to this file so
            results do not depend on the process's working directory.
    """
    # Anchor the search path to this module's location on disk.
    here = os.path.dirname(os.path.abspath(__file__))
    search_root = os.path.join(here, templates_dir)

    numbered = re.compile(r"^\d+_(.+)\.ipynb$")
    results = {}
    for path in glob.glob(os.path.join(search_root, "*.ipynb")):
        name = os.path.basename(path)
        if name == "00_welcome.ipynb":
            # The welcome notebook is an intro, not a task.
            continue
        m = numbered.match(name)
        if m:
            results[m.group(1)] = parse_notebook_template(path)
    return results

if __name__ == "__main__":
    # Smoke-test the parser against the real templates directory.
    parsed = get_all_templates()
    if "relu" in parsed:
        print("Successfully parsed relu:")
        print("Description:", parsed["relu"]["description"][:100], "...")
        print("Code:", parsed["relu"]["initial_code"][:50], "...")
3 changes: 3 additions & 0 deletions api/requirements.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
fastapi
uvicorn
pydantic
Binary file added assets/ui_preview.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
2 changes: 1 addition & 1 deletion templates/07_batchnorm.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@
"- Compute batch mean and variance over `dim=0` with `unbiased=False`\n",
"- Update running stats like PyTorch: `running = (1 - momentum) * running + momentum * batch_stat`\n",
"- Use `running_mean` / `running_var` for inference when `training=False`\n",
"- Must support autograd w.r.t. `x`, `gamma`, `beta`running statistics 应视作 buffer,而不是需要梯度的参数)"
"- Must support autograd w.r.t. `x`, `gamma`, `beta` (running statistics should be treated as buffers, not parameters requiring gradients)"
]
},
{
Expand Down
10 changes: 5 additions & 5 deletions torch_judge/__init__.py
Original file line number Diff line number Diff line change
@@ -1,12 +1,12 @@
"""TorchCode — PyTorch 刷题判定引擎。在 Jupyter Notebook 中使用。
"""TorchCode — PyTorch practice engine. Used in Jupyter Notebooks.

Usage:
from torch_judge import check, status
Example:
from torch_judge import status, check

# 查看所有题目进度
# View progress for all tasks
status()

# 实现完函数后,运行判定
# After implementing the function, run the judge
check("relu")
"""

Expand Down
2 changes: 1 addition & 1 deletion torch_judge/tasks/adam.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@
"title": "Adam Optimizer",
"difficulty": "Medium",
"function_name": "MyAdam",
"hint": "Track m (1st moment) and v (2nd moment). m = beta1*m + (1-beta1)*grad, v = beta2*v + (1-beta2)*grad^2. Bias correct: m_hat = m/(1-beta1^t). Update: p -= lr * m_hat / (sqrt(v_hat) + eps).",
"hint": "Track $m$ (1st moment) and $v$ (2nd moment). $m = \\beta_1 m + (1-\\beta_1)\\nabla$, $v = \\beta_2 v + (1-\\beta_2)\\nabla^2$. Bias correct: $\\hat{m} = m/(1-\\beta_1^t)$, $\\hat{v} = v/(1-\\beta_2^t)$. Update: $p \\leftarrow p - \\text{lr} \\cdot \\hat{m} / (\\sqrt{\\hat{v}} + \\epsilon)$.",
"tests": [
{
"name": "Parameters change after step",
Expand Down
2 changes: 1 addition & 1 deletion torch_judge/tasks/attention.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@
"title": "Softmax Attention",
"difficulty": "Hard",
"function_name": "scaled_dot_product_attention",
"hint": "scores = Q @ K^T / sqrt(d_k), then softmax(scores, dim=-1) @ V. Use torch.bmm for batched matmul.",
"hint": "$\\text{scores} = (Q K^T) / \\sqrt{d_k}$, then $\\text{softmax}(\\text{scores}, \\text{dim}=-1) V$. Use `torch.bmm` for batched matmul.",
"tests": [
{
"name": "Output shape",
Expand Down
2 changes: 1 addition & 1 deletion torch_judge/tasks/gelu.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@
"title": "GELU Activation",
"difficulty": "Easy",
"function_name": "my_gelu",
"hint": "Exact: x * 0.5 * (1 + erf(x / sqrt(2))). Or approximate: 0.5*x*(1+tanh(sqrt(2/pi)*(x+0.044715*x^3))).",
"hint": "Exact: $x \\cdot 0.5 \\cdot (1 + \\text{erf}(x / \\sqrt{2}))$. Or approximate: $0.5x(1+\\tanh(\\sqrt{2/\\pi}(x+0.044715x^3)))$.",
"tests": [
{
"name": "Matches F.gelu",
Expand Down
2 changes: 1 addition & 1 deletion torch_judge/tasks/layernorm.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@
"title": "Implement LayerNorm",
"difficulty": "Medium",
"function_name": "my_layer_norm",
"hint": "Normalize over the last dim: (x - mean) / sqrt(var + eps), then scale by gamma and shift by beta.",
"hint": "Normalize over the last dim: $(x - \\mu) / \\sqrt{\\sigma^2 + \\epsilon}$, then scale by $\\gamma$ and shift by $\\beta$.",
"tests": [
{
"name": "Shape and basic behavior",
Expand Down
2 changes: 1 addition & 1 deletion torch_judge/tasks/linear_regression.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@
"title": "Linear Regression",
"difficulty": "Medium",
"function_name": "LinearRegression",
"hint": "Closed-form: augment X with ones column, solve w = (X^T X)^{-1} X^T y via torch.linalg.lstsq. Gradient descent: grad_w = (2/N) * X^T @ (pred - y), update w -= lr * grad_w. nn.Linear: create nn.Linear(D, 1), use MSELoss + optimizer.step() loop.",
"hint": "Closed-form: augment $X$ with ones column, solve $w = (X^T X)^{-1} X^T y$ via `torch.linalg.lstsq`. Gradient descent: $\\nabla w = \\frac{2}{N} X^T (\\hat{y} - y)$, update $w \\leftarrow w - \\text{lr} \\cdot \\nabla w$. `nn.Linear`: create `nn.Linear(D, 1)`, use `MSELoss` + `optimizer.step()` loop.",
"tests": [
{
"name": "Closed-form returns correct shapes",
Expand Down
2 changes: 1 addition & 1 deletion torch_judge/tasks/lora.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@
"title": "LoRA (Low-Rank Adaptation)",
"difficulty": "Medium",
"function_name": "LoRALinear",
"hint": "Freeze base linear. Add lora_A (rank, in) and lora_B (out, rank) as Parameters. B init to zeros. output = linear(x) + (x @ A^T @ B^T) * (alpha/rank).",
"hint": "Freeze base linear. Add `lora_A` (rank, in) and `lora_B` (out, rank) as Parameters. B init to zeros. $\\text{output} = \\text{linear}(x) + (x A^T B^T) \\cdot (\\alpha/r)$.",
"tests": [
{
"name": "Base weights frozen",
Expand Down
2 changes: 1 addition & 1 deletion torch_judge/tasks/rmsnorm.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@
"title": "Implement RMSNorm",
"difficulty": "Medium",
"function_name": "rms_norm",
"hint": "RMS(x) = sqrt(mean(x^2) + eps). RMSNorm(x) = x / RMS(x) * weight. Simpler than LayerNorm — no mean subtraction.",
"hint": "$\\text{RMS}(x) = \\sqrt{\\text{mean}(x^2) + \\epsilon}$. $\\text{RMSNorm}(x) = \\frac{x}{\\text{RMS}(x)} \\cdot \\text{weight}$. Simpler than LayerNorm — no mean subtraction.",
"tests": [
{
"name": "Basic behavior",
Expand Down
2 changes: 1 addition & 1 deletion torch_judge/tasks/rope.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@
"title": "Rotary Position Embedding (RoPE)",
"difficulty": "Hard",
"function_name": "apply_rope",
"hint": "Split into pairs (x_even, x_odd). Compute angles = pos * 1/(10000^(2i/d)). Rotate: [x_e*cos - x_o*sin, x_e*sin + x_o*cos]. Stack and flatten.",
"hint": "Split into pairs $(x_{\\text{even}}, x_{\\text{odd}})$. Compute $\\theta = \\text{pos} \\cdot 1/(10000^{2i/d})$. Rotate: $[x_e\\cos\\theta - x_o\\sin\\theta, x_e\\sin\\theta + x_o\\cos\\theta]$. Stack and flatten.",
"tests": [
{
"name": "Output shapes",
Expand Down
Loading