
Commit 52274f5

Simon Guo committed
notes with sok discussion
1 parent 1f054c7 commit 52274f5

3 files changed: +46 −26 lines changed


src/loader.py

Lines changed: 31 additions & 0 deletions
@@ -73,6 +73,11 @@ def _gpu_context_from_py(py_path: str, gpu_name: str) -> Dict[str, str]:
         "gpu_best_practices_bullets": best_bullets,
     }
 
+# maybe a class-based approach
+# class PromptConstruction:
+#     def __init__(self, prompts_toml: str):
+#         self.cfg = PromptConfig.from_toml(prompts_toml)
+
 def render_prompt_by_option(
     *,
     prompts_toml: str,
@@ -81,6 +86,7 @@ render_prompt_by_option(
     context: Dict[str, str],
     gpu_specs_py: Optional[str] = None,
     gpu_name: Optional[str] = None,
+    log_prompt: bool = False,
 ) -> str:
     """
     New function that uses languages.X and options.Y structure
@@ -143,6 +149,25 @@ render_prompt_by_option(
        raise ValueError(f"Option '{option}' requires GPU info; provide gpu_specs_py and gpu_name")
        context = {**context, **_gpu_context_from_py(_abs_path(gpu_specs_py), gpu_name)}
 
+
+    # 1. you can either use your own custom ones
+    #    by calling one of the options
+    # 2. if you want to change the prompt,
+
+    #    you do it here
+
+
+    # TODO for Sokserey:
+    # 1. base prompt (one-shot examples based on the language)
+    # 2. base prompt + hardware info
+    # 3. base prompt + few-shot examples (specify which one)
+    # load the source -> pick which style you want to use, compose the pairs in here
+
+    # precision
+
+    # custom prompt
+    # components A, B, C, "my secret prompt"
+
     # Build the prompt from components
     prompt_parts = []
     for component in option_data["components"]:
@@ -167,3 +192,9 @@ render_prompt_by_option(
         return prompt_text.format(**context).strip() + "\n"
     except KeyError as e:
         raise KeyError(f"Missing placeholder in context: {e.args[0]}. Available: {list(context.keys())}") from e
+
+
+def main():
+    # call this file's prompt
+    # call examples ->
+
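The commented-out class and the TODO notes added above sketch a possible class-based refactor of prompt construction. Below is a minimal illustration of that idea, assuming prompts.toml keeps the [shared], [options.*], and [templates.common] layout touched later in this commit; the class name comes from the comment, but the loading and lookup logic here is a guess, not the repo's actual render_prompt_by_option implementation.

```python
# Illustrative sketch only: PromptConstruction mirrors the commented-out idea above.
# The TOML lookup below is an assumption based on the prompts.toml tables in this
# commit, not the real loader logic.
import tomllib
from typing import Dict


class PromptConstruction:
    def __init__(self, prompts_toml: str):
        # The comment suggests PromptConfig.from_toml; plain tomllib keeps the
        # sketch self-contained.
        with open(prompts_toml, "rb") as f:
            self.cfg = tomllib.load(f)

    def build(self, option: str, language: str,
              context: Dict[str, str], log_prompt: bool = False) -> str:
        # Merge shared text blocks with the common templates, then join the
        # components listed under the chosen [options.*] table.
        templates = {**self.cfg.get("shared", {}),
                     **self.cfg.get("templates", {}).get("common", {})}
        parts = [templates[name]
                 for name in self.cfg["options"][option]["components"]
                 if name in templates]
        prompt = "\n".join(parts).format(
            language_display=self.cfg["languages"][language]["language_display"],
            **context,
        )
        if log_prompt:
            print(prompt)  # ties in with the new log_prompt flag above
        return prompt
```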

src/prompt_constructor_multilang.py

Lines changed: 1 addition & 1 deletion
@@ -22,7 +22,7 @@ def get_prompt_for_language(ref_arch_src: str, language: str = "triton", option:
         context={"ref_arch_src": ref_arch_src},
     )
 
-def get_prompt_with_hardware(ref_arch_src: str, language: str, gpu_name: str) -> str:
+def get_prompt_with_hardware_info(ref_arch_src: str, language: str, gpu_name: str) -> str:
     """
     Generate a hardware-aware prompt for a specific language.
 
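For reference, a hypothetical caller of the renamed helper could look like the following; the import path, example architecture file, and GPU name are placeholders chosen for illustration, not values confirmed by this diff.

```python
# Hypothetical usage of the renamed helper; the paths and GPU name are placeholders.
from src.prompt_constructor_multilang import (
    get_prompt_for_language,
    get_prompt_with_hardware_info,
)

with open("src/prompts/model_ex_add.py") as f:
    ref_arch_src = f.read()

basic_prompt = get_prompt_for_language(ref_arch_src, language="triton")
hw_prompt = get_prompt_with_hardware_info(ref_arch_src, language="triton", gpu_name="A100")
```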

src/prompts/prompts.toml

Lines changed: 14 additions & 25 deletions
@@ -1,26 +1,27 @@
-[meta]
+[metadata]
 version = "1.0"
 default_language = "cuda"
 
 # -------------------------------------------------------------------------
-# Shared Templates: Used by all languages with placeholders
+# Shared Templates: Used by all backends with placeholders
 # -------------------------------------------------------------------------
 [shared]
 problem_statement = """
-You write custom {language_display} to replace the pytorch operators in the given architecture to get speedups.
+You write custom {language_display} to replace the PyTorch operators in the given architecture to get speedups.
 
 You have complete freedom to choose the set of operators you want to replace. You may make the decision to replace some operators with custom {language_display} and leave others unchanged. You may replace multiple operators with custom implementations, consider operator fusion opportunities (combining multiple operators into a single kernel, for example, combining matmul+relu), or algorithmic changes (such as online softmax). You are only limited by your imagination.
 """
 
+# on {hardware}, with {precision} as inputs and weights
 instruction = """
-Optimize the architecture named Model with custom {language_display}! Name your optimized output architecture ModelNew. Output the new code in codeblocks. Please generate real code, NOT pseudocode, make sure the code compiles and is fully functional. Just output the new model code, no other text, and NO testing code!
+Optimize the architecture named Model with custom {language_display}! Name your optimized output architecture ModelNew. Output the new code in codeblocks. Please generate real code, NOT pseudocode, make sure the code compiles and is fully functional. Just output the new model code, no other text, and NO testing code!
 """
 
-# Shared example architecture (same for all languages)
-few_shot_example_arch = "src/prompts/model_ex_add.py"
+# Shared example architecture (same reference for all backends)
+base_one_shot_example_arch = "src/prompts/model_ex_add.py"
 
 # -------------------------------------------------------------------------
-# Languages: Language-specific configuration (minimal, just what varies)
+# Backends: Backend-specific configuration (minimal, just what varies)
 # -------------------------------------------------------------------------
 [languages.triton]
 language_display = "Triton kernels"
@@ -36,6 +37,8 @@ few_shot_new_arch = "src/prompts/model_new_ex_add_cute.py"
 
 # -------------------------------------------------------------------------
 # Options: Different prompt construction modes
+# You should specify...
+# let's say if you want to make your ...
 # -------------------------------------------------------------------------
 [options.basic]
 # Basic prompt: problem statement + architecture + instruction
@@ -55,36 +58,19 @@ components = ["problem_statement", "few_shot_block", "hardware_header", "hardwar
 requires_gpu = true
 requires_example = true
 
-[options.fix_compile]
-# For fixing compilation errors
-description = "Prompt for fixing compilation errors"
-components = ["problem_statement", "arch_with_context", "failed_kernel", "compile_metadata", "fix_footer"]
 
-[options.fix_correctness]
-# For fixing correctness errors
-description = "Prompt for fixing correctness errors"
-components = ["problem_statement", "arch_with_context", "failed_kernel", "correctness_metadata", "fix_footer"]
 
 # -------------------------------------------------------------------------
 # Templates: Reusable text blocks with placeholders
 # -------------------------------------------------------------------------
 [templates.common]
 
-# --- Architecture Presentation ---
+# --- We present the task, a Problem / PyTorch Architecture ---
 # Used in prompts to present the reference architecture that needs optimization
 arch_block = """
 You are given the following architecture:
 
 
-{ref_arch_src}
-
-"""
-
-# Used in fix prompts to reference the architecture with contextual phrasing
-arch_with_context = """
-With the following architecture:
-
-
 {ref_arch_src}
 
 """
@@ -104,6 +90,9 @@ The example new arch with custom {language_display} looks like this:
 
 """
 
+
+
+
 # --- Error Fix Templates ---
 # Presents a kernel that failed (used in fix_compile and fix_correctness options)
 failed_kernel = """
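Tying the config back to the loader, a hedged example of calling render_prompt_by_option from src/loader.py against this file might look as follows; only prompts_toml, context, gpu_specs_py, gpu_name, and log_prompt are visible in the diff, so the option and language keyword names and the concrete values are assumptions.

```python
# Hedged example call; "basic" and "triton" come from the [options.basic] and
# [languages.triton] tables above, while the keyword names option= and language=
# are assumed from the loader's docstring, not confirmed by the diff.
from src.loader import render_prompt_by_option

with open("src/prompts/model_ex_add.py") as f:
    ref_arch_src = f.read()

prompt = render_prompt_by_option(
    prompts_toml="src/prompts/prompts.toml",
    language="triton",
    option="basic",
    context={"ref_arch_src": ref_arch_src},
    log_prompt=True,  # new flag added in this commit
)
print(prompt)
```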
