run.py
import pytest
from pathfinder import LlamaChat, assistant, gen, select, user
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the quantized Llama-2 model and its tokenizer from the Hugging Face Hub.
arg_LLM = "TheBloke/Llama-2-7B-GPTQ"
model = AutoModelForCausalLM.from_pretrained(
    arg_LLM,
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained(arg_LLM, use_fast=True)

# Wrap the model in pathfinder's Llama chat interface.
base = LlamaChat(model, tokenizer)

# First exchange: ask for a list and stop generation before item 4.
lm = base
with user():
    lm += "Write a list"
with assistant():
    lm += "1)"
    # stop_regex given as a raw regex string.
    lm += gen(max_tokens=100, stop_regex=r"4\)", name="list")
    # Repeat the prompt, this time passing stop_regex as a list of literal stop strings.
    lm += "Write a list"
    lm += "1)"
    lm += gen(max_tokens=100, stop_regex=["4)"], name="list")

# The captured "list" text must not contain the stop sequence.
assert "4)" not in str(lm["list"])

# Second exchange: constrain the answer to one of two options with select().
lm = base
with user():
    lm += "Do you like apples or oranges?"
with assistant():
    lm += "I like "
    lm += select(["apples", "oranges"], name="fruit")

# The captured "fruit" value must be one of the allowed options.
assert lm["fruit"] in ("apples", "oranges")