forked from techwithtim/OllamaTutorial
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathsample_request.py
More file actions
32 lines (28 loc) · 1.18 KB
/
sample_request.py
File metadata and controls
32 lines (28 loc) · 1.18 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
import requests
import json

# Base URL for the local Ollama chat API (Ollama's default port is 11434).
url = "http://localhost:11434/api/chat"

# Request payload: the model to run plus the chat history (one user turn here).
payload = {
    "model": "llama3.2",  # Replace with the model name you're using
    "messages": [{"role": "user", "content": "What is Python?"}],
}

# Send the HTTP POST request with streaming enabled so tokens arrive as they
# are generated.  `with` guarantees the underlying connection is released even
# if parsing fails partway through the stream.
with requests.post(url, json=payload, stream=True) as response:
    # Check the response status
    if response.status_code == 200:
        print("Streaming response from Ollama:")
        # Ollama streams one JSON object per line of the response body.
        for line in response.iter_lines(decode_unicode=True):
            if not line:  # Ignore empty keep-alive lines
                continue
            try:
                # Parse each line as a JSON object
                json_data = json.loads(line)
            except json.JSONDecodeError:
                print(f"\nFailed to parse line: {line}")
                continue
            # Extract and print the assistant's message content; each chunk
            # carries the next fragment of the reply, so suppress the newline.
            if "message" in json_data and "content" in json_data["message"]:
                print(json_data["message"]["content"], end="")
        print()  # Ensure the final output ends with a newline
    else:
        print(f"Error: {response.status_code}")
        print(response.text)