
Commit 2ab28b1

feat: Add support for Ollama as a chat provider and update related settings
1 parent 27f0330 commit 2ab28b1

10 files changed: +620 -114 lines changed

Info.plist

+59 -28
@@ -2,33 +2,64 @@
 <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
 <plist version="1.0">
 <dict>
-    <key>CFBundleName</key>
-    <string>$(PRODUCT_NAME)</string>
-    <key>CFBundleExecutable</key>
-    <string>$(EXECUTABLE_NAME)</string>
-    <key>CFBundleIdentifier</key>
-    <string>ai.kroonen.ophelia</string>
-    <key>CFBundleVersion</key>
-    <string>1</string>
-    <key>CFBundleShortVersionString</key>
-    <string>1.0</string>
-    <key>CFBundlePackageType</key>
-    <string>APPL</string>
-    <key>ITSAppUsesNonExemptEncryption</key>
-    <false/>
-    <key>UILaunchStoryboardName</key>
-    <string>LaunchScreen</string>
-    <key>UIRequiredDeviceCapabilities</key>
-    <array>
-        <string>arm64</string>
-    </array>
-    <key>UISupportedInterfaceOrientations</key>
-    <array>
-        <string>UIInterfaceOrientationPortrait</string>
-        <string>UIInterfaceOrientationLandscapeLeft</string>
-        <string>UIInterfaceOrientationLandscapeRight</string>
-    </array>
-    <key>UIRequiresFullScreen</key>
-    <true/>
+    <key>CFBundleInfoDictionaryVersion</key>
+    <string>6.0</string>
+    <key>CFBundleName</key>
+    <string>$(PRODUCT_NAME)</string>
+    <key>CFBundleExecutable</key>
+    <string>$(EXECUTABLE_NAME)</string>
+    <key>CFBundleIdentifier</key>
+    <string>ai.kroonen.ophelia</string>
+    <key>CFBundleVersion</key>
+    <string>1</string>
+    <key>CFBundleShortVersionString</key>
+    <string>1.0</string>
+    <key>CFBundlePackageType</key>
+    <string>APPL</string>
+    <key>ITSAppUsesNonExemptEncryption</key>
+    <false/>
+    <key>UILaunchStoryboardName</key>
+    <string>LaunchScreen</string>
+    <key>UIRequiredDeviceCapabilities</key>
+    <array>
+        <string>arm64</string>
+    </array>
+    <key>UISupportedInterfaceOrientations</key>
+    <array>
+        <string>UIInterfaceOrientationPortrait</string>
+        <string>UIInterfaceOrientationLandscapeLeft</string>
+        <string>UIInterfaceOrientationLandscapeRight</string>
+    </array>
+    <key>UIRequiresFullScreen</key>
+    <true/>
+    <key>NSAppTransportSecurity</key>
+    <dict>
+        <key>NSAllowsArbitraryLoads</key>
+        <true/>
+        <key>NSExceptionDomains</key>
+        <dict>
+            <key>stargate.local</key>
+            <dict>
+                <key>NSIncludesSubdomains</key>
+                <true/>
+                <key>NSTemporaryExceptionAllowsInsecureHTTPLoads</key>
+                <true/>
+            </dict>
+            <key>192.168.2.112</key>
+            <dict>
+                <key>NSIncludesSubdomains</key>
+                <true/>
+                <key>NSTemporaryExceptionAllowsInsecureHTTPLoads</key>
+                <true/>
+            </dict>
+            <key>192.168.2.112:11434</key>
+            <dict>
+                <key>NSIncludesSubdomains</key>
+                <true/>
+                <key>NSTemporaryExceptionAllowsInsecureHTTPLoads</key>
+                <true/>
+            </dict>
+        </dict>
+    </dict>
 </dict>
 </plist>
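
Two notes on the new NSAppTransportSecurity block. First, setting NSAllowsArbitraryLoads to true already permits plain-HTTP loads to any host, so the per-domain exceptions are redundant backup; and since ATS exception-domain keys take bare host names, the 192.168.2.112:11434 entry (with a port in the key) most likely has no effect. Second, with ATS relaxed, the app can talk to a local Ollama server over plain HTTP. A minimal sketch of such a request, assuming Ollama's documented /api/chat endpoint; the model name and address are placeholders, not values from this commit:

import Foundation

// Minimal sketch (not the app's networking code): build a streaming chat
// request against a local Ollama server over plain HTTP. ATS must allow the
// insecure load, which is what the plist changes above arrange.
func makeOllamaChatRequest(serverURL: URL) throws -> URLRequest {
    var request = URLRequest(url: serverURL.appendingPathComponent("api/chat"))
    request.httpMethod = "POST"
    request.setValue("application/json", forHTTPHeaderField: "Content-Type")
    let body: [String: Any] = [
        "model": "llama3",                                    // placeholder model name
        "messages": [["role": "user", "content": "Hello"]],
        "stream": true                                        // Ollama then streams NDJSON lines
    ]
    request.httpBody = try JSONSerialization.data(withJSONObject: body)
    return request
}

// Usage (placeholder address matching the exception above):
// let request = try makeOllamaChatRequest(serverURL: URL(string: "http://192.168.2.112:11434")!)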

Sources/ChatViewModel.swift

+108 -70
@@ -111,6 +111,12 @@ class ChatViewModel: ObservableObject {
     private var openAITTSService: OpenAITTSService?
     private var systemVoiceService: SystemVoiceService?
 
+    /**
+     If we add a custom OllamaService instance, we handle it the same way as
+     OpenAI, Anthropic, or GitHub: it is stored in `chatService` when the user
+     picks that provider. This happens in `initializeChatService`.
+     */
+
     // MARK: - Persistence Tools
 
     /// Reference to UserDefaults for reading/writing settings and messages.
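
OllamaService itself is added in another of the ten files in this commit and is not shown in this excerpt. Judging from the call sites later in this file (an init taking serverURL:, then streamCompletion(messages:model:system:)), its shape is presumably something like the sketch below; the protocol name, stream element type, and networking details are assumptions, not the commit's actual code.

import Foundation

// Assumed interface, inferred from the call sites in this file; the real
// protocol and service in the repo may differ.
protocol ChatServiceLike {
    func streamCompletion(
        messages: [[String: String]],
        model: String,
        system: String?
    ) async throws -> AsyncThrowingStream<String, Error>
}

final class OllamaServiceSketch: ChatServiceLike {
    private let serverURL: URL

    init(serverURL: URL) {
        self.serverURL = serverURL
    }

    func streamCompletion(
        messages: [[String: String]],
        model: String,
        system: String?
    ) async throws -> AsyncThrowingStream<String, Error> {
        AsyncThrowingStream { continuation in
            Task {
                do {
                    var request = URLRequest(url: self.serverURL.appendingPathComponent("api/chat"))
                    request.httpMethod = "POST"
                    request.setValue("application/json", forHTTPHeaderField: "Content-Type")
                    var payload = messages
                    if let system {
                        payload.insert(["role": "system", "content": system], at: 0)
                    }
                    request.httpBody = try JSONSerialization.data(withJSONObject: [
                        "model": model,
                        "messages": payload,
                        "stream": true
                    ])
                    let (bytes, _) = try await URLSession.shared.bytes(for: request)
                    // Ollama streams one JSON object per line (NDJSON). Yielding the
                    // raw line, as sketched here, is exactly the kind of behavior that
                    // would let {"model": ...} metadata leak into the token stream and
                    // would explain the filtering added further down in this file.
                    for try await line in bytes.lines {
                        continuation.yield(line)
                    }
                    continuation.finish()
                } catch {
                    continuation.finish(throwing: error)
                }
            }
        }
    }
}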
@@ -123,7 +129,7 @@ class ChatViewModel: ObservableObject {
     // MARK: - Token Batching Configuration
 
     /// Number of tokens to accumulate before flushing to the AI message text.
-    private let tokenBatchSize = 5
+    private let tokenBatchSize = 1
 
     /// Max time interval before forcibly flushing tokens, to keep UI responsive.
     private let tokenFlushInterval: TimeInterval = 0.2
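
These two knobs describe a simple policy: flush buffered tokens after tokenBatchSize arrivals, or after tokenFlushInterval seconds, whichever comes first (with the batch size now 1, every token flushes immediately). A stripped-down sketch of that policy in isolation, independent of the actor and UI plumbing used later in this file:

import Foundation

// Stripped-down token batcher mirroring the two knobs above; the flush
// callback stands in for appending to the AI message.
final class TokenBatcher {
    private var buffer = ""
    private var pending = 0
    private var lastFlush = Date()
    private let batchSize: Int
    private let flushInterval: TimeInterval
    private let flush: (String) -> Void

    init(batchSize: Int = 1, flushInterval: TimeInterval = 0.2, flush: @escaping (String) -> Void) {
        self.batchSize = batchSize
        self.flushInterval = flushInterval
        self.flush = flush
    }

    func append(_ token: String) {
        buffer += token
        pending += 1
        // Flush on either threshold: enough tokens, or enough elapsed time.
        if pending >= batchSize || Date().timeIntervalSince(lastFlush) >= flushInterval {
            flushNow()
        }
    }

    func flushNow() {
        guard !buffer.isEmpty else { return }
        flush(buffer)
        buffer = ""
        pending = 0
        lastFlush = Date()
    }
}

// Usage: let batcher = TokenBatcher { chunk in print("flush:", chunk) }
//        batcher.append("Hel"); batcher.append("lo")   // flushes each token at batchSize 1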
@@ -240,9 +246,11 @@ class ChatViewModel: ObservableObject {
         print("[Debug] Provider: \(appSettings.selectedProvider)")
         print("[Debug] Using API Key: \(maskAPIKey(appSettings.currentAPIKey))")
 
-        // 1) Validate non-empty input and valid API key.
+        // 1) Validate non-empty input and, when needed, a valid API key.
         guard !inputText.trimmingCharacters(in: .whitespacesAndNewlines).isEmpty else { return }
-        guard !appSettings.currentAPIKey.isEmpty else {
+
+        // Only check for an API key if the provider isn't Ollama.
+        if appSettings.selectedProvider != .ollama && appSettings.currentAPIKey.isEmpty {
             print("[ChatViewModel] No valid API key for provider \(appSettings.selectedProvider).")
             handleError(ChatServiceError.invalidAPIKey)
             return
@@ -363,7 +371,8 @@ class ChatViewModel: ObservableObject {
      Ensures changes to provider, model, or keys take effect immediately.
      */
     internal func initializeChatService(with settings: AppSettings) {
-        guard !settings.currentAPIKey.isEmpty else {
+        // For Ollama, we don't require an API key.
+        if settings.selectedProvider != .ollama && settings.currentAPIKey.isEmpty {
             chatService = nil
             print("[ChatViewModel] No valid API key for provider \(settings.selectedProvider).")
             return
@@ -381,6 +390,11 @@ class ChatViewModel: ObservableObject {
         case .githubModel:
             chatService = GitHubModelChatService(apiKey: settings.githubToken)
             print("[ChatViewModel] Initialized GitHub Model Chat Service with key: \(maskAPIKey(settings.githubToken))")
+
+        case .ollama:
+            let service = OllamaService(serverURL: settings.ollamaServerURL)
+            self.chatService = service
+            print("[ChatViewModel] Initialized Ollama service with URL: \(settings.ollamaServerURL)")
         }
     }
 
@@ -501,7 +515,8 @@ class ChatViewModel: ObservableObject {
      streaming response tokens. Also manages insertion of system messages (e.g. instructions).
      */
     private func performSendFlow() async {
-        print("[ChatViewModel] Sending message to API...")
+        print("[ChatViewModel] *** STARTING NEW SEND FLOW ***")
+        print("[ChatViewModel] Provider: \(appSettings.selectedProvider.rawValue), Model: \(appSettings.selectedModelId)")
 
         // 1) Create a placeholder AI message that we'll update with streaming tokens.
         let aiMessage = MutableMessage(
@@ -519,37 +534,48 @@ class ChatViewModel: ObservableObject {
 
         // 2) Build the list of messages to send, possibly including relevant memories.
         let payload = await prepareMessagesPayload()
 
-        // 3) Handle system message if needed (OpenAI/GitHub typically embed as role=system).
-        //    Anthropic uses a separate `system` param.
-        if (appSettings.selectedProvider == .openAI || appSettings.selectedProvider == .githubModel),
-           !appSettings.systemMessage.isEmpty {
-            var updated = payload
-            updated.insert(["role": "system", "content": appSettings.systemMessage], at: 0)
-
-            // For Anthropic, pass system text separately.
-            let systemMessage = (appSettings.selectedProvider == .anthropic && !appSettings.systemMessage.isEmpty)
-                ? appSettings.systemMessage
-                : nil
-
-            let stream = try await service.streamCompletion(
-                messages: updated,
-                model: appSettings.selectedModelId,
-                system: systemMessage
-            )
-            let completeResponse = try await handleResponseStream(stream, aiMessage: aiMessage)
-            await finalizeResponseProcessing(completeResponse: completeResponse)
-
+        // 3) Handle the system message based on provider.
+        //    Determine which providers embed the system message in the payload
+        //    vs. passing it as a separate parameter.
+        let shouldIncludeSystemInPayload = (appSettings.selectedProvider == .openAI ||
+                                            appSettings.selectedProvider == .githubModel ||
+                                            appSettings.selectedProvider == .ollama)
+
+        let systemMessageParam: String?
+
+        if !appSettings.systemMessage.isEmpty {
+            if appSettings.selectedProvider == .anthropic {
+                systemMessageParam = appSettings.systemMessage // Anthropic expects system as a separate param
+            } else {
+                systemMessageParam = appSettings.systemMessage // for Ollama, we send it both ways for robustness
+            }
+
+            if shouldIncludeSystemInPayload {
+                var updated = payload
+                updated.insert(["role": "system", "content": appSettings.systemMessage], at: 0)
+
+                let stream = try await service.streamCompletion(
+                    messages: updated,
+                    model: appSettings.selectedModelId,
+                    system: systemMessageParam
+                )
+                let completeResponse = try await handleResponseStream(stream, aiMessage: aiMessage)
+                await finalizeResponseProcessing(completeResponse: completeResponse)
+            } else {
+                let stream = try await service.streamCompletion(
+                    messages: payload,
+                    model: appSettings.selectedModelId,
+                    system: systemMessageParam
+                )
+                let completeResponse = try await handleResponseStream(stream, aiMessage: aiMessage)
+                await finalizeResponseProcessing(completeResponse: completeResponse)
+            }
         } else {
-            // For Anthropic or other providers.
-            let systemMessage = (appSettings.selectedProvider == .anthropic && !appSettings.systemMessage.isEmpty)
-                ? appSettings.systemMessage
-                : nil
-
+            // No system message at all; just send the message payload.
             let stream = try await service.streamCompletion(
                 messages: payload,
                 model: appSettings.selectedModelId,
-                system: systemMessage
+                system: nil
             )
             let completeResponse = try await handleResponseStream(stream, aiMessage: aiMessage)
             await finalizeResponseProcessing(completeResponse: completeResponse)
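
The restructured branching is easier to audit as a pure decision: Anthropic receives the system text only through the separate system: parameter, while OpenAI, GitHub Models, and Ollama get a role=system message prepended to the payload and, as written, the separate parameter as well. A condensed sketch of that decision, for illustration only; the enum mirrors the provider cases used in this file:

// Provider routing from the hunk above, as a standalone helper (not part of the commit).
enum Provider { case openAI, anthropic, githubModel, ollama }

func systemRouting(provider: Provider, systemMessage: String)
    -> (embedInPayload: Bool, systemParam: String?)
{
    guard !systemMessage.isEmpty else { return (false, nil) }
    switch provider {
    case .anthropic:
        // Anthropic takes the system text only as a separate top-level parameter.
        return (false, systemMessage)
    case .openAI, .githubModel, .ollama:
        // These embed a role=system message in the payload; the code above also
        // passes the separate parameter ("both ways for robustness").
        return (true, systemMessage)
    }
}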
@@ -568,13 +594,28 @@ class ChatViewModel: ObservableObject {
      an empty placeholder or keep the message and optionally speak it. Then persists messages.
      */
     private func finalizeResponseProcessing(completeResponse: String) async {
-        print("[ChatViewModel] Received response: \(completeResponse)")
-
-        let trimmed = completeResponse.trimmingCharacters(in: .whitespacesAndNewlines)
+        print("[ChatViewModel] Received complete response, raw length: \(completeResponse.count)")
+
+        // Clean up the response by removing any trailing JSON metadata.
+        var cleanedResponse = completeResponse
+        if let jsonStart = completeResponse.range(of: "{\"model\":") {
+            cleanedResponse = String(completeResponse[..<jsonStart.lowerBound])
+        }
+
+        let trimmed = cleanedResponse.trimmingCharacters(in: .whitespacesAndNewlines)
+        print("[ChatViewModel] Cleaned response length: \(trimmed.count)")
+
         if trimmed.isEmpty, let last = messages.last, !last.isUser {
             // If the AI message is empty, remove the placeholder.
             messages.removeLast()
         } else {
+            // If we have text, update the message with the cleaned text and optionally speak it.
+            if let lastMessage = messages.last, !lastMessage.isUser {
+                // Replace the entire message text with the cleaned version.
+                lastMessage.text = trimmed
+                objectWillChange.send()
+            }
+
             // If we have text, optionally speak it if autoplay is on.
             speakMessage(trimmed)
             await saveMessages()
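
The cleanup above assumes any stray Ollama metadata object in the accumulated text begins with {"model": and truncates from its first occurrence. A quick worked example with a fabricated stream tail (illustrative only, not real model output):

let raw = "Hello there!{\"model\":\"llama3\",\"done\":true}"
var cleaned = raw
if let jsonStart = raw.range(of: "{\"model\":") {
    cleaned = String(raw[..<jsonStart.lowerBound])
}
print(cleaned) // prints: Hello there!

One side effect worth noting: if a model's legitimate output ever contained the literal substring {"model":, it would be truncated at that point as well.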
@@ -664,73 +705,70 @@ class ChatViewModel: ObservableObject {
     ) async throws -> String {
         var completeResponse = ""
         var tokenCount = 0
+
+        print("[ChatViewModel] STREAMING STARTED - Provider: \(appSettings.selectedProvider.rawValue), Model: \(appSettings.selectedModelId)")
 
-        // Prepare haptic feedback
         let feedbackGenerator = UIImpactFeedbackGenerator(style: .light)
         let finalFeedbackGenerator = UINotificationFeedbackGenerator()
         feedbackGenerator.prepare()
         finalFeedbackGenerator.prepare()
 
-        // Reset token buffering
         tokenBuffer = ""
         lastFlushDate = Date()
         flushTask?.cancel()
         flushTask = nil
 
-        // Consume tokens as they arrive.
         for try await content in stream {
-            if Task.isCancelled { break }
+            // Filter out JSON metadata that might be included in tokens.
+            if content.hasPrefix("{\"model\":") {
+                print("[ChatViewModel] Skipping JSON metadata in token stream")
+                continue
+            }
+
+            print("[ChatViewModel] Received token: \"\(content)\"")
+
+            if Task.isCancelled {
+                print("[ChatViewModel] Stream cancelled")
+                break
+            }
 
             tokenBuffer.append(content)
             completeResponse.append(content)
-            tokenCount += 1
 
-            // Flush every tokenBatchSize tokens for responsiveness.
-            if tokenCount % tokenBatchSize == 0 {
-                await flushTokens(aiMessage: aiMessage)
-            } else {
-                scheduleFlush(aiMessage: aiMessage)
-            }
+            tokenCount += 1
 
-            // Provide light haptic feedback every 5 tokens.
+            // IMPORTANT: Force flush each token immediately for debugging.
+            await flushTokens(aiMessage: aiMessage, force: true)
+
+            // Optional haptic every 5 tokens.
             if tokenCount % 5 == 0 {
                 feedbackGenerator.impactOccurred()
             }
         }
 
-        // Ensure we flush any leftover tokens at the end.
+        // Make sure we flush any leftover tokens.
         await flushTokens(aiMessage: aiMessage, force: true)
         finalFeedbackGenerator.notificationOccurred(.success)
+
+        print("[ChatViewModel] STREAMING COMPLETE - Total tokens: \(tokenCount)")
 
         return completeResponse
     }
 
-    /**
-     Schedules a flush after `tokenFlushInterval` if no further tokens arrive,
-     preventing partial text from building up too long.
-     */
-    private func scheduleFlush(aiMessage: MutableMessage) {
-        flushTask?.cancel()
-        flushTask = Task { [weak self] in
-            guard let self = self else { return }
-            try? await Task.sleep(nanoseconds: UInt64(self.tokenFlushInterval * 1_000_000_000))
-            await self.flushTokens(aiMessage: aiMessage)
-        }
-    }
-
-    /**
-     Moves any accumulated tokens into the AI message text.
-     - Parameter aiMessage: The AI message being updated.
-     - Parameter force: If `true`, flushes even if batch/time thresholds aren't reached.
-     */
     private func flushTokens(aiMessage: MutableMessage, force: Bool = false) async {
         guard force || !tokenBuffer.isEmpty else { return }
 
         let tokensToApply = tokenBuffer
+        print("[ChatViewModel] Flushing tokens: \"\(tokensToApply)\"")
+
         tokenBuffer = ""
 
-        aiMessage.text.append(tokensToApply)
-        objectWillChange.send()
+        // Make sure we update the UI on the main actor.
+        await MainActor.run {
+            aiMessage.text += tokensToApply
+            objectWillChange.send()
+        }
+
         lastFlushDate = Date()
     }
