diff --git a/crates/cactus/examples/chat.rs b/crates/cactus/examples/chat.rs
new file mode 100644
index 0000000000..665e87927b
--- /dev/null
+++ b/crates/cactus/examples/chat.rs
@@ -0,0 +1,50 @@
+use std::io::{self, BufRead, Write};
+
+use cactus::{CompleteOptions, Message, Model};
+
+fn main() {
+    let path = std::env::args().nth(1).expect("Usage: chat <model-path>");
+    let mut model = Model::new(&path).expect("Failed to load model");
+
+    let options = CompleteOptions {
+        max_tokens: Some(1024),
+        temperature: Some(0.7),
+        confidence_threshold: Some(0.0),
+        ..Default::default()
+    };
+
+    let mut messages: Vec<Message> = vec![Message::system("You are a helpful assistant.")];
+
+    println!("Chat with your model. Type 'exit' to quit.\n");
+
+    loop {
+        print!("> ");
+        let _ = io::stdout().flush();
+
+        let mut input = String::new();
+        io::stdin().lock().read_line(&mut input).unwrap();
+        let input = input.trim();
+
+        if input.is_empty() || input == "exit" || input == "quit" {
+            break;
+        }
+
+        messages.push(Message::user(input));
+        model.reset();
+
+        let mut response_text = String::new();
+        let result = model.complete_streaming(&messages, &options, |token| {
+            print!("{token}");
+            let _ = io::stdout().flush();
+            response_text.push_str(token);
+            true
+        });
+
+        println!();
+
+        match result {
+            Ok(_) => messages.push(Message::assistant(&response_text)),
+            Err(e) => eprintln!("Error: {e}"),
+        }
+    }
+}
diff --git a/crates/openai-transcription/src/batch/request.rs b/crates/openai-transcription/src/batch/request.rs
index c4fb8b11b9..46d1e1dba3 100644
--- a/crates/openai-transcription/src/batch/request.rs
+++ b/crates/openai-transcription/src/batch/request.rs
@@ -18,7 +18,7 @@ pub struct CommonTranscriptionOptions {
     pub temperature: Option<f32>,
 }
 
-#[derive(Debug, Clone, PartialEq)]
+#[derive(Debug, Clone, Default, PartialEq)]
 pub struct CreateWhisperTranscriptionOptions {
     pub common: CommonTranscriptionOptions,
     pub prompt: Option<String>,
@@ -26,17 +26,6 @@
     pub timestamp_granularities: Vec<TimestampGranularity>,
 }
 
-impl Default for CreateWhisperTranscriptionOptions {
-    fn default() -> Self {
-        Self {
-            common: CommonTranscriptionOptions::default(),
-            prompt: None,
-            response_format: None,
-            timestamp_granularities: Vec::new(),
-        }
-    }
-}
-
 #[derive(Debug, Clone, PartialEq)]
 pub struct CreateGptTranscriptionOptions {
     pub model: GptTranscriptionModel,
diff --git a/crates/owhisper-client/tests/provider_live_e2e.rs b/crates/owhisper-client/tests/provider_live_e2e.rs
index 77202d9457..fcaa5c929e 100644
--- a/crates/owhisper-client/tests/provider_live_e2e.rs
+++ b/crates/owhisper-client/tests/provider_live_e2e.rs
@@ -5,7 +5,7 @@ use futures_util::{Stream, StreamExt};
 use hypr_audio_utils::AudioFormatExt;
 use owhisper_client::{
     AssemblyAIAdapter, DashScopeAdapter, DeepgramAdapter, ElevenLabsAdapter, FinalizeHandle,
-    FireworksAdapter, GladiaAdapter, ListenClient, MistralAdapter, OpenAIAdapter, Provider,
+    FireworksAdapter, GladiaAdapter, ListenClient, MistralAdapter, Provider,
     RealtimeSttAdapter, SonioxAdapter,
 };
 use owhisper_interface::{ControlMessage, MixedMessage, stream::StreamResponse};
@@ -181,7 +181,6 @@ direct_live_test!(assemblyai, AssemblyAIAdapter, Provider::AssemblyAI);
 direct_live_test!(soniox, SonioxAdapter, Provider::Soniox);
 direct_live_test!(gladia, GladiaAdapter, Provider::Gladia);
 direct_live_test!(fireworks, FireworksAdapter, Provider::Fireworks);
-direct_live_test!(openai, OpenAIAdapter, Provider::OpenAI);
 direct_live_test!(elevenlabs, ElevenLabsAdapter, Provider::ElevenLabs);
 direct_live_test!(dashscope, DashScopeAdapter, Provider::DashScope);
 direct_live_test!(mistral, MistralAdapter, Provider::Mistral);