@@ -246,8 +246,15 @@ async fn request_completion(
246246 params. request_body . clone ( ) ,
247247 ) ;
248248 let headers = build_headers ( & params. backend , params. api_token . as_ref ( ) , params. ide ) ?;
249+ let url = build_url (
250+ params. backend . clone ( ) ,
251+ & params. model ,
252+ params. disable_url_path_completion ,
253+ ) ;
254+ info ! ( ?headers, url, "sending request to backend" ) ;
255+ debug ! ( ?headers, body = ?json, url, "sending request to backend" ) ;
249256 let res = http_client
250- . post ( build_url ( params . backend . clone ( ) , & params . model ) )
257+ . post ( url )
251258 . json ( & json)
252259 . headers ( headers)
253260 . send ( )
@@ -414,7 +421,12 @@ async fn get_tokenizer(
414421 }
415422}
416423
417- fn build_url ( backend : Backend , model : & str ) -> String {
424+ // NOTE: path auto-completion can be disabled via the `disable_url_path_completion` parameter.
425+ fn build_url ( backend : Backend , model : & str , disable_url_path_completion : bool ) -> String {
426+ if disable_url_path_completion {
427+ return backend. url ( ) ;
428+ }
429+
418430 match backend {
419431 Backend :: HuggingFace { url } => format ! ( "{url}/models/{model}" ) ,
420432 Backend :: LlamaCpp { mut url } => {
@@ -428,9 +440,51 @@ fn build_url(backend: Backend, model: &str) -> String {
428440 url
429441 }
430442 }
431- Backend :: Ollama { url } => url,
432- Backend :: OpenAi { url } => url,
433- Backend :: Tgi { url } => url,
443+ Backend :: Ollama { mut url } => {
444+ if url. ends_with ( "/api/generate" ) {
445+ url
446+ } else if url. ends_with ( "/api/" ) {
447+ url. push_str ( "generate" ) ;
448+ url
449+ } else if url. ends_with ( "/api" ) {
450+ url. push_str ( "/generate" ) ;
451+ url
452+ } else if url. ends_with ( '/' ) {
453+ url. push_str ( "api/generate" ) ;
454+ url
455+ } else {
456+ url. push_str ( "/api/generate" ) ;
457+ url
458+ }
459+ }
460+ Backend :: OpenAi { mut url } => {
461+ if url. ends_with ( "/v1/completions" ) {
462+ url
463+ } else if url. ends_with ( "/v1/" ) {
464+ url. push_str ( "completions" ) ;
465+ url
466+ } else if url. ends_with ( "/v1" ) {
467+ url. push_str ( "/completions" ) ;
468+ url
469+ } else if url. ends_with ( '/' ) {
470+ url. push_str ( "v1/completions" ) ;
471+ url
472+ } else {
473+ url. push_str ( "/v1/completions" ) ;
474+ url
475+ }
476+ }
477+ Backend :: Tgi { mut url } => {
478+ if url. ends_with ( "/generate" ) {
479+ url
480+ } else if url. ends_with ( '/' ) {
481+ url. push_str ( "generate" ) ;
482+ url
483+ } else {
484+ url. push_str ( "/generate" ) ;
485+ url
486+ }
487+ }
434488 }
435489}
436490
@@ -466,8 +520,8 @@ impl LlmService {
466520 backend = ?params. backend,
467521 ide = %params. ide,
468522 request_body = serde_json:: to_string( & params. request_body) . map_err( internal_error) ?,
469- "received completion request for {}" ,
470- params . text_document_position . text_document . uri
523+ disable_url_path_completion = params . disable_url_path_completion ,
524+ "received completion request" ,
471525 ) ;
472526 if params. api_token . is_none ( ) && params. backend . is_using_inference_api ( ) {
473527 let now = Instant :: now ( ) ;