@@ -20,6 +20,7 @@ import AppKit
 
 // [START import_vertexai]
 import FirebaseVertexAI
+import FirebaseCore
 // [END import_vertexai]
 
 class Snippets {
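These snippets reference a configured Firebase app and an existing `model` that the hunks below do not show. A minimal setup sketch, assuming the same model name used later in this file, could look like:

import FirebaseCore
import FirebaseVertexAI

// Assumed setup: configure Firebase before creating a Vertex AI model
// (typically done once in the app delegate, not in the Snippets class).
FirebaseApp.configure()

// Create the generative model the snippets below call `model`;
// "gemini-1.5-flash" matches the model name used elsewhere in this file.
let model = VertexAI.vertexAI().generativeModel(modelName: "gemini-1.5-flash")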
@@ -91,7 +92,7 @@ class Snippets {
     let prompt = "Write a story about a magic backpack."
 
     // To stream generated text output, call generateContentStream with the text input
-    let contentStream = model.generateContentStream(prompt)
+    let contentStream = try model.generateContentStream(prompt)
     for try await chunk in contentStream {
       if let text = chunk.text {
         print(text)
@@ -125,7 +126,7 @@ class Snippets {
     let prompt = "What's in this picture?"
 
     // To stream generated text output, call generateContentStream and pass in the prompt
-    let contentStream = model.generateContentStream(image, prompt)
+    let contentStream = try model.generateContentStream(image, prompt)
     for try await chunk in contentStream {
       if let text = chunk.text {
         print(text)
@@ -167,7 +168,7 @@ class Snippets {
     let prompt = "What's different between these pictures?"
 
     // To stream generated text output, call generateContentStream and pass in the prompt
-    let contentStream = model.generateContentStream(image1, image2, prompt)
+    let contentStream = try model.generateContentStream(image1, image2, prompt)
     for try await chunk in contentStream {
       if let text = chunk.text {
         print(text)
@@ -203,7 +204,7 @@ class Snippets {
                                     withExtension: "mp4") else { fatalError() }
     let video = try Data(contentsOf: fileURL)
     let prompt = "What's in this video?"
-    let videoContent = ModelContent.Part.data(mimetype: "video/mp4", video)
+    let videoContent = InlineDataPart(data: video, mimeType: "video/mp4")
 
     // To generate text output, call generateContent and pass in the prompt
     let response = try await model.generateContent(videoContent, prompt)
@@ -219,10 +220,10 @@ class Snippets {
                                     withExtension: "mp4") else { fatalError() }
     let video = try Data(contentsOf: fileURL)
     let prompt = "What's in this video?"
-    let videoContent = ModelContent.Part.data(mimetype: "video/mp4", video)
+    let videoContent = InlineDataPart(data: video, mimeType: "video/mp4")
 
     // To stream generated text output, call generateContentStream and pass in the prompt
-    let contentStream = model.generateContentStream(videoContent, prompt)
+    let contentStream = try model.generateContentStream(videoContent, prompt)
     for try await chunk in contentStream {
       if let text = chunk.text {
         print(text)
@@ -243,7 +244,7 @@ class Snippets {
     let chat = model.startChat(history: history)
 
     // To stream generated text output, call sendMessageStream and pass in the message
-    let contentStream = chat.sendMessageStream("How many paws are in my house?")
+    let contentStream = try chat.sendMessageStream("How many paws are in my house?")
     for try await chunk in contentStream {
       if let text = chunk.text {
         print(text)
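The `history` passed to `startChat(history:)` is defined outside this hunk. A hedged sketch of what it could contain, reusing the `ModelContent(role:parts:)` form that appears in the count-tokens snippet below (the turn text here is illustrative, not from the diff):

// Hypothetical prior turns for the chat; the real history is not part of this change.
let history: [ModelContent] = [
  ModelContent(role: "user", parts: "I have two dogs in my house."),
  ModelContent(role: "model", parts: "Great! What would you like to know?"),
]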
@@ -275,7 +276,7 @@ class Snippets {
     // [START count_tokens_text]
     let response = try await model.countTokens("Why is the sky blue?")
     print("Total Tokens: \(response.totalTokens)")
-    print("Total Billable Characters: \(response.totalBillableCharacters)")
+    print("Total Billable Characters: \(response.totalBillableCharacters ?? 0)")
     // [END count_tokens_text]
   }
 
@@ -288,7 +289,7 @@ class Snippets {
     // [START count_tokens_text_image]
     let response = try await model.countTokens(image, "What's in this picture?")
     print("Total Tokens: \(response.totalTokens)")
-    print("Total Billable Characters: \(response.totalBillableCharacters)")
+    print("Total Billable Characters: \(response.totalBillableCharacters ?? 0)")
     // [END count_tokens_text_image]
   }
 
@@ -303,19 +304,19 @@ class Snippets {
     // [START count_tokens_multi_image]
     let response = try await model.countTokens(image1, image2, "What's in this picture?")
     print("Total Tokens: \(response.totalTokens)")
-    print("Total Billable Characters: \(response.totalBillableCharacters)")
+    print("Total Billable Characters: \(response.totalBillableCharacters ?? 0)")
     // [END count_tokens_multi_image]
   }
 
   func countTokensChat() async throws {
     // [START count_tokens_chat]
     let chat = model.startChat()
     let history = chat.history
-    let message = try ModelContent(role: "user", "Why is the sky blue?")
+    let message = ModelContent(role: "user", parts: "Why is the sky blue?")
     let contents = history + [message]
     let response = try await model.countTokens(contents)
     print("Total Tokens: \(response.totalTokens)")
-    print("Total Billable Characters: \(response.totalBillableCharacters)")
+    print("Total Billable Characters: \(response.totalBillableCharacters ?? 0)")
     // [END count_tokens_chat]
   }
 
@@ -361,16 +362,13 @@ class Snippets {
       name: "getExchangeRate",
       description: "Get the exchange rate for currencies between countries",
       parameters: [
-        "currencyFrom": Schema(
-          type: .string,
+        "currencyFrom": Schema.string(
           description: "The currency to convert from."
         ),
-        "currencyTo": Schema(
-          type: .string,
+        "currencyTo": Schema.string(
           description: "The currency to convert to."
         ),
-      ],
-      requiredParameters: ["currencyFrom", "currencyTo"]
+      ]
     )
     // [END create_function_metadata]
 
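The declaration above only describes getExchangeRate to the model; the app still needs its own implementation to run when the model asks for it. A minimal sketch, assuming FirebaseVertexAI's JSONObject/JSONValue types and a placeholder rate instead of a real API call:

// Hypothetical app-side implementation; a real app would query a currency-rate service.
func getExchangeRate(currencyFrom: String, currencyTo: String) -> JSONObject {
  return ["rates": .object([currencyTo: .number(0.92)])]
}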
@@ -383,7 +381,7 @@ class Snippets {
     let model = vertex.generativeModel(
       modelName: "gemini-1.5-flash",
       // Specify the function declaration.
-      tools: [Tool(functionDeclarations: [getExchangeRate])]
+      tools: [Tool.functionDeclarations([getExchangeRate])]
     )
     // [END initialize_model_function]
 
@@ -418,10 +416,7 @@ class Snippets {
     // displayed to the user.
     let response = try await chat.sendMessage([ModelContent(
       role: "function",
-      parts: [.functionResponse(FunctionResponse(
-        name: functionCall.name,
-        response: apiResponse
-      ))]
+      parts: [FunctionResponsePart(name: functionCall.name, response: apiResponse)]
     )])
 
     // Log the text response.
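The `functionCall` and `apiResponse` used above come from earlier in the snippet, outside this hunk. A sketch of how they might be produced, assuming the model's reply exposes its requested calls via a `functionCalls` collection (`modelResponse` is a hypothetical name for that earlier reply):

// Pull out the function call the model requested.
guard let functionCall = modelResponse.functionCalls.first,
      functionCall.name == "getExchangeRate" else {
  fatalError("Expected a getExchangeRate function call")
}

// Placeholder value standing in for the result of a real exchange-rate lookup.
let apiResponse: JSONObject = ["rates": .object(["USD": .number(1.08)])]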
@@ -436,20 +431,18 @@ class Snippets {
     let getExchangeRate = FunctionDeclaration(
       name: "getExchangeRate",
       description: "Get the exchange rate for currencies between countries",
-      parameters: nil,
-      requiredParameters: nil
+      parameters: [:]
     )
 
     // [START function_modes]
     let model = VertexAI.vertexAI().generativeModel(
       // Setting a function calling mode is only available in Gemini 1.5 Pro
       modelName: "gemini-1.5-pro",
       // Pass the function declaration
-      tools: [Tool(functionDeclarations: [getExchangeRate])],
+      tools: [Tool.functionDeclarations([getExchangeRate])],
       toolConfig: ToolConfig(
-        functionCallingConfig: FunctionCallingConfig(
-          // Only call functions (model won't generate text)
-          mode: FunctionCallingConfig.Mode.any,
+        // Only call functions (model won't generate text)
+        functionCallingConfig: FunctionCallingConfig.any(
           // This should only be set when the Mode is .any.
           allowedFunctionNames: ["getExchangeRate"]
         )