@@ -233,6 +233,8 @@ print(client.get_languages())
233233
234234### Running LLM-Generated Code
235235
236+ #### Simple
237+
236238``` python
237239import os
238240from ollama import Client
@@ -261,3 +263,59 @@ print(f"Generated code:\n{code}")
261263result = judge0.run(source_code=code, language=judge0.PYTHON)
262264print(f"Execution result:\n{result.stdout}")
263265```
266+
267+ #### Tool Calling (a.k.a. Function Calling)
268+
269+ ``` python
270+ import os
271+ from ollama import Client
272+ import judge0
273+
274+ # Get your Ollama Cloud API key from https://ollama.com.
275+ client = Client(
276+     host="https://ollama.com",
277+     headers={"Authorization": "Bearer " + os.environ.get("OLLAMA_API_KEY")},
278+ )
279+
280+ model = "qwen3-coder:480b-cloud"
281+
282+ messages = [
283+     {"role": "user", "content": "Calculate how many r's are in the word 'strawberry'."},
284+ ]
285+
286+ tools = [{
287+     "type": "function",
288+     "function": {
289+         "name": "execute_python",
290+         "description": "Execute Python code and return result",
291+         "parameters": {
292+             "type": "object",
293+             "properties": {
294+                 "code": {
295+                     "type": "string",
296+                     "description": "The Python code to execute"
297+                 }
298+             },
299+             "required": ["code"]
300+         }
301+     }
302+ }]
303+
304+ response = client.chat(model=model, messages=messages, tools=tools)
305+
306+ response_message = response["message"]
307+ messages.append(response_message)
308+
309+ if response_message.tool_calls:
310+     for tool_call in response_message.tool_calls:
311+         if tool_call.function.name == "execute_python":
312+             result = judge0.run(source_code=tool_call.function.arguments["code"], language=judge0.PYTHON)
313+             messages.append({
314+                 "role": "tool",
315+                 "tool_name": "execute_python",
316+                 "content": result.stdout,
317+             })
318+
319+ final_response = client.chat(model=model, messages=messages)
320+ print(final_response["message"]["content"])
321+ ```
0 commit comments