Tool use is a critical capability for Large Language Model (LLM) agents, enabling them to interact with external systems, access up-to-date information, and perform actions beyond their inherent knowledge. This functionality allows LLMs to handle complex tasks that require real-time data retrieval or specific operations.
"""Minimal OpenAI tool-use (function-calling) example.

Advertises one weather tool to the model via JSON Schema, registers its
Python implementation, and runs the standard tool-use loop: send the user
message, execute any tool calls the model emits, feed the results back,
and repeat until the model produces a final text answer.
"""
import json

from openai import OpenAI

client = OpenAI()

# Tool schema advertised to the model (JSON Schema for the parameters).
tools = [
    {
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Get the current weather for a location",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {"type": "string", "description": "City name"},
                    "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
                },
                "required": ["location"],
            },
        },
    }
]


def get_weather(location: str, unit: str = "celsius") -> str:
    """Return a JSON string with the weather for *location*.

    Stub implementation: in production, call a real weather API here.
    """
    return json.dumps(
        {"location": location, "temp": 22, "unit": unit, "condition": "sunny"}
    )


# Maps tool names (as declared in `tools`) to their Python implementations.
TOOL_REGISTRY = {"get_weather": get_weather}


def run_with_tools(user_message: str, max_rounds: int = 10) -> str:
    """Complete tool-use loop: send message, execute tools, return final response.

    Args:
        user_message: The user's question.
        max_rounds: Safety cap on model round-trips so a model that keeps
            requesting tools cannot loop forever. New, defaulted parameter —
            backward compatible with existing callers.

    Returns:
        The model's final text reply.

    Raises:
        RuntimeError: If the model is still requesting tools after
            *max_rounds* round-trips.
    """
    messages = [{"role": "user", "content": user_message}]
    response = client.chat.completions.create(
        model="gpt-4o", messages=messages, tools=tools
    )
    msg = response.choices[0].message

    rounds = 0
    # While the model keeps asking for tools, execute them and send results back.
    while msg.tool_calls:
        rounds += 1
        if rounds > max_rounds:  # guard against an unbounded tool loop
            raise RuntimeError(
                f"model still requesting tools after {max_rounds} rounds"
            )
        messages.append(msg)
        for call in msg.tool_calls:
            func = TOOL_REGISTRY.get(call.function.name)
            if func is None:
                # Unknown tool name from the model: report it back as the tool
                # result instead of crashing with a KeyError, so the model can
                # recover.
                result = json.dumps(
                    {"error": f"unknown tool: {call.function.name}"}
                )
            else:
                try:
                    args = json.loads(call.function.arguments)
                    result = func(**args)
                except (json.JSONDecodeError, TypeError) as exc:
                    # Malformed or mismatched arguments from the model:
                    # surface the error as the tool result rather than
                    # aborting the whole loop.
                    result = json.dumps({"error": str(exc)})
            messages.append(
                {"role": "tool", "tool_call_id": call.id, "content": result}
            )
        response = client.chat.completions.create(
            model="gpt-4o", messages=messages, tools=tools
        )
        msg = response.choices[0].message
    return msg.content


if __name__ == "__main__":
    # Guarded entry point: importing this module no longer fires an API call.
    print(run_with_tools("What is the weather in Paris?"))