Build and package Python tools into standalone executables for LLM integration.
- Define Tools Simply - Inherit from the `HotTool` class and implement required methods
- LLM-Ready - Built-in support for function definitions compatible with OpenAI's function calling
- Build Standalone Executables - Compile Python tools into single binary files using `hot-tool build`
- Run Without Dependencies - Execute tools without Python installation or source code access
Install from PyPI:

```bash
pip install hot-tool
```

Define a tool in `get_my_ip.py`:

```python
# get_my_ip.py
from typing import Optional

import requests

from hot_tool import FunctionDefinition, HotTool


class GetMyIpTool(HotTool):
    def function_definition(self) -> FunctionDefinition:
        return {
            "name": "get_my_ip",
            "description": "Get my IP address",
            "parameters": {},
        }

    def run(
        self, arguments: Optional[str] = None, context: Optional[str] = None
    ) -> str:
        try:
            response = requests.get("https://ifconfig.me", timeout=10)
            response.raise_for_status()
            return response.text.strip()
        except Exception as e:
            print(f"Error: {type(e).__name__}: {e}")
            return "Cannot get my IP, please try again later."
```
Run the tool from `main.py`:

```python
# main.py
from get_my_ip import GetMyIpTool
from hot_tool.run import run_tool

print(run_tool(GetMyIpTool))
# 198.51.100.156
```

Build it into a standalone executable:

```bash
hot-tool build get_my_ip.py -o get_my_ip
# Starting Nuitka compilation...
# ...
# Nuitka-Plugins:upx: Compressing 'get_my_ip'.
# Nuitka: Successfully created 'get_my_ip'.
# Compilation completed successfully.
# Standalone script saved to '/Users/me/path/to/get_my_ip'
```

Run the compiled binary without Python or the source code:

```bash
./get_my_ip
# 198.51.100.156
```

Print the tool's function definition:

```bash
./get_my_ip function-definition
# {"name": "get_my_ip", "description": "Get my IP address", "parameters": {}}
```

This outputs the function definition in JSON format, compatible with OpenAI's function calling API. You can use this to dynamically register tools with LLMs.
You can also pass context to customize the function definition:
```bash
./get_my_ip function-definition --context "user prefers metric units"
# Function definition may be adjusted based on context
```
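Inside a tool class, one way this could look is a `function_definition()` that inspects the context string and adjusts what it reports; a minimal sketch (the weather tool and its unit-switching logic are purely illustrative):

```python
from typing import Optional

from hot_tool import FunctionDefinition, HotTool


class GetWeatherTool(HotTool):
    # run() omitted for brevity

    def function_definition(self, context: Optional[str] = None) -> FunctionDefinition:
        # Illustrative: report metric units if the caller's context asks for them
        units = "metric" if context and "metric" in context else "imperial"
        return {
            "name": "get_weather",
            "description": f"Get the current weather, reported in {units} units",
            "parameters": {
                "city_name": "The name of the city to look up",
            },
        }
```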
Every tool must implement two methods: `function_definition()` and `run()`.

`function_definition()` returns metadata about the tool for LLM integration. It can optionally accept a context argument to customize the function definition dynamically.

- `context` (`Optional[str]`): Additional context to customize the function definition

The `FunctionDefinition` TypedDict has the following structure:
```python
{
    "name": str,          # Required: Tool name (e.g., "get_weather")
    "description": str,   # Optional: What the tool does
    "parameters": dict,   # Optional: Parameter descriptions
    "strict": bool,       # Optional: Strict mode for OpenAI
}
```

Example:
```python
def function_definition(self, context: Optional[str] = None) -> FunctionDefinition:
    return {
        "name": "get_current_weather",
        "description": "Get the current weather of a city",
        "parameters": {
            "city_name": "The name of the city to get the current weather of",
        },
    }
```

`run()` executes the tool logic and returns a string result.
- `arguments` (`Optional[str]`): JSON string containing input parameters
- `context` (`Optional[str]`): Additional context information
- Returns: String output for the LLM
Example:
```python
def run(
    self, arguments: Optional[str] = None, context: Optional[str] = None
) -> str:
    import json

    args = json.loads(arguments or "{}")
    city_name = args.get("city_name", "New York")
    # ... your logic here ...
    return "Current weather: Sunny, 72°F"
```

You can create reusable base tool classes and import them into your scripts:
```python
# base_tool.py
from hot_tool import HotTool


class BaseAPITool(HotTool):
    def run(self, arguments=None, context=None):
        # Shared logic here
        return self.call_api()
```
```python
# my_tool.py - Your main script
from base_tool import BaseAPITool


class MyCustomTool(BaseAPITool):  # Inherit from your base class
    def run(self, arguments=None, context=None):
        # Custom implementation
        return "Custom result"
```

Important: Each script can only define one concrete tool class that implements both `run()` and `function_definition()`. Imported base classes don't count toward this limit.
MIT License