The official Python SDK for Logwick — audit logging for AI agents.
pip install logwick

import logwick
logwick.init(api_key="sk-lw-your-key")
# Fire and forget — never blocks your code
logwick.fire({
"agent": "gpt-4o",
"action": "email_draft",
"status": "success",
"input": user_prompt,
"output": result,
"tokens": 312,
"user": user_email,
})

Automatically logs input, output, tokens, and latency:
from logwick import LogwickClient
lw = LogwickClient(api_key="sk-lw-your-key")
result = lw.openai(
lambda: client.chat.completions.create(model="gpt-4o", messages=messages),
{"action": "email_draft", "user": user_email}
)
# result is the normal OpenAI response — nothing changes in your code

result = lw.anthropic(
lambda: anthropic.messages.create(
model="claude-3-5-sonnet-20241022",
messages=messages,
max_tokens=1024
),
{"action": "document_review", "user": user_email}
)

result = lw.gemini(
lambda: model.generate_content(prompt),
{"action": "data_analysis", "user": user_email}
)

One handler logs every LLM call in your chain automatically:
handler = lw.langchain_handler(user="ops@acme.com")
chain = LLMChain(llm=llm, prompt=prompt, callbacks=[handler])
# Every call in the chain is now logged automatically

from logwick import LogwickClient
lw = LogwickClient(
api_key="sk-lw-your-key", # required
silent=False, # print warnings (default: True = silent)
tags=["production"], # default tags added to every log
)

import logwick
logwick.init(api_key="sk-lw-your-key", tags=["production"])
# Use anywhere in your codebase without passing the client around
logwick.fire({"agent": "gpt-4o", "action": "summarize", "status": "success", ...})

Sign up free at logwick.io — 5,000 logs/month free, no credit card required.