Skip to content

Agent Callbacks

Callbacks provide hooks into the lifecycle of an AnyAgent execution. Using callbacks, you can monitor, control, and extend agent behavior without modifying the core underlying agent logic.

All callbacks must inherit from the base Callback class and can choose to implement any subset of the available callback methods. These methods include:

| Callback Method | When It Fires | Example Use Cases |
| --- | --- | --- |
| `before_agent_invocation` | Once at start, before any LLM calls | Initialize counters, validate inputs, set up logging |
| `before_llm_call` | Before each LLM API call | Content filtering, cost tracking, prompt inspection |
| `after_llm_call` | After LLM responds, before adding to history | Response validation, token counting, logging |
| `before_tool_execution` | Before each tool runs | Rate limiting, input validation, authorization checks |
| `after_tool_execution` | After tool completes | Result validation, metrics collection, error handling |
| `after_agent_invocation` | Once at end, before returning final response | Cleanup, final metrics, audit logging |
# Minimum valid implementation: do nothing, but always return the context.
def before_llm_call(self, context: Context, *args, **kwargs) -> Context:
    return context  # <--- Essential!

During an agent run (agent.run_async or agent.run), a unique Context object is created and shared across all callbacks.

Use Context.shared (a dictionary) to persist data across different steps and callbacks.

Note: The Context object is mutable. You should modify Context.shared directly and return the same object.

any-agent populates the Context.current_span property so that callbacks can access information in a framework-agnostic way.

You can see what attributes are available for LLM Calls and Tool Executions by examining the GenAI class.

Common Pattern: Initialize a counter in one callback and check it in another.

from any_agent.callbacks import Callback, Context
from any_agent.tracing.attributes import GenAI
class CountSearchWeb(Callback):
    """Count how many times the `search_web` tool is executed.

    The running total is kept in ``context.shared["search_web_count"]`` so
    that other callbacks (e.g. a limiter) can read it later in the same run.
    """

    def after_tool_execution(self, context: Context, *args, **kwargs) -> Context:
        # Lazily initialize the counter on the first tool execution.
        if "search_web_count" not in context.shared:
            context.shared["search_web_count"] = 0
        if context.current_span.attributes[GenAI.TOOL_NAME] == "search_web":
            context.shared["search_web_count"] += 1
        return context

Callbacks can raise exceptions to stop agent execution. This is useful for implementing safety guardrails or validation logic.

For intentional cancellation (rate limits, guardrails, validation), subclass AgentCancel. These exceptions propagate directly to your code, allowing you to catch them by their specific type:

from any_agent import AgentCancel, AgentConfig, AnyAgent
from any_agent.callbacks import Callback
from any_agent.callbacks.context import Context
class SearchLimitReached(AgentCancel):
    """Raised when the search limit is exceeded."""
class LimitSearchWeb(Callback):
    """Cancel the run once `search_web` has been called more than `max_calls` times.

    Reads the counter maintained by a companion counting callback from
    ``context.shared["search_web_count"]``.
    """

    def __init__(self, max_calls: int):
        self.max_calls = max_calls

    def before_tool_execution(self, context: Context, *args, **kwargs) -> Context:
        # Missing key means no searches yet, so default to 0.
        if context.shared.get("search_web_count", 0) > self.max_calls:
            raise SearchLimitReached(f"Exceeded {self.max_calls} search calls")
        return context
# In your application code:
agent = AnyAgent.create(
    "tinyagent",
    AgentConfig(
        model_id="gpt-4.1-nano",
        callbacks=[LimitSearchWeb(max_calls=3)],
    ),
)
try:
    trace = agent.run("Find information about Python")
except SearchLimitReached as e:
    # AgentCancel subclasses propagate directly, so we can catch them by type.
    print(f"Search limit reached: {e}")
    print(f"Trace: {e.trace}")  # Access spans collected before cancellation

Regular exceptions (like RuntimeError) are automatically wrapped in AgentRunError by the framework, which provides access to the execution trace but requires you to inspect the wrapped exception:

from any_agent import AgentConfig, AgentRunError, AnyAgent
from any_agent.callbacks import Callback
from any_agent.callbacks.context import Context
class LimitSearchWeb(Callback):
    """Stop the run with a regular exception once `max_calls` searches are exceeded.

    A plain ``RuntimeError`` raised here is wrapped in ``AgentRunError`` by the
    framework before it reaches application code.
    """

    def __init__(self, max_calls: int):
        self.max_calls = max_calls

    def before_tool_execution(self, context: Context, *args, **kwargs) -> Context:
        if context.shared.get("search_web_count", 0) > self.max_calls:
            msg = "Reached limit of `search_web` calls."
            raise RuntimeError(msg)
        return context
# In your application code:
agent = AnyAgent.create(
    "tinyagent",
    AgentConfig(
        model_id="gpt-4.1-nano",
        callbacks=[LimitSearchWeb(max_calls=3)],
    ),
)
try:
    trace = agent.run("Find information about Python")
except AgentRunError as e:
    # Regular exceptions are wrapped; inspect the original via the wrapper.
    print(f"Error: {e.original_exception}")
    print(f"Trace: {e.trace}")

The Context.current_span attribute provides access to the active trace span. This allows you to inspect (and modify) the data being processed, such as LLM inputs or Tool outputs.

Common attributes (available via any_agent.tracing.attributes.GenAI) include:

  • GenAI.INPUT_MESSAGES: The chat history sent to the model.

  • GenAI.TOOL_NAME: The name of the tool currently being executed.

  • GenAI.OUTPUT_MESSAGES: The response received from the model.

When agent.run() or agent.run_async() executes, it triggers a series of events (e.g., before the LLM is called, after a tool is executed). You can register custom Callback classes to listen for these events.

All callbacks share a strict contract: They receive the current Context as input and must return a Context as output.

# pseudocode of an Agent run
history = [system_prompt, user_prompt]
context = Context()
for callback in agent.config.callbacks:
# 1. Agent Start
context = callback.before_agent_invocation(context)
while True:
for callback in agent.config.callbacks:
# 2. Pre-LLM
context = callback.before_llm_call(context)
response = CALL_LLM(history)
for callback in agent.config.callbacks:
# 3. Post-LLM
context = callback.after_llm_call(context)
history.append(response)
if response.tool_executions:
for tool_execution in tool_executions:
# 4. Pre-Tool
for callback in agent.config.callbacks:
context = callback.before_tool_execution(context)
tool_response = EXECUTE_TOOL(tool_execution)
for callback in agent.config.callbacks:
# 5. Post-Tool
context = callback.after_tool_execution(context)
history.append(tool_response)
else:
for callback in agent.config.callbacks:
# 6. Agent DONE
context = callback.after_agent_invocation(context)
return response

Advanced designs such as safety guardrails or custom side-effects can be integrated into your agentic system using this functionality.

any-agent ships with a set of default callbacks that are used automatically whenever you don’t pass a value to AgentConfig.callbacks:

If you want to disable these default callbacks, you can pass an empty list:

from any_agent import AgentConfig, AnyAgent
from any_agent.tools import search_web, visit_webpage
agent = AnyAgent.create(
    "tinyagent",
    AgentConfig(
        model_id="mistral:mistral-small-latest",
        instructions="Use the tools to find an answer",
        tools=[search_web, visit_webpage],
        callbacks=[],  # An empty list disables the default callbacks.
    ),
)

Callbacks are provided to the agent using the AgentConfig.callbacks property.

any-agent includes default callbacks (like console logging). Use get_default_callbacks to keep them:

from any_agent import AgentConfig, AnyAgent
from any_agent.callbacks import get_default_callbacks
from any_agent.tools import search_web, visit_webpage
agent = AnyAgent.create(
    "tinyagent",
    AgentConfig(
        model_id="gpt-4.1-nano",
        instructions="Use the tools to find an answer",
        tools=[search_web, visit_webpage],
        callbacks=[
            CountSearchWeb(),  # Custom callbacks first
            LimitSearchWeb(max_calls=3),
            *get_default_callbacks(),  # Runs after custom callbacks
        ],
    ),
)

Some inputs and/or outputs in your traces might contain sensitive information that you don’t want to be exposed in the traces.

You can use callbacks to offload the sensitive information to an external location and replace the span attributes with a reference to that location:

import json
from pathlib import Path
from any_agent.callbacks.base import Callback
from any_agent.callbacks.context import Context
from any_agent.tracing.attributes import GenAI
class SensitiveDataOffloader(Callback):
    """Move sensitive LLM input messages to disk before they reach the trace.

    The span's input-messages attribute is replaced with a JSON reference
    (``{"ref": "<path>"}``) pointing at the offloaded file.
    """

    def __init__(self, output_dir: str) -> None:
        self.output_dir = Path(output_dir)
        # Create the target directory up front; tolerate pre-existing dirs.
        self.output_dir.mkdir(exist_ok=True, parents=True)

    def before_llm_call(self, context: Context, *args, **kwargs) -> Context:
        span = context.current_span
        if input_messages := span.attributes.get(GenAI.INPUT_MESSAGES):
            # One file per trace, keyed by the span's trace id.
            output_file = self.output_dir / f"{span.get_span_context().trace_id}.txt"
            output_file.write_text(str(input_messages))
            span.set_attribute(
                GenAI.INPUT_MESSAGES,
                json.dumps({"ref": str(output_file)}),
            )
        return context

You can find a working example in the Callbacks Cookbook.

Some agent frameworks allow you to limit how many steps an agent can take and some don’t. In addition, each framework defines a step differently: some count the LLM calls, some the tool executions, and some the sum of both.

You can use callbacks to limit how many steps an agent can take, and you can decide what to count as a step:

from any_agent import AgentCancel
from any_agent.callbacks.base import Callback
from any_agent.callbacks.context import Context
class LLMCallLimitReached(AgentCancel):
    """Raised when the LLM call limit is exceeded."""
class ToolExecutionLimitReached(AgentCancel):
    """Raised when the tool execution limit is exceeded."""
class LimitLLMCalls(Callback):
    """Cancel the run after more than `max_llm_calls` LLM calls.

    Counts calls in ``context.shared["n_llm_calls"]`` so the tally survives
    across callback invocations within a single run.
    """

    def __init__(self, max_llm_calls: int) -> None:
        self.max_llm_calls = max_llm_calls

    def before_llm_call(self, context: Context, *args, **kwargs) -> Context:
        if "n_llm_calls" not in context.shared:
            context.shared["n_llm_calls"] = 0
        context.shared["n_llm_calls"] += 1
        if context.shared["n_llm_calls"] > self.max_llm_calls:
            raise LLMCallLimitReached(f"Exceeded {self.max_llm_calls} LLM calls")
        return context
class LimitToolExecutions(Callback):
    """Cancel the run after more than `max_tool_executions` tool executions.

    Counts executions in ``context.shared["n_tool_executions"]`` so the tally
    survives across callback invocations within a single run.
    """

    def __init__(self, max_tool_executions: int) -> None:
        self.max_tool_executions = max_tool_executions

    def before_tool_execution(self, context: Context, *args, **kwargs) -> Context:
        if "n_tool_executions" not in context.shared:
            context.shared["n_tool_executions"] = 0
        context.shared["n_tool_executions"] += 1
        if context.shared["n_tool_executions"] > self.max_tool_executions:
            raise ToolExecutionLimitReached(
                f"Exceeded {self.max_tool_executions} tool executions"
            )
        return context