Skip to main content

Agents API Reference

AgentConfig

from synapsekit.agents import AgentConfig
| Parameter | Type | Default | Description |
|---|---|---|---|
| `llm` | `BaseLLM` | required | The language model that drives the agent |
| `tools` | `list[BaseTool]` | `[]` | Tools available to the agent |
| `system_prompt` | `str \| None` | `None` | Override the default system prompt |
| `max_steps` | `int` | `10` | Maximum loop iterations before raising `MaxStepsExceeded` |
| `handle_tool_errors` | `bool` | `True` | Catch tool exceptions and feed them back as observations |
| `verbose` | `bool` | `False` | Print Thought/Action/Observation steps to stdout |
| `return_intermediate_steps` | `bool` | `False` | Include all intermediate steps in the result dict |
| `memory` | `BaseMemory \| None` | `None` | Conversation memory backend for multi-turn sessions |
from synapsekit.agents import AgentConfig

config = AgentConfig(llm=llm, tools=[search_tool], max_steps=15)

ReActAgent

from synapsekit.agents import ReActAgent

ReActAgent(
    config: AgentConfig,
)

Implements the ReAct (Reasoning + Acting) loop via prompt-based tool use. Compatible with any LLM.

async run(query, session_id=None)

| Parameter | Type | Default | Description |
|---|---|---|---|
| `query` | `str` | required | The user task or question |
| `session_id` | `str \| None` | `None` | Session ID for memory isolation |

Returns {"output": str, "steps": int, "intermediate_steps": list}.

from synapsekit.agents import ReActAgent, AgentConfig, tool

@tool
def search_web(query: str) -> str:
    """Search the web for current information."""
    return f"Search results for: {query}"

agent = ReActAgent(AgentConfig(llm=llm, tools=[search_web]))
result = await agent.run("What is the capital of Australia?")
print(result["output"])

async stream(query, session_id=None)

Yields step dicts with type field: "thought", "action", "observation", or "answer".

async for step in agent.stream("Research the latest AI news"):
    if step["type"] == "answer":
        print(step["content"])

FunctionCallingAgent

from synapsekit.agents import FunctionCallingAgent

FunctionCallingAgent(
    config: AgentConfig,
)

Uses the LLM's native function/tool calling API. More reliable than ReAct for supported models.

Supported providers: OpenAI, Anthropic, Gemini, Mistral, DeepSeek, OpenRouter, Together, Fireworks, AzureOpenAI, Groq.

async run(query, session_id=None) / async stream(query, session_id=None)

Identical signature to ReActAgent.

Parallel tool calls: when the LLM requests multiple tools simultaneously, they are executed concurrently via asyncio.gather.


AgentExecutor

from synapsekit.agents import AgentExecutor

AgentExecutor(
    agent: ReActAgent | FunctionCallingAgent,
    tools: list[BaseTool],
    max_steps: int = 10,
    handle_tool_errors: bool = True,
    verbose: bool = False,
)
| Parameter | Type | Default | Description |
|---|---|---|---|
| `agent` | `ReActAgent \| FunctionCallingAgent` | required | The underlying agent instance |
| `tools` | `list[BaseTool]` | required | Tools the agent can call |
| `max_steps` | `int` | `10` | Hard limit on iterations |
| `handle_tool_errors` | `bool` | `True` | Catch and forward tool errors as observations |
| `verbose` | `bool` | `False` | Log each step |

async invoke(query, session_id=None)

Same return format as agent.run().


BaseTool

from synapsekit.agents import BaseTool

class BaseTool(ABC):
    name: str
    description: str
    args_schema: dict

    @abstractmethod
    async def run(self, **kwargs) -> str: ...

Example subclass:

class DatabaseTool(BaseTool):
    name = "query_database"
    description = "Run a SQL query against the production database."
    args_schema = {
        "type": "object",
        "properties": {
            "sql": {"type": "string", "description": "The SQL query to execute."}
        },
        "required": ["sql"],
    }

    async def run(self, sql: str) -> str:
        result = await db.execute(sql)
        return str(result)

@tool decorator

Converts a Python function into a BaseTool instance using type annotations and docstrings.

from synapsekit.agents import tool

@tool
def my_tool(param1: str, param2: int = 0) -> str:
    """Short description shown to the LLM.

    Args:
        param1: Description of param1.
        param2: Description of param2.
    """
    return f"Result: {param1}, {param2}"

Supported types for auto-schema generation:

| Python type | JSON Schema type |
|---|---|
| `str` | `"string"` |
| `int` | `"integer"` |
| `float` | `"number"` |
| `bool` | `"boolean"` |
| `list` / `list[T]` | `"array"` |
| `dict` | `"object"` |
| `Optional[T]` | removes the parameter from `required` |
| `Literal["a", "b"]` | `"enum": ["a", "b"]` |

HandoffChain

from synapsekit.agents import HandoffChain

HandoffChain(
    supervisor: ReActAgent | FunctionCallingAgent,
    workers: dict[str, ReActAgent | FunctionCallingAgent],
    max_rounds: int = 5,
)
| Parameter | Type | Default | Description |
|---|---|---|---|
| `supervisor` | agent | required | The supervisor agent that assigns tasks to workers |
| `workers` | `dict[str, agent]` | required | Named worker agents |
| `max_rounds` | `int` | `5` | Maximum supervisor-worker cycles |

async run(task) -> dict

chain = HandoffChain(
    supervisor=planner_agent,
    workers={"researcher": search_agent, "writer": write_agent},
)
result = await chain.run("Research and summarize recent advances in quantum computing")

Crew

from synapsekit.agents import Crew

Crew(
    agents: list[ReActAgent | FunctionCallingAgent],
    max_rounds: int = 5,
    finish_condition: Callable[[dict], bool] | None = None,
)
| Parameter | Type | Default | Description |
|---|---|---|---|
| `agents` | `list` | required | Agents that take turns contributing |
| `max_rounds` | `int` | `5` | Maximum total rounds |
| `finish_condition` | `Callable \| None` | `None` | Return `True` to stop early |

async run(task) -> dict

crew = Crew(agents=[researcher, writer, critic], max_rounds=6)
result = await crew.run("Write a technical overview of SynapseKit")

MCPAgent

from synapsekit.agents import MCPAgent

MCPAgent(
    llm: BaseLLM,
    mcp_server_url: str,
    transport: str = "sse",
    max_steps: int = 10,
)
| Parameter | Type | Default | Description |
|---|---|---|---|
| `llm` | `BaseLLM` | required | The language model |
| `mcp_server_url` | `str` | required | MCP server endpoint |
| `transport` | `str` | `"sse"` | Transport protocol: `"sse"` or `"stdio"` |
| `max_steps` | `int` | `10` | Maximum agent steps |

Automatically discovers available tools from the MCP server on first call.

agent = MCPAgent(llm=llm, mcp_server_url="http://localhost:8080/sse")
result = await agent.run("List all Python files in the /src directory")

See also