from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableConfig, chain
from langchain.chat_models import init_chat_model
# Assume PaymanAITool (or your other Payman AI tools) has already been imported
payman_tool = PaymanAITool(name="send_payment")
# Build a prompt
prompt = ChatPromptTemplate([
    ("system", "You are a helpful AI that can send payments if asked."),
    ("human", "{user_input}"),
    ("placeholder", "{messages}"),
])
llm = init_chat_model(model="gpt-4", model_provider="openai")
# Specifying tool_choice forces the model to call this tool
llm_with_tools = llm.bind_tools([payman_tool], tool_choice=payman_tool.name)
llm_chain = prompt | llm_with_tools
@chain
def tool_chain(user_input: str, config: RunnableConfig):
    input_ = {"user_input": user_input}
    # First pass: the model decides which tool to call and with what arguments
    ai_msg = llm_chain.invoke(input_, config=config)
    # Execute the requested tool calls; each returns a ToolMessage
    tool_msgs = payman_tool.batch(ai_msg.tool_calls, config=config)
    # Second pass: feed the tool results back so the model can produce a final response
    return llm_chain.invoke({**input_, "messages": [ai_msg, *tool_msgs]}, config=config)
# Example usage:
response = tool_chain.invoke("Send $10 to payee123.")
print(response)
```
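
If you want to exercise the chaining pattern before wiring up real Payman credentials, you can substitute a stand-in tool built with LangChain's `@tool` decorator. The tool name, arguments, and return value below are illustrative only and do not reflect the real PaymanAI schema:

```python
from langchain_core.tools import tool

@tool
def send_payment(amount: float, payee_id: str) -> str:
    """Send a (mocked) payment of `amount` USD to `payee_id`."""
    return f"Mock payment of ${amount:.2f} sent to {payee_id}."

# Drop the stand-in into the same chain to test the round trip:
# llm_with_tools = llm.bind_tools([send_payment], tool_choice=send_payment.name)
```

Because the stand-in exposes the same tool interface, the `payman_tool.batch(ai_msg.tool_calls, ...)` step in `tool_chain` works unchanged.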
## API reference
You can find full API documentation for PaymanAI at:
- [Python reference](https://python.langchain.com/v0.2/api_reference/community/tools/langchain_community.tools.langchain_payman_tool.tool.PaymanAI.html)