Human-in-the-loop
There are certain tools that we don't trust a model to execute on its own. One thing we can do in such situations is require human approval before the tool is invoked.
Setup
We'll need to install the following packages:
%pip install --upgrade --quiet langchain
And set these environment variables:
import getpass
import os
# If you'd like to use LangSmith, uncomment the below:
# os.environ["LANGCHAIN_TRACING_V2"] = "true"
# os.environ["LANGCHAIN_API_KEY"] = getpass.getpass()
Chain
Suppose we have the following (dummy) tools and tool-calling chain:
Pick a chat model provider and follow its setup steps:
- OpenAI
- Anthropic
- Google VertexAI
- Cohere
- FireworksAI
- MistralAI
- TogetherAI
Install dependencies
pip install -qU langchain-openai
Set environment variables
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass()
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(model="gpt-3.5-turbo-0125")
Install dependencies
pip install -qU langchain-anthropic
Set environment variables
import getpass
import os
os.environ["ANTHROPIC_API_KEY"] = getpass.getpass()
from langchain_anthropic import ChatAnthropic
llm = ChatAnthropic(model="claude-3-sonnet-20240229")
Install dependencies
pip install -qU langchain-google-vertexai
Set environment variables
import getpass
import os
os.environ["GOOGLE_API_KEY"] = getpass.getpass()
from langchain_google_vertexai import ChatVertexAI
llm = ChatVertexAI(model="gemini-pro")
Install dependencies
pip install -qU langchain-cohere
Set environment variables
import getpass
import os
os.environ["COHERE_API_KEY"] = getpass.getpass()
from langchain_cohere import ChatCohere
llm = ChatCohere(model="command-r")
Install dependencies
pip install -qU langchain-fireworks
Set environment variables
import getpass
import os
os.environ["FIREWORKS_API_KEY"] = getpass.getpass()
from langchain_fireworks import ChatFireworks
llm = ChatFireworks(model="accounts/fireworks/models/mixtral-8x7b-instruct")
Install dependencies
pip install -qU langchain-mistralai
Set environment variables
import getpass
import os
os.environ["MISTRAL_API_KEY"] = getpass.getpass()
from langchain_mistralai import ChatMistralAI
llm = ChatMistralAI(model="mistral-large-latest")
Install dependencies
pip install -qU langchain-openai
Set environment variables
import getpass
import os
os.environ["TOGETHER_API_KEY"] = getpass.getpass()
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(
    base_url="https://api.together.xyz/v1",
    api_key=os.environ["TOGETHER_API_KEY"],
    model="mistralai/Mixtral-8x7B-Instruct-v0.1",
)
The rest of this guide uses Anthropic's Claude 3 Sonnet as the model:
from langchain_anthropic import ChatAnthropic

llm = ChatAnthropic(model="claude-3-sonnet-20240229", temperature=0)
from typing import Dict, List

from langchain_core.messages import AIMessage
from langchain_core.tools import tool


@tool
def count_emails(last_n_days: int) -> int:
    """Dummy function to count the number of emails received in the last n days."""
    return last_n_days * 2


@tool
def send_email(message: str, recipient: str) -> str:
    """Dummy function to send an email to the given recipient."""
    return f"Successfully sent email to {recipient}."


tools = [count_emails, send_email]
llm_with_tools = llm.bind_tools(tools)


def call_tools(msg: AIMessage) -> List[Dict]:
    """Simple sequential tool calling helper."""
    tool_map = {tool.name: tool for tool in tools}
    tool_calls = msg.tool_calls.copy()
    for tool_call in tool_calls:
        # Execute each requested tool and attach its result to the tool call dict.
        tool_call["output"] = tool_map[tool_call["name"]].invoke(tool_call["args"])
    return tool_calls
chain = llm_with_tools | call_tools
chain.invoke("how many emails did i get in the last 5 days?")
[{'name': 'count_emails',
  'args': {'last_n_days': 5},
  'id': 'toolu_012VHuh7vk5dVNct5SgZj3gh',
  'output': 10}]
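Before adding an approval step, it helps to look at the model's raw tool-call requests, since that is exactly what the approval gate will inspect. A quick check, reusing the llm_with_tools defined above (the id value will differ between runs):
msg = llm_with_tools.invoke("how many emails did i get in the last 5 days?")
# AIMessage.tool_calls holds the requested calls before anything is executed.
print(msg.tool_calls)
# e.g. [{'name': 'count_emails', 'args': {'last_n_days': 5}, 'id': '...'}]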
Adding human approval
We can add a simple human approval step to our chain:
import json


def human_approval(msg: AIMessage) -> AIMessage:
    tool_strs = "\n\n".join(
        json.dumps(tool_call, indent=2) for tool_call in msg.tool_calls
    )
    input_msg = (
        f"Do you approve of the following tool invocations\n\n{tool_strs}\n\n"
        "Anything except 'Y'/'Yes' (case-insensitive) will be treated as a no."
    )
    resp = input(input_msg)
    if resp.lower() not in ("yes", "y"):
        raise ValueError(f"Tool invocations not approved:\n\n{tool_strs}")
    return msg
chain = llm_with_tools | human_approval | call_tools
chain.invoke("how many emails did i get in the last 5 days?")
Do you approve of the following tool invocations
{
  "name": "count_emails",
  "args": {
    "last_n_days": 5
  },
  "id": "toolu_01LCpjpFxrRspygDscnHYyPm"
}
Anything except 'Y'/'Yes' (case-insensitive) will be treated as a no. yes
[{'name': 'count_emails',
  'args': {'last_n_days': 5},
  'id': 'toolu_01LCpjpFxrRspygDscnHYyPm',
  'output': 10}]
chain.invoke("Send sally@gmail.com an email saying 'What's up homie'")
Do you approve of the following tool invocations
{
  "name": "send_email",
  "args": {
    "message": "What's up homie",
    "recipient": "sally@gmail.com"
  },
  "id": "toolu_0158qJVd1AL32Y1xxYUAtNEy"
}
Anything except 'Y'/'Yes' (case-insensitive) will be treated as a no. no
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
Cell In[11], line 1
----> 1 chain.invoke("Send sally@gmail.com an email saying 'What's up homie'")
File ~/langchain/libs/core/langchain_core/runnables/base.py:2499, in RunnableSequence.invoke(self, input, config)
2497 try:
2498 for i, step in enumerate(self.steps):
-> 2499 input = step.invoke(
2500 input,
2501 # mark each step as a child run
2502 patch_config(
2503 config, callbacks=run_manager.get_child(f"seq:step:{i+1}")
2504 ),
2505 )
2506 # finish the root run
2507 except BaseException as e:
File ~/langchain/libs/core/langchain_core/runnables/base.py:3961, in RunnableLambda.invoke(self, input, config, **kwargs)
3959 """Invoke this runnable synchronously."""
3960 if hasattr(self, "func"):
-> 3961 return self._call_with_config(
3962 self._invoke,
3963 input,
3964 self._config(config, self.func),
3965 **kwargs,
3966 )
3967 else:
3968 raise TypeError(
3969 "Cannot invoke a coroutine function synchronously."
3970 "Use `ainvoke` instead."
3971 )
File ~/langchain/libs/core/langchain_core/runnables/base.py:1625, in Runnable._call_with_config(self, func, input, config, run_type, **kwargs)
1621 context = copy_context()
1622 context.run(var_child_runnable_config.set, child_config)
1623 output = cast(
1624 Output,
-> 1625 context.run(
1626 call_func_with_variable_args, # type: ignore[arg-type]
1627 func, # type: ignore[arg-type]
1628 input, # type: ignore[arg-type]
1629 config,
1630 run_manager,
1631 **kwargs,
1632 ),
1633 )
1634 except BaseException as e:
1635 run_manager.on_chain_error(e)
File ~/langchain/libs/core/langchain_core/runnables/config.py:347, in call_func_with_variable_args(func, input, config, run_manager, **kwargs)
345 if run_manager is not None and accepts_run_manager(func):
346 kwargs["run_manager"] = run_manager
--> 347 return func(input, **kwargs)
File ~/langchain/libs/core/langchain_core/runnables/base.py:3835, in RunnableLambda._invoke(self, input, run_manager, config, **kwargs)
3833 output = chunk
3834 else:
-> 3835 output = call_func_with_variable_args(
3836 self.func, input, config, run_manager, **kwargs
3837 )
3838 # If the output is a runnable, invoke it
3839 if isinstance(output, Runnable):
File ~/langchain/libs/core/langchain_core/runnables/config.py:347, in call_func_with_variable_args(func, input, config, run_manager, **kwargs)
345 if run_manager is not None and accepts_run_manager(func):
346 kwargs["run_manager"] = run_manager
--> 347 return func(input, **kwargs)
Cell In[9], line 14, in human_approval(msg)
12 resp = input(input_msg)
13 if resp.lower() not in ("yes", "y"):
---> 14 raise ValueError(f"Tool invocations not approved:\n\n{tool_strs}")
15 return msg
ValueError: Tool invocations not approved:
{
  "name": "send_email",
  "args": {
    "message": "What's up homie",
    "recipient": "sally@gmail.com"
  },
  "id": "toolu_0158qJVd1AL32Y1xxYUAtNEy"
}
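In an application you will usually want to catch the rejection rather than let the ValueError propagate and crash the run. A minimal sketch, reusing the chain defined above:
try:
    chain.invoke("Send sally@gmail.com an email saying 'What's up homie'")
except ValueError as e:
    # The reviewer declined, so no tool was executed; handle it however makes
    # sense for your app (log it, ask the model to revise, fall back, etc.).
    print(f"Tool invocation was not approved:\n\n{e}")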