INVALID_TOOL_RESULTS
You are passing too many, too few, or mismatched
ToolMessages
to a model.
When using a model to call tools, the
AIMessage
the model responds with will contain a tool_calls array. To continue
the flow, the next messages you pass back to the model must be exactly
one ToolMessage for each item in that array containing the result of
that tool call. Each ToolMessage must have a tool_call_id field that
matches one of the tool_calls on the AIMessage.
For example, given the following response from a model:
import { z } from "zod";
import { tool } from "@langchain/core/tools";
import { ChatOpenAI } from "@langchain/openai";
import { BaseMessageLike } from "@langchain/core/messages";
const model = new ChatOpenAI({
model: "gpt-4o-mini",
});
const dummyTool = tool(
async () => {
return "action complete!";
},
{
name: "foo",
schema: z.object({}),
}
);
const modelWithTools = model.bindTools([dummyTool]);
const chatHistory: BaseMessageLike[] = [
{
role: "user",
content: `Call tool "foo" twice with no arguments`,
},
];
const responseMessage = await modelWithTools.invoke(chatHistory);
console.log(responseMessage);
AIMessage {
"id": "chatcmpl-AIgT1xUd6lkWAutThiiBsqjq7Ykj1",
"content": "",
"additional_kwargs": {
"tool_calls": [
{
"id": "call_BknYpnY7xiARM17TPYqL7luj",
"type": "function",
"function": "[Object]"
},
{
"id": "call_EHf8MIcTdsLCZcFVlcH4hxJw",
"type": "function",
"function": "[Object]"
}
]
},
"response_metadata": {
"tokenUsage": {
"promptTokens": 42,
"completionTokens": 37,
"totalTokens": 79
},
"finish_reason": "tool_calls",
"usage": {
"prompt_tokens": 42,
"completion_tokens": 37,
"total_tokens": 79,
"prompt_tokens_details": {
"cached_tokens": 0
},
"completion_tokens_details": {
"reasoning_tokens": 0
}
},
"system_fingerprint": "fp_e2bde53e6e"
},
"tool_calls": [
{
"name": "foo",
"args": {},
"type": "tool_call",
"id": "call_BknYpnY7xiARM17TPYqL7luj"
},
{
"name": "foo",
"args": {},
"type": "tool_call",
"id": "call_EHf8MIcTdsLCZcFVlcH4hxJw"
}
],
"invalid_tool_calls": [],
"usage_metadata": {
"output_tokens": 37,
"input_tokens": 42,
"total_tokens": 79,
"input_token_details": {
"cache_read": 0
},
"output_token_details": {
"reasoning": 0
}
}
}
Calling the model with only one tool response would result in an error:
const toolResponse1 = await dummyTool.invoke(responseMessage.tool_calls![0]);
chatHistory.push(responseMessage);
chatHistory.push(toolResponse1);
await modelWithTools.invoke(chatHistory);
BadRequestError: 400 An assistant message with 'tool_calls' must be followed by tool messages responding to each 'tool_call_id'. The following tool_call_ids did not have response messages: call_EHf8MIcTdsLCZcFVlcH4hxJw
at APIError.generate (/Users/jacoblee/langchain/langchainjs/libs/langchain-openai/node_modules/openai/error.js:45:20)
at OpenAI.makeStatusError (/Users/jacoblee/langchain/langchainjs/libs/langchain-openai/node_modules/openai/core.js:291:33)
at OpenAI.makeRequest (/Users/jacoblee/langchain/langchainjs/libs/langchain-openai/node_modules/openai/core.js:335:30)
at process.processTicksAndRejections (node:internal/process/task_queues:95:5)
at async /Users/jacoblee/langchain/langchainjs/libs/langchain-openai/dist/chat_models.cjs:1441:29
at async RetryOperation._fn (/Users/jacoblee/langchain/langchainjs/node_modules/p-retry/index.js:50:12) {
status: 400,
headers: {
'access-control-expose-headers': 'X-Request-ID',
'alt-svc': 'h3=":443"; ma=86400',
'cf-cache-status': 'DYNAMIC',
'cf-ray': '8d31d4d95e2a0c96-EWR',
connection: 'keep-alive',
'content-length': '315',
'content-type': 'application/json',
date: 'Tue, 15 Oct 2024 18:21:53 GMT',
'openai-organization': 'langchain',
'openai-processing-ms': '16',
'openai-version': '2020-10-01',
server: 'cloudflare',
'set-cookie': '__cf_bm=e5.GX1bHiMVgr76YSvAKuECCGG7X_RXF0jDGSMXFGfU-1729016513-1.0.1.1-ZBYeVqX.M6jSNJB.wS696fEhX7V.es._M0WcWtQ9Qx8doEA5qMVKNE5iX6i7UKyPCg2GvDfM.MoDwRCXKMSkEA; path=/; expires=Tue, 15-Oct-24 18:51:53 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None, _cfuvid=J8gS08GodUA9hRTYuElen0YOCzMO3d4LW0ZT0k_kyj4-1729016513560-0.0.1.1-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None',
'strict-transport-security': 'max-age=31536000; includeSubDomains; preload',
'x-content-type-options': 'nosniff',
'x-ratelimit-limit-requests': '30000',
'x-ratelimit-limit-tokens': '150000000',
'x-ratelimit-remaining-requests': '29999',
'x-ratelimit-remaining-tokens': '149999967',
'x-ratelimit-reset-requests': '2ms',
'x-ratelimit-reset-tokens': '0s',
'x-request-id': 'req_f810058e7f047fafcb713575c4419161'
},
request_id: 'req_f810058e7f047fafcb713575c4419161',
error: {
message: "An assistant message with 'tool_calls' must be followed by tool messages responding to each 'tool_call_id'. The following tool_call_ids did not have response messages: call_EHf8MIcTdsLCZcFVlcH4hxJw",
type: 'invalid_request_error',
param: 'messages',
code: null
},
code: null,
param: 'messages',
type: 'invalid_request_error',
attemptNumber: 1,
retriesLeft: 6
}
If we add a second response, the call will succeed as expected because we now have one tool response per tool call:
const toolResponse2 = await dummyTool.invoke(responseMessage.tool_calls![1]);
chatHistory.push(toolResponse2);
await modelWithTools.invoke(chatHistory);
AIMessage {
"id": "chatcmpl-AIgTPDBm1epnnLHx0tPFTgpsf8Ay6",
"content": "The tool \"foo\" was called twice, and both times returned the result: \"action complete!\".",
"additional_kwargs": {},
"response_metadata": {
"tokenUsage": {
"promptTokens": 98,
"completionTokens": 21,
"totalTokens": 119
},
"finish_reason": "stop",
"usage": {
"prompt_tokens": 98,
"completion_tokens": 21,
"total_tokens": 119,
"prompt_tokens_details": {
"cached_tokens": 0
},
"completion_tokens_details": {
"reasoning_tokens": 0
}
},
"system_fingerprint": "fp_e2bde53e6e"
},
"tool_calls": [],
"invalid_tool_calls": [],
"usage_metadata": {
"output_tokens": 21,
"input_tokens": 98,
"total_tokens": 119,
"input_token_details": {
"cache_read": 0
},
"output_token_details": {
"reasoning": 0
}
}
}
But if we add a duplicate, extra tool response, the call will fail again:
const duplicateToolResponse2 = await dummyTool.invoke(
responseMessage.tool_calls![1]
);
chatHistory.push(duplicateToolResponse2);
await modelWithTools.invoke(chatHistory);
BadRequestError: 400 Invalid parameter: messages with role 'tool' must be a response to a preceeding message with 'tool_calls'.
at APIError.generate (/Users/jacoblee/langchain/langchainjs/libs/langchain-openai/node_modules/openai/error.js:45:20)
at OpenAI.makeStatusError (/Users/jacoblee/langchain/langchainjs/libs/langchain-openai/node_modules/openai/core.js:291:33)
at OpenAI.makeRequest (/Users/jacoblee/langchain/langchainjs/libs/langchain-openai/node_modules/openai/core.js:335:30)
at process.processTicksAndRejections (node:internal/process/task_queues:95:5)
at async /Users/jacoblee/langchain/langchainjs/libs/langchain-openai/dist/chat_models.cjs:1441:29
at async RetryOperation._fn (/Users/jacoblee/langchain/langchainjs/node_modules/p-retry/index.js:50:12) {
status: 400,
headers: {
'access-control-expose-headers': 'X-Request-ID',
'alt-svc': 'h3=":443"; ma=86400',
'cf-cache-status': 'DYNAMIC',
'cf-ray': '8d31d57dff5e0f3b-EWR',
connection: 'keep-alive',
'content-length': '233',
'content-type': 'application/json',
date: 'Tue, 15 Oct 2024 18:22:19 GMT',
'openai-organization': 'langchain',
'openai-processing-ms': '36',
'openai-version': '2020-10-01',
server: 'cloudflare',
'set-cookie': '__cf_bm=QUsNlSGxVeIbscI0rm2YR3U9aUFLNxxqh1i_3aYBGN4-1729016539-1.0.1.1-sKRUvxHkQXvlb5LaqASkGtIwPMWUF5x9kF0ut8NLP6e0FVKEhdIEkEe6lYA1toW45JGTwp98xahaX7wt9CO4AA; path=/; expires=Tue, 15-Oct-24 18:52:19 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None, _cfuvid=J6fN8u8HUieCeyLDI59mi_0r_W0DgiO207wEtvrmT9Y-1729016539919-0.0.1.1-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None',
'strict-transport-security': 'max-age=31536000; includeSubDomains; preload',
'x-content-type-options': 'nosniff',
'x-ratelimit-limit-requests': '30000',
'x-ratelimit-limit-tokens': '150000000',
'x-ratelimit-remaining-requests': '29999',
'x-ratelimit-remaining-tokens': '149999956',
'x-ratelimit-reset-requests': '2ms',
'x-ratelimit-reset-tokens': '0s',
'x-request-id': 'req_aebfebbb9af2feaf2e9683948e431676'
},
request_id: 'req_aebfebbb9af2feaf2e9683948e431676',
error: {
message: "Invalid parameter: messages with role 'tool' must be a response to a preceeding message with 'tool_calls'.",
type: 'invalid_request_error',
param: 'messages.[4].role',
code: null
},
code: null,
param: 'messages.[4].role',
type: 'invalid_request_error',
attemptNumber: 1,
retriesLeft: 6
}
You should additionally not pass ToolMessages back to a model if
they are not preceded by an AIMessage with tool calls. For example,
this will fail:
await modelWithTools.invoke([
{
role: "tool",
content: "action completed!",
tool_call_id: "dummy",
},
]);
BadRequestError: 400 Invalid parameter: messages with role 'tool' must be a response to a preceeding message with 'tool_calls'.
at APIError.generate (/Users/jacoblee/langchain/langchainjs/libs/langchain-openai/node_modules/openai/error.js:45:20)
at OpenAI.makeStatusError (/Users/jacoblee/langchain/langchainjs/libs/langchain-openai/node_modules/openai/core.js:291:33)
at OpenAI.makeRequest (/Users/jacoblee/langchain/langchainjs/libs/langchain-openai/node_modules/openai/core.js:335:30)
at process.processTicksAndRejections (node:internal/process/task_queues:95:5)
at async /Users/jacoblee/langchain/langchainjs/libs/langchain-openai/dist/chat_models.cjs:1441:29
at async RetryOperation._fn (/Users/jacoblee/langchain/langchainjs/node_modules/p-retry/index.js:50:12) {
status: 400,
headers: {
'access-control-expose-headers': 'X-Request-ID',
'alt-svc': 'h3=":443"; ma=86400',
'cf-cache-status': 'DYNAMIC',
'cf-ray': '8d31d5da7fba19aa-EWR',
connection: 'keep-alive',
'content-length': '233',
'content-type': 'application/json',
date: 'Tue, 15 Oct 2024 18:22:34 GMT',
'openai-organization': 'langchain',
'openai-processing-ms': '25',
'openai-version': '2020-10-01',
server: 'cloudflare',
'set-cookie': '__cf_bm=qK6.PWACr7IYuMafLpxumD4CrFnwHQiJn4TiGkrNTBk-1729016554-1.0.1.1-ECIk0cvh1wOfsK41a1Ce7npngsUDRRG93_yinP4.kVIWu1eX0CFG19iZ8yfGXedyPo6Wh1CKTGLk_3Qwrg.blA; path=/; expires=Tue, 15-Oct-24 18:52:34 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None, _cfuvid=IVTqysqHo4VUVJ.tVTcGg0rnXGWTbSSzX5mcUVrw8BU-1729016554732-0.0.1.1-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None',
'strict-transport-security': 'max-age=31536000; includeSubDomains; preload',
'x-content-type-options': 'nosniff',
'x-ratelimit-limit-requests': '30000',
'x-ratelimit-limit-tokens': '150000000',
'x-ratelimit-remaining-requests': '29999',
'x-ratelimit-remaining-tokens': '149999978',
'x-ratelimit-reset-requests': '2ms',
'x-ratelimit-reset-tokens': '0s',
'x-request-id': 'req_59339f8163ef5bd3f0308a212611dfea'
},
request_id: 'req_59339f8163ef5bd3f0308a212611dfea',
error: {
message: "Invalid parameter: messages with role 'tool' must be a response to a preceeding message with 'tool_calls'.",
type: 'invalid_request_error',
param: 'messages.[0].role',
code: null
},
code: null,
param: 'messages.[0].role',
type: 'invalid_request_error',
attemptNumber: 1,
retriesLeft: 6
}
See this guide for more details on tool calling.
Troubleshooting
The following may help resolve this error:
- If you are using a custom executor rather than a prebuilt one like
LangGraph's ToolNode or the legacy LangChain AgentExecutor, verify that
you are invoking and returning the result for one tool per tool call.
- If you are using few-shot tool call
examples with messages that you
manually create, and you want to simulate a failure, you still need
to pass back a
ToolMessage whose content indicates that failure.