diff --git a/reference/python/mkdocs.yml b/reference/python/mkdocs.yml index ac00cd9680..cb9801896b 100644 --- a/reference/python/mkdocs.yml +++ b/reference/python/mkdocs.yml @@ -440,8 +440,8 @@ nav: - Anthropic: - integrations/langchain_anthropic/index.md - ChatAnthropic: integrations/langchain_anthropic/ChatAnthropic.md - - AnthropicLLM: integrations/langchain_anthropic/AnthropicLLM.md - Middleware: integrations/langchain_anthropic/middleware.md + - AnthropicLLM: integrations/langchain_anthropic/AnthropicLLM.md - AstraDB: integrations/langchain_astradb.md - AWS: integrations/langchain_aws.md - Azure (Microsoft): diff --git a/reference/python/uv.lock b/reference/python/uv.lock index 3e5239351a..41d995aa7e 100644 --- a/reference/python/uv.lock +++ b/reference/python/uv.lock @@ -427,30 +427,30 @@ wheels = [ [[package]] name = "boto3" -version = "1.40.63" +version = "1.42.7" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "botocore", marker = "platform_python_implementation != 'PyPy'" }, { name = "jmespath", marker = "platform_python_implementation != 'PyPy'" }, { name = "s3transfer", marker = "platform_python_implementation != 'PyPy'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/5d/fb/db063c9600cbe2c52009edd436262c696b18bafaf49835a7f17ba1679a84/boto3-1.40.63.tar.gz", hash = "sha256:3bf4b034900c87a6a9b3b3b44c4aec26e96fc73bff2505f0766224b7295178ce", size = 111541, upload-time = "2025-10-30T19:32:52.081Z" } +sdist = { url = "https://files.pythonhosted.org/packages/25/f9/808ed6c387802399a9d6c3a6cc3d09d19376dbbcdf228a8ca501b7f98eda/boto3-1.42.7.tar.gz", hash = "sha256:eda49046c0f6a21ac159f9b2d609e5cc70d1dd019b7ac9618eec99285282b3db", size = 112817, upload-time = "2025-12-10T20:32:10.414Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/c6/d4/d977f678c60e05c19c857ad896f838152dc68e0cc28f0f026e224879d8ca/boto3-1.40.63-py3-none-any.whl", hash = "sha256:f15d4abf1a6283887c336f660cdfc2162a210d2d8f4d98dbcbcef983371c284d", size = 139322, upload-time = "2025-10-30T19:32:49.876Z" }, + { url = "https://files.pythonhosted.org/packages/99/87/0929d68046575a2171a6ef8681a14e707c25e1dcc6db883f1729c3c111cd/boto3-1.42.7-py3-none-any.whl", hash = "sha256:c5cb2ada690c14e2dfa1e1c59ef7ef399c5e381f5514f1541d28310e35192300", size = 140573, upload-time = "2025-12-10T20:32:08.253Z" }, ] [[package]] name = "botocore" -version = "1.40.63" +version = "1.42.7" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "jmespath", marker = "platform_python_implementation != 'PyPy'" }, { name = "python-dateutil", marker = "platform_python_implementation != 'PyPy'" }, { name = "urllib3", marker = "platform_python_implementation != 'PyPy'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/a2/08/62f4d332dd729d14190073eaf6db63803a5bc2d9b8f1248ae3cbc6c9cb64/botocore-1.40.63.tar.gz", hash = "sha256:0324552c3c800e258cbcb8c22b495a2e2e0260a7408d08016196e46fa0d1b587", size = 14400022, upload-time = "2025-10-30T19:32:40.81Z" } +sdist = { url = "https://files.pythonhosted.org/packages/99/76/d55a451399fa3a05a39881976dc9a02e6d60661f7e68976a387da655be5a/botocore-1.42.7.tar.gz", hash = "sha256:cc401b4836eae2a781efa1d1df88b2e92f9245885a6ae1bf9a6b26bc97b3efd2", size = 14855150, upload-time = "2025-12-10T20:31:58.665Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/45/b0/17c1e8fa8617c588da33f6724909eef56e1745ddfe2f87972d9a8e9e6ca2/botocore-1.40.63-py3-none-any.whl", hash = 
"sha256:83657b3ee487268fccc9ba022cba572ba657b9ece8cddd1fa241e2c6a49c8c14", size = 14061984, upload-time = "2025-10-30T19:32:36.945Z" }, + { url = "https://files.pythonhosted.org/packages/d6/46/223f3e319a5a710bd28b4e4b5d0ba16a0ee9c858541335ef42fd14443e83/botocore-1.42.7-py3-none-any.whl", hash = "sha256:92128d56654342f026d5c20a92bf0e8b546be1eb38df2c0efc7433e8bbc39045", size = 14527904, upload-time = "2025-12-10T20:31:54.934Z" }, ] [[package]] @@ -753,11 +753,12 @@ wheels = [ [[package]] name = "deepagents" version = "0.3.0" -source = { git = "https://github.com/langchain-ai/deepagents.git?subdirectory=libs%2Fdeepagents#6a9074f58b4dd7b0302749b84b87f6537d15c4b6" } +source = { git = "https://github.com/langchain-ai/deepagents.git?subdirectory=libs%2Fdeepagents#e7b8b808f52deb98da3582b1100bb787c2ec2eb7" } dependencies = [ { name = "langchain", marker = "platform_python_implementation != 'PyPy'" }, { name = "langchain-anthropic", marker = "platform_python_implementation != 'PyPy'" }, { name = "langchain-core", marker = "platform_python_implementation != 'PyPy'" }, + { name = "langchain-google-genai", marker = "platform_python_implementation != 'PyPy'" }, { name = "wcmatch", marker = "platform_python_implementation != 'PyPy'" }, ] @@ -1799,7 +1800,7 @@ wheels = [ [[package]] name = "langchain" version = "1.1.3" -source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Flangchain_v1#ff6e3558d764ccfd34db9d008abd37411dc758b0" } +source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Flangchain_v1#5aa46501cf43758cc9a4f3275a02258c2cf8228f" } dependencies = [ { name = "langchain-core", marker = "platform_python_implementation != 'PyPy'" }, { name = "langgraph", marker = "platform_python_implementation != 'PyPy'" }, @@ -1809,7 +1810,7 @@ dependencies = [ [[package]] name = "langchain-anthropic" version = "1.2.0" -source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fanthropic#ff6e3558d764ccfd34db9d008abd37411dc758b0" } +source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fanthropic#5aa46501cf43758cc9a4f3275a02258c2cf8228f" } dependencies = [ { name = "anthropic", marker = "platform_python_implementation != 'PyPy'" }, { name = "langchain-core", marker = "platform_python_implementation != 'PyPy'" }, @@ -1829,7 +1830,7 @@ dependencies = [ [[package]] name = "langchain-aws" version = "1.1.0" -source = { git = "https://github.com/langchain-ai/langchain-aws.git?subdirectory=libs%2Faws#6ae571c73131c9d61c1c02eb30a9874cb7ff5631" } +source = { git = "https://github.com/langchain-ai/langchain-aws.git?subdirectory=libs%2Faws#724a60af56f19b38e767677f2ce0e240a7a9278d" } dependencies = [ { name = "boto3", marker = "platform_python_implementation != 'PyPy'" }, { name = "langchain-core", marker = "platform_python_implementation != 'PyPy'" }, @@ -1893,7 +1894,7 @@ dependencies = [ [[package]] name = "langchain-chroma" version = "1.0.0" -source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fchroma#ff6e3558d764ccfd34db9d008abd37411dc758b0" } +source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fchroma#5aa46501cf43758cc9a4f3275a02258c2cf8228f" } dependencies = [ { name = "chromadb", marker = "platform_python_implementation != 'PyPy'" }, { name = "langchain-core", marker = "platform_python_implementation != 'PyPy'" }, @@ -1903,7 +1904,7 @@ dependencies = [ [[package]] name = "langchain-classic" version = 
"1.0.0" -source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Flangchain#ff6e3558d764ccfd34db9d008abd37411dc758b0" } +source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Flangchain#5aa46501cf43758cc9a4f3275a02258c2cf8228f" } dependencies = [ { name = "langchain-core", marker = "platform_python_implementation != 'PyPy'" }, { name = "langchain-text-splitters", marker = "platform_python_implementation != 'PyPy'" }, @@ -1947,7 +1948,7 @@ dependencies = [ [[package]] name = "langchain-core" version = "1.1.3" -source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fcore#ff6e3558d764ccfd34db9d008abd37411dc758b0" } +source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fcore#5aa46501cf43758cc9a4f3275a02258c2cf8228f" } dependencies = [ { name = "jsonpatch", marker = "platform_python_implementation != 'PyPy'" }, { name = "langsmith", marker = "platform_python_implementation != 'PyPy'" }, @@ -1972,7 +1973,7 @@ dependencies = [ [[package]] name = "langchain-deepseek" version = "1.1.0" -source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fdeepseek#ff6e3558d764ccfd34db9d008abd37411dc758b0" } +source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fdeepseek#5aa46501cf43758cc9a4f3275a02258c2cf8228f" } dependencies = [ { name = "langchain-core", marker = "platform_python_implementation != 'PyPy'" }, { name = "langchain-openai", marker = "platform_python_implementation != 'PyPy'" }, @@ -1981,7 +1982,7 @@ dependencies = [ [[package]] name = "langchain-elasticsearch" version = "0.4.0" -source = { git = "https://github.com/langchain-ai/langchain-elastic.git?subdirectory=libs%2Felasticsearch#f9d82e15700ac26f4e3e409fa5ca16e712fb84b3" } +source = { git = "https://github.com/langchain-ai/langchain-elastic.git?subdirectory=libs%2Felasticsearch#a6383faccbc8e6353c9700c3aea5aaf2d5395691" } dependencies = [ { name = "elasticsearch", extra = ["vectorstore-mmr"], marker = "platform_python_implementation != 'PyPy'" }, { name = "langchain-core", marker = "platform_python_implementation != 'PyPy'" }, @@ -1990,7 +1991,7 @@ dependencies = [ [[package]] name = "langchain-exa" version = "1.0.0" -source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fexa#ff6e3558d764ccfd34db9d008abd37411dc758b0" } +source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fexa#5aa46501cf43758cc9a4f3275a02258c2cf8228f" } dependencies = [ { name = "exa-py", marker = "platform_python_implementation != 'PyPy'" }, { name = "langchain-core", marker = "platform_python_implementation != 'PyPy'" }, @@ -1999,7 +2000,7 @@ dependencies = [ [[package]] name = "langchain-fireworks" version = "1.1.0" -source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Ffireworks#ff6e3558d764ccfd34db9d008abd37411dc758b0" } +source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Ffireworks#5aa46501cf43758cc9a4f3275a02258c2cf8228f" } dependencies = [ { name = "aiohttp", marker = "platform_python_implementation != 'PyPy'" }, { name = "fireworks-ai", marker = "platform_python_implementation != 'PyPy'" }, @@ -2011,7 +2012,7 @@ dependencies = [ [[package]] name = "langchain-google-community" version = "3.0.2" -source = { git = 
"https://github.com/langchain-ai/langchain-google.git?subdirectory=libs%2Fcommunity#5b245cb6d150761379b7e78b5d6c75d2b561c992" } +source = { git = "https://github.com/langchain-ai/langchain-google.git?subdirectory=libs%2Fcommunity#a7f2465856300d5d4c3dab382a20dc7680fe7231" } dependencies = [ { name = "google-api-core", marker = "platform_python_implementation != 'PyPy'" }, { name = "google-api-python-client", marker = "platform_python_implementation != 'PyPy'" }, @@ -2025,7 +2026,7 @@ dependencies = [ [[package]] name = "langchain-google-genai" version = "4.0.0" -source = { git = "https://github.com/langchain-ai/langchain-google.git?subdirectory=libs%2Fgenai#5b245cb6d150761379b7e78b5d6c75d2b561c992" } +source = { git = "https://github.com/langchain-ai/langchain-google.git?subdirectory=libs%2Fgenai#a7f2465856300d5d4c3dab382a20dc7680fe7231" } dependencies = [ { name = "filetype", marker = "platform_python_implementation != 'PyPy'" }, { name = "google-genai", marker = "platform_python_implementation != 'PyPy'" }, @@ -2036,7 +2037,7 @@ dependencies = [ [[package]] name = "langchain-google-vertexai" version = "3.2.0" -source = { git = "https://github.com/langchain-ai/langchain-google.git?subdirectory=libs%2Fvertexai#5b245cb6d150761379b7e78b5d6c75d2b561c992" } +source = { git = "https://github.com/langchain-ai/langchain-google.git?subdirectory=libs%2Fvertexai#a7f2465856300d5d4c3dab382a20dc7680fe7231" } dependencies = [ { name = "bottleneck", marker = "platform_python_implementation != 'PyPy'" }, { name = "google-cloud-aiplatform", marker = "platform_python_implementation != 'PyPy'" }, @@ -2053,7 +2054,7 @@ dependencies = [ [[package]] name = "langchain-groq" version = "1.1.0" -source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fgroq#ff6e3558d764ccfd34db9d008abd37411dc758b0" } +source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fgroq#5aa46501cf43758cc9a4f3275a02258c2cf8228f" } dependencies = [ { name = "groq", marker = "platform_python_implementation != 'PyPy'" }, { name = "langchain-core", marker = "platform_python_implementation != 'PyPy'" }, @@ -2062,7 +2063,7 @@ dependencies = [ [[package]] name = "langchain-huggingface" version = "1.1.0" -source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fhuggingface#ff6e3558d764ccfd34db9d008abd37411dc758b0" } +source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fhuggingface#5aa46501cf43758cc9a4f3275a02258c2cf8228f" } dependencies = [ { name = "huggingface-hub", marker = "platform_python_implementation != 'PyPy'" }, { name = "langchain-core", marker = "platform_python_implementation != 'PyPy'" }, @@ -2091,7 +2092,7 @@ dependencies = [ [[package]] name = "langchain-milvus" version = "0.3.1" -source = { git = "https://github.com/langchain-ai/langchain-milvus.git?subdirectory=libs%2Fmilvus#01a4c6903d4893ffc635ba564c99af8a08d1a437" } +source = { git = "https://github.com/langchain-ai/langchain-milvus.git?subdirectory=libs%2Fmilvus#b3967136712df93673d2f488602ab0c7ec424e3b" } dependencies = [ { name = "langchain-core", marker = "platform_python_implementation != 'PyPy'" }, { name = "pymilvus", marker = "platform_python_implementation != 'PyPy'" }, @@ -2100,7 +2101,7 @@ dependencies = [ [[package]] name = "langchain-mistralai" version = "1.1.0" -source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fmistralai#ff6e3558d764ccfd34db9d008abd37411dc758b0" } 
+source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fmistralai#5aa46501cf43758cc9a4f3275a02258c2cf8228f" } dependencies = [ { name = "httpx", marker = "platform_python_implementation != 'PyPy'" }, { name = "httpx-sse", marker = "platform_python_implementation != 'PyPy'" }, @@ -2123,7 +2124,7 @@ dependencies = [ [[package]] name = "langchain-nomic" version = "1.0.1" -source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fnomic#ff6e3558d764ccfd34db9d008abd37411dc758b0" } +source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fnomic#5aa46501cf43758cc9a4f3275a02258c2cf8228f" } dependencies = [ { name = "langchain-core", marker = "platform_python_implementation != 'PyPy'" }, { name = "nomic", marker = "platform_python_implementation != 'PyPy'" }, @@ -2143,7 +2144,7 @@ dependencies = [ [[package]] name = "langchain-ollama" version = "1.0.0" -source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Follama#ff6e3558d764ccfd34db9d008abd37411dc758b0" } +source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Follama#5aa46501cf43758cc9a4f3275a02258c2cf8228f" } dependencies = [ { name = "langchain-core", marker = "platform_python_implementation != 'PyPy'" }, { name = "ollama", marker = "platform_python_implementation != 'PyPy'" }, @@ -2152,7 +2153,7 @@ dependencies = [ [[package]] name = "langchain-openai" version = "1.1.1" -source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fopenai#ff6e3558d764ccfd34db9d008abd37411dc758b0" } +source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fopenai#5aa46501cf43758cc9a4f3275a02258c2cf8228f" } dependencies = [ { name = "langchain-core", marker = "platform_python_implementation != 'PyPy'" }, { name = "openai", marker = "platform_python_implementation != 'PyPy'" }, @@ -2174,7 +2175,7 @@ dependencies = [ [[package]] name = "langchain-perplexity" version = "1.1.0" -source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fperplexity#ff6e3558d764ccfd34db9d008abd37411dc758b0" } +source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fperplexity#5aa46501cf43758cc9a4f3275a02258c2cf8228f" } dependencies = [ { name = "langchain-core", marker = "platform_python_implementation != 'PyPy'" }, { name = "openai", marker = "platform_python_implementation != 'PyPy'" }, @@ -2197,7 +2198,7 @@ dependencies = [ [[package]] name = "langchain-prompty" version = "1.0.0" -source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fprompty#ff6e3558d764ccfd34db9d008abd37411dc758b0" } +source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fprompty#5aa46501cf43758cc9a4f3275a02258c2cf8228f" } dependencies = [ { name = "langchain-core", marker = "platform_python_implementation != 'PyPy'" }, { name = "pyyaml", marker = "platform_python_implementation != 'PyPy'" }, @@ -2206,7 +2207,7 @@ dependencies = [ [[package]] name = "langchain-qdrant" version = "1.1.0" -source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fqdrant#ff6e3558d764ccfd34db9d008abd37411dc758b0" } +source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fqdrant#5aa46501cf43758cc9a4f3275a02258c2cf8228f" } dependencies = [ { name = 
"langchain-core", marker = "platform_python_implementation != 'PyPy'" }, { name = "pydantic", marker = "platform_python_implementation != 'PyPy'" }, @@ -2395,7 +2396,7 @@ dependencies = [ [[package]] name = "langchain-tests" version = "1.0.2" -source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fstandard-tests#ff6e3558d764ccfd34db9d008abd37411dc758b0" } +source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fstandard-tests#5aa46501cf43758cc9a4f3275a02258c2cf8228f" } dependencies = [ { name = "httpx", marker = "platform_python_implementation != 'PyPy'" }, { name = "langchain-core", marker = "platform_python_implementation != 'PyPy'" }, @@ -2413,7 +2414,7 @@ dependencies = [ [[package]] name = "langchain-text-splitters" version = "1.0.0" -source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Ftext-splitters#ff6e3558d764ccfd34db9d008abd37411dc758b0" } +source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Ftext-splitters#5aa46501cf43758cc9a4f3275a02258c2cf8228f" } dependencies = [ { name = "langchain-core", marker = "platform_python_implementation != 'PyPy'" }, ] @@ -2444,7 +2445,7 @@ dependencies = [ [[package]] name = "langchain-xai" version = "1.1.0" -source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fxai#ff6e3558d764ccfd34db9d008abd37411dc758b0" } +source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fxai#5aa46501cf43758cc9a4f3275a02258c2cf8228f" } dependencies = [ { name = "aiohttp", marker = "platform_python_implementation != 'PyPy'" }, { name = "langchain-core", marker = "platform_python_implementation != 'PyPy'" }, @@ -2455,7 +2456,7 @@ dependencies = [ [[package]] name = "langgraph" version = "1.0.4" -source = { git = "https://github.com/langchain-ai/langgraph?subdirectory=libs%2Flanggraph#4d01e69b823c4126f375cbed72fed7c5797d1b91" } +source = { git = "https://github.com/langchain-ai/langgraph?subdirectory=libs%2Flanggraph#df191731918482c5eaf2934a2df0c9cb3571db0c" } dependencies = [ { name = "langchain-core", marker = "platform_python_implementation != 'PyPy'" }, { name = "langgraph-checkpoint", marker = "platform_python_implementation != 'PyPy'" }, @@ -2468,7 +2469,7 @@ dependencies = [ [[package]] name = "langgraph-checkpoint" version = "3.0.1" -source = { git = "https://github.com/langchain-ai/langgraph?subdirectory=libs%2Fcheckpoint#4d01e69b823c4126f375cbed72fed7c5797d1b91" } +source = { git = "https://github.com/langchain-ai/langgraph?subdirectory=libs%2Fcheckpoint#df191731918482c5eaf2934a2df0c9cb3571db0c" } dependencies = [ { name = "langchain-core", marker = "platform_python_implementation != 'PyPy'" }, { name = "ormsgpack", marker = "platform_python_implementation != 'PyPy'" }, @@ -2476,8 +2477,8 @@ dependencies = [ [[package]] name = "langgraph-checkpoint-aws" -version = "1.0.1" -source = { git = "https://github.com/langchain-ai/langchain-aws.git?subdirectory=libs%2Flanggraph-checkpoint-aws#6ae571c73131c9d61c1c02eb30a9874cb7ff5631" } +version = "1.0.2" +source = { git = "https://github.com/langchain-ai/langchain-aws.git?subdirectory=libs%2Flanggraph-checkpoint-aws#724a60af56f19b38e767677f2ce0e240a7a9278d" } dependencies = [ { name = "boto3", marker = "platform_python_implementation != 'PyPy'" }, { name = "langgraph", marker = "platform_python_implementation != 'PyPy'" }, @@ -2487,7 +2488,7 @@ dependencies = [ [[package]] name = "langgraph-checkpoint-postgres" version = "3.0.2" 
-source = { git = "https://github.com/langchain-ai/langgraph?subdirectory=libs%2Fcheckpoint-postgres#4d01e69b823c4126f375cbed72fed7c5797d1b91" } +source = { git = "https://github.com/langchain-ai/langgraph?subdirectory=libs%2Fcheckpoint-postgres#df191731918482c5eaf2934a2df0c9cb3571db0c" } dependencies = [ { name = "langgraph-checkpoint", marker = "platform_python_implementation != 'PyPy'" }, { name = "orjson", marker = "platform_python_implementation != 'PyPy'" }, @@ -2498,7 +2499,7 @@ dependencies = [ [[package]] name = "langgraph-checkpoint-sqlite" version = "3.0.1" -source = { git = "https://github.com/langchain-ai/langgraph?subdirectory=libs%2Fcheckpoint-sqlite#4d01e69b823c4126f375cbed72fed7c5797d1b91" } +source = { git = "https://github.com/langchain-ai/langgraph?subdirectory=libs%2Fcheckpoint-sqlite#df191731918482c5eaf2934a2df0c9cb3571db0c" } dependencies = [ { name = "aiosqlite", marker = "platform_python_implementation != 'PyPy'" }, { name = "langgraph-checkpoint", marker = "platform_python_implementation != 'PyPy'" }, @@ -2508,7 +2509,7 @@ dependencies = [ [[package]] name = "langgraph-prebuilt" version = "1.0.5" -source = { git = "https://github.com/langchain-ai/langgraph?subdirectory=libs%2Fprebuilt#4d01e69b823c4126f375cbed72fed7c5797d1b91" } +source = { git = "https://github.com/langchain-ai/langgraph?subdirectory=libs%2Fprebuilt#df191731918482c5eaf2934a2df0c9cb3571db0c" } dependencies = [ { name = "langchain-core", marker = "platform_python_implementation != 'PyPy'" }, { name = "langgraph-checkpoint", marker = "platform_python_implementation != 'PyPy'" }, @@ -2517,7 +2518,7 @@ dependencies = [ [[package]] name = "langgraph-sdk" version = "0.2.15" -source = { git = "https://github.com/langchain-ai/langgraph?subdirectory=libs%2Fsdk-py#4d01e69b823c4126f375cbed72fed7c5797d1b91" } +source = { git = "https://github.com/langchain-ai/langgraph?subdirectory=libs%2Fsdk-py#df191731918482c5eaf2934a2df0c9cb3571db0c" } dependencies = [ { name = "httpx", marker = "platform_python_implementation != 'PyPy'" }, { name = "orjson", marker = "platform_python_implementation != 'PyPy'" }, @@ -2544,8 +2545,8 @@ dependencies = [ [[package]] name = "langsmith" -version = "0.4.58" -source = { git = "https://github.com/langchain-ai/langsmith-sdk.git?subdirectory=python#97f8b9b9f2a30c426b6adb77f708bbc68b94a4e3" } +version = "0.4.59" +source = { git = "https://github.com/langchain-ai/langsmith-sdk.git?subdirectory=python#4154fcd0610d1d8a77ffb1af67a727f6af0bb031" } dependencies = [ { name = "httpx", marker = "platform_python_implementation != 'PyPy'" }, { name = "orjson", marker = "platform_python_implementation != 'PyPy'" }, @@ -4371,14 +4372,14 @@ wheels = [ [[package]] name = "s3transfer" -version = "0.14.0" +version = "0.16.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "botocore", marker = "platform_python_implementation != 'PyPy'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/62/74/8d69dcb7a9efe8baa2046891735e5dfe433ad558ae23d9e3c14c633d1d58/s3transfer-0.14.0.tar.gz", hash = "sha256:eff12264e7c8b4985074ccce27a3b38a485bb7f7422cc8046fee9be4983e4125", size = 151547, upload-time = "2025-09-09T19:23:31.089Z" } +sdist = { url = "https://files.pythonhosted.org/packages/05/04/74127fc843314818edfa81b5540e26dd537353b123a4edc563109d8f17dd/s3transfer-0.16.0.tar.gz", hash = "sha256:8e990f13268025792229cd52fa10cb7163744bf56e719e0b9cb925ab79abf920", size = 153827, upload-time = "2025-12-01T02:30:59.114Z" } wheels = [ - { url = 
"https://files.pythonhosted.org/packages/48/f0/ae7ca09223a81a1d890b2557186ea015f6e0502e9b8cb8e1813f1d8cfa4e/s3transfer-0.14.0-py3-none-any.whl", hash = "sha256:ea3b790c7077558ed1f02a3072fb3cb992bbbd253392f4b6e9e8976941c7d456", size = 85712, upload-time = "2025-09-09T19:23:30.041Z" }, + { url = "https://files.pythonhosted.org/packages/fc/51/727abb13f44c1fcf6d145979e1535a35794db0f6e450a0cb46aa24732fe2/s3transfer-0.16.0-py3-none-any.whl", hash = "sha256:18e25d66fed509e3868dc1572b3f427ff947dd2c56f844a5bf09481ad3f3b2fe", size = 86830, upload-time = "2025-12-01T02:30:57.729Z" }, ] [[package]] diff --git a/src/oss/python/integrations/chat/anthropic.mdx b/src/oss/python/integrations/chat/anthropic.mdx index 4945133b61..2572ed0b81 100644 --- a/src/oss/python/integrations/chat/anthropic.mdx +++ b/src/oss/python/integrations/chat/anthropic.mdx @@ -74,92 +74,116 @@ from langchain_anthropic import ChatAnthropic model = ChatAnthropic( model="claude-haiku-4-5-20251001", - temperature=0, - max_tokens=1024, - timeout=None, - max_retries=2, - # other params... + # temperature=, + # max_tokens=, + # timeout=, + # max_retries=, + # ... ) ``` -See the @[`ChatAnthropic`] API reference for details on all available parameters. +See the @[`ChatAnthropic`] API reference for details on all available instantiation parameters. -## Invocation - -```python -messages = [ - ( - "system", - "You are a helpful assistant that translates English to French. Translate the user sentence.", - ), - ("human", "I love programming."), -] -ai_msg = model.invoke(messages) -ai_msg -``` - -```output -AIMessage(content="J'adore la programmation.", response_metadata={'id': 'msg_018Nnu76krRPq8HvgKLW4F8T', 'model': 'claude-3-5-sonnet-20240620', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 29, 'output_tokens': 11}}, id='run-57e9295f-db8a-48dc-9619-babd2bedd891-0', usage_metadata={'input_tokens': 29, 'output_tokens': 11, 'total_tokens': 40}) -``` +{/* TODO: show use with a proxy or different base_url */} -```python -print(ai_msg.text) -``` +## Invocation -```output -J'adore la programmation. -``` + + + ```python + messages = [ + ( + "system", + "You are a helpful translator. Translate the user sentence to French.", + ), + ( + "human", + "I love programming.", + ), + ] + model.invoke(messages) + ``` + + + ```python + print(ai_msg.text) + ``` + + ```output + J'adore la programmation. 
-```python -print(ai_msg.text) -``` + ## Invocation -```output -J'adore la programmation. -``` + + + ```python + messages = [ + ( + "system", + "You are a helpful translator. Translate the user sentence to French.", + ), + ( + "human", + "I love programming.", + ), + ] + ai_msg = model.invoke(messages) + ``` + + + ```python + print(ai_msg.text) + ``` + + ```output + J'adore la programmation. + ``` + + + ```python + for chunk in model.stream(messages): + print(chunk.text, end="") + ``` + + ```output + J'aime la programmation. + ``` + + To aggregate the full message from the stream: + + ```python + stream = model.stream(messages) + full = next(stream) + for chunk in stream: + full += chunk + full + ``` + + ```python + AIMessageChunk(content="J'aime la programmation.", id="run-b34faef0-882f-4869-a19c-ed2b856e6361") + ``` + + + ```python + await model.ainvoke(messages) + + # stream + async for chunk in model.astream(messages): + print(chunk.text, end="") + + # batch + await model.abatch([messages]) + ``` + + ```python + AIMessage( + content="J'aime la programmation.", + response_metadata={ + "id": "msg_01Trik66aiQ9Z1higrD5XFx3", + "model": "claude-sonnet-4-5-20250929", + "stop_reason": "end_turn", + "stop_sequence": None, + "usage": {"input_tokens": 25, "output_tokens": 11}, + }, + id="run-5886ac5f-3c2e-49f5-8a44-b1e92808c929-0", + usage_metadata={ + "input_tokens": 25, + "output_tokens": 11, + "total_tokens": 36, + }, + ) + ``` + + Learn more about supported invocation methods in our [models](/oss/langchain/models#invocation) guide. -## Token counting - -You can count tokens in messages before sending them to the model using @[`get_num_tokens_from_messages()`][ChatAnthropic.get_num_tokens_from_messages]. This uses Anthropic's official [token counting API](https://platform.claude.com/docs/en/build-with-claude/token-counting). - -```python -from langchain_anthropic import ChatAnthropic -from langchain.messages import HumanMessage, SystemMessage - -model = ChatAnthropic(model="claude-sonnet-4-5-20250929") - -messages = [ - SystemMessage(content="You are a scientist"), - HumanMessage(content="Hello, Claude"), -] - -token_count = model.get_num_tokens_from_messages(messages) -print(token_count) -``` - -```output -14 -``` - -You can also count tokens when using tools: - -```python -from langchain.tools import tool - -@tool(parse_docstring=True) -def get_weather(location: str) -> str: - """Get the current weather in a given location - - Args: - location: The city and state, e.g. San Francisco, CA - """ - return "Sunny" - -messages = [ - HumanMessage(content="What's the weather like in San Francisco?"), -] - -token_count = model.get_num_tokens_from_messages(messages, tools=[get_weather]) -print(token_count) -``` - -```output -586 -``` ## Content blocks @@ -186,7 +210,7 @@ response = model_with_tools.invoke("Which city is hotter today: LA or NY?") response.content ``` -```output +```python [{'text': "I'll help you compare the temperatures of Los Angeles and New York by checking their current weather.
I'll retrieve the weather for both cities.", 'type': 'text'}, {'id': 'toolu_01CkMaXrgmsNjTso7so94RJq', @@ -212,7 +236,7 @@ You can also access tool calls specifically in a standard format using the response.tool_calls ``` -```output +```python [{'name': 'GetWeather', 'args': {'location': 'Los Angeles, CA'}, 'id': 'toolu_01Ddzj5PkuZkrjF4tafzu54A'}, @@ -231,6 +255,52 @@ See @[`ChatAnthropic.bind_tools`] for details on how to bind tools to your model For information about Claude's built-in tools (code execution, web browsing, files API, etc), see the [Built-in tools](#built-in-tools). +For example, you can define tool schemas as Pydantic models and bind them to the model: + +```python +from pydantic import BaseModel, Field + + +class GetWeather(BaseModel): + '''Get the current weather in a given location''' + + location: str = Field(..., description="The city and state, e.g. San Francisco, CA") + + +class GetPopulation(BaseModel): + '''Get the current population in a given location''' + + location: str = Field(..., description="The city and state, e.g. San Francisco, CA") + + +model_with_tools = model.bind_tools([GetWeather, GetPopulation]) # [!code highlight] +ai_msg = model_with_tools.invoke("Which city is hotter today and which is bigger: LA or NY?") +ai_msg.tool_calls +``` + +```python +[ + { + "name": "GetWeather", + "args": {"location": "Los Angeles, CA"}, + "id": "toolu_01KzpPEAgzura7hpBqwHbWdo", + }, + { + "name": "GetWeather", + "args": {"location": "New York, NY"}, + "id": "toolu_01JtgbVGVJbiSwtZk3Uycezx", + }, + { + "name": "GetPopulation", + "args": {"location": "Los Angeles, CA"}, + "id": "toolu_01429aygngesudV9nTbCKGuw", + }, + { + "name": "GetPopulation", + "args": {"location": "New York, NY"}, + "id": "toolu_01JPktyd44tVMeBcPPnFSEJG", + }, +] +``` + +### Strict tool use @@ -398,7 +468,7 @@ Anthropic supports a [token-efficient tool use](https://platform.claude.com/docs print(f"\nTotal tokens: {response.usage_metadata['total_tokens']}") ``` - ```output + ```python [{'name': 'get_weather', 'args': {'location': 'San Francisco'}, 'id': 'toolu_01EoeE1qYaePcmNbUvMsWtmA', 'type': 'tool_call'}] Total tokens: 408 ``` @@ -462,7 +532,7 @@ for chunk in model_with_tools.stream("Write a document about AI"): pass ``` -```output +```python Complete args: {'title': 'Artificial Intelligence: An Overview', 'content': '# Artificial Intelligence: An Overview... ``` @@ -644,38 +714,36 @@ To use extended thinking, specify the `thinking` parameter when initializing @[` You will need to specify a token budget to use this feature.
See usage example below: + ```python Initialization param + import json + from langchain_anthropic import ChatAnthropic -```python Init param -import json -from langchain_anthropic import ChatAnthropic - -model = ChatAnthropic( - model="claude-sonnet-4-5-20250929", - max_tokens=5000, - thinking={"type": "enabled", "budget_tokens": 2000}, # [!code highlight] -) - -response = model.invoke("What is the cube root of 50.653?") -print(json.dumps(response.content_blocks, indent=2)) -``` + model = ChatAnthropic( + model="claude-sonnet-4-5-20250929", + max_tokens=5000, + thinking={"type": "enabled", "budget_tokens": 2000}, # [!code highlight] + ) -```python Invocation param -import json -from langchain_anthropic import ChatAnthropic + response = model.invoke("What is the cube root of 50.653?") + print(json.dumps(response.content_blocks, indent=2)) + ``` -model = ChatAnthropic(model="claude-sonnet-4-5-20250929") + ```python Invocation param + import json + from langchain_anthropic import ChatAnthropic -response = model.invoke( - "What is the cube root of 50.653?", - max_tokens=5000, - thinking={"type": "enabled", "budget_tokens": 2000} # [!code highlight] -) -print(json.dumps(response.content_blocks, indent=2)) -``` + model = ChatAnthropic(model="claude-sonnet-4-5-20250929") + response = model.invoke( + "What is the cube root of 50.653?", + max_tokens=5000, + thinking={"type": "enabled", "budget_tokens": 2000} # [!code highlight] + ) + print(json.dumps(response.content_blocks, indent=2)) + ``` -```output +```json [ { "type": "reasoning", "extras": {"signature": "ErUBCkYIBxgCIkB0UjV..."} }, { - "type": "text", "text": "The cube root of 50.653 is approximately 3.6998.\n\nTo verify: 3.6998\u00b3 = 50.6530, which is very close to our original number.", } ] ``` + + The Claude Messages API handles thinking differently across Claude Sonnet 3.7 and Claude 4 models. + + Refer to the [Claude docs](https://platform.claude.com/docs/en/build-with-claude/extended-thinking#differences-in-thinking-across-model-versions) for more info. + + ## Effort Certain Claude models support an [effort](https://platform.claude.com/docs/en/build-with-claude/effort) feature, which controls how many tokens Claude uses when responding. This is useful for balancing response quality against latency and cost. ```python from langchain_anthropic import ChatAnthropic model = ChatAnthropic( model="claude-opus-4-5-20251101", - effort="medium", # [!code highlight] + effort="medium", # Options: "high", "medium", "low" [!code highlight] ) response = model.invoke("Analyze the trade-offs between microservices and monolithic architectures") ``` See the [Claude documentation](https://platform.claude.com/docs/en/build-with-claude/effort) for detail on when to use different effort levels and to see supported models. -## Prompt caching +## Citations -Anthropic supports [caching](https://platform.claude.com/docs/en/build-with-claude/prompt-caching) of elements of your prompts, including messages, tool definitions, tool results, images and documents. This allows you to re-use large documents, instructions, [few-shot documents](/langsmith/create-few-shot-evaluators), and other data to reduce latency and costs.
+Anthropic supports a [citations](https://platform.claude.com/docs/en/build-with-claude/citations) feature that lets Claude attach context to its answers based on source documents supplied by the user. -To enable caching on an element of a prompt, mark its associated content block using the `cache_control` key. See examples below: +When [document](https://platform.claude.com/docs/en/build-with-claude/citations#document-types) or `search_result` content blocks with `"citations": {"enabled": True}` are included in a query, Claude may generate citations in its response. - - Only certain Claude models support prompt caching. See the [Claude documentation](https://platform.claude.com/docs/en/build-with-claude/prompt-caching#supported-models) for details. - +### Simple example -### Messages +In this example we pass a [plain text document](https://platform.claude.com/docs/en/build-with-claude/citations#plain-text-documents). In the background, Claude [automatically chunks](https://platform.claude.com/docs/en/build-with-claude/citations#plain-text-documents) the input text into sentences, which are used when generating citations. -```python expandable -import requests +```python from langchain_anthropic import ChatAnthropic - -model = ChatAnthropic(model="claude-sonnet-4-5-20250929") - -# Pull LangChain readme -get_response = requests.get( - "https://raw.githubusercontent.com/langchain-ai/langchain/master/README.md" -) -readme = get_response.text +model = ChatAnthropic(model="claude-haiku-4-5-20251001") messages = [ { - "role": "system", + "role": "user", "content": [ { - "type": "text", - "text": "You are a technology expert.", - }, - { - "type": "text", - "text": f"{readme}", - "cache_control": {"type": "ephemeral"}, # [!code highlight] + "type": "document", + "source": { + "type": "text", + "media_type": "text/plain", + "data": "The grass is green. The sky is blue.", + }, + "title": "My Document", + "context": "This is a trustworthy document.", + "citations": {"enabled": True}, }, + {"type": "text", "text": "What color is the grass and sky?"}, ], - }, - { - "role": "user", - "content": "What's LangChain, according to its README?", - }, + } ] +response = model.invoke(messages) +response.content +``` -response_1 = model.invoke(messages) -response_2 = model.invoke(messages) - -usage_1 = response_1.usage_metadata["input_token_details"] -usage_2 = response_2.usage_metadata["input_token_details"] - -print(f"First invocation:\n{usage_1}") -print(f"\nSecond:\n{usage_2}") +```python +[{'text': 'Based on the document, ', 'type': 'text'}, + {'text': 'the grass is green', + 'type': 'text', + 'citations': [{'type': 'char_location', + 'cited_text': 'The grass is green. ', + 'document_index': 0, + 'document_title': 'My Document', + 'start_char_index': 0, + 'end_char_index': 20}]}, + {'text': ', and ', 'type': 'text'}, + {'text': 'the sky is blue', + 'type': 'text', + 'citations': [{'type': 'char_location', + 'cited_text': 'The sky is blue.', + 'document_index': 0, + 'document_title': 'My Document', + 'start_char_index': 20, + 'end_char_index': 36}]}, + {'text': '.', 'type': 'text'}] ``` -```output -First invocation: -{'cache_read': 0, 'cache_creation': 1458} +### In tool results (agentic RAG) -Second: -{'cache_read': 1458, 'cache_creation': 0} -``` +Claude supports a [search_result](https://platform.claude.com/docs/en/build-with-claude/search-results) content block representing citable results from queries against a knowledge base or other custom source. 
These content blocks can be passed to Claude both top-line (as in the above example) and within a tool result. This allows Claude to cite elements of its response using the result of a tool call. -To pass search results in response to tool calls, define a tool that returns a list of `search_result` content blocks in Anthropic's native format. For example: +```python +def retrieval_tool(query: str) -> list[dict]: + """Access my knowledge base.""" - # Run a search (e.g., with a LangChain vector store) + results = vector_store.similarity_search(query=query, k=2) - # Package results into search_result blocks + return [ + { + "type": "search_result", + # Customize fields as desired, using document metadata or otherwise + "title": "My Document Title", + "source": "Source description or provenance", + "citations": {"enabled": True}, + "content": [{"type": "text", "text": doc.page_content}], + } + for doc in results + ] +``` + + Here we demonstrate an end-to-end example in which we populate a LangChain [vector store](/oss/integrations/vectorstores/) with sample documents and equip Claude with a tool that queries those documents. - + The tool here takes a search query and a `category` string literal, but any valid tool signature can be used. - This example requires `langchain-openai` and `numpy` to be installed: - ```bash + pip install langchain-openai numpy + ``` - ```python + from typing import Literal + from langchain.chat_models import init_chat_model + from langchain.embeddings import init_embeddings + from langchain_core.documents import Document + from langchain_core.vectorstores import InMemoryVectorStore + from langgraph.checkpoint.memory import InMemorySaver + from langchain.agents import create_agent - # Set up vector store + # Ensure you set your OPENAI_API_KEY environment variable + embeddings = init_embeddings("openai:text-embedding-3-small") + vector_store = InMemoryVectorStore(embeddings)
+ document_1 = Document( + id="1", + page_content=( + "To request vacation days, submit a leave request form through the " + "HR portal. Approval will be sent by email." + ), + metadata={ + "category": "HR Policy", + "doc_title": "Leave Policy", + "provenance": "Leave Policy - page 1", + }, + ) + document_2 = Document( + id="2", + page_content="Managers will review vacation requests within 3 business days.", + metadata={ + "category": "HR Policy", + "doc_title": "Leave Policy", + "provenance": "Leave Policy - page 2", + }, + ) + document_3 = Document( + id="3", + page_content=( + "Employees with over 6 months tenure are eligible for 20 paid vacation days " + "per year." + ), + metadata={ + "category": "Benefits Policy", + "doc_title": "Benefits Guide 2025", + "provenance": "Benefits Policy - page 1", + }, + ) + + documents = [document_1, document_2, document_3] + vector_store.add_documents(documents=documents) + + + # Define tool + async def retrieval_tool( + query: str, category: Literal["HR Policy", "Benefits Policy"] + ) -> list[dict]: + """Access my knowledge base.""" + + def _filter_function(doc: Document) -> bool: + return doc.metadata.get("category") == category + + results = vector_store.similarity_search( + query=query, k=2, filter=_filter_function + ) + + return [ + { + "type": "search_result", + "title": doc.metadata["doc_title"], + "source": doc.metadata["provenance"], + "citations": {"enabled": True}, + "content": [{"type": "text", "text": doc.page_content}], + } + for doc in results + ] + + + + # Create agent + model = init_chat_model("claude-haiku-4-5-20251001") + + checkpointer = InMemorySaver() + agent = create_agent(model, [retrieval_tool], checkpointer=checkpointer) + + + # Invoke on a query + config = {"configurable": {"thread_id": "session_1"}} + + input_message = { + "role": "user", + "content": "How do I request vacation days?", + } + async for step in agent.astream( + {"messages": [input_message]}, + config, + stream_mode="values", + ): + step["messages"][-1].pretty_print() + ``` + + +### Using with text splitters + +Anthropic also lets you specify your own splits using [custom document](https://platform.claude.com/docs/en/build-with-claude/citations#custom-content-documents) types. LangChain [text splitters](/oss/integrations/splitters/) can be used to generate meaningful splits for this purpose. 
See the example below, where we split the LangChain `README.md` (a markdown document) and pass it to Claude as context: +This example requires @[`langchain-text-splitters`] to be installed: + +```bash +pip install langchain-text-splitters +``` + +```python expandable +import requests +from langchain_anthropic import ChatAnthropic +from langchain_text_splitters import MarkdownTextSplitter + + +def format_to_anthropic_documents(documents: list[str]): +    return { +        "type": "document", +        "source": { +            "type": "content", +            "content": [{"type": "text", "text": document} for document in documents], +        }, +        "citations": {"enabled": True}, +    } + + +# Pull readme +get_response = requests.get( +    "https://raw.githubusercontent.com/langchain-ai/langchain/master/README.md" +) +readme = get_response.text + +# Split into chunks +splitter = MarkdownTextSplitter( +    chunk_overlap=0, +    chunk_size=50, +) +documents = splitter.split_text(readme) + +# Construct message +message = { +    "role": "user", +    "content": [ +        format_to_anthropic_documents(documents), +        {"type": "text", "text": "Give me a link to LangChain's tutorials."}, +    ], +} + +# Query model +model = ChatAnthropic(model="claude-haiku-4-5-20251001") +response = model.invoke([message]) +``` + +## Prompt caching + +Anthropic supports [caching](https://platform.claude.com/docs/en/build-with-claude/prompt-caching) of elements of your prompts, including messages, tool definitions, tool results, images and documents. This allows you to re-use large documents, instructions, [few-shot documents](/langsmith/create-few-shot-evaluators), and other data to reduce latency and costs. + +To enable caching on an element of a prompt, mark its associated content block using the `cache_control` key. See examples below: + + + Only certain Claude models support prompt caching. See the [Claude documentation](https://platform.claude.com/docs/en/build-with-claude/prompt-caching#supported-models) for details. + + +### Messages + +```python expandable +import requests +from langchain_anthropic import ChatAnthropic + + +model = ChatAnthropic(model="claude-sonnet-4-5-20250929") + +# Pull LangChain readme +get_response = requests.get( +    "https://raw.githubusercontent.com/langchain-ai/langchain/master/README.md" +) +readme = get_response.text + +messages = [ +    { +        "role": "system", +        "content": [ +            { +                "type": "text", +                "text": "You are a technology expert.", +            }, +            { +                "type": "text", +                "text": f"{readme}", +                "cache_control": {"type": "ephemeral"}, # [!code highlight] +            }, +        ], +    }, +    { +        "role": "user", +        "content": "What's LangChain, according to its README?", +    }, +] + +response_1 = model.invoke(messages) +response_2 = model.invoke(messages) + +usage_1 = response_1.usage_metadata["input_token_details"] +usage_2 = response_2.usage_metadata["input_token_details"] + +print(f"First invocation:\n{usage_1}") +print(f"\nSecond:\n{usage_2}") +``` + +```python +First invocation: +{'cache_read': 0, 'cache_creation': 1458} + +Second: +{'cache_read': 1458, 'cache_creation': 0} +``` + +Alternatively, you can enable prompt caching at invocation time. This is useful for app-level decisions about what to cache based on runtime conditions, such as the length of the context. + +```python +response = model.invoke( +    messages, +    cache_control={"type": "ephemeral"}, # [!code highlight] +) +``` + + + **Extended caching** + + The cache lifetime is 5 minutes by default.
If this is too short, you can apply one hour caching by enabling the `"extended-cache-ttl-2025-04-11"` beta header and specifying `"cache_control": {"type": "ephemeral", "ttl": "1h"}` on the message. + + + ```python + model = ChatAnthropic( + model="claude-sonnet-4-5-20250929", + betas=["extended-cache-ttl-2025-04-11"], # [!code highlight] + ) + + messages = [ + { + "role": "user", + "content": [ + { + "type": "text", + "text": f"{long_text}", + "cache_control": {"type": "ephemeral", "ttl": "1h"}, # [!code highlight] + }, + ], + } + ] + ``` + + Details of cached token counts will be included on the @[`InputTokenDetails`] of response's @[`usage_metadata`][UsageMetadata]: + + ```python + response = model.invoke(messages) + response.usage_metadata + ``` + ```json + { + "input_tokens": 1500, + "output_tokens": 200, + "total_tokens": 1700, + "input_token_details": { + "cache_read": 0, + "cache_creation": 1000, + "ephemeral_1h_input_tokens": 750, + "ephemeral_5m_input_tokens": 250, + } + } + ``` + + + + +### Caching tools + +```python expandable +from langchain_anthropic import ChatAnthropic +from langchain.tools import tool + + +# For demonstration purposes, we artificially expand the +# tool description. +description = ( + "Get the weather at a location. " + f"By the way, check out this readme: {readme}" +) + + +@tool(description=description, extras={"cache_control": {"type": "ephemeral"}}) # [!code highlight] +def get_weather(location: str) -> str: + return "It's sunny." model = ChatAnthropic(model="claude-sonnet-4-5-20250929") @@ -855,7 +1197,7 @@ print(f"First invocation:\n{usage_1}") print(f"\nSecond:\n{usage_2}") ``` -```output +```python First invocation: {'cache_read': 0, 'cache_creation': 1809} @@ -934,7 +1276,7 @@ Below, we implement a simple chatbot that incorporates this feature. We follow t print(f"\n{output['messages'][-1].usage_metadata['input_token_details']}") ``` - ```output + ```python ================================== Ai Message ================================== Hello, Bob! It's nice to meet you. How are you doing today? Is there something I can help you with? @@ -998,260 +1340,63 @@ Below, we implement a simple chatbot that incorporates this feature. We follow t In the [LangSmith trace](https://smith.langchain.com/public/4d0584d8-5f9e-4b91-8704-93ba2ccf416a/r), toggling "raw output" will show exactly what messages are sent to the chat model, including `cache_control` keys. -## Citations - -Anthropic supports a [citations](https://platform.claude.com/docs/en/build-with-claude/citations) feature that lets Claude attach context to its answers based on source documents supplied by the user. - -When [document](https://platform.claude.com/docs/en/build-with-claude/citations#document-types) or `search_result` content blocks with `"citations": {"enabled": True}` are included in a query, Claude may generate citations in its response. - -### Simple example - -In this example we pass a [plain text document](https://platform.claude.com/docs/en/build-with-claude/citations#plain-text-documents). In the background, Claude [automatically chunks](https://platform.claude.com/docs/en/build-with-claude/citations#plain-text-documents) the input text into sentences, which are used when generating citations. - -```python -from langchain_anthropic import ChatAnthropic - -model = ChatAnthropic(model="claude-haiku-4-5-20251001") - -messages = [ - { - "role": "user", - "content": [ - { - "type": "document", - "source": { - "type": "text", - "media_type": "text/plain", - "data": "The grass is green. 
The sky is blue.", - }, - "title": "My Document", - "context": "This is a trustworthy document.", - "citations": {"enabled": True}, - }, - {"type": "text", "text": "What color is the grass and sky?"}, - ], - } -] -response = model.invoke(messages) -response.content -``` - -```output -[{'text': 'Based on the document, ', 'type': 'text'}, - {'text': 'the grass is green', - 'type': 'text', - 'citations': [{'type': 'char_location', - 'cited_text': 'The grass is green. ', - 'document_index': 0, - 'document_title': 'My Document', - 'start_char_index': 0, - 'end_char_index': 20}]}, - {'text': ', and ', 'type': 'text'}, - {'text': 'the sky is blue', - 'type': 'text', - 'citations': [{'type': 'char_location', - 'cited_text': 'The sky is blue.', - 'document_index': 0, - 'document_title': 'My Document', - 'start_char_index': 20, - 'end_char_index': 36}]}, - {'text': '.', 'type': 'text'}] -``` - -### In tool results (agentic RAG) - -Claude supports a [search_result](https://platform.claude.com/docs/en/build-with-claude/search-results) content block representing citable results from queries against a knowledge base or other custom source. These content blocks can be passed to claude both top-line (as in the above example) and within a tool result. This allows Claude to cite elements of its response using the result of a tool call. - -To pass search results in response to tool calls, define a tool that returns a list of `search_result` content blocks in Anthropic's native format. For example: - -```python -def retrieval_tool(query: str) -> list[dict]: - """Access my knowledge base.""" - - # Run a search (e.g., with a LangChain vector store) - results = vector_store.similarity_search(query=query, k=2) - - # Package results into search_result blocks - return [ - { - "type": "search_result", - # Customize fields as desired, using document metadata or otherwise - "title": "My Document Title", - "source": "Source description or provenance", - "citations": {"enabled": True}, - "content": [{"type": "text", "text": doc.page_content}], - } - for doc in results - ] -``` - - - Here we demonstrate an end-to-end example in which we populate a LangChain [vector store](/oss/integrations/vectorstores/) with sample documents and equip Claude with a tool that queries those documents. - - The tool here takes a search query and a `category` string literal, but any valid tool signature can be used. - - This example requires `langchain-openai` and `numpy` to be installed: - - ```bash - pip install langchain-openai numpy - ``` - - ```python - from typing import Literal - - from langchain.chat_models import init_chat_model - from langchain.embeddings import init_embeddings - from langchain_core.documents import Document - from langchain_core.vectorstores import InMemoryVectorStore - from langgraph.checkpoint.memory import InMemorySaver - from langchain.agents import create_agent - - - # Set up vector store - # Ensure you set your OPENAI_API_KEY environment variable - embeddings = init_embeddings("openai:text-embedding-3-small") - vector_store = InMemoryVectorStore(embeddings) - - document_1 = Document( - id="1", - page_content=( - "To request vacation days, submit a leave request form through the " - "HR portal. Approval will be sent by email." 
- ), - metadata={ - "category": "HR Policy", - "doc_title": "Leave Policy", - "provenance": "Leave Policy - page 1", - }, - ) - document_2 = Document( - id="2", - page_content="Managers will review vacation requests within 3 business days.", - metadata={ - "category": "HR Policy", - "doc_title": "Leave Policy", - "provenance": "Leave Policy - page 2", - }, - ) - document_3 = Document( - id="3", - page_content=( - "Employees with over 6 months tenure are eligible for 20 paid vacation days " - "per year." - ), - metadata={ - "category": "Benefits Policy", - "doc_title": "Benefits Guide 2025", - "provenance": "Benefits Policy - page 1", - }, - ) - - documents = [document_1, document_2, document_3] - vector_store.add_documents(documents=documents) - +## Token counting - # Define tool - async def retrieval_tool( - query: str, category: Literal["HR Policy", "Benefits Policy"] - ) -> list[dict]: - """Access my knowledge base.""" +You can count tokens in messages before sending them to the model using @[`get_num_tokens_from_messages()`][ChatAnthropic.get_num_tokens_from_messages]. This uses Anthropic's official [token counting API](https://platform.claude.com/docs/en/build-with-claude/token-counting). - def _filter_function(doc: Document) -> bool: - return doc.metadata.get("category") == category + + + ```python + from langchain_anthropic import ChatAnthropic + from langchain.messages import HumanMessage, SystemMessage - results = vector_store.similarity_search( - query=query, k=2, filter=_filter_function - ) + model = ChatAnthropic(model="claude-sonnet-4-5-20250929") - return [ - { - "type": "search_result", - "title": doc.metadata["doc_title"], - "source": doc.metadata["provenance"], - "citations": {"enabled": True}, - "content": [{"type": "text", "text": doc.page_content}], - } - for doc in results + messages = [ + SystemMessage(content="You are a scientist"), + HumanMessage(content="Hello, Claude"), ] + token_count = model.get_num_tokens_from_messages(messages) + print(token_count) + ``` + + ```output + 14 + ``` + + + You can also count tokens when using tools: + + ```python + from langchain.tools import tool + + @tool(parse_docstring=True) + def get_weather(location: str) -> str: + """Get the current weather in a given location + + Args: + location: The city and state, e.g. San Francisco, CA + """ + return "Sunny" + + messages = [ + HumanMessage(content="What's the weather like in San Francisco?"), + ] + token_count = model.get_num_tokens_from_messages(messages, tools=[get_weather]) + print(token_count) + ``` - # Create agent - model = init_chat_model("claude-haiku-4-5-20251001") - - checkpointer = InMemorySaver() - agent = create_agent(model, [retrieval_tool], checkpointer=checkpointer) - - - # Invoke on a query - config = {"configurable": {"thread_id": "session_1"}} - - input_message = { - "role": "user", - "content": "How do I request vacation days?", - } - async for step in agent.astream( - {"messages": [input_message]}, - config, - stream_mode="values", - ): - step["messages"][-1].pretty_print() - ``` - - -### Using with text splitters - -Anthropic also lets you specify your own splits using [custom document](https://platform.claude.com/docs/en/build-with-claude/citations#custom-content-documents) types. LangChain [text splitters](/oss/integrations/splitters/) can be used to generate meaningful splits for this purpose. 
See the below example, where we split the LangChain `README.md` (a markdown document) and pass it to Claude as context: - -This example requires @[`langchain-text-splitters`] to be installed: - -```bash -pip install langchain-text-splitters -``` - -```python expandable -import requests -from langchain_anthropic import ChatAnthropic -from langchain_text_splitters import MarkdownTextSplitter - - -def format_to_anthropic_documents(documents: list[str]): - return { - "type": "document", - "source": { - "type": "content", - "content": [{"type": "text", "text": document} for document in documents], - }, - "citations": {"enabled": True}, - } - - -# Pull readme -get_response = requests.get( - "https://raw.githubusercontent.com/langchain-ai/langchain/master/README.md" -) -readme = get_response.text - -# Split into chunks -splitter = MarkdownTextSplitter( - chunk_overlap=0, - chunk_size=50, -) -documents = splitter.split_text(readme) - -# Construct message -message = { - "role": "user", - "content": [ - format_to_anthropic_documents(documents), - {"type": "text", "text": "Give me a link to LangChain's tutorials."}, - ], -} - -# Query model -model = ChatAnthropic(model="claude-haiku-4-5-20251001") -response = model.invoke([message]) -``` + ```output + 586 + ``` + + ## Context management @@ -1349,7 +1494,7 @@ response = model_with_structure.invoke("Provide details about the movie Inceptio response ``` -```output +```python Movie(title='Inception', year=2010, director='Christopher Nolan', rating=8.8) ``` @@ -1385,19 +1530,39 @@ result = agent.invoke({ result["structured_response"] ``` -```output +```python Weather(temperature=75.0, condition='Sunny') ``` - + + +## Built-in tools + +Anthropic supports a variety of built-in client and server-side [tools](/oss/langchain/tools/). + +Server-side tools (e.g., [web search](#web-search)) are passed to the model and executed by Anthropic. Client-side tools (e.g., [bash tool](#bash-tool)) require you to implement the callback execution logic in your application and return results to the model. + +In either case, you make tools accessible to your chat model by using @[`bind_tools`][ChatAnthropic.bind_tools] on the model instance. + +Importantly, client-side tools require you to implement the execution logic. See the relevant sections below for examples. + + + **Middleware vs tools** + + For client-side tools (e.g. [bash](#bash-tool), [text editor](#text-editor), [memory](#memory-tool)), you may opt to use [middleware](/oss/integrations/middleware/anthropic), which provide production-ready implementations that contain built-in execution, state management, and security policies. + + Use middleware when you want a turnkey solution; use tools (documented below) when you need custom execution logic or want to use @[`bind_tools`][ChatAnthropic.bind_tools] directly. + -## Built-in tools + + **Beta tools** -Anthropic supports a variety of [built-in tools](https://platform.claude.com/docs/en/agents-and-tools/tool-use/text-editor-tool), which can be bound to the model in the [usual way](/oss/langchain/tools/). Claude will generate tool calls adhering to its internal schema for the tool. + If binding a beta tool to your chat model, LangChain will automatically add the required beta header for you. + ### Bash tool -Claude supports a [bash tool](https://platform.claude.com/docs/en/agents-and-tools/tool-use/bash-tool) that allows it to execute shell commands in a persistent bash session. This enables system operations, script execution, and command-line automation. 
+Claude supports a client-side [bash tool](https://platform.claude.com/docs/en/agents-and-tools/tool-use/bash-tool) that allows it to execute shell commands in a persistent bash session. This enables system operations, script execution, and command-line automation.

 **Important: You must provide the execution environment**

@@ -1417,40 +1582,149 @@ Claude supports a [bash tool](https://platform.claude.com/docs/en/agents-and-too

 - Claude 4 models or Claude Sonnet 3.7

-```python
-from langchain_anthropic import ChatAnthropic
+
+
+    ```python expandable
+    import subprocess

-model = ChatAnthropic(model="claude-sonnet-4-5-20250929")
+    from anthropic.types.beta import BetaToolBash20250124Param # [!code highlight]
+    from langchain_anthropic import ChatAnthropic
+    from langchain.messages import HumanMessage, ToolMessage
+    from langchain.tools import tool

-bash_tool = {
-    "type": "bash_20250124",
-    "name": "bash",
-}
+    tool_spec = BetaToolBash20250124Param( # [!code highlight]
+        name="bash", # [!code highlight]
+        type="bash_20250124", # [!code highlight]
+    ) # [!code highlight]

-model_with_bash = model.bind_tools([bash_tool])
-response = model_with_bash.invoke(
-    "List all Python files in the current directory"
-)
-```
-`response.tool_calls` will contain the bash command Claude wants to execute. You must run this command in your environment and pass the result back.
+
+    @tool(extras={"provider_tool_definition": tool_spec}) # [!code highlight]
+    def bash(*, command: str, restart: bool = False, **kw):
+        """Execute a bash command."""
+        if restart:
+            return "Bash session restarted"
+        try:
+            result = subprocess.run(
+                command,
+                shell=True,
+                capture_output=True,
+                text=True,
+                timeout=30,
+            )
+            return result.stdout + result.stderr
+        except Exception as e:
+            return f"Error: {e}"
+
+
+    model = ChatAnthropic(model="claude-sonnet-4-5-20250929")
+    model_with_bash = model.bind_tools([bash]) # [!code highlight]
+
+    # Initial request
+    messages = [HumanMessage("List all files in the current directory")]
+    response = model_with_bash.invoke(messages)
+    print(response.content_blocks)
+
+    # Tool execution loop
+    while response.tool_calls:
+        # Execute each tool call
+        tool_messages = []
+        for tool_call in response.tool_calls:
+            result = bash.invoke(tool_call)
+            tool_messages.append(result)
+
+        # Continue conversation with tool results
+        messages = [*messages, response, *tool_messages]
+        response = model_with_bash.invoke(messages)
+        print(response.content_blocks)
+    ```
+
+
+    ```python expandable
+    import subprocess
+
+    from anthropic.types.beta import BetaToolBash20250124Param # [!code highlight]
+    from langchain.agents import create_agent
+    from langchain_anthropic import ChatAnthropic
+    from langchain.tools import tool
+
+    tool_spec = BetaToolBash20250124Param( # [!code highlight]
+        name="bash", # [!code highlight]
+        type="bash_20250124", # [!code highlight]
+    ) # [!code highlight]
+
+
+    @tool(extras={"provider_tool_definition": tool_spec}) # [!code highlight]
+    def bash(*, command: str, restart: bool = False, **kw):
+        """Execute a bash command."""
+        if restart:
+            return "Bash session restarted"
+        result = subprocess.run(
+            command,
+            shell=True,
+            capture_output=True,
+            text=True,
+        )
+        return result.stdout + result.stderr
+
+
+    agent = create_agent(
+        model=ChatAnthropic(model="claude-sonnet-4-5-20250929"),
+        tools=[bash], # [!code highlight]
+    )
+
+    result = agent.invoke({"messages": [{"role": "user", "content": "List files"}]})
+
+    for message in result["messages"]:
+        message.pretty_print()
+    ```
+
+
+
+    
```python + from langchain_anthropic import ChatAnthropic + + model = ChatAnthropic(model="claude-sonnet-4-5-20250929") + + bash_tool = { # [!code highlight] + "type": "bash_20250124", # [!code highlight] + "name": "bash", # [!code highlight] + } # [!code highlight] + + model_with_bash = model.bind_tools([bash_tool]) # [!code highlight] + response = model_with_bash.invoke( + "List all Python files in the current directory" + ) + # You must handle execution of the bash command in response.tool_calls via a tool execution loop + ``` + + Using @[`create_agent`] handles the tool execution loop automatically. + + `response.tool_calls` will contain the bash command Claude wants to execute. You must run this command in your environment and pass the result back. + + ```python + [{'type': 'text', + 'text': "I'll list the Python files in the current directory for you."}, + {'type': 'tool_call', + 'name': 'bash', + 'args': {'command': 'ls -la *.py'}, + 'id': 'toolu_01ABC123...'}] + ``` + + -```output -[{'type': 'text', - 'text': "I'll list the Python files in the current directory for you."}, - {'type': 'tool_call', - 'name': 'bash', - 'args': {'command': 'ls -la *.py'}, - 'id': 'toolu_01ABC123...'}] -``` The bash tool supports two parameters: - `command` (required): The bash command to execute - `restart` (optional): Set to `true` to restart the bash session + + For a "batteries-included" implementation, consider using [`ClaudeBashToolMiddleware`](/oss/integrations/middleware/anthropic#bash-tool) which provides persistent sessions, Docker isolation, output redaction, and startup/shutdown commands out of the box. + + ### Code execution -Claude can use a [code execution tool](https://platform.claude.com/docs/en/agents-and-tools/tool-use/code-execution-tool) to execute code in a sandboxed environment. +Claude can use a server-side [code execution tool](https://platform.claude.com/docs/en/agents-and-tools/tool-use/code-execution-tool) to execute code in a sandboxed environment. Anthropic's `2025-08-25` code execution tools are supported since `langchain-anthropic>=1.0.3`. @@ -1462,91 +1736,143 @@ Claude can use a [code execution tool](https://platform.claude.com/docs/en/agent The code sandbox does not have internet access, thus you may only use packages that are pre-installed in the environment. See the [Claude docs](https://platform.claude.com/docs/en/agents-and-tools/tool-use/code-execution-tool#networking-and-security) for more info. 
-```python -from langchain_anthropic import ChatAnthropic + + + ```python + from anthropic.types.beta import BetaCodeExecutionTool20250825Param # [!code highlight] + from langchain_anthropic import ChatAnthropic -model = ChatAnthropic( - model="claude-sonnet-4-5-20250929", -) + model = ChatAnthropic( + model="claude-sonnet-4-5-20250929", + ) -tool = {"type": "code_execution_20250825", "name": "code_execution"} # [!code highlight] -model_with_tools = model.bind_tools([tool]) + code_tool = BetaCodeExecutionTool20250825Param( # [!code highlight] + name="code_execution", # [!code highlight] + type="code_execution_20250825", # [!code highlight] + ) # [!code highlight] + model_with_tools = model.bind_tools([code_tool]) # [!code highlight] -response = model_with_tools.invoke( - "Calculate the mean and standard deviation of [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]" -) -``` + response = model_with_tools.invoke( + "Calculate the mean and standard deviation of [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]" + ) + ``` + + + + ```python + from anthropic.types.beta import BetaCodeExecutionTool20250825Param # [!code highlight] + from langchain.agents import create_agent + from langchain_anthropic import ChatAnthropic + + code_tool = BetaCodeExecutionTool20250825Param( # [!code highlight] + name="code_execution", # [!code highlight] + type="code_execution_20250825", # [!code highlight] + ) # [!code highlight] + + agent = create_agent( + model=ChatAnthropic(model="claude-sonnet-4-5-20250929"), + tools=[code_tool], # [!code highlight] + ) - + result = agent.invoke({ + "messages": [{"role": "user", "content": "Calculate mean and std of [1,2,3,4,5]"}] + }) -Using the Files API, Claude can write code to access files for data analysis and other purposes. See example below: + for message in result["messages"]: + message.pretty_print() + ``` + -```python -import anthropic -from langchain_anthropic import ChatAnthropic + + ```python + from langchain_anthropic import ChatAnthropic + model = ChatAnthropic( + model="claude-sonnet-4-5-20250929", + ) -client = anthropic.Anthropic() -file = client.beta.files.upload( - file=open("/path/to/sample_data.csv", "rb") -) -file_id = file.id + code_tool = {"type": "code_execution_20250825", "name": "code_execution"} # [!code highlight] + model_with_tools = model.bind_tools([code_tool]) + response = model_with_tools.invoke( + "Calculate the mean and standard deviation of [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]" + ) + ``` + + -# Run inference -model = ChatAnthropic( - model="claude-sonnet-4-5-20250929", -) + + Using the Files API, Claude can write code to access files for data analysis and other purposes. See example below: -tool = {"type": "code_execution_20250825", "name": "code_execution"} # [!code highlight] -model_with_tools = model.bind_tools([tool]) + ```python + import anthropic + from anthropic.types.beta import BetaCodeExecutionTool20250825Param # [!code highlight] + from langchain_anthropic import ChatAnthropic -input_message = { - "role": "user", - "content": [ - { - "type": "text", - "text": "Please plot these data and tell me what you see.", - }, - { - "type": "container_upload", - "file_id": file_id, - }, - ] -} -response = model_with_tools.invoke([input_message]) -``` -Note that Claude may generate files as part of its code execution. 
You can access these files using the Files API: + client = anthropic.Anthropic() + file = client.beta.files.upload( + file=open("/path/to/sample_data.csv", "rb") + ) + file_id = file.id -```python -# Take all file outputs for demonstration purposes -file_ids = [] -for block in response.content: - if block["type"] == "bash_code_execution_tool_result": - file_ids.extend( - content["file_id"] - for content in block.get("content", {}).get("content", []) - if "file_id" in content - ) -for i, file_id in enumerate(file_ids): - file_content = client.beta.files.download(file_id) - file_content.write_to_file(f"/path/to/file_{i}.png") -``` + # Run inference + model = ChatAnthropic( + model="claude-sonnet-4-5-20250929", + ) - - **Available tool versions:** + code_tool = BetaCodeExecutionTool20250825Param( # [!code highlight] + name="code_execution", # [!code highlight] + type="code_execution_20250825", # [!code highlight] + ) # [!code highlight] + model_with_tools = model.bind_tools([code_tool]) - - `code_execution_20250522` (legacy) - - `code_execution_20250825` (recommended) - + input_message = { + "role": "user", + "content": [ + { + "type": "text", + "text": "Please plot these data and tell me what you see.", + }, + { + "type": "container_upload", + "file_id": file_id, + }, + ] + } + response = model_with_tools.invoke([input_message]) + ``` + + Note that Claude may generate files as part of its code execution. You can access these files using the Files API: + + ```python + # Take all file outputs for demonstration purposes + file_ids = [] + for block in response.content: + if block["type"] == "bash_code_execution_tool_result": + file_ids.extend( + content["file_id"] + for content in block.get("content", {}).get("content", []) + if "file_id" in content + ) + + for i, file_id in enumerate(file_ids): + file_content = client.beta.files.download(file_id) + file_content.write_to_file(f"/path/to/file_{i}.png") + ``` + + + **Available tool versions:** + - `code_execution_20250522` (legacy) + - `code_execution_20250825` (recommended) + ### Computer use -Claude supports [computer use](https://platform.claude.com/docs/en/agents-and-tools/tool-use/computer-use-tool) capabilities, allowing it to interact with desktop environments through screenshots, mouse control, and keyboard input. +Claude supports client-side [computer use](https://platform.claude.com/docs/en/agents-and-tools/tool-use/computer-use-tool) capabilities, allowing it to interact with desktop environments through screenshots, mouse control, and keyboard input. 
**Important: You must provide the execution environment** @@ -1566,36 +1892,165 @@ Claude supports [computer use](https://platform.claude.com/docs/en/agents-and-to - Claude Opus 4.5, Claude 4, or Claude Sonnet 3.7 -```python -from langchain_anthropic import ChatAnthropic + + + ```python expandable + import base64 + from typing import Literal + + from anthropic.types.beta import BetaToolComputerUse20250124Param # [!code highlight] + from langchain_anthropic import ChatAnthropic + from langchain.messages import HumanMessage, ToolMessage + from langchain.tools import tool + + DISPLAY_WIDTH = 1024 + DISPLAY_HEIGHT = 768 + + tool_spec = BetaToolComputerUse20250124Param( # [!code highlight] + name="computer", # [!code highlight] + type="computer_20250124", # [!code highlight] + display_width_px=DISPLAY_WIDTH, # [!code highlight] + display_height_px=DISPLAY_HEIGHT, # [!code highlight] + display_number=1, # [!code highlight] + ) # [!code highlight] + + @tool(extras={"provider_tool_definition": tool_spec}) # [!code highlight] + def computer( + *, + action: Literal[ + "key", "type", "mouse_move", "left_click", "left_click_drag", + "right_click", "middle_click", "double_click", "screenshot", + "cursor_position", "scroll" + ], + coordinate: list[int] | None = None, + text: str | None = None, + **kw + ): + """Control the computer display.""" + if action == "screenshot": + # Take screenshot and return base64-encoded image + # Implementation depends on your display setup (e.g., Xvfb, pyautogui) + return {"type": "image", "data": "base64_screenshot_data..."} + elif action == "left_click" and coordinate: + # Execute click at coordinate + return f"Clicked at {coordinate}" + elif action == "type" and text: + # Type text + return f"Typed: {text}" + # ... implement other actions + return f"Executed {action}" + + model = ChatAnthropic(model="claude-sonnet-4-5-20250929") + model_with_computer = model.bind_tools([computer]) # [!code highlight] + + # Initial request + messages = [HumanMessage("Take a screenshot to see what's on the screen")] + response = model_with_computer.invoke(messages) + print(response.content_blocks) + + # Tool execution loop + while response.tool_calls: + tool_messages = [] + for tool_call in response.tool_calls: + result = computer.invoke(tool_call["args"]) + tool_messages.append( + ToolMessage(content=str(result), tool_call_id=tool_call["id"]) + ) + + messages = [*messages, response, *tool_messages] + response = model_with_computer.invoke(messages) + print(response.content_blocks) + ``` + + + + ```python expandable + from typing import Literal + + from anthropic.types.beta import BetaToolComputerUse20250124Param # [!code highlight] + from langchain.agents import create_agent + from langchain_anthropic import ChatAnthropic + from langchain.tools import tool + + tool_spec = BetaToolComputerUse20250124Param( # [!code highlight] + name="computer", # [!code highlight] + type="computer_20250124", # [!code highlight] + display_width_px=1024, # [!code highlight] + display_height_px=768, # [!code highlight] + ) # [!code highlight] + + + @tool(extras={"provider_tool_definition": tool_spec}) # [!code highlight] + def computer( + *, + action: Literal[ + "key", "type", "mouse_move", "left_click", "left_click_drag", + "right_click", "middle_click", "double_click", "screenshot", + "cursor_position", "scroll" + ], + coordinate: list[int] | None = None, + text: str | None = None, + **kw + ): + """Control the computer display.""" + if action == "screenshot": + return {"type": "image", "data": 
"base64_screenshot_data..."} + elif action == "left_click" and coordinate: + return f"Clicked at {coordinate}" + elif action == "type" and text: + return f"Typed: {text}" + return f"Executed {action}" + + + agent = create_agent( + model=ChatAnthropic(model="claude-sonnet-4-5-20250929"), + tools=[computer], # [!code highlight] + ) -model = ChatAnthropic(model="claude-sonnet-4-5-20250929") + result = agent.invoke({ + "messages": [{"role": "user", "content": "Take a screenshot"}] + }) -# LangChain handles the API call and tool binding -computer_tool = { - "type": "computer_20250124", - "name": "computer", - "display_width_px": 1024, - "display_height_px": 768, - "display_number": 1, -} + for message in result["messages"]: + message.pretty_print() + ``` + -model_with_computer = model.bind_tools([computer_tool]) -response = model_with_computer.invoke( - "Take a screenshot to see what's on the screen" -) -``` + + ```python + from langchain_anthropic import ChatAnthropic -`response.tool_calls` will contain the computer action Claude wants to perform. You must execute this action in your environment and pass the result back. + model = ChatAnthropic(model="claude-sonnet-4-5-20250929") -```output -[{'type': 'text', - 'text': "I'll take a screenshot to see what's currently on the screen."}, - {'type': 'tool_call', - 'name': 'computer', - 'args': {'action': 'screenshot'}, - 'id': 'toolu_01RNsqAE7dDZujELtacNeYv9'}] -``` + computer_tool = { + "type": "computer_20250124", + "name": "computer", + "display_width_px": 1024, + "display_height_px": 768, + "display_number": 1, + } + + model_with_computer = model.bind_tools([computer_tool]) # [!code highlight] + response = model_with_computer.invoke( + "Take a screenshot to see what's on the screen" + ) + # You must handle execution of the computer actions in response.tool_calls via a tool execution loop + ``` + + Using @[`create_agent`] handles the tool execution loop automatically. + + `response.tool_calls` will contain the computer action Claude wants to perform. You must execute this action in your environment and pass the result back. + + ```python + [{'type': 'text', + 'text': "I'll take a screenshot to see what's currently on the screen."}, + {'type': 'tool_call', + 'name': 'computer', + 'args': {'action': 'screenshot'}, + 'id': 'toolu_01RNsqAE7dDZujELtacNeYv9'}] + ``` + + **Available tool versions:** @@ -1606,69 +2061,274 @@ response = model_with_computer.invoke( ### Remote MCP -Claude can use a [MCP connector tool](https://platform.claude.com/docs/en/agents-and-tools/mcp-connector) for model-generated calls to remote MCP servers. +Claude can use a server-side [MCP connector tool](https://platform.claude.com/docs/en/agents-and-tools/mcp-connector) for model-generated calls to remote MCP servers. 
**Remote MCP is supported since `langchain-anthropic>=0.3.14`**

-```python
-from langchain_anthropic import ChatAnthropic
+
+
+    ```python
+    from anthropic.types.beta import BetaMCPToolsetParam # [!code highlight]
+    from langchain_anthropic import ChatAnthropic

-mcp_servers = [
-    {
-        "type": "url",
-        "url": "https://docs.langchain.com/mcp",
-        "name": "LangChain Docs",
-        # "tool_configuration": { # optional configuration
-        #     "enabled": True,
-        #     "allowed_tools": ["ask_question"],
-        # },
-        # "authorization_token": "PLACEHOLDER", # optional authorization if needed
-    }
-]
+    mcp_servers = [
+        {
+            "type": "url",
+            "url": "https://docs.langchain.com/mcp",
+            "name": "LangChain Docs",
+        }
+    ]

-model = ChatAnthropic(
-    model="claude-sonnet-4-5-20250929",
-    mcp_servers=mcp_servers, # [!code highlight]
-)
+    model = ChatAnthropic(
+        model="claude-sonnet-4-5-20250929",
+        mcp_servers=mcp_servers, # [!code highlight]
+    )

-response = model.invoke(
-    "What are LangChain content blocks?",
-    tools=[{"type": "mcp_toolset", "mcp_server_name": "LangChain Docs"}], # [!code highlight]
-)
-response.content_blocks
-```
+    mcp_tool = BetaMCPToolsetParam( # [!code highlight]
+        type="mcp_toolset", # [!code highlight]
+        mcp_server_name="LangChain Docs", # [!code highlight]
+    ) # [!code highlight]
+
+    response = model.invoke(
+        "What are LangChain content blocks?",
+        tools=[mcp_tool], # [!code highlight]
+    )
+    ```
+
+
+    ```python
+    from anthropic.types.beta import BetaMCPToolsetParam # [!code highlight]
+    from langchain.agents import create_agent
+    from langchain_anthropic import ChatAnthropic
+
+    mcp_servers = [
+        {
+            "type": "url",
+            "url": "https://docs.langchain.com/mcp",
+            "name": "LangChain Docs",
+        }
+    ]
+
+    mcp_tool = BetaMCPToolsetParam( # [!code highlight]
+        type="mcp_toolset", # [!code highlight]
+        mcp_server_name="LangChain Docs", # [!code highlight]
+    ) # [!code highlight]
+
+    agent = create_agent(
+        model=ChatAnthropic(
+            model="claude-sonnet-4-5-20250929",
+            mcp_servers=mcp_servers, # [!code highlight]
+        ),
+        tools=[mcp_tool], # [!code highlight]
+    )
+
+    result = agent.invoke({
+        "messages": [{"role": "user", "content": "What are LangChain content blocks?"}]
+    })
+
+    for message in result["messages"]:
+        message.pretty_print()
+    ```
+
+
+    ```python
+    from langchain_anthropic import ChatAnthropic
+
+    mcp_servers = [
+        {
+            "type": "url",
+            "url": "https://docs.langchain.com/mcp",
+            "name": "LangChain Docs",
+            # "tool_configuration": { # optional configuration
+            #     "enabled": True,
+            #     "allowed_tools": ["ask_question"],
+            # },
+            # "authorization_token": "PLACEHOLDER", # optional authorization if needed
+        }
+    ]
+
+    model = ChatAnthropic(
+        model="claude-sonnet-4-5-20250929",
+        mcp_servers=mcp_servers, # [!code highlight]
+    )
+
+    response = model.invoke(
+        "What are LangChain content blocks?",
+        tools=[{"type": "mcp_toolset", "mcp_server_name": "LangChain Docs"}], # [!code highlight]
+    )
+    response.content_blocks
+    ```
+
+
 ### Text editor

-The text editor tool can be used to view and modify text files. See docs [here](https://platform.claude.com/docs/en/agents-and-tools/tool-use/text-editor-tool) for details.
+Claude supports a client-side text editor tool that can be used to view and modify local text files. See docs [here](https://platform.claude.com/docs/en/agents-and-tools/tool-use/text-editor-tool) for details.
-```python -from langchain_anthropic import ChatAnthropic + + + ```python expandable + from typing import Literal -model = ChatAnthropic(model="claude-sonnet-4-5-20250929") + from anthropic.types.beta import BetaToolTextEditor20250728Param # [!code highlight] + from langchain_anthropic import ChatAnthropic + from langchain.messages import HumanMessage, ToolMessage + from langchain.tools import tool -tool = {"type": "text_editor_20250728", "name": "str_replace_based_edit_tool"} -model_with_tools = model.bind_tools([tool]) + tool_spec = BetaToolTextEditor20250728Param( # [!code highlight] + name="str_replace_based_edit_tool", # [!code highlight] + type="text_editor_20250728", # [!code highlight] + ) # [!code highlight] -response = model_with_tools.invoke( - "There's a syntax error in my primes.py file. Can you help me fix it?" -) -print(response.text) -response.tool_calls -``` + # Simple in-memory file storage for demonstration + files: dict[str, str] = { + "/workspace/primes.py": "def is_prime(n):\n if n < 2\n return False\n return True" + } -```output -I'll help you fix the syntax error in your primes.py file. Let me first take a look at the file to identify the issue. -``` + @tool(extras={"provider_tool_definition": tool_spec}) # [!code highlight] + def str_replace_based_edit_tool( + *, + command: Literal["view", "create", "str_replace", "insert", "undo_edit"], + path: str, + file_text: str | None = None, + old_str: str | None = None, + new_str: str | None = None, + insert_line: int | None = None, + view_range: list[int] | None = None, + **kw + ): + """View and edit text files.""" + if command == "view": + if path not in files: + return f"Error: File {path} not found" + content = files[path] + if view_range: + lines = content.splitlines() + start, end = view_range[0] - 1, view_range[1] + return "\n".join(lines[start:end]) + return content + elif command == "create": + files[path] = file_text or "" + return f"Created {path}" + elif command == "str_replace" and old_str is not None: + if path not in files: + return f"Error: File {path} not found" + files[path] = files[path].replace(old_str, new_str or "", 1) + return f"Replaced in {path}" + # ... implement other commands + return f"Executed {command} on {path}" + + model = ChatAnthropic(model="claude-sonnet-4-5-20250929") + model_with_tools = model.bind_tools([str_replace_based_edit_tool]) # [!code highlight] + + # Initial request + messages = [HumanMessage("There's a syntax error in my primes.py file. 
Can you fix it?")] + response = model_with_tools.invoke(messages) + print(response.content_blocks) + + # Tool execution loop + while response.tool_calls: + tool_messages = [] + for tool_call in response.tool_calls: + result = str_replace_based_edit_tool.invoke(tool_call["args"]) + tool_messages.append( + ToolMessage(content=result, tool_call_id=tool_call["id"]) + ) + + messages = [*messages, response, *tool_messages] + response = model_with_tools.invoke(messages) + print(response.content_blocks) + ``` + + + + ```python expandable + from typing import Literal + + from anthropic.types.beta import BetaToolTextEditor20250728Param # [!code highlight] + from langchain.agents import create_agent + from langchain_anthropic import ChatAnthropic + from langchain.tools import tool + + # Simple in-memory file storage + files: dict[str, str] = { + "/workspace/primes.py": "def is_prime(n):\n if n < 2\n return False\n return True" + } -```output -[{'name': 'str_replace_based_edit_tool', - 'args': {'command': 'view', 'path': '/root'}, - 'id': 'toolu_011BG5RbqnfBYkD8qQonS9k9', - 'type': 'tool_call'}] -``` + tool_spec = BetaToolTextEditor20250728Param( # [!code highlight] + name="str_replace_based_edit_tool", # [!code highlight] + type="text_editor_20250728", # [!code highlight] + ) # [!code highlight] + + + @tool(extras={"provider_tool_definition": tool_spec}) # [!code highlight] + def str_replace_based_edit_tool( + *, + command: Literal["view", "create", "str_replace", "insert", "undo_edit"], + path: str, + file_text: str | None = None, + old_str: str | None = None, + new_str: str | None = None, + **kw + ): + """View and edit text files.""" + if command == "view": + return files.get(path, f"Error: File {path} not found") + elif command == "create": + files[path] = file_text or "" + return f"Created {path}" + elif command == "str_replace" and old_str is not None: + if path not in files: + return f"Error: File {path} not found" + files[path] = files[path].replace(old_str, new_str or "", 1) + return f"Replaced in {path}" + return f"Executed {command} on {path}" + + + agent = create_agent( + model=ChatAnthropic(model="claude-sonnet-4-5-20250929"), + tools=[str_replace_based_edit_tool], # [!code highlight] + ) + + result = agent.invoke({ + "messages": [{"role": "user", "content": "Fix the syntax error in /workspace/primes.py"}] + }) + + for message in result["messages"]: + message.pretty_print() + ``` + + + + ```python + from langchain_anthropic import ChatAnthropic + + model = ChatAnthropic(model="claude-sonnet-4-5-20250929") + + editor_tool = {"type": "text_editor_20250728", "name": "str_replace_based_edit_tool"} # [!code highlight] + + model_with_tools = model.bind_tools([editor_tool]) # [!code highlight] + + response = model_with_tools.invoke( + "There's a syntax error in my primes.py file. Can you help me fix it?" + ) + # You must handle execution of the text editor commands in response.tool_calls via a tool execution loop + ``` + + Using @[`create_agent`] handles the tool execution loop automatically. + + ```python + [{'name': 'str_replace_based_edit_tool', + 'args': {'command': 'view', 'path': '/root'}, + 'id': 'toolu_011BG5RbqnfBYkD8qQonS9k9', + 'type': 'tool_call'}] + ``` + + **Available tool versions:** @@ -1677,41 +2337,147 @@ I'll help you fix the syntax error in your primes.py file. 
Let me first take a l - `text_editor_20250728` (recommended) + + For a "batteries-included" implementation, consider using [`StateClaudeTextEditorMiddleware`](/oss/integrations/middleware/anthropic#text-editor) or [`FilesystemClaudeTextEditorMiddleware`](/oss/integrations/middleware/anthropic#text-editor) which provide LangGraph state integration or filesystem persistence, path validation, and other features. + + ### Web fetching -Claude can use a [web fetching tool](https://platform.claude.com/docs/en/agents-and-tools/tool-use/web-fetch-tool) to retrieve full content from specified web pages and PDF documents and ground its responses with citations. +Claude can use a server-side [web fetching tool](https://platform.claude.com/docs/en/agents-and-tools/tool-use/web-fetch-tool) to retrieve full content from specified web pages and PDF documents and ground its responses with citations. -```python -from langchain_anthropic import ChatAnthropic + + + ```python + from anthropic.types.beta import BetaWebFetchTool20250910Param # [!code highlight] + from langchain_anthropic import ChatAnthropic -model = ChatAnthropic(model="claude-haiku-4-5-20251001") + model = ChatAnthropic(model="claude-haiku-4-5-20251001") -tool = {"type": "web_fetch_20250910", "name": "web_fetch", "max_uses": 3} # [!code highlight] -model_with_tools = model.bind_tools([tool]) + fetch_tool = BetaWebFetchTool20250910Param( # [!code highlight] + name="web_fetch", # [!code highlight] + type="web_fetch_20250910", # [!code highlight] + max_uses=3, # [!code highlight] + ) # [!code highlight] -response = model_with_tools.invoke( - "Please analyze the content at https://docs.langchain.com/" -) -``` + model_with_tools = model.bind_tools([fetch_tool]) # [!code highlight] + + response = model_with_tools.invoke( + "Please analyze the content at https://docs.langchain.com/" + ) + ``` + + + + ```python + from anthropic.types.beta import BetaWebFetchTool20250910Param # [!code highlight] + from langchain.agents import create_agent + from langchain_anthropic import ChatAnthropic + + fetch_tool = BetaWebFetchTool20250910Param( # [!code highlight] + name="web_fetch", # [!code highlight] + type="web_fetch_20250910", # [!code highlight] + max_uses=3, # [!code highlight] + ) # [!code highlight] + + agent = create_agent( + model=ChatAnthropic(model="claude-haiku-4-5-20251001"), + tools=[fetch_tool], # [!code highlight] + ) + + result = agent.invoke({ + "messages": [{"role": "user", "content": "Analyze https://docs.langchain.com/"}] + }) + + for message in result["messages"]: + message.pretty_print() + ``` + + + + ```python + from langchain_anthropic import ChatAnthropic + + model = ChatAnthropic(model="claude-haiku-4-5-20251001") + + fetch_tool = {"type": "web_fetch_20250910", "name": "web_fetch", "max_uses": 3} # [!code highlight] + + model_with_tools = model.bind_tools([fetch_tool]) # [!code highlight] + + response = model_with_tools.invoke( + "Please analyze the content at https://docs.langchain.com/" + ) + ``` + + ### Web search -Claude can use a [web search tool](https://platform.claude.com/docs/en/agents-and-tools/tool-use/web-search-tool) to run searches and ground its responses with citations. +Claude can use a server-side [web search tool](https://platform.claude.com/docs/en/agents-and-tools/tool-use/web-search-tool) to run searches and ground its responses with citations. 
**Web search tool is supported since `langchain-anthropic>=0.3.13`** -```python -from langchain_anthropic import ChatAnthropic + + + ```python + from anthropic.types.beta import BetaWebSearchTool20250305Param # [!code highlight] + from langchain_anthropic import ChatAnthropic + + model = ChatAnthropic(model="claude-sonnet-4-5-20250929") + + search_tool = BetaWebSearchTool20250305Param( # [!code highlight] + name="web_search", # [!code highlight] + type="web_search_20250305", # [!code highlight] + max_uses=3, # [!code highlight] + ) # [!code highlight] + + model_with_tools = model.bind_tools([search_tool]) # [!code highlight] + + response = model_with_tools.invoke("How do I update a web app to TypeScript 5.5?") + ``` + + + + ```python + from anthropic.types.beta import BetaWebSearchTool20250305Param # [!code highlight] + from langchain.agents import create_agent + from langchain_anthropic import ChatAnthropic + + search_tool = BetaWebSearchTool20250305Param( # [!code highlight] + name="web_search", # [!code highlight] + type="web_search_20250305", # [!code highlight] + max_uses=3, # [!code highlight] + ) # [!code highlight] + + agent = create_agent( + model=ChatAnthropic(model="claude-sonnet-4-5-20250929"), + tools=[search_tool], # [!code highlight] + ) -model = ChatAnthropic(model="claude-sonnet-4-5-20250929") + result = agent.invoke({ + "messages": [{"role": "user", "content": "How do I update a web app to TypeScript 5.5?"}] + }) -tool = {"type": "web_search_20250305", "name": "web_search", "max_uses": 3} # [!code highlight] -model_with_tools = model.bind_tools([tool]) + for message in result["messages"]: + message.pretty_print() + ``` + -response = model_with_tools.invoke("How do I update a web app to TypeScript 5.5?") -``` + + ```python + from langchain_anthropic import ChatAnthropic + + model = ChatAnthropic(model="claude-sonnet-4-5-20250929") + + search_tool = {"type": "web_search_20250305", "name": "web_search", "max_uses": 3} # [!code highlight] + + model_with_tools = model.bind_tools([search_tool]) # [!code highlight] + + response = model_with_tools.invoke("How do I update a web app to TypeScript 5.5?") + ``` + + ### Memory tool @@ -1721,30 +2487,185 @@ Claude supports a memory tool for client-side storage and retrieval of context a **Anthropic's built-in memory tool is supported since `langchain-anthropic>=0.3.21`** -```python -from langchain_anthropic import ChatAnthropic + + + ```python expandable + from typing import Literal -model = ChatAnthropic( - model="claude-sonnet-4-5-20250929", -) -model_with_tools = model.bind_tools([{"type": "memory_20250818", "name": "memory"}]) # [!code highlight] + from anthropic.types.beta import BetaMemoryTool20250818Param # [!code highlight] + from langchain_anthropic import ChatAnthropic + from langchain.messages import HumanMessage, ToolMessage + from langchain.tools import tool -response = model_with_tools.invoke("What are my interests?") -response.content_blocks -``` + tool_spec = BetaMemoryTool20250818Param( # [!code highlight] + name="memory", # [!code highlight] + type="memory_20250818", # [!code highlight] + ) # [!code highlight] -```output -[{'type': 'text', - 'text': "I'll check my memory to see what information I have about your interests."}, - {'type': 'tool_call', - 'name': 'memory', - 'args': {'command': 'view', 'path': '/memories'}, - 'id': 'toolu_01XeP9sxx44rcZHFNqXSaKqh'}] -``` + # Simple in-memory storage for demonstration purposes + memory_store: dict[str, str] = { + "/memories/interests": "User enjoys Python programming and 
hiking" + } + + + @tool(extras={"provider_tool_definition": tool_spec}) # [!code highlight] + def memory( + *, + command: Literal["view", "create", "str_replace", "insert", "delete", "rename"], + path: str, + content: str | None = None, + old_str: str | None = None, + new_str: str | None = None, + insert_line: int | None = None, + new_path: str | None = None, + **kw, + ): + """Manage persistent memory across conversations.""" + if command == "view": + if path == "/memories": + # List all memories + return "\n".join(memory_store.keys()) or "No memories stored" + return memory_store.get(path, f"No memory at {path}") + elif command == "create": + memory_store[path] = content or "" + return f"Created memory at {path}" + elif command == "str_replace" and old_str is not None: + if path in memory_store: + memory_store[path] = memory_store[path].replace(old_str, new_str or "", 1) + return f"Updated {path}" + elif command == "delete": + memory_store.pop(path, None) + return f"Deleted {path}" + # ... implement other commands + return f"Executed {command} on {path}" + + + model = ChatAnthropic(model="claude-sonnet-4-5-20250929") + model_with_tools = model.bind_tools([memory]) # [!code highlight] + + # Initial request + messages = [HumanMessage("What are my interests?")] + response = model_with_tools.invoke(messages) + print(response.content_blocks) + + # Tool execution loop + while response.tool_calls: + tool_messages = [] + for tool_call in response.tool_calls: + result = memory.invoke(tool_call["args"]) + tool_messages.append(ToolMessage(content=result, tool_call_id=tool_call["id"])) + + messages = [*messages, response, *tool_messages] + response = model_with_tools.invoke(messages) + print(response.content_blocks) + ``` + + ```python + [{'type': 'text', + 'text': "I'll check my memory to see what information I have about your interests."}, + {'type': 'tool_call', + 'name': 'memory', + 'args': {'command': 'view', 'path': '/memories'}, + 'id': 'toolu_01XeP9sxx44rcZHFNqXSaKqh'}] + ``` + + + + ```python expandable + from typing import Literal + + from anthropic.types.beta import BetaMemoryTool20250818Param # [!code highlight] + from langchain.agents import create_agent + from langchain_anthropic import ChatAnthropic + from langchain.tools import tool + + # Simple in-memory storage + memory_store: dict[str, str] = { + "/memories/interests": "User enjoys Python programming and hiking" + } + + tool_spec = BetaMemoryTool20250818Param( # [!code highlight] + name="memory", # [!code highlight] + type="memory_20250818", # [!code highlight] + ) # [!code highlight] + + + @tool(extras={"provider_tool_definition": tool_spec}) # [!code highlight] + def memory( + *, + command: Literal["view", "create", "str_replace", "insert", "delete", "rename"], + path: str, + content: str | None = None, + old_str: str | None = None, + new_str: str | None = None, + **kw + ): + """Manage persistent memory across conversations.""" + if command == "view": + if path == "/memories": + return "\n".join(memory_store.keys()) or "No memories stored" + return memory_store.get(path, f"No memory at {path}") + elif command == "create": + memory_store[path] = content or "" + return f"Created memory at {path}" + elif command == "str_replace" and old_str is not None: + if path in memory_store: + memory_store[path] = memory_store[path].replace(old_str, new_str or "", 1) + return f"Updated {path}" + elif command == "delete": + memory_store.pop(path, None) + return f"Deleted {path}" + return f"Executed {command} on {path}" + + + agent = 
create_agent( + model=ChatAnthropic(model="claude-sonnet-4-5-20250929"), + tools=[memory], # [!code highlight] + ) + + result = agent.invoke({ + "messages": [{"role": "user", "content": "What are my interests?"}] + }) + + for message in result["messages"]: + message.pretty_print() + ``` + + Using @[`create_agent`] handles the tool execution loop automatically. + + + + ```python + from langchain_anthropic import ChatAnthropic + + model = ChatAnthropic( + model="claude-sonnet-4-5-20250929", + ) + model_with_tools = model.bind_tools([{"type": "memory_20250818", "name": "memory"}]) # [!code highlight] + + response = model_with_tools.invoke("What are my interests?") + response.content_blocks + # You must handle execution of the memory commands in response.tool_calls via a tool execution loop + ``` + + ```python + [{'type': 'text', + 'text': "I'll check my memory to see what information I have about your interests."}, + {'type': 'tool_call', + 'name': 'memory', + 'args': {'command': 'view', 'path': '/memories'}, + 'id': 'toolu_01XeP9sxx44rcZHFNqXSaKqh'}] + ``` + + + + + For a "batteries-included" implementation, consider using [`StateClaudeMemoryMiddleware`](/oss/integrations/middleware/anthropic#memory) or [`FilesystemClaudeMemoryMiddleware`](/oss/integrations/middleware/anthropic#memory) which provide LangGraph state integration or filesystem persistence, automatic system prompt injection, and other features. + ### Tool search -Claude supports a [tool search](https://platform.claude.com/docs/en/agents-and-tools/tool-use/tool-search-tool) feature that enables dynamic tool discovery and loading. Instead of loading all tool definitions into the context window upfront, Claude can search your tool catalog and load only the tools it needs. +Claude supports a server-side [tool search](https://platform.claude.com/docs/en/agents-and-tools/tool-use/tool-search-tool) feature that enables dynamic tool discovery and loading. Instead of loading all tool definitions into the context window upfront, Claude can search your tool catalog and load only the tools it needs. This is useful when: @@ -1759,38 +2680,135 @@ There are two tool search variants: Use the `extras` parameter to specify `defer_loading` on LangChain tools: -```python -from langchain_anthropic import ChatAnthropic -from langchain.tools import tool - -@tool(extras={"defer_loading": True}) # [!code highlight] -def get_weather(location: str, unit: str = "fahrenheit") -> str: - """Get the current weather for a location. - - Args: - location: City name - unit: Temperature unit (celsius or fahrenheit) - """ - return f"Weather in {location}: Sunny" - -@tool(extras={"defer_loading": True}) # [!code highlight] -def search_files(query: str) -> str: - """Search through files in the workspace. - - Args: - query: Search query - """ - return f"Found files matching '{query}'" - -model = ChatAnthropic(model="claude-sonnet-4-5-20250929") + + + ```python expandable + from anthropic.types.beta import BetaToolSearchToolRegex20251119Param # [!code highlight] + from langchain_anthropic import ChatAnthropic + from langchain.tools import tool + + @tool(extras={"defer_loading": True}) # [!code highlight] + def get_weather(location: str, unit: str = "fahrenheit") -> str: + """Get the current weather for a location. 
+ + Args: + location: City name + unit: Temperature unit (celsius or fahrenheit) + """ + return f"Weather in {location}: Sunny" + + @tool(extras={"defer_loading": True}) # [!code highlight] + def search_files(query: str) -> str: + """Search through files in the workspace. + + Args: + query: Search query + """ + return f"Found files matching '{query}'" + + model = ChatAnthropic(model="claude-sonnet-4-5-20250929") + + tool_search = BetaToolSearchToolRegex20251119Param( # [!code highlight] + name="tool_search_tool_regex", # [!code highlight] + type="tool_search_tool_regex_20251119", # [!code highlight] + ) # [!code highlight] + + model_with_tools = model.bind_tools([ + tool_search, # [!code highlight] + get_weather, + search_files, + ]) + response = model_with_tools.invoke("What's the weather in San Francisco?") + ``` + + + + ```python expandable + from anthropic.types.beta import BetaToolSearchToolRegex20251119Param # [!code highlight] + from langchain.agents import create_agent + from langchain_anthropic import ChatAnthropic + from langchain.tools import tool + + tool_search = BetaToolSearchToolRegex20251119Param( # [!code highlight] + name="tool_search_tool_regex", # [!code highlight] + type="tool_search_tool_regex_20251119", # [!code highlight] + ) # [!code highlight] + + + @tool(extras={"defer_loading": True}) # [!code highlight] + def get_weather(location: str, unit: str = "fahrenheit") -> str: + """Get the current weather for a location. + + Args: + location: City name + unit: Temperature unit (celsius or fahrenheit) + """ + return f"Weather in {location}: Sunny" + + + @tool(extras={"defer_loading": True}) # [!code highlight] + def search_files(query: str) -> str: + """Search through files in the workspace. + + Args: + query: Search query + """ + return f"Found files matching '{query}'" + + + agent = create_agent( + model=ChatAnthropic(model="claude-sonnet-4-5-20250929"), + tools=[ + tool_search, # [!code highlight] + get_weather, + search_files, + ], + ) -model_with_tools = model.bind_tools([ - {"type": "tool_search_tool_regex_20251119", "name": "tool_search_tool_regex"}, - get_weather, - search_files, -]) -response = model_with_tools.invoke("What's the weather in San Francisco?") -``` + result = agent.invoke({ + "messages": [{"role": "user", "content": "What's the weather in San Francisco?"}] + }) + + for message in result["messages"]: + message.pretty_print() + ``` + + + + ```python + from langchain_anthropic import ChatAnthropic + from langchain.tools import tool + + @tool(extras={"defer_loading": True}) # [!code highlight] + def get_weather(location: str, unit: str = "fahrenheit") -> str: + """Get the current weather for a location. + + Args: + location: City name + unit: Temperature unit (celsius or fahrenheit) + """ + return f"Weather in {location}: Sunny" + + @tool(extras={"defer_loading": True}) # [!code highlight] + def search_files(query: str) -> str: + """Search through files in the workspace. 
+ + Args: + query: Search query + """ + return f"Found files matching '{query}'" + + model = ChatAnthropic(model="claude-sonnet-4-5-20250929") + + model_with_tools = model.bind_tools([ + {"type": "tool_search_tool_regex_20251119", "name": "tool_search_tool_regex"}, # [!code highlight] + get_weather, + search_files, + ]) + response = model_with_tools.invoke("What's the weather in San Francisco?") + ``` + + ```mermaid sequenceDiagram @@ -1815,6 +2833,52 @@ sequenceDiagram See the [Claude documentation](https://platform.claude.com/docs/en/agents-and-tools/tool-use/tool-search-tool) for more details on tool search, including usage with MCP servers and client-side implementations. + +## Response metadata + +```python +ai_msg = model.invoke(messages) +ai_msg.response_metadata +``` + +```python +{ + "id": "msg_013xU6FHEGEq76aP4RgFerVT", + "model": "claude-sonnet-4-5-20250929", + "stop_reason": "end_turn", + "stop_sequence": None, + "usage": {"input_tokens": 25, "output_tokens": 11}, +} +``` + +## Token usage metadata + +```python +ai_msg = model.invoke(messages) +ai_msg.usage_metadata +``` + +```python +{"input_tokens": 25, "output_tokens": 11, "total_tokens": 36} +``` + +Message chunks containing token usage will be included during streaming by +default: + +```python +stream = model.stream(messages) +full = next(stream) +for chunk in stream: + full += chunk +full.usage_metadata +``` + +```python +{"input_tokens": 25, "output_tokens": 11, "total_tokens": 36} +``` + +These can be disabled by setting `stream_usage=False` in the stream method or when initializing `ChatAnthropic`. + --- ## API reference diff --git a/src/oss/python/integrations/middleware/anthropic.mdx b/src/oss/python/integrations/middleware/anthropic.mdx index 65bfdcacdb..fd8b2e4ae7 100644 --- a/src/oss/python/integrations/middleware/anthropic.mdx +++ b/src/oss/python/integrations/middleware/anthropic.mdx @@ -12,6 +12,105 @@ Middleware specifically designed for Anthropic's Claude models. 
Learn more about | [Memory](#memory) | Provide Claude's memory tool for persistent agent memory | | [File search](#file-search) | Search tools for state-based file systems | +## Middleware vs tools + +`langchain-anthropic` provides two ways to use Claude's native tools: + +- **Middleware** (this page): Production-ready implementations with built-in execution, state management, and security policies +- **Tools** (via [`bind_tools`](/oss/integrations/chat/anthropic#built-in-tools)): Low-level building blocks where you provide your own execution logic + +### When to use which + +| Use case | Recommended | Why | +|----------|-------------|-----| +| Production agents with bash | Middleware | Persistent sessions, Docker isolation, output redaction | +| State-based file editing | Middleware | Built-in LangGraph state persistence | +| Filesystem file editing | Middleware | Writes to disk with path validation | +| Custom execution logic | Tools | Full control over execution | +| Quick prototype | Tools | Simpler, bring your own callback | +| Non-agent use with @[`bind_tools`][ChatAnthropic.bind_tools] | Tools | Middleware requires @[`create_agent`] | + +### Feature comparison + +| Feature | Middleware | Tools | +|---------|:----------:|:-----:| +| Works with @[`create_agent`] | ✅ | ✅ | +| Works with @[`bind_tools`][ChatAnthropic.bind_tools] | ❌ | ✅ | +| Built-in state management | ✅ | ❌ | +| Custom execute callback | ❌ | ✅ | + + + +**Using middleware** (turnkey solution): + +```python +from langchain_anthropic import ChatAnthropic +from langchain_anthropic.middleware import ClaudeBashToolMiddleware +from langchain.agents import create_agent +from langchain.agents.middleware import DockerExecutionPolicy + +# Production-ready with Docker isolation, session management, etc. +agent = create_agent( + model=ChatAnthropic(model="claude-sonnet-4-5-20250929"), + middleware=[ + ClaudeBashToolMiddleware( + workspace_root="/workspace", + execution_policy=DockerExecutionPolicy(image="python:3.11"), + startup_commands=["pip install pandas"], + ), + ], +) +``` + +**Using tools** (bring your own execution): + +```python +import subprocess + +from anthropic.types.beta import BetaToolBash20250124Param +from langchain_anthropic import ChatAnthropic +from langchain.agents import create_agent +from langchain.tools import tool + +tool_spec = BetaToolBash20250124Param( + name="bash", + type="bash_20250124", + strict=True, +) + +@tool(extras={"provider_tool_definition": tool_spec}) +def bash(*, command: str, restart: bool = False, **kw): + """Execute a bash command.""" + if restart: + return "Bash session restarted" + try: + result = subprocess.run( + command, + shell=True, + capture_output=True, + text=True, + timeout=30, + ) + return result.stdout + result.stderr + except Exception as e: + return f"Error: {e}" + + +agent = create_agent( + model=ChatAnthropic(model="claude-sonnet-4-5-20250929"), + tools=[bash], +) + +result = agent.invoke( + {"messages": [{"role": "user", "content": "List files in this directory"}]} +) +print(result["messages"][-1].content) +``` + + + +--- + ## Prompt caching Reduce costs and latency by caching static or repetitive prompt content (like system prompts, tool definitions, and conversation history) on Anthropic's servers. This middleware implements a **conversational caching strategy** that places cache breakpoints after the most recent message, allowing the entire conversation history (including the latest user message) to be cached and reused in subsequent API calls.
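As a rough usage sketch of this strategy (assuming the middleware is exported as `AnthropicPromptCachingMiddleware` with a `ttl` parameter, mirroring the class historically available from `langchain.agents.middleware`; verify the exact import path against your installed `langchain-anthropic` version):

```python
from langchain.agents import create_agent
from langchain_anthropic import ChatAnthropic
from langchain_anthropic.middleware import AnthropicPromptCachingMiddleware  # assumed import path

# Placeholder for a large, static system prompt (policies, instructions, etc.)
SYSTEM_PROMPT = """You are a support agent for Acme Corp.
... (imagine several thousand tokens of static policy text here)
"""

agent = create_agent(
    model=ChatAnthropic(model="claude-sonnet-4-5-20250929"),
    system_prompt=SYSTEM_PROMPT,
    # Cache entries expire after the given TTL of inactivity (e.g. "5m" or "1h")
    middleware=[AnthropicPromptCachingMiddleware(ttl="5m")],
)

# The first call writes the prompt prefix to Anthropic's cache; subsequent calls
# in the same conversation re-read it at the reduced cache-read token rate.
result = agent.invoke(
    {"messages": [{"role": "user", "content": "How do I reset my password?"}]}
)
```

Because the breakpoint trails the most recent message, each new turn extends the cached prefix rather than invalidating it, which is what makes the strategy effective for long-running conversations.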