diff --git a/src/langgraph-platform/assistants.mdx b/src/langgraph-platform/assistants.mdx index 19763d45..5ec70ca1 100644 --- a/src/langgraph-platform/assistants.mdx +++ b/src/langgraph-platform/assistants.mdx @@ -17,7 +17,8 @@ The LangGraph Cloud API provides several endpoints for creating and managing ass ## Configuration -Assistants build on the LangGraph open source concept of [configuration](/oss/graph-api#configuration). +Assistants build on the LangGraph open source concept of [configuration](/oss/graph-api#runtime-context). + While configuration is available in the open source LangGraph library, assistants are only present in [LangGraph Platform](/langgraph-platform/index). This is due to the fact that assistants are tightly coupled to your deployed graph. Upon deployment, LangGraph Server will automatically create a default assistant for each graph using the graph's default configuration settings. In practice, an assistant is just an _instance_ of a graph with a specific configuration. Therefore, multiple assistants can reference the same graph but can contain different configurations (e.g. prompts, models, tools). The LangGraph Server API provides several endpoints for creating and managing assistants. See the [API reference](https://langchain-ai.github.io/langgraph/cloud/reference/api/api_ref/) and [this how-to](/langgraph-platform/configuration-cloud) for more details on how to create assistants. diff --git a/src/langgraph-platform/configuration-cloud.mdx b/src/langgraph-platform/configuration-cloud.mdx index 95c26752..2f2cdebf 100644 --- a/src/langgraph-platform/configuration-cloud.mdx +++ b/src/langgraph-platform/configuration-cloud.mdx @@ -4,21 +4,20 @@ sidebarTitle: Manage assistants --- In this guide we will show how to create, configure, and manage an [assistant](/langgraph-platform/assistants). -First, as a brief refresher on the concept of configurations, consider the following simple `call_model` node and configuration schema. 
Observe that this node tries to read and use the `model_name` as defined by the `config` object's `configurable`. +First, as a brief refresher on the concept of context, consider the following simple `call_model` node and context schema. +Observe that this node tries to read and use the `model_name` as defined by the `context` object's `model_name` field. ```python - - class ConfigSchema(TypedDict): + class ContextSchema(TypedDict): model_name: str - builder = StateGraph(AgentState, config_schema=ConfigSchema) + builder = StateGraph(AgentState, context_schema=ContextSchema) - def call_model(state, config): + def call_model(state, runtime: Runtime[ContextSchema]): messages = state["messages"] - model_name = config.get('configurable', {}).get("model_name", "anthropic") - model = _get_model(model_name) + model = _get_model(runtime.context.get("model_name", "anthropic")) response = model.invoke(messages) # We return a list, because this will get added to the existing list return {"messages": [response]} @@ -28,17 +27,16 @@ First, as a brief refresher on the concept of configurations, consider the follo ```js import { Annotation } from "@langchain/langgraph"; - const ConfigSchema = Annotation.Root({ + const ContextSchema = Annotation.Root({ model_name: Annotation, system_prompt: }); - const builder = new StateGraph(AgentState, ConfigSchema) + const builder = new StateGraph(AgentState, ContextSchema) - function callModel(state: State, config: RunnableConfig) { + function callModel(state: State, runtime: Runtime<ContextSchema>) { const messages = state.messages; - const modelName = config.configurable?.model_name ?? "anthropic"; - const model = _getModel(modelName); + const model = _getModel(runtime.context.model_name ?? 
"anthropic"); const response = model.invoke(messages); // We return a list, because this will get added to the existing list return { messages: [response] }; @@ -55,7 +53,7 @@ For more information on configurations, [see here](/langgraph-platform/configura To create an assistant, use the [LangGraph SDK](/langgraph-platform/sdk) `create` method. See the [Python](/langgraph-platform/python-sdk#langgraph_sdk.client.AssistantsClient.create) and [JS](/langgraph-platform/js-ts-sdk#create) SDK reference docs for more information. -This example uses the same configuration schema as above, and creates an assistant with `model_name` set to `openai`. +This example uses the same context schema as above, and creates an assistant with `model_name` set to `openai`. @@ -65,7 +63,7 @@ This example uses the same configuration schema as above, and creates an assista client = get_client(url=) openai_assistant = await client.assistants.create( # "agent" is the name of a graph we deployed - "agent", config={"configurable": {"model_name": "openai"}}, name="Open AI Assistant" + "agent", context={"model_name": "openai"}, name="Open AI Assistant" ) print(openai_assistant) @@ -79,7 +77,7 @@ This example uses the same configuration schema as above, and creates an assista const openAIAssistant = await client.assistants.create({ graphId: 'agent', name: "Open AI Assistant", - config: { "configurable": { "model_name": "openai" } }, + context: { "model_name": "openai" }, }); console.log(openAIAssistant); @@ -90,7 +88,7 @@ This example uses the same configuration schema as above, and creates an assista curl --request POST \ --url /assistants \ --header 'Content-Type: application/json' \ - --data '{"graph_id":"agent", "config":{"configurable":{"model_name":"openai"}}, "name": "Open AI Assistant"}' + --data '{"graph_id":"agent", "context":{"model_name":"openai"}, "name": "Open AI Assistant"}' ``` @@ -102,11 +100,9 @@ Output: "assistant_id": "62e209ca-9154-432a-b9e9-2d75c7a9219b", "graph_id": "agent", 
"name": "Open AI Assistant" -"config": { -"configurable": { +"context": { "model_name": "openai" } -}, "metadata": {} "created_at": "2024-08-31T03:09:10.230718+00:00", "updated_at": "2024-08-31T03:09:10.230718+00:00", @@ -239,7 +235,7 @@ To edit the assistant, use the `update` method. This will create a new version o **Note** - You must pass in the ENTIRE config (and metadata if you are using it). The update endpoint creates new versions completely from scratch and does not rely on previous versions. + You must pass in the ENTIRE context (and metadata if you are using it). The update endpoint creates new versions completely from scratch and does not rely on previous versions. For example, to update your assistant's system prompt: @@ -249,11 +245,9 @@ For example, to update your assistant's system prompt: ```python openai_assistant_v2 = await client.assistants.update( openai_assistant["assistant_id"], - config={ - "configurable": { - "model_name": "openai", - "system_prompt": "You are an unhelpful assistant!", - } + context={ + "model_name": "openai", + "system_prompt": "You are an unhelpful assistant!", }, ) ``` @@ -263,13 +257,12 @@ For example, to update your assistant's system prompt: const openaiAssistantV2 = await client.assistants.update( openai_assistant["assistant_id"], { - config: { - configurable: { - model_name: 'openai', - system_prompt: 'You are an unhelpful assistant!', - }, + context: { + model_name: 'openai', + system_prompt: 'You are an unhelpful assistant!', + }, }, - }); + ); ``` @@ -278,7 +271,7 @@ For example, to update your assistant's system prompt: --url /assistants/ \ --header 'Content-Type: application/json' \ --data '{ - "config": {"model_name": "openai", "system_prompt": "You are an unhelpful assistant!"} + "context": {"model_name": "openai", "system_prompt": "You are an unhelpful assistant!"} }' ``` diff --git a/src/langgraph-platform/setup-app-requirements-txt.mdx b/src/langgraph-platform/setup-app-requirements-txt.mdx index 
58fc9e46..6759cb50 100644 --- a/src/langgraph-platform/setup-app-requirements-txt.mdx +++ b/src/langgraph-platform/setup-app-requirements-txt.mdx @@ -42,7 +42,7 @@ Dependencies can optionally be specified in one of the following files: `pyproje The dependencies below will be included in the image, you can also use them in your code, as long as with a compatible version range: ``` -langgraph>=0.3.27 +langgraph>=0.6.0 langgraph-sdk>=0.1.66 langgraph-checkpoint>=2.0.23 langchain-core>=0.2.38 @@ -114,11 +114,11 @@ from langgraph.graph import StateGraph, END, START from my_agent.utils.nodes import call_model, should_continue, tool_node # import nodes from my_agent.utils.state import AgentState # import state -# Define the config -class GraphConfig(TypedDict): +# Define the runtime context +class GraphContext(TypedDict): model_name: Literal["anthropic", "openai"] -workflow = StateGraph(AgentState, config_schema=GraphConfig) +workflow = StateGraph(AgentState, context_schema=GraphContext) workflow.add_node("agent", call_model) workflow.add_node("action", tool_node) workflow.add_edge(START, "agent") diff --git a/src/langgraph-platform/setup-pyproject.mdx b/src/langgraph-platform/setup-pyproject.mdx index 20eecd88..19313c2c 100644 --- a/src/langgraph-platform/setup-pyproject.mdx +++ b/src/langgraph-platform/setup-pyproject.mdx @@ -42,7 +42,7 @@ Dependencies can optionally be specified in one of the following files: `pyproje The dependencies below will be included in the image, you can also use them in your code, as long as with a compatible version range: ``` -langgraph>=0.3.27 +langgraph>=0.6.0 langgraph-sdk>=0.1.66 langgraph-checkpoint>=2.0.23 langchain-core>=0.2.38 @@ -77,7 +77,7 @@ license = {text = "MIT"} readme = "README.md" requires-python = ">=3.9" dependencies = [ - "langgraph>=0.2.0", + "langgraph>=0.6.0", "langchain-fireworks>=0.1.3" ] @@ -127,11 +127,11 @@ from langgraph.graph import StateGraph, END, START from my_agent.utils.nodes import call_model, 
should_continue, tool_node # import nodes from my_agent.utils.state import AgentState # import state -# Define the config -class GraphConfig(TypedDict): +# Define the runtime context +class GraphContext(TypedDict): model_name: Literal["anthropic", "openai"] -workflow = StateGraph(AgentState, config_schema=GraphConfig) +workflow = StateGraph(AgentState, context_schema=GraphContext) workflow.add_node("agent", call_model) workflow.add_node("action", tool_node) workflow.add_edge(START, "agent")