diff --git a/.stats.yml b/.stats.yml
index d383b41d..0f28d1e5 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,2 +1,2 @@
configured_endpoints: 3
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/anthropic-4742de59ec06077403336bc26e26390e57888e5eef313bf27eab241dbb905f06.yml
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/anthropic-0017013a270564e5cdfb7b8ffe474c962f4b806c862cbcc33c905504897fabbe.yml
diff --git a/api.md b/api.md
index 0c84ef6f..3ec85f4b 100644
--- a/api.md
+++ b/api.md
@@ -38,16 +38,21 @@ Types:
```python
from anthropic.types.beta.tools import (
+ InputJsonDelta,
Tool,
ToolResultBlockParam,
ToolUseBlock,
ToolUseBlockParam,
ToolsBetaContentBlock,
+ ToolsBetaContentBlockDeltaEvent,
+ ToolsBetaContentBlockStartEvent,
ToolsBetaMessage,
ToolsBetaMessageParam,
+ ToolsBetaMessageStreamEvent,
)
```
Methods:
- client.beta.tools.messages.create(\*\*params) -> ToolsBetaMessage
+- client.beta.tools.messages.stream(\*args) -> ToolsBetaMessageStreamManager[ToolsBetaMessageStream] | ToolsBetaMessageStreamManager[ToolsBetaMessageStreamT]
diff --git a/examples/tools_stream.py b/examples/tools_stream.py
new file mode 100644
index 00000000..5b0c1927
--- /dev/null
+++ b/examples/tools_stream.py
@@ -0,0 +1,48 @@
+import asyncio
+from typing_extensions import override
+
+from anthropic import AsyncAnthropic
+from anthropic.lib.streaming.beta import AsyncToolsBetaMessageStream
+
+client = AsyncAnthropic()
+
+
+class MyHandler(AsyncToolsBetaMessageStream):
+ @override
+ async def on_input_json(self, delta: str, snapshot: object) -> None:
+ print(f"delta: {repr(delta)}")
+ print(f"snapshot: {snapshot}")
+ print()
+
+
+async def main() -> None:
+ async with client.beta.tools.messages.stream(
+ max_tokens=1024,
+ model="claude-3-haiku-20240307",
+ tools=[
+ {
+ "name": "get_weather",
+ "description": "Get the weather at a specific location",
+ "input_schema": {
+ "type": "object",
+ "properties": {
+ "location": {"type": "string", "description": "The city and state, e.g. San Francisco, CA"},
+ "unit": {
+ "type": "string",
+ "enum": ["celsius", "fahrenheit"],
+ "description": "Unit for the output",
+ },
+ },
+ "required": ["location"],
+ },
+ }
+ ],
+ messages=[{"role": "user", "content": "What is the weather in SF?"}],
+ event_handler=MyHandler,
+ ) as stream:
+ await stream.until_done()
+
+ print()
+
+
+asyncio.run(main())
diff --git a/src/anthropic/lib/streaming/beta/__init__.py b/src/anthropic/lib/streaming/beta/__init__.py
new file mode 100644
index 00000000..6fd08cdb
--- /dev/null
+++ b/src/anthropic/lib/streaming/beta/__init__.py
@@ -0,0 +1,8 @@
+from ._tools import (
+ ToolsBetaMessageStream as ToolsBetaMessageStream,
+ ToolsBetaMessageStreamT as ToolsBetaMessageStreamT,
+ AsyncToolsBetaMessageStream as AsyncToolsBetaMessageStream,
+ AsyncToolsBetaMessageStreamT as AsyncToolsBetaMessageStreamT,
+ ToolsBetaMessageStreamManager as ToolsBetaMessageStreamManager,
+ AsyncToolsBetaMessageStreamManager as AsyncToolsBetaMessageStreamManager,
+)
diff --git a/src/anthropic/lib/streaming/beta/_tools.py b/src/anthropic/lib/streaming/beta/_tools.py
new file mode 100644
index 00000000..dabb3134
--- /dev/null
+++ b/src/anthropic/lib/streaming/beta/_tools.py
@@ -0,0 +1,490 @@
+from __future__ import annotations
+
+import asyncio
+from types import TracebackType
+from typing import TYPE_CHECKING, Any, Generic, TypeVar, Callable, cast
+from typing_extensions import Iterator, Awaitable, AsyncIterator, override, assert_never
+
+import httpx
+
+from ...._utils import consume_sync_iterator, consume_async_iterator
+from ...._models import construct_type
+from ...._streaming import Stream, AsyncStream
+from ....types.beta.tools import ToolsBetaMessage, ToolsBetaContentBlock, ToolsBetaMessageStreamEvent
+
+if TYPE_CHECKING:
+ from ...._client import Anthropic, AsyncAnthropic
+
+
+class ToolsBetaMessageStream(Stream[ToolsBetaMessageStreamEvent]):
+ text_stream: Iterator[str]
+ """Iterator over just the text deltas in the stream.
+
+ ```py
+ for text in stream.text_stream:
+ print(text, end="", flush=True)
+ print()
+ ```
+ """
+
+ def __init__(
+ self,
+ *,
+ cast_to: type[ToolsBetaMessageStreamEvent],
+ response: httpx.Response,
+ client: Anthropic,
+ ) -> None:
+ super().__init__(cast_to=cast_to, response=response, client=client)
+
+ self.text_stream = self.__stream_text__()
+ self.__final_message_snapshot: ToolsBetaMessage | None = None
+ self.__events: list[ToolsBetaMessageStreamEvent] = []
+
+ def get_final_message(self) -> ToolsBetaMessage:
+ """Waits until the stream has been read to completion and returns
+ the accumulated `ToolsBetaMessage` object.
+ """
+ self.until_done()
+ assert self.__final_message_snapshot is not None
+ return self.__final_message_snapshot
+
+ def get_final_text(self) -> str:
+ """Returns all `text` content blocks concatenated together.
+
+ > [!NOTE]
+ > Currently the API will only respond with a single content block.
+
+ Will raise an error if no `text` content blocks were returned.
+ """
+ message = self.get_final_message()
+ text_blocks: list[str] = []
+ for block in message.content:
+ if block.type == "text":
+ text_blocks.append(block.text)
+
+ if not text_blocks:
+ raise RuntimeError("Expected to have received at least 1 text block")
+
+ return "".join(text_blocks)
+
+ def until_done(self) -> None:
+ """Blocks until the stream has been consumed"""
+ consume_sync_iterator(self)
+
+ @override
+ def close(self) -> None:
+ super().close()
+ self.on_end()
+
+ # properties
+ @property
+ def current_message_snapshot(self) -> ToolsBetaMessage:
+ assert self.__final_message_snapshot is not None
+ return self.__final_message_snapshot
+
+ # event handlers
+ def on_stream_event(self, event: ToolsBetaMessageStreamEvent) -> None:
+ """Callback that is fired for every Server-Sent-Event"""
+
+ def on_message(self, message: ToolsBetaMessage) -> None:
+        """Callback that is fired when a full ToolsBetaMessage object is accumulated.
+
+ This corresponds to the `message_stop` SSE type.
+ """
+
+ def on_content_block(self, content_block: ToolsBetaContentBlock) -> None:
+ """Callback that is fired whenever a full ToolsBetaContentBlock is accumulated.
+
+ This corresponds to the `content_block_stop` SSE type.
+ """
+
+ def on_text(self, text: str, snapshot: str) -> None:
+ """Callback that is fired whenever a `text` ToolsBetaContentBlock is yielded.
+
+ The first argument is the text delta and the second is the current accumulated
+ text, for example:
+
+ ```py
+ on_text("Hello", "Hello")
+ on_text(" there", "Hello there")
+ on_text("!", "Hello there!")
+ ```
+ """
+
+ def on_input_json(self, delta: str, snapshot: object) -> None:
+        """Callback that is fired whenever an `input_json_delta` ToolsBetaContentBlock is yielded.
+
+ The first argument is the json string delta and the second is the current accumulated
+ parsed object, for example:
+
+ ```
+ on_input_json('{"locations": ["San ', {"locations": []})
+ on_input_json('Francisco"]', {"locations": ["San Francisco"]})
+ ```
+ """
+
+ def on_exception(self, exception: Exception) -> None:
+ """Fires if any exception occurs"""
+
+ def on_end(self) -> None:
+ ...
+
+ def on_timeout(self) -> None:
+ """Fires if the request times out"""
+
+ @override
+ def __stream__(self) -> Iterator[ToolsBetaMessageStreamEvent]:
+ try:
+ for event in super().__stream__():
+ self.__events.append(event)
+
+ self.__final_message_snapshot = accumulate_event(
+ event=event,
+ current_snapshot=self.__final_message_snapshot,
+ )
+ self._emit_sse_event(event)
+
+ yield event
+ except (httpx.TimeoutException, asyncio.TimeoutError) as exc:
+ self.on_timeout()
+ self.on_exception(exc)
+ raise
+ except Exception as exc:
+ self.on_exception(exc)
+ raise
+ finally:
+ self.on_end()
+
+ def __stream_text__(self) -> Iterator[str]:
+ for chunk in self:
+ if chunk.type == "content_block_delta" and chunk.delta.type == "text_delta":
+ yield chunk.delta.text
+
+ def _emit_sse_event(self, event: ToolsBetaMessageStreamEvent) -> None:
+ self.on_stream_event(event)
+
+ if event.type == "message_start":
+ # nothing special we want to fire here
+ pass
+ elif event.type == "message_delta":
+ # nothing special we want to fire here
+ pass
+ elif event.type == "message_stop":
+ self.on_message(self.current_message_snapshot)
+ elif event.type == "content_block_start":
+ # nothing special we want to fire here
+ pass
+ elif event.type == "content_block_delta":
+ content = self.current_message_snapshot.content[event.index]
+ if event.delta.type == "text_delta" and content.type == "text":
+ self.on_text(event.delta.text, content.text)
+ elif event.delta.type == "input_json_delta" and content.type == "tool_use":
+ self.on_input_json(event.delta.partial_json, content.input)
+ elif event.type == "content_block_stop":
+ content = self.current_message_snapshot.content[event.index]
+ self.on_content_block(content)
+ else:
+ # we only want exhaustive checking for linters, not at runtime
+ if TYPE_CHECKING: # type: ignore[unreachable]
+ assert_never(event)
+
+
+ToolsBetaMessageStreamT = TypeVar("ToolsBetaMessageStreamT", bound=ToolsBetaMessageStream)
+
+
+class ToolsBetaMessageStreamManager(Generic[ToolsBetaMessageStreamT]):
+    """Wrapper over ToolsBetaMessageStream that is returned by `.stream()`.
+
+ ```py
+ with client.beta.tools.messages.stream(...) as stream:
+ for chunk in stream:
+ ...
+ ```
+ """
+
+ def __init__(self, api_request: Callable[[], ToolsBetaMessageStreamT]) -> None:
+ self.__stream: ToolsBetaMessageStreamT | None = None
+ self.__api_request = api_request
+
+ def __enter__(self) -> ToolsBetaMessageStreamT:
+ self.__stream = self.__api_request()
+ return self.__stream
+
+ def __exit__(
+ self,
+ exc_type: type[BaseException] | None,
+ exc: BaseException | None,
+ exc_tb: TracebackType | None,
+ ) -> None:
+ if self.__stream is not None:
+ self.__stream.close()
+
+
+class AsyncToolsBetaMessageStream(AsyncStream[ToolsBetaMessageStreamEvent]):
+ text_stream: AsyncIterator[str]
+ """Async iterator over just the text deltas in the stream.
+
+ ```py
+ async for text in stream.text_stream:
+ print(text, end="", flush=True)
+ print()
+ ```
+ """
+
+ def __init__(
+ self,
+ *,
+ cast_to: type[ToolsBetaMessageStreamEvent],
+ response: httpx.Response,
+ client: AsyncAnthropic,
+ ) -> None:
+ super().__init__(cast_to=cast_to, response=response, client=client)
+
+ self.text_stream = self.__stream_text__()
+ self.__final_message_snapshot: ToolsBetaMessage | None = None
+ self.__events: list[ToolsBetaMessageStreamEvent] = []
+
+ async def get_final_message(self) -> ToolsBetaMessage:
+ """Waits until the stream has been read to completion and returns
+ the accumulated `ToolsBetaMessage` object.
+ """
+ await self.until_done()
+ assert self.__final_message_snapshot is not None
+ return self.__final_message_snapshot
+
+ async def get_final_text(self) -> str:
+ """Returns all `text` content blocks concatenated together.
+
+ > [!NOTE]
+ > Currently the API will only respond with a single content block.
+
+ Will raise an error if no `text` content blocks were returned.
+ """
+ message = await self.get_final_message()
+ text_blocks: list[str] = []
+ for block in message.content:
+ if block.type == "text":
+ text_blocks.append(block.text)
+
+ if not text_blocks:
+ raise RuntimeError("Expected to have received at least 1 text block")
+
+ return "".join(text_blocks)
+
+ async def until_done(self) -> None:
+ """Waits until the stream has been consumed"""
+ await consume_async_iterator(self)
+
+ @override
+ async def close(self) -> None:
+ await super().close()
+ await self.on_end()
+
+ # properties
+ @property
+ def current_message_snapshot(self) -> ToolsBetaMessage:
+ assert self.__final_message_snapshot is not None
+ return self.__final_message_snapshot
+
+ # event handlers
+ async def on_stream_event(self, event: ToolsBetaMessageStreamEvent) -> None:
+ """Callback that is fired for every Server-Sent-Event"""
+
+ async def on_message(self, message: ToolsBetaMessage) -> None:
+ """Callback that is fired when a full ToolsBetaMessage object is accumulated.
+
+ This corresponds to the `message_stop` SSE type.
+ """
+
+ async def on_content_block(self, content_block: ToolsBetaContentBlock) -> None:
+ """Callback that is fired whenever a full ToolsBetaContentBlock is accumulated.
+
+ This corresponds to the `content_block_stop` SSE type.
+ """
+
+ async def on_text(self, text: str, snapshot: str) -> None:
+ """Callback that is fired whenever a `text` ToolsBetaContentBlock is yielded.
+
+ The first argument is the text delta and the second is the current accumulated
+ text, for example:
+
+ ```
+ on_text("Hello", "Hello")
+ on_text(" there", "Hello there")
+ on_text("!", "Hello there!")
+ ```
+ """
+
+ async def on_input_json(self, delta: str, snapshot: object) -> None:
+        """Callback that is fired whenever an `input_json_delta` ToolsBetaContentBlock is yielded.
+
+ The first argument is the json string delta and the second is the current accumulated
+ parsed object, for example:
+
+ ```
+ on_input_json('{"locations": ["San ', {"locations": []})
+ on_input_json('Francisco"]', {"locations": ["San Francisco"]})
+ ```
+ """
+
+ async def on_final_text(self, text: str) -> None:
+ """Callback that is fired whenever a full `text` ToolsBetaContentBlock is accumulated.
+
+ This corresponds to the `content_block_stop` SSE type.
+ """
+
+ async def on_exception(self, exception: Exception) -> None:
+ """Fires if any exception occurs"""
+
+ async def on_end(self) -> None:
+ ...
+
+ async def on_timeout(self) -> None:
+ """Fires if the request times out"""
+
+ @override
+ async def __stream__(self) -> AsyncIterator[ToolsBetaMessageStreamEvent]:
+ try:
+ async for event in super().__stream__():
+ self.__events.append(event)
+
+ self.__final_message_snapshot = accumulate_event(
+ event=event,
+ current_snapshot=self.__final_message_snapshot,
+ )
+ await self._emit_sse_event(event)
+
+ yield event
+ except (httpx.TimeoutException, asyncio.TimeoutError) as exc:
+ await self.on_timeout()
+ await self.on_exception(exc)
+ raise
+ except Exception as exc:
+ await self.on_exception(exc)
+ raise
+ finally:
+ await self.on_end()
+
+ async def __stream_text__(self) -> AsyncIterator[str]:
+ async for chunk in self:
+ if chunk.type == "content_block_delta" and chunk.delta.type == "text_delta":
+ yield chunk.delta.text
+
+ async def _emit_sse_event(self, event: ToolsBetaMessageStreamEvent) -> None:
+ await self.on_stream_event(event)
+
+ if event.type == "message_start":
+ # nothing special we want to fire here
+ pass
+ elif event.type == "message_delta":
+ # nothing special we want to fire here
+ pass
+ elif event.type == "message_stop":
+ await self.on_message(self.current_message_snapshot)
+ elif event.type == "content_block_start":
+ # nothing special we want to fire here
+ pass
+ elif event.type == "content_block_delta":
+ content = self.current_message_snapshot.content[event.index]
+ if event.delta.type == "text_delta" and content.type == "text":
+ await self.on_text(event.delta.text, content.text)
+ elif event.delta.type == "input_json_delta" and content.type == "tool_use":
+ await self.on_input_json(event.delta.partial_json, content.input)
+ else:
+ # TODO: warn?
+ pass
+ elif event.type == "content_block_stop":
+ content = self.current_message_snapshot.content[event.index]
+ await self.on_content_block(content)
+
+ if content.type == "text":
+ await self.on_final_text(content.text)
+ else:
+ # we only want exhaustive checking for linters, not at runtime
+ if TYPE_CHECKING: # type: ignore[unreachable]
+ assert_never(event)
+
+
+AsyncToolsBetaMessageStreamT = TypeVar("AsyncToolsBetaMessageStreamT", bound=AsyncToolsBetaMessageStream)
+
+
+class AsyncToolsBetaMessageStreamManager(Generic[AsyncToolsBetaMessageStreamT]):
+    """Wrapper over AsyncToolsBetaMessageStream that is returned by `.stream()`
+ so that an async context manager can be used without `await`ing the
+ original client call.
+
+ ```py
+ async with client.beta.tools.messages.stream(...) as stream:
+ async for chunk in stream:
+ ...
+ ```
+ """
+
+ def __init__(self, api_request: Awaitable[AsyncToolsBetaMessageStreamT]) -> None:
+ self.__stream: AsyncToolsBetaMessageStreamT | None = None
+ self.__api_request = api_request
+
+ async def __aenter__(self) -> AsyncToolsBetaMessageStreamT:
+ self.__stream = await self.__api_request
+ return self.__stream
+
+ async def __aexit__(
+ self,
+ exc_type: type[BaseException] | None,
+ exc: BaseException | None,
+ exc_tb: TracebackType | None,
+ ) -> None:
+ if self.__stream is not None:
+ await self.__stream.close()
+
+
+JSON_BUF_PROPERTY = "__json_buf"
+
+
+def accumulate_event(
+ *,
+ event: ToolsBetaMessageStreamEvent,
+ current_snapshot: ToolsBetaMessage | None,
+) -> ToolsBetaMessage:
+ if current_snapshot is None:
+ if event.type == "message_start":
+ return ToolsBetaMessage.construct(**cast(Any, event.message.to_dict()))
+
+ raise RuntimeError(f'Unexpected event order, got {event.type} before "message_start"')
+
+ if event.type == "content_block_start":
+ # TODO: check index
+ current_snapshot.content.append(
+ cast(
+ ToolsBetaContentBlock,
+ construct_type(type_=ToolsBetaContentBlock, value=event.content_block.model_dump()),
+ ),
+ )
+ elif event.type == "content_block_delta":
+ content = current_snapshot.content[event.index]
+ if content.type == "text" and event.delta.type == "text_delta":
+ content.text += event.delta.text
+ elif content.type == "tool_use" and event.delta.type == "input_json_delta":
+ try:
+ from pydantic_core import from_json
+ except ImportError as exc:
+ raise RuntimeError(
+ "Could not import `pydantic_core.from_json` which is required for tool use accumulation, do you have pydantic >= 2.7 installed?"
+ ) from exc
+
+ # we need to keep track of the raw JSON string as well so that we can
+ # re-parse it for each delta, for now we just store it as an untyped
+ # property on the snapshot
+ json_buf = cast(str, getattr(content, JSON_BUF_PROPERTY, ""))
+ json_buf += event.delta.partial_json
+
+ if json_buf:
+ content.input = from_json(json_buf, allow_partial=True)
+
+ setattr(content, JSON_BUF_PROPERTY, json_buf)
+ elif event.type == "message_delta":
+ current_snapshot.stop_reason = event.delta.stop_reason
+ current_snapshot.stop_sequence = event.delta.stop_sequence
+ current_snapshot.usage.output_tokens = event.usage.output_tokens
+
+ return current_snapshot
diff --git a/src/anthropic/resources/beta/tools/messages.py b/src/anthropic/resources/beta/tools/messages.py
index 2d5cd77b..53804a38 100644
--- a/src/anthropic/resources/beta/tools/messages.py
+++ b/src/anthropic/resources/beta/tools/messages.py
@@ -3,6 +3,7 @@
from __future__ import annotations
from typing import List, Iterable, overload
+from functools import partial
from typing_extensions import Literal
import httpx
@@ -22,10 +23,18 @@
make_request_options,
)
from ....types.beta.tools import message_create_params
-from ....types.message_stream_event import MessageStreamEvent
+from ....lib.streaming.beta import (
+ ToolsBetaMessageStream,
+ ToolsBetaMessageStreamT,
+ AsyncToolsBetaMessageStream,
+ AsyncToolsBetaMessageStreamT,
+ ToolsBetaMessageStreamManager,
+ AsyncToolsBetaMessageStreamManager,
+)
from ....types.beta.tools.tool_param import ToolParam
from ....types.beta.tools.tools_beta_message import ToolsBetaMessage
from ....types.beta.tools.tools_beta_message_param import ToolsBetaMessageParam
+from ....types.beta.tools.tools_beta_message_stream_event import ToolsBetaMessageStreamEvent
__all__ = ["Messages", "AsyncMessages"]
@@ -51,6 +60,7 @@ def create(
stream: Literal[False] | NotGiven = NOT_GIVEN,
system: str | NotGiven = NOT_GIVEN,
temperature: float | NotGiven = NOT_GIVEN,
+ tool_choice: message_create_params.ToolChoice | NotGiven = NOT_GIVEN,
tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
top_k: int | NotGiven = NOT_GIVEN,
top_p: float | NotGiven = NOT_GIVEN,
@@ -77,7 +87,7 @@ def create(
only specifies the absolute maximum number of tokens to generate.
Different models have different maximum values for this parameter. See
- [models](https://docs.anthropic.com/claude/docs/models-overview) for details.
+ [models](https://docs.anthropic.com/en/docs/models-overview) for details.
messages: Input messages.
@@ -157,18 +167,18 @@ def create(
We currently support the `base64` source type for images, and the `image/jpeg`,
`image/png`, `image/gif`, and `image/webp` media types.
- See [examples](https://docs.anthropic.com/claude/reference/messages-examples)
- for more input examples.
+ See [examples](https://docs.anthropic.com/en/api/messages-examples) for more
+ input examples.
Note that if you want to include a
- [system prompt](https://docs.anthropic.com/claude/docs/system-prompts), you can
- use the top-level `system` parameter — there is no `"system"` role for input
+ [system prompt](https://docs.anthropic.com/en/docs/system-prompts), you can use
+ the top-level `system` parameter — there is no `"system"` role for input
messages in the Messages API.
model: The model that will complete your prompt.
- See [models](https://docs.anthropic.com/claude/docs/models-overview) for
- additional details and options.
+ See [models](https://docs.anthropic.com/en/docs/models-overview) for additional
+ details and options.
metadata: An object describing metadata about the request.
@@ -184,14 +194,14 @@ def create(
stream: Whether to incrementally stream the response using server-sent events.
- See [streaming](https://docs.anthropic.com/claude/reference/messages-streaming)
- for details.
+ See [streaming](https://docs.anthropic.com/en/api/messages-streaming) for
+ details.
system: System prompt.
A system prompt is a way of providing context and instructions to Claude, such
as specifying a particular goal or role. See our
- [guide to system prompts](https://docs.anthropic.com/claude/docs/system-prompts).
+ [guide to system prompts](https://docs.anthropic.com/en/docs/system-prompts).
temperature: Amount of randomness injected into the response.
@@ -202,6 +212,9 @@ def create(
Note that even with `temperature` of `0.0`, the results will not be fully
deterministic.
+ tool_choice: How the model should use the provided tools. The model can use a specific tool,
+ any available tool, or decide by itself.
+
tools: [beta] Definitions of tools that the model may use.
If you include `tools` in your API request, the model may return `tool_use`
@@ -269,7 +282,7 @@ def create(
functions, or more generally whenever you want the model to produce a particular
JSON structure of output.
- See our [beta guide](https://docs.anthropic.com/claude/docs/tool-use) for more
+ See our [beta guide](https://docs.anthropic.com/en/docs/tool-use) for more
details.
top_k: Only sample from the top K options for each subsequent token.
@@ -312,6 +325,7 @@ def create(
stop_sequences: List[str] | NotGiven = NOT_GIVEN,
system: str | NotGiven = NOT_GIVEN,
temperature: float | NotGiven = NOT_GIVEN,
+ tool_choice: message_create_params.ToolChoice | NotGiven = NOT_GIVEN,
tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
top_k: int | NotGiven = NOT_GIVEN,
top_p: float | NotGiven = NOT_GIVEN,
@@ -321,7 +335,7 @@ def create(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = 600,
- ) -> Stream[MessageStreamEvent]:
+ ) -> Stream[ToolsBetaMessageStreamEvent]:
"""
Create a Message.
@@ -338,7 +352,7 @@ def create(
only specifies the absolute maximum number of tokens to generate.
Different models have different maximum values for this parameter. See
- [models](https://docs.anthropic.com/claude/docs/models-overview) for details.
+ [models](https://docs.anthropic.com/en/docs/models-overview) for details.
messages: Input messages.
@@ -418,23 +432,23 @@ def create(
We currently support the `base64` source type for images, and the `image/jpeg`,
`image/png`, `image/gif`, and `image/webp` media types.
- See [examples](https://docs.anthropic.com/claude/reference/messages-examples)
- for more input examples.
+ See [examples](https://docs.anthropic.com/en/api/messages-examples) for more
+ input examples.
Note that if you want to include a
- [system prompt](https://docs.anthropic.com/claude/docs/system-prompts), you can
- use the top-level `system` parameter — there is no `"system"` role for input
+ [system prompt](https://docs.anthropic.com/en/docs/system-prompts), you can use
+ the top-level `system` parameter — there is no `"system"` role for input
messages in the Messages API.
model: The model that will complete your prompt.
- See [models](https://docs.anthropic.com/claude/docs/models-overview) for
- additional details and options.
+ See [models](https://docs.anthropic.com/en/docs/models-overview) for additional
+ details and options.
stream: Whether to incrementally stream the response using server-sent events.
- See [streaming](https://docs.anthropic.com/claude/reference/messages-streaming)
- for details.
+ See [streaming](https://docs.anthropic.com/en/api/messages-streaming) for
+ details.
metadata: An object describing metadata about the request.
@@ -452,7 +466,7 @@ def create(
A system prompt is a way of providing context and instructions to Claude, such
as specifying a particular goal or role. See our
- [guide to system prompts](https://docs.anthropic.com/claude/docs/system-prompts).
+ [guide to system prompts](https://docs.anthropic.com/en/docs/system-prompts).
temperature: Amount of randomness injected into the response.
@@ -463,6 +477,9 @@ def create(
Note that even with `temperature` of `0.0`, the results will not be fully
deterministic.
+ tool_choice: How the model should use the provided tools. The model can use a specific tool,
+ any available tool, or decide by itself.
+
tools: [beta] Definitions of tools that the model may use.
If you include `tools` in your API request, the model may return `tool_use`
@@ -530,7 +547,7 @@ def create(
functions, or more generally whenever you want the model to produce a particular
JSON structure of output.
- See our [beta guide](https://docs.anthropic.com/claude/docs/tool-use) for more
+ See our [beta guide](https://docs.anthropic.com/en/docs/tool-use) for more
details.
top_k: Only sample from the top K options for each subsequent token.
@@ -573,6 +590,7 @@ def create(
stop_sequences: List[str] | NotGiven = NOT_GIVEN,
system: str | NotGiven = NOT_GIVEN,
temperature: float | NotGiven = NOT_GIVEN,
+ tool_choice: message_create_params.ToolChoice | NotGiven = NOT_GIVEN,
tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
top_k: int | NotGiven = NOT_GIVEN,
top_p: float | NotGiven = NOT_GIVEN,
@@ -582,7 +600,7 @@ def create(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = 600,
- ) -> ToolsBetaMessage | Stream[MessageStreamEvent]:
+ ) -> ToolsBetaMessage | Stream[ToolsBetaMessageStreamEvent]:
"""
Create a Message.
@@ -599,7 +617,7 @@ def create(
only specifies the absolute maximum number of tokens to generate.
Different models have different maximum values for this parameter. See
- [models](https://docs.anthropic.com/claude/docs/models-overview) for details.
+ [models](https://docs.anthropic.com/en/docs/models-overview) for details.
messages: Input messages.
@@ -679,23 +697,23 @@ def create(
We currently support the `base64` source type for images, and the `image/jpeg`,
`image/png`, `image/gif`, and `image/webp` media types.
- See [examples](https://docs.anthropic.com/claude/reference/messages-examples)
- for more input examples.
+ See [examples](https://docs.anthropic.com/en/api/messages-examples) for more
+ input examples.
Note that if you want to include a
- [system prompt](https://docs.anthropic.com/claude/docs/system-prompts), you can
- use the top-level `system` parameter — there is no `"system"` role for input
+ [system prompt](https://docs.anthropic.com/en/docs/system-prompts), you can use
+ the top-level `system` parameter — there is no `"system"` role for input
messages in the Messages API.
model: The model that will complete your prompt.
- See [models](https://docs.anthropic.com/claude/docs/models-overview) for
- additional details and options.
+ See [models](https://docs.anthropic.com/en/docs/models-overview) for additional
+ details and options.
stream: Whether to incrementally stream the response using server-sent events.
- See [streaming](https://docs.anthropic.com/claude/reference/messages-streaming)
- for details.
+ See [streaming](https://docs.anthropic.com/en/api/messages-streaming) for
+ details.
metadata: An object describing metadata about the request.
@@ -713,7 +731,7 @@ def create(
A system prompt is a way of providing context and instructions to Claude, such
as specifying a particular goal or role. See our
- [guide to system prompts](https://docs.anthropic.com/claude/docs/system-prompts).
+ [guide to system prompts](https://docs.anthropic.com/en/docs/system-prompts).
temperature: Amount of randomness injected into the response.
@@ -724,6 +742,9 @@ def create(
Note that even with `temperature` of `0.0`, the results will not be fully
deterministic.
+ tool_choice: How the model should use the provided tools. The model can use a specific tool,
+ any available tool, or decide by itself.
+
tools: [beta] Definitions of tools that the model may use.
If you include `tools` in your API request, the model may return `tool_use`
@@ -791,7 +812,7 @@ def create(
functions, or more generally whenever you want the model to produce a particular
JSON structure of output.
- See our [beta guide](https://docs.anthropic.com/claude/docs/tool-use) for more
+ See our [beta guide](https://docs.anthropic.com/en/docs/tool-use) for more
details.
top_k: Only sample from the top K options for each subsequent token.
@@ -834,6 +855,7 @@ def create(
stream: Literal[False] | Literal[True] | NotGiven = NOT_GIVEN,
system: str | NotGiven = NOT_GIVEN,
temperature: float | NotGiven = NOT_GIVEN,
+ tool_choice: message_create_params.ToolChoice | NotGiven = NOT_GIVEN,
tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
top_k: int | NotGiven = NOT_GIVEN,
top_p: float | NotGiven = NOT_GIVEN,
@@ -843,8 +865,8 @@ def create(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = 600,
- ) -> ToolsBetaMessage | Stream[MessageStreamEvent]:
- extra_headers = {"anthropic-beta": "tools-2024-04-04", **(extra_headers or {})}
+ ) -> ToolsBetaMessage | Stream[ToolsBetaMessageStreamEvent]:
+ extra_headers = {"anthropic-beta": "tools-2024-05-16", **(extra_headers or {})}
return self._post(
"/v1/messages?beta=tools",
body=maybe_transform(
@@ -857,6 +879,7 @@ def create(
"stream": stream,
"system": system,
"temperature": temperature,
+ "tool_choice": tool_choice,
"tools": tools,
"top_k": top_k,
"top_p": top_p,
@@ -868,8 +891,132 @@ def create(
),
cast_to=ToolsBetaMessage,
stream=stream or False,
- stream_cls=Stream[MessageStreamEvent],
+ stream_cls=Stream[ToolsBetaMessageStreamEvent],
+ )
+
+ @overload
+ def stream(
+ self,
+ *,
+ max_tokens: int,
+ messages: Iterable[ToolsBetaMessageParam],
+ model: str,
+ metadata: message_create_params.Metadata | NotGiven = NOT_GIVEN,
+ stop_sequences: List[str] | NotGiven = NOT_GIVEN,
+ system: str | NotGiven = NOT_GIVEN,
+ temperature: float | NotGiven = NOT_GIVEN,
+ tool_choice: message_create_params.ToolChoice | NotGiven = NOT_GIVEN,
+ tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
+ top_k: int | NotGiven = NOT_GIVEN,
+ top_p: float | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> ToolsBetaMessageStreamManager[ToolsBetaMessageStream]:
+ """Create a message stream with the beta tools API.
+
+ https://docs.anthropic.com/en/docs/tool-use-examples
+
+ Note: unlike the rest of the SDK, this method requires `pydantic >= 2.7`.
+ """
+ ...
+
+ @overload
+ def stream(
+ self,
+ *,
+ max_tokens: int,
+ messages: Iterable[ToolsBetaMessageParam],
+ model: str,
+ metadata: message_create_params.Metadata | NotGiven = NOT_GIVEN,
+ stop_sequences: List[str] | NotGiven = NOT_GIVEN,
+ system: str | NotGiven = NOT_GIVEN,
+ temperature: float | NotGiven = NOT_GIVEN,
+ tool_choice: message_create_params.ToolChoice | NotGiven = NOT_GIVEN,
+ tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
+ top_k: int | NotGiven = NOT_GIVEN,
+ top_p: float | NotGiven = NOT_GIVEN,
+ event_handler: type[ToolsBetaMessageStreamT],
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> ToolsBetaMessageStreamManager[ToolsBetaMessageStreamT]:
+ """Create a message stream with the beta tools API.
+
+ https://docs.anthropic.com/en/docs/tool-use-examples
+
+ Note: unlike the rest of the SDK, this method requires `pydantic >= 2.7`.
+ """
+ ...
+
+ def stream(
+ self,
+ *,
+ max_tokens: int,
+ messages: Iterable[ToolsBetaMessageParam],
+ model: str,
+ metadata: message_create_params.Metadata | NotGiven = NOT_GIVEN,
+ stop_sequences: List[str] | NotGiven = NOT_GIVEN,
+ system: str | NotGiven = NOT_GIVEN,
+ temperature: float | NotGiven = NOT_GIVEN,
+ tool_choice: message_create_params.ToolChoice | NotGiven = NOT_GIVEN,
+ tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
+ top_k: int | NotGiven = NOT_GIVEN,
+ top_p: float | NotGiven = NOT_GIVEN,
+ event_handler: type[ToolsBetaMessageStreamT] = ToolsBetaMessageStream, # type: ignore[assignment]
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> ToolsBetaMessageStreamManager[ToolsBetaMessageStream] | ToolsBetaMessageStreamManager[ToolsBetaMessageStreamT]:
+ """Create a message stream with the beta tools API.
+
+ https://docs.anthropic.com/en/docs/tool-use-examples
+
+ Note: unlike the rest of the SDK, this method requires `pydantic >= 2.7`.
+ """
+ extra_headers = {
+ "X-Stainless-Stream-Helper": "messages",
+ "X-Stainless-Custom-Event-Handler": "true" if event_handler != ToolsBetaMessageStream else "false",
+ "anthropic-beta": "tools-2024-05-16",
+ **(extra_headers or {}),
+ }
+ make_request = partial(
+ self._post,
+ "/v1/messages?beta=tools",
+ body=maybe_transform(
+ {
+ "max_tokens": max_tokens,
+ "messages": messages,
+ "model": model,
+ "metadata": metadata,
+ "stop_sequences": stop_sequences,
+ "stream": True,
+ "system": system,
+ "temperature": temperature,
+ "tool_choice": tool_choice,
+ "tools": tools,
+ "top_k": top_k,
+ "top_p": top_p,
+ },
+ message_create_params.MessageCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ToolsBetaMessage,
+ stream=True,
+ stream_cls=event_handler,
)
+ return ToolsBetaMessageStreamManager(make_request)
class AsyncMessages(AsyncAPIResource):
@@ -893,6 +1040,7 @@ async def create(
stream: Literal[False] | NotGiven = NOT_GIVEN,
system: str | NotGiven = NOT_GIVEN,
temperature: float | NotGiven = NOT_GIVEN,
+ tool_choice: message_create_params.ToolChoice | NotGiven = NOT_GIVEN,
tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
top_k: int | NotGiven = NOT_GIVEN,
top_p: float | NotGiven = NOT_GIVEN,
@@ -919,7 +1067,7 @@ async def create(
only specifies the absolute maximum number of tokens to generate.
Different models have different maximum values for this parameter. See
- [models](https://docs.anthropic.com/claude/docs/models-overview) for details.
+ [models](https://docs.anthropic.com/en/docs/models-overview) for details.
messages: Input messages.
@@ -999,18 +1147,18 @@ async def create(
We currently support the `base64` source type for images, and the `image/jpeg`,
`image/png`, `image/gif`, and `image/webp` media types.
- See [examples](https://docs.anthropic.com/claude/reference/messages-examples)
- for more input examples.
+ See [examples](https://docs.anthropic.com/en/api/messages-examples) for more
+ input examples.
Note that if you want to include a
- [system prompt](https://docs.anthropic.com/claude/docs/system-prompts), you can
- use the top-level `system` parameter — there is no `"system"` role for input
+ [system prompt](https://docs.anthropic.com/en/docs/system-prompts), you can use
+ the top-level `system` parameter — there is no `"system"` role for input
messages in the Messages API.
model: The model that will complete your prompt.
- See [models](https://docs.anthropic.com/claude/docs/models-overview) for
- additional details and options.
+ See [models](https://docs.anthropic.com/en/docs/models-overview) for additional
+ details and options.
metadata: An object describing metadata about the request.
@@ -1026,14 +1174,14 @@ async def create(
stream: Whether to incrementally stream the response using server-sent events.
- See [streaming](https://docs.anthropic.com/claude/reference/messages-streaming)
- for details.
+ See [streaming](https://docs.anthropic.com/en/api/messages-streaming) for
+ details.
system: System prompt.
A system prompt is a way of providing context and instructions to Claude, such
as specifying a particular goal or role. See our
- [guide to system prompts](https://docs.anthropic.com/claude/docs/system-prompts).
+ [guide to system prompts](https://docs.anthropic.com/en/docs/system-prompts).
temperature: Amount of randomness injected into the response.
@@ -1044,6 +1192,9 @@ async def create(
Note that even with `temperature` of `0.0`, the results will not be fully
deterministic.
+ tool_choice: How the model should use the provided tools. The model can use a specific tool,
+ any available tool, or decide by itself.
+
tools: [beta] Definitions of tools that the model may use.
If you include `tools` in your API request, the model may return `tool_use`
@@ -1111,7 +1262,7 @@ async def create(
functions, or more generally whenever you want the model to produce a particular
JSON structure of output.
- See our [beta guide](https://docs.anthropic.com/claude/docs/tool-use) for more
+ See our [beta guide](https://docs.anthropic.com/en/docs/tool-use) for more
details.
top_k: Only sample from the top K options for each subsequent token.
@@ -1154,6 +1305,7 @@ async def create(
stop_sequences: List[str] | NotGiven = NOT_GIVEN,
system: str | NotGiven = NOT_GIVEN,
temperature: float | NotGiven = NOT_GIVEN,
+ tool_choice: message_create_params.ToolChoice | NotGiven = NOT_GIVEN,
tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
top_k: int | NotGiven = NOT_GIVEN,
top_p: float | NotGiven = NOT_GIVEN,
@@ -1163,7 +1315,7 @@ async def create(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = 600,
- ) -> AsyncStream[MessageStreamEvent]:
+ ) -> AsyncStream[ToolsBetaMessageStreamEvent]:
"""
Create a Message.
@@ -1180,7 +1332,7 @@ async def create(
only specifies the absolute maximum number of tokens to generate.
Different models have different maximum values for this parameter. See
- [models](https://docs.anthropic.com/claude/docs/models-overview) for details.
+ [models](https://docs.anthropic.com/en/docs/models-overview) for details.
messages: Input messages.
@@ -1260,23 +1412,23 @@ async def create(
We currently support the `base64` source type for images, and the `image/jpeg`,
`image/png`, `image/gif`, and `image/webp` media types.
- See [examples](https://docs.anthropic.com/claude/reference/messages-examples)
- for more input examples.
+ See [examples](https://docs.anthropic.com/en/api/messages-examples) for more
+ input examples.
Note that if you want to include a
- [system prompt](https://docs.anthropic.com/claude/docs/system-prompts), you can
- use the top-level `system` parameter — there is no `"system"` role for input
+ [system prompt](https://docs.anthropic.com/en/docs/system-prompts), you can use
+ the top-level `system` parameter — there is no `"system"` role for input
messages in the Messages API.
model: The model that will complete your prompt.
- See [models](https://docs.anthropic.com/claude/docs/models-overview) for
- additional details and options.
+ See [models](https://docs.anthropic.com/en/docs/models-overview) for additional
+ details and options.
stream: Whether to incrementally stream the response using server-sent events.
- See [streaming](https://docs.anthropic.com/claude/reference/messages-streaming)
- for details.
+ See [streaming](https://docs.anthropic.com/en/api/messages-streaming) for
+ details.
metadata: An object describing metadata about the request.
@@ -1294,7 +1446,7 @@ async def create(
A system prompt is a way of providing context and instructions to Claude, such
as specifying a particular goal or role. See our
- [guide to system prompts](https://docs.anthropic.com/claude/docs/system-prompts).
+ [guide to system prompts](https://docs.anthropic.com/en/docs/system-prompts).
temperature: Amount of randomness injected into the response.
@@ -1305,6 +1457,9 @@ async def create(
Note that even with `temperature` of `0.0`, the results will not be fully
deterministic.
+ tool_choice: How the model should use the provided tools. The model can use a specific tool,
+ any available tool, or decide by itself.
+
tools: [beta] Definitions of tools that the model may use.
If you include `tools` in your API request, the model may return `tool_use`
@@ -1372,7 +1527,7 @@ async def create(
functions, or more generally whenever you want the model to produce a particular
JSON structure of output.
- See our [beta guide](https://docs.anthropic.com/claude/docs/tool-use) for more
+ See our [beta guide](https://docs.anthropic.com/en/docs/tool-use) for more
details.
top_k: Only sample from the top K options for each subsequent token.
@@ -1415,6 +1570,7 @@ async def create(
stop_sequences: List[str] | NotGiven = NOT_GIVEN,
system: str | NotGiven = NOT_GIVEN,
temperature: float | NotGiven = NOT_GIVEN,
+ tool_choice: message_create_params.ToolChoice | NotGiven = NOT_GIVEN,
tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
top_k: int | NotGiven = NOT_GIVEN,
top_p: float | NotGiven = NOT_GIVEN,
@@ -1424,7 +1580,7 @@ async def create(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = 600,
- ) -> ToolsBetaMessage | AsyncStream[MessageStreamEvent]:
+ ) -> ToolsBetaMessage | AsyncStream[ToolsBetaMessageStreamEvent]:
"""
Create a Message.
@@ -1441,7 +1597,7 @@ async def create(
only specifies the absolute maximum number of tokens to generate.
Different models have different maximum values for this parameter. See
- [models](https://docs.anthropic.com/claude/docs/models-overview) for details.
+ [models](https://docs.anthropic.com/en/docs/models-overview) for details.
messages: Input messages.
@@ -1521,23 +1677,23 @@ async def create(
We currently support the `base64` source type for images, and the `image/jpeg`,
`image/png`, `image/gif`, and `image/webp` media types.
- See [examples](https://docs.anthropic.com/claude/reference/messages-examples)
- for more input examples.
+ See [examples](https://docs.anthropic.com/en/api/messages-examples) for more
+ input examples.
Note that if you want to include a
- [system prompt](https://docs.anthropic.com/claude/docs/system-prompts), you can
- use the top-level `system` parameter — there is no `"system"` role for input
+ [system prompt](https://docs.anthropic.com/en/docs/system-prompts), you can use
+ the top-level `system` parameter — there is no `"system"` role for input
messages in the Messages API.
model: The model that will complete your prompt.
- See [models](https://docs.anthropic.com/claude/docs/models-overview) for
- additional details and options.
+ See [models](https://docs.anthropic.com/en/docs/models-overview) for additional
+ details and options.
stream: Whether to incrementally stream the response using server-sent events.
- See [streaming](https://docs.anthropic.com/claude/reference/messages-streaming)
- for details.
+ See [streaming](https://docs.anthropic.com/en/api/messages-streaming) for
+ details.
metadata: An object describing metadata about the request.
@@ -1555,7 +1711,7 @@ async def create(
A system prompt is a way of providing context and instructions to Claude, such
as specifying a particular goal or role. See our
- [guide to system prompts](https://docs.anthropic.com/claude/docs/system-prompts).
+ [guide to system prompts](https://docs.anthropic.com/en/docs/system-prompts).
temperature: Amount of randomness injected into the response.
@@ -1566,6 +1722,9 @@ async def create(
Note that even with `temperature` of `0.0`, the results will not be fully
deterministic.
+ tool_choice: How the model should use the provided tools. The model can use a specific tool,
+ any available tool, or decide by itself.
+
tools: [beta] Definitions of tools that the model may use.
If you include `tools` in your API request, the model may return `tool_use`
@@ -1633,7 +1792,7 @@ async def create(
functions, or more generally whenever you want the model to produce a particular
JSON structure of output.
- See our [beta guide](https://docs.anthropic.com/claude/docs/tool-use) for more
+ See our [beta guide](https://docs.anthropic.com/en/docs/tool-use) for more
details.
top_k: Only sample from the top K options for each subsequent token.
@@ -1676,6 +1835,7 @@ async def create(
stream: Literal[False] | Literal[True] | NotGiven = NOT_GIVEN,
system: str | NotGiven = NOT_GIVEN,
temperature: float | NotGiven = NOT_GIVEN,
+ tool_choice: message_create_params.ToolChoice | NotGiven = NOT_GIVEN,
tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
top_k: int | NotGiven = NOT_GIVEN,
top_p: float | NotGiven = NOT_GIVEN,
@@ -1685,8 +1845,8 @@ async def create(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = 600,
- ) -> ToolsBetaMessage | AsyncStream[MessageStreamEvent]:
- extra_headers = {"anthropic-beta": "tools-2024-04-04", **(extra_headers or {})}
+ ) -> ToolsBetaMessage | AsyncStream[ToolsBetaMessageStreamEvent]:
+ extra_headers = {"anthropic-beta": "tools-2024-05-16", **(extra_headers or {})}
return await self._post(
"/v1/messages?beta=tools",
body=await async_maybe_transform(
@@ -1699,6 +1859,7 @@ async def create(
"stream": stream,
"system": system,
"temperature": temperature,
+ "tool_choice": tool_choice,
"tools": tools,
"top_k": top_k,
"top_p": top_p,
@@ -1710,8 +1871,134 @@ async def create(
),
cast_to=ToolsBetaMessage,
stream=stream or False,
- stream_cls=AsyncStream[MessageStreamEvent],
+ stream_cls=AsyncStream[ToolsBetaMessageStreamEvent],
+ )
+
+ @overload
+ def stream(
+ self,
+ *,
+ max_tokens: int,
+ messages: Iterable[ToolsBetaMessageParam],
+ model: str,
+ metadata: message_create_params.Metadata | NotGiven = NOT_GIVEN,
+ stop_sequences: List[str] | NotGiven = NOT_GIVEN,
+ system: str | NotGiven = NOT_GIVEN,
+ temperature: float | NotGiven = NOT_GIVEN,
+ tool_choice: message_create_params.ToolChoice | NotGiven = NOT_GIVEN,
+ tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
+ top_k: int | NotGiven = NOT_GIVEN,
+ top_p: float | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> AsyncToolsBetaMessageStreamManager[AsyncToolsBetaMessageStream]:
+ """Create a message stream with the beta tools API.
+
+ https://docs.anthropic.com/en/docs/tool-use-examples
+
+ Note: unlike the rest of the SDK, this method requires `pydantic >= 2.7`.
+ """
+ ...
+
+ @overload
+ def stream(
+ self,
+ *,
+ max_tokens: int,
+ messages: Iterable[ToolsBetaMessageParam],
+ model: str,
+ metadata: message_create_params.Metadata | NotGiven = NOT_GIVEN,
+ stop_sequences: List[str] | NotGiven = NOT_GIVEN,
+ system: str | NotGiven = NOT_GIVEN,
+ temperature: float | NotGiven = NOT_GIVEN,
+ tool_choice: message_create_params.ToolChoice | NotGiven = NOT_GIVEN,
+ tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
+ top_k: int | NotGiven = NOT_GIVEN,
+ top_p: float | NotGiven = NOT_GIVEN,
+ event_handler: type[AsyncToolsBetaMessageStreamT],
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> AsyncToolsBetaMessageStreamManager[AsyncToolsBetaMessageStreamT]:
+ """Create a message stream with the beta tools API.
+
+ https://docs.anthropic.com/en/docs/tool-use-examples
+
+ Note: unlike the rest of the SDK, this method requires `pydantic >= 2.7`.
+ """
+ ...
+
+ def stream(
+ self,
+ *,
+ max_tokens: int,
+ messages: Iterable[ToolsBetaMessageParam],
+ model: str,
+ metadata: message_create_params.Metadata | NotGiven = NOT_GIVEN,
+ stop_sequences: List[str] | NotGiven = NOT_GIVEN,
+ system: str | NotGiven = NOT_GIVEN,
+ temperature: float | NotGiven = NOT_GIVEN,
+ tool_choice: message_create_params.ToolChoice | NotGiven = NOT_GIVEN,
+ tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
+ top_k: int | NotGiven = NOT_GIVEN,
+ top_p: float | NotGiven = NOT_GIVEN,
+ event_handler: type[AsyncToolsBetaMessageStreamT] = AsyncToolsBetaMessageStream, # type: ignore[assignment]
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> (
+ AsyncToolsBetaMessageStreamManager[AsyncToolsBetaMessageStream]
+ | AsyncToolsBetaMessageStreamManager[AsyncToolsBetaMessageStreamT]
+ ):
+ """Create a message stream with the beta tools API.
+
+ https://docs.anthropic.com/en/docs/tool-use-examples
+
+ Note: unlike the rest of the SDK, this method requires `pydantic >= 2.7`.
+ """
+ extra_headers = {
+ "X-Stainless-Stream-Helper": "messages",
+ "X-Stainless-Custom-Event-Handler": "true" if event_handler != AsyncToolsBetaMessageStream else "false",
+ "anthropic-beta": "tools-2024-05-16",
+ **(extra_headers or {}),
+ }
+ request = self._post(
+ "/v1/messages?beta=tools",
+ body=maybe_transform(
+ {
+ "max_tokens": max_tokens,
+ "messages": messages,
+ "model": model,
+ "metadata": metadata,
+ "stop_sequences": stop_sequences,
+ "stream": True,
+ "system": system,
+ "temperature": temperature,
+ "tool_choice": tool_choice,
+ "tools": tools,
+ "top_k": top_k,
+ "top_p": top_p,
+ },
+ message_create_params.MessageCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ToolsBetaMessage,
+ stream=True,
+ stream_cls=event_handler,
)
+ return AsyncToolsBetaMessageStreamManager(request)
class MessagesWithRawResponse:
diff --git a/src/anthropic/resources/completions.py b/src/anthropic/resources/completions.py
index 65caedb0..97ba680e 100644
--- a/src/anthropic/resources/completions.py
+++ b/src/anthropic/resources/completions.py
@@ -61,11 +61,10 @@ def create(
The Text Completions API is a legacy API.
We recommend using the
- [Messages API](https://docs.anthropic.com/claude/reference/messages_post) going
- forward.
+ [Messages API](https://docs.anthropic.com/en/api/messages) going forward.
Future models and features will not be compatible with Text Completions. See our
- [migration guide](https://docs.anthropic.com/claude/reference/migrating-from-text-completions-to-messages)
+ [migration guide](https://docs.anthropic.com/en/api/migrating-from-text-completions-to-messages)
for guidance in migrating from Text Completions to Messages.
Args:
@@ -76,8 +75,8 @@ def create(
model: The model that will complete your prompt.
- See [models](https://docs.anthropic.com/claude/docs/models-overview) for
- additional details and options.
+ See [models](https://docs.anthropic.com/en/docs/models-overview) for additional
+ details and options.
prompt: The prompt that you want Claude to complete.
@@ -88,11 +87,10 @@ def create(
"\n\nHuman: {userQuestion}\n\nAssistant:"
```
- See
- [prompt validation](https://anthropic.readme.io/claude/reference/prompt-validation)
- and our guide to
- [prompt design](https://docs.anthropic.com/claude/docs/introduction-to-prompt-design)
- for more details.
+ See [prompt validation](https://docs.anthropic.com/en/api/prompt-validation) and
+ our guide to
+ [prompt design](https://docs.anthropic.com/en/docs/intro-to-prompting) for more
+ details.
metadata: An object describing metadata about the request.
@@ -104,9 +102,7 @@ def create(
stream: Whether to incrementally stream the response using server-sent events.
- See
- [streaming](https://docs.anthropic.com/claude/reference/text-completions-streaming)
- for details.
+ See [streaming](https://docs.anthropic.com/en/api/streaming) for details.
temperature: Amount of randomness injected into the response.
@@ -170,11 +166,10 @@ def create(
The Text Completions API is a legacy API.
We recommend using the
- [Messages API](https://docs.anthropic.com/claude/reference/messages_post) going
- forward.
+ [Messages API](https://docs.anthropic.com/en/api/messages) going forward.
Future models and features will not be compatible with Text Completions. See our
- [migration guide](https://docs.anthropic.com/claude/reference/migrating-from-text-completions-to-messages)
+ [migration guide](https://docs.anthropic.com/en/api/migrating-from-text-completions-to-messages)
for guidance in migrating from Text Completions to Messages.
Args:
@@ -185,8 +180,8 @@ def create(
model: The model that will complete your prompt.
- See [models](https://docs.anthropic.com/claude/docs/models-overview) for
- additional details and options.
+ See [models](https://docs.anthropic.com/en/docs/models-overview) for additional
+ details and options.
prompt: The prompt that you want Claude to complete.
@@ -197,17 +192,14 @@ def create(
"\n\nHuman: {userQuestion}\n\nAssistant:"
```
- See
- [prompt validation](https://anthropic.readme.io/claude/reference/prompt-validation)
- and our guide to
- [prompt design](https://docs.anthropic.com/claude/docs/introduction-to-prompt-design)
- for more details.
+ See [prompt validation](https://docs.anthropic.com/en/api/prompt-validation) and
+ our guide to
+ [prompt design](https://docs.anthropic.com/en/docs/intro-to-prompting) for more
+ details.
stream: Whether to incrementally stream the response using server-sent events.
- See
- [streaming](https://docs.anthropic.com/claude/reference/text-completions-streaming)
- for details.
+ See [streaming](https://docs.anthropic.com/en/api/streaming) for details.
metadata: An object describing metadata about the request.
@@ -279,11 +271,10 @@ def create(
The Text Completions API is a legacy API.
We recommend using the
- [Messages API](https://docs.anthropic.com/claude/reference/messages_post) going
- forward.
+ [Messages API](https://docs.anthropic.com/en/api/messages) going forward.
Future models and features will not be compatible with Text Completions. See our
- [migration guide](https://docs.anthropic.com/claude/reference/migrating-from-text-completions-to-messages)
+ [migration guide](https://docs.anthropic.com/en/api/migrating-from-text-completions-to-messages)
for guidance in migrating from Text Completions to Messages.
Args:
@@ -294,8 +285,8 @@ def create(
model: The model that will complete your prompt.
- See [models](https://docs.anthropic.com/claude/docs/models-overview) for
- additional details and options.
+ See [models](https://docs.anthropic.com/en/docs/models-overview) for additional
+ details and options.
prompt: The prompt that you want Claude to complete.
@@ -306,17 +297,14 @@ def create(
"\n\nHuman: {userQuestion}\n\nAssistant:"
```
- See
- [prompt validation](https://anthropic.readme.io/claude/reference/prompt-validation)
- and our guide to
- [prompt design](https://docs.anthropic.com/claude/docs/introduction-to-prompt-design)
- for more details.
+ See [prompt validation](https://docs.anthropic.com/en/api/prompt-validation) and
+ our guide to
+ [prompt design](https://docs.anthropic.com/en/docs/intro-to-prompting) for more
+ details.
stream: Whether to incrementally stream the response using server-sent events.
- See
- [streaming](https://docs.anthropic.com/claude/reference/text-completions-streaming)
- for details.
+ See [streaming](https://docs.anthropic.com/en/api/streaming) for details.
metadata: An object describing metadata about the request.
@@ -442,11 +430,10 @@ async def create(
The Text Completions API is a legacy API.
We recommend using the
- [Messages API](https://docs.anthropic.com/claude/reference/messages_post) going
- forward.
+ [Messages API](https://docs.anthropic.com/en/api/messages) going forward.
Future models and features will not be compatible with Text Completions. See our
- [migration guide](https://docs.anthropic.com/claude/reference/migrating-from-text-completions-to-messages)
+ [migration guide](https://docs.anthropic.com/en/api/migrating-from-text-completions-to-messages)
for guidance in migrating from Text Completions to Messages.
Args:
@@ -457,8 +444,8 @@ async def create(
model: The model that will complete your prompt.
- See [models](https://docs.anthropic.com/claude/docs/models-overview) for
- additional details and options.
+ See [models](https://docs.anthropic.com/en/docs/models-overview) for additional
+ details and options.
prompt: The prompt that you want Claude to complete.
@@ -469,11 +456,10 @@ async def create(
"\n\nHuman: {userQuestion}\n\nAssistant:"
```
- See
- [prompt validation](https://anthropic.readme.io/claude/reference/prompt-validation)
- and our guide to
- [prompt design](https://docs.anthropic.com/claude/docs/introduction-to-prompt-design)
- for more details.
+ See [prompt validation](https://docs.anthropic.com/en/api/prompt-validation) and
+ our guide to
+ [prompt design](https://docs.anthropic.com/en/docs/intro-to-prompting) for more
+ details.
metadata: An object describing metadata about the request.
@@ -485,9 +471,7 @@ async def create(
stream: Whether to incrementally stream the response using server-sent events.
- See
- [streaming](https://docs.anthropic.com/claude/reference/text-completions-streaming)
- for details.
+ See [streaming](https://docs.anthropic.com/en/api/streaming) for details.
temperature: Amount of randomness injected into the response.
@@ -551,11 +535,10 @@ async def create(
The Text Completions API is a legacy API.
We recommend using the
- [Messages API](https://docs.anthropic.com/claude/reference/messages_post) going
- forward.
+ [Messages API](https://docs.anthropic.com/en/api/messages) going forward.
Future models and features will not be compatible with Text Completions. See our
- [migration guide](https://docs.anthropic.com/claude/reference/migrating-from-text-completions-to-messages)
+ [migration guide](https://docs.anthropic.com/en/api/migrating-from-text-completions-to-messages)
for guidance in migrating from Text Completions to Messages.
Args:
@@ -566,8 +549,8 @@ async def create(
model: The model that will complete your prompt.
- See [models](https://docs.anthropic.com/claude/docs/models-overview) for
- additional details and options.
+ See [models](https://docs.anthropic.com/en/docs/models-overview) for additional
+ details and options.
prompt: The prompt that you want Claude to complete.
@@ -578,17 +561,14 @@ async def create(
"\n\nHuman: {userQuestion}\n\nAssistant:"
```
- See
- [prompt validation](https://anthropic.readme.io/claude/reference/prompt-validation)
- and our guide to
- [prompt design](https://docs.anthropic.com/claude/docs/introduction-to-prompt-design)
- for more details.
+ See [prompt validation](https://docs.anthropic.com/en/api/prompt-validation) and
+ our guide to
+ [prompt design](https://docs.anthropic.com/en/docs/intro-to-prompting) for more
+ details.
stream: Whether to incrementally stream the response using server-sent events.
- See
- [streaming](https://docs.anthropic.com/claude/reference/text-completions-streaming)
- for details.
+ See [streaming](https://docs.anthropic.com/en/api/streaming) for details.
metadata: An object describing metadata about the request.
@@ -660,11 +640,10 @@ async def create(
The Text Completions API is a legacy API.
We recommend using the
- [Messages API](https://docs.anthropic.com/claude/reference/messages_post) going
- forward.
+ [Messages API](https://docs.anthropic.com/en/api/messages) going forward.
Future models and features will not be compatible with Text Completions. See our
- [migration guide](https://docs.anthropic.com/claude/reference/migrating-from-text-completions-to-messages)
+ [migration guide](https://docs.anthropic.com/en/api/migrating-from-text-completions-to-messages)
for guidance in migrating from Text Completions to Messages.
Args:
@@ -675,8 +654,8 @@ async def create(
model: The model that will complete your prompt.
- See [models](https://docs.anthropic.com/claude/docs/models-overview) for
- additional details and options.
+ See [models](https://docs.anthropic.com/en/docs/models-overview) for additional
+ details and options.
prompt: The prompt that you want Claude to complete.
@@ -687,17 +666,14 @@ async def create(
"\n\nHuman: {userQuestion}\n\nAssistant:"
```
- See
- [prompt validation](https://anthropic.readme.io/claude/reference/prompt-validation)
- and our guide to
- [prompt design](https://docs.anthropic.com/claude/docs/introduction-to-prompt-design)
- for more details.
+ See [prompt validation](https://docs.anthropic.com/en/api/prompt-validation) and
+ our guide to
+ [prompt design](https://docs.anthropic.com/en/docs/intro-to-prompting) for more
+ details.
stream: Whether to incrementally stream the response using server-sent events.
- See
- [streaming](https://docs.anthropic.com/claude/reference/text-completions-streaming)
- for details.
+ See [streaming](https://docs.anthropic.com/en/api/streaming) for details.
metadata: An object describing metadata about the request.
diff --git a/src/anthropic/resources/messages.py b/src/anthropic/resources/messages.py
index b5fd0541..e66b9956 100644
--- a/src/anthropic/resources/messages.py
+++ b/src/anthropic/resources/messages.py
@@ -94,7 +94,7 @@ def create(
only specifies the absolute maximum number of tokens to generate.
Different models have different maximum values for this parameter. See
- [models](https://docs.anthropic.com/claude/docs/models-overview) for details.
+ [models](https://docs.anthropic.com/en/docs/models-overview) for details.
messages: Input messages.
@@ -174,18 +174,18 @@ def create(
We currently support the `base64` source type for images, and the `image/jpeg`,
`image/png`, `image/gif`, and `image/webp` media types.
- See [examples](https://docs.anthropic.com/claude/reference/messages-examples)
- for more input examples.
+ See [examples](https://docs.anthropic.com/en/api/messages-examples) for more
+ input examples.
Note that if you want to include a
- [system prompt](https://docs.anthropic.com/claude/docs/system-prompts), you can
- use the top-level `system` parameter — there is no `"system"` role for input
+ [system prompt](https://docs.anthropic.com/en/docs/system-prompts), you can use
+ the top-level `system` parameter — there is no `"system"` role for input
messages in the Messages API.
model: The model that will complete your prompt.
- See [models](https://docs.anthropic.com/claude/docs/models-overview) for
- additional details and options.
+ See [models](https://docs.anthropic.com/en/docs/models-overview) for additional
+ details and options.
metadata: An object describing metadata about the request.
@@ -201,14 +201,14 @@ def create(
stream: Whether to incrementally stream the response using server-sent events.
- See [streaming](https://docs.anthropic.com/claude/reference/messages-streaming)
- for details.
+ See [streaming](https://docs.anthropic.com/en/api/messages-streaming) for
+ details.
system: System prompt.
A system prompt is a way of providing context and instructions to Claude, such
as specifying a particular goal or role. See our
- [guide to system prompts](https://docs.anthropic.com/claude/docs/system-prompts).
+ [guide to system prompts](https://docs.anthropic.com/en/docs/system-prompts).
temperature: Amount of randomness injected into the response.
@@ -294,7 +294,7 @@ def create(
only specifies the absolute maximum number of tokens to generate.
Different models have different maximum values for this parameter. See
- [models](https://docs.anthropic.com/claude/docs/models-overview) for details.
+ [models](https://docs.anthropic.com/en/docs/models-overview) for details.
messages: Input messages.
@@ -374,23 +374,23 @@ def create(
We currently support the `base64` source type for images, and the `image/jpeg`,
`image/png`, `image/gif`, and `image/webp` media types.
- See [examples](https://docs.anthropic.com/claude/reference/messages-examples)
- for more input examples.
+ See [examples](https://docs.anthropic.com/en/api/messages-examples) for more
+ input examples.
Note that if you want to include a
- [system prompt](https://docs.anthropic.com/claude/docs/system-prompts), you can
- use the top-level `system` parameter — there is no `"system"` role for input
+ [system prompt](https://docs.anthropic.com/en/docs/system-prompts), you can use
+ the top-level `system` parameter — there is no `"system"` role for input
messages in the Messages API.
model: The model that will complete your prompt.
- See [models](https://docs.anthropic.com/claude/docs/models-overview) for
- additional details and options.
+ See [models](https://docs.anthropic.com/en/docs/models-overview) for additional
+ details and options.
stream: Whether to incrementally stream the response using server-sent events.
- See [streaming](https://docs.anthropic.com/claude/reference/messages-streaming)
- for details.
+ See [streaming](https://docs.anthropic.com/en/api/messages-streaming) for
+ details.
metadata: An object describing metadata about the request.
@@ -408,7 +408,7 @@ def create(
A system prompt is a way of providing context and instructions to Claude, such
as specifying a particular goal or role. See our
- [guide to system prompts](https://docs.anthropic.com/claude/docs/system-prompts).
+ [guide to system prompts](https://docs.anthropic.com/en/docs/system-prompts).
temperature: Amount of randomness injected into the response.
@@ -494,7 +494,7 @@ def create(
only specifies the absolute maximum number of tokens to generate.
Different models have different maximum values for this parameter. See
- [models](https://docs.anthropic.com/claude/docs/models-overview) for details.
+ [models](https://docs.anthropic.com/en/docs/models-overview) for details.
messages: Input messages.
@@ -574,23 +574,23 @@ def create(
We currently support the `base64` source type for images, and the `image/jpeg`,
`image/png`, `image/gif`, and `image/webp` media types.
- See [examples](https://docs.anthropic.com/claude/reference/messages-examples)
- for more input examples.
+ See [examples](https://docs.anthropic.com/en/api/messages-examples) for more
+ input examples.
Note that if you want to include a
- [system prompt](https://docs.anthropic.com/claude/docs/system-prompts), you can
- use the top-level `system` parameter — there is no `"system"` role for input
+ [system prompt](https://docs.anthropic.com/en/docs/system-prompts), you can use
+ the top-level `system` parameter — there is no `"system"` role for input
messages in the Messages API.
model: The model that will complete your prompt.
- See [models](https://docs.anthropic.com/claude/docs/models-overview) for
- additional details and options.
+ See [models](https://docs.anthropic.com/en/docs/models-overview) for additional
+ details and options.
stream: Whether to incrementally stream the response using server-sent events.
- See [streaming](https://docs.anthropic.com/claude/reference/messages-streaming)
- for details.
+ See [streaming](https://docs.anthropic.com/en/api/messages-streaming) for
+ details.
metadata: An object describing metadata about the request.
@@ -608,7 +608,7 @@ def create(
A system prompt is a way of providing context and instructions to Claude, such
as specifying a particular goal or role. See our
- [guide to system prompts](https://docs.anthropic.com/claude/docs/system-prompts).
+ [guide to system prompts](https://docs.anthropic.com/en/docs/system-prompts).
temperature: Amount of randomness injected into the response.
@@ -890,7 +890,7 @@ async def create(
only specifies the absolute maximum number of tokens to generate.
Different models have different maximum values for this parameter. See
- [models](https://docs.anthropic.com/claude/docs/models-overview) for details.
+ [models](https://docs.anthropic.com/en/docs/models-overview) for details.
messages: Input messages.
@@ -970,18 +970,18 @@ async def create(
We currently support the `base64` source type for images, and the `image/jpeg`,
`image/png`, `image/gif`, and `image/webp` media types.
- See [examples](https://docs.anthropic.com/claude/reference/messages-examples)
- for more input examples.
+ See [examples](https://docs.anthropic.com/en/api/messages-examples) for more
+ input examples.
Note that if you want to include a
- [system prompt](https://docs.anthropic.com/claude/docs/system-prompts), you can
- use the top-level `system` parameter — there is no `"system"` role for input
+ [system prompt](https://docs.anthropic.com/en/docs/system-prompts), you can use
+ the top-level `system` parameter — there is no `"system"` role for input
messages in the Messages API.
model: The model that will complete your prompt.
- See [models](https://docs.anthropic.com/claude/docs/models-overview) for
- additional details and options.
+ See [models](https://docs.anthropic.com/en/docs/models-overview) for additional
+ details and options.
metadata: An object describing metadata about the request.
@@ -997,14 +997,14 @@ async def create(
stream: Whether to incrementally stream the response using server-sent events.
- See [streaming](https://docs.anthropic.com/claude/reference/messages-streaming)
- for details.
+ See [streaming](https://docs.anthropic.com/en/api/messages-streaming) for
+ details.
system: System prompt.
A system prompt is a way of providing context and instructions to Claude, such
as specifying a particular goal or role. See our
- [guide to system prompts](https://docs.anthropic.com/claude/docs/system-prompts).
+ [guide to system prompts](https://docs.anthropic.com/en/docs/system-prompts).
temperature: Amount of randomness injected into the response.
@@ -1090,7 +1090,7 @@ async def create(
only specifies the absolute maximum number of tokens to generate.
Different models have different maximum values for this parameter. See
- [models](https://docs.anthropic.com/claude/docs/models-overview) for details.
+ [models](https://docs.anthropic.com/en/docs/models-overview) for details.
messages: Input messages.
@@ -1170,23 +1170,23 @@ async def create(
We currently support the `base64` source type for images, and the `image/jpeg`,
`image/png`, `image/gif`, and `image/webp` media types.
- See [examples](https://docs.anthropic.com/claude/reference/messages-examples)
- for more input examples.
+ See [examples](https://docs.anthropic.com/en/api/messages-examples) for more
+ input examples.
Note that if you want to include a
- [system prompt](https://docs.anthropic.com/claude/docs/system-prompts), you can
- use the top-level `system` parameter — there is no `"system"` role for input
+ [system prompt](https://docs.anthropic.com/en/docs/system-prompts), you can use
+ the top-level `system` parameter — there is no `"system"` role for input
messages in the Messages API.
model: The model that will complete your prompt.
- See [models](https://docs.anthropic.com/claude/docs/models-overview) for
- additional details and options.
+ See [models](https://docs.anthropic.com/en/docs/models-overview) for additional
+ details and options.
stream: Whether to incrementally stream the response using server-sent events.
- See [streaming](https://docs.anthropic.com/claude/reference/messages-streaming)
- for details.
+ See [streaming](https://docs.anthropic.com/en/api/messages-streaming) for
+ details.
metadata: An object describing metadata about the request.
@@ -1204,7 +1204,7 @@ async def create(
A system prompt is a way of providing context and instructions to Claude, such
as specifying a particular goal or role. See our
- [guide to system prompts](https://docs.anthropic.com/claude/docs/system-prompts).
+ [guide to system prompts](https://docs.anthropic.com/en/docs/system-prompts).
temperature: Amount of randomness injected into the response.
@@ -1290,7 +1290,7 @@ async def create(
only specifies the absolute maximum number of tokens to generate.
Different models have different maximum values for this parameter. See
- [models](https://docs.anthropic.com/claude/docs/models-overview) for details.
+ [models](https://docs.anthropic.com/en/docs/models-overview) for details.
messages: Input messages.
@@ -1370,23 +1370,23 @@ async def create(
We currently support the `base64` source type for images, and the `image/jpeg`,
`image/png`, `image/gif`, and `image/webp` media types.
- See [examples](https://docs.anthropic.com/claude/reference/messages-examples)
- for more input examples.
+ See [examples](https://docs.anthropic.com/en/api/messages-examples) for more
+ input examples.
Note that if you want to include a
- [system prompt](https://docs.anthropic.com/claude/docs/system-prompts), you can
- use the top-level `system` parameter — there is no `"system"` role for input
+ [system prompt](https://docs.anthropic.com/en/docs/system-prompts), you can use
+ the top-level `system` parameter — there is no `"system"` role for input
messages in the Messages API.
model: The model that will complete your prompt.
- See [models](https://docs.anthropic.com/claude/docs/models-overview) for
- additional details and options.
+ See [models](https://docs.anthropic.com/en/docs/models-overview) for additional
+ details and options.
stream: Whether to incrementally stream the response using server-sent events.
- See [streaming](https://docs.anthropic.com/claude/reference/messages-streaming)
- for details.
+ See [streaming](https://docs.anthropic.com/en/api/messages-streaming) for
+ details.
metadata: An object describing metadata about the request.
@@ -1404,7 +1404,7 @@ async def create(
A system prompt is a way of providing context and instructions to Claude, such
as specifying a particular goal or role. See our
- [guide to system prompts](https://docs.anthropic.com/claude/docs/system-prompts).
+ [guide to system prompts](https://docs.anthropic.com/en/docs/system-prompts).
temperature: Amount of randomness injected into the response.
diff --git a/src/anthropic/types/beta/tools/__init__.py b/src/anthropic/types/beta/tools/__init__.py
index 3fc03434..4420e67b 100644
--- a/src/anthropic/types/beta/tools/__init__.py
+++ b/src/anthropic/types/beta/tools/__init__.py
@@ -4,9 +4,13 @@
from .tool_param import ToolParam as ToolParam
from .tool_use_block import ToolUseBlock as ToolUseBlock
+from .input_json_delta import InputJsonDelta as InputJsonDelta
from .tools_beta_message import ToolsBetaMessage as ToolsBetaMessage
from .tool_use_block_param import ToolUseBlockParam as ToolUseBlockParam
from .message_create_params import MessageCreateParams as MessageCreateParams
from .tool_result_block_param import ToolResultBlockParam as ToolResultBlockParam
from .tools_beta_content_block import ToolsBetaContentBlock as ToolsBetaContentBlock
from .tools_beta_message_param import ToolsBetaMessageParam as ToolsBetaMessageParam
+from .tools_beta_message_stream_event import ToolsBetaMessageStreamEvent as ToolsBetaMessageStreamEvent
+from .tools_beta_content_block_delta_event import ToolsBetaContentBlockDeltaEvent as ToolsBetaContentBlockDeltaEvent
+from .tools_beta_content_block_start_event import ToolsBetaContentBlockStartEvent as ToolsBetaContentBlockStartEvent
diff --git a/src/anthropic/types/beta/tools/input_json_delta.py b/src/anthropic/types/beta/tools/input_json_delta.py
new file mode 100644
index 00000000..004a8f67
--- /dev/null
+++ b/src/anthropic/types/beta/tools/input_json_delta.py
@@ -0,0 +1,13 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["InputJsonDelta"]
+
+
+class InputJsonDelta(BaseModel):
+ partial_json: str
+
+ type: Literal["input_json_delta"]
diff --git a/src/anthropic/types/beta/tools/message_create_params.py b/src/anthropic/types/beta/tools/message_create_params.py
index 8eca73c5..878f9bca 100644
--- a/src/anthropic/types/beta/tools/message_create_params.py
+++ b/src/anthropic/types/beta/tools/message_create_params.py
@@ -8,7 +8,16 @@
from .tool_param import ToolParam
from .tools_beta_message_param import ToolsBetaMessageParam
-__all__ = ["MessageCreateParamsBase", "Metadata", "MessageCreateParamsNonStreaming", "MessageCreateParamsStreaming"]
+__all__ = [
+ "MessageCreateParamsBase",
+ "Metadata",
+ "ToolChoice",
+ "ToolChoiceToolChoiceAuto",
+ "ToolChoiceToolChoiceAny",
+ "ToolChoiceToolChoiceTool",
+ "MessageCreateParamsNonStreaming",
+ "MessageCreateParamsStreaming",
+]
class MessageCreateParamsBase(TypedDict, total=False):
@@ -19,7 +28,7 @@ class MessageCreateParamsBase(TypedDict, total=False):
only specifies the absolute maximum number of tokens to generate.
Different models have different maximum values for this parameter. See
- [models](https://docs.anthropic.com/claude/docs/models-overview) for details.
+ [models](https://docs.anthropic.com/en/docs/models-overview) for details.
"""
messages: Required[Iterable[ToolsBetaMessageParam]]
@@ -101,20 +110,20 @@ class MessageCreateParamsBase(TypedDict, total=False):
We currently support the `base64` source type for images, and the `image/jpeg`,
`image/png`, `image/gif`, and `image/webp` media types.
- See [examples](https://docs.anthropic.com/claude/reference/messages-examples)
- for more input examples.
+ See [examples](https://docs.anthropic.com/en/api/messages-examples) for more
+ input examples.
Note that if you want to include a
- [system prompt](https://docs.anthropic.com/claude/docs/system-prompts), you can
- use the top-level `system` parameter — there is no `"system"` role for input
+ [system prompt](https://docs.anthropic.com/en/docs/system-prompts), you can use
+ the top-level `system` parameter — there is no `"system"` role for input
messages in the Messages API.
"""
model: Required[str]
"""The model that will complete your prompt.
- See [models](https://docs.anthropic.com/claude/docs/models-overview) for
- additional details and options.
+ See [models](https://docs.anthropic.com/en/docs/models-overview) for additional
+ details and options.
"""
metadata: Metadata
@@ -137,7 +146,7 @@ class MessageCreateParamsBase(TypedDict, total=False):
A system prompt is a way of providing context and instructions to Claude, such
as specifying a particular goal or role. See our
- [guide to system prompts](https://docs.anthropic.com/claude/docs/system-prompts).
+ [guide to system prompts](https://docs.anthropic.com/en/docs/system-prompts).
"""
temperature: float
@@ -151,6 +160,12 @@ class MessageCreateParamsBase(TypedDict, total=False):
deterministic.
"""
+ tool_choice: ToolChoice
+ """How the model should use the provided tools.
+
+ The model can use a specific tool, any available tool, or decide by itself.
+ """
+
tools: Iterable[ToolParam]
"""[beta] Definitions of tools that the model may use.
@@ -219,7 +234,7 @@ class MessageCreateParamsBase(TypedDict, total=False):
functions, or more generally whenever you want the model to produce a particular
JSON structure of output.
- See our [beta guide](https://docs.anthropic.com/claude/docs/tool-use) for more
+ See our [beta guide](https://docs.anthropic.com/en/docs/tool-use) for more
details.
"""
@@ -256,12 +271,30 @@ class Metadata(TypedDict, total=False):
"""
+class ToolChoiceToolChoiceAuto(TypedDict, total=False):
+ type: Required[Literal["auto"]]
+
+
+class ToolChoiceToolChoiceAny(TypedDict, total=False):
+ type: Required[Literal["any"]]
+
+
+class ToolChoiceToolChoiceTool(TypedDict, total=False):
+ name: Required[str]
+ """The name of the tool to use."""
+
+ type: Required[Literal["tool"]]
+
+
+ToolChoice = Union[ToolChoiceToolChoiceAuto, ToolChoiceToolChoiceAny, ToolChoiceToolChoiceTool]
+
+
class MessageCreateParamsNonStreaming(MessageCreateParamsBase):
stream: Literal[False]
"""Whether to incrementally stream the response using server-sent events.
- See [streaming](https://docs.anthropic.com/claude/reference/messages-streaming)
- for details.
+ See [streaming](https://docs.anthropic.com/en/api/messages-streaming) for
+ details.
"""
@@ -269,8 +302,8 @@ class MessageCreateParamsStreaming(MessageCreateParamsBase):
stream: Required[Literal[True]]
"""Whether to incrementally stream the response using server-sent events.
- See [streaming](https://docs.anthropic.com/claude/reference/messages-streaming)
- for details.
+ See [streaming](https://docs.anthropic.com/en/api/messages-streaming) for
+ details.
"""
diff --git a/src/anthropic/types/beta/tools/tool_result_block_param.py b/src/anthropic/types/beta/tools/tool_result_block_param.py
index b63f2bea..eec2270a 100644
--- a/src/anthropic/types/beta/tools/tool_result_block_param.py
+++ b/src/anthropic/types/beta/tools/tool_result_block_param.py
@@ -2,12 +2,15 @@
from __future__ import annotations
-from typing import Iterable
+from typing import Union, Iterable
from typing_extensions import Literal, Required, TypedDict
from ...text_block_param import TextBlockParam
+from ...image_block_param import ImageBlockParam
-__all__ = ["ToolResultBlockParam"]
+__all__ = ["ToolResultBlockParam", "Content"]
+
+Content = Union[TextBlockParam, ImageBlockParam]
class ToolResultBlockParam(TypedDict, total=False):
@@ -15,6 +18,6 @@ class ToolResultBlockParam(TypedDict, total=False):
type: Required[Literal["tool_result"]]
- content: Iterable[TextBlockParam]
+ content: Iterable[Content]
is_error: bool
diff --git a/src/anthropic/types/beta/tools/tools_beta_content_block_delta_event.py b/src/anthropic/types/beta/tools/tools_beta_content_block_delta_event.py
new file mode 100644
index 00000000..88786345
--- /dev/null
+++ b/src/anthropic/types/beta/tools/tools_beta_content_block_delta_event.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import Literal, Annotated
+
+from ...._utils import PropertyInfo
+from ...._models import BaseModel
+from ...text_delta import TextDelta
+from .input_json_delta import InputJsonDelta
+
+__all__ = ["ToolsBetaContentBlockDeltaEvent", "Delta"]
+
+Delta = Annotated[Union[TextDelta, InputJsonDelta], PropertyInfo(discriminator="type")]
+
+
+class ToolsBetaContentBlockDeltaEvent(BaseModel):
+ delta: Delta
+
+ index: int
+
+ type: Literal["content_block_delta"]
diff --git a/src/anthropic/types/beta/tools/tools_beta_content_block_start_event.py b/src/anthropic/types/beta/tools/tools_beta_content_block_start_event.py
new file mode 100644
index 00000000..11deecce
--- /dev/null
+++ b/src/anthropic/types/beta/tools/tools_beta_content_block_start_event.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import Literal, Annotated
+
+from ...._utils import PropertyInfo
+from ...._models import BaseModel
+from ...text_block import TextBlock
+from .tool_use_block import ToolUseBlock
+
+__all__ = ["ToolsBetaContentBlockStartEvent", "ContentBlock"]
+
+ContentBlock = Annotated[Union[TextBlock, ToolUseBlock], PropertyInfo(discriminator="type")]
+
+
+class ToolsBetaContentBlockStartEvent(BaseModel):
+ content_block: ContentBlock
+
+ index: int
+
+ type: Literal["content_block_start"]
diff --git a/src/anthropic/types/beta/tools/tools_beta_message_stream_event.py b/src/anthropic/types/beta/tools/tools_beta_message_stream_event.py
new file mode 100644
index 00000000..3459f2b5
--- /dev/null
+++ b/src/anthropic/types/beta/tools/tools_beta_message_stream_event.py
@@ -0,0 +1,26 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import Annotated
+
+from ...._utils import PropertyInfo
+from ...message_stop_event import MessageStopEvent
+from ...message_delta_event import MessageDeltaEvent
+from ...message_start_event import MessageStartEvent
+from ...content_block_stop_event import ContentBlockStopEvent
+from .tools_beta_content_block_delta_event import ToolsBetaContentBlockDeltaEvent
+from .tools_beta_content_block_start_event import ToolsBetaContentBlockStartEvent
+
+__all__ = ["ToolsBetaMessageStreamEvent"]
+
+ToolsBetaMessageStreamEvent = Annotated[
+ Union[
+ MessageStartEvent,
+ MessageDeltaEvent,
+ MessageStopEvent,
+ ToolsBetaContentBlockStartEvent,
+ ToolsBetaContentBlockDeltaEvent,
+ ContentBlockStopEvent,
+ ],
+ PropertyInfo(discriminator="type"),
+]
diff --git a/src/anthropic/types/completion_create_params.py b/src/anthropic/types/completion_create_params.py
index db67310b..be824ebf 100644
--- a/src/anthropic/types/completion_create_params.py
+++ b/src/anthropic/types/completion_create_params.py
@@ -28,8 +28,8 @@ class CompletionCreateParamsBase(TypedDict, total=False):
model: Required[Union[str, Literal["claude-2.0", "claude-2.1", "claude-instant-1.2"]]]
"""The model that will complete your prompt.
- See [models](https://docs.anthropic.com/claude/docs/models-overview) for
- additional details and options.
+ See [models](https://docs.anthropic.com/en/docs/models-overview) for additional
+ details and options.
"""
prompt: Required[str]
@@ -42,11 +42,10 @@ class CompletionCreateParamsBase(TypedDict, total=False):
"\n\nHuman: {userQuestion}\n\nAssistant:"
```
- See
- [prompt validation](https://anthropic.readme.io/claude/reference/prompt-validation)
- and our guide to
- [prompt design](https://docs.anthropic.com/claude/docs/introduction-to-prompt-design)
- for more details.
+ See [prompt validation](https://docs.anthropic.com/en/api/prompt-validation) and
+ our guide to
+ [prompt design](https://docs.anthropic.com/en/docs/intro-to-prompting) for more
+ details.
"""
metadata: Metadata
@@ -108,9 +107,7 @@ class CompletionCreateParamsNonStreaming(CompletionCreateParamsBase):
stream: Literal[False]
"""Whether to incrementally stream the response using server-sent events.
- See
- [streaming](https://docs.anthropic.com/claude/reference/text-completions-streaming)
- for details.
+ See [streaming](https://docs.anthropic.com/en/api/streaming) for details.
"""
@@ -118,9 +115,7 @@ class CompletionCreateParamsStreaming(CompletionCreateParamsBase):
stream: Required[Literal[True]]
"""Whether to incrementally stream the response using server-sent events.
- See
- [streaming](https://docs.anthropic.com/claude/reference/text-completions-streaming)
- for details.
+ See [streaming](https://docs.anthropic.com/en/api/streaming) for details.
"""
diff --git a/src/anthropic/types/message_create_params.py b/src/anthropic/types/message_create_params.py
index cc8eaa2d..b20699bc 100644
--- a/src/anthropic/types/message_create_params.py
+++ b/src/anthropic/types/message_create_params.py
@@ -18,7 +18,7 @@ class MessageCreateParamsBase(TypedDict, total=False):
only specifies the absolute maximum number of tokens to generate.
Different models have different maximum values for this parameter. See
- [models](https://docs.anthropic.com/claude/docs/models-overview) for details.
+ [models](https://docs.anthropic.com/en/docs/models-overview) for details.
"""
messages: Required[Iterable[MessageParam]]
@@ -100,12 +100,12 @@ class MessageCreateParamsBase(TypedDict, total=False):
We currently support the `base64` source type for images, and the `image/jpeg`,
`image/png`, `image/gif`, and `image/webp` media types.
- See [examples](https://docs.anthropic.com/claude/reference/messages-examples)
- for more input examples.
+ See [examples](https://docs.anthropic.com/en/api/messages-examples) for more
+ input examples.
Note that if you want to include a
- [system prompt](https://docs.anthropic.com/claude/docs/system-prompts), you can
- use the top-level `system` parameter — there is no `"system"` role for input
+ [system prompt](https://docs.anthropic.com/en/docs/system-prompts), you can use
+ the top-level `system` parameter — there is no `"system"` role for input
messages in the Messages API.
"""
@@ -124,8 +124,8 @@ class MessageCreateParamsBase(TypedDict, total=False):
]
"""The model that will complete your prompt.
- See [models](https://docs.anthropic.com/claude/docs/models-overview) for
- additional details and options.
+ See [models](https://docs.anthropic.com/en/docs/models-overview) for additional
+ details and options.
"""
metadata: Metadata
@@ -148,7 +148,7 @@ class MessageCreateParamsBase(TypedDict, total=False):
A system prompt is a way of providing context and instructions to Claude, such
as specifying a particular goal or role. See our
- [guide to system prompts](https://docs.anthropic.com/claude/docs/system-prompts).
+ [guide to system prompts](https://docs.anthropic.com/en/docs/system-prompts).
"""
temperature: float
@@ -199,8 +199,8 @@ class MessageCreateParamsNonStreaming(MessageCreateParamsBase):
stream: Literal[False]
"""Whether to incrementally stream the response using server-sent events.
- See [streaming](https://docs.anthropic.com/claude/reference/messages-streaming)
- for details.
+ See [streaming](https://docs.anthropic.com/en/api/messages-streaming) for
+ details.
"""
@@ -208,8 +208,8 @@ class MessageCreateParamsStreaming(MessageCreateParamsBase):
stream: Required[Literal[True]]
"""Whether to incrementally stream the response using server-sent events.
- See [streaming](https://docs.anthropic.com/claude/reference/messages-streaming)
- for details.
+ See [streaming](https://docs.anthropic.com/en/api/messages-streaming) for
+ details.
"""
diff --git a/tests/api_resources/beta/tools/test_messages.py b/tests/api_resources/beta/tools/test_messages.py
index a053c3eb..9d63e054 100644
--- a/tests/api_resources/beta/tools/test_messages.py
+++ b/tests/api_resources/beta/tools/test_messages.py
@@ -47,6 +47,7 @@ def test_method_create_with_all_params_overload_1(self, client: Anthropic) -> No
stream=False,
system="Today's date is 2024-01-01.",
temperature=1,
+ tool_choice={"type": "auto"},
tools=[
{
"description": "Get the current weather in a given location",
@@ -174,6 +175,7 @@ def test_method_create_with_all_params_overload_2(self, client: Anthropic) -> No
stop_sequences=["string", "string", "string"],
system="Today's date is 2024-01-01.",
temperature=1,
+ tool_choice={"type": "auto"},
tools=[
{
"description": "Get the current weather in a given location",
@@ -305,6 +307,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn
stream=False,
system="Today's date is 2024-01-01.",
temperature=1,
+ tool_choice={"type": "auto"},
tools=[
{
"description": "Get the current weather in a given location",
@@ -432,6 +435,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn
stop_sequences=["string", "string", "string"],
system="Today's date is 2024-01-01.",
temperature=1,
+ tool_choice={"type": "auto"},
tools=[
{
"description": "Get the current weather in a given location",