class Anthropic::Resources::Beta::Messages

def count_tokens(params)

Other tags:
    See: Anthropic::Models::Beta::MessageCountTokensParams -

Returns:
  • (Anthropic::Models::Beta::BetaMessageTokensCount) -

Parameters:
  • request_options (Anthropic::RequestOptions, Hash{Symbol=>Object}, nil) --
  • betas (Array) -- Header param: Optional header to specify the beta version(s) you want to use.
  • tools (Array) -- Body param: Definitions of tools that the model may use.
  • tool_choice (Anthropic::Models::Beta::BetaToolChoiceAuto, Anthropic::Models::Beta::BetaToolChoiceAny, Anthropic::Models::Beta::BetaToolChoiceTool, Anthropic::Models::Beta::BetaToolChoiceNone) -- Body param: How the model should use the provided tools. The model can use a specific tool, any available tool, decide by itself, or not use tools at all.
  • thinking (Anthropic::Models::Beta::BetaThinkingConfigEnabled, Anthropic::Models::Beta::BetaThinkingConfigDisabled) -- Body param: Configuration for enabling Claude's extended thinking.
  • system_ (String, Array) -- Body param: System prompt.
  • mcp_servers (Array) -- Body param: MCP servers to be utilized in this request.
  • model (Symbol, String, Anthropic::Models::Model) -- Body param: The model that will complete your prompt. See the models documentation for additional details and options.
  • messages (Array) -- Body param: Input messages.

Overloads:
  • count_tokens(messages:, model:, mcp_servers: nil, system_: nil, thinking: nil, tool_choice: nil, tools: nil, betas: nil, request_options: {})
def count_tokens(params)
  parsed, options = Anthropic::Beta::MessageCountTokensParams.dump_request(params)
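  # The :betas entry is lifted out of the parsed params and sent as the
  # "anthropic-beta" header; everything else stays in the JSON request body.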
  header_params = {betas: "anthropic-beta"}
  @client.request(
    method: :post,
    path: "v1/messages/count_tokens?beta=true",
    headers: parsed.slice(*header_params.keys).transform_keys(header_params),
    body: parsed.except(*header_params.keys),
    model: Anthropic::Beta::BetaMessageTokensCount,
    options: options
  )
end
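
A minimal usage sketch. The client accessor path (client.beta.messages), the model
name, and the #input_tokens reader are assumptions based on the documented
token-counting response shape; adjust for your SDK version.

require "anthropic"

client = Anthropic::Client.new(api_key: ENV["ANTHROPIC_API_KEY"])
count = client.beta.messages.count_tokens(
  model: "claude-sonnet-4-20250514",
  messages: [{role: "user", content: "Hello, Claude"}]
)
puts count.input_tokens # => total input tokens the request would consume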

def create(params)

Other tags:
    See: Anthropic::Models::Beta::MessageCreateParams -

Returns:
  • (Anthropic::Models::Beta::BetaMessage) -

Parameters:
  • request_options (Anthropic::RequestOptions, Hash{Symbol=>Object}, nil) --
  • betas (Array) -- Header param: Optional header to specify the beta version(s) you want to use.
  • top_p (Float) -- Body param: Use nucleus sampling.
  • top_k (Integer) -- Body param: Only sample from the top K options for each subsequent token.
  • tools (Array) -- Body param: Definitions of tools that the model may use.
  • tool_choice (Anthropic::Models::Beta::BetaToolChoiceAuto, Anthropic::Models::Beta::BetaToolChoiceAny, Anthropic::Models::Beta::BetaToolChoiceTool, Anthropic::Models::Beta::BetaToolChoiceNone) -- Body param: How the model should use the provided tools. The model can use a specific tool, any available tool, decide by itself, or not use tools at all.
  • thinking (Anthropic::Models::Beta::BetaThinkingConfigEnabled, Anthropic::Models::Beta::BetaThinkingConfigDisabled) -- Body param: Configuration for enabling Claude's extended thinking.
  • temperature (Float) -- Body param: Amount of randomness injected into the response.
  • system_ (String, Array) -- Body param: System prompt.
  • stop_sequences (Array) -- Body param: Custom text sequences that will cause the model to stop generating.
  • service_tier (Symbol, Anthropic::Models::Beta::MessageCreateParams::ServiceTier) -- Body param: Determines whether to use priority capacity (if available) or standard capacity for this request.
  • metadata (Anthropic::Models::Beta::BetaMetadata) -- Body param: An object describing metadata about the request.
  • mcp_servers (Array) -- Body param: MCP servers to be utilized in this request.
  • container (String, nil) -- Body param: Container identifier for reuse across requests.
  • model (Symbol, String, Anthropic::Models::Model) -- Body param: The model that will complete your prompt. See the models documentation for additional details and options.
  • messages (Array) -- Body param: Input messages.
  • max_tokens (Integer) -- Body param: The maximum number of tokens to generate before stopping.

Overloads:
  • create(max_tokens:, messages:, model:, container: nil, mcp_servers: nil, metadata: nil, service_tier: nil, stop_sequences: nil, system_: nil, temperature: nil, thinking: nil, tool_choice: nil, tools: nil, top_k: nil, top_p: nil, betas: nil, request_options: {})
def create(params)
  parsed, options = Anthropic::Beta::MessageCreateParams.dump_request(params)
  if parsed[:stream]
    message = "Please use `#stream` for the streaming use case."
    raise ArgumentError.new(message)
  end
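  # With no per-request options and the client still on its default timeout,
  # derive a read timeout from max_tokens (larger generations need longer waits);
  # otherwise default to 600s unless the caller supplied a timeout of their own.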
  if options.empty? && @client.timeout == Anthropic::Client::DEFAULT_TIMEOUT_IN_SECONDS
    model = parsed[:model].to_sym
    max_tokens = parsed[:max_tokens].to_i
    timeout = @client.calculate_nonstreaming_timeout(
      max_tokens,
      Anthropic::Client::MODEL_NONSTREAMING_TOKENS[model]
    )
    options = {timeout: timeout}
  else
    options = {timeout: 600, **options}
  end
  header_params = {betas: "anthropic-beta"}
  @client.request(
    method: :post,
    path: "v1/messages?beta=true",
    headers: parsed.slice(*header_params.keys).transform_keys(header_params),
    body: parsed.except(*header_params.keys),
    model: Anthropic::Beta::BetaMessage,
    options: options
  )
end
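
A hedged usage sketch. The model name is illustrative, and the content-block
accessors (#type, #text) follow the documented Message shape; verify against
your SDK version.

client = Anthropic::Client.new(api_key: ENV["ANTHROPIC_API_KEY"])
message = client.beta.messages.create(
  max_tokens: 1024,
  messages: [{role: "user", content: "Say hello in one sentence."}],
  model: "claude-sonnet-4-20250514"
)
message.content.each do |block|
  puts block.text if block.type == :text # only text blocks carry generated text
end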

def initialize(client:)

Parameters:
  • client (Anthropic::Client) --

Other tags:
    Api: - private
def initialize(client:)
  @client = client
  @batches = Anthropic::Resources::Beta::Messages::Batches.new(client: client)
end
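
This constructor is internal to the SDK; in normal use the resource is obtained
from the client rather than built directly. A sketch, assuming the conventional
client layout and a batches reader on the resource:

client   = Anthropic::Client.new(api_key: ENV["ANTHROPIC_API_KEY"])
messages = client.beta.messages # Anthropic::Resources::Beta::Messages
batches  = messages.batches     # nested Batches resource wired up in #initialize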

def stream(params)

Other tags:
    See: Anthropic::Models::Beta::MessageCreateParams -

Returns:
  • (Anthropic::Streaming::MessageStream) -

Parameters:
  • request_options (Anthropic::RequestOptions, Hash{Symbol=>Object}, nil) --
  • betas (Array) -- Header param: Optional header to specify the beta version(s) you want to use.
  • top_p (Float) -- Body param: Use nucleus sampling.
  • top_k (Integer) -- Body param: Only sample from the top K options for each subsequent token.
  • tools (Array) -- Body param: Definitions of tools that the model may use.
  • tool_choice (Anthropic::Models::Beta::BetaToolChoiceAuto, Anthropic::Models::Beta::BetaToolChoiceAny, Anthropic::Models::Beta::BetaToolChoiceTool, Anthropic::Models::Beta::BetaToolChoiceNone) -- Body param: How the model should use the provided tools. The model can use a specific tool, any available tool, decide by itself, or not use tools at all.
  • thinking (Anthropic::Models::Beta::BetaThinkingConfigEnabled, Anthropic::Models::Beta::BetaThinkingConfigDisabled) -- Body param: Configuration for enabling Claude's extended thinking.
  • temperature (Float) -- Body param: Amount of randomness injected into the response.
  • system_ (String, Array) -- Body param: System prompt.
  • stop_sequences (Array) -- Body param: Custom text sequences that will cause the model to stop generating.
  • service_tier (Symbol, Anthropic::Models::Beta::MessageCreateParams::ServiceTier) -- Body param: Determines whether to use priority capacity (if available) or standard capacity for this request.
  • metadata (Anthropic::Models::Beta::BetaMetadata) -- Body param: An object describing metadata about the request.
  • mcp_servers (Array) -- Body param: MCP servers to be utilized in this request.
  • container (String, nil) -- Body param: Container identifier for reuse across requests.
  • model (Symbol, String, Anthropic::Models::Model) -- Body param: The model that will complete your prompt. See the models documentation for additional details and options.
  • messages (Array) -- Body param: Input messages.
  • max_tokens (Integer) -- Body param: The maximum number of tokens to generate before stopping.

Overloads:
  • stream(max_tokens:, messages:, model:, container: nil, mcp_servers: nil, metadata: nil, service_tier: nil, stop_sequences: nil, system_: nil, temperature: nil, thinking: nil, tool_choice: nil, tools: nil, top_k: nil, top_p: nil, betas: nil, request_options: {})
def stream(params)
  parsed, options = Anthropic::Models::Beta::MessageCreateParams.dump_request(params)
  unless parsed.fetch(:stream, true)
    message = "Please use `#create` for the non-streaming use case."
    raise ArgumentError.new(message)
  end
  parsed.store(:stream, true)
  raw_stream = @client.request(
    method: :post,
    path: "v1/messages?beta=true",
    headers: {"accept" => "text/event-stream"},
    body: parsed,
    stream: Anthropic::Internal::Stream,
    model: Anthropic::Beta::BetaRawMessageStreamEvent,
    options: options
  )
  Anthropic::Streaming::MessageStream.new(raw_stream: raw_stream)
end
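
A hedged streaming sketch. It assumes the MessageStream helper exposes a #text
enumerator over text deltas; check the helper class for the exact accessors.

client = Anthropic::Client.new(api_key: ENV["ANTHROPIC_API_KEY"])
stream = client.beta.messages.stream(
  max_tokens: 1024,
  messages: [{role: "user", content: "Write a haiku about Ruby."}],
  model: "claude-sonnet-4-20250514"
)
stream.text.each { |delta| print delta } # prints text as it arrives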

def stream_raw(params)

Other tags:
    See: Anthropic::Models::Beta::MessageCreateParams -

Returns:
  • (Anthropic::Internal::Stream) -

Parameters:
  • request_options (Anthropic::RequestOptions, Hash{Symbol=>Object}, nil) --
  • betas (Array) -- Header param: Optional header to specify the beta version(s) you want to use.
  • top_p (Float) -- Body param: Use nucleus sampling.
  • top_k (Integer) -- Body param: Only sample from the top K options for each subsequent token.
  • tools (Array) -- Body param: Definitions of tools that the model may use.
  • tool_choice (Anthropic::Models::Beta::BetaToolChoiceAuto, Anthropic::Models::Beta::BetaToolChoiceAny, Anthropic::Models::Beta::BetaToolChoiceTool, Anthropic::Models::Beta::BetaToolChoiceNone) -- Body param: How the model should use the provided tools. The model can use a specific tool, any available tool, decide by itself, or not use tools at all.
  • thinking (Anthropic::Models::Beta::BetaThinkingConfigEnabled, Anthropic::Models::Beta::BetaThinkingConfigDisabled) -- Body param: Configuration for enabling Claude's extended thinking.
  • temperature (Float) -- Body param: Amount of randomness injected into the response.
  • system_ (String, Array) -- Body param: System prompt.
  • stop_sequences (Array) -- Body param: Custom text sequences that will cause the model to stop generating.
  • service_tier (Symbol, Anthropic::Models::Beta::MessageCreateParams::ServiceTier) -- Body param: Determines whether to use priority capacity (if available) or standard capacity for this request.
  • metadata (Anthropic::Models::Beta::BetaMetadata) -- Body param: An object describing metadata about the request.
  • mcp_servers (Array) -- Body param: MCP servers to be utilized in this request.
  • container (String, nil) -- Body param: Container identifier for reuse across requests.
  • model (Symbol, String, Anthropic::Models::Model) -- Body param: The model that will complete your prompt. See the models documentation for additional details and options.
  • messages (Array) -- Body param: Input messages.
  • max_tokens (Integer) -- Body param: The maximum number of tokens to generate before stopping.

Overloads:
  • stream_raw(max_tokens:, messages:, model:, container: nil, mcp_servers: nil, metadata: nil, service_tier: nil, stop_sequences: nil, system_: nil, temperature: nil, thinking: nil, tool_choice: nil, tools: nil, top_k: nil, top_p: nil, betas: nil, request_options: {})
def stream_raw(params)
  parsed, options = Anthropic::Beta::MessageCreateParams.dump_request(params)
  unless parsed.fetch(:stream, true)
    message = "Please use `#create` for the non-streaming use case."
    raise ArgumentError.new(message)
  end
  parsed.store(:stream, true)
  header_params = {betas: "anthropic-beta"}
  @client.request(
    method: :post,
    path: "v1/messages?beta=true",
    headers: {
      "accept" => "text/event-stream",
      **parsed.slice(*header_params.keys)
    }.transform_keys(header_params),
    body: parsed.except(*header_params.keys),
    stream: Anthropic::Internal::Stream,
    model: Anthropic::Beta::BetaRawMessageStreamEvent,
    options: {timeout: 600, **options}
  )
end
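
A minimal sketch for the low-level stream. The return value is an
Anthropic::Internal::Stream, which is enumerable and yields
BetaRawMessageStreamEvent values; the model name and event handling are illustrative.

client = Anthropic::Client.new(api_key: ENV["ANTHROPIC_API_KEY"])
events = client.beta.messages.stream_raw(
  max_tokens: 1024,
  messages: [{role: "user", content: "Hello"}],
  model: "claude-sonnet-4-20250514"
)
events.each do |event|
  # Raw SSE event types include :message_start, :content_block_delta, :message_stop, ...
  puts event.type
end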