class Anthropic::Resources::Messages

def count_tokens(params)

Other tags:
    See: Anthropic::Models::MessageCountTokensParams -

Returns:
  • (Anthropic::Models::MessageTokensCount) -

Parameters:
  • request_options (Anthropic::RequestOptions, Hash{Symbol=>Object}, nil) --
  • tools (Array) -- Definitions of tools that the model may use.
  • tool_choice (Anthropic::Models::ToolChoiceAuto, Anthropic::Models::ToolChoiceAny, Anthropic::Models::ToolChoiceTool, Anthropic::Models::ToolChoiceNone) -- How the model should use the provided tools. The model can use a specific tool,
  • thinking (Anthropic::Models::ThinkingConfigEnabled, Anthropic::Models::ThinkingConfigDisabled) -- Configuration for enabling Claude's extended thinking.
  • system_ (String, Array) -- System prompt.
  • model (Symbol, String, Anthropic::Models::Model) -- The model that will complete your prompt.\n\nSee [models](https://docs.anthropic.com/en/docs/models-overview) for additional details and options.
  • messages (Array) -- Input messages.

Overloads:
  • count_tokens(messages:, model:, system_: nil, thinking: nil, tool_choice: nil, tools: nil, request_options: {})
# Count the number of tokens in a prospective Message without creating it.
#
# @param params [Hash] request parameters (see the overload documented above).
#
# @return [Anthropic::Models::MessageTokensCount]
def count_tokens(params)
  # Split caller input into the request body and any per-request options.
  body, request_options = Anthropic::MessageCountTokensParams.dump_request(params)
  @client.request(
    method: :post,
    path: "v1/messages/count_tokens",
    body: body,
    model: Anthropic::MessageTokensCount,
    options: request_options
  )
end

def create(params)

Other tags:
    See: Anthropic::Models::MessageCreateParams -

Returns:
  • (Anthropic::Models::Message) -

Parameters:
  • request_options (Anthropic::RequestOptions, Hash{Symbol=>Object}, nil) --
  • top_p (Float) -- Use nucleus sampling.
  • top_k (Integer) -- Only sample from the top K options for each subsequent token.
  • tools (Array) -- Definitions of tools that the model may use.
  • tool_choice (Anthropic::Models::ToolChoiceAuto, Anthropic::Models::ToolChoiceAny, Anthropic::Models::ToolChoiceTool, Anthropic::Models::ToolChoiceNone) -- How the model should use the provided tools. The model can use a specific tool,
  • thinking (Anthropic::Models::ThinkingConfigEnabled, Anthropic::Models::ThinkingConfigDisabled) -- Configuration for enabling Claude's extended thinking.
  • temperature (Float) -- Amount of randomness injected into the response.
  • system_ (String, Array) -- System prompt.
  • stop_sequences (Array) -- Custom text sequences that will cause the model to stop generating.
  • service_tier (Symbol, Anthropic::Models::MessageCreateParams::ServiceTier) -- Determines whether to use priority capacity (if available) or standard capacity
  • metadata (Anthropic::Models::Metadata) -- An object describing metadata about the request.
  • model (Symbol, String, Anthropic::Models::Model) -- The model that will complete your prompt.\n\nSee [models](https://docs.anthropic.com/en/docs/models-overview) for additional details and options.
  • messages (Array) -- Input messages.
  • max_tokens (Integer) -- The maximum number of tokens to generate before stopping.

Overloads:
  • create(max_tokens:, messages:, model:, metadata: nil, service_tier: nil, stop_sequences: nil, system_: nil, temperature: nil, thinking: nil, tool_choice: nil, tools: nil, top_k: nil, top_p: nil, request_options: {})
# Create a Message with the given prompt and parameters.
#
# @param params [Hash] request parameters; requires :max_tokens, :messages,
#   and :model (see the overload documented above for the full list).
#
# @raise [ArgumentError] if `stream: true` is passed — use {#stream} or
#   {#stream_raw} for the streaming use case.
#
# @return [Anthropic::Models::Message]
def create(params)
  parsed, options = Anthropic::MessageCreateParams.dump_request(params)
  if parsed[:stream]
    raise ArgumentError, "Please use `#stream` for the streaming use case."
  end
  if options.empty? && @client.timeout == Anthropic::Client::DEFAULT_TIMEOUT_IN_SECONDS
    # No per-request overrides and the client still has its default timeout:
    # derive a timeout from the requested max_tokens and the model's
    # non-streaming token cap.
    model = parsed[:model].to_sym
    max_tokens = parsed[:max_tokens].to_i
    timeout = @client.calculate_nonstreaming_timeout(
      max_tokens,
      Anthropic::Client::MODEL_NONSTREAMING_TOKENS[model]
    )
    options = {timeout: timeout}
  else
    # Default to a 10-minute timeout; a caller-supplied :timeout in options
    # takes precedence because it is splatted last.
    options = {timeout: 600, **options}
  end
  @client.request(
    method: :post,
    path: "v1/messages",
    body: parsed,
    model: Anthropic::Message,
    options: options
  )
end

def initialize(client:)

Parameters:
  • client (Anthropic::Client) --

Other tags:
    Api: - private
# @api private
#
# @param client [Anthropic::Client]
def initialize(client:)
  # Sub-resource for message batch operations; shares this client.
  @batches = Anthropic::Resources::Messages::Batches.new(client: client)
  @client = client
end

def stream(params)

Other tags:
    See: Anthropic::Models::MessageCreateParams -

Returns:
  • (Anthropic::Streaming::MessageStream) -

Parameters:
  • request_options (Anthropic::RequestOptions, Hash{Symbol=>Object}, nil) --
  • top_p (Float) -- Use nucleus sampling.
  • top_k (Integer) -- Only sample from the top K options for each subsequent token.
  • tools (Array) -- Definitions of tools that the model may use.
  • tool_choice (Anthropic::Models::ToolChoiceAuto, Anthropic::Models::ToolChoiceAny, Anthropic::Models::ToolChoiceTool, Anthropic::Models::ToolChoiceNone) -- How the model should use the provided tools. The model can use a specific tool,
  • thinking (Anthropic::Models::ThinkingConfigEnabled, Anthropic::Models::ThinkingConfigDisabled) -- Configuration for enabling Claude's extended thinking.
  • temperature (Float) -- Amount of randomness injected into the response.
  • system_ (String, Array) -- System prompt.
  • stop_sequences (Array) -- Custom text sequences that will cause the model to stop generating.
  • service_tier (Symbol, Anthropic::Models::MessageCreateParams::ServiceTier) -- Determines whether to use priority capacity (if available) or standard capacity
  • metadata (Anthropic::Models::Metadata) -- An object describing metadata about the request.
  • model (Symbol, String, Anthropic::Models::Model) -- The model that will complete your prompt.\n\nSee [models](https://docs.anthropic.com/en/docs/models-overview) for additional details and options.
  • messages (Array) -- Input messages.
  • max_tokens (Integer) -- The maximum number of tokens to generate before stopping.

Overloads:
  • stream(max_tokens:, messages:, model:, metadata: nil, service_tier: nil, stop_sequences: nil, system_: nil, temperature: nil, thinking: nil, tool_choice: nil, tools: nil, top_k: nil, top_p: nil, request_options: {})
# Create a Message as a server-sent-event stream, wrapped in a high-level
# streaming helper.
#
# @param params [Hash] request parameters; requires :max_tokens, :messages,
#   and :model (see the overload documented above for the full list).
#
# @raise [ArgumentError] if `stream: false` is explicitly passed — use
#   {#create} for the non-streaming use case.
#
# @return [Anthropic::Streaming::MessageStream]
def stream(params)
  # Use the short constant aliases for consistency with #create/#stream_raw.
  parsed, options = Anthropic::MessageCreateParams.dump_request(params)
  unless parsed.fetch(:stream, true)
    raise ArgumentError, "Please use `#create` for the non-streaming use case."
  end
  # Force streaming on; this endpoint always returns SSE here.
  parsed.store(:stream, true)
  raw_stream = @client.request(
    method: :post,
    path: "v1/messages",
    headers: {"accept" => "text/event-stream"},
    body: parsed,
    stream: Anthropic::Internal::Stream,
    model: Anthropic::RawMessageStreamEvent,
    options: options
  )
  Anthropic::Streaming::MessageStream.new(raw_stream: raw_stream)
end

def stream_raw(params)

Other tags:
    See: Anthropic::Models::MessageCreateParams -

Returns:
  • (Anthropic::Internal::Stream) -

Parameters:
  • request_options (Anthropic::RequestOptions, Hash{Symbol=>Object}, nil) --
  • top_p (Float) -- Use nucleus sampling.
  • top_k (Integer) -- Only sample from the top K options for each subsequent token.
  • tools (Array) -- Definitions of tools that the model may use.
  • tool_choice (Anthropic::Models::ToolChoiceAuto, Anthropic::Models::ToolChoiceAny, Anthropic::Models::ToolChoiceTool, Anthropic::Models::ToolChoiceNone) -- How the model should use the provided tools. The model can use a specific tool,
  • thinking (Anthropic::Models::ThinkingConfigEnabled, Anthropic::Models::ThinkingConfigDisabled) -- Configuration for enabling Claude's extended thinking.
  • temperature (Float) -- Amount of randomness injected into the response.
  • system_ (String, Array) -- System prompt.
  • stop_sequences (Array) -- Custom text sequences that will cause the model to stop generating.
  • service_tier (Symbol, Anthropic::Models::MessageCreateParams::ServiceTier) -- Determines whether to use priority capacity (if available) or standard capacity
  • metadata (Anthropic::Models::Metadata) -- An object describing metadata about the request.
  • model (Symbol, String, Anthropic::Models::Model) -- The model that will complete your prompt.\n\nSee [models](https://docs.anthropic.com/en/docs/models-overview) for additional details and options.
  • messages (Array) -- Input messages.
  • max_tokens (Integer) -- The maximum number of tokens to generate before stopping.

Overloads:
  • stream_raw(max_tokens:, messages:, model:, metadata: nil, service_tier: nil, stop_sequences: nil, system_: nil, temperature: nil, thinking: nil, tool_choice: nil, tools: nil, top_k: nil, top_p: nil, request_options: {})
# Create a Message as a raw server-sent-event stream of
# {Anthropic::RawMessageStreamEvent}s, without the high-level helper.
#
# @param params [Hash] request parameters; requires :max_tokens, :messages,
#   and :model (see the overload documented above for the full list).
#
# @raise [ArgumentError] if `stream: false` is explicitly passed — use
#   {#create} for the non-streaming use case.
#
# @return [Anthropic::Internal::Stream]
def stream_raw(params)
  parsed, options = Anthropic::MessageCreateParams.dump_request(params)
  unless parsed.fetch(:stream, true)
    raise ArgumentError, "Please use `#create` for the non-streaming use case."
  end
  # Force streaming on; this endpoint always returns SSE here.
  parsed.store(:stream, true)
  @client.request(
    method: :post,
    path: "v1/messages",
    headers: {"accept" => "text/event-stream"},
    body: parsed,
    stream: Anthropic::Internal::Stream,
    model: Anthropic::RawMessageStreamEvent,
    # Streaming requests default to a 10-minute timeout; a caller-supplied
    # :timeout takes precedence because it is splatted last.
    options: {timeout: 600, **options}
  )
end