class Multiwoven::Integrations::Source::WatsonxAi::Client

# Runs the deployed watsonx.ai model in streaming mode.
#
# Refreshes the access token, builds the deployment streaming URL from the
# connection config, and issues a streaming POST request. Each response chunk
# is passed through process_streaming_response, and every resulting message is
# yielded to the caller's block (if one was given).
#
# NOTE: the original source contained a duplicated nested `def` of this same
# method (a paste artifact) which left the outer definition unclosed; this is
# the single, corrected definition.
#
# @param connection_config [Hash] expects :api_key, :region, :deployment_id,
#   and an optional :config hash forwarded to the HTTP layer
# @param payload [String] JSON-encoded request body for the model
# @yield [message] each processed streaming message
def run_model_stream(connection_config, payload)
  # NOTE(review): get_access_token appears to set @access_token as a side
  # effect (it is read below via auth_headers) — confirm against its definition.
  get_access_token(connection_config[:api_key])
  url = format(
    WATSONX_STREAM_DEPLOYMENT_URL,
    region: connection_config[:region],
    deployment_id: connection_config[:deployment_id],
    version: API_VERSION
  )
  send_streaming_request(
    url: url,
    http_method: HTTP_POST,
    payload: JSON.parse(payload),
    headers: auth_headers(@access_token),
    config: connection_config[:config]
  ) do |chunk|
    process_streaming_response(chunk) { |message| yield message if block_given? }
  end
rescue StandardError => e
  handle_exception(e, { context: "WATSONX AI:RUN_STREAM_MODEL:EXCEPTION", type: "error" })
end