module RubyLLM::Provider::InstanceMethods

# Runs a chat completion against the provider's API.
#
# Builds the request payload from the conversation, then either streams the
# response chunk-by-chunk (when a block is given) or performs a blocking
# request and parses the full response.
#
# @param messages [Array] conversation history to send
# @param tools [Object] tool definitions, forwarded to build_payload
# @param temperature [Numeric] sampling temperature, forwarded to build_payload
# @param model [String] model identifier, forwarded to build_payload
# @yield [chunk] each accumulated stream chunk, when streaming
# @return the streamed message (via stream_response) or the parsed
#   completion (via sync_response)
def complete(messages, tools:, temperature:, model:, &block)
  payload = build_payload messages, tools: tools, temperature: temperature, model: model, stream: block_given?
  if block_given?
    stream_response payload, &block
  else
    sync_response payload
  end
end

# Memoized Faraday connection pointed at the provider's API base URL.
#
# The connection is configured with:
# - a request timeout taken from RubyLLM.config.request_timeout
# - JSON encoding of request bodies and JSON parsing of response bodies
# - the default Faraday adapter
# - RaiseError middleware, so non-2xx responses raise Faraday errors
# - debug-level logging of bodies and errors (headers are suppressed)
#
# @return [Faraday::Connection] the shared, lazily-built connection
def connection
  @connection ||= Faraday.new(api_base) do |f|
    f.options.timeout = RubyLLM.config.request_timeout
    f.request :json
    f.response :json
    f.adapter Faraday.default_adapter
    f.use Faraday::Response::RaiseError
    f.response :logger, RubyLLM.logger, { headers: false, bodies: true, errors: true, log_level: :debug }
  end
end

# Fetches the models available from this provider.
#
# Issues a GET to models_url with the provider's headers merged into the
# request, then delegates parsing to parse_list_models_response.
#
# @return the parsed model list (shape defined by the including provider's
#   parse_list_models_response implementation)
def list_models
  response = connection.get(models_url) do |req|
    req.headers.merge! headers
  end
  parse_list_models_response response
end

# POSTs a payload to the provider's completion endpoint.
#
# Merges the provider's headers into the request, then yields the request
# object so callers can customize it further before it is sent (e.g.
# stream_response attaches an on_data handler).
#
# @param payload [Hash] the request body
# @yield [req] the Faraday request prior to dispatch
# @return [Faraday::Response]
def post(payload)
  connection.post completion_url, payload do |req|
    req.headers.merge! headers
    yield req if block_given?
  end
end

# Streams a completion response, yielding each chunk to the caller while
# accumulating the chunks into a complete message.
#
# Attaches a streaming on_data handler (built by handle_stream) to the
# POST request; every parsed chunk is added to a StreamAccumulator and
# also passed to the caller's block.
#
# @param payload [Hash] the request body
# @yield [chunk] each stream chunk as it arrives
# @return the accumulated message (StreamAccumulator#to_message)
def stream_response(payload, &block)
  accumulator = StreamAccumulator.new
  post payload do |req|
    req.options.on_data = handle_stream do |chunk|
      accumulator.add chunk
      block.call chunk
    end
  end
  accumulator.to_message
end

# Performs a blocking (non-streaming) completion request.
#
# POSTs the payload and delegates parsing to parse_completion_response.
#
# @param payload [Hash] the request body
# @return the parsed completion (shape defined by the including provider's
#   parse_completion_response implementation)
def sync_response(payload)
  response = post payload
  parse_completion_response response
end

# Builds a proc suitable for Faraday's on_data streaming hook that parses
# server-sent events into JSON and hands each parsed payload to the block.
#
# Each raw chunk is fed into an EventStreamParser; every event's data is
# JSON-parsed and logged at debug level, except the '[DONE]' sentinel,
# which is skipped.
#
# @yield [parsed_data] each JSON-decoded event payload
# @return [Proc] a proc(chunk, bytes, env) for Faraday streaming
def to_json_stream(&block)
  parser = EventStreamParser::Parser.new
  proc do |chunk, _bytes, _|
    parser.feed(chunk) do |_type, data|
      unless data == '[DONE]'
        parsed_data = JSON.parse(data)
        RubyLLM.logger.debug "chunk: #{parsed_data}"
        block.call(parsed_data)
      end
    end
  end
end