class FlowEngine::LLM::Client
# High-level LLM client that parses introduction text into pre-filled answers.
# Wraps an {Adapter} and a model name, builds the system prompt from the
# flow Definition, and parses the structured JSON response.
# Coerce a raw LLM-extracted value into the shape expected for a step type.
#
# value - the value as extracted from the LLM's JSON response
# type  - the step's type Symbol (:number, :multi_select, :number_matrix, ...)
#
# Returns the coerced value; unknown types pass through unchanged.
def coerce_value(value, type)
  case type
  when :number
    # NOTE: non-numeric input is coerced with to_i, so "3.5" becomes 3.
    value.is_a?(Numeric) ? value : value.to_i
  when :multi_select
    # Wrap scalars, keep arrays, turn nil into [].
    Array(value)
  when :number_matrix
    return {} unless value.is_a?(Hash)

    value.transform_values { |v| v.is_a?(Numeric) ? v : v.to_i }
  else
    value
  end
end
# Extract the JSON payload from raw LLM response text.
#
# The LLM may wrap JSON in markdown code fences (``` or ```json); if a
# fenced block is found its inner content is returned, otherwise the
# whole text is used. The result is whitespace-trimmed either way.
#
# text - String raw response text from the adapter
#
# Returns a String with the (probable) JSON payload.
def extract_json(text)
  match = text.match(/```(?:json)?\s*\n?(.*?)\n?\s*```/m)
  match ? match[1].strip : text.strip
end
# adapter - Adapter LLM provider adapter (e.g. Adapters::OpenAIAdapter)
# model   - String model identifier; defaults to the adapter's model
def initialize(adapter:, model: nil)
  @adapter = adapter
  @model = model || adapter.model
end
# Run one round of conversational intake: build a system prompt from the
# flow definition, what has already been answered, and the conversation so
# far; send the user's message through the adapter; parse the structured
# response.
#
# definition           - Definition flow definition
# user_text            - String user's free-form text
# answered             - Hash of already-answered steps
# conversation_history - Array of prior rounds [{role:, text:}]
#
# Returns a Hash { answers: Hash, follow_up: String|nil }.
# Raises Errors::LLMError on response parsing failures.
def parse_ai_intake(definition:, user_text:, answered: {}, conversation_history: [])
  system_prompt = IntakePromptBuilder.new(
    definition,
    answered: answered,
    conversation_history: conversation_history
  ).build

  response_text = adapter.chat(
    system_prompt: system_prompt,
    user_prompt: user_text,
    model: model
  )

  parse_intake_response(response_text, definition)
end
# Parse an intake-round LLM response: strip code fences, JSON-parse,
# keep only answers whose step exists in the definition, and coerce each
# value to its step's type.
#
# text       - String raw LLM response
# definition - Definition flow definition used to validate/coerce answers
#
# Returns a Hash { answers: Hash, follow_up: String|nil }.
# Raises Errors::LLMError when the response is not valid JSON.
def parse_intake_response(text, definition)
  json_str = extract_json(text)
  raw = JSON.parse(json_str, symbolize_names: true)

  answers_raw = raw[:answers] || {}
  answers = answers_raw.each_with_object({}) do |(step_id, value), result|
    # Silently drop hallucinated step ids not present in the definition.
    next unless definition.steps.key?(step_id)

    node = definition.step(step_id)
    result[step_id] = coerce_value(value, node.type)
  end

  { answers: answers, follow_up: raw[:follow_up] }
rescue JSON::ParserError => e
  raise ::FlowEngine::Errors::LLMError, "Failed to parse LLM response as JSON: #{e.message}"
end
# Parse a user's free-form introduction into pre-filled answers.
#
# definition        - Definition flow definition (used to build system prompt)
# introduction_text - String user's free-form introduction
#
# Returns a Hash of step_id => extracted value.
# Raises Errors::LLMError on response parsing failures.
def parse_introduction(definition:, introduction_text:)
  system_prompt = SystemPromptBuilder.new(definition).build

  response_text = adapter.chat(
    system_prompt: system_prompt,
    user_prompt: introduction_text,
    model: model
  )

  parse_response(response_text, definition)
end
# Parse an introduction-style LLM response: strip code fences, JSON-parse,
# keep only keys whose step exists in the definition, and coerce each
# value to its step's type.
#
# text       - String raw LLM response
# definition - Definition flow definition used to validate/coerce answers
#
# Returns a Hash of step_id => coerced value.
# Raises Errors::LLMError when the response is not valid JSON.
def parse_response(text, definition)
  json_str = extract_json(text)
  raw = JSON.parse(json_str, symbolize_names: true)

  raw.each_with_object({}) do |(step_id, value), result|
    # Silently drop hallucinated step ids not present in the definition.
    next unless definition.steps.key?(step_id)

    node = definition.step(step_id)
    result[step_id] = coerce_value(value, node.type)
  end
rescue JSON::ParserError => e
  raise ::FlowEngine::Errors::LLMError, "Failed to parse LLM response as JSON: #{e.message}"
end
# Human-readable representation including the wrapped adapter and model.
def to_s
  "#<#{self.class.name} adapter=#{adapter} model=#{model}>"
end