class Vellum::EnrichedNormalizedCompletion

# Deserialize a JSON object to an instance of EnrichedNormalizedCompletion
#
# @param json_object [JSON]
# @return [EnrichedNormalizedCompletion]
def self.from_json(json_object:)
  # Parse the payload twice: once into nested OpenStructs (retained verbatim as
  # additional_properties) and once into a plain Hash for enum / nested-object handling.
  struct = JSON.parse(json_object, object_class: OpenStruct)
  parsed_json = JSON.parse(json_object)
  # Map serialized enum values back to their canonical keys, falling back to the raw value.
  finish_reason = FINISH_REASON_ENUM.key(parsed_json["finish_reason"]) || parsed_json["finish_reason"]
  type = VELLUM_VARIABLE_TYPE.key(parsed_json["type"]) || parsed_json["type"]
  # Nested object: re-serialize the fragment and delegate to its own from_json, unless absent.
  logprobs = NormalizedLogProbs.from_json(json_object: parsed_json["logprobs"].to_json) unless parsed_json["logprobs"].nil?
  new(
    id: struct.id,
    external_id: struct.external_id,
    text: struct.text,
    finish_reason: finish_reason,
    logprobs: logprobs,
    model_version_id: struct.model_version_id,
    prompt_version_id: struct.prompt_version_id,
    type: type,
    deployment_release_tag: struct.deployment_release_tag,
    model_name: struct.model_name,
    additional_properties: struct
  )
end

# Validate each field of the given object against this class's expected
# property types, raising if any field fails validation.
#
# @param obj [Object]
# @return [Void]
def self.validate_raw(obj:)
  # Check each field of `obj` against its expected type; raises RuntimeError on the
  # first mismatch. Optional fields use safe navigation (`&.`), so nil values pass.
  # NOTE(review): `&.is_a?(FINISH_REASON_ENUM)` / `&.is_a?(VELLUM_VARIABLE_TYPE)`
  # assume those constants are classes/modules; if they are hashes of enum values,
  # `is_a?` would raise TypeError for non-nil inputs — confirm against their definitions.
  raise("Passed value for field obj.id is not the expected type, validation failed.") unless obj.id.is_a?(String) != false
  raise("Passed value for field obj.external_id is not the expected type, validation failed.") unless obj.external_id&.is_a?(String) != false
  raise("Passed value for field obj.text is not the expected type, validation failed.") unless obj.text.is_a?(String) != false
  raise("Passed value for field obj.finish_reason is not the expected type, validation failed.") unless obj.finish_reason&.is_a?(FINISH_REASON_ENUM) != false
  # Nested object delegates to its own validator when present.
  NormalizedLogProbs.validate_raw(obj: obj.logprobs) unless obj.logprobs.nil?
  raise("Passed value for field obj.model_version_id is not the expected type, validation failed.") unless obj.model_version_id.is_a?(String) != false
  raise("Passed value for field obj.prompt_version_id is not the expected type, validation failed.") unless obj.prompt_version_id.is_a?(String) != false
  raise("Passed value for field obj.type is not the expected type, validation failed.") unless obj.type&.is_a?(VELLUM_VARIABLE_TYPE) != false
  raise("Passed value for field obj.deployment_release_tag is not the expected type, validation failed.") unless obj.deployment_release_tag.is_a?(String) != false
  raise("Passed value for field obj.model_name is not the expected type, validation failed.") unless obj.model_name.is_a?(String) != false
  true
end

# @param id [String] The Vellum-generated ID of the completion.
# @param external_id [String] The external ID that was originally provided along with the generation request, which uniquely identifies this generation in an external system.
# @param text [String] The text generated by the LLM.
# @param finish_reason [FINISH_REASON_ENUM] The reason the generation finished.
# @param logprobs [NormalizedLogProbs] The logprobs of the completion. Only present if specified in the original request options.
# @param model_version_id [String] The ID of the model version used to generate this completion.
# @param prompt_version_id [String]
# @param type [VELLUM_VARIABLE_TYPE]
# @param deployment_release_tag [String]
# @param model_name [String]
# @param additional_properties [OpenStruct] Additional properties unmapped to the current class definition
# @return [EnrichedNormalizedCompletion]
def initialize(id:, text:, model_version_id:, prompt_version_id:, deployment_release_tag:, model_name:,
               external_id: nil, finish_reason: nil, logprobs: nil, type: nil, additional_properties: nil)
  # Field-by-field capture; no coercion or validation is performed here.
  @id = id                                           # String — Vellum-generated completion ID
  @external_id = external_id                         # String — caller-supplied external identifier
  @text = text                                       # String — text generated by the LLM
  @finish_reason = finish_reason                     # FINISH_REASON_ENUM — LENGTH / STOP / UNKNOWN
  @logprobs = logprobs                               # NormalizedLogProbs — present only if requested
  @model_version_id = model_version_id               # String — model version that produced this completion
  @prompt_version_id = prompt_version_id             # String
  @type = type                                       # VELLUM_VARIABLE_TYPE
  @deployment_release_tag = deployment_release_tag   # String
  @model_name = model_name                           # String
  @additional_properties = additional_properties     # OpenStruct — raw properties unmapped to this class
end

# Serialize this object to a JSON string.
#
# @return [JSON]
def to_json(*_args)
  # Serialize every field to a JSON string. Enum attributes are mapped through
  # their lookup constants, falling back to the stored value when no match exists.
  payload = {
    "id" => @id,
    "external_id" => @external_id,
    "text" => @text,
    "finish_reason" => FINISH_REASON_ENUM[@finish_reason] || @finish_reason,
    "logprobs" => @logprobs,
    "model_version_id" => @model_version_id,
    "prompt_version_id" => @prompt_version_id,
    "type" => VELLUM_VARIABLE_TYPE[@type] || @type,
    "deployment_release_tag" => @deployment_release_tag,
    "model_name" => @model_name
  }
  payload.to_json
end