class Vellum::PromptDeploymentExpandMetaRequestRequest

Options controlling which pieces of execution metadata are included in a Prompt Deployment's response.

def self.from_json(json_object:)

Deserialize a JSON object to an instance of PromptDeploymentExpandMetaRequestRequest.

Returns:
  • (PromptDeploymentExpandMetaRequestRequest) -- The deserialized object

Parameters:
  • json_object (JSON) -- Raw JSON string to deserialize
def self.from_json(json_object:)
  # Parse into an OpenStruct so unmapped fields remain available via additional_properties.
  struct = JSON.parse(json_object, object_class: OpenStruct)
  model_name = struct.model_name
  latency = struct.latency
  deployment_release_tag = struct.deployment_release_tag
  prompt_version_id = struct.prompt_version_id
  finish_reason = struct.finish_reason
  new(model_name: model_name, latency: latency, deployment_release_tag: deployment_release_tag,
      prompt_version_id: prompt_version_id, finish_reason: finish_reason, additional_properties: struct)
end
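
For example, a minimal sketch of deserializing a raw payload (the require below assumes the published gem name, and the accessor calls assume the usual attr_reader generated for each field):

require "json"
require "vellum_ai" # assumed gem name for the Vellum Ruby SDK

raw = '{"model_name": true, "latency": true, "finish_reason": false}'

expand_meta = Vellum::PromptDeploymentExpandMetaRequestRequest.from_json(json_object: raw)
expand_meta.model_name    # => true
expand_meta.finish_reason # => false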

def self.validate_raw(obj:)

Validates that each field on the given object is of the expected type, raising on the first field that fails its check. Fields that are nil are skipped.

Returns:
  • (Void) -- Raises if any present field fails its type check

Parameters:
  • obj (Object) -- Object whose fields are validated against this class's property types
def self.validate_raw(obj:)
  obj.model_name&.is_a?(Boolean) != false || raise("Passed value for field obj.model_name is not the expected type, validation failed.")
  obj.latency&.is_a?(Boolean) != false || raise("Passed value for field obj.latency is not the expected type, validation failed.")
  obj.deployment_release_tag&.is_a?(Boolean) != false || raise("Passed value for field obj.deployment_release_tag is not the expected type, validation failed.")
  obj.prompt_version_id&.is_a?(Boolean) != false || raise("Passed value for field obj.prompt_version_id is not the expected type, validation failed.")
  obj.finish_reason&.is_a?(Boolean) != false || raise("Passed value for field obj.finish_reason is not the expected type, validation failed.")
end
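
A minimal sketch of the calling convention; fields left nil are skipped by the safe-navigation checks, so a payload with no recognized fields validates without raising:

require "ostruct"

candidate = OpenStruct.new # hypothetical pre-parsed payload with no recognized fields set
Vellum::PromptDeploymentExpandMetaRequestRequest.validate_raw(obj: candidate) # nothing raised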

def initialize(model_name: nil, latency: nil, deployment_release_tag: nil, prompt_version_id: nil, finish_reason: nil, additional_properties: nil)

Each flag controls whether the corresponding piece of metadata is included in the Prompt Deployment execution response.

Returns:
  • (PromptDeploymentExpandMetaRequestRequest) -- The newly constructed object

Parameters:
  • model_name (Boolean) -- If enabled, the response will include the model identifier representing the ML Model invoked by the Prompt Deployment.
  • latency (Boolean) -- If enabled, the response will include the time in nanoseconds it took to execute the Prompt Deployment.
  • deployment_release_tag (Boolean) -- If enabled, the response will include the release tag of the Prompt Deployment.
  • prompt_version_id (Boolean) -- If enabled, the response will include the ID of the Prompt Version backing the deployment.
  • finish_reason (Boolean) -- If enabled, the response will include the reason provided by the model for why the execution finished.
  • additional_properties (OpenStruct) -- Additional properties unmapped to the current class definition
def initialize(model_name: nil, latency: nil, deployment_release_tag: nil, prompt_version_id: nil,
               finish_reason: nil, additional_properties: nil)
  # @type [Boolean] If enabled, the response will include the model identifier representing the ML Model invoked by the Prompt Deployment.
  @model_name = model_name
  # @type [Boolean] If enabled, the response will include the time in nanoseconds it took to execute the Prompt Deployment.
  @latency = latency
  # @type [Boolean] If enabled, the response will include the release tag of the Prompt Deployment.
  @deployment_release_tag = deployment_release_tag
  # @type [Boolean] If enabled, the response will include the ID of the Prompt Version backing the deployment.
  @prompt_version_id = prompt_version_id
  # @type [Boolean] If enabled, the response will include the reason provided by the model for why the execution finished.
  @finish_reason = finish_reason
  # @type [OpenStruct] Additional properties unmapped to the current class definition
  @additional_properties = additional_properties
end
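
For example, requesting every piece of optional metadata (a minimal sketch; how this object is attached to an execution request depends on the client call being made):

# Ask for all optional metadata to be included in the execution response.
expand_meta = Vellum::PromptDeploymentExpandMetaRequestRequest.new(
  model_name: true,
  latency: true,
  deployment_release_tag: true,
  prompt_version_id: true,
  finish_reason: true
)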

def to_json(*_args)

Serialize the object's known fields to a JSON string; additional_properties is not included in the output.

Returns:
  • (JSON) -- JSON string representation of the object
def to_json(*_args)
  {
    "model_name": @model_name,
    "latency": @latency,
    "deployment_release_tag": @deployment_release_tag,
    "prompt_version_id": @prompt_version_id,
    "finish_reason": @finish_reason
  }.to_json
end
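
A quick serialization sketch (note that additional_properties is not emitted):

require "json"

expand_meta = Vellum::PromptDeploymentExpandMetaRequestRequest.new(model_name: true, latency: false)
expand_meta.to_json
# => '{"model_name":true,"latency":false,"deployment_release_tag":null,"prompt_version_id":null,"finish_reason":null}'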