class BenchmarkDriver::Runner::Recorded

Run only once, for testing.
def initialize(config:, output:, contexts:)

Parameters:
  - config (BenchmarkDriver::Config::RunnerConfig)
  - output (BenchmarkDriver::Output)
  - contexts (BenchmarkDriver::Context)

Source:

  def initialize(config:, output:, contexts:)
    @config = config      # runner configuration
    @output = output      # used by #run to report results
    @contexts = contexts  # execution contexts
  end
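For orientation, a minimal construction sketch follows. The `config`, `output`, and `contexts` locals are hypothetical stand-ins, assumed to be built elsewhere by the benchmark-driver front end:

  # Hypothetical sketch: the three collaborators are assumed to exist already.
  runner = BenchmarkDriver::Runner::Recorded.new(
    config: config,      # BenchmarkDriver::Config::RunnerConfig
    output: output,      # BenchmarkDriver::Output
    contexts: contexts,  # BenchmarkDriver::Context
  )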
def parse(job_warmup_context_result:, metrics:)

Parameters:
  - job_warmup_context_result (Hash{ BenchmarkDriver::Job => Hash{ TrueClass,FalseClass => Hash{ BenchmarkDriver::Context => BenchmarkDriver::Result } } })
  - metrics (BenchmarkDriver::Metrics::Type)

Source:

  def parse(job_warmup_context_result:, metrics:)
    job_warmup_context_result.map do |job, warmup_context_result|
      Job.new(
        name: job.name,
        # The boolean key separates warmup results (true) from benchmark results (false)
        warmup_results: warmup_context_result.fetch(true, {}),
        benchmark_results: warmup_context_result.fetch(false, {}),
        metrics: metrics,
        # Collect every context that appears in the results, deduplicated
        contexts: warmup_context_result.values.map(&:keys).flatten!.tap(&:uniq!),
      )
    end
  end
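The nested hash shape is easier to see spelled out. The sketch below is illustrative only; `job`, `ctx`, `warmup_result`, `bench_result`, and `metrics` are hypothetical stand-ins for the corresponding BenchmarkDriver objects:

  # Shape of job_warmup_context_result, following the parameter type above.
  # The boolean layer separates warmup runs (true) from benchmark runs (false).
  job_warmup_context_result = {
    job => {                              # BenchmarkDriver::Job
      true  => { ctx => warmup_result },  # Context => Result, from warmup
      false => { ctx => bench_result },   # Context => Result, from the benchmark
    },
  }

  jobs = runner.parse(
    job_warmup_context_result: job_warmup_context_result,
    metrics: metrics,  # BenchmarkDriver::Metrics::Type
  )

Each element of `jobs` is one of this class's Job records, carrying the warmup and benchmark result hashes separately, ready to be replayed by #run.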
def run(records)

Parameters:
  - records (Array)

Source:

  def run(records)
    # Warmup results are currently unused
    records.each do |record|
      unless record.warmup_results.empty?
        # TODO:
      end
    end

    @output.with_benchmark do
      records.each do |record|
        @output.with_job(name: record.name) do
          record.benchmark_results.each do |context, result|
            @output.with_context(
              name: context.name,
              executable: context.executable,
              gems: context.gems,
              prelude: context.prelude,
            ) do
              @output.report(
                values: result.values,
                all_values: result.all_values,
                duration: result.duration,
                loop_count: result.loop_count,
                environment: result.environment,
              )
            end
          end
        end
      end
    end
  end
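Putting the two methods together, a hedged end-to-end sketch under the same assumptions as above:

  # Replay the parsed records through the configured output.
  runner.run(jobs)

  # The reporting flow nests as follows: with_benchmark wraps the whole
  # replay, each record gets a with_job block, each of its contexts gets
  # a with_context block, and each stored result is passed to report.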