class BenchmarkDriver::Runner::Ips
Shows iterations per second (i/s).
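A minimal usage sketch, assuming the top-level Benchmark.driver interface shown in the benchmark_driver README (ips is the gem's default runner, so no explicit runner selection is needed here):

require 'benchmark_driver'

Benchmark.driver do |x|
  x.prelude <<~RUBY
    small = 'a' * 10
  RUBY

  x.report 'upcase',   %{ small.upcase }
  x.report 'downcase', %{ small.downcase }
end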
def debug_output(name, text)
def debug_output(name, text)
  sep = '-' * 30
  $stdout.puts "\n\n#{sep}[#{name} begin]#{sep}\n#{text}#{sep}[#{name} end]#{sep}\n\n"
end
def execute(*args, exception: true)
def execute(*args, exception: true)
  $stderr.puts "$ #{args.shelljoin}" if @config.verbose >= 2
  stdout = IO.popen(args, &:read)
  debug_output('Command output', stdout) if @config.verbose >= 2
  if exception && !$?.success?
    raise "Failed to execute: #{args.shelljoin} (status: #{$?})"
  end
  $?.success?
end
def initialize(config:, output:, contexts:)
Parameters:
- config (BenchmarkDriver::Config::RunnerConfig)
- output (BenchmarkDriver::Output)
- contexts (BenchmarkDriver::Context)
def initialize(config:, output:, contexts:)
  @config = config
  @output = output
  @contexts = contexts
end
def metric
def metric
  METRIC
end
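METRIC is a class-level constant that is not shown in this section; in the gem's source it describes the ips metric, roughly:

METRIC = BenchmarkDriver::Metric.new(name: 'Iteration per second', unit: 'i/s')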
def run(jobs)
Parameters:
- jobs (Array)
def run(jobs)
  if jobs.any? { |job| job.loop_count.nil? }
    @output.with_warmup do
      jobs = jobs.map do |job|
        next job if job.loop_count # skip warmup if loop_count is set

        @output.with_job(name: job.name) do
          context = job.runnable_contexts(@contexts).first
          duration, loop_count = run_warmup(job, context: context)
          value, duration = value_duration(duration: duration, loop_count: loop_count)

          @output.with_context(name: context.name, executable: context.executable, gems: context.gems, prelude: context.prelude) do
            @output.report(values: { metric => value }, duration: duration, loop_count: loop_count)
          end

          job.loop_count = (loop_count.to_f * @config.run_duration / duration).floor
          job
        end
      end
    end
  end

  @output.with_benchmark do
    jobs.each do |job|
      @output.with_job(name: job.name) do
        job.runnable_contexts(@contexts).each do |context|
          repeat_params = { config: @config, larger_better: true, rest_on_average: :average }
          result = BenchmarkDriver::Repeater.with_repeat(**repeat_params) do
            run_benchmark(job, context: context)
          end
          value, duration = result.value

          @output.with_context(name: context.name, executable: context.executable, gems: context.gems, prelude: context.prelude) do
            @output.report(
              values: { metric => value },
              all_values: { metric => result.all_values },
              duration: duration,
              loop_count: job.loop_count,
            )
          end
        end
      end
    end
  end
end
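A worked example of the loop_count scaling at the end of the warmup branch, with illustrative numbers (a 0.5-second warmup that completed 1,000 iterations, targeting the default 3-second run_duration):

loop_count   = 1_000  # iterations completed during warmup
duration     = 0.5    # seconds the warmup took
run_duration = 3.0    # @config.run_duration
(loop_count.to_f * run_duration / duration).floor # => 6000 iterations for the timed run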
def run_benchmark(job, context:)
Returns:
- (BenchmarkDriver::Metrics)

Parameters:
- job (BenchmarkDriver::Runner::Ips::Job) -- loop_count is not nil
- context (BenchmarkDriver::Context)
def run_benchmark(job, context:)
  benchmark = BenchmarkScript.new(
    preludes:   [context.prelude, job.prelude],
    script:     job.script,
    teardown:   job.teardown,
    loop_count: job.loop_count,
  )

  duration = Tempfile.open(['benchmark_driver-', '.rb']) do |f|
    with_script(benchmark.render(result: f.path)) do |path|
      success = execute(*context.executable.command, path, exception: false)
      if success && ((value = Float(f.read)) > 0)
        value
      else
        BenchmarkDriver::Result::ERROR
      end
    end
  end

  value_duration(
    loop_count: job.loop_count,
    duration: duration,
  )
end
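BenchmarkScript#render is defined elsewhere in the gem. The sketch below is a hypothetical shape of the generated file (the variable names, timing mechanism, and result path are assumptions), showing why run_benchmark can recover the duration with Float(f.read):

# -- hypothetical rendered script --
# preludes (context.prelude, job.prelude) run first
__bmdv_before = Process.clock_gettime(Process::CLOCK_MONOTONIC)
__bmdv_i = 0
while __bmdv_i < 6000 # job.loop_count
  # job.script inlined here
  __bmdv_i += 1
end
__bmdv_after = Process.clock_gettime(Process::CLOCK_MONOTONIC)
File.write('/tmp/benchmark_driver-result', (__bmdv_after - __bmdv_before).to_s) # f.path
# job.teardown runs last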
def run_warmup(job, context:)
Parameters:
- job (BenchmarkDriver::Runner::Ips::Job) -- loop_count is nil
- context (BenchmarkDriver::Context)
def run_warmup(job, context:)
  warmup = WarmupScript.new(
    preludes:   [context.prelude, job.prelude],
    script:     job.script,
    teardown:   job.teardown,
    loop_count: job.loop_count,
    first_warmup_duration:  @config.run_duration / 6.0, # default: 0.5
    second_warmup_duration: @config.run_duration / 3.0, # default: 1.0
  )

  duration, loop_count = Tempfile.open(['benchmark_driver-', '.rb']) do |f|
    with_script(warmup.render(result: f.path)) do |path|
      execute(*context.executable.command, path)
    end
    eval(f.read)
  end

  [duration, loop_count]
end
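With the default run_duration of 3 seconds, the two warmup phases come out to the values noted in the inline comments above:

run_duration = 3.0
run_duration / 6.0 # => 0.5 (first_warmup_duration)
run_duration / 3.0 # => 1.0 (second_warmup_duration)

Note that, unlike run_benchmark, the result file here is evaled: the warmup script is expected to write a Ruby literal such as [duration, loop_count] into f.path, which eval turns back into the pair.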
def value_duration(duration:, loop_count:)
def value_duration(duration:, loop_count:)
  if BenchmarkDriver::Result::ERROR.equal?(duration)
    [BenchmarkDriver::Result::ERROR, BenchmarkDriver::Result::ERROR]
  else
    [loop_count.to_f / duration, duration]
  end
end
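Worked example: a run that completes 6,000 iterations in 0.5 seconds reports 12,000 i/s:

loop_count = 6_000
duration   = 0.5
loop_count.to_f / duration # => 12000.0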
def with_script(script)
def with_script(script)
  debug_output('Script', script) if @config.verbose >= 2
  Tempfile.open(['benchmark_driver-', '.rb']) do |f|
    f.puts script
    f.close
    return yield(f.path)
  end
end