class BenchmarkDriver::Runner::RubyStdout
  def alternated_run(jobs)
    metric = jobs.first.metrics.first
    @output.with_benchmark do
      # Running benchmarks in an alternated manner is NOT compatible with two things:
      # * Output plugins. They expect RubyA, RubyA, RubyB, RubyB, ...
      # * BenchmarkDriver::Repeater. It should be used for results of the same condition.
      #
      # Therefore, we run all benchmarks with executables alternated first here, and then
      # aggregate the results as if the same executable were repeated in a row.
      context_results = Hash.new do |hash, context|
        hash[context] = []
      end
      jobs.each do |job|
        @config.repeat_count.times do
          @contexts.each do |context|
            context_results[context] << run_job(job, exec: context.executable)
          end
        end
      end

      # Aggregate results by BenchmarkDriver::Repeater and pass them to output.
      jobs.each do |job|
        @output.with_job(name: job.name) do
          @contexts.each do |context|
            repeat_params = { config: @config, larger_better: metric.larger_better }
            result = BenchmarkDriver::Repeater.with_repeat(**repeat_params) do
              context_results[context].shift
            end
            value, environment = result.value
            exec = context.executable
            @output.with_context(name: exec.name, executable: exec) do
              @output.report(
                values: { metric => value },
                all_values: { metric => result.all_values },
                environment: environment,
              )
            end
          end
        end
      end
    end
  end
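
  # A minimal sketch of the run-then-aggregate reordering above, assuming two
  # stand-in executables and fake results; `alternation_example` is a
  # hypothetical helper kept here for illustration only, not part of
  # benchmark_driver's API.
  def alternation_example
    contexts = [:rubyA, :rubyB] # stand-ins for Context objects
    repeat_count = 2
    results = Hash.new { |hash, context| hash[context] = [] }

    # Fill phase: within each repeat, alternate the executables (A, B, A, B),
    # so warm-up and cache effects are spread evenly across them.
    repeat_count.times do |i|
      contexts.each { |context| results[context] << "#{context}-run#{i}" }
    end

    # Drain phase: shift one executable's results out of the front of its
    # queue, as if they had been produced by consecutive repeats of that
    # executable alone.
    contexts.map do |context|
      [context, Array.new(repeat_count) { results[context].shift }]
    end
    # => [[:rubyA, ["rubyA-run0", "rubyA-run1"]], [:rubyB, ["rubyB-run0", "rubyB-run1"]]]
  end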