class BenchmarkDriver::Runner::RubyStdout
Uses the stdout of a spawned ruby command as the source of benchmark metrics.
def alternated_run(jobs)
def alternated_run(jobs)
  metric = jobs.first.metrics.first
  @output.with_benchmark do
    jobs.each do |job|
      @output.with_job(name: job.name) do
        # Running benchmarks in an alternated manner is NOT compatible with two things:
        #  * Output plugins. They expect RubyA, RubyA, RubyB, RubyB, ...
        #  * BenchmarkDriver::Repeater. It should be used for results of the same condition.
        #
        # Therefore, we run all benchmarks with executables alternated first here, and then
        # aggregate the results as if the same executable were repeated in a row.
        context_results = Hash.new do |hash, context|
          hash[context] = []
        end
        @config.repeat_count.times do
          @contexts.each do |context|
            context_results[context] << run_job(job, exec: context.executable)
          end
        end

        # Aggregate results by BenchmarkDriver::Repeater and pass them to output.
        @contexts.each do |context|
          repeat_params = { config: @config, larger_better: metric.larger_better }
          result = BenchmarkDriver::Repeater.with_repeat(**repeat_params) do
            context_results[context].shift
          end
          value, environment = result.value
          exec = context.executable
          @output.with_context(name: exec.name, executable: exec) do
            @output.report(
              values: { metric => value },
              all_values: { metric => result.all_values },
              environment: environment,
            )
          end
        end
      end
    end
  end
end
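To make the regrouping concrete, here is a minimal standalone sketch (symbols invented, not the gem's API) of how an alternated execution order is aggregated back per executable:

contexts = [:ruby_a, :ruby_b]
repeat_count = 3

# Alternated execution order: A, B, A, B, A, B
order = []
repeat_count.times { contexts.each { |context| order << context } }
order #=> [:ruby_a, :ruby_b, :ruby_a, :ruby_b, :ruby_a, :ruby_b]

# Regrouped as if each executable had been repeated in a row,
# which is the shape BenchmarkDriver::Repeater expects:
order.group_by(&:itself)
#=> {:ruby_a=>[:ruby_a, :ruby_a, :ruby_a], :ruby_b=>[:ruby_b, :ruby_b, :ruby_b]}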
def execute(*args)
def execute(*args)
  stdout, stderr, status = Open3.capture3(*args)
  unless status.success?
    raise CommandFailure.new("Failed to execute: #{args.shelljoin} (status: #{status.exitstatus}):\n\n[stdout]:\n#{stdout}\n[stderr]:\n#{stderr}")
  end
  stdout
end
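For reference, this is the standard Open3 pattern; a standalone sketch of the same call outside the runner:

require 'open3'

stdout, stderr, status = Open3.capture3('ruby', '-e', 'puts 6 * 7')
status.success?  #=> true
stdout           #=> "42\n"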
def incremental_run(jobs)
def incremental_run(jobs)
  metric = jobs.first.metrics.first
  @output.with_benchmark do
    jobs.each do |job|
      @output.with_job(name: job.name) do
        @contexts.each do |context|
          exec = context.executable
          repeat_params = { config: @config, larger_better: metric.larger_better }
          result = BenchmarkDriver::Repeater.with_repeat(**repeat_params) do
            run_job(job, exec: exec)
          end
          value, environment = result.value
          @output.with_context(name: exec.name, executable: exec) do
            @output.report(
              values: { metric => value },
              all_values: { metric => result.all_values },
              environment: environment,
            )
          end
        end
      end
    end
  end
end
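By contrast with alternated_run, here BenchmarkDriver::Repeater drives the repetition itself, so each executable's runs happen back to back. A standalone sketch of the resulting order (symbols invented):

contexts = [:ruby_a, :ruby_b]
repeat_count = 2

order = contexts.flat_map { |context| [context] * repeat_count }
order #=> [:ruby_a, :ruby_a, :ruby_b, :ruby_b]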
def initialize(config:, output:, contexts:)
Parameters:
- config (BenchmarkDriver::Config::RunnerConfig)
- output (BenchmarkDriver::Output)
- contexts (Array<BenchmarkDriver::Context>)
def initialize(config:, output:, contexts:)
  @config = config
  @output = output
  @contexts = contexts
end
def parse(name:, command:, working_directory: nil, metrics:, environment: {})
Parameters:
- name (String)
- command (String)
- working_directory (String, NilClass)
- metrics (Hash)
- environment (Hash)
def parse(name:, command:, working_directory: nil, metrics:, environment: {})
  unless metrics.is_a?(Hash)
    raise ArgumentError.new("metrics must be Hash, but got #{metrics.class}")
  end
  if metrics.size == 0
    raise ArgumentError.new('At least one metric must be specified')
  elsif metrics.size != 1
    raise NotImplementedError.new('Having multiple metrics is not supported yet')
  end

  metric_name, metric_params = metrics.first
  metric, value_from_stdout = parse_metric(metric_name, **metric_params)
  environment_from_stdout = Hash[environment.map { |k, v| [k, parse_environment(**v)] }]

  Job.new(
    name: name,
    command: command.shellsplit,
    working_directory: working_directory,
    metrics: [metric],
    value_from_stdout: value_from_stdout,
    environment_from_stdout: environment_from_stdout,
  )
end
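A hypothetical invocation (name, unit, and the from_stdout expression are all invented) to illustrate the expected shape of the metrics hash, assuming exactly one metric as the checks above require:

job = parse(
  name: 'example',
  command: 'ruby bench.rb',
  metrics: {
    'throughput' => { unit: 'lines/s', from_stdout: 'stdout.to_f' },
  },
)
job.command             #=> ["ruby", "bench.rb"]
job.metrics.map(&:name) #=> ["throughput"]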
def parse_environment(from_stdout:)
def parse_environment(from_stdout:)
  from_stdout
end
def parse_metric(name, unit:, from_stdout:, larger_better: true, worse_word: 'slower')
def parse_metric(name, unit:, from_stdout:, larger_better: true, worse_word: 'slower')
  metric = BenchmarkDriver::Metric.new(
    name: name,
    unit: unit,
    larger_better: larger_better,
    worse_word: worse_word,
  )
  [metric, from_stdout]
end
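A hypothetical call (unit and extractor expression invented), showing that the method returns the built Metric together with the untouched from_stdout extractor:

metric, value_from_stdout = parse_metric('score', unit: 'iter/s', from_stdout: 'stdout.to_f')
metric.unit          #=> "iter/s"
metric.larger_better #=> true
value_from_stdout    #=> "stdout.to_f"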
def run(jobs)
Parameters:
- jobs (Array)
def run(jobs)
  if @config.alternate
    alternated_run(jobs)
  else
    incremental_run(jobs)
  end
end
def run_job(job, exec:)
def run_job(job, exec:)
  stdout = with_chdir(job.working_directory) do
    with_ruby_prefix(exec) { execute(*exec.command, *job.command) }
  end
  script = StdoutToMetrics.new(
    stdout: stdout,
    value_from_stdout: job.value_from_stdout,
    environment_from_stdout: job.environment_from_stdout,
  )
  [script.value, script.environment]
rescue CommandFailure => e
  # A failing command is reported as an error result for this context
  # instead of aborting the entire benchmark run.
  $stderr.puts("\n```\n#{e.message}```\n")
  [BenchmarkDriver::Result::ERROR, {}]
end
def with_chdir(working_directory, &block)
def with_chdir(working_directory, &block)
  if working_directory
    Dir.chdir(working_directory) { block.call }
  else
    block.call
  end
end
def with_ruby_prefix(executable, &block)
def with_ruby_prefix(executable, &block)
  env = ENV.to_h.dup
  # Prepend the executable's bin directory to PATH so that bare `ruby`
  # invocations inside the benchmark command resolve to this executable.
  ENV['PATH'] = "#{File.dirname(executable.command.first)}:#{ENV['PATH']}"
  block.call
ensure
  ENV.replace(env)
end
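A standalone sketch of the effect (paths invented): inside the block, PATH starts with the executable's directory, and the original environment is restored afterwards even if the block raises.

executable_dir = '/opt/rubies/3.3.0/bin' # hypothetical
original_path = ENV['PATH']

begin
  ENV['PATH'] = "#{executable_dir}:#{ENV['PATH']}"
  ENV['PATH'].start_with?("#{executable_dir}:") #=> true
ensure
  ENV['PATH'] = original_path
end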