class Benchmark::IPS::Job

Benchmark jobs: a job holds the list of entries to measure and the warmup, measurement, and reporting configuration used to run them.

def all_results_have_been_run?

def all_results_have_been_run?
  @full_report.entries.size == @list.size
end

def clear_held_results

def clear_held_results
  File.delete @held_path if File.exist?(@held_path)
end

def compare!

Request a comparison of the results once the job has run (sets the +@compare+ flag; the comparison itself happens in +run_comparison+).
def compare!
  @compare = true
end
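
In practice this flag is usually set through the Benchmark.ips block, which yields this Job (report is an alias for item). A minimal sketch:

require "benchmark/ips"

Benchmark.ips do |x|
  x.report("String#+")   { "ab" + "cd" }
  x.report("Array#join") { ["ab", "cd"].join }
  x.compare!             # comparison table is printed after both results
end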

def compare?

Returns:
  • (Boolean) - Need to compare?
def compare?
  @compare
end

def config opts

Options Hash: (**opts)
  • :warmup (Integer) -- Warmup time in seconds.
  • :time (Integer) -- Calculation (measurement) time in seconds.
  • :iterations (Integer) -- Number of warmup and calculation iterations.
  • :suite (Benchmark::Suite) -- Suite to report to.
  • :stats (Symbol) -- Statistics model, :sd or :bootstrap.
  • :confidence (Integer) -- Confidence level for the :bootstrap model.
def config opts
  @warmup = opts[:warmup] if opts[:warmup]
  @time = opts[:time] if opts[:time]
  @suite = opts[:suite] if opts[:suite]
  @iterations = opts[:iterations] if opts[:iterations]
  @stats = opts[:stats] if opts[:stats]
  @confidence = opts[:confidence] if opts[:confidence]
end
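
A short sketch of configuring a job directly with the options read above (times are in seconds; all values here are illustrative):

job = Benchmark::IPS::Job.new
job.config(warmup: 1, time: 2, iterations: 2)
job.item("to_s") { 12_345.to_s }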

def create_report(label, measured_us, iter, samples, cycles)

Returns:
  • (Report::Entry) - Entry with data.

Parameters:
  • cycles (Integer) -- Number of cycles.
  • samples (Array) -- Sampled iterations per second.
  • iter (Integer) -- Iterations.
  • measured_us (Integer) -- Measured time in microseconds.
  • label (String) -- Report item label.
def create_report(label, measured_us, iter, samples, cycles)
  @full_report.add_entry label, measured_us, iter, samples, cycles
end

def create_stats(samples)

def create_stats(samples)
  case @stats
    when :sd
      Stats::SD.new(samples)
    when :bootstrap
      Stats::Bootstrap.new(samples, @confidence)
    else
      raise "unknown stats #{@stats}"
  end
end
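
The statistics model is chosen via config: :sd is the default, while :bootstrap also uses the configured confidence level (and may depend on an optional gem). A hedged sketch:

Benchmark.ips do |x|
  x.config(stats: :bootstrap, confidence: 99)
  x.report("sort") { (1..100).to_a.shuffle.sort }
end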

def cycles_per_100ms time_msec, iters

Returns:
  • (Integer) - Cycles per 100ms.

Parameters:
  • iters (Integer) -- Iterations.
  • time_msec (Float) -- Time taken by those iterations, in microseconds (the callers pass microseconds despite the parameter name).
def cycles_per_100ms time_msec, iters
  cycles = ((MICROSECONDS_PER_100MS / time_msec) * iters).to_i
  cycles <= 0 ? 1 : cycles
end
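
A worked example with illustrative numbers, assuming MICROSECONDS_PER_100MS is 100_000 and that the helper is callable directly as listed:

# 400 cycles measured at 25_000 microseconds (62.5 us per call):
#   (100_000 / 25_000.0) * 400  #=> 1600 cycles per ~100ms batch
job = Benchmark::IPS::Job.new
job.cycles_per_100ms(25_000.0, 400)  #=> 1600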

def generate_json

Generate json from +@full_report+.
def generate_json
  @full_report.generate_json @json_path if json?
end

def hold!(held_path)

Parameters:
  • held_path (String) -- Path of the file used to hold results between invocations.
def hold!(held_path)
  @held_path = held_path
  @run_single = true
end

def hold?

Returns:
  • (Boolean) - Need to hold results between multiple Ruby invocations?
def hold?
  !!@held_path
end
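
hold! together with run_single? supports accumulating results across separate Ruby invocations (for example, different Ruby versions) and comparing them at the end. A hedged sketch of that workflow, assuming the Benchmark.ips driver loads and saves the held file around the run as load_held_results and save_held_results suggest; the file name is illustrative:

# Run this same script once per Ruby version; each invocation benchmarks
# the item not yet present in the held file, then saves its result there.
Benchmark.ips do |x|
  x.report("RUBY #{RUBY_VERSION}") { (1..1_000).reduce(:+) }
  x.hold! "held_results.json"
  x.compare!
end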

def initialize opts={}

Options Hash: (**opts)
  • :quiet (Boolean) -- Suppress the printing of information (default: false).
  • :suite (Benchmark::Suite) -- Specify a Benchmark::Suite (default: nil).
def initialize opts={}
  @suite = opts[:suite] || nil
  @stdout = opts[:quiet] ? nil : StdoutReport.new
  @list = []
  @compare = false
  @run_single = false
  @json_path = false
  @held_path = nil
  @held_results = nil
  @timing = Hash.new 1 # default to 1 in case warmup isn't run
  @full_report = Report.new
  # Default warmup and calculation time in seconds.
  @warmup = 2
  @time = 5
  @iterations = 1
  # Default statistical model
  @stats = :sd
  @confidence = 95
end
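
Direct construction is rare (most callers go through Benchmark.ips), but the options map straight onto the defaults above. A minimal sketch:

job = Benchmark::IPS::Job.new(quiet: true)   # no StdoutReport output
job.item("upcase") { "hello".upcase }
job.run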

def item(label="", str=nil, &blk) # :yield:

Raises:
  • (ArgumentError) - Raises if str and blk are both absent.
  • (ArgumentError) - Raises if str and blk are both present.

Parameters:
  • blk (Proc) -- Code to be benchmarked.
  • str (String) -- Code to be benchmarked.
  • label (String) -- Label of benchmarked code.
def item(label="", str=nil, &blk) # :yield:
  if blk and str
    raise ArgumentError, "specify a block and a str, but not both"
  end
  action = str || blk
  raise ArgumentError, "no block or string" unless action
  @list.push Entry.new(label, action)
  self
end
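
Both forms of item in a short sketch; exactly one of the string or the block must be given:

job = Benchmark::IPS::Job.new(quiet: true)
job.item("block form")  { [1, 2, 3].sum }
job.item("string form", "[1, 2, 3].sum")
# job.item("neither")                  # would raise ArgumentError
# job.item("both", "1 + 1") { 1 + 1 }  # would raise ArgumentError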

def iterations_per_sec cycles, time_us

Returns:
  • (Float) - Iterations per second.

Parameters:
  • time_us (Integer) -- Time in microseconds.
  • cycles (Integer) -- Cycles.
def iterations_per_sec cycles, time_us
  MICROSECONDS_PER_SECOND * (cycles.to_f / time_us.to_f)
end
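
A worked example with illustrative numbers, assuming MICROSECONDS_PER_SECOND is 1_000_000 and that the helper is callable directly:

# 1_600 cycles measured over 98_000 microseconds:
#   1_000_000 * (1_600 / 98_000.0)  #=> ~16_326.5 iterations per second
job = Benchmark::IPS::Job.new
job.iterations_per_sec(1_600, 98_000)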

def json!(path="data.json")

Request JSON output to the given path (defaults to "data.json"); the file itself is written by +generate_json+ after the run.
def json!(path="data.json")
  @json_path = path
end
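
A sketch of requesting JSON output through the Benchmark.ips block, assuming the driver calls generate_json once the job finishes, as generate_json above suggests:

Benchmark.ips do |x|
  x.report("reverse") { "benchmark".reverse }
  x.json! "ips.json"   # written out after the run completes
end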

def json?

Returns:
  • (Boolean) - Need to generate json?
def json?
  !!@json_path
end

def load_held_results

def load_held_results
  return unless @held_path && !File.zero?(@held_path)
  require "json"
  @held_results = {}
  JSON.load(IO.read(@held_path)).each do |result|
    @held_results[result['item']] = result
    create_report(result['item'], result['measured_us'], result['iter'],
                  create_stats(result['samples']), result['cycles'])
  end
end

def run

def run
  if @warmup && @warmup != 0
    @stdout.start_warming if @stdout
    @iterations.times do
      run_warmup
    end
  end
  @stdout.start_running if @stdout
  @iterations.times do
    run_benchmark
  end
  @stdout.footer if @stdout
end
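
A minimal sketch of driving a Job directly rather than through Benchmark.ips, showing the warmup-then-measure flow; full_report is assumed to be readable, as save_held_results below uses it:

job = Benchmark::IPS::Job.new
job.config(warmup: 1, time: 2)
job.item("upcase")   { "hello".upcase }
job.item("swapcase") { "hello".swapcase }
job.run               # warmup pass(es), then the timed measurement pass(es)
job.run_comparison    # no-op here because compare! was not requested
report = job.full_report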

def run_benchmark

Run calculation.
def run_benchmark
  @list.each do |item|
    next if run_single? && @held_results && @held_results.key?(item.label)
    @suite.running item.label, @time if @suite
    @stdout.running item.label, @time if @stdout
    Timing.clean_env
    iter = 0
    measurements_us = []
    # Running this number of cycles should take around 100ms.
    cycles = @timing[item]
    target = Timing.add_second Timing.now, @time
    while (before = Timing.now) < target
      item.call_times cycles
      after = Timing.now
      # If for some reason the timing said this took no time (O_o)
      # then ignore the iteration entirely and start another.
      iter_us = Timing.time_us before, after
      next if iter_us <= 0.0
      iter += cycles
      measurements_us << iter_us
    end
    final_time = before
    measured_us = measurements_us.inject(:+)
    samples = measurements_us.map { |time_us|
      iterations_per_sec cycles, time_us
    }
    rep = create_report(item.label, measured_us, iter, create_stats(samples), cycles)
    if (final_time - target).abs >= (@time.to_f * MAX_TIME_SKEW)
      rep.show_total_time!
    end
    @stdout.add_report rep, caller(1).first if @stdout
    @suite.add_report rep, caller(1).first if @suite
    break if run_single?
  end
end

def run_comparison

Run comparison of entries in +@full_report+.
def run_comparison
  @full_report.run_comparison if compare?
end

def run_single?

Returns:
  • (Boolean) - Run just a single item?
def run_single?
  @run_single
end

def run_warmup

Run warmup.
def run_warmup
  @list.each do |item|
    next if run_single? && @held_results && @held_results.key?(item.label)
    @suite.warming item.label, @warmup if @suite
    @stdout.warming item.label, @warmup if @stdout
    Timing.clean_env
    # Run for up to half of the configured warmup time with an increasing
    # number of cycles to reduce overhead and improve accuracy.
    # This also avoids running with a constant number of cycles, which a
    # JIT might speculate on and then have to recompile in #run_benchmark.
    before = Timing.now
    target = Timing.add_second before, @warmup / 2.0
    cycles = 1
    warmup_iter = 1
    warmup_time_us = 0.0
    while Timing.now + warmup_time_us * 2 < target
      t0 = Timing.now
      item.call_times cycles
      t1 = Timing.now
      warmup_iter = cycles
      warmup_time_us = Timing.time_us(t0, t1)
      cycles *= 2
    end
    cycles = cycles_per_100ms warmup_time_us, warmup_iter
    @timing[item] = cycles
    # Run for the remainder of the warmup in a similar way to #run_benchmark.
    target = Timing.add_second before, @warmup
    while Timing.now + MICROSECONDS_PER_100MS < target
      item.call_times cycles
    end
    @stdout.warmup_stats warmup_time_us, @timing[item] if @stdout
    @suite.warmup_stats warmup_time_us, @timing[item] if @suite
    break if run_single?
  end
end
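
An illustrative trace of the ramp-up (all numbers made up), assuming roughly 120 microseconds per call and @warmup = 2, so the doubling phase gets about one second:

# cycles:  1, 2, 4, ..., 2048, 4096   (doubled while 2x the last sample
#                                      still fits in the remaining budget)
# The last sample is 4096 cycles in ~491_520 microseconds, so
#   cycles_per_100ms(491_520.0, 4096)  #=> (100_000 / 491_520.0) * 4096 ≈ 833
# and the remaining ~1 second of warmup runs batches of 833 cycles.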

def save!(held_path)

Parameters:
  • held_path (String) -- Path of the file used to hold results between invocations.
def save!(held_path)
  @held_path = held_path
  @run_single = false
end

def save_held_results

def save_held_results
  return unless @held_path
  require "json"
  data = full_report.entries.map { |e|
    {
      'item' => e.label,
      'measured_us' => e.microseconds,
      'iter' => e.iterations,
      'samples' => e.samples,
      'cycles' => e.measurement_cycle
    }
  }
  IO.write(@held_path, JSON.generate(data) << "\n")
end
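
Based on the fields written above, the held file is a JSON array with one object per entry; the values below are purely illustrative:

# [
#   { "item": "block form", "measured_us": 2004512, "iter": 8320000,
#     "samples": [4151320.5, 4149876.2], "cycles": 416000 }
# ]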

def time_us before, after

Returns:
  • (Float) - Time difference between before and after, in microseconds.

Parameters:
  • after (Time) -- End time.
  • before (Time) -- Start time.
def time_us before, after
  (after.to_f - before.to_f) * MICROSECONDS_PER_SECOND
end