class Benchmark::IPS::Job
  # Benchmark jobs: collects benchmark entries and runs their warmup and
  # measurement passes, accumulating results into a report.
# Have all registered benchmark items been measured and reported?
#
# @return [Boolean] true once the report holds one entry per item.
def all_results_have_been_run?
  @list.size == @full_report.entries.size
end
# Delete the hold file used to carry results between Ruby invocations.
#
# Safe to call when no hold path was configured (original raised TypeError
# on File.exist?(nil)) or when the file is already gone.
def clear_held_results
  File.delete @held_path if @held_path && File.exist?(@held_path)
end
# Request that a comparison of all entries be printed after the run.
#
# @return [true]
def compare!
  @compare = true
end
# Need to compare?
#
# @return [Boolean] whether {#compare!} has been requested.
def compare?
  @compare
end
# Set options for this job.
#
# @option opts [Integer] :warmup Warmup time, in seconds.
# @option opts [Integer] :time Calculation time, in seconds.
# @option opts [Integer] :iterations Number of warmup and calculation passes.
# @option opts [Symbol] :stats Statistical model (:sd or :bootstrap).
# @option opts [Integer] :confidence Confidence level for :bootstrap stats.
# @option opts [Object] :suite Suite to report into (nil selects a no-op suite).
# @option opts [Boolean] :quiet Suppress stdout reporting when truthy.
def config opts
  @warmup = opts[:warmup] if opts[:warmup]
  @time = opts[:time] if opts[:time]
  @iterations = opts[:iterations] if opts[:iterations]
  @stats = opts[:stats] if opts[:stats]
  @confidence = opts[:confidence] if opts[:confidence]
  self.quiet = opts[:quiet]
  # The suite writer normalizes nil to a no-op suite, so assigning @suite
  # directly beforehand (as the old code did) was a dead store.
  self.suite = opts[:suite]
end
# Add one measurement entry to the full report.
#
# @param label [String] Report item label.
# @param measured_us [Integer] Measured time in microseconds.
# @param iter [Integer] Iterations performed.
# @param samples [Object] Sampled iterations-per-second statistics.
# @param cycles [Integer] Number of cycles per batch.
# @return [Report::Entry] the entry added to the report.
def create_report(label, measured_us, iter, samples, cycles)
  @full_report.add_entry(label, measured_us, iter, samples, cycles)
end
# Build a statistics object for the configured model.
#
# @param samples [Array<Float>] sampled iterations per second.
# @return [Object] a Stats::SD or Stats::Bootstrap instance.
# @raise [RuntimeError] if @stats names an unknown model.
def create_stats(samples)
  if @stats == :sd
    Stats::SD.new(samples)
  elsif @stats == :bootstrap
    Stats::Bootstrap.new(samples, @confidence)
  else
    raise "unknown stats #{@stats}"
  end
end
# Compute the number of cycles needed to run for roughly 100ms.
#
# @param time_msec [Float] time one batch of iterations took — presumably
#   microseconds, matching MICROSECONDS_PER_100MS (the parameter name says
#   ms; confirm against Timing.time_us callers).
# @param iters [Integer] iterations performed in that time.
# @return [Integer] cycles per 100ms, never less than 1.
def cycles_per_100ms time_msec, iters
  per_100ms = ((MICROSECONDS_PER_100MS / time_msec) * iters).to_i
  [per_100ms, 1].max
end
# Write the full report as JSON to the configured path, if JSON output
# has been requested via {#json!}.
def generate_json
  return unless json?
  @full_report.generate_json @json_path
end
# Enter hold mode: results are persisted to a file and only a single item
# is run per Ruby invocation.
#
# @param held_path [String] File name to store hold file.
def hold!(held_path)
  @held_path = held_path
  @run_single = true
end
# Need to hold results between multiple Ruby invocations?
#
# @return [Boolean]
def hold?
  @held_path ? true : false
end
def initialize opts={}
def initialize opts={} @list = [] @run_single = false @json_path = false @compare = false @held_path = nil @held_results = nil @timing = Hash.new 1 # default to 1 in case warmup isn't run @full_report = Report.new # Default warmup and calculation time in seconds. @warmup = 2 @time = 5 @iterations = 1 # Default statistical model @stats = :sd @confidence = 95 end
# Register a benchmark entry, given either a code string or a block.
#
# @param label [String] Label of benchmarked code.
# @param str [String] Code to be benchmarked.
# @param blk [Proc] Code to be benchmarked.
# @raise [ArgumentError] if both str and blk are given.
# @raise [ArgumentError] if neither str nor blk is given.
# @return [self]
def item(label="", str=nil, &blk) # :yield:
  raise ArgumentError, "specify a block and a str, but not both" if str && blk

  action = str || blk
  raise ArgumentError, "no block or string" if action.nil?

  @list.push Entry.new(label, action)
  self
end
# Convert a cycle count and elapsed time into iterations per second.
#
# @param cycles [Integer] Cycles executed.
# @param time_us [Integer] Elapsed time in microseconds.
# @return [Float] Iterations per second.
def iterations_per_sec cycles, time_us
  (cycles.to_f / time_us.to_f) * MICROSECONDS_PER_SECOND
end
# Enable JSON output of the report.
#
# @param path [String] destination file, defaulting to "data.json".
def json!(path="data.json")
  @json_path = path
end
# Need to generate json?
#
# @return [Boolean]
def json?
  @json_path ? true : false
end
# Load results persisted by a previous invocation from the hold file,
# indexing them by item label and replaying each into the full report.
# No-op when no hold file is configured, or it is missing or empty.
def load_held_results
  return unless @held_path
  return unless File.exist?(@held_path) && !File.zero?(@held_path)

  require "json"
  @held_results = {}
  JSON.load(IO.read(@held_path)).each do |result|
    label = result['item']
    @held_results[label] = result
    stats = create_stats(result['samples'])
    create_report(label, result['measured_us'], result['iter'], stats, result['cycles'])
  end
end
# Select the stdout reporter: a no-op reporter when val is truthy,
# the normal stdout reporter otherwise.
def quiet=(val)
  @stdout = reporter(quiet: val)
end
# Build a report sink for the requested verbosity.
#
# @param quiet [Boolean] suppress output when truthy.
# @return [NoopReport, StdoutReport]
def reporter(quiet:)
  if quiet
    NoopReport.new
  else
    StdoutReport.new
  end
end
# Run the job: the warmup passes (skipped when @warmup is 0 or nil),
# then the measurement passes, then the report footer.
# Fixes: dropped the non-idiomatic `then` on a multi-line `if` and the
# unused block parameter on the measurement loop.
def run
  if @warmup && @warmup != 0
    @stdout.start_warming
    @iterations.times do
      run_warmup
    end
  end

  @stdout.start_running

  @iterations.times do
    run_benchmark
  end

  @stdout.footer
end
# Run the measurement phase for every item in the job.
#
# For each item: runs batches of `cycles` calls (sized during warmup to
# take ~100ms each) until @time seconds elapse, converts each batch time
# into an iterations-per-second sample, and records an entry in the full
# report. Items already present in @held_results are skipped; in
# single-run (hold) mode only the first unfinished item is measured.
def run_benchmark
  @list.each do |item|
    next if run_single? && @held_results && @held_results.key?(item.label)

    @suite.running item.label, @time
    @stdout.running item.label, @time

    Timing.clean_env

    iter = 0
    measurements_us = []

    # Running this number of cycles should take around 100ms.
    cycles = @timing[item]

    target = Timing.add_second Timing.now, @time

    while (before = Timing.now) < target
      item.call_times cycles
      after = Timing.now

      # If for some reason the timing said this took no time (O_o)
      # then ignore the iteration entirely and start another.
      iter_us = Timing.time_us before, after
      next if iter_us <= 0.0

      iter += cycles
      measurements_us << iter_us
    end

    # `before` holds the timestamp taken at the top of the final
    # (failed) loop test, i.e. when measurement actually stopped.
    final_time = before

    measured_us = measurements_us.inject(:+)

    samples = measurements_us.map { |time_us|
      iterations_per_sec cycles, time_us
    }

    rep = create_report(item.label, measured_us, iter, create_stats(samples), cycles)

    # Flag the entry when the run over/undershot the requested time
    # budget by more than the allowed skew.
    if (final_time - target).abs >= (@time.to_f * MAX_TIME_SKEW)
      rep.show_total_time!
    end

    @stdout.add_report rep, caller(1).first
    @suite.add_report rep, caller(1).first

    break if run_single?
  end
end
# Print the comparison of all entries, when {#compare!} was requested.
def run_comparison
  return unless compare?
  @full_report.run_comparison
end
# Run just a single item per invocation (hold mode)?
#
# @return [Boolean]
def run_single?
  @run_single
end
# Run the warmup phase for every item: calibrate how many cycles take
# about 100ms, store that in @timing for #run_benchmark to use, and keep
# the code hot for the remainder of @warmup seconds. Items already loaded
# from a hold file are skipped; in single-run (hold) mode only the first
# unfinished item is warmed.
def run_warmup
  @list.each do |item|
    next if run_single? && @held_results && @held_results.key?(item.label)

    @suite.warming item.label, @warmup
    @stdout.warming item.label, @warmup

    Timing.clean_env

    # Run for up to half of the configured warmup time with an increasing
    # number of cycles to reduce overhead and improve accuracy.
    # This also avoids running with a constant number of cycles, which a
    # JIT might speculate on and then have to recompile in #run_benchmark.
    before = Timing.now
    target = Timing.add_second before, @warmup / 2.0

    cycles = 1
    warmup_iter = 1
    warmup_time_us = 0.0
    # Stop early if the next (doubled) batch would likely overrun the
    # half-warmup target: the last batch took warmup_time_us.
    while Timing.now + warmup_time_us * 2 < target
      t0 = Timing.now
      item.call_times cycles
      t1 = Timing.now

      warmup_iter = cycles
      warmup_time_us = Timing.time_us(t0, t1)

      # If the number of cycles would go outside the 32-bit signed integers range
      # then exit the loop to avoid overflows and start the 100ms warmup runs
      break if cycles >= POW_2_30
      cycles *= 2
    end

    cycles = cycles_per_100ms warmup_time_us, warmup_iter
    @timing[item] = cycles

    # Run for the remaining of warmup in a similar way as #run_benchmark.
    target = Timing.add_second before, @warmup
    while Timing.now + MICROSECONDS_PER_100MS < target
      item.call_times cycles
    end

    @stdout.warmup_stats warmup_time_us, @timing[item]
    @suite.warmup_stats warmup_time_us, @timing[item]

    break if run_single?
  end
end
# Save results to held_path while running every item in this invocation
# (unlike {#hold!}, which runs one item per invocation).
#
# @param held_path [String] File name to store hold file.
def save!(held_path)
  @held_path = held_path
  @run_single = false
end
# Persist the current report entries to the hold file as a JSON array
# (one object per entry), so a later invocation can resume from them.
def save_held_results
  return unless @held_path

  require "json"
  rows = full_report.entries.map do |entry|
    {
      'item' => entry.label,
      'measured_us' => entry.microseconds,
      'iter' => entry.iterations,
      'samples' => entry.samples,
      'cycles' => entry.measurement_cycle,
    }
  end
  IO.write(@held_path, JSON.generate(rows) << "\n")
end
# Set the suite to report into, substituting a no-op suite when none
# (nil/false) is given.
def suite=(suite)
  if suite
    @suite = suite
  else
    @suite = Benchmark::IPS::NoopSuite.new
  end
end
# Microseconds elapsed between two timestamps.
#
# @param before [Time] start time.
# @param after [Time] end time.
# @return [Float] time difference of before and after, in microseconds.
def time_us before, after
  delta_s = after.to_f - before.to_f
  delta_s * MICROSECONDS_PER_SECOND
end