# Tracks the jobs currently being worked on by this Sidekiq cluster.
# NOTE(review): the class body here is empty and the instance methods
# (#each, #find_work, #size) appear at top level later in this file —
# they presumably belong inside this class body; confirm the intended
# nesting against upstream Sidekiq.
class Sidekiq::WorkSet


end
# The WorkSet stores the work being done by this Sidekiq cluster.
# It tracks the process and thread working on each job.
#
# WARNING WARNING WARNING
#
# This is live data that can change every millisecond.
# If you call #size => 5 and then expect #each to be
# called 5 times, you're going to have a bad time.
#
#    works = Sidekiq::WorkSet.new
#    works.size => 2
#    works.each do |process_id, thread_id, work|
#      # process_id is a unique identifier per Sidekiq process
#      # thread_id is a unique identifier per thread
#      # work is a `Sidekiq::Work` instance that has the following accessor methods.
#      # [work.queue, work.run_at, work.payload]
#      # run_at is an epoch Integer.
#    end
#

# Iterates over every in-progress job in the cluster, sorted by start time.
#
# Yields [process_id, thread_id, Sidekiq::Work] triples to the block.
# Fetches the registered process set from Redis, then pipelines one
# HGETALL per process to collect each process's work hash in a single
# round trip.
def each(&block)
  results = []
  procs = nil
  all_works = nil
  Sidekiq.redis do |conn|
    procs = conn.sscan("processes").to_a.sort
    # One HGETALL per process, batched into a single pipeline round trip.
    all_works = conn.pipelined do |pipeline|
      procs.each do |key|
        pipeline.hgetall("#{key}:work")
      end
    end
  end
  # procs and all_works are index-aligned: zip pairs each process key
  # with its {thread_id => payload_json} hash.
  procs.zip(all_works).each do |key, workers|
    workers.each_pair do |tid, json|
      # Skip empty payloads: a thread slot may exist with no current job.
      results << [key, tid, Sidekiq::Work.new(key, tid, Sidekiq.load_json(json))] unless json.empty?
    end
  end
  results.sort_by { |(_, _, work)| work.run_at }.each(&block)
end

# Finds the in-progress work item for a given job, if any.
#
# @param jid [String] the job identifier
# @return [Sidekiq::Work, nil] the matching work, or nil if the job is
#   not currently being processed (live data — may change at any moment)
def find_work(jid)
  each do |_process_id, _thread_id, work|
    job = work.job
    return work if job.jid == jid
  end
  nil
end

# Returns the total number of jobs currently being worked on across the
# cluster, summing each registered process's "busy" counter.
#
# Note that #size is only as accurate as Sidekiq's heartbeat,
# which happens every 5 seconds. It is NOT real-time.
#
# Not very efficient if you have lots of Sidekiq
# processes but the alternative is a global counter
# which can easily get out of sync with crashy processes.
#
# @return [Integer] total busy count (0 when no processes are registered)
def size
  Sidekiq.redis do |conn|
    procs = conn.sscan("processes").to_a
    if procs.empty?
      0
    else
      # Pipeline one HGET per process; nil replies coerce to 0 via to_i.
      conn.pipelined { |pipeline|
        procs.each do |key|
          pipeline.hget(key, "busy")
        end
      }.sum(&:to_i)
    end
  end
end