class Sidekiq::JobRetry
  # Note that +jobinst+ can be nil here if an error is raised before we can
  # instantiate the job instance. All access must be guarded and
  # best effort.
  def process_retry(jobinst, msg, queue, exception)
    max_retry_attempts = retry_attempts_from(msg["retry"], @max_retries)

    msg["queue"] = (msg["retry_queue"] || queue)

    m = exception_message(exception)
    if m.respond_to?(:scrub!)
      m.force_encoding("utf-8")
      m.scrub!
    end

    msg["error_message"] = m
    msg["error_class"] = exception.class.name
    count = if msg["retry_count"]
      msg["retried_at"] = Time.now.to_f
      msg["retry_count"] += 1
    else
      msg["failed_at"] = Time.now.to_f
      msg["retry_count"] = 0
    end

    if msg["backtrace"]
      backtrace = @backtrace_cleaner.call(exception.backtrace)
      lines = if msg["backtrace"] == true
        backtrace
      else
        backtrace[0...msg["backtrace"].to_i]
      end

      msg["error_backtrace"] = compress_backtrace(lines)
    end

    # Goodbye dear message, you (re)tried your best I'm sure.
    return retries_exhausted(jobinst, msg, exception) if count >= max_retry_attempts

    strategy, delay = delay_for(jobinst, count, exception, msg)
    case strategy
    when :discard
      return # poof!
    when :kill
      return retries_exhausted(jobinst, msg, exception)
    end

    # Logging here can break retries if the logging device raises ENOSPC #3979
    # logger.debug { "Failure! Retry #{count} in #{delay} seconds" }
    jitter = rand(10) * (count + 1)
    retry_at = Time.now.to_f + delay + jitter
    payload = Sidekiq.dump_json(msg)
    redis do |conn|
      conn.zadd("retry", retry_at.to_s, payload)
    end
  end
end
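
# --------------------------------------------------------------------------
# Illustrative sketch (not part of JobRetry itself): the job-side knobs that
# process_retry reads. This assumes the standard Sidekiq job DSL
# (sidekiq_options, sidekiq_retry_in); the job class, queue name, and delay
# values below are invented for the example.
# --------------------------------------------------------------------------
require "sidekiq"

class SyncAccountJob
  include Sidekiq::Job

  # "retry" caps max_retry_attempts, "retry_queue" is copied into msg["queue"]
  # before rescheduling, and "backtrace" asks for up to 20 cleaned backtrace
  # lines to be compressed into msg["error_backtrace"].
  sidekiq_options retry: 5, retry_queue: "low", backtrace: 20

  # delay_for consults this block: return a number of seconds to delay the
  # retry, nil to use the default schedule, :discard to drop the job, or
  # :kill to send it straight to the dead set (retries_exhausted).
  sidekiq_retry_in do |count, _exception|
    count < 3 ? 10 * (count + 1) : :kill
  end

  def perform(account_id)
    # work that may raise and hand the job to process_retry
  end
end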