class Berkshelf::Installer

# Build the cookbook universe by querying every source in the Berksfile.

def build_universe
  # Fetch the cookbook index from every configured source concurrently;
  # a failure against one source is reported but does not abort the rest.
  fetchers = berksfile.sources.map do |source|
    Thread.new do
      Berkshelf.formatter.msg("Fetching cookbook index from #{source}...")
      source.build_universe
    rescue Berkshelf::APIClientError => ex
      Berkshelf.formatter.warn "Error retrieving universe from source: #{source}"
      Berkshelf.formatter.warn "  * [#{ex.class}] #{ex}"
    end
  end

  # Wait for every fetch to finish before returning.
  fetchers.map(&:join)
end

# Download any dependencies that declare an explicit location (git, path, ...).

def download_locations(dependencies)
  # Only dependencies with an explicit location need fetching here;
  # anything already installed on disk is skipped.
  dependencies.select(&:location).each do |dependency|
    next if dependency.location.installed?

    Berkshelf.formatter.fetch(dependency)
    dependency.location.install
  end
end

# Create a new installer for the given Berksfile.
#
# @param berksfile [Berkshelf::Berksfile]
#   the Berksfile whose dependencies should be installed
def initialize(berksfile)
  @berksfile = berksfile
  @lockfile  = berksfile.lockfile

  # Leave one processor free for the main thread, but always keep at
  # least two workers so small machines still install in parallel.
  pool_size = [Concurrent.processor_count - 1, 2].max
  @pool     = Concurrent::FixedThreadPool.new(pool_size)

  @worker = Worker.new(berksfile)
end

# Install every dependency pinned in the lockfile's dependency graph.
#
# @return [Array(Array, Array)]
#   the locked dependencies and the installed cached cookbooks
# Install every dependency pinned in the lockfile's dependency graph.
#
# @return [Array(Array, Array)]
#   the locked dependencies and the installed cached cookbooks
#
# @raise [StandardError] the first failure raised by an install worker
def install_from_lockfile
  Berkshelf.log.info "Installing from lockfile"

  dependencies = lockfile.graph.locks.values

  Berkshelf.log.debug "  Dependencies"
  # Side-effect-only iteration: use `each`, not `map` (the original built
  # and immediately discarded an array of nils).
  dependencies.each do |dependency|
    Berkshelf.log.debug "    #{dependency}"
  end

  download_locations(dependencies)

  # Only construct the universe if we are going to install things
  unless dependencies.all?(&:installed?)
    Berkshelf.log.debug "  Not all dependencies are installed"
    build_universe
  end

  # Install in parallel on the shared thread pool.
  futures = dependencies.sort.map do |dependency|
    Concurrent::Future.execute(executor: pool) { worker.install(dependency) }
  end
  cookbooks = futures.map(&:value)

  # Surface the first worker failure, if any.
  rejects = futures.select(&:rejected?)
  raise rejects.first.reason unless rejects.empty?

  [dependencies, cookbooks]
end

# Resolve the Berksfile against the full cookbook universe and install
# the resulting cookbook set.
#
# @return [Array(Array, Array)]
#   the resolved dependencies and the installed cached cookbooks
# Resolve the Berksfile against the full cookbook universe and install
# the resulting cookbook set.
#
# @return [Array(Array, Array)]
#   the resolved dependencies and the installed cached cookbooks
#
# @raise [StandardError] the first failure raised by an install worker
def install_from_universe
  Berkshelf.log.info "Installing from universe"

  # Lockfile locks come first and `uniq` keeps the first occurrence of
  # each name, so a locked dependency wins over the Berksfile's copy.
  # (Replaces the hand-rolled inject-into-a-hash dedup.)
  dependencies = (lockfile.graph.locks.values + berksfile.dependencies).uniq(&:name)

  download_locations(dependencies)

  Berkshelf.log.debug "  Creating a resolver"
  resolver = Resolver.new(berksfile, dependencies)

  # Unlike when installing from the lockfile, we _always_ need to build
  # the universe when installing from the universe... duh
  build_universe

  # Add any explicit dependencies for already-downloaded cookbooks (like
  # path locations)
  dependencies.each do |dependency|
    next unless dependency.location

    cookbook = dependency.cached_cookbook
    Berkshelf.log.debug "  Adding explicit dependency on #{cookbook}"
    resolver.add_explicit_dependencies(cookbook)
  end

  Berkshelf.log.debug "  Starting resolution..."

  # Install the resolved set in parallel on the shared thread pool.
  futures = resolver.resolve.sort.map do |dependency|
    Concurrent::Future.execute(executor: pool) { worker.install(dependency) }
  end
  cookbooks = futures.map(&:value)

  # Surface the first worker failure, if any.
  rejects = futures.select(&:rejected?)
  raise rejects.first.reason unless rejects.empty?

  [dependencies, cookbooks]
end

# Entry point: resolve and install all dependencies, then persist the
# updated lockfile.
#
# @return [Array] the installed cached cookbooks
# Resolve and install all dependencies, then persist the updated lockfile.
#
# @return [Array] the installed cached cookbooks
def run
  lockfile.reduce!

  Berkshelf.formatter.msg("Resolving cookbook dependencies...")

  # A trusted lockfile lets us skip full resolution entirely.
  dependencies, cookbooks =
    lockfile.trusted? ? install_from_lockfile : install_from_universe

  Berkshelf.log.debug "  Finished resolving, calculating locks"

  # Only dependencies named directly in the Berksfile become top-level locks.
  to_lock = dependencies.select { |dependency| berksfile.has_dependency?(dependency) }

  Berkshelf.log.debug "  New locks"
  to_lock.each { |lock| Berkshelf.log.debug "    #{lock}" }

  lockfile.graph.update(cookbooks)
  lockfile.update(to_lock)
  lockfile.save

  cookbooks
end