class RuboCop::AST::ProcessedSource

ProcessedSource contains objects which are generated by Parser
and other information such as disabled lines for cops.
It also provides a convenient way to access source lines.

def self.from_file(path, ruby_version)

# Builds a ProcessedSource from the file at `path`, reading the file in
# binary mode so the original bytes are preserved for later re-encoding.
def self.from_file(path, ruby_version)
  raw = File.read(path, mode: 'rb')
  new(raw, ruby_version, path)
end

def [](*args)

# Subscript access delegated to the source lines, so callers can use
# indexes, ranges, or (start, length) pairs just like on an Array.
def [](*args)
  lines.slice(*args)
end

def ast_with_comments

# Memoized association of AST nodes with their comments, or nil when
# either the AST or the comment list is missing.
def ast_with_comments
  return unless ast && comments

  @ast_with_comments ||= Parser::Source::Comment.associate(ast, comments)
end

def blank?

# @return [Boolean] true when parsing produced no AST (e.g. empty or
#   syntactically invalid source)
def blank?
  ast.equal?(nil)
end

def checksum

Raw source checksum for tracking infinite loops.
# Raw source checksum for tracking infinite loops.
def checksum
  sha1 = Digest::SHA1.new
  sha1 << @raw_source
  sha1.hexdigest
end

def comment_at_line(line)

Returns:
  • (Comment, nil) - the comment at that line, if any.
# @return [Comment, nil] the comment starting at the given line, if any
def comment_at_line(line)
  comment_index.fetch(line, nil)
end

def comment_index

# Memoized mapping from line number to the comment that starts there.
# If several comments were parsed on the same line, the last one wins.
def comment_index
  @comment_index ||= comments.each_with_object({}) do |comment, index|
    index[comment.location.line] = comment
  end
end

def comments_before_line(line)

Deprecated:
  • Use `each_comment_in_lines`
# @deprecated Use `each_comment_in_lines` instead.
# Collects every comment located on or before the given line.
def comments_before_line(line)
  collected = []
  each_comment_in_lines(0..line) { |comment| collected << comment }
  collected
end

def contains_comment?(source_range)

Returns:
  • (Boolean) - if any of the lines in the given `source_range` has a comment.
# @return [Boolean] whether any line spanned by the given
#   `source_range` carries a comment
def contains_comment?(source_range)
  covered_lines = source_range.line..source_range.last_line
  each_comment_in_lines(covered_lines).any?
end

def create_parser(ruby_version)

# Builds a whitequark parser for `ruby_version`, wired to funnel all
# diagnostics into `@diagnostics`.
def create_parser(ruby_version)
  builder = RuboCop::AST::Builder.new
  parser = parser_class(ruby_version).new(builder)
  # On JRuby, tokenize() can hang unless all errors are treated as
  # fatal — a Racc bug discussed in issue #93 of the whitequark/parser
  # project on GitHub.
  parser.diagnostics.all_errors_are_fatal = (RUBY_ENGINE != 'ruby')
  parser.diagnostics.ignore_warnings = false
  parser.diagnostics.consumer = ->(diagnostic) { @diagnostics << diagnostic }
  parser
end

def current_line(token)

# Returns the text of the line the given token sits on.
def current_line(token)
  line_index = token.line - 1
  lines[line_index]
end

def each_comment(&block)

Deprecated:
  • Use `comments.each`
# @deprecated Use `comments.each` instead.
# Plain delegation: yields each parsed comment to the block, or returns
# an Enumerator when no block is given.
def each_comment(&block)
  comments.each(&block)
end

def each_comment_in_lines(line_range)

Enumerates on the comments contained with the given `line_range`
# Enumerates the comments contained within the given `line_range`.
# Returns an Enumerator when called without a block.
def each_comment_in_lines(line_range)
  return to_enum(:each_comment_in_lines, line_range) unless block_given?

  line_range.each do |line_number|
    comment = comment_index[line_number]
    yield comment if comment
  end
end

def each_token(&block)

Deprecated:
  • Use `tokens.each`
# @deprecated Use `tokens.each` instead.
# Plain delegation: yields each token to the block, or returns an
# Enumerator when no block is given.
def each_token(&block)
  tokens.each(&block)
end

def file_path

# Returns the source buffer's name — the file path when parsed from a
# file, or the synthetic string-source name otherwise (see `parse`).
def file_path
  buffer.name
end

def find_comment(&block)

Deprecated:
  • Use `comment_at_line`, `each_comment_in_lines`, or `comments.find`
# @deprecated Use `comment_at_line`, `each_comment_in_lines`, or
#   `comments.find` instead.
# Plain delegation to `comments.find`.
def find_comment(&block)
  comments.find(&block)
end

def find_token(&block)

Deprecated:
  • Use `tokens.find`
# @deprecated Use `tokens.find` instead.
# Plain delegation to `tokens.find`.
def find_token(&block)
  tokens.find(&block)
end

def first_token_index(range_or_node)

# Index within `sorted_tokens` of the first token that begins at or
# after the start of the given range or node (binary search).
def first_token_index(range_or_node)
  target_pos = source_range(range_or_node).begin_pos
  sorted_tokens.bsearch_index { |token| token.begin_pos >= target_pos }
end

def first_token_of(range_or_node)

# The first token covered by the given range or node.
def first_token_of(range_or_node)
  index = first_token_index(range_or_node)
  sorted_tokens[index]
end

def following_line(token)

# Returns the text of the line just after the given token's line
# (token lines are 1-based, `lines` is 0-based).
def following_line(token)
  next_line_index = token.line
  lines[next_line_index]
end

def initialize(source, ruby_version, path = nil)

# @param source [String] the program text to process
# @param ruby_version [Float] target Ruby version (e.g. 2.7)
# @param path [String, nil] originating file path, if any
def initialize(source, ruby_version, path = nil)
  # Re-tag the bytes as UTF-8 regardless of the encoding the source was
  # read with, since the default external encoding may be non-UTF-8.
  unless source.encoding == Encoding::UTF_8
    source.force_encoding(Encoding::UTF_8)
  end

  @raw_source = source
  @path = path
  @diagnostics = []
  @ruby_version = ruby_version
  @parser_error = nil
  parse(source, ruby_version)
end

def last_token_index(range_or_node)

# Index within `sorted_tokens` of the first token that ends at or after
# the end of the given range or node (binary search).
def last_token_index(range_or_node)
  target_pos = source_range(range_or_node).end_pos
  sorted_tokens.bsearch_index { |token| token.end_pos >= target_pos }
end

def last_token_of(range_or_node)

# The last token covered by the given range or node.
def last_token_of(range_or_node)
  index = last_token_index(range_or_node)
  sorted_tokens[index]
end

def line_indentation(line_number)

# Number of leading whitespace characters on the given (1-based) line.
def line_indentation(line_number)
  line = lines[line_number - 1]
  indentation = line.match(/^(\s*)/)[1]
  indentation.to_s.length
end

def line_with_comment?(line)

Returns:
  • (Boolean) - if the given line number has a comment.
# @return [Boolean] whether a comment starts on the given line number
def line_with_comment?(line)
  comment_index.key?(line)
end

def lines

Returns the source lines, line break characters removed, excluding a
possible __END__ and everything that comes after.
# Memoized source lines (line breaks removed), truncated at a
# `__END__` marker that appears after the last token.
def lines
  @lines ||= begin
    source_lines = @buffer.source_lines
    cutoff = tokens.empty? ? source_lines.size : tokens.last.line
    kept = []
    source_lines.each_with_index do |line, index|
      # `__END__` only terminates the source when it follows the last
      # token; an earlier occurrence is ordinary content.
      break if index >= cutoff && line == '__END__'

      kept << line
    end
    kept
  end
end

def parse(source, ruby_version)

# Parses `source` into @ast, @comments and @tokens via a fresh buffer.
# An encoding error is recorded in @parser_error and leaves the AST nil
# with empty comment/token lists.
def parse(source, ruby_version)
  @buffer = Parser::Source::Buffer.new(@path || STRING_SOURCE_NAME, 1)

  begin
    @buffer.source = source
  rescue EncodingError => e
    @parser_error = e
    @ast = nil
    @comments = []
    @tokens = []
    return
  end

  @ast, @comments, @tokens = tokenize(create_parser(ruby_version))
end

def parser_class(ruby_version)

rubocop:disable Metrics/MethodLength
# Resolves the whitequark parser class for `ruby_version`, requiring
# the matching parser file on demand.
#
# @raise [ArgumentError] when the version is not supported
def parser_class(ruby_version)
  # `case` is kept (rather than a Hash lookup) so version matching uses
  # `===`, e.g. an Integer 3 still matches `3.0`.
  feature, class_name =
    case ruby_version
    when 2.4 then ['parser/ruby24', 'Ruby24']
    when 2.5 then ['parser/ruby25', 'Ruby25']
    when 2.6 then ['parser/ruby26', 'Ruby26']
    when 2.7 then ['parser/ruby27', 'Ruby27']
    when 2.8, 3.0 then ['parser/ruby30', 'Ruby30']
    else
      raise ArgumentError,
            "RuboCop found unknown Ruby version: #{ruby_version.inspect}"
    end
  require feature
  Parser.const_get(class_name, false)
end

def preceding_line(token)

# Returns the text of the line just before the given token's line
# (token lines are 1-based, `lines` is 0-based).
def preceding_line(token)
  previous_line_index = token.line - 2
  lines[previous_line_index]
end

def sorted_tokens

The tokens list is always sorted by token position, except for cases when heredoc
is passed as a method argument. In this case tokens are interleaved by
heredoc contents' tokens.
# Memoized tokens sorted by position; needed because heredoc arguments
# interleave the raw token stream.
def sorted_tokens
  # Pairing each token with its original index makes the sort stable.
  @sorted_tokens ||= tokens.each_with_index
                           .sort_by { |token, index| [token.begin_pos, index] }
                           .map { |token, _index| token }
end

def source_range(range_or_node)

# Normalizes the argument to a source range: nodes (anything responding
# to `source_range`) are unwrapped, ranges pass through unchanged.
def source_range(range_or_node)
  return range_or_node.source_range if range_or_node.respond_to?(:source_range)

  range_or_node
end

def start_with?(string)

# @return [Boolean] whether the first source line starts with `string`;
#   false when there are no lines at all
def start_with?(string)
  first_line = self[0]
  return false if first_line.nil?

  first_line.start_with?(string)
end

def tokenize(parser)

# Runs the parser over @buffer and returns `[ast, comments, tokens]`
# with tokens converted to RuboCop's own Token objects.
def tokenize(parser)
  begin
    ast, comments, tokens = parser.tokenize(@buffer)
    # A `false` AST is forced to `nil`, see
    # https://github.com/whitequark/parser/pull/722
    ast ||= nil
  rescue Parser::SyntaxError
    # The errors already live in the diagnostics; just substitute
    # empty comment/token lists.
    comments = []
    tokens = []
  end

  ast&.complete!
  tokens.map! { |token| Token.from_parser_token(token) }
  [ast, comments, tokens]
end

def tokens_within(range_or_node)

def tokens_within(range_or_node)
  begin_index = first_token_index(range_or_node)
  end_index = last_token_index(range_or_node)
  sorted_tokens[begin_index..end_index]
end

def valid_syntax?

# @return [Boolean] true when parsing succeeded and no diagnostic was
#   reported at an invalid severity level
def valid_syntax?
  return false if @parser_error

  @diagnostics.all? { |diagnostic| !INVALID_LEVELS.include?(diagnostic.level) }
end