class RuboCop::Cop::Style::LineEndConcatenation


# This cop checks for string literal concatenation at
# the end of a line.
#
# @example
#
#   # bad
#   some_str = 'ala' +
#              'bala'
#   some_str = 'ala' <<
#              'bala'
#
#   # good
#   some_str = 'ala' \
#              'bala'

# Names cops whose autocorrections conflict with this cop's, so the
# framework will not apply both in the same correction pass.
#
# @return [Array<Class>] incompatible cop classes
def self.autocorrect_incompatible_with
  [Style::UnneededInterpolation]
end

# Builds the corrector that replaces a line-end concatenation operator
# (`+` or `<<`) with a line-continuation backslash.
#
# @param operator_range [Parser::Source::Range] range covering the operator
# @return [Proc] lambda invoked by the autocorrection framework
def autocorrect(operator_range)
  # Include any trailing whitespace so we don't create a syntax error.
  operator_range = range_with_surrounding_space(range: operator_range,
                                                side: :right,
                                                newlines: false)
  one_more_char = operator_range.resize(operator_range.size + 1)
  # Don't create a double backslash at the end of the line, in case
  # there already was a backslash after the concatenation operator.
  operator_range = one_more_char if one_more_char.source.end_with?('\\')
  ->(corrector) { corrector.replace(operator_range, '\\') }
end

# Inspects the token triple starting at `index` (string, operator, string)
# and registers an offense when the strings are concatenated across a
# line break with `+` or `<<`.
#
# @param index [Integer] position in `processed_source.tokens`
def check_token_set(index)
  predecessor, operator, successor = processed_source.tokens[index, 3]
  return unless eligible_successor?(successor) &&
                eligible_operator?(operator) &&
                eligible_predecessor?(predecessor)
  # Concatenation within a single line is fine; only flag operators
  # that sit at the end of a line.
  return if operator.line == successor.line
  next_successor = token_after_last_string(successor, index)
  return unless eligible_next_successor?(next_successor)
  add_offense(operator.pos, location: operator.pos)
end

# True when no token follows the string sequence, or the following token
# is not a high-precedence operator (folding the lines would otherwise
# change how the expression binds).
def eligible_next_successor?(next_successor)
  !(next_successor &&
    HIGH_PRECEDENCE_OP_TOKEN_TYPES.include?(next_successor.type))
end

# True when the middle token is one of the concatenation operators this
# cop targets (see CONCAT_TOKEN_TYPES).
def eligible_operator?(operator)
  CONCAT_TOKEN_TYPES.include?(operator.type)
end

# True when the token before the operator is a plain (standard-quoted)
# string literal.
def eligible_predecessor?(predecessor)
  standard_string_literal?(predecessor)
end

# True when a token follows the operator and it is a plain
# (standard-quoted) string literal. `successor` may be nil at EOF.
def eligible_successor?(successor)
  successor && standard_string_literal?(successor)
end

# Entry point called by RuboCop for each file: slides a window over every
# token index, letting check_token_set examine each candidate triple.
#
# @param processed_source [RuboCop::ProcessedSource]
def investigate(processed_source)
  processed_source.tokens.each_index do |index|
    check_token_set(index)
  end
end

# True when the token belongs to a standard quoted string literal: either
# a simple string token, or a string-edge token (begin/end) whose text is
# one of the ordinary quote delimiters (i.e. not %-literals or heredocs).
def standard_string_literal?(token)
  case token.type
  when SIMPLE_STRING_TOKEN_TYPE
    true
  when *COMPLEX_STRING_EDGE_TOKEN_TYPES
    QUOTE_DELIMITERS.include?(token.text)
  else
    false
  end
end

# Returns the token that follows the string starting at `successor`.
# For interpolated strings (which open with a begin-edge token) it walks
# forward, tracking nesting depth, until the matching end token closes;
# simple strings occupy a single token, so no scan is needed.
#
# @param successor [Token] first token of the trailing string
# @param base_index [Integer] index of the predecessor string token
# @return [Token, nil] the token after the last string token
def token_after_last_string(successor, base_index)
  # The triple is (predecessor, operator, successor), so the token after
  # a simple successor string sits three positions past base_index.
  index = base_index + 3
  begin_token, end_token = COMPLEX_STRING_EDGE_TOKEN_TYPES
  if successor.type == begin_token
    ends_to_find = 1
    while ends_to_find > 0
      case processed_source.tokens[index].type
      when begin_token then ends_to_find += 1
      when end_token then ends_to_find -= 1
      end
      index += 1
    end
  end
  processed_source.tokens[index]
end