class PDF::Reader::Buffer


A string tokeniser that recognises PDF grammar. When passed an IO stream or a
string, repeated calls to token() will return the next token from the source.
This is very low level, and getting the raw tokens is not very useful in itself.
This will usually be used in conjunction with PDF::Reader::Parser, which converts
the raw tokens into objects we can work with (strings, ints, arrays, etc)
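
For example, a minimal sketch (assumes the pdf-reader gem, which provides this
class, is available):

  require 'stringio'
  require 'pdf-reader'

  buffer = PDF::Reader::Buffer.new(StringIO.new("/Type /Catalog"))
  buffer.token # => "/"
  buffer.token # => "Type"
  buffer.token # => "/"
  buffer.token # => "Catalog"
  buffer.token # => nil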

def check_size_is_non_zero


raise MalformedPDFError if the underlying io is empty. Seeking relative to
the end of a zero-byte stream raises Errno::EINVAL.
def check_size_is_non_zero
  @io.seek(-1, IO::SEEK_END)
  @io.seek(0)
rescue Errno::EINVAL
  raise MalformedPDFError, "PDF file is empty"
end
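
For example, this guard is what surfaces the error on a zero-byte file
(illustrative):

  PDF::Reader::Buffer.new(StringIO.new("")).find_first_xref_offset
  # raises PDF::Reader::MalformedPDFError, "PDF file is empty"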

def empty?


return true if there are no more tokens left
def empty?
  prepare_tokens if @tokens.size < 3
  @tokens.empty?
end
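
Illustrative usage:

  buffer = PDF::Reader::Buffer.new(StringIO.new("99"))
  buffer.empty? # => false
  buffer.token  # => "99"
  buffer.empty? # => true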

def find_first_xref_offset


return the byte offset where the first XRef table in the source can be found.
def find_first_xref_offset
  check_size_is_non_zero
  @io.seek(-TRAILING_BYTECOUNT, IO::SEEK_END) rescue @io.seek(0)
  data = @io.read(TRAILING_BYTECOUNT)
  raise MalformedPDFError, "PDF does not contain EOF marker" if data.nil?
  # the PDF 1.7 spec (section #3.4) says that EOL markers can be either \r, \n, or both.
  lines = data.split(/[\n\r]+/).reverse
  eof_index = lines.index { |l| l.strip[/^%%EOF/] }
  raise MalformedPDFError, "PDF does not contain EOF marker" if eof_index.nil?
  raise MalformedPDFError, "PDF EOF marker does not follow offset" if eof_index >= lines.size-1
  offset = lines[eof_index+1].to_i
  # a byte offset < 0 doesn't make much sense. This is unlikely to happen, but in theory some
  # corrupted PDFs might have a line that looks like a negative int preceding the `%%EOF`
  raise MalformedPDFError, "invalid xref offset" if offset < 0
  offset
end
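
For example, with a contrived trailer (a real file would place an xref table
at the named offset):

  io = StringIO.new("%PDF-1.7\nstartxref\n1234\n%%EOF")
  PDF::Reader::Buffer.new(io).find_first_xref_offset # => 1234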

def in_content_stream?


Returns true if this buffer is parsing a content stream
def in_content_stream?
  @in_content_stream ? true : false
end

def initialize(io, opts = {})


Creates a new buffer.

Params:

io - an IO stream (usually a StringIO) with the raw data to tokenise

options:

:seek - a byte offset to seek to before starting to tokenise
:content_stream - set to true if buffer will be tokenising a content stream. Defaults to false
def initialize(io, opts = {})
  @io = io
  @tokens = []
  @in_content_stream = opts[:content_stream]
  @io.seek(opts[:seek]) if opts[:seek]
  @pos = @io.pos
end
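
For example (a sketch; the offset and flag values are arbitrary):

  io = StringIO.new("q 0 0 100 100 re f Q")
  buffer = PDF::Reader::Buffer.new(io, :content_stream => true, :seek => 2)
  buffer.token # => "0", the first token at or after byte offset 2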

def merge_indirect_reference


detect a series of 3 tokens that make up an indirect object. If we find
them, replace the tokens with a PDF::Reader::Reference instance.

Merging them into a single string was another option, but that would mean
code further up the stack would need to check every token to see if it looks
like an indirect object. For optimisation reasons, I'd rather avoid
that extra check.

It's incredibly likely that the next 3 tokens in the buffer are NOT an
indirect reference, so test for that case first and avoid the relatively
expensive regexp checks if possible.
def merge_indirect_reference
  return if @tokens.size < 3
  return if @tokens[2] != "R"
  token_one = @tokens[0]
  token_two = @tokens[1]
  if token_one.is_a?(String) && token_two.is_a?(String) && token_one.match(DIGITS_ONLY) && token_two.match(DIGITS_ONLY)
    @tokens[0] = PDF::Reader::Reference.new(token_one.to_i, token_two.to_i)
    @tokens.delete_at(2)
    @tokens.delete_at(1)
  end
end
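
For example (illustrative):

  buffer = PDF::Reader::Buffer.new(StringIO.new("7 0 R"))
  buffer.token # => a PDF::Reader::Reference with id 7 and gen 0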

def peek_byte


peek at the next character in the io stream, leaving the stream position
untouched
def peek_byte
  byte = @io.getbyte
  @io.seek(-1, IO::SEEK_CUR) if byte
  byte
end

def prepare_hex_token


if we're currently inside a hex string, read hex nibbles until
we find a closing >
def prepare_hex_token
  str = "".dup
  loop do
    byte = @io.getbyte
    if byte.nil?
      break
    elsif (48..57).include?(byte) || (65..90).include?(byte) || (97..122).include?(byte)
      str << byte
    elsif byte <= 32
      # ignore it
    else
      @tokens << str if str.size > 0
      @tokens << ">" if byte != 0x3E # '>'
      @tokens << byte.chr
      break
    end
  end
end
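
For example, a hex string splits into three tokens (sketch):

  buffer = PDF::Reader::Buffer.new(StringIO.new("<48656C6C6F>"))
  buffer.token # => "<"
  buffer.token # => "48656C6C6F"
  buffer.token # => ">"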

def prepare_inline_token

Extract data between ID and EI
If the EI follows white-space the space is dropped from the data
The EI must be followed by white-space or end of buffer
This is to reduce the chance of accidentally matching an embedded EI
def prepare_inline_token
  idstart = @io.pos
  prevchr = ''
  eisize = 0 # how many chars in the end marker
  seeking = 'E' # what are we looking for now?
  loop do
    chr = @io.read(1)
    break if chr.nil?
    case seeking
    when 'E'
      if chr == 'E'
        seeking = 'I'
        if WHITE_SPACE.include? prevchr
          eisize = 3 # include whitespace in delimiter, i.e. drop from data
        else # assume the EI immediately follows the data
          eisize = 2 # leave prevchr in data
        end
      end
    when 'I'
      if chr == 'I'
        seeking = ''
      else
        seeking = 'E'
      end
    when ''
      if WHITE_SPACE.include? chr
        eisize += 1 # Drop trailer
        break
      else
        seeking = 'E'
      end
    end
    prevchr = chr.is_a?(String) ? chr : ''
  end
  unless seeking == ''
    raise MalformedPDFError, "EI terminator not found"
  end
  eiend = @io.pos
  @io.seek(idstart, IO::SEEK_SET)
  str = @io.read(eiend - eisize - idstart) # get the ID content
  @tokens << str.freeze if str
end
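
For example (a minimal inline image; the operands are contrived):

  io = StringIO.new("BI /W 1 ID abc EI")
  buffer = PDF::Reader::Buffer.new(io, :content_stream => true)
  6.times.map { buffer.token } # => ["BI", "/", "W", "1", "ID", "abc"]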

def prepare_literal_token


if we're currently inside a literal string we more or less just read bytes until
we find the closing ) delimiter. Lots of bytes that would otherwise indicate the
start of a new token in regular mode are left untouched when inside a literal
string.

The entire literal string will be returned as a single token. It will need further
processing to fix things like escaped new lines, but that's someone else's
problem.
def prepare_literal_token
  str = "".dup
  count = 1
  while count > 0
    byte = @io.getbyte
    if byte.nil?
      count = 0 # unbalanced parens
    elsif byte == 0x5C
      # escape sequence: keep the backslash and the escaped byte verbatim,
      # guarding against EOF immediately after the backslash
      str << byte
      escaped = @io.getbyte
      str << escaped unless escaped.nil?
    elsif byte == 0x28 # "("
      str << "("
      count += 1
    elsif byte == 0x29 # ")"
      count -= 1
      str << ")" unless count == 0
    else
      str << byte unless count == 0
    end
  end
  @tokens << str if str.size > 0
  @tokens << ")"
end
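
For example, balanced nested parens stay inside a single token (sketch):

  buffer = PDF::Reader::Buffer.new(StringIO.new("(Hello (nested) world)"))
  buffer.token # => "("
  buffer.token # => "Hello (nested) world"
  buffer.token # => ")"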

def prepare_regular_token


Extract the next regular token and store it in our buffer, ready to be returned.

What each byte means is complex, check out section "3.1.1 Character Set" of the 1.7 spec
to read up on it.
def prepare_regular_token
  tok = "".dup
  loop do
    byte = @io.getbyte
    case byte
    when nil
      break
    when 0x25
      # comment, ignore everything until the next EOL char
      loop do
        commentbyte = @io.getbyte
        break if commentbyte.nil? || commentbyte == 0x0A || commentbyte == 0x0D
      end
    when *TOKEN_WHITESPACE
      # white space, token finished
      @tokens << tok if tok.size > 0
      # If the token was empty, chomp the rest of the whitespace too
      while TOKEN_WHITESPACE.include?(peek_byte) && tok.size == 0
        @io.getbyte
      end
      tok = "".dup
      break
    when 0x3C
      # opening delimiter '<', start of new token
      @tokens << tok if tok.size > 0
      if peek_byte == 0x3C # check if token is actually '<<'
        @io.getbyte
        @tokens << "<<"
      else
        @tokens << "<"
      end
      tok = "".dup
      break
    when 0x3E
      # closing delimiter '>', start of new token
      @tokens << tok if tok.size > 0
      if peek_byte == 0x3E # check if token is actually '>>'
        @io.getbyte
        @tokens << ">>"
      else
        @tokens << ">"
      end
      tok = "".dup
      break
    when 0x28, 0x5B, 0x7B
      # opening delimiter, start of new token
      @tokens << tok if tok.size > 0
      @tokens << byte.chr
      tok = "".dup
      break
    when 0x29, 0x5D, 0x7D
      # closing delimiter
      @tokens << tok if tok.size > 0
      @tokens << byte.chr
      tok = "".dup
      break
    when 0x2F
      # PDF name, start of new token
      @tokens << tok if tok.size > 0
      @tokens << byte.chr
      @tokens << "" if byte == 0x2F && ([nil, 0x20, 0x0A] + TOKEN_DELIMITER).include?(peek_byte)
      tok = "".dup
      break
    else
      tok << byte
    end
  end
  @tokens << tok if tok.size > 0
end
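
For example, dictionary delimiters become their own tokens (sketch):

  buffer = PDF::Reader::Buffer.new(StringIO.new("<</Count 2>>"))
  5.times.map { buffer.token } # => ["<<", "/", "Count", "2", ">>"]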

def prepare_tokens


attempt to prime the buffer with the next few tokens.
def prepare_tokens
  10.times do
    case state
    when :literal_string then prepare_literal_token
    when :hex_string     then prepare_hex_token
    when :regular        then prepare_regular_token
    when :inline         then prepare_inline_token
    end
  end
  save_pos
end

def read(bytes, opts = {})

return raw bytes from the underlying IO stream.

bytes - the number of bytes to read

options:

:skip_eol - if true, the IO stream is advanced past a CRLF, CR or LF
that is sitting under the io cursor.

Note:
Skipping a bare CR is not spec-compliant.
This is because the data may start with LF.
However we check for CRLF first, so the ambiguity is avoided.
def read(bytes, opts = {})
  reset_pos
  if opts[:skip_eol]
    @io.seek(-1, IO::SEEK_CUR)
    str = @io.read(2)
    if str.nil?
      return nil
    elsif str == CRLF # This MUST be done before checking for CR alone
      # do nothing
    elsif str[0, 1] == LF || str[0, 1] == CR # LF or CR alone
      @io.seek(-1, IO::SEEK_CUR)
    else
      @io.seek(-2, IO::SEEK_CUR)
    end
  end
  bytes = @io.read(bytes)
  save_pos
  bytes
end
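
For example, pulling raw stream data after the stream keyword (a sketch; real
callers take the byte count from the stream dictionary's Length entry):

  io = StringIO.new("stream\r\nBT ET\nendstream")
  buffer = PDF::Reader::Buffer.new(io)
  buffer.token # => "stream"
  buffer.read(5, :skip_eol => true) # => "BT ET"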

def reset_pos


Some bastard moved our IO stream cursor. Restore it.
def reset_pos
  @io.seek(@pos) if @io.pos != @pos
end

def save_pos


save the current position of the source IO stream. If someone else (like another buffer)
moves the cursor, we can then restore it.
def save_pos
  @pos = @io.pos
end

def state


tokenising behaves slightly differently based on the current context.
Determine the current context/state by examining the last token we found
def state
  case @tokens.last
  when LEFT_PAREN then :literal_string
  when LESS_THAN then :hex_string
  when STREAM then :stream
  when ID
    if in_content_stream? && @tokens[-2] != FWD_SLASH
      :inline
    else
      :regular
    end
  else
    :regular
  end
end

def token


return the next token from the source. Returns a string if a token
is found, nil if there are no tokens left.
def token
  reset_pos
  prepare_tokens if @tokens.size < 3
  merge_indirect_reference
  prepare_tokens if @tokens.size < 3
  @tokens.shift
end