How to read a file in reverse order?

Question:

How can I read a file in reverse order using Python? I want to read a file from the last line to the first line.

Asked By: Nimmy


Answers:

for line in reversed(open("filename").readlines()):
    print line.rstrip()

And in Python 3:

for line in reversed(list(open("filename"))):
    print(line.rstrip())
Answered By: Matt Joiner
for line in reversed(open("file").readlines()):
    print line.rstrip()

If you are on Linux, you can use the tac command.

$ tac file
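If you want to consume tac's output from Python rather than the shell, a minimal sketch using subprocess (the filename is illustrative, and a GNU coreutils tac binary is assumed to be on the PATH):

import subprocess

# Capture tac's output in one go (fine for small files).
result = subprocess.run(["tac", "filename"], capture_output=True, text=True, check=True)
for line in result.stdout.splitlines():
    print(line)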

Two recipes for this can also be found on ActiveState.

Answered By: ghostdog74
import os
import re
import sys

def filerev(somefile, buffer=0x20000):
  somefile.seek(0, os.SEEK_END)
  size = somefile.tell()
  lines = ['']
  rem = size % buffer
  pos = max(0, (size // buffer - 1) * buffer)
  while pos >= 0:
    somefile.seek(pos, os.SEEK_SET)
    data = somefile.read(rem + buffer) + lines[0]
    rem = 0
    lines = re.findall('[^\n]*\n?', data)
    ix = len(lines) - 2
    while ix > 0:
      yield lines[ix]
      ix -= 1
    pos -= buffer
  else:
    yield lines[0]

with open(sys.argv[1], 'r') as f:
  for line in filerev(f):
    sys.stdout.write(line)

I had to do this some time ago and used the code below. It pipes to the shell. I am afraid I do not have the complete script anymore. If you are on a Unix-ish operating system you can use tac; however, on e.g. Mac OS X the tac command does not work, so use tail -r instead. The code snippet below tests which platform you're on and adjusts the command accordingly.

# We need a command to reverse the line order of the file. On Linux this
# is 'tac', on OSX it is 'tail -r'
# 'tac' is not supported on osx, 'tail -r' is not supported on linux.

if sys.platform == "darwin":
    command += "|tail -r"
elif sys.platform == "linux2":
    command += "|tac"
else:
    raise EnvironmentError('Platform %s not supported' % sys.platform)
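The complete script is gone, but a self-contained sketch of the same idea might look like the following; the function name and streaming approach are illustrative, and note that on Python 3 sys.platform reports "linux" rather than "linux2":

import subprocess
import sys

def reverse_with_shell(path):
    """Yield the lines of `path` last-first by piping the file through tac / tail -r."""
    if sys.platform == "darwin":
        cmd = ["tail", "-r", path]            # macOS/BSD: no tac, but tail -r
    elif sys.platform.startswith("linux"):
        cmd = ["tac", path]                   # Linux: GNU coreutils tac
    else:
        raise EnvironmentError('Platform %s not supported' % sys.platform)
    with subprocess.Popen(cmd, stdout=subprocess.PIPE, text=True) as proc:
        for line in proc.stdout:
            yield line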
Answered By: jeorgen

Here you can find my implementation. You can limit the RAM usage by changing the "buffer" variable; there is a bug that makes the program print an empty line at the beginning.

RAM usage may also increase if there are no newlines for more than buffer bytes: the "leak" variable will keep growing until a newline ("\n") is seen.

This also works for 16 GB files, which is larger than my total memory.

import os,sys
buffer = 1024*1024 # 1MB
f = open(sys.argv[1])
f.seek(0, os.SEEK_END)
filesize = f.tell()

division, remainder = divmod(filesize, buffer)
line_leak=''

for chunk_counter in range(1,division + 2):
    if division - chunk_counter < 0:
        f.seek(0, os.SEEK_SET)
        chunk = f.read(remainder)
    elif division - chunk_counter >= 0:
        f.seek(-(buffer*chunk_counter), os.SEEK_END)
        chunk = f.read(buffer)

    chunk_lines_reversed = list(reversed(chunk.split('\n')))
    if line_leak: # add line_leak from previous chunk to beginning
        chunk_lines_reversed[0] += line_leak

    # after reversed, save the leakedline for next chunk iteration
    line_leak = chunk_lines_reversed.pop()

    if chunk_lines_reversed:
        print "n".join(chunk_lines_reversed)
    # print the last leaked line
    if division - chunk_counter < 0:
        print line_leak
Answered By: Bekir Dogan

A correct, efficient answer written as a generator.

import os

def reverse_readline(filename, buf_size=8192):
    """A generator that returns the lines of a file in reverse order"""
    with open(filename, 'rb') as fh:
        segment = None
        offset = 0
        fh.seek(0, os.SEEK_END)
        file_size = remaining_size = fh.tell()
        while remaining_size > 0:
            offset = min(file_size, offset + buf_size)
            fh.seek(file_size - offset)
            buffer = fh.read(min(remaining_size, buf_size)).decode(encoding='utf-8')
            remaining_size -= buf_size
            lines = buffer.split('\n')
            # The first line of the buffer is probably not a complete line so
            # we'll save it and append it to the last line of the next buffer
            # we read
            if segment is not None:
                # If the previous chunk starts right from the beginning of line
                # do not concat the segment to the last line of new chunk.
                # Instead, yield the segment first 
                if buffer[-1] != '\n':
                    lines[-1] += segment
                else:
                    yield segment
            segment = lines[0]
            for index in range(len(lines) - 1, 0, -1):
                if lines[index]:
                    yield lines[index]
        # Don't yield None if the file was empty
        if segment is not None:
            yield segment
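A minimal usage sketch for the generator above (the filename is illustrative):

for line in reverse_readline("filename"):
    print(line)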
Answered By: srohde

How about something like this:

import os


def readlines_reverse(filename):
    with open(filename) as qfile:
        qfile.seek(0, os.SEEK_END)
        position = qfile.tell()
        line = ''
        while position >= 0:
            qfile.seek(position)
            next_char = qfile.read(1)
            if next_char == "n":
                yield line[::-1]
                line = ''
            else:
                line += next_char
            position -= 1
        yield line[::-1]


if __name__ == '__main__':
    for qline in readlines_reverse(raw_input()):
        print qline

Since the file is read character by character in reverse order, it will work even on very large files, as long as individual lines fit into memory.

Answered By: Berislav Lopac

A simple function to create a second, reversed file (Linux only):

import os
def tac(file1, file2):
     print(os.system('tac %s > %s' % (file1,file2)))

How to use it:

tac('ordered.csv', 'reversed.csv')
f = open('reversed.csv')
Answered By: Alexandre Andrade
def reverse_lines(filename):
    y=open(filename).readlines()
    return y[::-1]
Answered By: Gareema

Always use with when working with files as it handles everything for you:

with open('filename', 'r') as f:
    for line in reversed(f.readlines()):
        print line

Or in Python 3:

with open('filename', 'r') as f:
    for line in reversed(list(f.readlines())):
        print(line)
Answered By: Carlos Afonso

You can also use python module file_read_backwards.

After installing it, via pip install file_read_backwards (v1.2.1), you can read the entire file backwards (line-wise) in a memory efficient manner via:

#!/usr/bin/env python2.7

from file_read_backwards import FileReadBackwards

with FileReadBackwards("/path/to/file", encoding="utf-8") as frb:
    for l in frb:
         print l

It supports "utf-8", "latin-1", and "ascii" encodings.

Support is also available for Python 3. Further documentation can be found at http://file-read-backwards.readthedocs.io/en/latest/readme.html
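For Python 3 the usage is the same apart from the print function; a minimal sketch, assuming the same FileReadBackwards API:

#!/usr/bin/env python3
from file_read_backwards import FileReadBackwards

with FileReadBackwards("/path/to/file", encoding="utf-8") as frb:
    for line in frb:
        print(line)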

Answered By: user7321751

Thanks for the answer, @srohde. It has a small bug: it checks for the newline character with the 'is' operator, and I could not comment on the answer with 1 reputation. Also, I'd like to manage opening the file outside, because that enables me to embed my ramblings for luigi tasks.

What I needed to change has the form:

with open(filename) as fp:
    for line in fp:
        #print line,  # contains new line
        print '>{}<'.format(line)

I’d love to change to:

with open(filename) as fp:
    for line in reversed_fp_iter(fp, 4):
        #print line,  # contains new line
        print '>{}<'.format(line)

Here is a modified answer that wants a file handle and keeps newlines:

import os

def reversed_fp_iter(fp, buf_size=8192):
    """a generator that returns the lines of a file in reverse order
    ref: https://stackoverflow.com/a/23646049/8776239
    """
    segment = None  # holds possible incomplete segment at the beginning of the buffer
    offset = 0
    fp.seek(0, os.SEEK_END)
    file_size = remaining_size = fp.tell()
    while remaining_size > 0:
        offset = min(file_size, offset + buf_size)
        fp.seek(file_size - offset)
        buffer = fp.read(min(remaining_size, buf_size))
        remaining_size -= buf_size
        lines = buffer.splitlines(True)
        # the first line of the buffer is probably not a complete line so
        # we'll save it and append it to the last line of the next buffer
        # we read
        if segment is not None:
            # if the previous chunk starts right from the beginning of line
            # do not concat the segment to the last line of new chunk
            # instead, yield the segment first
        if buffer[-1] == '\n':
                #print 'buffer ends with newline'
                yield segment
            else:
                lines[-1] += segment
                #print 'enlarged last line to >{}<, len {}'.format(lines[-1], len(lines))
        segment = lines[0]
        for index in range(len(lines) - 1, 0, -1):
            if len(lines[index]):
                yield lines[index]
    # Don't yield None if the file was empty
    if segment is not None:
        yield segment
Answered By: Murat Yükselen

If you are concerned about file size / memory usage, memory-mapping the file and scanning backwards for newlines is a solution:

How to search for a string in text files?
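For reference, a minimal sketch of that idea: memory-map the file and walk backwards with rfind. The function name is illustrative, lines are yielded as bytes with their newlines kept, and the content is assumed to be UTF-8 when decoded.

import mmap
import os

def reverse_lines_mmap(path):
    """Yield the lines of `path` as bytes, last line first, keeping newlines."""
    with open(path, "rb") as f:
        size = os.fstat(f.fileno()).st_size
        if size == 0:
            return  # mmap cannot map an empty file
        with mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) as mm:
            end = size
            while end > 0:
                # The newline before position `end` (ignoring a newline that
                # terminates the current line) marks where this line starts.
                start = mm.rfind(b"\n", 0, end - 1) + 1
                yield mm[start:end]
                end = start

for line in reverse_lines_mmap("filename"):
    print(line.decode("utf-8"), end="")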

Answered By: Federico

You would need to first open your file in read mode, save it to a variable, then open a second file in write mode where you would write (or append) the variable using the [::-1] slice, completely reversing the file. You can also use readlines() to make it into a list of lines, which you can manipulate (a line-wise variant is sketched after the example below).

def copy_and_reverse(filename, newfile):
    with open(filename) as file:
        text = file.read()
    with open(newfile, "w") as file2:
        file2.write(text[::-1])
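A variant of the same idea using readlines(), as mentioned above, which reverses whole lines rather than individual characters (the function name is illustrative):

def copy_and_reverse_lines(filename, newfile):
    # Read all lines, then write them back in reverse order.
    with open(filename) as file:
        lines = file.readlines()
    with open(newfile, "w") as file2:
        file2.writelines(reversed(lines))

Note that if the original file lacks a trailing newline, the former last line will run into the following line in the output.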
Answered By: PawlakJ

Most of the answers need to read the whole file before doing anything. This sample reads increasingly large samples from the end.

I only saw Murat Yükselen's answer while writing this one. It's nearly the same, which I suppose is a good thing. The sample below also deals with \r and increases its buffer size at each step. I also have some unit tests to back this code up.

def readlines_reversed(f):
    """ Iterate over the lines in a file in reverse. The file must be
    open in 'rb' mode. Yields the lines unencoded (as bytes), including the
    newline character. Produces the same result as readlines, but reversed.
    If this is used to reverse the line in a file twice, the result is
    exactly the same.
    """
    head = b""
    f.seek(0, 2)
    t = f.tell()
    buffersize, maxbuffersize = 64, 4096
    while True:
        if t <= 0:
            break
        # Read next block
        buffersize = min(buffersize * 2, maxbuffersize)
        tprev = t
        t = max(0, t - buffersize)
        f.seek(t)
        lines = f.read(tprev - t).splitlines(True)
        # Align to line breaks
        if not lines[-1].endswith((b"\n", b"\r")):
            lines[-1] += head  # current tail is previous head
        elif head == b"\n" and lines[-1].endswith(b"\r"):
            lines[-1] += head  # Keep \r\n together
        elif head:
            lines.append(head)
        head = lines.pop(0)  # can be '\n' (ok)
        # Iterate over current block in reverse
        for line in reversed(lines):
            yield line
    if head:
        yield head
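A minimal usage sketch (per the docstring, the file must be opened in binary mode; the filename is illustrative and UTF-8 content is assumed for decoding):

with open("filename", "rb") as f:
    for line in readlines_reversed(f):
        print(line.decode("utf-8"), end="")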
Answered By: Almar

The accepted answer won't work for large files that don't fit in memory (which is not a rare case).

As noted by others, @srohde's answer looks good, but it has the following issues:

  • opening the file looks redundant, when we can pass a file object and leave it to the user to decide which encoding it should be read with,
  • even if we refactor it to accept a file object, it won't work for all encodings: we can choose a file with utf-8 encoding and non-ASCII contents like
й

pass buf_size equal to 1, and we will get

    UnicodeDecodeError: 'utf8' codec can't decode byte 0xb9 in position 0: invalid start byte

of course the text may be larger, but buf_size may be picked so that it leads to an obfuscated error like the one above,

  • we can't specify a custom line separator,
  • we can't choose to keep the line separator.

So, considering all these concerns, I've written two separate functions:

  • one which works with byte streams,
  • a second which works with text streams, delegating its underlying byte stream to the first one and decoding the resulting lines.

First of all, let's define the following utility functions:

ceil_division, for division with ceiling (in contrast with the standard // division with floor; more info can be found in this thread):

def ceil_division(left_number, right_number):
    """
    Divides given numbers with ceiling.
    """
    return -(-left_number // right_number)

split, for splitting a string by a given separator, with the ability to keep the separator:

def split(string, separator, keep_separator):
    """
    Splits given string by given separator.
    """
    parts = string.split(separator)
    if keep_separator:
        *parts, last_part = parts
        parts = [part + separator for part in parts]
        if last_part:
            return parts + [last_part]
    return parts

read_batch_from_end, to read a batch from the right end of a binary stream:

def read_batch_from_end(byte_stream, size, end_position):
    """
    Reads batch from the end of given byte stream.
    """
    if end_position > size:
        offset = end_position - size
    else:
        offset = 0
        size = end_position
    byte_stream.seek(offset)
    return byte_stream.read(size)

After that, we can define a function for reading a byte stream in reverse order:

import functools
import itertools
import os
from operator import methodcaller, sub


def reverse_binary_stream(byte_stream, batch_size=None,
                          lines_separator=None,
                          keep_lines_separator=True):
    if lines_separator is None:
        lines_separator = (b'\r', b'\n', b'\r\n')
        lines_splitter = methodcaller(str.splitlines.__name__,
                                      keep_lines_separator)
    else:
        lines_splitter = functools.partial(split,
                                           separator=lines_separator,
                                           keep_separator=keep_lines_separator)
    stream_size = byte_stream.seek(0, os.SEEK_END)
    if batch_size is None:
        batch_size = stream_size or 1
    batches_count = ceil_division(stream_size, batch_size)
    remaining_bytes_indicator = itertools.islice(
            itertools.accumulate(itertools.chain([stream_size],
                                                 itertools.repeat(batch_size)),
                                 sub),
            batches_count)
    try:
        remaining_bytes_count = next(remaining_bytes_indicator)
    except StopIteration:
        return

    def read_batch(position):
        result = read_batch_from_end(byte_stream,
                                     size=batch_size,
                                     end_position=position)
        while result.startswith(lines_separator):
            try:
                position = next(remaining_bytes_indicator)
            except StopIteration:
                break
            result = (read_batch_from_end(byte_stream,
                                          size=batch_size,
                                          end_position=position)
                      + result)
        return result

    batch = read_batch(remaining_bytes_count)
    segment, *lines = lines_splitter(batch)
    yield from lines[::-1]
    for remaining_bytes_count in remaining_bytes_indicator:
        batch = read_batch(remaining_bytes_count)
        lines = lines_splitter(batch)
        if batch.endswith(lines_separator):
            yield segment
        else:
            lines[-1] += segment
        segment, *lines = lines
        yield from lines[::-1]
    yield segment

and finally, a function for reversing a text file can be defined like:

import codecs


def reverse_file(file, batch_size=None,
                 lines_separator=None,
                 keep_lines_separator=True):
    encoding = file.encoding
    if lines_separator is not None:
        lines_separator = lines_separator.encode(encoding)
    yield from map(functools.partial(codecs.decode,
                                     encoding=encoding),
                   reverse_binary_stream(
                           file.buffer,
                           batch_size=batch_size,
                           lines_separator=lines_separator,
                           keep_lines_separator=keep_lines_separator))

Tests

Preparations

I've generated 4 files using the fsutil command:

  1. empty.txt with no contents, size 0MB
  2. tiny.txt with size of 1MB
  3. small.txt with size of 10MB
  4. large.txt with size of 50MB

I've also refactored @srohde's solution to work with a file object instead of a file path.

Test script

from timeit import Timer

repeats_count = 7
number = 1
create_setup = ('from collections import deque\n'
                'from __main__ import reverse_file, reverse_readline\n'
                'file = open("{}")').format
srohde_solution = ('with file:\n'
                   '    deque(reverse_readline(file,\n'
                   '                           buf_size=8192),'
                   '          maxlen=0)')
azat_ibrakov_solution = ('with file:\n'
                         '    deque(reverse_file(file,\n'
                         '                       lines_separator="\\n",\n'
                         '                       keep_lines_separator=False,\n'
                         '                       batch_size=8192), maxlen=0)')
print('reversing empty file by "srohde"',
      min(Timer(srohde_solution,
                create_setup('empty.txt')).repeat(repeats_count, number)))
print('reversing empty file by "Azat Ibrakov"',
      min(Timer(azat_ibrakov_solution,
                create_setup('empty.txt')).repeat(repeats_count, number)))
print('reversing tiny file (1MB) by "srohde"',
      min(Timer(srohde_solution,
                create_setup('tiny.txt')).repeat(repeats_count, number)))
print('reversing tiny file (1MB) by "Azat Ibrakov"',
      min(Timer(azat_ibrakov_solution,
                create_setup('tiny.txt')).repeat(repeats_count, number)))
print('reversing small file (10MB) by "srohde"',
      min(Timer(srohde_solution,
                create_setup('small.txt')).repeat(repeats_count, number)))
print('reversing small file (10MB) by "Azat Ibrakov"',
      min(Timer(azat_ibrakov_solution,
                create_setup('small.txt')).repeat(repeats_count, number)))
print('reversing large file (50MB) by "srohde"',
      min(Timer(srohde_solution,
                create_setup('large.txt')).repeat(repeats_count, number)))
print('reversing large file (50MB) by "Azat Ibrakov"',
      min(Timer(azat_ibrakov_solution,
                create_setup('large.txt')).repeat(repeats_count, number)))

Note: I've used the collections.deque class to exhaust the generator.
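For illustration: a deque with maxlen=0 consumes an iterator without storing its items, which makes it a cheap way to time nothing but the generator itself.

from collections import deque

def numbers():
    yield from range(3)

deque(numbers(), maxlen=0)  # runs the generator to completion, keeps nothing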

Outputs

For PyPy 3.5 on Windows 10:

reversing empty file by "srohde" 8.31e-05
reversing empty file by "Azat Ibrakov" 0.00016090000000000028
reversing tiny file (1MB) by "srohde" 0.160081
reversing tiny file (1MB) by "Azat Ibrakov" 0.09594989999999998
reversing small file (10MB) by "srohde" 8.8891863
reversing small file (10MB) by "Azat Ibrakov" 5.323388100000001
reversing large file (50MB) by "srohde" 186.5338368
reversing large file (50MB) by "Azat Ibrakov" 99.07450229999998

For CPython 3.5 on Windows 10:

reversing empty file by "srohde" 3.600000000000001e-05
reversing empty file by "Azat Ibrakov" 4.519999999999958e-05
reversing tiny file (1MB) by "srohde" 0.01965560000000001
reversing tiny file (1MB) by "Azat Ibrakov" 0.019207699999999994
reversing small file (10MB) by "srohde" 3.1341862999999996
reversing small file (10MB) by "Azat Ibrakov" 3.0872588000000007
reversing large file (50MB) by "srohde" 82.01206720000002
reversing large file (50MB) by "Azat Ibrakov" 82.16775059999998

So, as we can see, it performs like the original solution, but it is more general and free of the disadvantages listed above.


Advertisement

I've added this to version 0.3.0 of the lz package (requires Python 3.5+), which has many well-tested functional/iteration utilities.

It can be used like:

import io
from lz.reversal import reverse
...
with open('path/to/file') as file:
     for line in reverse(file, batch_size=io.DEFAULT_BUFFER_SIZE):
         print(line)

It supports all standard encodings (maybe except utf-7 since it is hard for me to define a strategy for generating strings encodable with it).

Answered By: Azat Ibrakov

Read the file line by line, then add each line to the front of a list so the list ends up in reverse order.

Here is an example of the code:

reverse = []
with open("file.txt", "r") as file:
    for line in file:
        line = line.strip()
        reverse.insert(0, line)
Answered By: willywonka
import sys
f = open(sys.argv[1] , 'r')
for line in f.readlines()[::-1]:
    print line
Answered By: Powerup California

with open("filename") as f:
    print(f.read()[::-1])
Answered By: JackoM
import os

# Note: this is a method of a class that is assumed to define
# self.NEW_LINE (e.g. b"\n") and self.EMPTY_BYTE (e.g. b"").
def previous_line(self, opened_file):
        opened_file.seek(0, os.SEEK_END)
        position = opened_file.tell()
        buffer = bytearray()
        while position >= 0:
            opened_file.seek(position)
            position -= 1
            new_byte = opened_file.read(1)
            if new_byte == self.NEW_LINE:
                parsed_string = buffer.decode()
                yield parsed_string
                buffer = bytearray()
            elif new_byte == self.EMPTY_BYTE:
                continue
            else:
                new_byte_array = bytearray(new_byte)
                new_byte_array.extend(buffer)
                buffer = new_byte_array
        yield None

To use it:

opened_file = open(filepath, "rb")
iterator = self.previous_line(opened_file)
line = next(iterator) #one step
opened_file.close()
Answered By: Vyacheslav

I don’t think this has been mentioned before, but using deque from collections and reverse works for me:

from collections import deque

fs = open("test.txt","rU")
fr = deque(fs)
fr.reverse()  # reverse in-place, returns None

for li in fr:
   print li

fs.close()
Answered By: beroe

Here's a Python 3.8+ approach, using two string buffers, with grep-like substring matching (or simply iterating every line if the empty substring is passed). I'd expect this to be more memory efficient than loading the whole file into memory (you can control the buffer size, which is sometimes desirable), e.g. if you only want to find something near the end of a file. Gist here.

from __future__ import annotations

from io import StringIO, SEEK_END
from pathlib import Path
from typing import Iterator, TextIO


def grep_backwards(
    fh: TextIO,
    match_substr: str,
    line_ending: str = "n",
    strip_eol: bool = False,
    step: int = 10,
) -> Iterator[str]:
    """
    Helper for scanning a file line by line from the end, imitating the behaviour of
    the Unix command line tools ``grep`` (when passed ``match_substr``) or ``tac`` (when
    ``match_substr`` is the empty string ``""``, i.e. matching all lines).

    Args:
      fh            : The file handle to read from
      match_substr  : Substring to match at. If given as the empty string, gives a
                      reverse line iterator rather than a reverse matching line iterator.
      line_ending   : The line ending to split lines on (default: "\n" newline)
      strip_eol     : Whether to strip (``True``) or keep (``False``, the default) line
                      endings off the end of the strings returned by the iterator.
      step          : Number of characters to load into chunk buffer (i.e. chunk size)
    """
    # Store the end of file (EOF) position as we are advancing backwards from there
    file_end_pos = fh.seek(0, SEEK_END)  # cursor has moved to EOF
    # Keep a reversed string line buffer as we are writing right-to-left
    revlinebuf = StringIO()
    # Keep a [left-to-right] string buffer as we read left-to-right, one chunk at a time
    chunk_buf = StringIO()
    # Initialise 'last chunk start' at position after the EOF (unreachable by ``read``)
    last_chunk_start = file_end_pos + 1
    line_offset = 0  # relative to SEEK_END
    has_EOF_newline = False  # may change upon finding first newline
    # In the worst case, seek all the way back to the start (position 0)
    while last_chunk_start > 0:
        # Ensure that read(size=step) will read at least 1 character
        # e.g. when step=4, last_chunk_start=3, reduce step to 3 --> chunk=[0,1,2]
        if step > last_chunk_start:
            step = last_chunk_start
        chunk_start = last_chunk_start - step
        fh.seek(chunk_start)
        # Read in the chunk for the current step (possibly after pre-existing chunks)
        chunk_buf.write(fh.read(step))
        while chunk := chunk_buf.getvalue():
            # Keep reading intra-chunk lines RTL, leaving any leftovers in revlinebuf
            lhs, EOL_match, rhs = chunk.rpartition(line_ending)
            if EOL_match:
                if line_offset == 0:
                    has_EOF_newline = rhs == ""
                # Reverse the right-hand-side of the rightmost line_ending and
                # insert it after anything already in the reversed line buffer
                if rhs:
                    # Only bother writing rhs to line buffer if there's anything in it
                    revlinebuf.write(rhs[::-1])
                # Un-reverse the line buffer --> full line after the line_ending match
                completed_line = revlinebuf.getvalue()[::-1]  # (may be empty string)
                # Clear the reversed line buffer
                revlinebuf.seek(0)
                revlinebuf.truncate()
                # `grep` if line matches (or behaves like `tac` if match_substr == "")
                if line_offset == 0:
                    if not has_EOF_newline and match_substr in completed_line:
                        # The 0'th line from the end (by definition) cannot get an EOL
                        yield completed_line
                elif match_substr in (completed_line + line_ending):
                    if not strip_eol:
                        completed_line += line_ending
                    yield completed_line
                line_offset += 1
            else:
                # If line_ending not found in chunk then add entire [remaining] chunk,
                # in reverse, onto the reversed line buffer, before chunk_buf is cleared
                revlinebuf.write(chunk_buf.getvalue()[::-1])
            # The LHS of the rightmost line_ending (if any) may contain another line
            # ending so truncate the chunk to that and re-iterate (else clear chunk_buf)
            chunk_buf.seek(len(lhs))
            chunk_buf.truncate()
        last_chunk_start = chunk_start
    if completed_line := revlinebuf.getvalue()[::-1]:
        # Iteration has reached the line at start of file, left over in the line buffer
        if line_offset == 0 and not has_EOF_newline and match_substr in completed_line:
            # The 0'th line from the end (by definition) cannot get an EOL
            yield completed_line
        elif match_substr in (
            completed_line + (line_ending if line_offset > 1 or has_EOF_newline else "")
        ):
            if line_offset == 1:
                if has_EOF_newline and not strip_eol:
                    completed_line += line_ending
            elif not strip_eol:
                completed_line += line_ending
            yield completed_line
    else:
        return

Here are some tests to show it works, with 3 test input files made by counting up to 100 saying 'Hi 0', 'Hi 9', 'Hi 18', … :

  • … and give number 27 a double newline
  • … and give the end of file no newline
  • … and give the end of file 2 newlines
# Write lines counting to 100 saying 'Hi 0', 'Hi 9', ... give number 27 a double newline
str_out = "".join([f"Hi {i}n" if i != 27 else f"Hi {i}nn" for i in range(0, 100, 9)])
example_file = Path("example.txt")
no_eof_nl_file = Path("no_eof_nl.txt")  # no end of file newline
double_eof_nl_file = Path("double_eof_nl.txt")  # double end of file newline

with open(example_file, "w") as f_out:
    f_out.write(str_out)

with open(no_eof_nl_file, "w") as f_out:
    f_out.write(str_out.rstrip("n"))

with open(double_eof_nl_file, "w") as f_out:
    f_out.write(str_out + "n")

file_list = [example_file, no_eof_nl_file, double_eof_nl_file]
labels = [
    "EOF_NL    ",
    "NO_EOF_NL ",
    "DBL_EOF_NL",
]

print("------------------------------------------------------------")
print()
print(f"match_substr = ''")
for label, each_file in zip(labels, file_list):
    with open(each_file, "r") as fh:
        lines_rev_from_iterator = list(grep_backwards(fh=fh, match_substr=""))

    with open(each_file, "r") as fh:
        lines_rev_from_readline = list(reversed(fh.readlines()))

    print(label, f"{lines_rev_from_iterator == lines_rev_from_readline=}")
print()

for label, each_file in zip(labels, file_list):
    with open(each_file, "r") as fh:
        reverse_iterator = grep_backwards(fh=fh, match_substr="")
        first_match = next(reverse_iterator)
    print(label, f"{first_match=}")
print()

for label, each_file in zip(labels, file_list):
    with open(each_file, "r") as fh:
        all_matches = list(grep_backwards(fh=fh, match_substr=""))
    print(label, f"{all_matches=}")
print()
print()
print("------------------------------------------------------------")
print()
print(f"match_substr = 'Hi 9'")

for label, each_file in zip(labels, file_list):
    with open(each_file, "r") as fh:
        reverse_iterator = grep_backwards(fh=fh, match_substr="Hi 9")
        first_match = next(reverse_iterator)
    print(label, f"{first_match=}")
print()

for label, each_file in zip(labels, file_list):
    with open(each_file, "r") as fh:
        all_matches = list(grep_backwards(fh=fh, match_substr="Hi 9"))
    print(label, f"{all_matches=}")
print()
print("------------------------------------------------------------")
print()
print(f"match_substr = '\n'")

for len_flag in (True, False):
    for label, each_file in zip(labels, file_list):
        with open(each_file, "r") as fh:
            lines_rev_from_iterator = list(grep_backwards(fh=fh, match_substr="\n"))
        if len_flag:
            print(label, f"{len(lines_rev_from_iterator)=}")
        else:
            print(label, f"{lines_rev_from_iterator=}")
    print()

for label, each_file in zip(labels, file_list):
    with open(each_file, "r") as fh:
        reverse_iterator = grep_backwards(fh=fh, match_substr="n")
        first_match = next(reverse_iterator)
    print(label, f"{first_match=}")
print()

for label, each_file in zip(labels, file_list):
    with open(each_file, "r") as fh:
        all_matches = list(grep_backwards(fh=fh, match_substr="n"))
    print(label, f"{all_matches=}")
print()
print("------------------------------------------------------------")

------------------------------------------------------------

match_substr = ''
EOF_NL     lines_rev_from_iterator == lines_rev_from_readline=True
NO_EOF_NL  lines_rev_from_iterator == lines_rev_from_readline=True
DBL_EOF_NL lines_rev_from_iterator == lines_rev_from_readline=True

EOF_NL     first_match='Hi 99\n'
NO_EOF_NL  first_match='Hi 99'
DBL_EOF_NL first_match='\n'

EOF_NL     all_matches=['Hi 99\n', 'Hi 90\n', 'Hi 81\n', 'Hi 72\n', 'Hi 63\n', 'Hi 54\n', 'Hi 45\n', 'Hi 36\n', '\n', 'Hi 27\n', 'Hi 18\n', 'Hi 9\n', 'Hi 0\n']
NO_EOF_NL  all_matches=['Hi 99', 'Hi 90\n', 'Hi 81\n', 'Hi 72\n', 'Hi 63\n', 'Hi 54\n', 'Hi 45\n', 'Hi 36\n', '\n', 'Hi 27\n', 'Hi 18\n', 'Hi 9\n', 'Hi 0\n']
DBL_EOF_NL all_matches=['\n', 'Hi 99\n', 'Hi 90\n', 'Hi 81\n', 'Hi 72\n', 'Hi 63\n', 'Hi 54\n', 'Hi 45\n', 'Hi 36\n', '\n', 'Hi 27\n', 'Hi 18\n', 'Hi 9\n', 'Hi 0\n']


------------------------------------------------------------

match_substr = 'Hi 9'
EOF_NL     first_match='Hi 99\n'
NO_EOF_NL  first_match='Hi 99'
DBL_EOF_NL first_match='Hi 99\n'

EOF_NL     all_matches=['Hi 99\n', 'Hi 90\n', 'Hi 9\n']
NO_EOF_NL  all_matches=['Hi 99', 'Hi 90\n', 'Hi 9\n']
DBL_EOF_NL all_matches=['Hi 99\n', 'Hi 90\n', 'Hi 9\n']

------------------------------------------------------------

match_substr = '\n'
EOF_NL     len(lines_rev_from_iterator)=13
NO_EOF_NL  len(lines_rev_from_iterator)=12
DBL_EOF_NL len(lines_rev_from_iterator)=14

EOF_NL     lines_rev_from_iterator=['Hi 99\n', 'Hi 90\n', 'Hi 81\n', 'Hi 72\n', 'Hi 63\n', 'Hi 54\n', 'Hi 45\n', 'Hi 36\n', '\n', 'Hi 27\n', 'Hi 18\n', 'Hi 9\n', 'Hi 0\n']
NO_EOF_NL  lines_rev_from_iterator=['Hi 90\n', 'Hi 81\n', 'Hi 72\n', 'Hi 63\n', 'Hi 54\n', 'Hi 45\n', 'Hi 36\n', '\n', 'Hi 27\n', 'Hi 18\n', 'Hi 9\n', 'Hi 0\n']
DBL_EOF_NL lines_rev_from_iterator=['\n', 'Hi 99\n', 'Hi 90\n', 'Hi 81\n', 'Hi 72\n', 'Hi 63\n', 'Hi 54\n', 'Hi 45\n', 'Hi 36\n', '\n', 'Hi 27\n', 'Hi 18\n', 'Hi 9\n', 'Hi 0\n']

EOF_NL     first_match='Hi 99\n'
NO_EOF_NL  first_match='Hi 90\n'
DBL_EOF_NL first_match='\n'

EOF_NL     all_matches=['Hi 99\n', 'Hi 90\n', 'Hi 81\n', 'Hi 72\n', 'Hi 63\n', 'Hi 54\n', 'Hi 45\n', 'Hi 36\n', '\n', 'Hi 27\n', 'Hi 18\n', 'Hi 9\n', 'Hi 0\n']
NO_EOF_NL  all_matches=['Hi 90\n', 'Hi 81\n', 'Hi 72\n', 'Hi 63\n', 'Hi 54\n', 'Hi 45\n', 'Hi 36\n', '\n', 'Hi 27\n', 'Hi 18\n', 'Hi 9\n', 'Hi 0\n']
DBL_EOF_NL all_matches=['\n', 'Hi 99\n', 'Hi 90\n', 'Hi 81\n', 'Hi 72\n', 'Hi 63\n', 'Hi 54\n', 'Hi 45\n', 'Hi 36\n', '\n', 'Hi 27\n', 'Hi 18\n', 'Hi 9\n', 'Hi 0\n']

------------------------------------------------------------
Answered By: Louis Maddox