Best way to override lineno in Python logger
Question:
I have written a decorator which logs the arguments used to call a particular function or method. As shown below, it works well except that the line number reported in the logRecord
is the line number of the decorator rather than the line number of func
that is being wrapped:
from functools import wraps
import inspect
import logging
arg_log_fmt = "{name}({arg_str})"


def log_args(logger, level=logging.DEBUG):
    """Decorator factory: log every call to the wrapped function.

    Args:
        logger: the ``logging.Logger`` the call report is sent to.
        level: log level for the report (default ``logging.DEBUG``).

    Returns:
        A decorator that wraps *func*, logs ``func(arg, kw=val, ...)``
        before each call, then calls through and returns the result.
    """
    def inner_func(func):
        # Starting line of the wrapped function's source.  This version
        # only demonstrates how to obtain it -- the value is not yet fed
        # to the logger (that is the whole point of the question).
        line_no = inspect.getsourcelines(func)[-1]

        @wraps(func)
        def return_func(*args, **kwargs):
            arg_list = ["{!r}".format(arg) for arg in args]
            # items() works on Python 3; iteritems() was Python 2 only.
            arg_list.extend("{}={!r}".format(key, val)
                            for key, val in kwargs.items())
            msg = arg_log_fmt.format(name=func.__name__,
                                     arg_str=", ".join(arg_list))
            logger.log(level, msg)
            return func(*args, **kwargs)
        return return_func
    return inner_func
if __name__ == "__main__":
    # Demo: wire a stream handler whose format includes %(lineno)s so the
    # reported line number (pointing at the decorator, not the function)
    # is visible in the output.
    logger = logging.getLogger(__name__)
    handler = logging.StreamHandler()
    fmt = "%(asctime)s %(levelname)-8.8s [%(name)s:%(lineno)4s] %(message)s"
    handler.setFormatter(logging.Formatter(fmt))
    logger.addHandler(handler)
    logger.setLevel(logging.DEBUG)

    @log_args(logger)
    def foo(x, y, z):
        pass

    class Bar(object):
        @log_args(logger)
        def baz(self, a, b, c):
            pass

    foo(1, 2, z=3)
    foo(1, 2, 3)
    foo(x=1, y=2, z=3)
    bar = Bar()
    bar.baz(1, c=3, b=2)
This sample results in the following output
2015-09-07 12:42:47,779 DEBUG [__main__: 25] foo(1, 2, z=3)
2015-09-07 12:42:47,779 DEBUG [__main__: 25] foo(1, 2, 3)
2015-09-07 12:42:47,779 DEBUG [__main__: 25] foo(y=2, x=1, z=3)
2015-09-07 12:42:47,779 DEBUG [__main__: 25] baz(<__main__.Bar object at 0x1029094d0>, 1, c=3, b=2)
Note that the line numbers all point to the decorator.
With inspect.getsourcelines(func)
I can get the line number I’m interested in, but an attempt to over-write lineno
in logger.debug
results in an error. What is the best approach to getting the line number of the wrapped function to appear in the logging statement?
Answers:
You can’t easily change the line number, because the Logger.findCaller()
method extracts this information via introspection.
You could re-build the function and code objects for the wrapper function you generate, but that is very hairy indeed (see the hoops I and Veedrac jump through on this post) and will lead to problems when you have a bug, as your traceback will show the wrong source lines!
You’d be better off adding the line number, as well as your module name (since that can differ too) to your log output manually:
# Template now carries the call-site file and line explicitly; {filename}
# restored here -- the format() call below supplies it, so the placeholder
# must appear in the template.
arg_log_fmt = "{name}({arg_str}) in {filename}:{lineno}"

# ... inside return_func, after arg_list has been built:
codeobj = func.__code__
msg = arg_log_fmt.format(
    name=func.__name__, arg_str=", ".join(arg_list),
    filename=codeobj.co_filename, lineno=codeobj.co_firstlineno)
Since you always have a function here, I used some more direct introspection to get the first line number for the function, via the associated code object.
As Martijn points out, things sometimes change. However, since you’re using Python 2 (the iteritems gave it away), the following code will work if you don’t mind monkey-patching logging:
from functools import wraps
import logging
class ArgLogger(object):
    """
    Singleton class -- only ever instantiated once, because instantiation
    monkey-patches logging.Logger.findCaller globally.
    """
    singleton = None

    def __new__(cls):
        self = cls.singleton
        if self is not None:
            return self
        self = cls.singleton = super(ArgLogger, cls).__new__(cls)
        # One-slot mailbox: set right before logger.log() by return_func
        # below, consumed by the patched findCaller.
        self.code_location = None

        # Do the monkey patch exactly one time.
        def findCaller(log_self, *args, **kwargs):
            # Atomically pop the stashed location so ordinary logging calls
            # fall through to the real findCaller.
            self.code_location, code_location = None, self.code_location
            if code_location is not None:
                # Python 3 expects (filename, lineno, funcname, sinfo);
                # we stash the first three, sinfo is always None here.
                return code_location + (None,)
            # *args/**kwargs absorb stack_info/stacklevel (Python 3.x).
            return old_findCaller(log_self, *args, **kwargs)

        old_findCaller = logging.Logger.findCaller
        logging.Logger.findCaller = findCaller
        return self

    def log_args(self, logger, level=logging.DEBUG):
        """Decorator to log arguments passed to func."""
        def inner_func(func):
            co = func.__code__
            code_loc = (co.co_filename, co.co_firstlineno, co.co_name)

            @wraps(func)
            def return_func(*args, **kwargs):
                arg_list = ["{!r}".format(arg) for arg in args]
                # items() works on Python 3; iteritems() was Python 2 only.
                arg_list.extend("{}={!r}".format(key, val)
                                for key, val in kwargs.items())
                msg = "{name}({arg_str})".format(name=func.__name__,
                                                 arg_str=", ".join(arg_list))
                # Stash the wrapped function's location for findCaller.
                self.code_location = code_loc
                logger.log(level, msg)
                return func(*args, **kwargs)
            return return_func
        return inner_func


log_args = ArgLogger().log_args
if __name__ == "__main__":
    # Demo: with the ArgLogger patch in place, %(lineno)s now reports the
    # wrapped function's definition line; plain logging is unaffected.
    logger = logging.getLogger(__name__)
    handler = logging.StreamHandler()
    fmt = "%(asctime)s %(levelname)-8.8s [%(name)s:%(lineno)4s] %(message)s"
    handler.setFormatter(logging.Formatter(fmt))
    logger.addHandler(handler)
    logger.setLevel(logging.DEBUG)

    @log_args(logger)
    def foo(x, y, z):
        pass

    class Bar(object):
        @log_args(logger)
        def baz(self, a, b, c):
            pass

    def test_regular_log():
        logger.debug("Logging without ArgLog still works fine")

    foo(1, 2, z=3)
    foo(1, 2, 3)
    foo(x=1, y=2, z=3)
    bar = Bar()
    bar.baz(1, c=3, b=2)
    test_regular_log()
Another possibility is to subclass Logger
to override Logger.makeRecord
. This is the method that raises a KeyError
if you try to change any of the standard attributes (like rv.lineno
) in the LogRecord
:
for key in extra:
if (key in ["message", "asctime"]) or (key in rv.__dict__):
raise KeyError("Attempt to overwrite %r in LogRecord" % key)
rv.__dict__[key] = extra[key]
By removing this precaution, we can override the lineno value by supplying an
extra
argument to the logger.log
call:
logger.log(level, msg, extra=dict(lineno=line_no))
from functools import wraps
import inspect
import logging
arg_log_fmt = "{name}({arg_str})"


def makeRecord(self, name, level, fn, lno, msg, args, exc_info,
               func=None, extra=None, sinfo=None):
    """
    Replacement for Logger.makeRecord that lets *extra* overwrite standard
    LogRecord attributes (the stock version raises KeyError for that).
    ``sinfo`` is accepted because Python 3's Logger._log passes it.
    """
    rv = logging.LogRecord(name, level, fn, lno, msg, args, exc_info, func,
                           sinfo)
    if extra is not None:
        # Deliberately no overwrite guard -- that is the point of the
        # subclass: extra=dict(lineno=...) must win.
        rv.__dict__.update(extra)
    return rv


def log_args(logger, level=logging.DEBUG, cache=dict()):
    """Decorator to log arguments passed to func.

    The logger's class is swapped to a makeRecord-overriding subclass only
    for the duration of the logger.log call, so other loggers (and this
    logger outside decorated calls) behave exactly as before.
    """
    logger_class = logger.__class__
    # The mutable default is a deliberate per-process memo: one generated
    # subclass per concrete logger class.
    if logger_class in cache:
        UpdateableLogger = cache[logger_class]
    else:
        cache[logger_class] = UpdateableLogger = type(
            'UpdateableLogger', (logger_class,), dict(makeRecord=makeRecord))

    def inner_func(func):
        # First source line of the wrapped function -- injected as lineno.
        line_no = inspect.getsourcelines(func)[-1]

        @wraps(func)
        def return_func(*args, **kwargs):
            arg_list = ["{!r}".format(arg) for arg in args]
            # items() works on Python 3; iteritems() was Python 2 only.
            arg_list.extend("{}={!r}".format(key, val)
                            for key, val in kwargs.items())
            msg = arg_log_fmt.format(name=func.__name__,
                                     arg_str=", ".join(arg_list))
            # Swap in the subclass so extra can override lineno, and
            # always restore the original class afterwards.
            logger.__class__ = UpdateableLogger
            try:
                logger.log(level, msg, extra=dict(lineno=line_no))
            finally:
                logger.__class__ = logger_class
            return func(*args, **kwargs)
        return return_func
    return inner_func
if __name__ == "__main__":
    # Demo: %(lineno)s now reports the wrapped function's definition line
    # because log_args injects lineno via extra.
    logger = logging.getLogger(__name__)
    handler = logging.StreamHandler()
    fmt = "%(asctime)s %(levelname)-8.8s [%(name)s:%(lineno)4s] %(message)s"
    handler.setFormatter(logging.Formatter(fmt))
    logger.addHandler(handler)
    logger.setLevel(logging.DEBUG)

    @log_args(logger)
    def foo(x, y, z):
        pass

    class Bar(object):
        @log_args(logger)
        def baz(self, a, b, c):
            pass

    foo(1, 2, z=3)
    foo(1, 2, 3)
    foo(x=1, y=2, z=3)
    bar = Bar()
    bar.baz(1, c=3, b=2)
yields
2015-09-07 16:01:22,332 DEBUG [__main__: 48] foo(1, 2, z=3)
2015-09-07 16:01:22,332 DEBUG [__main__: 48] foo(1, 2, 3)
2015-09-07 16:01:22,332 DEBUG [__main__: 48] foo(y=2, x=1, z=3)
2015-09-07 16:01:22,332 DEBUG [__main__: 53] baz(<__main__.Bar object at 0x7f17f75b0490>, 1, c=3, b=2)
The line
UpdateableLogger = type('UpdateableLogger', (type(logger),),
dict(makeRecord=makeRecord))
creates a new class which is a subclass of type(logger)
which overrides makeRecord
.
Inside return_func
, the logger
‘s class is changed to UpdateableLogger
so the call to logger.log
can modify lineno
and then the original logger class is restored.
By doing it this way — by avoiding monkey-patching Logger.makeRecord
— all logger
s behave exactly as before outside the decorated functions.
For comparison, the monkey-patching approach is shown here.
This is an old post, but this answer might still be useful for someone else.
One problem with the existing solutions is that there are multiple parameters providing logging context, and all of these would need to be patched if you want to support arbitrary logging formatters.
It turns out that this was raised as an issue with the Python logging library about a year ago, and as a result, the stacklevel
keyword argument was added in Python 3.8. With that feature, you could just modify your logging call to set the stacklevel to 2 (one level above where logger.log
is called in your example):
logger.log(level, msg, stacklevel=2)
Since Python 3.8 isn’t out yet (at the time of this response), you can monkey-patch your logger with the findCaller
and _log
methods updated in Python 3.8.
I have a logging utility library called logquacious, where I do the same sort of monkey-patching. You can reuse the patch_logger
class that I’ve defined in logquacious and update your logging example above with:
from logquacious.backport_configurable_stacklevel import patch_logger
logger = logging.getLogger(__name__)
logger.__class__ = patch_logger(logger.__class__)
As mentioned in unutbu’s answer, it might be a good idea to undo this monkey-patching outside of the scope where it’s used, which is what some of the other code in that file does.
Firstly, let’s have a look at how this exception happens in makeRecord:
def makeRecord(self, name, level, fn, lno, msg, args, exc_info,
func=None, extra=None, sinfo=None):
"""
A factory method which can be overridden in subclasses to create
specialized LogRecords.
"""
rv = _logRecordFactory(name, level, fn, lno, msg, args, exc_info, func,
sinfo)
if extra is not None:
for key in extra:
if (key in ["message", "asctime"]) or (key in rv.__dict__):
raise KeyError("Attempt to overwrite %r in LogRecord" % key)
rv.__dict__[key] = extra[key]
return rv
In makeRecord function, if the key is already defined in rv, it will throw a KeyError.
One simple and elegant way is to override the makeRecord function, of course. But I would rather recommend overriding the record factory function, which is also the recommended method for injecting your own values into a LogRecord at creation time, as shown by the following pattern:
# Keep a handle on the previous factory so it can be chained (and restored).
old_factory = logging.getLogRecordFactory()


def record_factory(*args, **kwargs):
    """LogRecord factory that tags every record with a custom attribute."""
    record = old_factory(*args, **kwargs)
    record.custom_attribute = 0xdecafbad
    return record


logging.setLogRecordFactory(record_factory)
Also, inspired by techniques for modifying frame.f_locals, we can access the extra argument inside makeRecord's frame, use it to update the record, and then set extra to None to escape the overwrite check.
# CPython-specific hack: pull `extra` out of Logger.makeRecord's frame so it
# can overwrite reserved LogRecord attributes, then null it out so the
# KeyError guard in makeRecord never fires.
old_factory = logging.getLogRecordFactory()
def record_factory(*args, **kwargs):
# get extra
# NOTE(review): assumes the immediate caller is Logger.makeRecord and that
# it holds a local named 'extra' -- confirm for the Python version in use.
frame = inspect.currentframe().f_back
extra = frame.f_locals['extra']
# update
record = old_factory(*args, **kwargs)
# HACK: exec builds attribute assignments from untrusted-looking key names;
# setattr(record, key, value) would be the safer equivalent.
for ii, (key, value) in enumerate(extra.items()):
exec('record.{} = value'.format(key))
# reset extra to None
# NOTE(review): writing frame locals back via PyFrame_LocalsToFast is
# CPython-internal; PEP 667 (Python 3.13) changed f_locals semantics, so
# this needs verifying on the target interpreter.
frame.f_locals.update(dict(extra=None))
ctypes.pythonapi.PyFrame_LocalsToFast(
ctypes.py_object(frame), ctypes.c_int(0))
return record
logging.setLogRecordFactory(record_factory)
I have written a decorator which logs the arguments used to call a particular function or method. As shown below, it works well except that the line number reported in the logRecord
is the line number of the decorator rather than the line number of func
that is being wrapped:
from functools import wraps
import inspect
import logging
arg_log_fmt = "{name}({arg_str})"


def log_args(logger, level=logging.DEBUG):
    """Decorator factory: log every call to the wrapped function.

    Args:
        logger: the ``logging.Logger`` the call report is sent to.
        level: log level for the report (default ``logging.DEBUG``).

    Returns:
        A decorator that wraps *func*, logs ``func(arg, kw=val, ...)``
        before each call, then calls through and returns the result.
    """
    def inner_func(func):
        # Starting line of the wrapped function's source.  This version
        # only demonstrates how to obtain it -- the value is not yet fed
        # to the logger (that is the whole point of the question).
        line_no = inspect.getsourcelines(func)[-1]

        @wraps(func)
        def return_func(*args, **kwargs):
            arg_list = ["{!r}".format(arg) for arg in args]
            # items() works on Python 3; iteritems() was Python 2 only.
            arg_list.extend("{}={!r}".format(key, val)
                            for key, val in kwargs.items())
            msg = arg_log_fmt.format(name=func.__name__,
                                     arg_str=", ".join(arg_list))
            logger.log(level, msg)
            return func(*args, **kwargs)
        return return_func
    return inner_func
if __name__ == "__main__":
    # Demo: wire a stream handler whose format includes %(lineno)s so the
    # reported line number (pointing at the decorator, not the function)
    # is visible in the output.
    logger = logging.getLogger(__name__)
    handler = logging.StreamHandler()
    fmt = "%(asctime)s %(levelname)-8.8s [%(name)s:%(lineno)4s] %(message)s"
    handler.setFormatter(logging.Formatter(fmt))
    logger.addHandler(handler)
    logger.setLevel(logging.DEBUG)

    @log_args(logger)
    def foo(x, y, z):
        pass

    class Bar(object):
        @log_args(logger)
        def baz(self, a, b, c):
            pass

    foo(1, 2, z=3)
    foo(1, 2, 3)
    foo(x=1, y=2, z=3)
    bar = Bar()
    bar.baz(1, c=3, b=2)
This sample results in the following output
2015-09-07 12:42:47,779 DEBUG [__main__: 25] foo(1, 2, z=3)
2015-09-07 12:42:47,779 DEBUG [__main__: 25] foo(1, 2, 3)
2015-09-07 12:42:47,779 DEBUG [__main__: 25] foo(y=2, x=1, z=3)
2015-09-07 12:42:47,779 DEBUG [__main__: 25] baz(<__main__.Bar object at 0x1029094d0>, 1, c=3, b=2)
Note that the line numbers all point to the decorator.
With inspect.getsourcelines(func)
I can get the line number I’m interested in, but an attempt to over-write lineno
in logger.debug
results in an error. What is the best approach to getting the line number of the wrapped function to appear in the logging statement?
You can’t easily change the line number, because the Logger.findCaller()
method extracts this information via introspection.
You could re-build the function and code objects for the wrapper function you generate, but that is very hairy indeed (see the hoops I and Veedrac jump through on this post) and will lead to problems when you have a bug, as your traceback will show the wrong source lines!
You’d be better off adding the line number, as well as your module name (since that can differ too) to your log output manually:
# Template now carries the call-site file and line explicitly; {filename}
# restored here -- the format() call below supplies it, so the placeholder
# must appear in the template.
arg_log_fmt = "{name}({arg_str}) in {filename}:{lineno}"

# ... inside return_func, after arg_list has been built:
codeobj = func.__code__
msg = arg_log_fmt.format(
    name=func.__name__, arg_str=", ".join(arg_list),
    filename=codeobj.co_filename, lineno=codeobj.co_firstlineno)
Since you always have a function here, I used some more direct introspection to get the first line number for the function, via the associated code object.
As Martijn points out, things sometimes change. However, since you’re using Python 2 (the iteritems gave it away), the following code will work if you don’t mind monkey-patching logging:
from functools import wraps
import logging
class ArgLogger(object):
    """
    Singleton class -- only ever instantiated once, because instantiation
    monkey-patches logging.Logger.findCaller globally.
    """
    singleton = None

    def __new__(cls):
        self = cls.singleton
        if self is not None:
            return self
        self = cls.singleton = super(ArgLogger, cls).__new__(cls)
        # One-slot mailbox: set right before logger.log() by return_func
        # below, consumed by the patched findCaller.
        self.code_location = None

        # Do the monkey patch exactly one time.
        def findCaller(log_self, *args, **kwargs):
            # Atomically pop the stashed location so ordinary logging calls
            # fall through to the real findCaller.
            self.code_location, code_location = None, self.code_location
            if code_location is not None:
                # Python 3 expects (filename, lineno, funcname, sinfo);
                # we stash the first three, sinfo is always None here.
                return code_location + (None,)
            # *args/**kwargs absorb stack_info/stacklevel (Python 3.x).
            return old_findCaller(log_self, *args, **kwargs)

        old_findCaller = logging.Logger.findCaller
        logging.Logger.findCaller = findCaller
        return self

    def log_args(self, logger, level=logging.DEBUG):
        """Decorator to log arguments passed to func."""
        def inner_func(func):
            co = func.__code__
            code_loc = (co.co_filename, co.co_firstlineno, co.co_name)

            @wraps(func)
            def return_func(*args, **kwargs):
                arg_list = ["{!r}".format(arg) for arg in args]
                # items() works on Python 3; iteritems() was Python 2 only.
                arg_list.extend("{}={!r}".format(key, val)
                                for key, val in kwargs.items())
                msg = "{name}({arg_str})".format(name=func.__name__,
                                                 arg_str=", ".join(arg_list))
                # Stash the wrapped function's location for findCaller.
                self.code_location = code_loc
                logger.log(level, msg)
                return func(*args, **kwargs)
            return return_func
        return inner_func


log_args = ArgLogger().log_args
if __name__ == "__main__":
    # Demo: with the ArgLogger patch in place, %(lineno)s now reports the
    # wrapped function's definition line; plain logging is unaffected.
    logger = logging.getLogger(__name__)
    handler = logging.StreamHandler()
    fmt = "%(asctime)s %(levelname)-8.8s [%(name)s:%(lineno)4s] %(message)s"
    handler.setFormatter(logging.Formatter(fmt))
    logger.addHandler(handler)
    logger.setLevel(logging.DEBUG)

    @log_args(logger)
    def foo(x, y, z):
        pass

    class Bar(object):
        @log_args(logger)
        def baz(self, a, b, c):
            pass

    def test_regular_log():
        logger.debug("Logging without ArgLog still works fine")

    foo(1, 2, z=3)
    foo(1, 2, 3)
    foo(x=1, y=2, z=3)
    bar = Bar()
    bar.baz(1, c=3, b=2)
    test_regular_log()
Another possibility is to subclass Logger
to override Logger.makeRecord
. This is the method that raises a KeyError
if you try to change any of the standard attributes (like rv.lineno
) in the LogRecord
:
for key in extra:
if (key in ["message", "asctime"]) or (key in rv.__dict__):
raise KeyError("Attempt to overwrite %r in LogRecord" % key)
rv.__dict__[key] = extra[key]
By removing this precaution, we can override the lineno value by supplying an
extra
argument to the logger.log
call:
logger.log(level, msg, extra=dict(lineno=line_no))
from functools import wraps
import inspect
import logging
arg_log_fmt = "{name}({arg_str})"


def makeRecord(self, name, level, fn, lno, msg, args, exc_info,
               func=None, extra=None, sinfo=None):
    """
    Replacement for Logger.makeRecord that lets *extra* overwrite standard
    LogRecord attributes (the stock version raises KeyError for that).
    ``sinfo`` is accepted because Python 3's Logger._log passes it.
    """
    rv = logging.LogRecord(name, level, fn, lno, msg, args, exc_info, func,
                           sinfo)
    if extra is not None:
        # Deliberately no overwrite guard -- that is the point of the
        # subclass: extra=dict(lineno=...) must win.
        rv.__dict__.update(extra)
    return rv


def log_args(logger, level=logging.DEBUG, cache=dict()):
    """Decorator to log arguments passed to func.

    The logger's class is swapped to a makeRecord-overriding subclass only
    for the duration of the logger.log call, so other loggers (and this
    logger outside decorated calls) behave exactly as before.
    """
    logger_class = logger.__class__
    # The mutable default is a deliberate per-process memo: one generated
    # subclass per concrete logger class.
    if logger_class in cache:
        UpdateableLogger = cache[logger_class]
    else:
        cache[logger_class] = UpdateableLogger = type(
            'UpdateableLogger', (logger_class,), dict(makeRecord=makeRecord))

    def inner_func(func):
        # First source line of the wrapped function -- injected as lineno.
        line_no = inspect.getsourcelines(func)[-1]

        @wraps(func)
        def return_func(*args, **kwargs):
            arg_list = ["{!r}".format(arg) for arg in args]
            # items() works on Python 3; iteritems() was Python 2 only.
            arg_list.extend("{}={!r}".format(key, val)
                            for key, val in kwargs.items())
            msg = arg_log_fmt.format(name=func.__name__,
                                     arg_str=", ".join(arg_list))
            # Swap in the subclass so extra can override lineno, and
            # always restore the original class afterwards.
            logger.__class__ = UpdateableLogger
            try:
                logger.log(level, msg, extra=dict(lineno=line_no))
            finally:
                logger.__class__ = logger_class
            return func(*args, **kwargs)
        return return_func
    return inner_func
if __name__ == "__main__":
    # Demo: %(lineno)s now reports the wrapped function's definition line
    # because log_args injects lineno via extra.
    logger = logging.getLogger(__name__)
    handler = logging.StreamHandler()
    fmt = "%(asctime)s %(levelname)-8.8s [%(name)s:%(lineno)4s] %(message)s"
    handler.setFormatter(logging.Formatter(fmt))
    logger.addHandler(handler)
    logger.setLevel(logging.DEBUG)

    @log_args(logger)
    def foo(x, y, z):
        pass

    class Bar(object):
        @log_args(logger)
        def baz(self, a, b, c):
            pass

    foo(1, 2, z=3)
    foo(1, 2, 3)
    foo(x=1, y=2, z=3)
    bar = Bar()
    bar.baz(1, c=3, b=2)
yields
2015-09-07 16:01:22,332 DEBUG [__main__: 48] foo(1, 2, z=3)
2015-09-07 16:01:22,332 DEBUG [__main__: 48] foo(1, 2, 3)
2015-09-07 16:01:22,332 DEBUG [__main__: 48] foo(y=2, x=1, z=3)
2015-09-07 16:01:22,332 DEBUG [__main__: 53] baz(<__main__.Bar object at 0x7f17f75b0490>, 1, c=3, b=2)
The line
UpdateableLogger = type('UpdateableLogger', (type(logger),),
dict(makeRecord=makeRecord))
creates a new class which is a subclass of type(logger)
which overrides makeRecord
.
Inside return_func
, the logger
‘s class is changed to UpdateableLogger
so the call to logger.log
can modify lineno
and then the original logger class is restored.
By doing it this way — by avoiding monkey-patching Logger.makeRecord
— all logger
s behave exactly as before outside the decorated functions.
For comparison, the monkey-patching approach is shown here.
This is an old post, but this answer might still be useful for someone else.
One problem with the existing solutions is that there are multiple parameters providing logging context, and all of these would need to be patched if you want to support arbitrary logging formatters.
It turns out that this was raised as an issue with the Python logging library about a year ago, and as a result, the stacklevel
keyword argument was added in Python 3.8. With that feature, you could just modify your logging call to set the stacklevel to 2 (one level above where logger.log
is called in your example):
logger.log(level, msg, stacklevel=2)
Since Python 3.8 isn’t out yet (at the time of this response), you can monkey-patch your logger with the findCaller
and _log
methods updated in Python 3.8.
I have a logging utility library called logquacious, where I do the same sort of monkey-patching. You can reuse the patch_logger
class that I’ve defined in logquacious and update your logging example above with:
from logquacious.backport_configurable_stacklevel import patch_logger
logger = logging.getLogger(__name__)
logger.__class__ = patch_logger(logger.__class__)
As mentioned in unutbu’s answer, it might be a good idea to undo this monkey-patching outside of the scope where it’s used, which is what some of the other code in that file does.
Firstly, let’s have a look at how this exception happens in makeRecord:
def makeRecord(self, name, level, fn, lno, msg, args, exc_info,
func=None, extra=None, sinfo=None):
"""
A factory method which can be overridden in subclasses to create
specialized LogRecords.
"""
rv = _logRecordFactory(name, level, fn, lno, msg, args, exc_info, func,
sinfo)
if extra is not None:
for key in extra:
if (key in ["message", "asctime"]) or (key in rv.__dict__):
raise KeyError("Attempt to overwrite %r in LogRecord" % key)
rv.__dict__[key] = extra[key]
return rv
In makeRecord function, if the key is already defined in rv, it will throw a KeyError.
One simple and elegant way is to override the makeRecord function, of course. But I would rather recommend overriding the record factory function, which is also the recommended method for injecting your own values into a LogRecord at creation time, as shown by the following pattern:
# Keep a handle on the previous factory so it can be chained (and restored).
old_factory = logging.getLogRecordFactory()


def record_factory(*args, **kwargs):
    """LogRecord factory that tags every record with a custom attribute."""
    record = old_factory(*args, **kwargs)
    record.custom_attribute = 0xdecafbad
    return record


logging.setLogRecordFactory(record_factory)
Also, inspired by techniques for modifying frame.f_locals, we can access the extra argument inside makeRecord's frame, use it to update the record, and then set extra to None to escape the overwrite check.
# CPython-specific hack: pull `extra` out of Logger.makeRecord's frame so it
# can overwrite reserved LogRecord attributes, then null it out so the
# KeyError guard in makeRecord never fires.
old_factory = logging.getLogRecordFactory()
def record_factory(*args, **kwargs):
# get extra
# NOTE(review): assumes the immediate caller is Logger.makeRecord and that
# it holds a local named 'extra' -- confirm for the Python version in use.
frame = inspect.currentframe().f_back
extra = frame.f_locals['extra']
# update
record = old_factory(*args, **kwargs)
# HACK: exec builds attribute assignments from untrusted-looking key names;
# setattr(record, key, value) would be the safer equivalent.
for ii, (key, value) in enumerate(extra.items()):
exec('record.{} = value'.format(key))
# reset extra to None
# NOTE(review): writing frame locals back via PyFrame_LocalsToFast is
# CPython-internal; PEP 667 (Python 3.13) changed f_locals semantics, so
# this needs verifying on the target interpreter.
frame.f_locals.update(dict(extra=None))
ctypes.pythonapi.PyFrame_LocalsToFast(
ctypes.py_object(frame), ctypes.c_int(0))
return record
logging.setLogRecordFactory(record_factory)