Skip to content

Commit

Permalink
Don't break sinks with "enqueue=True" and "catch=False" on error (#833)
Browse files Browse the repository at this point in the history
  • Loading branch information
Delgan committed Mar 26, 2023
1 parent b9acbad commit e0ca15b
Show file tree
Hide file tree
Showing 5 changed files with 23 additions and 26 deletions.
1 change: 1 addition & 0 deletions CHANGELOG.rst
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@
- Update ``InterceptHandler`` recipe to make it compatible with Python 3.11 (`#654 <https://github.com/Delgan/loguru/issues/654>`_).
- Add a new ``watch`` optional argument to file sinks in order to automatically re-create possibly deleted or changed file (`#471 <https://github.com/Delgan/loguru/issues/471>`_).
- Make ``patch()`` calls cumulative instead of overriding the possibly existing patching function (`#462 <https://github.com/Delgan/loguru/issues/462>`_).
- Make sinks added with ``enqueue=True`` and ``catch=False`` still process logged messages in case of internal exception (`#833 <https://github.com/Delgan/loguru/issues/833>`_).
- Avoid possible deadlocks caused by re-using the logger inside a sink, a signal handler or a ``__del__`` method. Since the logger is not re-entrant, such misuse will be detected and will now generate a ``RuntimeError`` (`#712 <https://github.com/Delgan/loguru/issues/712>`_, thanks `@jacksmith15 <https://github.com/jacksmith15>`_).
- Fix file sink rotation using an aware ``datetime.time`` for which the timezone was ignored (`#697 <https://github.com/Delgan/loguru/issues/697>`_).
- Fix logs colorization not automatically enabled for Jupyter Notebook and Google Colab (`#494 <https://github.com/Delgan/loguru/issues/494>`_).
Expand Down
12 changes: 0 additions & 12 deletions loguru/_handler.py
Original file line number Diff line number Diff line change
Expand Up @@ -72,7 +72,6 @@ def __init__(
self._confirmation_lock = None
self._owner_process_pid = None
self._thread = None
self._is_thread_dead = None

if self._is_formatter_dynamic:
if self._colorize:
Expand All @@ -91,7 +90,6 @@ def __init__(
self._confirmation_event = multiprocessing.Event()
self._confirmation_lock = multiprocessing.Lock()
self._owner_process_pid = os.getpid()
self._is_thread_dead = multiprocessing.Event()
self._thread = Thread(
target=self._queued_writer, daemon=True, name="loguru-writer-%d" % self._id
)
Expand Down Expand Up @@ -220,8 +218,6 @@ def complete_queue(self):
return

with self._confirmation_lock:
if self._is_thread_dead.is_set():
return
self._queue.put(True)
self._confirmation_event.wait()
self._confirmation_event.clear()
Expand Down Expand Up @@ -295,10 +291,6 @@ def _queued_writer(self):
message = queue.get()
except Exception:
with lock:
if not self._error_interceptor.should_catch():
self._is_thread_dead.set()
self._confirmation_event.set()
raise
self._error_interceptor.print(None)
continue

Expand All @@ -313,10 +305,6 @@ def _queued_writer(self):
try:
self._sink.write(message)
except Exception:
if not self._error_interceptor.should_catch():
self._is_thread_dead.set()
self._confirmation_event.set()
raise
self._error_interceptor.print(message.record)

def __getstate__(self):
Expand Down
2 changes: 1 addition & 1 deletion loguru/_logger.py
Original file line number Diff line number Diff line change
Expand Up @@ -267,7 +267,7 @@ def add(
            Whether the exception trace should display the variables values to ease debugging.
This should be set to ``False`` in production to avoid leaking sensitive data.
enqueue : |bool|, optional
Whether the messages to be logged should first pass through a multiprocess-safe queue
Whether the messages to be logged should first pass through a multiprocessing-safe queue
before reaching the sink. This is useful while logging to a file through multiple
processes. This also has the advantage of making logging calls non-blocking.
catch : |bool|, optional
Expand Down
2 changes: 1 addition & 1 deletion tests/test_add_option_catch.py
Original file line number Diff line number Diff line change
Expand Up @@ -140,4 +140,4 @@ def broken_sink(m):
logger.info("B")
time.sleep(0.1)

assert called == 1
assert called == 2
32 changes: 20 additions & 12 deletions tests/test_add_option_enqueue.py
Original file line number Diff line number Diff line change
Expand Up @@ -146,15 +146,17 @@ def test_not_caught_exception_queue_get(writer, capsys):
with default_threading_excepthook():
logger.info("It's fine")
logger.bind(broken=NotUnpicklable()).info("Bye bye...")
logger.info("It's not fine")
logger.info("It's fine again")
logger.remove()

out, err = capsys.readouterr()
lines = err.strip().splitlines()
assert writer.read() == "It's fine\n"
assert writer.read() == "It's fine\nIt's fine again\n"
assert out == ""
assert lines[0].startswith("Exception")
assert lines[-1].endswith("UnpicklingError: You shall not de-serialize me!")
assert lines[0] == "--- Logging error in Loguru Handler #0 ---"
assert lines[1] == "Record was: None"
assert lines[-2].endswith("UnpicklingError: You shall not de-serialize me!")
assert lines[-1] == "--- End of logging error ---"


def test_not_caught_exception_sink_write(capsys):
Expand All @@ -163,14 +165,16 @@ def test_not_caught_exception_sink_write(capsys):
with default_threading_excepthook():
logger.info("It's fine")
logger.bind(fail=True).info("Bye bye...")
logger.info("It's not fine")
logger.info("It's fine again")
logger.remove()

out, err = capsys.readouterr()
lines = err.strip().splitlines()
assert out == "It's fine\n"
assert lines[0].startswith("Exception")
assert lines[-1] == "RuntimeError: You asked me to fail..."
assert out == "It's fine\nIt's fine again\n"
assert lines[0] == "--- Logging error in Loguru Handler #0 ---"
assert re.match(r"Record was: \{.*Bye bye.*\}", lines[1])
assert lines[-2] == "RuntimeError: You asked me to fail..."
assert lines[-1] == "--- End of logging error ---"


def test_not_caught_exception_sink_write_then_complete(capsys):
Expand All @@ -185,8 +189,10 @@ def test_not_caught_exception_sink_write_then_complete(capsys):
out, err = capsys.readouterr()
lines = err.strip().splitlines()
assert out == ""
assert lines[0].startswith("Exception")
assert lines[-1] == "RuntimeError: You asked me to fail..."
assert lines[0] == "--- Logging error in Loguru Handler #0 ---"
assert re.match(r"Record was: \{.*Bye bye.*\}", lines[1])
assert lines[-2] == "RuntimeError: You asked me to fail..."
assert lines[-1] == "--- End of logging error ---"


def test_not_caught_exception_queue_get_then_complete(writer, capsys):
Expand All @@ -202,8 +208,10 @@ def test_not_caught_exception_queue_get_then_complete(writer, capsys):
lines = err.strip().splitlines()
assert writer.read() == ""
assert out == ""
assert lines[0].startswith("Exception")
assert lines[-1].endswith("UnpicklingError: You shall not de-serialize me!")
assert lines[0] == "--- Logging error in Loguru Handler #0 ---"
assert lines[1] == "Record was: None"
assert lines[-2].endswith("UnpicklingError: You shall not de-serialize me!")
assert lines[-1] == "--- End of logging error ---"


def test_wait_for_all_messages_enqueued(capsys):
Expand Down

0 comments on commit e0ca15b

Please sign in to comment.