/celery/log.py

https://github.com/miracle2k/celery · Python

  1. """celery.log"""
  2. import os
  3. import sys
  4. import time
  5. import logging
  6. import traceback
  7. from celery import conf
  8. from celery.patch import monkeypatch
  9. def get_default_logger(loglevel=None):
  10. """Get default logger instance.
  11. :keyword loglevel: Initial log level.
  12. """
  13. from multiprocessing.util import get_logger
  14. logger = get_logger()
  15. loglevel is not None and logger.setLevel(loglevel)
  16. return logger
  17. _monkeypatched = [False]
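
# Usage sketch (illustrative): get_default_logger() hands back the shared
# ``multiprocessing.util`` logger, so repeated calls return the same instance
# and only adjust its level when one is given.
#
#     >>> logger = get_default_logger(loglevel=logging.DEBUG)
#     >>> logger is get_default_logger()
#     True
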
def setup_logger(loglevel=conf.DAEMON_LOG_LEVEL, logfile=None,
                 format=conf.LOG_FORMAT, **kwargs):
    """Setup the ``multiprocessing`` logger. If ``logfile`` is not specified,
    ``stderr`` is used.

    Returns logger object.

    """
    if not _monkeypatched[0]:
        monkeypatch()
        _monkeypatched[0] = True
    logger = get_default_logger(loglevel=loglevel)
    if logger.handlers:
        # Logger already configured
        return logger
    if logfile:
        if hasattr(logfile, "write"):
            log_file_handler = logging.StreamHandler(logfile)
        else:
            log_file_handler = logging.FileHandler(logfile)
        formatter = logging.Formatter(format)
        log_file_handler.setFormatter(formatter)
        logger.addHandler(log_file_handler)
    else:
        import multiprocessing
        multiprocessing.log_to_stderr()
    return logger
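
# Usage sketch (illustrative, file name is hypothetical): a string path gets a
# FileHandler, an open file-like object gets a StreamHandler, and no logfile
# means multiprocessing.log_to_stderr() is used. Assumes ``celery.conf``
# supplies the DAEMON_LOG_LEVEL and LOG_FORMAT defaults.
#
#     >>> logger = setup_logger(loglevel=logging.INFO, logfile="celeryd.log")
#     >>> logger.info("Worker ready.")
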
def emergency_error(logfile, message):
    """Emergency error logging, for when there's no standard file
    descriptors open because the process has been daemonized or for
    some other reason."""
    logfh_needs_to_close = False
    if not logfile:
        logfile = sys.__stderr__
    if hasattr(logfile, "write"):
        logfh = logfile
    else:
        logfh = open(logfile, "a")
        logfh_needs_to_close = True
    logfh.write("[%(asctime)s: FATAL/%(pid)d]: %(message)s\n" % {
                    "asctime": time.asctime(),
                    "pid": os.getpid(),
                    "message": message})
    if logfh_needs_to_close:
        logfh.close()
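
# Usage sketch (illustrative, file name is hypothetical): intended for the
# window where normal logging may not be configured yet, e.g. right after
# daemonization. It appends one timestamped FATAL line to the given path or
# file object, falling back to ``sys.__stderr__``.
#
#     >>> emergency_error("celeryd.fatal.log", "Broker connection lost.")
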
def redirect_stdouts_to_logger(logger, loglevel=None):
    """Redirect :class:`sys.stdout` and :class:`sys.stderr` to a
    logging instance.

    :param logger: The :class:`logging.Logger` instance to redirect to.
    :param loglevel: The loglevel redirected messages will be logged as.

    """
    proxy = LoggingProxy(logger, loglevel)
    sys.stdout = proxy
    sys.stderr = proxy
    return proxy
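
# Usage sketch (illustrative): after redirection, plain ``print`` output and
# uncaught traceback text go to the logger instead of the (possibly closed)
# standard streams of a daemonized worker.
#
#     >>> logger = setup_logger(logfile="celeryd.log")
#     >>> proxy = redirect_stdouts_to_logger(logger, loglevel=logging.WARNING)
#     >>> print "this line is now logged at WARNING level"
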
class LoggingProxy(object):
    """Forward file object to :class:`logging.Logger` instance.

    :param logger: The :class:`logging.Logger` instance to forward to.
    :param loglevel: Loglevel to use when writing messages.

    """
    mode = "w"
    name = None
    closed = False
    loglevel = logging.INFO

    def __init__(self, logger, loglevel=None):
        self.logger = logger
        self.loglevel = loglevel or self.logger.level or self.loglevel
        self._safewrap_handlers()

    def _safewrap_handlers(self):
        """Make the logger handlers dump internal errors to
        ``sys.__stderr__`` instead of ``sys.stderr`` to circumvent
        infinite loops."""

        def wrap_handler(handler):

            class WithSafeHandleError(logging.Handler):

                def handleError(self, record):
                    exc_info = sys.exc_info()
                    try:
                        traceback.print_exception(exc_info[0], exc_info[1],
                                                  exc_info[2], None,
                                                  sys.__stderr__)
                    except IOError:
                        pass    # see python issue 5971
                    finally:
                        del(exc_info)

            handler.handleError = WithSafeHandleError().handleError

        return map(wrap_handler, self.logger.handlers)

    def write(self, data):
        """Write message to logging object."""
        if not self.closed:
            self.logger.log(self.loglevel, data)

    def writelines(self, sequence):
        """``writelines(sequence_of_strings) -> None``.

        Write the strings to the file.

        The sequence can be any iterable object producing strings.
        This is equivalent to calling :meth:`write` for each string.

        """
        map(self.write, sequence)

    def flush(self):
        """This object is not buffered so any :meth:`flush` requests
        are ignored."""
        pass

    def close(self):
        """When the object is closed, no write requests are forwarded to
        the logging object anymore."""
        self.closed = True

    def isatty(self):
        """Always returns ``False``. Just here for file support."""
        return False

    def fileno(self):
        return None
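
# Usage sketch (illustrative): LoggingProxy can also be handed directly to
# code expecting a writable file object; every write() becomes one log record
# emitted at ``loglevel``.
#
#     >>> proxy = LoggingProxy(get_default_logger(), loglevel=logging.ERROR)
#     >>> proxy.write("something went wrong\n")
#     >>> proxy.close()        # further writes are silently dropped
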
class SilenceRepeated(object):
    """Only log action every n iterations."""

    def __init__(self, action, max_iterations=10):
        self.action = action
        self.max_iterations = max_iterations
        self._iterations = 0

    def __call__(self, *msgs):
        if self._iterations >= self.max_iterations:
            map(self.action, msgs)
            self._iterations = 0
        else:
            self._iterations += 1
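
# Usage sketch (illustrative): SilenceRepeated throttles a noisy log call
# inside a tight loop, forwarding the messages roughly once every
# ``max_iterations`` calls and swallowing the rest.
#
#     >>> debug = SilenceRepeated(get_default_logger().debug,
#     ...                         max_iterations=100)
#     >>> for i in range(1000):
#     ...     debug("still polling the broker...")
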