/Lib/logging/handlers.py

http://unladen-swallow.googlecode.com/ · Python

# Copyright 2001-2007 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Additional handlers for the logging package for Python. The core package is
based on PEP 282 and comments thereto in comp.lang.python, and influenced by
Apache's log4j system.

Copyright (C) 2001-2009 Vinay Sajip. All Rights Reserved.

To use, simply 'import logging.handlers' and log away!
"""

import logging, socket, types, os, string, cPickle, struct, time, re
from stat import ST_DEV, ST_INO

try:
    import codecs
except ImportError:
    codecs = None

#
# Some constants...
#
DEFAULT_TCP_LOGGING_PORT = 9020
DEFAULT_UDP_LOGGING_PORT = 9021
DEFAULT_HTTP_LOGGING_PORT = 9022
DEFAULT_SOAP_LOGGING_PORT = 9023
SYSLOG_UDP_PORT = 514

_MIDNIGHT = 24 * 60 * 60  # number of seconds in a day
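
# Illustrative use of one of the constants above (not part of the original
# source; the logger name is hypothetical, and a receiver must be listening
# on the default port for records to arrive):
#
#   import logging, logging.handlers
#   sh = logging.handlers.SocketHandler('localhost',
#                                       logging.handlers.DEFAULT_TCP_LOGGING_PORT)
#   logging.getLogger('myapp').addHandler(sh)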

class BaseRotatingHandler(logging.FileHandler):
    """
    Base class for handlers that rotate log files at a certain point.
    Not meant to be instantiated directly. Instead, use RotatingFileHandler
    or TimedRotatingFileHandler.
    """
    def __init__(self, filename, mode, encoding=None, delay=0):
        """
        Use the specified filename for streamed logging
        """
        if codecs is None:
            encoding = None
        logging.FileHandler.__init__(self, filename, mode, encoding, delay)
        self.mode = mode
        self.encoding = encoding

    def emit(self, record):
        """
        Emit a record.

        Output the record to the file, catering for rollover as described
        in doRollover().
        """
        try:
            if self.shouldRollover(record):
                self.doRollover()
            logging.FileHandler.emit(self, record)
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)


class RotatingFileHandler(BaseRotatingHandler):
    """
    Handler for logging to a set of files, which switches from one file
    to the next when the current file reaches a certain size.
    """
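    # Illustrative usage (not part of the original source; "app.log" is a
    # hypothetical file name):
    #
    #   handler = RotatingFileHandler("app.log", maxBytes=1048576, backupCount=5)
    #
    # When app.log approaches 1 MB it is renamed to app.log.1, any existing
    # app.log.1 becomes app.log.2, and so on, keeping at most 5 backups.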
    def __init__(self, filename, mode='a', maxBytes=0, backupCount=0, encoding=None, delay=0):
        """
        Open the specified file and use it as the stream for logging.

        By default, the file grows indefinitely. You can specify particular
        values of maxBytes and backupCount to allow the file to rollover at
        a predetermined size.

        Rollover occurs whenever the current log file is nearly maxBytes in
        length. If backupCount is >= 1, the system will successively create
        new files with the same pathname as the base file, but with extensions
        ".1", ".2" etc. appended to it. For example, with a backupCount of 5
        and a base file name of "app.log", you would get "app.log",
        "app.log.1", "app.log.2", ... through to "app.log.5". The file being
        written to is always "app.log" - when it gets filled up, it is closed
        and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc.
        exist, then they are renamed to "app.log.2", "app.log.3" etc.
        respectively.

        If maxBytes is zero, rollover never occurs.
        """
        if maxBytes > 0:
            mode = 'a' # doesn't make sense otherwise!
        BaseRotatingHandler.__init__(self, filename, mode, encoding, delay)
        self.maxBytes = maxBytes
        self.backupCount = backupCount

    def doRollover(self):
        """
        Do a rollover, as described in __init__().
        """
        self.stream.close()
        if self.backupCount > 0:
            for i in range(self.backupCount - 1, 0, -1):
                sfn = "%s.%d" % (self.baseFilename, i)
                dfn = "%s.%d" % (self.baseFilename, i + 1)
                if os.path.exists(sfn):
                    #print "%s -> %s" % (sfn, dfn)
                    if os.path.exists(dfn):
                        os.remove(dfn)
                    os.rename(sfn, dfn)
            dfn = self.baseFilename + ".1"
            if os.path.exists(dfn):
                os.remove(dfn)
            os.rename(self.baseFilename, dfn)
            #print "%s -> %s" % (self.baseFilename, dfn)
        self.mode = 'w'
        self.stream = self._open()

    def shouldRollover(self, record):
        """
        Determine if rollover should occur.

        Basically, see if the supplied record would cause the file to exceed
        the size limit we have.
        """
        if self.stream is None:     # delay was set...
            self.stream = self._open()
        if self.maxBytes > 0:       # are we rolling over?
            msg = "%s\n" % self.format(record)
            self.stream.seek(0, 2)  #due to non-posix-compliant Windows feature
            if self.stream.tell() + len(msg) >= self.maxBytes:
                return 1
        return 0


class TimedRotatingFileHandler(BaseRotatingHandler):
    """
    Handler for logging to a file, rotating the log file at certain timed
    intervals.

    If backupCount is > 0, when rollover is done, no more than backupCount
    files are kept - the oldest ones are deleted.
    """
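    # Illustrative usage (not part of the original source; "app.log" is a
    # hypothetical file name): rotate at midnight and keep a week of backups.
    #
    #   handler = TimedRotatingFileHandler("app.log", when="midnight",
    #                                      backupCount=7)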
    def __init__(self, filename, when='h', interval=1, backupCount=0, encoding=None, delay=0, utc=0):
        BaseRotatingHandler.__init__(self, filename, 'a', encoding, delay)
        self.when = string.upper(when)
        self.backupCount = backupCount
        self.utc = utc
        # Calculate the real rollover interval, which is just the number of
        # seconds between rollovers. Also set the filename suffix used when
        # a rollover occurs. Current 'when' events supported:
        # S - Seconds
        # M - Minutes
        # H - Hours
        # D - Days
        # midnight - roll over at midnight
        # W{0-6} - roll over on a certain day; 0 - Monday
        #
        # Case of the 'when' specifier is not important; lower or upper case
        # will work.
        currentTime = int(time.time())
        if self.when == 'S':
            self.interval = 1 # one second
            self.suffix = "%Y-%m-%d_%H-%M-%S"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}$"
        elif self.when == 'M':
            self.interval = 60 # one minute
            self.suffix = "%Y-%m-%d_%H-%M"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}$"
        elif self.when == 'H':
            self.interval = 60 * 60 # one hour
            self.suffix = "%Y-%m-%d_%H"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}$"
        elif self.when == 'D' or self.when == 'MIDNIGHT':
            self.interval = 60 * 60 * 24 # one day
            self.suffix = "%Y-%m-%d"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}$"
        elif self.when.startswith('W'):
            self.interval = 60 * 60 * 24 * 7 # one week
            if len(self.when) != 2:
                raise ValueError("You must specify a day for weekly rollover from 0 to 6 (0 is Monday): %s" % self.when)
            if self.when[1] < '0' or self.when[1] > '6':
                raise ValueError("Invalid day specified for weekly rollover: %s" % self.when)
            self.dayOfWeek = int(self.when[1])
            self.suffix = "%Y-%m-%d"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}$"
        else:
            raise ValueError("Invalid rollover interval specified: %s" % self.when)

        self.extMatch = re.compile(self.extMatch)
        self.interval = self.interval * interval # multiply by units requested
        self.rolloverAt = self.computeRollover(int(time.time()))
        #print "Will rollover at %d, %d seconds from now" % (self.rolloverAt, self.rolloverAt - currentTime)

    def computeRollover(self, currentTime):
        """
        Work out the rollover time based on the specified time.
        """
        result = currentTime + self.interval
        # If we are rolling over at midnight or weekly, then the interval is already known.
        # What we need to figure out is WHEN the next interval is. In other words,
        # if you are rolling over at midnight, then your base interval is 1 day,
        # but you want to start that one day clock at midnight, not now. So, we
        # have to fudge the rolloverAt value in order to trigger the first rollover
        # at the right time. After that, the regular interval will take care of
        # the rest. Note that this code doesn't care about leap seconds. :)
        if self.when == 'MIDNIGHT' or self.when.startswith('W'):
            # This could be done with less code, but I wanted it to be clear
            if self.utc:
                t = time.gmtime(currentTime)
            else:
                t = time.localtime(currentTime)
            currentHour = t[3]
            currentMinute = t[4]
            currentSecond = t[5]
            # r is the number of seconds left between now and midnight
            r = _MIDNIGHT - ((currentHour * 60 + currentMinute) * 60 +
                             currentSecond)
            result = currentTime + r
            # If we are rolling over on a certain day, add in the number of days until
            # the next rollover, but offset by 1 since we just calculated the time
            # until the next day starts. There are three cases:
            # Case 1) The day to rollover is today; in this case, do nothing
            # Case 2) The day to rollover is further in the interval (i.e., today is
            #         day 2 (Wednesday) and rollover is on day 6 (Sunday). Days to
            #         next rollover is simply 6 - 2 - 1, or 3.
            # Case 3) The day to rollover is behind us in the interval (i.e., today
            #         is day 5 (Saturday) and rollover is on day 3 (Thursday).
            #         Days to rollover is 6 - 5 + 3, or 4. In this case, it's the
            #         number of days left in the current week (1) plus the number
            #         of days in the next week until the rollover day (3).
            # The calculations described in 2) and 3) above need to have a day added.
            # This is because the above time calculation takes us to midnight on this
            # day, i.e. the start of the next day.
            if self.when.startswith('W'):
                day = t[6] # 0 is Monday
                if day != self.dayOfWeek:
                    if day < self.dayOfWeek:
                        daysToWait = self.dayOfWeek - day
                    else:
                        daysToWait = 6 - day + self.dayOfWeek + 1
                    newRolloverAt = result + (daysToWait * (60 * 60 * 24))
                    if not self.utc:
                        dstNow = t[-1]
                        dstAtRollover = time.localtime(newRolloverAt)[-1]
                        if dstNow != dstAtRollover:
                            if not dstNow:  # DST kicks in before next rollover, so we need to deduct an hour
                                newRolloverAt = newRolloverAt - 3600
                            else:           # DST bows out before next rollover, so we need to add an hour
                                newRolloverAt = newRolloverAt + 3600
                    result = newRolloverAt
        return result

    def shouldRollover(self, record):
        """
        Determine if rollover should occur.

        record is not used, as we are just comparing times, but it is needed so
        the method signatures are the same
        """
        t = int(time.time())
        if t >= self.rolloverAt:
            return 1
        #print "No need to rollover: %d, %d" % (t, self.rolloverAt)
        return 0

    def getFilesToDelete(self):
        """
        Determine the files to delete when rolling over.

        More specific than the earlier method, which just used glob.glob().
        """
        dirName, baseName = os.path.split(self.baseFilename)
        fileNames = os.listdir(dirName)
        result = []
        prefix = baseName + "."
        plen = len(prefix)
        for fileName in fileNames:
            if fileName[:plen] == prefix:
                suffix = fileName[plen:]
                if self.extMatch.match(suffix):
                    result.append(os.path.join(dirName, fileName))
        result.sort()
        if len(result) < self.backupCount:
            result = []
        else:
            result = result[:len(result) - self.backupCount]
        return result

    def doRollover(self):
        """
        do a rollover; in this case, a date/time stamp is appended to the filename
        when the rollover happens. However, you want the file to be named for the
        start of the interval, not the current time. If there is a backup count,
        then we have to get a list of matching filenames, sort them and remove
        the one with the oldest suffix.
        """
        if self.stream:
            self.stream.close()
        # get the time that this sequence started at and make it a TimeTuple
        t = self.rolloverAt - self.interval
        if self.utc:
            timeTuple = time.gmtime(t)
        else:
            timeTuple = time.localtime(t)
        dfn = self.baseFilename + "." + time.strftime(self.suffix, timeTuple)
        if os.path.exists(dfn):
            os.remove(dfn)
        os.rename(self.baseFilename, dfn)
        if self.backupCount > 0:
            # find the oldest log file and delete it
            #s = glob.glob(self.baseFilename + ".20*")
            #if len(s) > self.backupCount:
            #    s.sort()
            #    os.remove(s[0])
            for s in self.getFilesToDelete():
                os.remove(s)
        #print "%s -> %s" % (self.baseFilename, dfn)
        self.mode = 'w'
        self.stream = self._open()
        currentTime = int(time.time())
        newRolloverAt = self.computeRollover(currentTime)
        while newRolloverAt <= currentTime:
            newRolloverAt = newRolloverAt + self.interval
        #If DST changes and midnight or weekly rollover, adjust for this.
        if (self.when == 'MIDNIGHT' or self.when.startswith('W')) and not self.utc:
            dstNow = time.localtime(currentTime)[-1]
            dstAtRollover = time.localtime(newRolloverAt)[-1]
            if dstNow != dstAtRollover:
                if not dstNow:  # DST kicks in before next rollover, so we need to deduct an hour
                    newRolloverAt = newRolloverAt - 3600
                else:           # DST bows out before next rollover, so we need to add an hour
                    newRolloverAt = newRolloverAt + 3600
        self.rolloverAt = newRolloverAt


class WatchedFileHandler(logging.FileHandler):
    """
    A handler for logging to a file, which watches the file
    to see if it has changed while in use. This can happen because of
    usage of programs such as newsyslog and logrotate which perform
    log file rotation. This handler, intended for use under Unix,
    watches the file to see if it has changed since the last emit.
    (A file has changed if its device or inode have changed.)
    If it has changed, the old file stream is closed, and the file
    opened to get a new stream.

    This handler is not appropriate for use under Windows, because
    under Windows open files cannot be moved or renamed - logging
    opens the files with exclusive locks - and so there is no need
    for such a handler. Furthermore, ST_INO is not supported under
    Windows; stat always returns zero for this value.

    This handler is based on a suggestion and patch by Chad J.
    Schroeder.
    """
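    # Illustrative usage (not part of the original source; the path is
    # hypothetical) - suitable when an external tool such as logrotate may
    # rename the file underneath the running process:
    #
    #   handler = WatchedFileHandler("/var/log/myapp.log")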
    def __init__(self, filename, mode='a', encoding=None, delay=0):
        logging.FileHandler.__init__(self, filename, mode, encoding, delay)
        if not os.path.exists(self.baseFilename):
            self.dev, self.ino = -1, -1
        else:
            stat = os.stat(self.baseFilename)
            self.dev, self.ino = stat[ST_DEV], stat[ST_INO]

    def emit(self, record):
        """
        Emit a record.

        First check if the underlying file has changed, and if it
        has, close the old stream and reopen the file to get the
        current stream.
        """
        if not os.path.exists(self.baseFilename):
            stat = None
            changed = 1
        else:
            stat = os.stat(self.baseFilename)
            changed = (stat[ST_DEV] != self.dev) or (stat[ST_INO] != self.ino)
        if changed and self.stream is not None:
            self.stream.flush()
            self.stream.close()
            self.stream = self._open()
            if stat is None:
                stat = os.stat(self.baseFilename)
            self.dev, self.ino = stat[ST_DEV], stat[ST_INO]
        logging.FileHandler.emit(self, record)


class SocketHandler(logging.Handler):
    """
    A handler class which writes logging records, in pickle format, to
    a streaming socket. The socket is kept open across logging calls.
    If the peer resets it, an attempt is made to reconnect on the next call.

    The pickle which is sent is that of the LogRecord's attribute dictionary
    (__dict__), so that the receiver does not need to have the logging module
    installed in order to process the logging event.

    To unpickle the record at the receiving end into a LogRecord, use the
    makeLogRecord function.
    """
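    # Receiver-side sketch (illustrative only, not part of the original
    # source): each message is a 4-byte big-endian length prefix followed by
    # a pickled LogRecord __dict__ (see makePickle() below). The helper name
    # and socket handling here are hypothetical.
    #
    #   import cPickle, struct, logging
    #
    #   def read_record(conn):
    #       # read the 4-byte length prefix, then the pickled payload
    #       slen = struct.unpack(">L", conn.recv(4))[0]
    #       data = conn.recv(slen)
    #       while len(data) < slen:
    #           data = data + conn.recv(slen - len(data))
    #       return logging.makeLogRecord(cPickle.loads(data))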
    def __init__(self, host, port):
        """
        Initializes the handler with a specific host address and port.

        The attribute 'closeOnError' is set to 1 - which means that if
        a socket error occurs, the socket is silently closed and then
        reopened on the next logging call.
        """
        logging.Handler.__init__(self)
        self.host = host
        self.port = port
        self.sock = None
        self.closeOnError = 0
        self.retryTime = None
        #
        # Exponential backoff parameters.
        #
        self.retryStart = 1.0
        self.retryMax = 30.0
        self.retryFactor = 2.0

    def makeSocket(self, timeout=1):
        """
        A factory method which allows subclasses to define the precise
        type of socket they want.
        """
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        if hasattr(s, 'settimeout'):
            s.settimeout(timeout)
        s.connect((self.host, self.port))
        return s

    def createSocket(self):
        """
        Try to create a socket, using an exponential backoff with
        a max retry time. Thanks to Robert Olson for the original patch
        (SF #815911) which has been slightly refactored.
        """
        now = time.time()
        # Either retryTime is None, in which case this
        # is the first time back after a disconnect, or
        # we've waited long enough.
        if self.retryTime is None:
            attempt = 1
        else:
            attempt = (now >= self.retryTime)
        if attempt:
            try:
                self.sock = self.makeSocket()
                self.retryTime = None # next time, no delay before trying
            except socket.error:
                #Creation failed, so set the retry time and return.
                if self.retryTime is None:
                    self.retryPeriod = self.retryStart
                else:
                    self.retryPeriod = self.retryPeriod * self.retryFactor
                    if self.retryPeriod > self.retryMax:
                        self.retryPeriod = self.retryMax
                self.retryTime = now + self.retryPeriod

    def send(self, s):
        """
        Send a pickled string to the socket.

        This function allows for partial sends which can happen when the
        network is busy.
        """
        if self.sock is None:
            self.createSocket()
        #self.sock can be None either because we haven't reached the retry
        #time yet, or because we have reached the retry time and retried,
        #but are still unable to connect.
        if self.sock:
            try:
                if hasattr(self.sock, "sendall"):
                    self.sock.sendall(s)
                else:
                    sentsofar = 0
                    left = len(s)
                    while left > 0:
                        sent = self.sock.send(s[sentsofar:])
                        sentsofar = sentsofar + sent
                        left = left - sent
            except socket.error:
                self.sock.close()
                self.sock = None # so we can call createSocket next time

    def makePickle(self, record):
        """
        Pickles the record in binary format with a length prefix, and
        returns it ready for transmission across the socket.
        """
        ei = record.exc_info
        if ei:
            dummy = self.format(record) # just to get traceback text into record.exc_text
            record.exc_info = None # to avoid Unpickleable error
        s = cPickle.dumps(record.__dict__, 1)
        if ei:
            record.exc_info = ei # for next handler
        slen = struct.pack(">L", len(s))
        return slen + s

    def handleError(self, record):
        """
        Handle an error during logging.

        An error has occurred during logging. Most likely cause -
        connection lost. Close the socket so that we can retry on the
        next event.
        """
        if self.closeOnError and self.sock:
            self.sock.close()
            self.sock = None #try to reconnect next time
        else:
            logging.Handler.handleError(self, record)

    def emit(self, record):
        """
        Emit a record.

        Pickles the record and writes it to the socket in binary format.
        If there is an error with the socket, silently drop the packet.
        If there was a problem with the socket, re-establishes the
        socket.
        """
        try:
            s = self.makePickle(record)
            self.send(s)
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)

    def close(self):
        """
        Closes the socket.
        """
        if self.sock:
            self.sock.close()
            self.sock = None
        logging.Handler.close(self)


class DatagramHandler(SocketHandler):
    """
    A handler class which writes logging records, in pickle format, to
    a datagram socket. The pickle which is sent is that of the LogRecord's
    attribute dictionary (__dict__), so that the receiver does not need to
    have the logging module installed in order to process the logging event.

    To unpickle the record at the receiving end into a LogRecord, use the
    makeLogRecord function.
    """
    def __init__(self, host, port):
        """
        Initializes the handler with a specific host address and port.
        """
        SocketHandler.__init__(self, host, port)
        self.closeOnError = 0

    def makeSocket(self):
        """
        The factory method of SocketHandler is here overridden to create
        a UDP socket (SOCK_DGRAM).
        """
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        return s

    def send(self, s):
        """
        Send a pickled string to a socket.

        This function no longer allows for partial sends which can happen
        when the network is busy - UDP does not guarantee delivery and
        can deliver packets out of sequence.
        """
        if self.sock is None:
            self.createSocket()
        self.sock.sendto(s, (self.host, self.port))


class SysLogHandler(logging.Handler):
    """
    A handler class which sends formatted logging records to a syslog
    server. Based on Sam Rushing's syslog module:
    http://www.nightmare.com/squirl/python-ext/misc/syslog.py
    Contributed by Nicolas Untz (after which minor refactoring changes
    have been made).
    """
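    # Illustrative usage (not part of the original source): on many Unix
    # systems the local syslog daemon listens on the /dev/log socket, so
    #
    #   handler = SysLogHandler(address="/dev/log",
    #                           facility=SysLogHandler.LOG_DAEMON)
    #
    # logs via the daemon facility; by default a UDP socket to
    # ('localhost', SYSLOG_UDP_PORT) is used instead.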
    # from <linux/sys/syslog.h>:
    # ======================================================================
    # priorities/facilities are encoded into a single 32-bit quantity, where
    # the bottom 3 bits are the priority (0-7) and the top 28 bits are the
    # facility (0-big number). Both the priorities and the facilities map
    # roughly one-to-one to strings in the syslogd(8) source code. This
    # mapping is included in this file.
    #
    # priorities (these are ordered)
    LOG_EMERG = 0       # system is unusable
    LOG_ALERT = 1       # action must be taken immediately
    LOG_CRIT = 2        # critical conditions
    LOG_ERR = 3         # error conditions
    LOG_WARNING = 4     # warning conditions
    LOG_NOTICE = 5      # normal but significant condition
    LOG_INFO = 6        # informational
    LOG_DEBUG = 7       # debug-level messages

    # facility codes
    LOG_KERN = 0        # kernel messages
    LOG_USER = 1        # random user-level messages
    LOG_MAIL = 2        # mail system
    LOG_DAEMON = 3      # system daemons
    LOG_AUTH = 4        # security/authorization messages
    LOG_SYSLOG = 5      # messages generated internally by syslogd
    LOG_LPR = 6         # line printer subsystem
    LOG_NEWS = 7        # network news subsystem
    LOG_UUCP = 8        # UUCP subsystem
    LOG_CRON = 9        # clock daemon
    LOG_AUTHPRIV = 10   # security/authorization messages (private)

    # other codes through 15 reserved for system use
    LOG_LOCAL0 = 16     # reserved for local use
    LOG_LOCAL1 = 17     # reserved for local use
    LOG_LOCAL2 = 18     # reserved for local use
    LOG_LOCAL3 = 19     # reserved for local use
    LOG_LOCAL4 = 20     # reserved for local use
    LOG_LOCAL5 = 21     # reserved for local use
    LOG_LOCAL6 = 22     # reserved for local use
    LOG_LOCAL7 = 23     # reserved for local use

    priority_names = {
        "alert":    LOG_ALERT,
        "crit":     LOG_CRIT,
        "critical": LOG_CRIT,
        "debug":    LOG_DEBUG,
        "emerg":    LOG_EMERG,
        "err":      LOG_ERR,
        "error":    LOG_ERR,        # DEPRECATED
        "info":     LOG_INFO,
        "notice":   LOG_NOTICE,
        "panic":    LOG_EMERG,      # DEPRECATED
        "warn":     LOG_WARNING,    # DEPRECATED
        "warning":  LOG_WARNING,
        }

    facility_names = {
        "auth":     LOG_AUTH,
        "authpriv": LOG_AUTHPRIV,
        "cron":     LOG_CRON,
        "daemon":   LOG_DAEMON,
        "kern":     LOG_KERN,
        "lpr":      LOG_LPR,
        "mail":     LOG_MAIL,
        "news":     LOG_NEWS,
        "security": LOG_AUTH,       # DEPRECATED
        "syslog":   LOG_SYSLOG,
        "user":     LOG_USER,
        "uucp":     LOG_UUCP,
        "local0":   LOG_LOCAL0,
        "local1":   LOG_LOCAL1,
        "local2":   LOG_LOCAL2,
        "local3":   LOG_LOCAL3,
        "local4":   LOG_LOCAL4,
        "local5":   LOG_LOCAL5,
        "local6":   LOG_LOCAL6,
        "local7":   LOG_LOCAL7,
        }

    #The map below appears to be trivially lowercasing the key. However,
    #there's more to it than meets the eye - in some locales, lowercasing
    #gives unexpected results. See SF #1524081: in the Turkish locale,
    #"INFO".lower() != "info"
    priority_map = {
        "DEBUG" : "debug",
        "INFO" : "info",
        "WARNING" : "warning",
        "ERROR" : "error",
        "CRITICAL" : "critical"
    }

    def __init__(self, address=('localhost', SYSLOG_UDP_PORT), facility=LOG_USER):
        """
        Initialize a handler.

        If address is specified as a string, a UNIX socket is used. To log to a
        local syslogd, "SysLogHandler(address="/dev/log")" can be used.
        If facility is not specified, LOG_USER is used.
        """
        logging.Handler.__init__(self)
        self.address = address
        self.facility = facility
        if type(address) == types.StringType:
            self.unixsocket = 1
            self._connect_unixsocket(address)
        else:
            self.unixsocket = 0
            self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.formatter = None

    def _connect_unixsocket(self, address):
        self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
        # syslog may require either DGRAM or STREAM sockets
        try:
            self.socket.connect(address)
        except socket.error:
            self.socket.close()
            self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            self.socket.connect(address)

    # curious: when talking to the unix-domain '/dev/log' socket, a
    # zero-terminator seems to be required. this string is placed
    # into a class variable so that it can be overridden if
    # necessary.
    log_format_string = '<%d>%s\000'

    def encodePriority(self, facility, priority):
        """
        Encode the facility and priority. You can pass in strings or
        integers - if strings are passed, the facility_names and
        priority_names mapping dictionaries are used to convert them to
        integers.
        """
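        # For example (illustrative): facility LOG_USER (1) combined with
        # priority "warning" (LOG_WARNING, 4) encodes as (1 << 3) | 4 == 12.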
        if type(facility) == types.StringType:
            facility = self.facility_names[facility]
        if type(priority) == types.StringType:
            priority = self.priority_names[priority]
        return (facility << 3) | priority

    def close(self):
        """
        Closes the socket.
        """
        if self.unixsocket:
            self.socket.close()
        logging.Handler.close(self)

    def mapPriority(self, levelName):
        """
        Map a logging level name to a key in the priority_names map.
        This is useful in two scenarios: when custom levels are being
        used, and in the case where you can't do a straightforward
        mapping by lowercasing the logging level name because of locale-
        specific issues (see SF #1524081).
        """
        return self.priority_map.get(levelName, "warning")

    def emit(self, record):
        """
        Emit a record.

        The record is formatted, and then sent to the syslog server. If
        exception information is present, it is NOT sent to the server.
        """
        msg = self.format(record)
        """
        We need to convert record level to lowercase, maybe this will
        change in the future.
        """
        msg = self.log_format_string % (
            self.encodePriority(self.facility,
                                self.mapPriority(record.levelname)),
            msg)
        try:
            if self.unixsocket:
                try:
                    self.socket.send(msg)
                except socket.error:
                    self._connect_unixsocket(self.address)
                    self.socket.send(msg)
            else:
                self.socket.sendto(msg, self.address)
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)


class SMTPHandler(logging.Handler):
    """
    A handler class which sends an SMTP email for each logging event.
    """
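    # Illustrative usage (not part of the original source; the host and
    # addresses are hypothetical):
    #
    #   handler = SMTPHandler(("smtp.example.com", 25),
    #                         "app@example.com",
    #                         ["ops@example.com"],
    #                         "Application error",
    #                         credentials=("user", "secret"))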
    def __init__(self, mailhost, fromaddr, toaddrs, subject, credentials=None):
        """
        Initialize the handler.

        Initialize the instance with the from and to addresses and subject
        line of the email. To specify a non-standard SMTP port, use the
        (host, port) tuple format for the mailhost argument. To specify
        authentication credentials, supply a (username, password) tuple
        for the credentials argument.
        """
        logging.Handler.__init__(self)
        if type(mailhost) == types.TupleType:
            self.mailhost, self.mailport = mailhost
        else:
            self.mailhost, self.mailport = mailhost, None
        if type(credentials) == types.TupleType:
            self.username, self.password = credentials
        else:
            self.username = None
        self.fromaddr = fromaddr
        if type(toaddrs) == types.StringType:
            toaddrs = [toaddrs]
        self.toaddrs = toaddrs
        self.subject = subject

    def getSubject(self, record):
        """
        Determine the subject for the email.

        If you want to specify a subject line which is record-dependent,
        override this method.
        """
        return self.subject

    weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']

    monthname = [None,
                 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
                 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']

    def date_time(self):
        """
        Return the current date and time formatted for a MIME header.
        Needed for Python 1.5.2 (no email package available)
        """
        year, month, day, hh, mm, ss, wd, y, z = time.gmtime(time.time())
        s = "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
                self.weekdayname[wd],
                day, self.monthname[month], year,
                hh, mm, ss)
        return s

    def emit(self, record):
        """
        Emit a record.

        Format the record and send it to the specified addressees.
        """
        try:
            import smtplib
            try:
                from email.utils import formatdate
            except ImportError:
                formatdate = self.date_time
            port = self.mailport
            if not port:
                port = smtplib.SMTP_PORT
            smtp = smtplib.SMTP(self.mailhost, port)
            msg = self.format(record)
            msg = "From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s" % (
                            self.fromaddr,
                            string.join(self.toaddrs, ","),
                            self.getSubject(record),
                            formatdate(), msg)
            if self.username:
                smtp.login(self.username, self.password)
            smtp.sendmail(self.fromaddr, self.toaddrs, msg)
            smtp.quit()
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)


class NTEventLogHandler(logging.Handler):
    """
    A handler class which sends events to the NT Event Log. Adds a
    registry entry for the specified application name. If no dllname is
    provided, win32service.pyd (which contains some basic message
    placeholders) is used. Note that use of these placeholders will make
    your event logs big, as the entire message source is held in the log.
    If you want slimmer logs, you have to pass in the name of your own DLL
    which contains the message definitions you want to use in the event log.
    """
    def __init__(self, appname, dllname=None, logtype="Application"):
        logging.Handler.__init__(self)
        try:
            import win32evtlogutil, win32evtlog
            self.appname = appname
            self._welu = win32evtlogutil
            if not dllname:
                dllname = os.path.split(self._welu.__file__)
                dllname = os.path.split(dllname[0])
                dllname = os.path.join(dllname[0], r'win32service.pyd')
            self.dllname = dllname
            self.logtype = logtype
            self._welu.AddSourceToRegistry(appname, dllname, logtype)
            self.deftype = win32evtlog.EVENTLOG_ERROR_TYPE
            self.typemap = {
                logging.DEBUG   : win32evtlog.EVENTLOG_INFORMATION_TYPE,
                logging.INFO    : win32evtlog.EVENTLOG_INFORMATION_TYPE,
                logging.WARNING : win32evtlog.EVENTLOG_WARNING_TYPE,
                logging.ERROR   : win32evtlog.EVENTLOG_ERROR_TYPE,
                logging.CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE,
            }
        except ImportError:
            print "The Python Win32 extensions for NT (service, event "\
                  "logging) appear not to be available."
            self._welu = None

    def getMessageID(self, record):
        """
        Return the message ID for the event record. If you are using your
        own messages, you could do this by having the msg passed to the
        logger being an ID rather than a formatting string. Then, in here,
        you could use a dictionary lookup to get the message ID. This
        version returns 1, which is the base message ID in win32service.pyd.
        """
        return 1

    def getEventCategory(self, record):
        """
        Return the event category for the record.

        Override this if you want to specify your own categories. This version
        returns 0.
        """
        return 0

    def getEventType(self, record):
        """
        Return the event type for the record.

        Override this if you want to specify your own types. This version does
        a mapping using the handler's typemap attribute, which is set up in
        __init__() to a dictionary which contains mappings for DEBUG, INFO,
        WARNING, ERROR and CRITICAL. If you are using your own levels you will
        either need to override this method or place a suitable dictionary in
        the handler's typemap attribute.
        """
        return self.typemap.get(record.levelno, self.deftype)

    def emit(self, record):
        """
        Emit a record.

        Determine the message ID, event category and event type. Then
        log the message in the NT event log.
        """
        if self._welu:
            try:
                id = self.getMessageID(record)
                cat = self.getEventCategory(record)
                type = self.getEventType(record)
                msg = self.format(record)
                self._welu.ReportEvent(self.appname, id, cat, type, [msg])
            except (KeyboardInterrupt, SystemExit):
                raise
            except:
                self.handleError(record)

    def close(self):
        """
        Clean up this handler.

        You can remove the application name from the registry as a
        source of event log entries. However, if you do this, you will
        not be able to see the events as you intended in the Event Log
        Viewer - it needs to be able to access the registry to get the
        DLL name.
        """
        #self._welu.RemoveSourceFromRegistry(self.appname, self.logtype)
        logging.Handler.close(self)


class HTTPHandler(logging.Handler):
    """
    A class which sends records to a Web server, using either GET or
    POST semantics.
    """
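    # Illustrative usage (not part of the original source; the host and path
    # are hypothetical). Each record's __dict__ is URL-encoded and sent to
    # the server, here using POST:
    #
    #   handler = HTTPHandler("logs.example.com:8080", "/log", method="POST")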
    def __init__(self, host, url, method="GET"):
        """
        Initialize the instance with the host, the request URL, and the method
        ("GET" or "POST")
        """
        logging.Handler.__init__(self)
        method = string.upper(method)
        if method not in ["GET", "POST"]:
            raise ValueError, "method must be GET or POST"
        self.host = host
        self.url = url
        self.method = method

    def mapLogRecord(self, record):
        """
        Default implementation of mapping the log record into a dict
        that is sent as the CGI data. Overwrite in your class.
        Contributed by Franz Glasner.
        """
        return record.__dict__

    def emit(self, record):
        """
        Emit a record.

        Send the record to the Web server as an URL-encoded dictionary
        """
        try:
            import httplib, urllib
            host = self.host
            h = httplib.HTTP(host)
            url = self.url
            data = urllib.urlencode(self.mapLogRecord(record))
            if self.method == "GET":
                if (string.find(url, '?') >= 0):
                    sep = '&'
                else:
                    sep = '?'
                url = url + "%c%s" % (sep, data)
            h.putrequest(self.method, url)
            # support multiple hosts on one IP address...
            # need to strip optional :port from host, if present
            i = string.find(host, ":")
            if i >= 0:
                host = host[:i]
            h.putheader("Host", host)
            if self.method == "POST":
                h.putheader("Content-type",
                            "application/x-www-form-urlencoded")
                h.putheader("Content-length", str(len(data)))
            h.endheaders()
            if self.method == "POST":
                h.send(data)
            h.getreply() #can't do anything with the result
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)


class BufferingHandler(logging.Handler):
    """
    A handler class which buffers logging records in memory. Whenever each
    record is added to the buffer, a check is made to see if the buffer should
    be flushed. If it should, then flush() is expected to do what's needed.
    """
    def __init__(self, capacity):
        """
        Initialize the handler with the buffer size.
        """
        logging.Handler.__init__(self)
        self.capacity = capacity
        self.buffer = []

    def shouldFlush(self, record):
        """
        Should the handler flush its buffer?

        Returns true if the buffer is up to capacity. This method can be
        overridden to implement custom flushing strategies.
        """
        return (len(self.buffer) >= self.capacity)

    def emit(self, record):
        """
        Emit a record.

        Append the record. If shouldFlush() tells us to, call flush() to process
        the buffer.
        """
        self.buffer.append(record)
        if self.shouldFlush(record):
            self.flush()

    def flush(self):
        """
        Override to implement custom flushing behaviour.

        This version just zaps the buffer to empty.
        """
        self.buffer = []

    def close(self):
        """
        Close the handler.

        This version just flushes and chains to the parent class' close().
        """
        self.flush()
        logging.Handler.close(self)


class MemoryHandler(BufferingHandler):
    """
    A handler class which buffers logging records in memory, periodically
    flushing them to a target handler. Flushing occurs whenever the buffer
    is full, or when an event of a certain severity or greater is seen.
    """
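    # Illustrative usage (not part of the original source; "app.log" is a
    # hypothetical file name): buffer up to 100 records and flush them to a
    # file handler once an ERROR or worse is seen, or when the buffer fills.
    #
    #   target = logging.FileHandler("app.log")
    #   handler = MemoryHandler(100, flushLevel=logging.ERROR, target=target)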
    def __init__(self, capacity, flushLevel=logging.ERROR, target=None):
        """
        Initialize the handler with the buffer size, the level at which
        flushing should occur and an optional target.

        Note that without a target being set either here or via setTarget(),
        a MemoryHandler is no use to anyone!
        """
        BufferingHandler.__init__(self, capacity)
        self.flushLevel = flushLevel
        self.target = target

    def shouldFlush(self, record):
        """
        Check for buffer full or a record at the flushLevel or higher.
        """
        return (len(self.buffer) >= self.capacity) or \
                (record.levelno >= self.flushLevel)

    def setTarget(self, target):
        """
        Set the target handler for this handler.
        """
        self.target = target

    def flush(self):
        """
        For a MemoryHandler, flushing means just sending the buffered
        records to the target, if there is one. Override if you want
        different behaviour.
        """
        if self.target:
            for record in self.buffer:
                self.target.handle(record)
            self.buffer = []

    def close(self):
        """
        Flush, set the target to None and lose the buffer.
        """
        self.flush()
        self.target = None
        BufferingHandler.close(self)