PageRenderTime 54ms CodeModel.GetById 17ms RepoModel.GetById 0ms app.codeStats 0ms

/lib-python/2.7/logging/handlers.py

https://bitbucket.org/dac_io/pypy
Python | 1158 lines | 856 code | 27 blank | 275 comment | 98 complexity | e1a46723ec5f56f1ad0a6c59cd86ce8f MD5 | raw file
  1. # Copyright 2001-2010 by Vinay Sajip. All Rights Reserved.
  2. #
  3. # Permission to use, copy, modify, and distribute this software and its
  4. # documentation for any purpose and without fee is hereby granted,
  5. # provided that the above copyright notice appear in all copies and that
  6. # both that copyright notice and this permission notice appear in
  7. # supporting documentation, and that the name of Vinay Sajip
  8. # not be used in advertising or publicity pertaining to distribution
  9. # of the software without specific, written prior permission.
  10. # VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
  11. # ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
  12. # VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
  13. # ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
  14. # IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
  15. # OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  16. """
  17. Additional handlers for the logging package for Python. The core package is
  18. based on PEP 282 and comments thereto in comp.lang.python, and influenced by
  19. Apache's log4j system.
  20. Copyright (C) 2001-2010 Vinay Sajip. All Rights Reserved.
  21. To use, simply 'import logging.handlers' and log away!
  22. """
import logging, socket, os, cPickle, struct, time, re, errno
from stat import ST_DEV, ST_INO, ST_MTIME

try:
    import codecs
except ImportError:
    codecs = None
try:
    unicode
    _unicode = True
except NameError:
    _unicode = False
#
# Some constants...
#
# Default ports used by the network handlers in this module when the
# caller does not supply an explicit port.
DEFAULT_TCP_LOGGING_PORT = 9020
DEFAULT_UDP_LOGGING_PORT = 9021
DEFAULT_HTTP_LOGGING_PORT = 9022
DEFAULT_SOAP_LOGGING_PORT = 9023
# Well-known syslog ports (same number for UDP and TCP transports).
SYSLOG_UDP_PORT = 514
SYSLOG_TCP_PORT = 514
_MIDNIGHT = 24 * 60 * 60 # number of seconds in a day
  44. class BaseRotatingHandler(logging.FileHandler):
  45. """
  46. Base class for handlers that rotate log files at a certain point.
  47. Not meant to be instantiated directly. Instead, use RotatingFileHandler
  48. or TimedRotatingFileHandler.
  49. """
  50. def __init__(self, filename, mode, encoding=None, delay=0):
  51. """
  52. Use the specified filename for streamed logging
  53. """
  54. if codecs is None:
  55. encoding = None
  56. logging.FileHandler.__init__(self, filename, mode, encoding, delay)
  57. self.mode = mode
  58. self.encoding = encoding
  59. def emit(self, record):
  60. """
  61. Emit a record.
  62. Output the record to the file, catering for rollover as described
  63. in doRollover().
  64. """
  65. try:
  66. if self.shouldRollover(record):
  67. self.doRollover()
  68. logging.FileHandler.emit(self, record)
  69. except (KeyboardInterrupt, SystemExit):
  70. raise
  71. except:
  72. self.handleError(record)
  73. class RotatingFileHandler(BaseRotatingHandler):
  74. """
  75. Handler for logging to a set of files, which switches from one file
  76. to the next when the current file reaches a certain size.
  77. """
  78. def __init__(self, filename, mode='a', maxBytes=0, backupCount=0, encoding=None, delay=0):
  79. """
  80. Open the specified file and use it as the stream for logging.
  81. By default, the file grows indefinitely. You can specify particular
  82. values of maxBytes and backupCount to allow the file to rollover at
  83. a predetermined size.
  84. Rollover occurs whenever the current log file is nearly maxBytes in
  85. length. If backupCount is >= 1, the system will successively create
  86. new files with the same pathname as the base file, but with extensions
  87. ".1", ".2" etc. appended to it. For example, with a backupCount of 5
  88. and a base file name of "app.log", you would get "app.log",
  89. "app.log.1", "app.log.2", ... through to "app.log.5". The file being
  90. written to is always "app.log" - when it gets filled up, it is closed
  91. and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc.
  92. exist, then they are renamed to "app.log.2", "app.log.3" etc.
  93. respectively.
  94. If maxBytes is zero, rollover never occurs.
  95. """
  96. # If rotation/rollover is wanted, it doesn't make sense to use another
  97. # mode. If for example 'w' were specified, then if there were multiple
  98. # runs of the calling application, the logs from previous runs would be
  99. # lost if the 'w' is respected, because the log file would be truncated
  100. # on each run.
  101. if maxBytes > 0:
  102. mode = 'a'
  103. BaseRotatingHandler.__init__(self, filename, mode, encoding, delay)
  104. self.maxBytes = maxBytes
  105. self.backupCount = backupCount
  106. def doRollover(self):
  107. """
  108. Do a rollover, as described in __init__().
  109. """
  110. if self.stream:
  111. self.stream.close()
  112. self.stream = None
  113. if self.backupCount > 0:
  114. for i in range(self.backupCount - 1, 0, -1):
  115. sfn = "%s.%d" % (self.baseFilename, i)
  116. dfn = "%s.%d" % (self.baseFilename, i + 1)
  117. if os.path.exists(sfn):
  118. #print "%s -> %s" % (sfn, dfn)
  119. if os.path.exists(dfn):
  120. os.remove(dfn)
  121. os.rename(sfn, dfn)
  122. dfn = self.baseFilename + ".1"
  123. if os.path.exists(dfn):
  124. os.remove(dfn)
  125. os.rename(self.baseFilename, dfn)
  126. #print "%s -> %s" % (self.baseFilename, dfn)
  127. self.mode = 'w'
  128. self.stream = self._open()
  129. def shouldRollover(self, record):
  130. """
  131. Determine if rollover should occur.
  132. Basically, see if the supplied record would cause the file to exceed
  133. the size limit we have.
  134. """
  135. if self.stream is None: # delay was set...
  136. self.stream = self._open()
  137. if self.maxBytes > 0: # are we rolling over?
  138. msg = "%s\n" % self.format(record)
  139. self.stream.seek(0, 2) #due to non-posix-compliant Windows feature
  140. if self.stream.tell() + len(msg) >= self.maxBytes:
  141. return 1
  142. return 0
class TimedRotatingFileHandler(BaseRotatingHandler):
    """
    Handler for logging to a file, rotating the log file at certain timed
    intervals.

    If backupCount is > 0, when rollover is done, no more than backupCount
    files are kept - the oldest ones are deleted.
    """
    def __init__(self, filename, when='h', interval=1, backupCount=0, encoding=None, delay=False, utc=False):
        # 'when' picks the rollover unit (see table below), 'interval' is a
        # multiplier on that unit, and 'utc' selects UTC instead of local
        # time for computing rollover boundaries and filename suffixes.
        BaseRotatingHandler.__init__(self, filename, 'a', encoding, delay)
        self.when = when.upper()
        self.backupCount = backupCount
        self.utc = utc
        # Calculate the real rollover interval, which is just the number of
        # seconds between rollovers. Also set the filename suffix used when
        # a rollover occurs. Current 'when' events supported:
        # S - Seconds
        # M - Minutes
        # H - Hours
        # D - Days
        # midnight - roll over at midnight
        # W{0-6} - roll over on a certain day; 0 - Monday
        #
        # Case of the 'when' specifier is not important; lower or upper case
        # will work.
        if self.when == 'S':
            self.interval = 1 # one second
            self.suffix = "%Y-%m-%d_%H-%M-%S"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}$"
        elif self.when == 'M':
            self.interval = 60 # one minute
            self.suffix = "%Y-%m-%d_%H-%M"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}$"
        elif self.when == 'H':
            self.interval = 60 * 60 # one hour
            self.suffix = "%Y-%m-%d_%H"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}$"
        elif self.when == 'D' or self.when == 'MIDNIGHT':
            self.interval = 60 * 60 * 24 # one day
            self.suffix = "%Y-%m-%d"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}$"
        elif self.when.startswith('W'):
            self.interval = 60 * 60 * 24 * 7 # one week
            if len(self.when) != 2:
                raise ValueError("You must specify a day for weekly rollover from 0 to 6 (0 is Monday): %s" % self.when)
            if self.when[1] < '0' or self.when[1] > '6':
                raise ValueError("Invalid day specified for weekly rollover: %s" % self.when)
            self.dayOfWeek = int(self.when[1])
            self.suffix = "%Y-%m-%d"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}$"
        else:
            raise ValueError("Invalid rollover interval specified: %s" % self.when)
        # extMatch recognizes the suffixes of rotated files so that
        # getFilesToDelete() only considers files this handler produced.
        self.extMatch = re.compile(self.extMatch)
        self.interval = self.interval * interval # multiply by units requested
        # Base the first rollover on the file's mtime if it already exists,
        # so restarting the application does not reset the schedule.
        if os.path.exists(filename):
            t = os.stat(filename)[ST_MTIME]
        else:
            t = int(time.time())
        self.rolloverAt = self.computeRollover(t)

    def computeRollover(self, currentTime):
        """
        Work out the rollover time based on the specified time.
        """
        result = currentTime + self.interval
        # If we are rolling over at midnight or weekly, then the interval is already known.
        # What we need to figure out is WHEN the next interval is. In other words,
        # if you are rolling over at midnight, then your base interval is 1 day,
        # but you want to start that one day clock at midnight, not now. So, we
        # have to fudge the rolloverAt value in order to trigger the first rollover
        # at the right time. After that, the regular interval will take care of
        # the rest. Note that this code doesn't care about leap seconds. :)
        if self.when == 'MIDNIGHT' or self.when.startswith('W'):
            # This could be done with less code, but I wanted it to be clear
            if self.utc:
                t = time.gmtime(currentTime)
            else:
                t = time.localtime(currentTime)
            currentHour = t[3]
            currentMinute = t[4]
            currentSecond = t[5]
            # r is the number of seconds left between now and midnight
            r = _MIDNIGHT - ((currentHour * 60 + currentMinute) * 60 +
                    currentSecond)
            result = currentTime + r
            # If we are rolling over on a certain day, add in the number of days until
            # the next rollover, but offset by 1 since we just calculated the time
            # until the next day starts. There are three cases:
            # Case 1) The day to rollover is today; in this case, do nothing
            # Case 2) The day to rollover is further in the interval (i.e., today is
            # day 2 (Wednesday) and rollover is on day 6 (Sunday). Days to
            # next rollover is simply 6 - 2 - 1, or 3.
            # Case 3) The day to rollover is behind us in the interval (i.e., today
            # is day 5 (Saturday) and rollover is on day 3 (Thursday).
            # Days to rollover is 6 - 5 + 3, or 4. In this case, it's the
            # number of days left in the current week (1) plus the number
            # of days in the next week until the rollover day (3).
            # The calculations described in 2) and 3) above need to have a day added.
            # This is because the above time calculation takes us to midnight on this
            # day, i.e. the start of the next day.
            if self.when.startswith('W'):
                day = t[6] # 0 is Monday
                if day != self.dayOfWeek:
                    if day < self.dayOfWeek:
                        daysToWait = self.dayOfWeek - day
                    else:
                        daysToWait = 6 - day + self.dayOfWeek + 1
                    newRolloverAt = result + (daysToWait * (60 * 60 * 24))
                    if not self.utc:
                        # Adjust across a DST transition between now and the
                        # computed rollover so the wall-clock time is kept.
                        dstNow = t[-1]
                        dstAtRollover = time.localtime(newRolloverAt)[-1]
                        if dstNow != dstAtRollover:
                            if not dstNow: # DST kicks in before next rollover, so we need to deduct an hour
                                newRolloverAt = newRolloverAt - 3600
                            else: # DST bows out before next rollover, so we need to add an hour
                                newRolloverAt = newRolloverAt + 3600
                    result = newRolloverAt
        return result

    def shouldRollover(self, record):
        """
        Determine if rollover should occur.

        record is not used, as we are just comparing times, but it is needed so
        the method signatures are the same
        """
        t = int(time.time())
        if t >= self.rolloverAt:
            return 1
        #print "No need to rollover: %d, %d" % (t, self.rolloverAt)
        return 0

    def getFilesToDelete(self):
        """
        Determine the files to delete when rolling over.

        More specific than the earlier method, which just used glob.glob().
        """
        dirName, baseName = os.path.split(self.baseFilename)
        fileNames = os.listdir(dirName)
        result = []
        prefix = baseName + "."
        plen = len(prefix)
        # Collect only siblings whose suffix matches the pattern for this
        # handler's 'when' setting, then keep the newest backupCount of them.
        for fileName in fileNames:
            if fileName[:plen] == prefix:
                suffix = fileName[plen:]
                if self.extMatch.match(suffix):
                    result.append(os.path.join(dirName, fileName))
        result.sort()
        if len(result) < self.backupCount:
            result = []
        else:
            result = result[:len(result) - self.backupCount]
        return result

    def doRollover(self):
        """
        do a rollover; in this case, a date/time stamp is appended to the filename
        when the rollover happens. However, you want the file to be named for the
        start of the interval, not the current time. If there is a backup count,
        then we have to get a list of matching filenames, sort them and remove
        the one with the oldest suffix.
        """
        if self.stream:
            self.stream.close()
            self.stream = None
        # get the time that this sequence started at and make it a TimeTuple
        t = self.rolloverAt - self.interval
        if self.utc:
            timeTuple = time.gmtime(t)
        else:
            timeTuple = time.localtime(t)
        dfn = self.baseFilename + "." + time.strftime(self.suffix, timeTuple)
        if os.path.exists(dfn):
            os.remove(dfn)
        os.rename(self.baseFilename, dfn)
        if self.backupCount > 0:
            # find the oldest log file and delete it
            #s = glob.glob(self.baseFilename + ".20*")
            #if len(s) > self.backupCount:
            #    s.sort()
            #    os.remove(s[0])
            for s in self.getFilesToDelete():
                os.remove(s)
        #print "%s -> %s" % (self.baseFilename, dfn)
        self.mode = 'w'
        self.stream = self._open()
        # Schedule the next rollover strictly in the future; the loop covers
        # the case where rollovers were missed (e.g. the process slept).
        currentTime = int(time.time())
        newRolloverAt = self.computeRollover(currentTime)
        while newRolloverAt <= currentTime:
            newRolloverAt = newRolloverAt + self.interval
        #If DST changes and midnight or weekly rollover, adjust for this.
        if (self.when == 'MIDNIGHT' or self.when.startswith('W')) and not self.utc:
            dstNow = time.localtime(currentTime)[-1]
            dstAtRollover = time.localtime(newRolloverAt)[-1]
            if dstNow != dstAtRollover:
                if not dstNow: # DST kicks in before next rollover, so we need to deduct an hour
                    newRolloverAt = newRolloverAt - 3600
                else: # DST bows out before next rollover, so we need to add an hour
                    newRolloverAt = newRolloverAt + 3600
        self.rolloverAt = newRolloverAt
  337. class WatchedFileHandler(logging.FileHandler):
  338. """
  339. A handler for logging to a file, which watches the file
  340. to see if it has changed while in use. This can happen because of
  341. usage of programs such as newsyslog and logrotate which perform
  342. log file rotation. This handler, intended for use under Unix,
  343. watches the file to see if it has changed since the last emit.
  344. (A file has changed if its device or inode have changed.)
  345. If it has changed, the old file stream is closed, and the file
  346. opened to get a new stream.
  347. This handler is not appropriate for use under Windows, because
  348. under Windows open files cannot be moved or renamed - logging
  349. opens the files with exclusive locks - and so there is no need
  350. for such a handler. Furthermore, ST_INO is not supported under
  351. Windows; stat always returns zero for this value.
  352. This handler is based on a suggestion and patch by Chad J.
  353. Schroeder.
  354. """
  355. def __init__(self, filename, mode='a', encoding=None, delay=0):
  356. logging.FileHandler.__init__(self, filename, mode, encoding, delay)
  357. if not os.path.exists(self.baseFilename):
  358. self.dev, self.ino = -1, -1
  359. else:
  360. stat = os.stat(self.baseFilename)
  361. self.dev, self.ino = stat[ST_DEV], stat[ST_INO]
  362. def emit(self, record):
  363. """
  364. Emit a record.
  365. First check if the underlying file has changed, and if it
  366. has, close the old stream and reopen the file to get the
  367. current stream.
  368. """
  369. if not os.path.exists(self.baseFilename):
  370. stat = None
  371. changed = 1
  372. else:
  373. stat = os.stat(self.baseFilename)
  374. changed = (stat[ST_DEV] != self.dev) or (stat[ST_INO] != self.ino)
  375. if changed and self.stream is not None:
  376. self.stream.flush()
  377. self.stream.close()
  378. self.stream = self._open()
  379. if stat is None:
  380. stat = os.stat(self.baseFilename)
  381. self.dev, self.ino = stat[ST_DEV], stat[ST_INO]
  382. logging.FileHandler.emit(self, record)
  383. class SocketHandler(logging.Handler):
  384. """
  385. A handler class which writes logging records, in pickle format, to
  386. a streaming socket. The socket is kept open across logging calls.
  387. If the peer resets it, an attempt is made to reconnect on the next call.
  388. The pickle which is sent is that of the LogRecord's attribute dictionary
  389. (__dict__), so that the receiver does not need to have the logging module
  390. installed in order to process the logging event.
  391. To unpickle the record at the receiving end into a LogRecord, use the
  392. makeLogRecord function.
  393. """
  394. def __init__(self, host, port):
  395. """
  396. Initializes the handler with a specific host address and port.
  397. The attribute 'closeOnError' is set to 1 - which means that if
  398. a socket error occurs, the socket is silently closed and then
  399. reopened on the next logging call.
  400. """
  401. logging.Handler.__init__(self)
  402. self.host = host
  403. self.port = port
  404. self.sock = None
  405. self.closeOnError = 0
  406. self.retryTime = None
  407. #
  408. # Exponential backoff parameters.
  409. #
  410. self.retryStart = 1.0
  411. self.retryMax = 30.0
  412. self.retryFactor = 2.0
  413. def makeSocket(self, timeout=1):
  414. """
  415. A factory method which allows subclasses to define the precise
  416. type of socket they want.
  417. """
  418. s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
  419. if hasattr(s, 'settimeout'):
  420. s.settimeout(timeout)
  421. s.connect((self.host, self.port))
  422. return s
  423. def createSocket(self):
  424. """
  425. Try to create a socket, using an exponential backoff with
  426. a max retry time. Thanks to Robert Olson for the original patch
  427. (SF #815911) which has been slightly refactored.
  428. """
  429. now = time.time()
  430. # Either retryTime is None, in which case this
  431. # is the first time back after a disconnect, or
  432. # we've waited long enough.
  433. if self.retryTime is None:
  434. attempt = 1
  435. else:
  436. attempt = (now >= self.retryTime)
  437. if attempt:
  438. try:
  439. self.sock = self.makeSocket()
  440. self.retryTime = None # next time, no delay before trying
  441. except socket.error:
  442. #Creation failed, so set the retry time and return.
  443. if self.retryTime is None:
  444. self.retryPeriod = self.retryStart
  445. else:
  446. self.retryPeriod = self.retryPeriod * self.retryFactor
  447. if self.retryPeriod > self.retryMax:
  448. self.retryPeriod = self.retryMax
  449. self.retryTime = now + self.retryPeriod
  450. def send(self, s):
  451. """
  452. Send a pickled string to the socket.
  453. This function allows for partial sends which can happen when the
  454. network is busy.
  455. """
  456. if self.sock is None:
  457. self.createSocket()
  458. #self.sock can be None either because we haven't reached the retry
  459. #time yet, or because we have reached the retry time and retried,
  460. #but are still unable to connect.
  461. if self.sock:
  462. try:
  463. if hasattr(self.sock, "sendall"):
  464. self.sock.sendall(s)
  465. else:
  466. sentsofar = 0
  467. left = len(s)
  468. while left > 0:
  469. sent = self.sock.send(s[sentsofar:])
  470. sentsofar = sentsofar + sent
  471. left = left - sent
  472. except socket.error:
  473. self.sock.close()
  474. self.sock = None # so we can call createSocket next time
  475. def makePickle(self, record):
  476. """
  477. Pickles the record in binary format with a length prefix, and
  478. returns it ready for transmission across the socket.
  479. """
  480. ei = record.exc_info
  481. if ei:
  482. dummy = self.format(record) # just to get traceback text into record.exc_text
  483. record.exc_info = None # to avoid Unpickleable error
  484. s = cPickle.dumps(record.__dict__, 1)
  485. if ei:
  486. record.exc_info = ei # for next handler
  487. slen = struct.pack(">L", len(s))
  488. return slen + s
  489. def handleError(self, record):
  490. """
  491. Handle an error during logging.
  492. An error has occurred during logging. Most likely cause -
  493. connection lost. Close the socket so that we can retry on the
  494. next event.
  495. """
  496. if self.closeOnError and self.sock:
  497. self.sock.close()
  498. self.sock = None #try to reconnect next time
  499. else:
  500. logging.Handler.handleError(self, record)
  501. def emit(self, record):
  502. """
  503. Emit a record.
  504. Pickles the record and writes it to the socket in binary format.
  505. If there is an error with the socket, silently drop the packet.
  506. If there was a problem with the socket, re-establishes the
  507. socket.
  508. """
  509. try:
  510. s = self.makePickle(record)
  511. self.send(s)
  512. except (KeyboardInterrupt, SystemExit):
  513. raise
  514. except:
  515. self.handleError(record)
  516. def close(self):
  517. """
  518. Closes the socket.
  519. """
  520. if self.sock:
  521. self.sock.close()
  522. self.sock = None
  523. logging.Handler.close(self)
  524. class DatagramHandler(SocketHandler):
  525. """
  526. A handler class which writes logging records, in pickle format, to
  527. a datagram socket. The pickle which is sent is that of the LogRecord's
  528. attribute dictionary (__dict__), so that the receiver does not need to
  529. have the logging module installed in order to process the logging event.
  530. To unpickle the record at the receiving end into a LogRecord, use the
  531. makeLogRecord function.
  532. """
  533. def __init__(self, host, port):
  534. """
  535. Initializes the handler with a specific host address and port.
  536. """
  537. SocketHandler.__init__(self, host, port)
  538. self.closeOnError = 0
  539. def makeSocket(self):
  540. """
  541. The factory method of SocketHandler is here overridden to create
  542. a UDP socket (SOCK_DGRAM).
  543. """
  544. s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
  545. return s
  546. def send(self, s):
  547. """
  548. Send a pickled string to a socket.
  549. This function no longer allows for partial sends which can happen
  550. when the network is busy - UDP does not guarantee delivery and
  551. can deliver packets out of sequence.
  552. """
  553. if self.sock is None:
  554. self.createSocket()
  555. self.sock.sendto(s, (self.host, self.port))
  556. class SysLogHandler(logging.Handler):
  557. """
  558. A handler class which sends formatted logging records to a syslog
  559. server. Based on Sam Rushing's syslog module:
  560. http://www.nightmare.com/squirl/python-ext/misc/syslog.py
  561. Contributed by Nicolas Untz (after which minor refactoring changes
  562. have been made).
  563. """
  564. # from <linux/sys/syslog.h>:
  565. # ======================================================================
  566. # priorities/facilities are encoded into a single 32-bit quantity, where
  567. # the bottom 3 bits are the priority (0-7) and the top 28 bits are the
  568. # facility (0-big number). Both the priorities and the facilities map
  569. # roughly one-to-one to strings in the syslogd(8) source code. This
  570. # mapping is included in this file.
  571. #
  572. # priorities (these are ordered)
  573. LOG_EMERG = 0 # system is unusable
  574. LOG_ALERT = 1 # action must be taken immediately
  575. LOG_CRIT = 2 # critical conditions
  576. LOG_ERR = 3 # error conditions
  577. LOG_WARNING = 4 # warning conditions
  578. LOG_NOTICE = 5 # normal but significant condition
  579. LOG_INFO = 6 # informational
  580. LOG_DEBUG = 7 # debug-level messages
  581. # facility codes
  582. LOG_KERN = 0 # kernel messages
  583. LOG_USER = 1 # random user-level messages
  584. LOG_MAIL = 2 # mail system
  585. LOG_DAEMON = 3 # system daemons
  586. LOG_AUTH = 4 # security/authorization messages
  587. LOG_SYSLOG = 5 # messages generated internally by syslogd
  588. LOG_LPR = 6 # line printer subsystem
  589. LOG_NEWS = 7 # network news subsystem
  590. LOG_UUCP = 8 # UUCP subsystem
  591. LOG_CRON = 9 # clock daemon
  592. LOG_AUTHPRIV = 10 # security/authorization messages (private)
  593. LOG_FTP = 11 # FTP daemon
  594. # other codes through 15 reserved for system use
  595. LOG_LOCAL0 = 16 # reserved for local use
  596. LOG_LOCAL1 = 17 # reserved for local use
  597. LOG_LOCAL2 = 18 # reserved for local use
  598. LOG_LOCAL3 = 19 # reserved for local use
  599. LOG_LOCAL4 = 20 # reserved for local use
  600. LOG_LOCAL5 = 21 # reserved for local use
  601. LOG_LOCAL6 = 22 # reserved for local use
  602. LOG_LOCAL7 = 23 # reserved for local use
  603. priority_names = {
  604. "alert": LOG_ALERT,
  605. "crit": LOG_CRIT,
  606. "critical": LOG_CRIT,
  607. "debug": LOG_DEBUG,
  608. "emerg": LOG_EMERG,
  609. "err": LOG_ERR,
  610. "error": LOG_ERR, # DEPRECATED
  611. "info": LOG_INFO,
  612. "notice": LOG_NOTICE,
  613. "panic": LOG_EMERG, # DEPRECATED
  614. "warn": LOG_WARNING, # DEPRECATED
  615. "warning": LOG_WARNING,
  616. }
  617. facility_names = {
  618. "auth": LOG_AUTH,
  619. "authpriv": LOG_AUTHPRIV,
  620. "cron": LOG_CRON,
  621. "daemon": LOG_DAEMON,
  622. "ftp": LOG_FTP,
  623. "kern": LOG_KERN,
  624. "lpr": LOG_LPR,
  625. "mail": LOG_MAIL,
  626. "news": LOG_NEWS,
  627. "security": LOG_AUTH, # DEPRECATED
  628. "syslog": LOG_SYSLOG,
  629. "user": LOG_USER,
  630. "uucp": LOG_UUCP,
  631. "local0": LOG_LOCAL0,
  632. "local1": LOG_LOCAL1,
  633. "local2": LOG_LOCAL2,
  634. "local3": LOG_LOCAL3,
  635. "local4": LOG_LOCAL4,
  636. "local5": LOG_LOCAL5,
  637. "local6": LOG_LOCAL6,
  638. "local7": LOG_LOCAL7,
  639. }
  640. #The map below appears to be trivially lowercasing the key. However,
  641. #there's more to it than meets the eye - in some locales, lowercasing
  642. #gives unexpected results. See SF #1524081: in the Turkish locale,
  643. #"INFO".lower() != "info"
  644. priority_map = {
  645. "DEBUG" : "debug",
  646. "INFO" : "info",
  647. "WARNING" : "warning",
  648. "ERROR" : "error",
  649. "CRITICAL" : "critical"
  650. }
  651. def __init__(self, address=('localhost', SYSLOG_UDP_PORT),
  652. facility=LOG_USER, socktype=socket.SOCK_DGRAM):
  653. """
  654. Initialize a handler.
  655. If address is specified as a string, a UNIX socket is used. To log to a
  656. local syslogd, "SysLogHandler(address="/dev/log")" can be used.
  657. If facility is not specified, LOG_USER is used.
  658. """
  659. logging.Handler.__init__(self)
  660. self.address = address
  661. self.facility = facility
  662. self.socktype = socktype
  663. if isinstance(address, basestring):
  664. self.unixsocket = 1
  665. self._connect_unixsocket(address)
  666. else:
  667. self.unixsocket = 0
  668. self.socket = socket.socket(socket.AF_INET, socktype)
  669. if socktype == socket.SOCK_STREAM:
  670. self.socket.connect(address)
  671. self.formatter = None
  672. def _connect_unixsocket(self, address):
  673. self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
  674. # syslog may require either DGRAM or STREAM sockets
  675. try:
  676. self.socket.connect(address)
  677. except socket.error:
  678. self.socket.close()
  679. self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
  680. self.socket.connect(address)
  681. # curious: when talking to the unix-domain '/dev/log' socket, a
  682. # zero-terminator seems to be required. this string is placed
  683. # into a class variable so that it can be overridden if
  684. # necessary.
  685. log_format_string = '<%d>%s\000'
  686. def encodePriority(self, facility, priority):
  687. """
  688. Encode the facility and priority. You can pass in strings or
  689. integers - if strings are passed, the facility_names and
  690. priority_names mapping dictionaries are used to convert them to
  691. integers.
  692. """
  693. if isinstance(facility, basestring):
  694. facility = self.facility_names[facility]
  695. if isinstance(priority, basestring):
  696. priority = self.priority_names[priority]
  697. return (facility << 3) | priority
  698. def close (self):
  699. """
  700. Closes the socket.
  701. """
  702. if self.unixsocket:
  703. self.socket.close()
  704. logging.Handler.close(self)
  705. def mapPriority(self, levelName):
  706. """
  707. Map a logging level name to a key in the priority_names map.
  708. This is useful in two scenarios: when custom levels are being
  709. used, and in the case where you can't do a straightforward
  710. mapping by lowercasing the logging level name because of locale-
  711. specific issues (see SF #1524081).
  712. """
  713. return self.priority_map.get(levelName, "warning")
  714. def emit(self, record):
  715. """
  716. Emit a record.
  717. The record is formatted, and then sent to the syslog server. If
  718. exception information is present, it is NOT sent to the server.
  719. """
  720. msg = self.format(record) + '\000'
  721. """
  722. We need to convert record level to lowercase, maybe this will
  723. change in the future.
  724. """
  725. prio = '<%d>' % self.encodePriority(self.facility,
  726. self.mapPriority(record.levelname))
  727. # Message is a string. Convert to bytes as required by RFC 5424
  728. if type(msg) is unicode:
  729. msg = msg.encode('utf-8')
  730. if codecs:
  731. msg = codecs.BOM_UTF8 + msg
  732. msg = prio + msg
  733. try:
  734. if self.unixsocket:
  735. try:
  736. self.socket.send(msg)
  737. except socket.error:
  738. self._connect_unixsocket(self.address)
  739. self.socket.send(msg)
  740. elif self.socktype == socket.SOCK_DGRAM:
  741. self.socket.sendto(msg, self.address)
  742. else:
  743. self.socket.sendall(msg)
  744. except (KeyboardInterrupt, SystemExit):
  745. raise
  746. except:
  747. self.handleError(record)
  748. class SMTPHandler(logging.Handler):
  749. """
  750. A handler class which sends an SMTP email for each logging event.
  751. """
  752. def __init__(self, mailhost, fromaddr, toaddrs, subject,
  753. credentials=None, secure=None):
  754. """
  755. Initialize the handler.
  756. Initialize the instance with the from and to addresses and subject
  757. line of the email. To specify a non-standard SMTP port, use the
  758. (host, port) tuple format for the mailhost argument. To specify
  759. authentication credentials, supply a (username, password) tuple
  760. for the credentials argument. To specify the use of a secure
  761. protocol (TLS), pass in a tuple for the secure argument. This will
  762. only be used when authentication credentials are supplied. The tuple
  763. will be either an empty tuple, or a single-value tuple with the name
  764. of a keyfile, or a 2-value tuple with the names of the keyfile and
  765. certificate file. (This tuple is passed to the `starttls` method).
  766. """
  767. logging.Handler.__init__(self)
  768. if isinstance(mailhost, tuple):
  769. self.mailhost, self.mailport = mailhost
  770. else:
  771. self.mailhost, self.mailport = mailhost, None
  772. if isinstance(credentials, tuple):
  773. self.username, self.password = credentials
  774. else:
  775. self.username = None
  776. self.fromaddr = fromaddr
  777. if isinstance(toaddrs, basestring):
  778. toaddrs = [toaddrs]
  779. self.toaddrs = toaddrs
  780. self.subject = subject
  781. self.secure = secure
  782. def getSubject(self, record):
  783. """
  784. Determine the subject for the email.
  785. If you want to specify a subject line which is record-dependent,
  786. override this method.
  787. """
  788. return self.subject
  789. def emit(self, record):
  790. """
  791. Emit a record.
  792. Format the record and send it to the specified addressees.
  793. """
  794. try:
  795. import smtplib
  796. from email.utils import formatdate
  797. port = self.mailport
  798. if not port:
  799. port = smtplib.SMTP_PORT
  800. smtp = smtplib.SMTP(self.mailhost, port)
  801. msg = self.format(record)
  802. msg = "From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s" % (
  803. self.fromaddr,
  804. ",".join(self.toaddrs),
  805. self.getSubject(record),
  806. formatdate(), msg)
  807. if self.username:
  808. if self.secure is not None:
  809. smtp.ehlo()
  810. smtp.starttls(*self.secure)
  811. smtp.ehlo()
  812. smtp.login(self.username, self.password)
  813. smtp.sendmail(self.fromaddr, self.toaddrs, msg)
  814. smtp.quit()
  815. except (KeyboardInterrupt, SystemExit):
  816. raise
  817. except:
  818. self.handleError(record)
  819. class NTEventLogHandler(logging.Handler):
  820. """
  821. A handler class which sends events to the NT Event Log. Adds a
  822. registry entry for the specified application name. If no dllname is
  823. provided, win32service.pyd (which contains some basic message
  824. placeholders) is used. Note that use of these placeholders will make
  825. your event logs big, as the entire message source is held in the log.
  826. If you want slimmer logs, you have to pass in the name of your own DLL
  827. which contains the message definitions you want to use in the event log.
  828. """
  829. def __init__(self, appname, dllname=None, logtype="Application"):
  830. logging.Handler.__init__(self)
  831. try:
  832. import win32evtlogutil, win32evtlog
  833. self.appname = appname
  834. self._welu = win32evtlogutil
  835. if not dllname:
  836. dllname = os.path.split(self._welu.__file__)
  837. dllname = os.path.split(dllname[0])
  838. dllname = os.path.join(dllname[0], r'win32service.pyd')
  839. self.dllname = dllname
  840. self.logtype = logtype
  841. self._welu.AddSourceToRegistry(appname, dllname, logtype)
  842. self.deftype = win32evtlog.EVENTLOG_ERROR_TYPE
  843. self.typemap = {
  844. logging.DEBUG : win32evtlog.EVENTLOG_INFORMATION_TYPE,
  845. logging.INFO : win32evtlog.EVENTLOG_INFORMATION_TYPE,
  846. logging.WARNING : win32evtlog.EVENTLOG_WARNING_TYPE,
  847. logging.ERROR : win32evtlog.EVENTLOG_ERROR_TYPE,
  848. logging.CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE,
  849. }
  850. except ImportError:
  851. print("The Python Win32 extensions for NT (service, event "\
  852. "logging) appear not to be available.")
  853. self._welu = None
  854. def getMessageID(self, record):
  855. """
  856. Return the message ID for the event record. If you are using your
  857. own messages, you could do this by having the msg passed to the
  858. logger being an ID rather than a formatting string. Then, in here,
  859. you could use a dictionary lookup to get the message ID. This
  860. version returns 1, which is the base message ID in win32service.pyd.
  861. """
  862. return 1
  863. def getEventCategory(self, record):
  864. """
  865. Return the event category for the record.
  866. Override this if you want to specify your own categories. This version
  867. returns 0.
  868. """
  869. return 0
  870. def getEventType(self, record):
  871. """
  872. Return the event type for the record.
  873. Override this if you want to specify your own types. This version does
  874. a mapping using the handler's typemap attribute, which is set up in
  875. __init__() to a dictionary which contains mappings for DEBUG, INFO,
  876. WARNING, ERROR and CRITICAL. If you are using your own levels you will
  877. either need to override this method or place a suitable dictionary in
  878. the handler's typemap attribute.
  879. """
  880. return self.typemap.get(record.levelno, self.deftype)
  881. def emit(self, record):
  882. """
  883. Emit a record.
  884. Determine the message ID, event category and event type. Then
  885. log the message in the NT event log.
  886. """
  887. if self._welu:
  888. try:
  889. id = self.getMessageID(record)
  890. cat = self.getEventCategory(record)
  891. type = self.getEventType(record)
  892. msg = self.format(record)
  893. self._welu.ReportEvent(self.appname, id, cat, type, [msg])
  894. except (KeyboardInterrupt, SystemExit):
  895. raise
  896. except:
  897. self.handleError(record)
  898. def close(self):
  899. """
  900. Clean up this handler.
  901. You can remove the application name from the registry as a
  902. source of event log entries. However, if you do this, you will
  903. not be able to see the events as you intended in the Event Log
  904. Viewer - it needs to be able to access the registry to get the
  905. DLL name.
  906. """
  907. #self._welu.RemoveSourceFromRegistry(self.appname, self.logtype)
  908. logging.Handler.close(self)
  909. class HTTPHandler(logging.Handler):
  910. """
  911. A class which sends records to a Web server, using either GET or
  912. POST semantics.
  913. """
  914. def __init__(self, host, url, method="GET"):
  915. """
  916. Initialize the instance with the host, the request URL, and the method
  917. ("GET" or "POST")
  918. """
  919. logging.Handler.__init__(self)
  920. method = method.upper()
  921. if method not in ["GET", "POST"]:
  922. raise ValueError("method must be GET or POST")
  923. self.host = host
  924. self.url = url
  925. self.method = method
  926. def mapLogRecord(self, record):
  927. """
  928. Default implementation of mapping the log record into a dict
  929. that is sent as the CGI data. Overwrite in your class.
  930. Contributed by Franz Glasner.
  931. """
  932. return record.__dict__
  933. def emit(self, record):
  934. """
  935. Emit a record.
  936. Send the record to the Web server as a percent-encoded dictionary
  937. """
  938. try:
  939. import httplib, urllib
  940. host = self.host
  941. h = httplib.HTTP(host)
  942. url = self.url
  943. data = urllib.urlencode(self.mapLogRecord(record))
  944. if self.method == "GET":
  945. if (url.find('?') >= 0):
  946. sep = '&'
  947. else:
  948. sep = '?'
  949. url = url + "%c%s" % (sep, data)
  950. h.putrequest(self.method, url)
  951. # support multiple hosts on one IP address...
  952. # need to strip optional :port from host, if present
  953. i = host.find(":")
  954. if i >= 0:
  955. host = host[:i]
  956. h.putheader("Host", host)
  957. if self.method == "POST":
  958. h.putheader("Content-type",
  959. "application/x-www-form-urlencoded")
  960. h.putheader("Content-length", str(len(data)))
  961. h.endheaders(data if self.method == "POST" else None)
  962. h.getreply() #can't do anything with the result
  963. except (KeyboardInterrupt, SystemExit):
  964. raise
  965. except:
  966. self.handleError(record)
  967. class BufferingHandler(logging.Handler):
  968. """
  969. A handler class which buffers logging records in memory. Whenever each
  970. record is added to the buffer, a check is made to see if the buffer should
  971. be flushed. If it should, then flush() is expected to do what's needed.
  972. """
  973. def __init__(self, capacity):
  974. """
  975. Initialize the handler with the buffer size.
  976. """
  977. logging.Handler.__init__(self)
  978. self.capacity = capacity
  979. self.buffer = []
  980. def shouldFlush(self, record):
  981. """
  982. Should the handler flush its buffer?
  983. Returns true if the buffer is up to capacity. This method can be
  984. overridden to implement custom flushing strategies.
  985. """
  986. return (len(self.buffer) >= self.capacity)
  987. def emit(self, record):
  988. """
  989. Emit a record.
  990. Append the record. If shouldFlush() tells us to, call flush() to process
  991. the buffer.
  992. """
  993. self.buffer.append(record)
  994. if self.shouldFlush(record):
  995. self.flush()
  996. def flush(self):
  997. """
  998. Override to implement custom flushing behaviour.
  999. This version just zaps the buffer to empty.
  1000. """
  1001. self.buffer = []
  1002. def close(self):
  1003. """
  1004. Close the handler.
  1005. This version just flushes and chains to the parent class' close().
  1006. """
  1007. self.flush()
  1008. logging.Handler.close(self)
  1009. class MemoryHandler(BufferingHandler):
  1010. """
  1011. A handler class which buffers logging records in memory, periodically
  1012. flushing them to a target handler. Flushing occurs whenever the buffer
  1013. is full, or when an event of a certain severity or greater is seen.
  1014. """
  1015. def __init__(self, capacity, flushLevel=logging.ERROR, target=None):
  1016. """
  1017. Initialize the handler with the buffer size, the level at which
  1018. flushing should occur and an optional target.
  1019. Note that without a target being set either here or via setTarget(),
  1020. a MemoryHandler is no use to anyone!
  1021. """
  1022. BufferingHandler.__init__(self, capacity)
  1023. self.flushLevel = flushLevel
  1024. self.target = target
  1025. def shouldFlush(self, record):
  1026. """
  1027. Check for buffer full or a record at the flushLevel or higher.
  1028. """
  1029. return (len(self.buffer) >= self.capacity) or \
  1030. (record.levelno >= self.flushLevel)
  1031. def setTarget(self, target):
  1032. """
  1033. Set the target handler for this handler.
  1034. """
  1035. self.target = target
  1036. def flush(self):
  1037. """
  1038. For a MemoryHandler, flushing means just sending the buffered
  1039. records to the target, if there is one. Override if you want
  1040. different behaviour.
  1041. """
  1042. if self.target:
  1043. for record in self.buffer:
  1044. self.target.handle(record)
  1045. self.buffer = []
  1046. def close(self):
  1047. """
  1048. Flush, set the target to None and lose the buffer.
  1049. """
  1050. self.flush()
  1051. self.target = None
  1052. BufferingHandler.close(self)