Source code for picologging.handlers

import os
import pickle
import queue
import re
import socket
import struct
import threading
import time

import picologging

_MIDNIGHT = 24 * 60 * 60  # number of seconds in a day


class WatchedFileHandler(picologging.FileHandler):
    """
    A handler for logging to a file, which watches the file
    to see if it has changed while in use. This can happen because of
    usage of programs such as newsyslog and logrotate which perform
    log file rotation. This handler, intended for use under Unix,
    watches the file to see if it has changed since the last emit.
    (A file has changed if its device or inode have changed.)
    If it has changed, the old file stream is closed, and the file
    opened to get a new stream.

    This handler is not appropriate for use under Windows, because
    under Windows open files cannot be moved or renamed - logging
    opens the files with exclusive locks - and so there is no need
    for such a handler. Furthermore, ST_INO is not supported under
    Windows; stat always returns zero for this value.
    """

    def __init__(self, filename, mode="a", encoding=None, delay=False):
        picologging.FileHandler.__init__(
            self, filename, mode=mode, encoding=encoding, delay=delay
        )
        self.dev, self.ino = -1, -1
        self._statstream()

    def _statstream(self):
        if self.stream:
            sres = os.fstat(self.stream.fileno())
            self.dev, self.ino = sres.st_dev, sres.st_ino

    def reopenIfNeeded(self):
        """
        Reopen log file if needed.

        Checks if the underlying file has changed, and if it
        has, close the old stream and reopen the file to get the
        current stream.
        """
        try:
            sres = os.stat(self.baseFilename)
        except FileNotFoundError:
            sres = None
        if not sres or sres.st_dev != self.dev or sres.st_ino != self.ino:
            if self.stream is not None:
                self.stream.flush()
                self.stream.close()
                self.stream = None  # See Issue #21742: _open() might fail.
                self.stream = self._open()
                self._statstream()

    def emit(self, record):
        """
        Emit a record.

        If the underlying file has changed, reopen the file
        before emitting the record to it.
        """
        self.reopenIfNeeded()
        picologging.FileHandler.emit(self, record)

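
# Illustrative sketch (not part of picologging): a WatchedFileHandler keeps
# logging to the same path even after an external tool such as logrotate
# moves the file aside. The logger name and file path below are hypothetical.
def _example_watched_file_usage():
    logger = picologging.getLogger("example.watched")
    logger.addHandler(WatchedFileHandler("app.log"))
    # If logrotate renames app.log between these two calls, the handler
    # notices the changed device/inode and reopens a fresh app.log.
    logger.info("before rotation")
    logger.info("after rotation")
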

class BaseRotatingHandler(picologging.FileHandler):
    """
    Base class for handlers that rotate log files at a certain point.
    Not meant to be instantiated directly. Instead, use
    RotatingFileHandler or TimedRotatingFileHandler.
    """

    namer = None
    rotator = None

    def __init__(self, filename, mode, encoding=None, delay=False):
        """
        Use the specified filename for streamed logging.
        """
        picologging.FileHandler.__init__(
            self, filename, mode=mode, encoding=encoding, delay=delay
        )
        self.mode = mode
        self.encoding = encoding

    def shouldRollover(self, record):
        """
        Determine if rollover should occur.

        Should be implemented in inherited classes.
        """

    def doRollover(self):
        """
        Do a rollover.

        Should be implemented in inherited classes.
        """

    def emit(self, record):
        """
        Emit a record.

        Output the record to the file, catering for rollover as described
        in doRollover().
        """
        try:
            if self.shouldRollover(record):
                self.doRollover()
            picologging.FileHandler.emit(self, record)
        except Exception:
            self.handleError(record)

    def rotation_filename(self, default_name):
        """
        Modify the filename of a log file when rotating.

        This is provided so that a custom filename can be provided.

        The default implementation calls the 'namer' attribute of the
        handler, if it's callable, passing the default name to it. If the
        attribute isn't callable (the default is None), the name is returned
        unchanged.

        :param default_name: The default name for the log file.
        """
        if not callable(self.namer):
            result = default_name
        else:
            result = self.namer(default_name)
        return result

    def rotate(self, source, dest):
        """
        When rotating, rotate the current log.

        The default implementation calls the 'rotator' attribute of the
        handler, if it's callable, passing the source and dest arguments to
        it. If the attribute isn't callable (the default is None), the source
        is simply renamed to the destination.

        :param source: The source filename. This is normally the base
                       filename, e.g. 'test.log'
        :param dest:   The destination filename. This is normally what the
                       source is rotated to, e.g. 'test.log.1'.
        """
        if not callable(self.rotator):
            # Issue: https://bugs.python.org/issue18940
            # A file may not have been created if delay is True.
            if os.path.exists(source):
                os.rename(source, dest)
        else:
            self.rotator(source, dest)

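
# Illustrative sketch (not part of picologging): the namer/rotator hooks let
# you customise rotation, for example gzip-compressing rotated files. This
# mirrors the pattern from the stdlib logging cookbook; the helper names
# below are hypothetical.
def _example_gzip_rotation(handler):
    import gzip

    def namer(default_name):
        # Rotated files get a ".gz" suffix, e.g. "test.log.1.gz".
        return default_name + ".gz"

    def rotator(source, dest):
        # Compress the closed log into the destination, then drop the source.
        with open(source, "rb") as sf, gzip.open(dest, "wb") as df:
            df.write(sf.read())
        os.remove(source)

    handler.namer = namer
    handler.rotator = rotator
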

class RotatingFileHandler(BaseRotatingHandler):
    """
    Handler for logging to a set of files, which switches from one file
    to the next when the current file reaches a certain size.
    """

    def __init__(
        self, filename, mode="a", maxBytes=0, backupCount=0, encoding=None, delay=False
    ):
        """
        Open the specified file and use it as the stream for logging.

        By default, the file grows indefinitely. You can specify particular
        values of maxBytes and backupCount to allow the file to rollover at
        a predetermined size.

        Rollover occurs whenever the current log file is nearly maxBytes in
        length. If backupCount is >= 1, the system will successively create
        new files with the same pathname as the base file, but with
        extensions ".1", ".2" etc. appended to it. For example, with a
        backupCount of 5 and a base file name of "app.log", you would get
        "app.log", "app.log.1", "app.log.2", ... through to "app.log.5". The
        file being written to is always "app.log" - when it gets filled up,
        it is closed and renamed to "app.log.1", and if files "app.log.1",
        "app.log.2" etc. exist, then they are renamed to "app.log.2",
        "app.log.3" etc. respectively.

        If maxBytes is zero, rollover never occurs.
        """
        # If rotation/rollover is wanted, it doesn't make sense to use another
        # mode. If for example 'w' were specified, then if there were multiple
        # runs of the calling application, the logs from previous runs would
        # be lost if the 'w' is respected, because the log file would be
        # truncated on each run.
        if maxBytes > 0:
            mode = "a"
        BaseRotatingHandler.__init__(
            self, filename, mode, encoding=encoding, delay=delay
        )
        self.maxBytes = maxBytes
        self.backupCount = backupCount

    def doRollover(self):
        """
        Do a rollover, as described in __init__().
        """
        if self.stream:
            self.stream.close()
            self.stream = None
        if self.backupCount > 0:
            for i in range(self.backupCount - 1, 0, -1):
                sfn = self.rotation_filename("%s.%d" % (self.baseFilename, i))
                dfn = self.rotation_filename("%s.%d" % (self.baseFilename, i + 1))
                if os.path.exists(sfn):
                    if os.path.exists(dfn):
                        os.remove(dfn)
                    os.rename(sfn, dfn)
            dfn = self.rotation_filename(self.baseFilename + ".1")
            if os.path.exists(dfn):
                os.remove(dfn)
            self.rotate(self.baseFilename, dfn)
        if not self.delay:
            self.stream = self._open()

    def shouldRollover(self, record):
        """
        Determine if rollover should occur.

        Basically, see if the supplied record would cause the file to exceed
        the size limit we have.
        """
        # See bpo-45401: Never rollover anything other than regular files
        if os.path.exists(self.baseFilename) and not os.path.isfile(self.baseFilename):
            return False
        if self.stream is None:
            self.stream = self._open()
        if self.maxBytes > 0:
            msg = "%s\n" % self.format(record)
            self.stream.seek(0, 2)  # Due to non-posix-compliant Windows feature
            if self.stream.tell() + len(msg) >= self.maxBytes:
                return True
        return False

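
# Illustrative sketch (not part of picologging): size-based rotation keeping
# at most "app.log" plus five numbered backups. The logger name, path and
# sizes are hypothetical.
def _example_size_rotation():
    logger = picologging.getLogger("example.rotating")
    handler = RotatingFileHandler("app.log", maxBytes=1_048_576, backupCount=5)
    logger.addHandler(handler)
    for i in range(10_000):
        # Once app.log nears 1 MiB it is renamed to app.log.1, and older
        # backups shift to app.log.2 ... app.log.5.
        logger.info("message %d", i)
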

class TimedRotatingFileHandler(BaseRotatingHandler):
    """
    Handler for logging to a file, rotating the log file at certain timed
    intervals.

    If backupCount is > 0, when rollover is done, no more than backupCount
    files are kept - the oldest ones are deleted.
    """

    def __init__(
        self,
        filename,
        when="h",
        interval=1,
        backupCount=0,
        encoding=None,
        delay=False,
        utc=False,
        atTime=None,
    ):
        BaseRotatingHandler.__init__(
            self, filename, "a", encoding=encoding, delay=delay
        )
        self.when = when.upper()
        self.backupCount = backupCount
        self.utc = utc
        self.atTime = atTime
        # Calculate the real rollover interval, which is just the number of
        # seconds between rollovers. Also set the filename suffix used when
        # a rollover occurs. Current 'when' events supported:
        # S - Seconds
        # M - Minutes
        # H - Hours
        # D - Days
        # midnight - roll over at midnight
        # W{0-6} - roll over on a certain day; 0 - Monday
        #
        # Case of the 'when' specifier is not important; lower or upper case
        # will work.
        if self.when == "S":
            self.interval = 1  # one second
            self.suffix = "%Y-%m-%d_%H-%M-%S"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}(\.\w+)?$"
        elif self.when == "M":
            self.interval = 60  # one minute
            self.suffix = "%Y-%m-%d_%H-%M"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}(\.\w+)?$"
        elif self.when == "H":
            self.interval = 60 * 60  # one hour
            self.suffix = "%Y-%m-%d_%H"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}(\.\w+)?$"
        elif self.when == "D" or self.when == "MIDNIGHT":
            self.interval = 60 * 60 * 24  # one day
            self.suffix = "%Y-%m-%d"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$"
        elif self.when.startswith("W"):
            self.interval = 60 * 60 * 24 * 7  # one week
            if len(self.when) != 2:
                raise ValueError(
                    "You must specify a day for weekly rollover from 0 to 6 "
                    "(0 is Monday): %s" % self.when
                )
            if self.when[1] < "0" or self.when[1] > "6":
                raise ValueError(
                    "Invalid day specified for weekly rollover: %s" % self.when
                )
            self.dayOfWeek = int(self.when[1])
            self.suffix = "%Y-%m-%d"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$"
        else:
            raise ValueError("Invalid rollover interval specified: %s" % self.when)

        self.extMatch = re.compile(self.extMatch, re.ASCII)
        self.interval = self.interval * interval  # multiply by units requested
        # The following line added because the filename passed in could be a
        # path object (see Issue #27493), but self.baseFilename will be a string
        filename = self.baseFilename
        if os.path.exists(filename):
            t = os.stat(filename).st_mtime
        else:
            t = int(time.time())
        self.rollover_at = self.computeRollover(t)

    def computeRollover(self, current_time):
        """
        Work out the rollover time based on the specified time.
        """
        result = current_time + self.interval
        # If we are rolling over at midnight or weekly, then the interval is
        # already known. What we need to figure out is WHEN the next interval
        # is. In other words, if you are rolling over at midnight, then your
        # base interval is 1 day, but you want to start that one day clock at
        # midnight, not now. So, we have to fudge the rollover_at value in
        # order to trigger the first rollover at the right time. After that,
        # the regular interval will take care of the rest. Note that this
        # code doesn't care about leap seconds. :)
        if self.when == "MIDNIGHT" or self.when.startswith("W"):
            # This could be done with less code, but I wanted it to be clear
            if self.utc:
                t = time.gmtime(current_time)
            else:
                t = time.localtime(current_time)
            current_hour = t[3]
            current_minute = t[4]
            current_second = t[5]
            current_day = t[6]
            # r is the number of seconds left between now and the next rotation
            if self.atTime is None:
                rotate_ts = _MIDNIGHT
            else:
                rotate_ts = (
                    self.atTime.hour * 60 + self.atTime.minute
                ) * 60 + self.atTime.second

            r = rotate_ts - ((current_hour * 60 + current_minute) * 60 + current_second)
            if r < 0:
                # Rotate time is before the current time (for example when
                # self.rotateAt is 13:45 and it is now 14:15), rotation is
                # tomorrow.
                r += _MIDNIGHT
                current_day = (current_day + 1) % 7
            result = current_time + r
            # If we are rolling over on a certain day, add in the number of
            # days until the next rollover, but offset by 1 since we just
            # calculated the time until the next day starts. There are three
            # cases:
            # Case 1) The day to rollover is today; in this case, do nothing
            # Case 2) The day to rollover is further in the interval (i.e.,
            #         today is day 2 (Wednesday) and rollover is on day 6
            #         (Sunday). Days to next rollover is simply
            #         6 - 2 - 1, or 3.
            # Case 3) The day to rollover is behind us in the interval (i.e.,
            #         today is day 5 (Saturday) and rollover is on day 3
            #         (Thursday). Days to rollover is 6 - 5 + 3, or 4. In
            #         this case, it's the number of days left in the current
            #         week (1) plus the number of days in the next week until
            #         the rollover day (3).
            # The calculations described in 2) and 3) above need to have a
            # day added. This is because the above time calculation takes us
            # to midnight on this day, i.e. the start of the next day.
            if self.when.startswith("W"):
                day = current_day  # 0 is Monday
                if day != self.dayOfWeek:
                    if day < self.dayOfWeek:
                        daysToWait = self.dayOfWeek - day
                    else:
                        daysToWait = 6 - day + self.dayOfWeek + 1
                    now_rollover_at = result + (daysToWait * (60 * 60 * 24))
                    if not self.utc:
                        dst_now = t[-1]
                        dstAtRollover = time.localtime(now_rollover_at)[-1]
                        if dst_now != dstAtRollover:
                            if not dst_now:
                                # DST kicks in before next rollover, so we
                                # need to deduct an hour
                                addend = -3600
                            else:
                                # DST bows out before next rollover, so we
                                # need to add an hour
                                addend = 3600
                            now_rollover_at += addend
                    result = now_rollover_at
        return result

    def shouldRollover(self, record):
        """
        Determine if rollover should occur.

        record is not used, as we are just comparing times, but it is needed
        so the method signatures are the same
        """
        # See bpo-45401: Never rollover anything other than regular files
        if os.path.exists(self.baseFilename) and not os.path.isfile(self.baseFilename):
            return False
        t = int(time.time())
        if t >= self.rollover_at:
            return True
        return False

    def getFilesToDelete(self):
        """
        Determine the files to delete when rolling over.

        More specific than the earlier method, which just used glob.glob().
        """
        dir_name, base_name = os.path.split(self.baseFilename)
        file_names = os.listdir(dir_name)
        result = []
        # See bpo-44753: Don't use the extension when computing the prefix.
        n, e = os.path.splitext(base_name)
        prefix = n + "."
        plen = len(prefix)
        for file_name in file_names:
            if self.namer is None:
                # Our files will always start with base_name
                if not file_name.startswith(base_name):
                    continue
            else:
                # Our files could be just about anything after custom naming,
                # but likely candidates are of the form
                # foo.log.DATETIME_SUFFIX or foo.DATETIME_SUFFIX.log
                if (
                    not file_name.startswith(base_name)
                    and file_name.endswith(e)
                    and len(file_name) > (plen + 1)
                    and not file_name[plen + 1].isdigit()
                ):
                    continue
            if file_name[:plen] == prefix:
                suffix = file_name[plen:]
                # See bpo-45628: The date/time suffix could be anywhere in
                # the filename
                parts = suffix.split(".")
                for part in parts:
                    if self.extMatch.match(part):
                        result.append(os.path.join(dir_name, file_name))
                        break
        if len(result) < self.backupCount:
            result = []
        else:
            result.sort()
            result = result[: len(result) - self.backupCount]
        return result

    def doRollover(self):
        """
        Do a rollover; in this case, a date/time stamp is appended to the
        filename when the rollover happens. However, you want the file to be
        named for the start of the interval, not the current time. If there
        is a backup count, then we have to get a list of matching filenames,
        sort them and remove the one with the oldest suffix.
        """
        if self.stream:
            self.stream.close()
            self.stream = None
        # get the time that this sequence started at and make it a TimeTuple
        current_time = int(time.time())
        dst_now = time.localtime(current_time)[-1]
        t = self.rollover_at - self.interval
        if self.utc:
            time_tuple = time.gmtime(t)
        else:
            time_tuple = time.localtime(t)
            dst_then = time_tuple[-1]
            if dst_now != dst_then:
                if dst_now:
                    addend = 3600
                else:
                    addend = -3600
                time_tuple = time.localtime(t + addend)
        dfn = self.rotation_filename(
            self.baseFilename + "." + time.strftime(self.suffix, time_tuple)
        )
        if os.path.exists(dfn):
            os.remove(dfn)
        self.rotate(self.baseFilename, dfn)
        if self.backupCount > 0:
            for s in self.getFilesToDelete():
                os.remove(s)
        if not self.delay:
            self.stream = self._open()
        now_rollover_at = self.computeRollover(current_time)
        while now_rollover_at <= current_time:
            now_rollover_at = now_rollover_at + self.interval
        # If DST changes and midnight or weekly rollover, adjust for this.
        if (self.when == "MIDNIGHT" or self.when.startswith("W")) and not self.utc:
            dstAtRollover = time.localtime(now_rollover_at)[-1]
            if dst_now != dstAtRollover:
                if not dst_now:
                    # DST kicks in before next rollover, so we need to
                    # deduct an hour
                    addend = -3600
                else:
                    # DST bows out before next rollover, so we need to
                    # add an hour
                    addend = 3600
                now_rollover_at += addend
        self.rollover_at = now_rollover_at

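
# Illustrative sketch (not part of picologging): rotate once a day at 03:30
# local time, keeping a week of dated backups. The path, logger name and
# times are hypothetical.
def _example_timed_rotation():
    import datetime

    handler = TimedRotatingFileHandler(
        "app.log",
        when="midnight",
        backupCount=7,
        atTime=datetime.time(hour=3, minute=30),
    )
    # Rotated files are named app.log.YYYY-MM-DD; the eighth-oldest is
    # deleted on each rollover.
    picologging.getLogger("example.timed").addHandler(handler)
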

class QueueHandler(picologging.Handler):
    """
    This handler sends events to a queue. Typically, it would be used
    together with a multiprocessing Queue to centralise logging to file in
    one process (in a multi-process application), so as to avoid file write
    contention between processes.

    This code is new in Python 3.2, but this class can be copy pasted into
    user code for use with earlier Python versions.
    """

    def __init__(self, queue):
        """
        Initialise an instance, using the passed queue.
        """
        super().__init__()
        self.queue = queue

    def enqueue(self, record):
        """
        Enqueue a record.

        The base implementation uses put_nowait. You may want to override
        this method if you want to use blocking, timeouts or custom queue
        implementations.
        """
        self.queue.put_nowait(record)

    def prepare(self, record):
        """
        Prepare a record for queuing. The object returned by this method is
        enqueued.

        The base implementation formats the record to merge the message and
        arguments, and removes unpickleable items from the record in-place.
        Specifically, it overwrites the record's `msg` and `message`
        attributes with the merged message (obtained by calling the handler's
        `format` method), and sets the `args`, `exc_info` and `exc_text`
        attributes to None.

        You might want to override this method if you want to convert the
        record to a dict or JSON string, or send a modified copy of the
        record while leaving the original intact.
        """
        # The format operation gets traceback text into record.exc_text
        # (if there's exception data), and also returns the formatted
        # message. We can then use this to replace the original
        # msg + args, as these might be unpickleable. We also zap the
        # exc_info, exc_text and stack_info attributes, as they are no longer
        # needed and, if not None, will typically not be pickleable.
        msg = self.format(record)
        # bpo-35726: make copy of record to avoid affecting other handlers
        # in the chain.
        record = picologging.LogRecord(
            record.name,
            record.levelno,
            record.pathname,
            record.lineno,
            msg,
            None,
            None,
            record.funcName,
            record.stack_info,
        )
        return record

    def emit(self, record: picologging.LogRecord):
        """
        Emit a record.

        Writes the LogRecord to the queue, copying it first.
        """
        try:
            self.enqueue(self.prepare(record))
        except Exception:
            self.handleError(record)


class QueueListener:
    """
    This class implements an internal threaded listener which watches for
    LogRecords being added to a queue, removes them and passes them to a
    list of handlers for processing.
    """

    _sentinel = None

    def __init__(self, queue, *handlers, respect_handler_level=False):
        """
        Initialise an instance with the specified queue and handlers.
        """
        self.queue = queue
        self.handlers = handlers
        self._thread = None
        self.respect_handler_level = respect_handler_level

    def dequeue(self, block):
        """
        Dequeue a record and return it, optionally blocking.

        The base implementation uses get. You may want to override this
        method if you want to use timeouts or work with custom queue
        implementations.
        """
        return self.queue.get(block)

    def start(self):
        """
        Start the listener.

        This starts up a background thread to monitor the queue for
        LogRecords to process.
        """
        self._thread = t = threading.Thread(target=self._monitor)
        t.daemon = True
        t.start()

    def prepare(self, record):
        """
        Prepare a record for handling.

        This method just returns the passed-in record. You may want to
        override this method if you need to do any custom marshalling or
        manipulation of the record before passing it to the handlers.
        """
        return record

    def handle(self, record):
        """
        Handle a record.

        This just loops through the handlers offering them the record
        to handle.
        """
        record = self.prepare(record)
        for handler in self.handlers:
            if not self.respect_handler_level:
                process = True
            else:
                process = record.levelno >= handler.level
            if process:
                handler.handle(record)

    def _monitor(self):
        """
        Monitor the queue for records, and ask the handler to deal with them.

        This method runs on a separate, internal thread.
        The thread will terminate if it sees a sentinel object in the queue.
        """
        q = self.queue
        has_task_done = hasattr(q, "task_done")
        while True:
            try:
                record = self.dequeue(True)
                if record is self._sentinel:
                    if has_task_done:
                        q.task_done()
                    break
                self.handle(record)
                if has_task_done:
                    q.task_done()
            except queue.Empty:
                break

    def enqueue_sentinel(self):
        """
        This is used to enqueue the sentinel record.

        The base implementation uses put_nowait. You may want to override
        this method if you want to use timeouts or work with custom queue
        implementations.
        """
        self.queue.put_nowait(self._sentinel)

    def stop(self):
        """
        Stop the listener.

        This asks the thread to terminate, and then waits for it to do so.
        Note that if you don't call this before your application exits, there
        may be some records still left on the queue, which won't be
        processed.
        """
        self.enqueue_sentinel()
        self._thread.join()
        self._thread = None

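
# Illustrative sketch (not part of picologging): the usual pairing of
# QueueHandler and QueueListener - producers enqueue records cheaply while a
# background thread performs the slow I/O. The queue, path and logger name
# are hypothetical.
def _example_queue_logging():
    log_queue = queue.Queue()
    file_handler = picologging.FileHandler("app.log")
    listener = QueueListener(log_queue, file_handler)
    listener.start()

    logger = picologging.getLogger("example.queued")
    logger.addHandler(QueueHandler(log_queue))
    logger.info("handled on the listener thread")

    listener.stop()  # enqueues the sentinel and joins the worker thread
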

class BufferingHandler(picologging.Handler):
    """
    A handler class which buffers logging records in memory. Whenever each
    record is added to the buffer, a check is made to see if the buffer
    should be flushed. If it should, then flush() is expected to do what's
    needed.
    """

    def __init__(self, capacity):
        """
        Initialize the handler with the buffer size.
        """
        picologging.Handler.__init__(self)
        self.capacity = capacity
        self.buffer = []

    def emit(self, record):
        """
        Emit a record.

        Append the record and call flush() if the criteria are met.
        """
        self.buffer.append(record)
        if len(self.buffer) >= self.capacity:
            self.flush()

    def flush(self):
        """
        Override to implement custom flushing behaviour.

        This version just zaps the buffer to empty.
        """
        self.acquire()
        try:
            self.buffer.clear()
        finally:
            self.release()

    def close(self):
        """
        Close the handler.

        This version just flushes and chains to the parent class' close().
        """
        try:
            self.flush()
        finally:
            picologging.Handler.close(self)


class MemoryHandler(BufferingHandler):
    """
    A handler class which buffers logging records in memory, periodically
    flushing them to a target handler. Flushing occurs whenever the buffer
    is full, or when an event of a certain severity or greater is seen.
    """

    def __init__(
        self, capacity, flushLevel=picologging.ERROR, target=None, flushOnClose=True
    ):
        """
        Initialize the handler with the buffer size, the level at which
        flushing should occur and an optional target.

        Note that without a target being set either here or via setTarget(),
        a MemoryHandler is no use to anyone!

        The ``flushOnClose`` argument is ``True`` for backward compatibility
        reasons - the old behaviour is that when the handler is closed, the
        buffer is flushed, even if neither the flush level nor the capacity
        has been exceeded. To prevent this, set ``flushOnClose`` to
        ``False``.
        """
        BufferingHandler.__init__(self, capacity)
        self.flushLevel = flushLevel
        self.target = target
        # See Issue #26559 for why this has been added
        self.flushOnClose = flushOnClose

    def setTarget(self, target):
        """
        Set the target handler for this handler.
        """
        self.acquire()
        try:
            self.target = target
        finally:
            self.release()

    def flush(self):
        """
        For a MemoryHandler, flushing means just sending the buffered
        records to the target, if there is one. Override if you want
        different behaviour.

        The record buffer is also cleared by this operation.
        """
        self.acquire()
        try:
            if self.target:
                for record in self.buffer:
                    self.target.handle(record)
                self.buffer.clear()
        finally:
            self.release()

    def close(self):
        """
        Flush, if appropriately configured, set the target to None and lose
        the buffer.
        """
        try:
            if self.flushOnClose:
                self.flush()
        finally:
            self.acquire()
            try:
                self.target = None
                BufferingHandler.close(self)
            finally:
                self.release()

    def emit(self, record):
        """
        Emit a record.

        Append the record and call flush() if the criteria are met.
        """
        self.buffer.append(record)
        if (len(self.buffer) >= self.capacity) or (record.levelno >= self.flushLevel):
            self.flush()

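
# Illustrative sketch (not part of picologging): buffer up to 100 records in
# memory and only write them out when an ERROR (or worse) arrives, or when
# the buffer fills. The path, capacity and logger name are hypothetical.
def _example_memory_buffering():
    target = picologging.FileHandler("app.log")
    handler = MemoryHandler(capacity=100, flushLevel=picologging.ERROR, target=target)
    logger = picologging.getLogger("example.buffered")
    logger.addHandler(handler)
    logger.debug("buffered, not yet written")
    logger.error("flushes this record and everything buffered before it")
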

class SocketHandler(picologging.Handler):
    """
    A handler class which writes logging records, in pickle format, to
    a streaming socket. The socket is kept open across logging calls.
    If the peer resets it, an attempt is made to reconnect on the next call.
    The pickle which is sent is that of the LogRecord's attribute dictionary
    (__dict__), so that the receiver does not need to have the logging module
    installed in order to process the logging event.

    To unpickle the record at the receiving end into a LogRecord, use the
    makeLogRecord function.
    """

    def __init__(self, host, port):
        """
        Initializes the handler with a specific host address and port.

        When the attribute *closeOnError* is set to True - if a socket error
        occurs, the socket is silently closed and then reopened on the next
        logging call.
        """
        picologging.Handler.__init__(self)
        self.host = host
        self.port = port
        if port is None:
            self.address = host
        else:
            self.address = (host, port)
        self.sock = None
        self.closeOnError = False
        self.retryTime = None
        # Exponential backoff parameters.
        self.retryStart = 1.0
        self.retryMax = 30.0
        self.retryFactor = 2.0

    def makeSocket(self, timeout=1):
        """
        A factory method which allows subclasses to define the precise
        type of socket they want.
        """
        if self.port is not None:
            result = socket.create_connection(self.address, timeout=timeout)
        else:
            result = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            result.settimeout(timeout)
            try:
                result.connect(self.address)
            except OSError:
                result.close()  # Issue 19182
                raise
        return result

    def createSocket(self):
        """
        Try to create a socket, using an exponential backoff with
        a max retry time.
        """
        now = time.time()
        # Either retryTime is None, in which case this
        # is the first time back after a disconnect, or
        # we've waited long enough.
        if self.retryTime is None:
            attempt = True
        else:
            attempt = now >= self.retryTime
        if attempt:
            try:
                self.sock = self.makeSocket()
                self.retryTime = None  # next time, no delay before trying
            except OSError:
                # Creation failed, so set the retry time and return.
                if self.retryTime is None:
                    self.retryPeriod = self.retryStart
                else:
                    self.retryPeriod = self.retryPeriod * self.retryFactor
                    if self.retryPeriod > self.retryMax:
                        self.retryPeriod = self.retryMax
                self.retryTime = now + self.retryPeriod

    def send(self, s):
        """
        Send a pickled string to the socket.

        This function allows for partial sends which can happen when the
        network is busy.
        """
        if self.sock is None:
            self.createSocket()
        # self.sock can be None either because we haven't reached the retry
        # time yet, or because we have reached the retry time and retried,
        # but are still unable to connect.
        if self.sock:
            try:
                self.sock.sendall(s)
            except OSError:  # pragma: no cover
                self.sock.close()
                self.sock = None  # so we can call createSocket next time

    def makePickle(self, record):
        """
        Pickles the record in binary format with a length prefix, and
        returns it ready for transmission across the socket.
        """
        ei = record.exc_info
        if ei:
            # just to get traceback text into record.exc_text ...
            self.format(record)
        # See issue #14436: If msg or args are objects, they may not be
        # available on the receiving end. So we convert the msg % args
        # to a string, save it as msg and zap the args.
        d = dict(record.__dict__)
        d["msg"] = record.getMessage()
        d["args"] = None
        d["exc_info"] = None
        # Issue #25685: delete 'message' if present: redundant with 'msg'
        d.pop("message", None)
        s = pickle.dumps(d, 1)
        slen = struct.pack(">L", len(s))
        return slen + s

    def handleError(self, record):
        """
        Handle an error during logging.

        An error has occurred during logging. Most likely cause -
        connection lost. Close the socket so that we can retry on the
        next event.
        """
        if self.closeOnError and self.sock:
            self.sock.close()
            self.sock = None  # try to reconnect next time
        else:
            picologging.Handler.handleError(self, record)

    def emit(self, record):
        """
        Emit a record.

        Pickles the record and writes it to the socket in binary format.
        If there is an error with the socket, silently drop the packet.
        If there was a problem with the socket, re-establishes the socket.
        """
        try:
            s = self.makePickle(record)
            self.send(s)
        except Exception:
            self.handleError(record)

    def close(self):
        """
        Closes the socket.
        """
        self.acquire()
        try:
            sock = self.sock
            if sock:
                self.sock = None
                sock.close()
            picologging.Handler.close(self)
        finally:
            self.release()

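
# Illustrative sketch (not part of picologging): a minimal receiver for the
# wire format produced by makePickle() - a 4-byte big-endian length prefix
# followed by a pickled dict of LogRecord attributes. Host and port are
# hypothetical, and a real deployment should authenticate the peer, since
# unpickling untrusted data is unsafe.
def _example_socket_receiver(host="localhost", port=9020):
    with socket.create_server((host, port)) as server:
        conn, _ = server.accept()
        with conn:
            while True:
                header = conn.recv(4)
                if len(header) < 4:
                    break  # connection closed (or truncated header)
                (length,) = struct.unpack(">L", header)
                payload = b""
                while len(payload) < length:
                    chunk = conn.recv(length - len(payload))
                    if not chunk:
                        return
                    payload += chunk
                attrs = pickle.loads(payload)
                print(attrs["name"], attrs["msg"])
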

class DatagramHandler(SocketHandler):
    """
    A handler class which writes logging records, in pickle format, to
    a datagram socket. The pickle which is sent is that of the LogRecord's
    attribute dictionary (__dict__), so that the receiver does not need to
    have the logging module installed in order to process the logging event.

    To unpickle the record at the receiving end into a LogRecord, use the
    makeLogRecord function.
    """

    def __init__(self, host, port):
        """
        Initializes the handler with a specific host address and port.
        """
        SocketHandler.__init__(self, host, port)
        self.closeOnError = False

    def makeSocket(self):
        """
        The factory method of SocketHandler is here overridden to create
        a UDP socket (SOCK_DGRAM).
        """
        if self.port is None:
            family = socket.AF_UNIX
        else:
            family = socket.AF_INET
        s = socket.socket(family, socket.SOCK_DGRAM)
        return s

    def send(self, s):
        """
        Send a pickled string to a socket.

        This function no longer allows for partial sends which can happen
        when the network is busy - UDP does not guarantee delivery and can
        deliver packets out of sequence.
        """
        if self.sock is None:
            self.createSocket()
        self.sock.sendto(s, self.address)

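
# Illustrative sketch (not part of picologging): sending records over UDP.
# Delivery is best-effort - datagrams may be dropped or reordered. The host,
# port and logger name are hypothetical.
def _example_datagram_usage():
    logger = picologging.getLogger("example.udp")
    logger.addHandler(DatagramHandler("localhost", 9021))
    logger.info("fire-and-forget")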