Firebase and Gzipped Rotating File Logging Handlers
#!/usr/bin/env python
# coding=utf-8
import os
import glob
import subprocess
import logging
from logging.handlers import BaseRotatingHandler
__author__ = u'Ahmed Şeref GÜNEYSU'


# noinspection PyPep8Naming
class GzippedRotatingFileHandler(BaseRotatingHandler):
    def __init__(self, filename, mode='a', maxBytes=0, encoding=None, delay=0):
        """
        Open the specified file and use it as the stream for logging.

        By default, the file grows indefinitely. Specify a maxBytes
        value to make the file roll over at a predetermined size:
        rollover occurs whenever the current log file is nearly
        maxBytes in length. On rollover the file is closed, renamed
        with the next free numeric suffix, and compressed, so a base
        file name of "app.log" produces "app.log.1.gz", "app.log.2.gz"
        and so on, while the file being written to is always "app.log".
        If maxBytes is zero, rollover never occurs.
        """
        # If rotation/rollover is wanted, it doesn't make sense to use another
        # mode. If for example 'w' were specified, then if there were multiple
        # runs of the calling application, the logs from previous runs would be
        # lost if the 'w' is respected, because the log file would be truncated
        # on each run.
        if maxBytes > 0:
            mode = 'a'
        BaseRotatingHandler.__init__(self, filename, mode, encoding, delay)
        self.maxBytes = maxBytes

    def doRollover(self):
        """
        Do a rollover, as described in __init__().
        """
        if self.stream:
            self.stream.close()
            self.stream = None
        # Find the highest numeric suffix among this file's existing
        # compressed backups. glob() returns names in arbitrary order
        # and "app.log.10.gz" sorts before "app.log.2.gz", so parse the
        # numbers rather than trusting lexicographic order.
        suffixes = []
        for name in glob.glob("%s.*.gz" % self.baseFilename):
            part = name[len(self.baseFilename) + 1:].split('.')[0]
            if part.isdigit():
                suffixes.append(int(part))
        suffix = max(suffixes) + 1 if suffixes else 1
        dfn = "%s.%d" % (self.baseFilename, suffix)
        if os.path.exists(self.baseFilename):
            os.rename(self.baseFilename, dfn)
            # Compress the rotated file in place; requires the gzip
            # binary to be available on PATH.
            subprocess.call(["gzip", dfn])
        self.stream = self._open()
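
    # A portability note: rollover above shells out to gzip, which is
    # typically absent on Windows. A minimal in-process alternative
    # sketch using the stdlib gzip module, where dfn is the renamed
    # file from doRollover:
    #
    #     import gzip
    #     import shutil
    #     with open(dfn, 'rb') as src, gzip.open(dfn + '.gz', 'wb') as dst:
    #         shutil.copyfileobj(src, dst)
    #     os.remove(dfn)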

    def shouldRollover(self, record):
        """
        Determine if rollover should occur.

        Basically, see if the supplied record would cause the file to
        exceed the size limit we have.
        """
        if self.stream is None:  # delay was set...
            self.stream = self._open()
        if self.maxBytes > 0:  # are we rolling over?
            msg = "%s\n" % self.format(record)
            self.stream.seek(0, 2)  # due to non-posix-compliant Windows feature
            if self.stream.tell() + len(msg) >= self.maxBytes:
                return 1
        return 0
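

# A minimal usage sketch: "demo.log" and the 1 MB limit below are
# illustrative values, not part of this module. Once the active file
# nears maxBytes it becomes demo.log.1.gz, then demo.log.2.gz, etc.
#
#     handler = GzippedRotatingFileHandler("demo.log", maxBytes=1024 * 1024)
#     handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(message)s"))
#     logger = logging.getLogger("demo")
#     logger.addHandler(handler)
#     logger.info("this line goes to demo.log")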


class FirebaseHandler(logging.Handler):
    """
    A handler which POSTs each record as a JSON document to an HTTP
    endpoint, such as a Firebase Realtime Database REST URL.
    """
    def __init__(self, url):
        """
        Initialize the instance with the URL that records will be
        POSTed to.
        """
        logging.Handler.__init__(self)
        self.url = url

    def mapLogRecord(self, record):
        """
        Default implementation of mapping the log record into a dict
        that is sent as the request body. Override in a subclass to
        customise the payload.

        :type record: logging.LogRecord
        :param record: the record being emitted
        """
        data = dict(
            created=record.created,
            filename=record.filename,
            function=record.funcName,
            levelname=record.levelname,
            linenumber=record.lineno,
            module=record.module,
            msecs=record.msecs,
            # getMessage() merges record.msg with its args; reading
            # record.msg.message raised AttributeError for ordinary
            # string messages and silently dropped them.
            msg=record.getMessage(),
            name=record.name,
            pathname=record.pathname,
            process=record.process,
            processname=record.processName,
            relativecreated=record.relativeCreated,
            thread=record.thread,
            threadname=record.threadName,
        )
        return data
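
    # A subclassing sketch (ShortFirebaseHandler is a hypothetical
    # name): override mapLogRecord to send a trimmed payload instead
    # of the full field set above.
    #
    #     class ShortFirebaseHandler(FirebaseHandler):
    #         def mapLogRecord(self, record):
    #             return {'level': record.levelname, 'msg': record.getMessage()}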

    def emit(self, record):
        """
        Emit a record.

        Send the record to the configured URL as a JSON-encoded
        dictionary.
        """
        # Imported lazily so that merely importing this module does not
        # require requests to be installed.
        import json
        import requests
        try:
            data = self.mapLogRecord(record=record)
            requests.post(self.url, data=json.dumps(data))
        except (KeyboardInterrupt, SystemExit):
            raise
        except Exception:
            # Follow the standard logging convention: report failures
            # via handleError() rather than letting them propagate into
            # the application being logged.
            self.handleError(record)
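

# A minimal usage sketch; the URL is a placeholder for a real Firebase
# Realtime Database endpoint (POSTing to a ".json" path appends a new
# child node):
#
#     fb = FirebaseHandler("https://example-project.firebaseio.com/logs.json")
#     logging.getLogger("demo").addHandler(fb)
#     logging.getLogger("demo").warning("something happened")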