logconfig.py
Implements a Python logging client & server setup, modeled on tornadoweb's log configuration module.
Logging setup code (logconfig.py):
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
#
__VERSION__='0.1.0'
import datetime
import logging
import logging.handlers
import re
import sys
import time
import pickle
import SocketServer
import struct
# For pretty log messages, if available
try:
    import curses
except ImportError:
    curses = None
# max size of log files before rollover
LOG_FILE_MAX_SIZE = 128 * 1024 * 1024
# number of log files to keep
LOG_FILE_NUM_BACKUPS = 10
def configure( level='info', file_prefix=None, file_max_size=0, file_num_backups=0, servers='' ) :
"""Turns on formatted logging output as configured."""
root_logger = logging.getLogger()
if not level :
level = 'info'
root_logger.setLevel(getattr(logging, level.upper()))
if file_prefix:
if not file_max_size :
file_max_size = LOG_FILE_MAX_SIZE
if not file_num_backups :
file_num_backups = LOG_FILE_NUM_BACKUPS
channel = logging.handlers.RotatingFileHandler(
filename = file_prefix,
maxBytes = file_max_size,
backupCount = file_num_backups )
channel.setFormatter( _LogFormatter(color=False) )
root_logger.addHandler( channel )
elif not root_logger.handlers :
# Set up color if we are in a tty and curses is installed
color = False
if curses and sys.stderr.isatty():
try:
curses.setupterm()
if curses.tigetnum("colors") > 0:
color = True
except:
pass
channel = logging.StreamHandler()
channel.setFormatter(_LogFormatter(color=color))
root_logger.addHandler(channel)
#host = '127.0.0.1'
#port = logging.handlers.DEFAULT_TCP_LOGGING_PORT
## servers FORMAT : SERVER1:PORT1,SERVER2:PORT2,SERVER3:PORT3
if servers :
server_list = [ x.strip() for x in servers.split(',') ]
for server in server_list :
host,port = server.split(':')
socketHandler = logging.handlers.SocketHandler(host, int(port))
root_logger.addHandler(socketHandler)
#### configure()
class _LogFormatter(logging.Formatter):
def __init__(self, color, *args, **kwargs):
logging.Formatter.__init__(self, *args, **kwargs)
self._color = color
if color:
fg_color = curses.tigetstr("setaf") or curses.tigetstr("setf") or ""
self._colors = {
logging.DEBUG: curses.tparm(fg_color, 4), # Blue
logging.INFO: curses.tparm(fg_color, 2), # Green
logging.WARNING: curses.tparm(fg_color, 3), # Yellow
logging.ERROR: curses.tparm(fg_color, 1), # Red
}
self._normal = curses.tigetstr("sgr0")
def format(self, record):
try:
record.message = record.getMessage()
        except Exception as e:
            record.message = "Bad message (%r): %r" % (e, record.__dict__)
record.asctime = time.strftime(
"%Y-%m-%d %H:%M:%S", self.converter(record.created))
prefix = '%(asctime)s %(levelname)1.1s %(name)s %(process)d %(module)s:%(lineno)d' % \
record.__dict__
if self._color:
prefix = (self._colors.get(record.levelno, self._normal) +
prefix + self._normal)
formatted = prefix + " " + record.message
if record.exc_info:
if not record.exc_text:
record.exc_text = self.formatException(record.exc_info)
if record.exc_text:
            formatted = formatted.rstrip() + "\n" + record.exc_text
        return formatted.replace("\n", "\n    ")
#### _LogFormatter
########---------------- ######## ----------------########
class LogRecordStreamHandler(SocketServer.StreamRequestHandler):
"""Handler for a streaming logging request.
This basically logs the record using whatever logging policy is
configured locally.
"""
def handle(self):
"""
Handle multiple requests - each expected to be a 4-byte length,
followed by the LogRecord in pickle format. Logs the record
according to whatever policy is configured locally.
"""
while True:
chunk = self.connection.recv(4)
if len(chunk) < 4:
break
slen = struct.unpack('>L', chunk)[0]
chunk = self.connection.recv(slen)
while len(chunk) < slen:
chunk = chunk + self.connection.recv(slen - len(chunk))
obj = self.unPickle(chunk)
record = logging.makeLogRecord(obj)
self.handleLogRecord(record)
def unPickle(self, data):
return pickle.loads(data)
def handleLogRecord(self, record):
# if a name is specified, we use the named logger rather than the one
# implied by the record.
if self.server.logname is not None:
name = self.server.logname
else:
name = record.name
logger = logging.getLogger(name)
# N.B. EVERY record gets logged. This is because Logger.handle
# is normally called AFTER logger-level filtering. If you want
# to do filtering, do it at the client end to save wasting
# cycles and network bandwidth!
logger.handle(record)
class LogServer(SocketServer.ThreadingTCPServer):
"""
Simple TCP socket-based logging receiver suitable for testing.
"""
allow_reuse_address = 1
def __init__(self, host='0.0.0.0',
port=logging.handlers.DEFAULT_TCP_LOGGING_PORT,
handler=LogRecordStreamHandler,
level='info', file_prefix=None, file_max_size=0, file_num_backups=0 ):
SocketServer.ThreadingTCPServer.__init__(self, (host, port), handler)
self.abort = 0
self.timeout = 1
self.logname = None
configure( level, file_prefix, file_max_size, file_num_backups )
#### __init__()
def serve_until_stopped(self):
import select
abort = 0
while not abort:
rd, wr, ex = select.select([self.socket.fileno()],
[], [],
self.timeout)
if rd:
self.handle_request()
abort = self.abort
#### serve_until_stopped()
#### class LogServer
#def main():
# s = LogServer()
# #print('start LOG server...')
# s.serve_until_stopped()
def test3() :
configure(level='debug', file_prefix='test.log')
logger = logging.getLogger()
logger.fatal(">>>FATAL<<< %s", 'Hello, World.')
logger.error(">>>ERROR<<< %s", 'Hello, World.')
logger.warn(">>> WARN<<< %s", 'Hello, World.')
logger.info(">>> INFO<<< %s", 'Hello, World.')
logger.debug(">>>DEBUG<<< %s", 'Hello, World.')
logger = logging.getLogger('test3')
logger.fatal(">>>FATAL<<< %s", 'Hello, World.')
logger.error(">>>ERROR<<< %s", 'Hello, World.')
logger.warn(">>> WARN<<< %s", 'Hello, World.')
logger.info(">>> INFO<<< %s", 'Hello, World.')
logger.debug(">>>DEBUG<<< %s", 'Hello, World.')
#### test3()
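The servers argument of configure() is what turns a process into a logging client: each HOST:PORT entry gets a logging.handlers.SocketHandler attached to the root logger. A minimal client-side sketch, assuming the module is importable as tbox.logconfig (as in the launcher script below) and a receiver is already listening on 127.0.0.1:9020; the logger name and messages are made up for illustration:

#!/usr/bin/env python2
# -*- coding: utf-8 -*-
# Hypothetical client: local console output plus one TCP SocketHandler
# per HOST:PORT entry passed in 'servers'.
import logging
from tbox import logconfig

# 'servers' takes a comma-separated HOST:PORT list, as documented in configure().
logconfig.configure(level='debug', servers='127.0.0.1:9020')

logger = logging.getLogger('client.demo')
logger.info("written to the local console and shipped to the receiver on port 9020")

The launcher script below runs the receiving side (LogServer) as a standalone process: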
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
import sys, os, os.path, time
import signal
import threading
from tbox import logconfig
HOST = '0.0.0.0'
PORT = 9020
LOG_PREFIX = '/home/s/logs/pylogd/9020.log'
LOG_SIZE = 512 * 1024 * 1024
LOG_BACKUP = 2000
log_server = None
def signal_handler(sig, flags) :
    global log_server
    # shutdown() blocks until serve_forever() exits; calling it directly from
    # this handler (which runs in the main thread alongside serve_forever)
    # would deadlock, so trigger it from a separate thread instead.
    if log_server :
        threading.Thread(target=log_server.shutdown).start()
#### signal_handler()
if __name__ == '__main__' :
signal.signal( signal.SIGINT, signal_handler )
signal.signal( signal.SIGQUIT, signal_handler )
signal.signal( signal.SIGTERM, signal_handler )
log_server = logconfig.LogServer( host=HOST, port=PORT, file_prefix=LOG_PREFIX, file_max_size=LOG_SIZE, file_num_backups=LOG_BACKUP )
#s.serve_until_stopped()
log_server.serve_forever()
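On the wire, every record arrives exactly as LogRecordStreamHandler.handle() expects: a 4-byte big-endian length prefix followed by a pickled LogRecord dict. logging.handlers.SocketHandler produces this framing for you; the hand-rolled sender below is only a sketch of the protocol, with the host, port, and record contents chosen for illustration:

import logging
import pickle
import socket
import struct

def send_record(sock, record):
    # Pickle the record's attribute dict and prepend a '>L' length header,
    # matching the framing that LogRecordStreamHandler.handle() unpacks.
    data = pickle.dumps(record.__dict__, 1)
    sock.sendall(struct.pack('>L', len(data)) + data)

sock = socket.create_connection(('127.0.0.1', 9020))
record = logging.LogRecord('wire.demo', logging.INFO, __file__, 0,
                           'framed by hand', None, None)
send_record(sock, record)
sock.close()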
Another logging setup example:
# Logging, obviously
logger = logging.getLogger('nsq')
handler = logging.StreamHandler()
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s [%(levelname)s] %(filename)s@%(lineno)d: %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.INFO)
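One detail worth noting here: the handler is set to DEBUG while the 'nsq' logger itself is set to INFO, and the logger's level is checked first, so DEBUG records never reach the handler. A quick check:

logger.debug("dropped: below the 'nsq' logger's INFO threshold")
logger.info("printed: passes the logger level, then the handler's DEBUG level")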