-#!/usr/bin/env python
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
-# Copyright (C) 2004-2011 OpenERP SA (<http://www.openerp.com>)
+# Copyright (C) 2004-2014 OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
#
##############################################################################
+#.apidoc title: Common Services: netsvc
+#.apidoc module-mods: member-order: bysource
+
import errno
import logging
import logging.handlers
import os
import platform
import socket
import sys
import threading
import time
import types
from pprint import pformat

try:
    import psutil
except ImportError:
    psutil = None

# TODO modules that import netsvc only for things from loglevels must be changed to use loglevels.
from loglevels import *
import openerp
import tools
_logger = logging.getLogger(__name__)
+
def close_socket(sock):
""" Closes a socket instance cleanly
# of the other side (or something), see
# http://bugs.python.org/issue4397
# note: stdlib fixed test, not behavior
- if e.errno != errno.ENOTCONN or platform.system() != 'Darwin':
+ if e.errno != errno.ENOTCONN or platform.system() not in ['Darwin', 'Windows']:
raise
sock.close()
-
-#.apidoc title: Common Services: netsvc
-#.apidoc module-mods: member-order: bysource
-
def abort_response(dummy_1, description, dummy_2, details):
    """Abort the current RPC call by raising an ``except_osv`` exception.

    ``dummy_1`` and ``dummy_2`` are ignored; they are kept only so existing
    call sites do not have to change their argument lists.
    """
    # TODO Replace except_{osv,orm} with these directly.
    raise openerp.osv.osv.except_osv(description, details)
class Service(object):
    # NOTE(review): patch fragment — the hunk elides the body of __init__ and
    # the classmethod that owns the `cls._services.pop(name)` line below; do
    # not read this span as contiguous source.
-    """ Base class for *Local* services
-
-    Functionality here is trusted, no authentication.
+    """ Base class for Local services
+    Functionality here is trusted, no authentication.
+    Workflow engine and reports subclass this.
    """
    _services = {}
    def __init__(self, name):
        cls._services.pop(name)
def LocalService(name):
    """Return the local (in-process) service registered under ``name``.

    :param name: registry key of the service
    :raises KeyError: if no service was registered under ``name``
    """
    # Special case for addons support, will be removed in a few days when addons
    # are updated to directly use openerp.osv.osv.service.
    if name == 'object_proxy':
        return openerp.osv.osv.service
    return Service._services[name]
class ExportService(object):
    """ Proxy for exported services.
    # NOTE(review): patch fragment — the rest of ExportService, the ANSI color
    # constant definitions, and the head of LEVEL_COLOR_MAPPING are elided by
    # the hunk; the dict literal below is truncated mid-expression.
    BOLD_SEQ = "\033[1m"
    COLOR_PATTERN = "%s%s%%s%s" % (COLOR_SEQ, COLOR_SEQ, RESET_SEQ)
    LEVEL_COLOR_MAPPING = {
-    logging.DEBUG_SQL: (WHITE, MAGENTA),
-    logging.DEBUG_RPC: (BLUE, WHITE),
-    logging.DEBUG_RPC_ANSWER: (BLUE, WHITE),
    logging.DEBUG: (BLUE, DEFAULT),
    logging.INFO: (GREEN, DEFAULT),
    logging.TEST: (WHITE, BLUE),
class DBFormatter(logging.Formatter):
    """Logging formatter that exposes two extra record attributes:

    ``%(pid)s``    -- OS process id of the server process
    ``%(dbname)s`` -- database the current thread is working on, read from
                      the thread's ``dbname`` attribute ('?' when unset)
    """
    def format(self, record):
        record.pid = os.getpid()
        # current_thread() (available since Python 2.6) instead of the
        # deprecated camelCase alias currentThread().
        record.dbname = getattr(threading.current_thread(), 'dbname', '?')
        return logging.Formatter.format(self, record)
        # NOTE(review): tail of ColoredFormatter.format — the method and class
        # headers are elided by the patch hunk above this span.
        record.levelname = COLOR_PATTERN % (30 + fg_color, 40 + bg_color, record.levelname)
        return DBFormatter.format(self, record)
+_logger_init = False
def init_logger():
    # NOTE(review): patch fragment — the syslog/logfile handler-selection
    # branches between the `if tools.config['syslog']:` test and the stdout
    # fallback are elided by the hunk; this span is not contiguous source.
+    global _logger_init
+    if _logger_init:
+        return
+    _logger_init = True
+
    from tools.translate import resetlocale
    resetlocale()
    # create a format for log messages and dates
-    format = '[%(asctime)s][%(dbname)s] %(levelname)s:%(name)s:%(message)s'
+    format = '%(asctime)s %(pid)s %(levelname)s %(dbname)s %(name)s: %(message)s'
    if tools.config['syslog']:
        # SysLog Handler
        # Normal Handler on standard output
        handler = logging.StreamHandler(sys.stdout)
-    if isinstance(handler, logging.StreamHandler) and os.isatty(handler.stream.fileno()):
+    # Check that handler.stream has a fileno() method: when running OpenERP
+    # behind Apache with mod_wsgi, handler.stream will have type mod_wsgi.Log,
+    # which has no fileno() method. (mod_wsgi.Log is what is being bound to
+    # sys.stderr when the logging.StreamHandler is being constructed above.)
+    if isinstance(handler, logging.StreamHandler) \
+            and hasattr(handler.stream, 'fileno') \
+            and os.isatty(handler.stream.fileno()):
        formatter = ColoredFormatter(format)
    else:
        formatter = DBFormatter(format)
    handler.setFormatter(formatter)
-    # Add the handler to the 'openerp' logger.
-    logger = logging.getLogger('openerp')
-    logger.addHandler(handler)
-    logger.setLevel(int(tools.config['log_level'] or '0'))
+    logging.getLogger().addHandler(handler)
+
+    # Configure handlers
+    default_config = [
+        'openerp.netsvc.rpc.request:INFO',
+        'openerp.netsvc.rpc.response:INFO',
+        'openerp.addons.web.http:INFO',
+        'openerp.sql_db:INFO',
+        ':INFO',
+    ]
+
+    if tools.config['log_level'] == 'info':
+        pseudo_config = []
+    elif tools.config['log_level'] == 'debug_rpc':
+        pseudo_config = ['openerp:DEBUG','openerp.netsvc.rpc.request:DEBUG']
+    elif tools.config['log_level'] == 'debug_rpc_answer':
+        pseudo_config = ['openerp:DEBUG','openerp.netsvc.rpc.request:DEBUG', 'openerp.netsvc.rpc.response:DEBUG']
+    elif tools.config['log_level'] == 'debug':
+        pseudo_config = ['openerp:DEBUG']
+    elif tools.config['log_level'] == 'test':
+        pseudo_config = ['openerp:TEST']
+    elif tools.config['log_level'] == 'warn':
+        pseudo_config = ['openerp:WARNING']
+    elif tools.config['log_level'] == 'error':
+        pseudo_config = ['openerp:ERROR']
+    elif tools.config['log_level'] == 'critical':
+        pseudo_config = ['openerp:CRITICAL']
+    elif tools.config['log_level'] == 'debug_sql':
+        pseudo_config = ['openerp.sql_db:DEBUG']
+    else:
+        pseudo_config = []
+
+    logconfig = tools.config['log_handler']
+
+    for logconfig_item in default_config + pseudo_config + logconfig:
+        loggername, level = logconfig_item.split(':')
+        level = getattr(logging, level, logging.INFO)
+        logger = logging.getLogger(loggername)
+        logger.setLevel(level)
+
+    for logconfig_item in default_config + pseudo_config + logconfig:
+        _logger.debug('logger level set: "%s"', logconfig_item)
# A alternative logging scheme for automated runs of the
# server intended to test it.
def init_alternative_logger():
class H(logging.Handler):
- def emit(self, record):
- if record.levelno > 20:
- print record.levelno, record.pathname, record.msg
+ def emit(self, record):
+ if record.levelno > 20:
+ print record.levelno, record.pathname, record.msg
handler = H()
# Add the handler to the 'openerp' logger.
logger = logging.getLogger('openerp')
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
# NOTE(review): this entire class is removed by the patch (every line below is
# a `-` deletion); nothing replaces it in this hunk.
-class Server:
-    """ Generic interface for all servers with an event loop etc.
-    Override this to impement http, net-rpc etc. servers.
-
-    Servers here must have threaded behaviour. start() must not block,
-    there is no run().
-    """
-    __is_started = False
-    __servers = []
-    __starter_threads = []
-
-    # we don't want blocking server calls (think select()) to
-    # wait forever and possibly prevent exiting the process,
-    # but instead we want a form of polling/busy_wait pattern, where
-    # _server_timeout should be used as the default timeout for
-    # all I/O blocking operations
-    _busywait_timeout = 0.5
-
-    def __init__(self):
-        Server.__servers.append(self)
-        if Server.__is_started:
-            # raise Exception('All instances of servers must be inited before the startAll()')
-            # Since the startAll() won't be called again, allow this server to
-            # init and then start it after 1sec (hopefully). Register that
-            # timer thread in a list, so that we can abort the start if quitAll
-            # is called in the meantime
-            t = threading.Timer(1.0, self._late_start)
-            t.name = 'Late start timer for %s' % str(self.__class__)
-            Server.__starter_threads.append(t)
-            t.start()
-
-    def start(self):
-        _logger.debug("called stub Server.start")
-
-    def _late_start(self):
-        self.start()
-        for thr in Server.__starter_threads:
-            if thr.finished.is_set():
-                Server.__starter_threads.remove(thr)
-
-    def stop(self):
-        _logger.debug("called stub Server.stop")
-
-    def stats(self):
-        """ This function should return statistics about the server """
-        return "%s: No statistics" % str(self.__class__)
-
-    @classmethod
-    def startAll(cls):
-        if cls.__is_started:
-            return
-        _logger.info("Starting %d services" % len(cls.__servers))
-        for srv in cls.__servers:
-            srv.start()
-        cls.__is_started = True
-
-    @classmethod
-    def quitAll(cls):
-        if not cls.__is_started:
-            return
-        _logger.info("Stopping %d services" % len(cls.__servers))
-        for thr in cls.__starter_threads:
-            if not thr.finished.is_set():
-                thr.cancel()
-            cls.__starter_threads.remove(thr)
-
-        for srv in cls.__servers:
-            srv.stop()
-        cls.__is_started = False
-
-    @classmethod
-    def allStats(cls):
-        res = ["Servers %s" % ('stopped', 'started')[cls.__is_started]]
-        res.extend(srv.stats() for srv in cls.__servers)
-        return '\n'.join(res)
-
-    def _close_socket(self):
-        close_socket(self.socket)
-
def replace_request_password(args):
    """Return ``args`` as a tuple with the password argument masked.

    The password is always the 3rd argument in a request; it is replaced by
    '*' so RPC logs can be forwarded for diagnostics/debugging purposes
    without leaking credentials.

    :param args: positional RPC arguments (tuple or list)
    :return: tuple with element 2 replaced by '*' when present
    """
    if len(args) > 2:
        # Copy before mutating so the caller's sequence is left untouched.
        args = list(args)
        args[2] = '*'
    return tuple(args)
def log(logger, level, prefix, msg, depth=None):
    """Emit ``msg`` pretty-printed, one log record per line.

    The first line is prefixed with ``prefix``; continuation lines are
    indented by ``len(prefix)`` spaces so the pretty-printed block stays
    visually aligned in the log output.

    :param logger: logging.Logger to emit on
    :param level: logging level (e.g. logging.DEBUG)
    :param prefix: string prepended to the first line
    :param msg: arbitrary object, rendered with pprint.pformat
    :param depth: optional nesting depth limit passed to pformat
    """
    indent = ''
    indent_after = ' ' * len(prefix)
    for line in (prefix + pformat(msg, depth=depth)).split('\n'):
        logger.log(level, indent + line)
        indent = indent_after
def dispatch_rpc(service_name, method, params):
""" Handle a RPC call.
This is pure Python code, the actual marshalling (from/to XML-RPC or
NET-RPC) is done in a upper layer.
"""
- def _log(title, msg, channel=logging.DEBUG_RPC, depth=None, fn=""):
- log(__name__, msg, channel=channel, depth=depth, fn=fn)
try:
- start_time = end_time = 0
- if _logger.isEnabledFor(logging.DEBUG_RPC_ANSWER):
- _log('service', tuple(replace_request_password(params)), depth=None, fn='%s.%s'%(service_name,method))
- if _logger.isEnabledFor(logging.DEBUG_RPC):
+ rpc_request = logging.getLogger(__name__ + '.rpc.request')
+ rpc_response = logging.getLogger(__name__ + '.rpc.response')
+ rpc_request_flag = rpc_request.isEnabledFor(logging.DEBUG)
+ rpc_response_flag = rpc_response.isEnabledFor(logging.DEBUG)
+ if rpc_request_flag or rpc_response_flag:
start_time = time.time()
+ start_rss, start_vms = 0, 0
+ if psutil:
+ start_rss, start_vms = psutil.Process(os.getpid()).get_memory_info()
+ if rpc_request and rpc_response_flag:
+ log(rpc_request,logging.DEBUG,'%s.%s'%(service_name,method), replace_request_password(params))
+
result = ExportService.getService(service_name).dispatch(method, params)
- if _logger.isEnabledFor(logging.DEBUG_RPC):
+
+ if rpc_request_flag or rpc_response_flag:
end_time = time.time()
- if not _logger.isEnabledFor(logging.DEBUG_RPC_ANSWER):
- _log('service (%.3fs)' % (end_time - start_time), tuple(replace_request_password(params)), depth=1, fn='%s.%s'%(service_name,method))
- _log('execution time', '%.3fs' % (end_time - start_time), channel=logging.DEBUG_RPC_ANSWER)
- _log('result', result, channel=logging.DEBUG_RPC_ANSWER)
+ end_rss, end_vms = 0, 0
+ if psutil:
+ end_rss, end_vms = psutil.Process(os.getpid()).get_memory_info()
+ logline = '%s.%s time:%.3fs mem: %sk -> %sk (diff: %sk)' % (service_name, method, end_time - start_time, start_vms / 1024, end_vms / 1024, (end_vms - start_vms)/1024)
+ if rpc_response_flag:
+ log(rpc_response,logging.DEBUG, logline, result)
+ else:
+ log(rpc_request,logging.DEBUG, logline, replace_request_password(params), depth=1)
+
return result
except openerp.exceptions.AccessError:
raise
except openerp.exceptions.Warning:
raise
except openerp.exceptions.DeferredException, e:
- _log('exception', tools.exception_to_unicode(e))
+ _logger.exception(tools.exception_to_unicode(e))
post_mortem(e.traceback)
raise
except Exception, e:
- _log('exception', tools.exception_to_unicode(e))
+ _logger.exception(tools.exception_to_unicode(e))
post_mortem(sys.exc_info())
raise