2 # -*- coding: utf-8 -*-
3 ##############################################################################
5 # OpenERP, Open Source Management Solution
6 # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
7 # The refactoring about the OpenSSL support come from Tryton
8 # Copyright (C) 2007-2009 Cédric Krier.
9 # Copyright (C) 2007-2009 Bertrand Chenal.
10 # Copyright (C) 2008 B2CK SPRL.
12 # This program is free software: you can redistribute it and/or modify
13 # it under the terms of the GNU General Public License as published by
14 # the Free Software Foundation, either version 3 of the License, or
15 # (at your option) any later version.
17 # This program is distributed in the hope that it will be useful,
18 # but WITHOUT ANY WARRANTY; without even the implied warranty of
19 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 # GNU General Public License for more details.
22 # You should have received a copy of the GNU General Public License
23 # along with this program. If not, see <http://www.gnu.org/licenses/>.
25 ##############################################################################
27 import SimpleXMLRPCServer
30 import logging.handlers
class Service(object):
    """Base class for *Local* (in-process) services.

    Functionality here is trusted: no authentication is performed.
    Instances register themselves by name in the class-level
    ``_services`` registry.
    """
    # NOTE(review): the class-level `_services`/`_methods` initialisers and
    # the @classmethod decorators on exists()/remove() are elided from this
    # excerpt -- confirm against the full file.

    def __init__(self, name, audience=''):
        # Register this instance globally under `name`.
        # `audience` is accepted but not used in the visible code.
        Service._services[name] = self

    def joinGroup(self, name):
        # Local services are never grouped; grouping applies to ExportService.
        raise Exception("No group for local services")
        #GROUPS.setdefault(name, {})[self.__name] = self

    def exists(cls, name):
        # presumably a @classmethod (decorator elided above) -- TODO confirm.
        # True when a service was registered under `name`.
        return name in cls._services

    def remove(cls, name):
        # presumably a @classmethod (decorator elided above) -- TODO confirm.
        # Drop the named service from the registry (raises KeyError if absent).
        cls._services.pop(name)

    def exportMethod(self, method):
        # Expose `method` under its own __name__ so LocalService proxies
        # can later bind it as an attribute.
        self._methods[method.__name__] = method

    def abortResponse(self, error, description, origin, details):
        # Outside debug mode, flatten the failure into a generic Exception
        # with a "origin -- description\n\ndetails" message; in debug mode
        # the caller's original exception is left to propagate.
        if not tools.config['debug_mode']:
            raise Exception("%s -- %s\n\n%s"%(origin, description, details))
class LocalService(object):
    """Proxy for local services.

    Any instance of this class behaves like the single registered
    instance of the named Service: every exported method of the target
    service is bound directly onto this proxy at construction time.
    """

    def __init__(self, name):
        # NOTE(review): a `try:` opener is elided from this excerpt before
        # the registry lookup below -- the `except KeyError` pairs with it
        # in the full file.
        self._service = Service._services[name]
        # Bind each exported method of the service onto this proxy so
        # `proxy.method(...)` works directly.
        for method_name, method_definition in self._service._methods.items():
            setattr(self, method_name, method_definition)
        except KeyError, keyError:
            # Unknown service name: log it instead of propagating.
            Logger().notifyChannel('module', LOG_ERROR, 'This service does not exist: %s' % (str(keyError),) )

    def __call__(self, method, *params):
        # Allow `proxy('method', ...)` as an alternative to `proxy.method(...)`.
        return getattr(self, method)(*params)
class ExportService(object):
    """Proxy for exported (remotely callable) services.

    All methods here should take an AuthProxy as their first parameter;
    it will be appended by the calling framework.

    Note that this class has no direct proxy capable of calling
    ``eservice.method()``; rather, the transport proxy should call
    ``dispatch(method, auth, params)``.
    """
    # NOTE(review): the class-level `_services`/`_groups` registries and the
    # @classmethod decorator on getService() are elided from this excerpt.

    def __init__(self, name, audience=''):
        # Register this exported service globally under `name`.
        ExportService._services[name] = self

    def joinGroup(self, name):
        # Add this service to the named group, creating the group on demand.
        ExportService._groups.setdefault(name, {})[self.__name] = self

    def getService(cls,name):
        # presumably a @classmethod (decorator elided above) -- TODO confirm.
        # Registry lookup; raises KeyError for unknown services.
        return cls._services[name]

    def dispatch(self, method, auth, params):
        # Stub: concrete services must override this entry point.
        raise Exception("stub dispatch at %s" % self.__name)

    def new_dispatch(self,method,auth,params):
        # Stub: alternative dispatch entry point, also to be overridden.
        raise Exception("stub dispatch at %s" % self.__name)

    def abortResponse(self, error, description, origin, details):
        # Same contract as Service.abortResponse: flatten the error outside
        # debug mode, let the original exception propagate in debug mode.
        if not tools.config['debug_mode']:
            raise Exception("%s -- %s\n\n%s"%(origin, description, details))
# Symbolic log-level names passed to Logger.notifyChannel(); each one matches
# a method name (or lazily-created attribute) on a stdlib logging.Logger.
# NOTE(review): LOG_DEBUG / LOG_INFO / LOG_WARNING / LOG_ERROR definitions are
# elided from this excerpt but are referenced elsewhere in the file.
LOG_NOTSET = 'notset'
LOG_DEBUG_RPC = 'debug_rpc'
LOG_DEBUG2 = 'debug2'
LOG_CRITICAL = 'critical'

# add new log level below DEBUG
# DEBUG2 sits one step below DEBUG, DEBUG_RPC one step below DEBUG2, so RPC
# tracing is the most verbose level of all.
logging.DEBUG2 = logging.DEBUG - 1
logging.DEBUG_RPC = logging.DEBUG2 - 1
# ---------------------------------------------------------------------------
# Root-logger initialisation (excerpt).
# NOTE(review): several structural lines are elided from this view -- the
# enclosing function header, the `if os.name == 'nt': / else:` split for the
# syslog branch, a `try:` opener plus `os.makedirs()` in the logfile branch,
# the `else:` before the FileHandler fallback and before the stdout handler,
# and the `mapping = {` dict opener.  Confirm structure against the full
# file before changing anything here.
# ---------------------------------------------------------------------------
from tools.translate import resetlocale

logger = logging.getLogger()
# create a format for log messages and dates
formatter = logging.Formatter('[%(asctime)s] %(levelname)s:%(name)s:%(message)s')

if tools.config['syslog']:
    # Syslog branch: NT event log on Windows, the /dev/log socket on POSIX.
    handler = logging.handlers.NTEventLogHandler("%s %s" %
                                                 (release.description,
    handler = logging.handlers.SysLogHandler('/dev/log')
    # Syslog has no timestamps of its own in the message body; prefix with
    # the product name/version instead of the date-based format above.
    formatter = logging.Formatter("%s %s" % (release.description, release.version) + ':%(levelname)s:%(name)s:%(message)s')

elif tools.config['logfile']:
    # Logfile branch: make sure the directory exists, then pick a handler.
    logf = tools.config['logfile']
    dirname = os.path.dirname(logf)
    if dirname and not os.path.isdir(dirname):
    if tools.config['logrotate'] is not False:
        # Rotate daily, keeping 30 backups.
        handler = logging.handlers.TimedRotatingFileHandler(logf,'D',1,30)
    elif os.name == 'posix':
        # WatchedFileHandler reopens the file if an external logrotate moves it.
        handler = logging.handlers.WatchedFileHandler(logf)
    handler = logging.handlers.FileHandler(logf)
    except Exception, ex:
        # Directory creation (or handler setup) failed: fall back to stdout.
        sys.stderr.write("ERROR: couldn't create the logfile directory. Logging to the standard output.\n")
        handler = logging.StreamHandler(sys.stdout)

# Normal Handler on standard output
handler = logging.StreamHandler(sys.stdout)

# tell the handler to use this format
handler.setFormatter(formatter)

# add the handler to the root logger
logger.addHandler(handler)
logger.setLevel(int(tools.config['log_level'] or '0'))

if (not isinstance(handler, logging.FileHandler)) and os.name != 'nt':
    # change color of level names
    # uses of ANSI color codes
    # see http://pueblo.sourceforge.net/doc/manual/ansi_color_codes.html
    # maybe use http://code.activestate.com/recipes/574451/
    colors = ['black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white', None, 'default']
    # Index into `colors` gives the ANSI SGR code offset (30+ fg, 40+ bg).
    foreground = lambda f: 30 + colors.index(f)
    background = lambda f: 40 + colors.index(f)

    # Per-level (foreground, background) colour pairs.
    # NOTE(review): the `mapping = {` opener and closing `}` are elided here.
        'DEBUG_RPC': ('blue', 'white'),
        'DEBUG2': ('green', 'white'),
        'DEBUG': ('blue', 'default'),
        'INFO': ('green', 'default'),
        'WARNING': ('yellow', 'default'),
        'ERROR': ('red', 'default'),
        'CRITICAL': ('white', 'red'),

    for level, (fg, bg) in mapping.items():
        # Wrap each level name in ANSI escape codes so console output is
        # coloured; logging.addLevelName then emits the coloured name.
        msg = "\x1b[%dm\x1b[%dm%s\x1b[0m" % (foreground(fg), background(bg), level)
        logging.addLevelName(getattr(logging, level), msg)
class Logger(object):
    """Thin wrapper around stdlib ``logging`` exposing the historical
    OpenERP ``notifyChannel(name, level, msg)`` API.

    NOTE(review): at least two `try:` openers inside notifyChannel() are
    elided from this excerpt (their matching `except` clauses are still
    visible below), as is the `if len(result) > 1: / else:` split around
    the final emission -- confirm against the full file.
    """

    def notifyChannel(self, name, level, msg):
        # Log `msg` on channel `name` at symbolic `level` (one of the LOG_*
        # string constants). `msg` may be an Exception instance.
        from service.web_services import common
        log = logging.getLogger(tools.ustr(name))

        # The custom DEBUG2 / DEBUG_RPC levels have no bound logger method;
        # lazily attach a forwarding lambda the first time each is used.
        if level == LOG_DEBUG2 and not hasattr(log, level):
            fct = lambda msg, *args, **kwargs: log.log(logging.DEBUG2, msg, *args, **kwargs)
            setattr(log, LOG_DEBUG2, fct)

        if level == LOG_DEBUG_RPC and not hasattr(log, level):
            fct = lambda msg, *args, **kwargs: log.log(logging.DEBUG_RPC, msg, *args, **kwargs)
            setattr(log, LOG_DEBUG_RPC, fct)

        # The symbolic level string doubles as the logger method name.
        level_method = getattr(log, level)

        if isinstance(msg, Exception):
            msg = tools.exception_to_unicode(msg)

        msg = tools.ustr(msg).strip()
        if level in (LOG_ERROR,LOG_CRITICAL) and tools.config.get_misc('debug','env_info',False):
            # Prepend the server environment report to severe messages.
            msg = common().exp_get_server_environment() + "\n" + msg

        result = msg.split('\n')
        except UnicodeDecodeError:
            result = msg.strip().split('\n')

        # Multi-line messages are numbered [01]:, [02]:, ...; a single-line
        # message is emitted verbatim (the surrounding if/else is elided).
        for idx, s in enumerate(result):
            level_method('[%02d]: %s' % (idx+1, s,))

        level_method(result[0])
        # TODO: perhaps reset the logger streams?
        #if logrotate closes our files, we end up here..
        # better ignore the exception and carry on..

    def set_loglevel(self, level):
        # Switch the root logger to `level`, first raising it to INFO so the
        # announcement itself is guaranteed to be emitted.
        log = logging.getLogger()
        log.setLevel(logging.INFO) # make sure next msg is printed
        log.info("Log level changed to %s" % logging.getLevelName(level))
        # NOTE(review): the final `log.setLevel(level)` call appears to be
        # elided from this excerpt -- confirm against the full file.
    def setAlarm(self, fn, dt, db_name, *args, **kwargs):
        # Schedule bound method `fn` to run at absolute epoch time `dt`
        # (seconds, as returned by time.time()) using a threading.Timer,
        # tracked per database in self._timers.
        wait = dt - time.time()
        self._logger.notifyChannel('timers', LOG_DEBUG, "Job scheduled in %.3g seconds for %s.%s" % (wait, fn.im_class.__name__, fn.func_name))
        timer = threading.Timer(wait, fn, args, kwargs)
        # NOTE(review): the `timer.start()` call (and any `if wait` guard)
        # appears to be elided from this excerpt -- confirm.
        self._timers.setdefault(db_name, []).append(timer)

        # Opportunistically prune dead timers from every database's list.
        # NOTE(review): this removes items from a list while iterating it,
        # which can skip entries -- pre-existing behaviour, left untouched.
        for db in self._timers:
            for timer in self._timers[db]:
                if not timer.isAlive():
                    self._timers[db].remove(timer)
    def cancel(cls, db_name):
        """Cancel all timers for a given database. If None passed, all timers are cancelled"""
        # presumably a @classmethod (decorator elided above) -- TODO confirm.
        for db in cls._timers:
            if db_name is None or db == db_name:
                for timer in cls._timers[db]:
                    # NOTE(review): the `timer.cancel()` call inside this loop
                    # is elided from this excerpt.
    # NOTE(review): the `class Server` header, its `__servers`/`__is_started`
    # class attributes, the @classmethod decorators and most method headers
    # (`__init__`, `start`, `stop`, `stats`, `startAll`, `quitAll`,
    # `allStats`) are elided from this excerpt.  The fragments below are
    # kept as-is and only annotated; do not restructure without the full file.
    """ Generic interface for all servers with an event loop etc.
    Override this to impement http, net-rpc etc. servers.

    Servers here must have threaded behaviour. start() must not block,
    """
        # (__init__ fragment) every instance must be created before startAll()
        # is invoked; afterwards, registration is refused.
        if Server.__is_started:
            raise Exception('All instances of servers must be inited before the startAll()')
        Server.__servers.append(self)

        # (start fragment) stub implementation, to be overridden.
        print "called stub Server.start"

        # (stop fragment) stub implementation, to be overridden.
        print "called stub Server.stop"

        """ This function should return statistics about the server """
        return "%s: No statistics" % str(self.__class__)

        # (startAll fragment) start every registered server and mark the
        # class as started.
        Logger().notifyChannel("services", LOG_INFO,
            "Starting %d services" % len(cls.__servers))
        for srv in cls.__servers:
        cls.__is_started = True

        # (quitAll fragment) stop every registered server, guarded so a
        # second call is a no-op.
        if not cls.__is_started:
        Logger().notifyChannel("services", LOG_INFO,
            "Stopping %d services" % len(cls.__servers))
        for srv in cls.__servers:
        cls.__is_started = False

        # (allStats fragment) build a human-readable status report from
        # each server's stats() string.
        res += "Servers started\n"
        res += "Servers stopped\n"
        for srv in cls.__servers:
            res += srv.stats() + "\n"
class OpenERPDispatcherException(Exception):
    """Carries a dispatch-time exception together with its rendered traceback.

    Raised by OpenERPDispatcher.dispatch() so the transport layer can send
    both the original error and its formatted traceback back to the client.
    """

    def __init__(self, exception, traceback):
        # Keep the original exception object and the pre-formatted traceback
        # string as plain attributes; callers read them directly.
        self.exception, self.traceback = exception, traceback
class OpenERPDispatcher:
    """Mixin for RPC transports: logs each call and routes it to the named
    ExportService, wrapping failures in OpenERPDispatcherException.

    NOTE(review): inside dispatch(), the `try:` opener, the success-path
    `return`, the `except Exception, e:` line and an `import pdb` are
    elided from this excerpt -- confirm against the full file.
    """

    def log(self, title, msg):
        # Pretty-print any payload on the dedicated RPC-debug channel.
        from pprint import pformat
        Logger().notifyChannel('%s' % title, LOG_DEBUG_RPC, pformat(msg))

    def dispatch(self, service_name, method, params):
        # Trace the incoming call before executing it.
        self.log('service', service_name)
        self.log('method', method)
        self.log('params', params)
        # auth_provider is optional; transports without authentication
        # simply pass None through to the service.
        auth = getattr(self, 'auth_provider', None)
        result = ExportService.getService(service_name).dispatch(method, auth, params)
        self.log('result', result)
        # We shouldn't marshall None,
        # (failure path -- the `except Exception, e:` opener is elided)
        self.log('exception', tools.exception_to_unicode(e))
        # Prefer a traceback attached to the exception itself; otherwise
        # fall back to the interpreter's current exc_info.
        tb = getattr(e, 'traceback', sys.exc_info())
        tb_s = "".join(traceback.format_exception(*tb))
        if tools.config['debug_mode']:
            # Drop into the post-mortem debugger on the failing frame.
            pdb.post_mortem(tb[2])
        # Re-raise with both the error and its rendered traceback attached.
        raise OpenERPDispatcherException(e, tb_s)
390 # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: