1 #-----------------------------------------------------------
2 # Threaded, Gevent and Prefork Servers
3 #-----------------------------------------------------------
21 import werkzeug.serving
23 if os.name == 'posix':
24 # Unix only for workers
31 # Optional process names for workers
33 from setproctitle import setproctitle
35 setproctitle = lambda x: None
38 from openerp.modules.registry import RegistryManager
39 from openerp.release import nt_service_name
40 import openerp.tools.config as config
41 from openerp.tools.misc import stripped_sys_argv, dumpstacks
43 _logger = logging.getLogger(__name__)
45 SLEEP_INTERVAL = 60 # 1 min
47 #----------------------------------------------------------
48 # Werkzeug WSGI servers patched
49 #----------------------------------------------------------
class LoggingBaseWSGIServerMixIn(object):
    """Mixin replacing werkzeug's default stderr traceback dump: broken
    pipes from disconnecting clients are ignored, every other unhandled
    exception is routed to the server log."""
    def handle_error(self, request, client_address):
        t, e, _ = sys.exc_info()
        if t == socket.error and e.errno == errno.EPIPE:
            # broken pipe, ignore error: the client closed the connection
            # before the full response was written, nothing useful to log
            return
        _logger.exception('Exception happened during processing of request from %s', client_address)
class BaseWSGIServerNoBind(LoggingBaseWSGIServerMixIn, werkzeug.serving.BaseWSGIServer):
    """ werkzeug Base WSGI Server patched to skip socket binding. PreforkServer
    use this class, sets the socket and calls the process_request() manually
    """
    def __init__(self, app):
        # host/port are dummy values: the real listen socket is owned by
        # PreforkServer and assigned onto self.socket before each request
        werkzeug.serving.BaseWSGIServer.__init__(self, "1", "1", app)

    def server_bind(self):
        # we dont bind because we use the listen socket of PreforkServer#socket
        # instead we close the socket
        self.socket.close()

    def server_activate(self):
        # dont listen as we use PreforkServer#socket
        pass
class RequestHandler(werkzeug.serving.WSGIRequestHandler):
    def setup(self):
        # flag the current thread as handling a http request by renaming it,
        # so log lines and dumpstacks() output identify request threads
        super(RequestHandler, self).setup()
        me = threading.currentThread()
        me.name = 'openerp.service.http.request.%s' % (me.ident,)
81 # _reexec() should set LISTEN_* to avoid connection refused during reload time. It
82 # should also work with systemd socket activation. This is currently untested
class ThreadedWSGIServerReloadable(LoggingBaseWSGIServerMixIn, werkzeug.serving.ThreadedWSGIServer):
    """ werkzeug Threaded WSGI Server patched to allow reusing a listen socket
    given by the environement, this is used by autoreload to keep the listen
    socket open when a reload happens.
    """
    def __init__(self, host, port, app):
        super(ThreadedWSGIServerReloadable, self).__init__(host, port, app,
                                                           handler=RequestHandler)

    def server_bind(self):
        # Reuse a socket handed down via the LISTEN_FDS/LISTEN_PID protocol
        # (systemd socket activation / _reexec); otherwise bind normally.
        envfd = os.environ.get('LISTEN_FDS')
        if envfd and os.environ.get('LISTEN_PID') == str(os.getpid()):
            self.reload_socket = True
            self.socket = socket.fromfd(int(envfd), socket.AF_INET, socket.SOCK_STREAM)
            # should we os.close(int(envfd)) ? it seem python duplicate the fd.
        else:
            self.reload_socket = False
            super(ThreadedWSGIServerReloadable, self).server_bind()

    def server_activate(self):
        # skip listen() on an inherited socket: it is already listening
        if not self.reload_socket:
            super(ThreadedWSGIServerReloadable, self).server_activate()
#----------------------------------------------------------
# AutoReload watcher
#----------------------------------------------------------
class AutoReload(object):
    """Watch the addons folders with pyinotify and react to .py / .xml file
    changes (used by the --auto-reload option).

    NOTE(review): several lines of this class appear elided in this view
    (loop/exception structure, the body of process_data, the run() method
    header); the code below reproduces only what is visible.
    """
    def __init__(self, server):
        class EventHandler(pyinotify.ProcessEvent):
            def __init__(self, autoreload):
                self.autoreload = autoreload

            def process_IN_CREATE(self, event):
                _logger.debug('File created: %s', event.pathname)
                self.autoreload.files[event.pathname] = 1

            def process_IN_MODIFY(self, event):
                _logger.debug('File modified: %s', event.pathname)
                self.autoreload.files[event.pathname] = 1

        self.wm = pyinotify.WatchManager()
        self.handler = EventHandler(self)
        self.notifier = pyinotify.Notifier(self.wm, self.handler, timeout=0)
        mask = pyinotify.IN_MODIFY | pyinotify.IN_CREATE  # IN_MOVED_FROM, IN_MOVED_TO ?
        # recursively watch every addons path for creations/modifications
        for path in openerp.modules.module.ad_paths:
            _logger.info('Watching addons folder %s', path)
            self.wm.add_watch(path, mask, rec=True)

    def process_data(self, files):
        # collect touched .xml data files and mark their owning module
        # for update on the next restart
        xml_files = [i for i in files if i.endswith('.xml')]
        for path in openerp.modules.module.ad_paths:
            if i.startswith(path):
                # find out which addons path the file belongs to
                # and extract its module name
                right = i[len(path) + 1:].split('/')
                self.modules[module] = 1
        _logger.info('autoreload: xml change detected, autoreload activated')

    def process_python(self, files):
        # process python changes: syntax-check each touched .py before
        # deciding whether to trigger a reload
        py_files = [i for i in files if i.endswith('.py')]
        # TODO keep python errors until they are ok
        source = open(i, 'rb').read() + '\n'
        compile(source, i, 'exec')
        _logger.info('autoreload: python code change detected, errors found')
        _logger.info('autoreload: SyntaxError %s', i)
        _logger.info('autoreload: python code updated, autoreload activated')

    def check_thread(self):
        # Check if some files have been touched in the addons path.
        # If true, check if the touched file belongs to an installed module
        # in any of the database used in the registry manager.
        while self.notifier.check_events(1000):
            self.notifier.read_events()
            self.notifier.process_events()
        l = self.files.keys()
        self.process_python(l)
        # NOTE(review): the run() method header is elided here; the two lines
        # below presumably start check_thread as a daemon thread — confirm.
        t = threading.Thread(target=self.check_thread)
        _logger.info('AutoReload watcher running')
193 #----------------------------------------------------------
194 # Servers: Threaded, Gevented and Prefork
195 #----------------------------------------------------------
197 class CommonServer(object):
198 def __init__(self, app):
199 # TODO Change the xmlrpc_* options to http_*
202 self.interface = config['xmlrpc_interface'] or '0.0.0.0'
203 self.port = config['xmlrpc_port']
205 self.pid = os.getpid()
207 def close_socket(self, sock):
208 """ Closes a socket instance cleanly
209 :param sock: the network socket to close
210 :type sock: socket.socket
213 sock.shutdown(socket.SHUT_RDWR)
214 except socket.error, e:
215 # On OSX, socket shutdowns both sides if any side closes it
216 # causing an error 57 'Socket is not connected' on shutdown
217 # of the other side (or something), see
218 # http://bugs.python.org/issue4397
219 # note: stdlib fixed test, not behavior
220 if e.errno != errno.ENOTCONN or platform.system() not in ['Darwin', 'Windows']:
class ThreadedServer(CommonServer):
    """Serve HTTP with one thread per request plus daemon cron threads,
    shutting down gracefully on SIGINT/SIGTERM and restarting on SIGHUP.

    NOTE(review): many lines of this class appear elided in this view
    (thread starts, try/except scaffolding, the stop() method header);
    the code below reproduces only what is visible.
    """
    def __init__(self, app):
        super(ThreadedServer, self).__init__(app)
        self.main_thread_id = threading.currentThread().ident
        # Variable keeping track of the number of calls to the signal handler defined
        # below. This variable is monitored by ``quit_on_signals()``.
        self.quit_signals_received = 0

    def signal_handler(self, sig, frame):
        if sig in [signal.SIGINT, signal.SIGTERM]:
            # shutdown on kill -INT or -TERM
            self.quit_signals_received += 1
            if self.quit_signals_received > 1:
                # logging.shutdown was already called at this point.
                sys.stderr.write("Forced shutdown.\n")
        elif sig == signal.SIGHUP:
            # restart on kill -HUP
            openerp.phoenix = True
            self.quit_signals_received += 1

    def cron_thread(self, number):
        # each cron thread polls all registries, offset by its number so
        # the threads do not all wake at the same instant
        time.sleep(SLEEP_INTERVAL + number)  # Steve Reich timing style
        registries = openerp.modules.registry.RegistryManager.registries
        _logger.debug('cron%d polling for jobs', number)
        for db_name, registry in registries.iteritems():
            while registry.ready:
                acquired = openerp.addons.base.ir.ir_cron.ir_cron._acquire_job(db_name)

    def cron_spawn(self):
        """ Start the above runner function in a daemon thread.

        The thread is a typical daemon thread: it will never quit and must be
        terminated when the main process exits - with no consequence (the processing
        threads it spawns are not marked daemon).
        """
        # Force call to strptime just before starting the cron thread
        # to prevent time.strptime AttributeError within the thread.
        # See: http://bugs.python.org/issue7980
        datetime.datetime.strptime('2012-01-01', '%Y-%m-%d')
        for i in range(openerp.tools.config['max_cron_threads']):
            t = threading.Thread(target=target, name="openerp.service.cron.cron%d" % i)
            _logger.debug("cron%d started!" % i)

    def http_thread(self):
        # NOTE(review): the wrapping `def app(e, s):` line appears elided here
        return self.app(e, s)
        self.httpd = ThreadedWSGIServerReloadable(self.interface, self.port, app)
        self.httpd.serve_forever()

    def http_spawn(self):
        t = threading.Thread(target=self.http_thread, name="openerp.service.httpd")
        _logger.info('HTTP service (werkzeug) running on %s:%s', self.interface, self.port)

    def start(self, stop=False):
        _logger.debug("Setting signal handlers")
        if os.name == 'posix':
            signal.signal(signal.SIGINT, self.signal_handler)
            signal.signal(signal.SIGTERM, self.signal_handler)
            signal.signal(signal.SIGCHLD, self.signal_handler)
            signal.signal(signal.SIGHUP, self.signal_handler)
            signal.signal(signal.SIGQUIT, dumpstacks)
        elif os.name == 'nt':
            win32api.SetConsoleCtrlHandler(lambda sig: self.signal_handler(sig, None), 1)
        test_mode = config['test_enable'] or config['test_file']
        if not stop or test_mode:
            # some tests need the http daemon to be available...
        # only relevant if we are not in "--stop-after-init" mode

        # NOTE(review): the `def stop(self):` header appears elided here
        """ Shutdown the WSGI server. Wait for non daemon threads.
        """
        _logger.info("Initiating shutdown")
        _logger.info("Hit CTRL-C again or send a second signal to force the shutdown.")
        self.httpd.shutdown()
        self.close_socket(self.httpd.socket)
        # Manually join() all threads before calling sys.exit() to allow a second signal
        # to trigger _force_quit() in case some non-daemon threads won't exit cleanly.
        # threading.Thread.join() should not mask signals (at least in python 2.5).
        me = threading.currentThread()
        _logger.debug('current thread: %r', me)
        for thread in threading.enumerate():
            _logger.debug('process %r (%r)', thread, thread.isDaemon())
            if thread != me and not thread.isDaemon() and thread.ident != self.main_thread_id:
                while thread.isAlive():
                    _logger.debug('join and sleep')
                    # Need a busyloop here as thread.join() masks signals
                    # and would prevent the forced shutdown.
        openerp.modules.registry.RegistryManager.delete_all()

    def run(self, preload=None, stop=False):
        """ Start the http server and the cron thread then wait for a signal.

        The first SIGINT or SIGTERM signal will initiate a graceful shutdown while
        a second one if any will force an immediate exit.
        """
        self.start(stop=stop)
        rc = preload_registries(preload)
        # Wait for a first signal to be handled. (time.sleep will be interrupted
        # by the signal handler.) The try/except is for the win32 case.
        while self.quit_signals_received == 0:
        except KeyboardInterrupt:
        os.kill(self.pid, signal.SIGHUP)
class GeventServer(CommonServer):
    """Evented (gevent) server used for the longpolling endpoint, run as a
    separate process spawned by PreforkServer.

    NOTE(review): the watch_parent loop and the start()/run() scaffolding
    appear elided in this view; the code below reproduces only what is
    visible.
    """
    def __init__(self, app):
        super(GeventServer, self).__init__(app)
        # longpolling listens on its own port, distinct from xmlrpc_port
        self.port = config['longpolling_port']

    def watch_parent(self, beat=4):
        # suicide when re-parented (the prefork master died)
        if ppid != os.getppid():
            _logger.info("LongPolling (%s) Parent changed", pid)
            os.kill(pid, signal.SIGTERM)

        from gevent.wsgi import WSGIServer
        if os.name == 'posix':
            signal.signal(signal.SIGQUIT, dumpstacks)
        gevent.spawn(self.watch_parent)
        self.httpd = WSGIServer((self.interface, self.port), self.app)
        _logger.info('Evented Service (longpolling) running on %s:%s', self.interface, self.port)
        self.httpd.serve_forever()
        _logger.exception("Evented Service (longpolling): uncaught error during main loop")

    def run(self, preload, stop):
class PreforkServer(CommonServer):
    """ Multiprocessing inspired by (g)unicorn.
    PreforkServer (aka Multicorn) currently uses accept(2) as dispatching
    method between workers but we plan to replace it by a more intelligent
    dispatcher that will parse the first HTTP request line.

    NOTE(review): many lines of this class appear elided in this view
    (the pipe_new/sleep/start method headers, try/except and else branches);
    the code below reproduces only what is visible.
    """
    def __init__(self, app):
        self.address = (config['xmlrpc_interface'] or '0.0.0.0', config['xmlrpc_port'])
        self.population = config['workers']
        self.timeout = config['limit_time_real']
        self.limit_request = config['limit_request']
        self.pid = os.getpid()
        # maps: pid -> worker object, kept per worker kind as well
        self.workers_http = {}
        self.workers_cron = {}
        self.long_polling_pid = None

        # NOTE(review): pipe_new() header appears elided here; these lines
        # mark both pipe fds non-blocking and close-on-exec
        flags = fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK
        fcntl.fcntl(fd, fcntl.F_SETFL, flags)
        flags = fcntl.fcntl(fd, fcntl.F_GETFD) | fcntl.FD_CLOEXEC
        fcntl.fcntl(fd, fcntl.F_SETFD, flags)

    def pipe_ping(self, pipe):
        # wake up a select() sleeping on the read end of the pipe
        os.write(pipe[1], '.')
        if e.errno not in [errno.EAGAIN, errno.EINTR]:

    def signal_handler(self, sig, frame):
        # queue the signal for synchronous handling in process_signals();
        # cap the queue at 5 except for SIGCHLD which must never be dropped
        if len(self.queue) < 5 or sig == signal.SIGCHLD:
            self.queue.append(sig)
            self.pipe_ping(self.pipe)
            _logger.warn("Dropping signal: %s", sig)

    def worker_spawn(self, klass, workers_registry):
        self.workers[pid] = worker
        workers_registry[pid] = worker

    def long_polling_spawn(self):
        # re-exec ourselves as the standalone gevent longpolling process
        nargs = stripped_sys_argv()
        cmd = os.path.join(os.path.dirname(cmd), "openerp-gevent")
        popen = subprocess.Popen([sys.executable] + nargs)
        self.long_polling_pid = popen.pid

    def worker_pop(self, pid):
        # forget a dead child, whichever registry it lives in
        if pid == self.long_polling_pid:
            self.long_polling_pid = None
        if pid in self.workers:
            _logger.debug("Worker (%s) unregistered", pid)
            self.workers_http.pop(pid, None)
            self.workers_cron.pop(pid, None)
            u = self.workers.pop(pid)

    def worker_kill(self, pid, sig):
        # ESRCH: the process is already gone
        if e.errno == errno.ESRCH:

    def process_signals(self):
        while len(self.queue):
            sig = self.queue.pop(0)
            if sig in [signal.SIGINT, signal.SIGTERM]:
                raise KeyboardInterrupt
            elif sig == signal.SIGHUP:
                # restart on kill -HUP
                openerp.phoenix = True
                raise KeyboardInterrupt
            elif sig == signal.SIGQUIT:
                # dump stacks on kill -3
            elif sig == signal.SIGTTIN:
                # increase number of workers
            elif sig == signal.SIGTTOU:
                # decrease number of workers

    def process_zombie(self):
        # reap as many dead children as possible without blocking
        wpid, status = os.waitpid(-1, os.WNOHANG)
        # exit status 3 is the worker's "unrecoverable error" convention
        if (status >> 8) == 3:
            msg = "Critial worker error (%s)"
            _logger.critical(msg, wpid)
            raise Exception(msg % wpid)
        self.worker_pop(wpid)
        if e.errno == errno.ECHILD:

    def process_timeout(self):
        # SIGKILL workers whose watchdog has not been fed within the timeout
        for (pid, worker) in self.workers.items():
            if worker.watchdog_timeout is not None and \
                    (now - worker.watchdog_time) >= worker.watchdog_timeout:
                _logger.error("Worker (%s) timeout", pid)
                self.worker_kill(pid, signal.SIGKILL)

    def process_spawn(self):
        # keep the configured population of http and cron workers plus one
        # longpolling process alive
        while len(self.workers_http) < self.population:
            self.worker_spawn(WorkerHTTP, self.workers_http)
        while len(self.workers_cron) < config['max_cron_threads']:
            self.worker_spawn(WorkerCron, self.workers_cron)
        if not self.long_polling_pid:
            self.long_polling_spawn()

        # NOTE(review): sleep() header appears elided here
        # map of fd -> worker
        fds = dict([(w.watchdog_pipe[0], w) for k, w in self.workers.items()])
        fd_in = fds.keys() + [self.pipe[0]]
        # check for ping or internal wakeups
        ready = select.select(fd_in, [], [], self.beat)
        # update worker watchdogs
        fds[fd].watchdog_time = time.time()
        # drain the pipe so the next select() blocks again
        while os.read(fd, 1):
        if e.errno not in [errno.EAGAIN]:
        except select.error, e:
            if e[0] not in [errno.EINTR]:

        # NOTE(review): start() header appears elided here
        # wakeup pipe, python doesnt throw EINTR when a syscall is interrupted
        # by a signal simulating a pseudo SA_RESTART. We write to a pipe in the
        # signal handler to overcome this behaviour
        self.pipe = self.pipe_new()
        # set signal handlers
        signal.signal(signal.SIGINT, self.signal_handler)
        signal.signal(signal.SIGTERM, self.signal_handler)
        signal.signal(signal.SIGHUP, self.signal_handler)
        signal.signal(signal.SIGCHLD, self.signal_handler)
        signal.signal(signal.SIGTTIN, self.signal_handler)
        signal.signal(signal.SIGTTOU, self.signal_handler)
        signal.signal(signal.SIGQUIT, dumpstacks)
        # listen to socket, shared with the workers through fork()
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.socket.setblocking(0)
        self.socket.bind(self.address)
        self.socket.listen(8 * self.population)

    def stop(self, graceful=True):
        if self.long_polling_pid is not None:
            # FIXME make longpolling process handle SIGTERM correctly
            self.worker_kill(self.long_polling_pid, signal.SIGKILL)
            self.long_polling_pid = None
        _logger.info("Stopping gracefully")
        # give workers up to limit_time_real seconds to finish, then force
        limit = time.time() + self.timeout
        for pid in self.workers.keys():
            self.worker_kill(pid, signal.SIGTERM)
        while self.workers and time.time() < limit:
            self.process_zombie()
        _logger.info("Stopping forcefully")
        for pid in self.workers.keys():
            self.worker_kill(pid, signal.SIGTERM)

    def run(self, preload, stop):
        rc = preload_registries(preload)
        # Empty the cursor pool, we dont want them to be shared among forked workers.
        openerp.sql_db.close_all()
        _logger.debug("Multiprocess starting")
        #_logger.debug("Multiprocess beat (%s)",time.time())
        self.process_signals()
        self.process_zombie()
        self.process_timeout()
        except KeyboardInterrupt:
            _logger.debug("Multiprocess clean stop")
class Worker(object):
    """Base class of a prefork worker process: watchdog bookkeeping,
    resource limits and the main accept/work loop.

    NOTE(review): several method headers (close_wakeup_pipe, sleep, start,
    run) and try/except scaffolding appear elided in this view; the code
    below reproduces only what is visible.
    """
    def __init__(self, multi):
        self.watchdog_time = time.time()
        self.watchdog_pipe = multi.pipe_new()
        # Can be set to None if no watchdog is desired.
        self.watchdog_timeout = multi.timeout
        self.ppid = os.getpid()
        # should we rename into lifetime ?
        self.request_max = multi.limit_request
        self.request_count = 0

    def setproctitle(self, title=""):
        setproctitle('openerp: %s %s %s' % (self.__class__.__name__, self.pid, title))

        # NOTE(review): close_wakeup_pipe() header appears elided here
        os.close(self.watchdog_pipe[0])
        os.close(self.watchdog_pipe[1])

    def signal_handler(self, sig, frame):

        # NOTE(review): sleep() header appears elided here; wait for a
        # connection or the heartbeat interval
        select.select([self.multi.socket], [], [], self.multi.beat)
        except select.error, e:
            if e[0] not in [errno.EINTR]:

    def process_limit(self):
        # If our parent changed, suicide
        if self.ppid != os.getppid():
            _logger.info("Worker (%s) Parent changed", self.pid)
        if self.request_count >= self.request_max:
            _logger.info("Worker (%d) max request (%s) reached.", self.pid, self.request_count)
        # Reset the worker if it consumes too much memory (e.g. caused by a memory leak).
        rss, vms = psutil.Process(os.getpid()).get_memory_info()
        if vms > config['limit_memory_soft']:
            _logger.info('Worker (%d) virtual memory limit (%s) reached.', self.pid, vms)
            self.alive = False # Commit suicide after the request.
        # VMS and RLIMIT_AS are the same thing: virtual memory, a.k.a. address space
        soft, hard = resource.getrlimit(resource.RLIMIT_AS)
        resource.setrlimit(resource.RLIMIT_AS, (config['limit_memory_hard'], hard))
        # SIGXCPU (exceeded CPU time) signal handler will raise an exception.
        r = resource.getrusage(resource.RUSAGE_SELF)
        cpu_time = r.ru_utime + r.ru_stime
        def time_expired(n, stack):
            # NOTE(review): format string has two placeholders but only one
            # argument — self.pid is missing; logging will fail here
            _logger.info('Worker (%d) CPU time limit (%s) reached.', config['limit_time_cpu'])
            # We dont suicide in such case
            raise Exception('CPU time limit exceeded.')
        signal.signal(signal.SIGXCPU, time_expired)
        soft, hard = resource.getrlimit(resource.RLIMIT_CPU)
        resource.setrlimit(resource.RLIMIT_CPU, (cpu_time + config['limit_time_cpu'], hard))

    def process_work(self):

        # NOTE(review): start() header appears elided here
        self.pid = os.getpid()
        _logger.info("Worker %s (%s) alive", self.__class__.__name__, self.pid)
        # Reseed the random number generator
        # Prevent fd inheritance: close_on_exec
        flags = fcntl.fcntl(self.multi.socket, fcntl.F_GETFD) | fcntl.FD_CLOEXEC
        fcntl.fcntl(self.multi.socket, fcntl.F_SETFD, flags)
        # reset blocking status
        self.multi.socket.setblocking(0)
        signal.signal(signal.SIGINT, self.signal_handler)
        signal.signal(signal.SIGTERM, signal.SIG_DFL)
        signal.signal(signal.SIGCHLD, signal.SIG_DFL)

        # NOTE(review): run() header appears elided here; feed the watchdog
        self.multi.pipe_ping(self.watchdog_pipe)
        _logger.info("Worker (%s) exiting. request_count: %s.", self.pid, self.request_count)
        _logger.exception("Worker (%s) Exception occured, exiting..." % self.pid)
        # should we use 3 to abort everything ?
class WorkerHTTP(Worker):
    """ HTTP Request workers

    NOTE(review): try/except scaffolding and the start() header appear
    elided in this view; the code below reproduces only what is visible.
    """
    def process_request(self, client, addr):
        client.setblocking(1)
        client.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        # Prevent fd inheritance: close_on_exec
        flags = fcntl.fcntl(client, fcntl.F_GETFD) | fcntl.FD_CLOEXEC
        fcntl.fcntl(client, fcntl.F_SETFD, flags)
        # do request using BaseWSGIServerNoBind monkey patched with socket
        self.server.socket = client
        # tolerate broken pipe when the http client closes the socket before
        # receiving the full reply
        self.server.process_request(client, addr)
        if e.errno != errno.EPIPE:
        self.request_count += 1

    def process_work(self):
        client, addr = self.multi.socket.accept()
        self.process_request(client, addr)
        except socket.error, e:
            # EAGAIN: no pending connection; ECONNABORTED: client gave up
            if e[0] not in (errno.EAGAIN, errno.ECONNABORTED):

        # NOTE(review): start() header appears elided here
        self.server = BaseWSGIServerNoBind(self.multi.app)
class WorkerCron(Worker):
    """Cron worker: each process_work() call handles the jobs of a single
    database, cycling through all databases via db_index.

    NOTE(review): the sleep()/_db_list()/start() method headers and some
    control-flow lines appear elided in this view; the code below
    reproduces only what is visible.
    """
    def __init__(self, multi):
        super(WorkerCron, self).__init__(multi)
        # process_work() below process a single database per call.
        # The variable db_index is keeping track of the next database to

        # NOTE(review): sleep() header appears elided here
        # Really sleep once all the databases have been processed.
        if self.db_index == 0:
            interval = SLEEP_INTERVAL + self.pid % 10  # chorus effect

        # NOTE(review): _db_list() header appears elided here
        if config['db_name']:
            db_names = config['db_name'].split(',')
            db_names = openerp.service.db.exp_list(True)

    def process_work(self):
        rpc_request = logging.getLogger('openerp.netsvc.rpc.request')
        rpc_request_flag = rpc_request.isEnabledFor(logging.DEBUG)
        _logger.debug("WorkerCron (%s) polling for jobs", self.pid)
        db_names = self._db_list()
        # round-robin over the available databases
        self.db_index = (self.db_index + 1) % len(db_names)
        db_name = db_names[self.db_index]
        self.setproctitle(db_name)
        start_time = time.time()
        start_rss, start_vms = psutil.Process(os.getpid()).get_memory_info()
        import openerp.addons.base as base
        base.ir.ir_cron.ir_cron._acquire_job(db_name)
        openerp.modules.registry.RegistryManager.delete(db_name)
        # dont keep cursors in multi database mode
        if len(db_names) > 1:
            openerp.sql_db.close_db(db_name)
        run_time = time.time() - start_time
        end_rss, end_vms = psutil.Process(os.getpid()).get_memory_info()
        vms_diff = (end_vms - start_vms) / 1024
        logline = '%s time:%.3fs mem: %sk -> %sk (diff: %sk)' % \
            (db_name, run_time, start_vms / 1024, end_vms / 1024, vms_diff)
        _logger.debug("WorkerCron (%s) %s", self.pid, logline)
        self.request_count += 1
        # NOTE(review): "dabatases" typo lives in the runtime string below;
        # left untouched here since this edit changes comments only
        if self.request_count >= self.request_max and self.request_max < len(db_names):
            _logger.error("There are more dabatases to process than allowed "
                          "by the `limit_request` configuration variable: %s more.",
                          len(db_names) - self.request_max)

        # NOTE(review): start() header appears elided here
        os.nice(10)  # mommy always told me to be nice with others...
        self.multi.socket.close()
838 #----------------------------------------------------------
839 # start/stop public api
840 #----------------------------------------------------------
def load_server_wide_modules():
    """Import every module listed in openerp.conf.server_wide_modules.

    NOTE(review): the try/except around the import and the assignment of
    ``msg`` appear elided in this view.
    """
    for m in openerp.conf.server_wide_modules:
        openerp.modules.module.load_openerp_module(m)
The `web` module is provided by the addons found in the `openerp-web` project.
Maybe you forgot to add those addons in your addons_path configuration."""
        _logger.exception('Failed to load server-wide module `%s`.%s', m, msg)
def _reexec(updated_modules=None):
    """reexecute openerp-server process with (nearly) the same arguments

    :param updated_modules: optional list of module names to upgrade on
        restart, forwarded through the ``-u`` command-line flag
    """
    if openerp.tools.osutil.is_running_as_nt_service():
        subprocess.call('net stop {0} && net start {0}'.format(nt_service_name), shell=True)
    exe = os.path.basename(sys.executable)
    args = stripped_sys_argv()
    if updated_modules:
        # only append -u when modules were actually given:
        # ','.join(None) would raise a TypeError on the default call
        args += ["-u", ','.join(updated_modules)]
    if not args or args[0] != exe:
        # os.execv needs the executable name as argv[0]
        args.insert(0, exe)
    os.execv(sys.executable, args)
def load_test_file_yml(registry, test_file):
    """Load a YAML test file against *registry*, committing or rolling
    back depending on the ``test_commit`` option.

    NOTE(review): the commit/rollback branch structure appears elided in
    this view; the code below reproduces only what is visible.
    """
    with registry.cursor() as cr:
        openerp.tools.convert_yaml_import(cr, 'base', file(test_file), 'test', {}, 'init')
        if config['test_commit']:
            _logger.info('test %s has been commited', test_file)
            _logger.info('test %s has been rollbacked', test_file)
def load_test_file_py(registry, test_file):
    """Run the unittest2 tests of the already-imported module whose file
    path matches *test_file*, reporting results on the registry's
    assertion report.

    NOTE(review): the loop body adding tests to the suite appears elided
    in this view.
    """
    # Locate python module based on its filename and run the tests
    test_path, _ = os.path.splitext(os.path.abspath(test_file))
    for mod_name, mod_mod in sys.modules.items():
        mod_path, _ = os.path.splitext(getattr(mod_mod, '__file__', ''))
        if test_path == mod_path:
            suite = unittest2.TestSuite()
            for t in unittest2.TestLoader().loadTestsFromModule(mod_mod):
            _logger.log(logging.INFO, 'running tests %s.', mod_mod.__name__)
            stream = openerp.modules.module.TestStream()
            result = unittest2.TextTestRunner(verbosity=2, stream=stream).run(suite)
            success = result.wasSuccessful()
            if hasattr(registry._assertion_report, 'report_result'):
                registry._assertion_report.report_result(success)
            _logger.error('%s: at least one error occurred in a test', test_file)
def preload_registries(dbnames):
    """ Preload registries, possibly running a test file for each.

    NOTE(review): the try/except around each database load and the
    return-code handling appear elided in this view.
    """
    # TODO: move all config checks to args dont check tools.config here
    config = openerp.tools.config
    test_file = config['test_file']
    dbnames = dbnames or []
    for dbname in dbnames:
        update_module = config['init'] or config['update']
        registry = RegistryManager.new(dbname, update_module=update_module)
        # run test_file if provided
        _logger.info('loading test file %s', test_file)
        with openerp.api.Environment.manage():
            if test_file.endswith('yml'):
                load_test_file_yml(registry, test_file)
            elif test_file.endswith('py'):
                load_test_file_py(registry, test_file)
        if registry._assertion_report.failures:
        _logger.critical('Failed to initialize database `%s`.', dbname, exc_info=True)
def start(preload=None, stop=False):
    """ Start the openerp http server and cron processor.

    NOTE(review): the server-type selection branches (`if config['evented']`
    etc.), the autoreload thread start and the phoenix restart call appear
    elided in this view; the code below reproduces only what is visible.
    """
    load_server_wide_modules()
    server = GeventServer(openerp.service.wsgi_server.application)
    elif config['workers']:
        server = PreforkServer(openerp.service.wsgi_server.application)
        server = ThreadedServer(openerp.service.wsgi_server.application)
    if config['auto_reload']:
        autoreload = AutoReload(server)
    rc = server.run(preload, stop)
    # like the legend of the phoenix, all ends with beginnings
    if getattr(openerp, 'phoenix', False):
        if config['auto_reload']:
            modules = autoreload.modules.keys()
    return rc if rc else 0
def restart():
    """ Restart the server

    On NT the service is restarted via _reexec(); on posix a SIGHUP is sent
    to the main process, which re-execs itself after a graceful shutdown.
    """
    if os.name == 'nt':
        # run in a thread to let the current thread return response to the caller.
        threading.Thread(target=_reexec).start()
    else:
        os.kill(server.pid, signal.SIGHUP)
959 # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: