import os
import sys
import threading
+import time
import openerp
import openerp.modules.db
if kind in ('demo', 'test'):
threading.currentThread().testing = True
for filename in _get_files_of_kind(kind):
- _logger.info("module %s: loading %s", module_name, filename)
+ _logger.info("loading %s/%s", module_name, filename)
noupdate = False
if kind in ('demo', 'demo_xml') or (filename.endswith('.csv') and kind in ('init', 'init_xml')):
noupdate = True
registry.fields_by_model.setdefault(field['model'], []).append(field)
# register, instantiate and initialize models for each modules
+ ta0 = time.time()
+
for index, package in enumerate(graph):
module_name = package.name
module_id = package.id
if skip_modules and module_name in skip_modules:
continue
- _logger.debug('module %s: loading objects', package.name)
+ tm0 = time.time()
+
migrations.migrate_module(package, 'pre')
load_openerp_module(package.name)
if hasattr(package, kind):
delattr(package, kind)
+ _logger.log(25, "%s loaded in %.2fs", package.name, time.time() - tm0)
+
registry._init_modules.add(package.name)
cr.commit()
+ _logger.log(25, "%s modules loaded in %.2fs", len(graph), time.time() - ta0)
+
# The query won't be valid for models created later (i.e. custom model
# created after the registry has been loaded), so empty its result.
registry.fields_by_model = None
# STEP 9: Run the post-install tests
cr.commit()
+
+ ta0 = time.time()
if openerp.tools.config['test_enable']:
cr.execute("SELECT name FROM ir_module_module WHERE state='installed'")
for module_name in cr.fetchall():
report.record_result(openerp.modules.module.run_unit_tests(module_name[0], cr.dbname, position=runs_post_install))
+ _logger.log(25, "All post-tested in %.2fs", time.time() - ta0)
finally:
cr.close()
import os
import re
import sys
+import time
import unittest
from os.path import join as opj
for m in mods:
tests = unwrap_suite(unittest2.TestLoader().loadTestsFromModule(m))
suite = unittest2.TestSuite(itertools.ifilter(position, tests))
- _logger.info('running %s tests.', m.__name__)
- result = unittest2.TextTestRunner(verbosity=2, stream=TestStream(m.__name__)).run(suite)
+ if suite.countTestCases():
+ tm0 = time.time()
+ _logger.info('%s running tests.', m.__name__)
+ result = unittest2.TextTestRunner(verbosity=2, stream=TestStream(m.__name__)).run(suite)
+ _logger.log(25, "%s tested in %.2fs", m.__name__, time.time() - tm0)
+ if not result.wasSuccessful():
+ r = False
+ _logger.error("Module %s: %d failures, %d errors", module_name, len(result.failures), len(result.errors))
- if not result.wasSuccessful():
- r = False
- _logger.error("Module %s: %d failures, %d errors",
- module_name, len(result.failures), len(result.errors))
current_test = None
return r
class ColoredFormatter(DBFormatter):
    """Formatter that wraps the record's level name in ANSI color codes.

    Colors come from LEVEL_COLOR_MAPPING, keyed by the numeric log level.
    Levels absent from the mapping (e.g. custom level 25) fall back to
    (GREEN, DEFAULT) so formatting never raises a KeyError.
    """

    def format(self, record):
        # Unregistered levels default rather than crash the log pipeline.
        fg, bg = LEVEL_COLOR_MAPPING.get(record.levelno, (GREEN, DEFAULT))
        # ANSI SGR codes: 30 + fg selects foreground, 40 + bg background.
        colored = COLOR_PATTERN % (30 + fg, 40 + bg, record.levelname)
        record.levelname = colored
        return DBFormatter.format(self, record)
return
_logger_init = True
+ logging.addLevelName(25, "INFO")
+
from tools.translate import resetlocale
resetlocale()
if tools.config['log_db']:
postgresqlHandler = PostgreSQLHandler()
- postgresqlHandler.setLevel(logging.WARNING)
+ postgresqlHandler.setLevel(25)
logging.getLogger().addHandler(postgresqlHandler)
# Configure loggers levels