[MRG] Port of ocb-server specific changes to git. Basically a diff of lp:ocb-server...
authorStefan Rijnhart <stefan@therp.nl>
Wed, 2 Jul 2014 19:28:36 +0000 (21:28 +0200)
committerStefan Rijnhart <stefan@therp.nl>
Wed, 2 Jul 2014 19:28:36 +0000 (21:28 +0200)
18 files changed:
openerp/addons/base/ir/ir_attachment.py
openerp/addons/base/res/res_bank.py
openerp/addons/base/res/res_bank_view.xml
openerp/addons/base/res/res_country_data.xml
openerp/addons/base/res/res_lang.py
openerp/addons/base/res/res_partner.py
openerp/modules/loading.py
openerp/osv/orm.py
openerp/report/render/rml2pdf/trml2pdf.py
openerp/report/render/rml2pdf/utils.py
openerp/report/report_sxw.py
openerp/service/workers.py
openerp/service/wsgi_server.py
openerp/tools/assertion_report.py
openerp/tools/config.py
openerp/tools/convert.py
openerp/tools/translate.py
openerp/tools/yaml_import.py

index 43d1312..efc9c92 100644 (file)
@@ -177,11 +177,12 @@ class ir_attachment(osv.osv):
     }
 
     def _auto_init(self, cr, context=None):
-        super(ir_attachment, self)._auto_init(cr, context)
+        result = super(ir_attachment, self)._auto_init(cr, context)
         cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = %s', ('ir_attachment_res_idx',))
         if not cr.fetchone():
             cr.execute('CREATE INDEX ir_attachment_res_idx ON ir_attachment (res_model, res_id)')
             cr.commit()
+        return result
 
     def check(self, cr, uid, ids, mode, context=None, values=None):
         """Restricts the access to an ir.attachment, according to referred model
index 00d18d0..ad803a7 100644 (file)
@@ -62,7 +62,7 @@ class res_partner_bank_type(osv.osv):
         'name': fields.char('Name', size=64, required=True, translate=True),
         'code': fields.char('Code', size=64, required=True),
         'field_ids': fields.one2many('res.partner.bank.type.field', 'bank_type_id', 'Type Fields'),
-        'format_layout': fields.text('Format Layout', translate=True)
+        'format_layout': fields.text('Format Layout', translate=False)
     }
     _defaults = {
         'format_layout': lambda *args: "%(bank_name)s: %(acc_number)s"
@@ -139,6 +139,7 @@ class res_partner_bank(osv.osv):
         'state': fields.selection(_bank_type_get, 'Bank Account Type', required=True,
             change_default=True),
         'sequence': fields.integer('Sequence'),
+        'active': fields.boolean('Active', select=True),
         'footer': fields.boolean("Display on Reports", help="Display this bank account on the footer of printed documents like invoices and sales orders.")
     }
 
@@ -155,7 +156,8 @@ class res_partner_bank(osv.osv):
             cursor, user, 'country_id', context=context),
         'state_id': lambda obj, cursor, user, context: obj._default_value(
             cursor, user, 'state_id', context=context),
-        'name': '/'
+        'name': '/',
+        'active': True,
     }
 
     def fields_get(self, cr, uid, allfields=None, context=None):
index 7c1e07e..a7b90a3 100644 (file)
@@ -97,6 +97,8 @@
                     <group col="4">
                         <field name="state"/>
                         <field name="acc_number" placeholder="Account Number"/>
+                        <field name="active"/>
+                        <newline/>
                         <field name="company_id" groups="base.group_multi_company" on_change="onchange_company_id(company_id)"
                             invisible="context.get('company_hide', True)" widget="selection"/>
                         <field name="footer" invisible="context.get('footer_hide', True)"/>
index 2e22687..f684241 100644 (file)
         <record id="pl" model="res.country">
             <field name="name">Poland</field>
             <field name="code">pl</field>
-            <field name="currency_id" ref="PLZ"/>
+            <field name="currency_id" ref="PLN"/>
         </record>
         <record id="pm" model="res.country">
             <field name="name">Saint Pierre and Miquelon</field>
index 78bb972..0acb252 100644 (file)
@@ -148,7 +148,7 @@ class lang(osv.osv):
         'direction': 'ltr',
         'date_format':_get_default_date_format,
         'time_format':_get_default_time_format,
-        'grouping': '[]',
+        'grouping': '[3, 0]',
         'decimal_point': '.',
         'thousands_sep': ',',
     }
index 30d79bd..4cde333 100644 (file)
@@ -28,7 +28,7 @@ import re
 import openerp
 from openerp import SUPERUSER_ID
 from openerp import pooler, tools
-from openerp.osv import osv, fields
+from openerp.osv import osv, fields, orm
 from openerp.osv.expression import get_unaccent_wrapper
 from openerp.tools.translate import _
 from openerp.tools.yaml_import import is_comment
@@ -696,6 +696,7 @@ class res_partner(osv.osv, format_address):
             adr_pref.add('default')
         result = {}
         visited = set()
+        partner = orm.browse_null()
         for partner in self.browse(cr, uid, filter(None, ids), context=context):
             current_partner = partner
             while current_partner:
index 6112ef0..f8e5ccd 100644 (file)
@@ -197,13 +197,21 @@ def load_module_graph(cr, graph, status=None, perform_checks=True, skip_modules=
                 # 'data' section, but should probably not alter the data,
                 # as there is no rollback.
                 if tools.config.options['test_enable']:
-                    report.record_result(load_test(module_name, idref, mode))
-
+                    report.record_result(load_test(module_name, idref, mode),
+                                         details=(dict(module=module_name,
+                                                       msg="Exception during load of legacy "
+                                                       "data-based tests (yml...)")))
                     # Run the `fast_suite` and `checks` tests given by the module.
                     if module_name == 'base':
                         # Also run the core tests after the database is created.
-                        report.record_result(openerp.modules.module.run_unit_tests('openerp'))
-                    report.record_result(openerp.modules.module.run_unit_tests(module_name))
+                        report.record_result(openerp.modules.module.run_unit_tests('openerp'),
+                                             details=dict(module='openerp',
+                                                          msg="Failure or error in server core "
+                                                          "unit tests"))
+                    report.record_result(openerp.modules.module.run_unit_tests(module_name),
+                                         details=dict(module=module_name,
+                                                      msg="Failure or error in unit tests, "
+                                                      "check logs for more details"))
 
             processed_modules.append(package.name)
 
index fc38169..3a38424 100644 (file)
@@ -3282,7 +3282,8 @@ class BaseModel(object):
 
         cr.commit()     # start a new transaction
 
-        self._add_sql_constraints(cr)
+        if getattr(self, '_auto', True):
+            self._add_sql_constraints(cr)
 
         if create:
             self._execute_sql(cr)
index ec5e948..4175afe 100644 (file)
@@ -766,8 +766,17 @@ class _rml_flowable(object):
             if extra_style:
                 style.__dict__.update(extra_style)
             result = []
-            for i in self._textual(node).split('\n'):
-                result.append(platypus.Paragraph(i, style, **(utils.attr_get(node, [], {'bulletText':'str'}))))
+            textuals = self._textual(node).split('\n')
+            keep_empty_lines = (len(textuals) > 1) and len(node.text.strip())
+            for i in textuals:
+                if keep_empty_lines and len(i.strip()) == 0:
+                    i = '<font color="white">&nbsp;</font>'
+                result.append(
+                    platypus.Paragraph(
+                        i, style, **(
+                            utils.attr_get(node, [], {'bulletText':'str'}))
+                    )
+                )
             return result
         elif node.tag=='barCode':
             try:
index 954ef84..94b3d9d 100644 (file)
@@ -86,7 +86,7 @@ def _child_get(node, self=None, tagname=None):
                 n2.tag = tag
                 n2.attrib.update(attr or {})
                 yield n2
-                tagname = ''
+                continue
             except GeneratorExit:
                 pass
             except Exception, e:
index 493d0b6..fecbf95 100644 (file)
@@ -322,8 +322,8 @@ class rml_parse(object):
                 res='%s %s'%(currency_obj.symbol, res)
         return res
 
-    def display_address(self, address_browse_record):
-        return self.pool.get('res.partner')._display_address(self.cr, self.uid, address_browse_record)
+    def display_address(self, address_browse_record, without_company=False):
+        return self.pool.get('res.partner')._display_address(self.cr, self.uid, address_browse_record, without_company=without_company)
 
     def repeatIn(self, lst, name,nodes_parent=False):
         ret_lst = []
index db64b62..cfb07c9 100644 (file)
@@ -269,7 +269,7 @@ class Worker(object):
         r = resource.getrusage(resource.RUSAGE_SELF)
         cpu_time = r.ru_utime + r.ru_stime
         def time_expired(n, stack):
-            _logger.info('Worker (%d) CPU time limit (%s) reached.', config['limit_time_cpu'])
+            _logger.info('Worker (%d) CPU time limit (%s) reached.', os.getpid(), config['limit_time_cpu'])
             # We dont suicide in such case
             raise Exception('CPU time limit exceeded.')
         signal.signal(signal.SIGXCPU, time_expired)
index 23eb6a9..4076c64 100644 (file)
@@ -79,7 +79,22 @@ def xmlrpc_return(start_response, service, method, params, legacy_exceptions=Fal
     # This also mimics SimpleXMLRPCDispatcher._marshaled_dispatch() for
     # exception handling.
     try:
-        result = openerp.netsvc.dispatch_rpc(service, method, params)
+        def fix(res):
+            """
+            This fix is a minor hook to prevent xmlrpclib from raising a TypeError exception:
+            - To respect the XML-RPC protocol, all "int" and "float" keys must be cast to string to avoid
+              TypeError, "dictionary key must be string"
+            - And since "allow_none" is disabled, we replace all None values with a False boolean to avoid
+              TypeError, "cannot marshal None unless allow_none is enabled"
+            """
+            if res is None:
+                return False
+            elif type(res) == dict:
+                return dict((str(key), fix(value)) for key, value in res.items())
+            else:
+                return res
+            
+        result = fix(openerp.netsvc.dispatch_rpc(service, method, params))
         response = xmlrpclib.dumps((result,), methodresponse=1, allow_none=False, encoding=None)
     except Exception, e:
         if legacy_exceptions:
index 60606d1..64db017 100644 (file)
@@ -8,20 +8,28 @@ class assertion_report(object):
     def __init__(self):
         self.successes = 0
         self.failures = 0
+        self.failures_details = []
 
     def record_success(self):
         self.successes += 1
 
-    def record_failure(self):
+    def record_failure(self, details=None):
         self.failures += 1
+        if details is not None:
+            self.failures_details.append(details)
 
-    def record_result(self, result):
+    def record_result(self, result, details=None):
+        """Record either success or failure, with the provided details in the latter case.
+
+        :param result: a boolean
+        :param details: a dict with keys ``'module'``, ``'testfile'``, ``'msg'``, ``'msg_args'``
+        """
         if result is None:
             pass
         elif result is True:
             self.record_success()
         elif result is False:
-            self.record_failure()
+            self.record_failure(details=details)
 
     def __str__(self):
         res = 'Assertions report: %s successes, %s failures' % (self.successes, self.failures)
index 71d1d01..3e7fc33 100644 (file)
@@ -154,7 +154,7 @@ class configmanager(object):
         # WEB
         # TODO move to web addons after MetaOption merge
         group = optparse.OptionGroup(parser, "Web interface Configuration")
-        group.add_option("--db-filter", dest="dbfilter", default='.*',
+        group.add_option("--db-filter", dest="dbfilter", my_default='.*',
                          help="Filter listed database", metavar="REGEXP")
         parser.add_option_group(group)
 
index bd7c214..3fafa55 100644 (file)
@@ -697,13 +697,15 @@ form: module.record_id""" % (xml_id,)
             if rec_src_count:
                 count = int(rec_src_count)
                 if len(ids) != count:
-                    self.assertion_report.record_failure()
                     msg = 'assertion "%s" failed!\n'    \
                           ' Incorrect search count:\n'  \
                           ' expected count: %d\n'       \
-                          ' obtained count: %d\n'       \
-                          % (rec_string, count, len(ids))
-                    _logger.error(msg)
+                          ' obtained count: %d\n'
+                    msg_args = (rec_string, count, len(ids))
+                    _logger.error(msg, msg_args)
+                    self.assertion_report.record_failure(details=dict(module=self.module,
+                                                                      msg=msg,
+                                                                      msg_args=msg_args))
                     return
 
         assert ids is not None,\
@@ -725,13 +727,15 @@ form: module.record_id""" % (xml_id,)
                 expected_value = _eval_xml(self, test, self.pool, cr, uid, self.idref, context=context) or True
                 expression_value = unsafe_eval(f_expr, globals_dict)
                 if expression_value != expected_value: # assertion failed
-                    self.assertion_report.record_failure()
                     msg = 'assertion "%s" failed!\n'    \
                           ' xmltag: %s\n'               \
                           ' expected value: %r\n'       \
-                          ' obtained value: %r\n'       \
-                          % (rec_string, etree.tostring(test), expected_value, expression_value)
-                    _logger.error(msg)
+                          ' obtained value: %r\n'
+                    msg_args = (rec_string, etree.tostring(test), expected_value, expression_value)
+                    self.assertion_report.record_failure(details=dict(module=self.module,
+                                                                      msg=msg,
+                                                                      msg_args=msg_args))
+                    _logger.error(msg, msg_args)
                     return
         else: # all tests were successful for this assertion tag (no break)
             self.assertion_report.record_success()
index 4dc4990..6d759a6 100644 (file)
@@ -313,6 +313,10 @@ class TinyPoFile(object):
                     if not line.startswith('module:'):
                         comments.append(line)
                 elif line.startswith('#:'):
+                    # Process the `reference` comments. Each line can specify
+                    # multiple targets (e.g. model, view, code, selection,
+                    # ...). For each target, we will return an additional
+                    # entry.
                     for lpart in line[2:].strip().split(' '):
                         trans_info = lpart.strip().split(':',2)
                         if trans_info and len(trans_info) == 2:
@@ -362,6 +366,9 @@ class TinyPoFile(object):
                 line = self.lines.pop(0).strip()
 
             if targets and not fuzzy:
+                # Use the first target for the current entry (returned at the
+                # end of this next() call), and keep the others to generate
+                # additional entries (returned by subsequent next() calls).
                 trans_type, name, res_id = targets.pop(0)
                 for t, n, r in targets:
                     if t == trans_type == 'code': continue
@@ -458,7 +465,7 @@ def trans_export(lang, modules, buffer, format, cr):
                 row.setdefault('tnrs', []).append((type, name, res_id))
                 row.setdefault('comments', set()).update(comments)
 
-            for src, row in grouped_rows.items():
+            for src, row in sorted(grouped_rows.items()):
                 if not lang:
                     # translation template, so no translation value
                     row['translation'] = ''
@@ -778,6 +785,11 @@ def trans_generate(lang, modules, cr):
                 except (IOError, etree.XMLSyntaxError):
                     _logger.exception("couldn't export translation for report %s %s %s", name, report_type, fname)
 
+        elif model == 'ir.model':
+            model_pool = pool.get(obj.model)
+            if model_pool:
+                push_translation(module, 'code', '_description', 0, model_pool._description)
+
         for field_name,field_def in obj._table._columns.items():
             if field_def.translate:
                 name = model + "," + field_name
@@ -945,6 +957,10 @@ def trans_load_data(cr, fileobj, fileformat, lang, lang_name=None, verbose=True,
             # lets create the language with locale information
             lang_obj.load_lang(cr, SUPERUSER_ID, lang=lang, lang_name=lang_name)
 
+        # Parse also the POT: it will possibly provide additional targets.
+        # (Because the POT comments are correct on Launchpad but not the
+        # PO comments due to a Launchpad limitation.)
+        pot_reader = []
 
         # now, the serious things: we read the language file
         fileobj.seek(0)
@@ -957,19 +973,42 @@ def trans_load_data(cr, fileobj, fileformat, lang, lang_name=None, verbose=True,
         elif fileformat == 'po':
             reader = TinyPoFile(fileobj)
             f = ['type', 'name', 'res_id', 'src', 'value', 'comments']
+
+            # Make a reader for the POT file and be somewhat defensive for the
+            # stable branch.
+            if fileobj.name.endswith('.po'):
+                try:
+                    # Normally the path looks like /path/to/xxx/i18n/lang.po
+                    # and we try to find the corresponding
+                    # /path/to/xxx/i18n/xxx.pot file.
+                    head, tail = os.path.split(fileobj.name)
+                    head2, tail2 = os.path.split(head)
+                    head3, tail3 = os.path.split(head2)
+                    pot_handle = misc.file_open(os.path.join(head3, tail3, 'i18n', tail3 + '.pot'))
+                    pot_reader = TinyPoFile(pot_handle)
+                except:
+                    pass
+
         else:
             _logger.error('Bad file format: %s', fileformat)
             raise Exception(_('Bad file format'))
 
+        # Read the POT `reference` comments, and keep them indexed by source
+        # string.
+        pot_targets = {}
+        for type, name, res_id, src, _, comments in pot_reader:
+            if type is not None:
+                pot_targets.setdefault(src, {'value': None, 'targets': []})
+                pot_targets[src]['targets'].append((type, name, res_id))
+
         # read the rest of the file
-        line = 1
         irt_cursor = trans_obj._get_import_cursor(cr, SUPERUSER_ID, context=context)
 
-        for row in reader:
-            line += 1
+        def process_row(row):
+            """Process a single PO (or POT) entry."""
             # skip empty rows and rows where the translation field (=last fiefd) is empty
             #if (not row) or (not row[-1]):
-            #    continue
+            #    return
 
             # dictionary which holds values for this line of the csv file
             # {'lang': ..., 'type': ..., 'name': ..., 'res_id': ...,
@@ -979,9 +1018,17 @@ def trans_load_data(cr, fileobj, fileformat, lang, lang_name=None, verbose=True,
             for i, field in enumerate(f):
                 dic[field] = row[i]
 
+            # Get the `reference` comments from the POT.
+            src = row[3]
+            if pot_reader and src in pot_targets:
+                pot_targets[src]['targets'] = filter(lambda x: x != row[:3], pot_targets[src]['targets'])
+                pot_targets[src]['value'] = row[4]
+                if not pot_targets[src]['targets']:
+                    del pot_targets[src]
+
             # This would skip terms that fail to specify a res_id
             if not dic.get('res_id'):
-                continue
+                return
 
             res_id = dic.pop('res_id')
             if res_id and isinstance(res_id, (int, long)) \
@@ -1002,6 +1049,21 @@ def trans_load_data(cr, fileobj, fileformat, lang, lang_name=None, verbose=True,
 
             irt_cursor.push(dic)
 
+        # First process the entries from the PO file (doing so also fill/remove
+        # the entries from the POT file).
+        for row in reader:
+            process_row(row)
+
+        # Then process the entries implied by the POT file (which is more
+        # correct w.r.t. the targets) if some of them remain.
+        pot_rows = []
+        for src in pot_targets:
+            value = pot_targets[src]['value']
+            for type, name, res_id in pot_targets[src]['targets']:
+                pot_rows.append((type, name, res_id, src, value, comments))
+        for row in pot_rows:
+            process_row(row)
+
         irt_cursor.finish()
         trans_obj.clear_caches()
         if verbose:
index 3172caa..b79091c 100644 (file)
@@ -1,10 +1,12 @@
 # -*- coding: utf-8 -*-
+import os
 import threading
 import types
 import time # used to eval time.strftime expressions
 from datetime import datetime, timedelta
 import logging
 
+from copy import deepcopy
 import openerp.pooler as pooler
 import openerp.sql_db as sql_db
 import misc
@@ -193,7 +195,13 @@ class YamlInterpreter(object):
         return node
 
     def _log_assert_failure(self, msg, *args):
-        self.assertion_report.record_failure()
+        from openerp.modules import module  # cannot be made before (loop)
+        basepath = module.get_module_path(self.module)
+        self.assertion_report.record_failure(
+            details=dict(module=self.module,
+                         testfile=os.path.relpath(self.filename, basepath),
+                         msg=msg,
+                         msg_args=deepcopy(args)))
         _logger.error(msg, *args)
 
     def _get_assertion_id(self, assertion):