}
def _auto_init(self, cr, context=None):
- super(ir_attachment, self)._auto_init(cr, context)
+ result = super(ir_attachment, self)._auto_init(cr, context)
cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = %s', ('ir_attachment_res_idx',))
if not cr.fetchone():
cr.execute('CREATE INDEX ir_attachment_res_idx ON ir_attachment (res_model, res_id)')
cr.commit()
+ return result
def check(self, cr, uid, ids, mode, context=None, values=None):
"""Restricts the access to an ir.attachment, according to referred model
'name': fields.char('Name', size=64, required=True, translate=True),
'code': fields.char('Code', size=64, required=True),
'field_ids': fields.one2many('res.partner.bank.type.field', 'bank_type_id', 'Type Fields'),
- 'format_layout': fields.text('Format Layout', translate=True)
+ 'format_layout': fields.text('Format Layout', translate=False)
}
_defaults = {
'format_layout': lambda *args: "%(bank_name)s: %(acc_number)s"
'state': fields.selection(_bank_type_get, 'Bank Account Type', required=True,
change_default=True),
'sequence': fields.integer('Sequence'),
+ 'active': fields.boolean('Active', select=True),
'footer': fields.boolean("Display on Reports", help="Display this bank account on the footer of printed documents like invoices and sales orders.")
}
cursor, user, 'country_id', context=context),
'state_id': lambda obj, cursor, user, context: obj._default_value(
cursor, user, 'state_id', context=context),
- 'name': '/'
+ 'name': '/',
+ 'active': True,
}
def fields_get(self, cr, uid, allfields=None, context=None):
<group col="4">
<field name="state"/>
<field name="acc_number" placeholder="Account Number"/>
+ <field name="active"/>
+ <newline/>
<field name="company_id" groups="base.group_multi_company" on_change="onchange_company_id(company_id)"
invisible="context.get('company_hide', True)" widget="selection"/>
<field name="footer" invisible="context.get('footer_hide', True)"/>
<record id="pl" model="res.country">
<field name="name">Poland</field>
<field name="code">pl</field>
- <field name="currency_id" ref="PLZ"/>
+ <field name="currency_id" ref="PLN"/>
</record>
<record id="pm" model="res.country">
<field name="name">Saint Pierre and Miquelon</field>
'direction': 'ltr',
'date_format':_get_default_date_format,
'time_format':_get_default_time_format,
- 'grouping': '[]',
+ 'grouping': '[3, 0]',
'decimal_point': '.',
'thousands_sep': ',',
}
import openerp
from openerp import SUPERUSER_ID
from openerp import pooler, tools
-from openerp.osv import osv, fields
+from openerp.osv import osv, fields, orm
from openerp.osv.expression import get_unaccent_wrapper
from openerp.tools.translate import _
from openerp.tools.yaml_import import is_comment
adr_pref.add('default')
result = {}
visited = set()
+ partner = orm.browse_null()
for partner in self.browse(cr, uid, filter(None, ids), context=context):
current_partner = partner
while current_partner:
# 'data' section, but should probably not alter the data,
# as there is no rollback.
if tools.config.options['test_enable']:
- report.record_result(load_test(module_name, idref, mode))
-
+ report.record_result(load_test(module_name, idref, mode),
+ details=(dict(module=module_name,
+ msg="Exception during load of legacy "
+ "data-based tests (yml...)")))
# Run the `fast_suite` and `checks` tests given by the module.
if module_name == 'base':
# Also run the core tests after the database is created.
- report.record_result(openerp.modules.module.run_unit_tests('openerp'))
- report.record_result(openerp.modules.module.run_unit_tests(module_name))
+ report.record_result(openerp.modules.module.run_unit_tests('openerp'),
+ details=dict(module='openerp',
+ msg="Failure or error in server core "
+ "unit tests"))
+ report.record_result(openerp.modules.module.run_unit_tests(module_name),
+ details=dict(module=module_name,
+ msg="Failure or error in unit tests, "
+ "check logs for more details"))
processed_modules.append(package.name)
cr.commit() # start a new transaction
- self._add_sql_constraints(cr)
+ if getattr(self, '_auto', True):
+ self._add_sql_constraints(cr)
if create:
self._execute_sql(cr)
if extra_style:
style.__dict__.update(extra_style)
result = []
- for i in self._textual(node).split('\n'):
- result.append(platypus.Paragraph(i, style, **(utils.attr_get(node, [], {'bulletText':'str'}))))
+ textuals = self._textual(node).split('\n')
+ keep_empty_lines = (len(textuals) > 1) and len(node.text.strip())
+ for i in textuals:
+ if keep_empty_lines and len(i.strip()) == 0:
+ i = '<font color="white"> </font>'
+ result.append(
+ platypus.Paragraph(
+ i, style, **(
+ utils.attr_get(node, [], {'bulletText':'str'}))
+ )
+ )
return result
elif node.tag=='barCode':
try:
n2.tag = tag
n2.attrib.update(attr or {})
yield n2
- tagname = ''
+ continue
except GeneratorExit:
pass
except Exception, e:
res='%s %s'%(currency_obj.symbol, res)
return res
- def display_address(self, address_browse_record):
- return self.pool.get('res.partner')._display_address(self.cr, self.uid, address_browse_record)
+ def display_address(self, address_browse_record, without_company=False):
+ return self.pool.get('res.partner')._display_address(self.cr, self.uid, address_browse_record, without_company=without_company)
def repeatIn(self, lst, name,nodes_parent=False):
ret_lst = []
r = resource.getrusage(resource.RUSAGE_SELF)
cpu_time = r.ru_utime + r.ru_stime
def time_expired(n, stack):
- _logger.info('Worker (%d) CPU time limit (%s) reached.', config['limit_time_cpu'])
+ _logger.info('Worker (%d) CPU time limit (%s) reached.', os.getpid(), config['limit_time_cpu'])
# We dont suicide in such case
raise Exception('CPU time limit exceeded.')
signal.signal(signal.SIGXCPU, time_expired)
# This also mimics SimpleXMLRPCDispatcher._marshaled_dispatch() for
# exception handling.
try:
- result = openerp.netsvc.dispatch_rpc(service, method, params)
+ def fix(res):
+ """
+ This fix is a minor hook to prevent xmlrpclib from raising a TypeError exception:
+ - To respect the XML-RPC protocol, all "int" and "float" keys must be cast to string to avoid
+ TypeError, "dictionary key must be string"
+ - And since "allow_none" is disabled, we replace all None values with a False boolean to avoid
+ TypeError, "cannot marshal None unless allow_none is enabled"
+ """
+ if res is None:
+ return False
+ elif type(res) == dict:
+ return dict((str(key), fix(value)) for key, value in res.items())
+ else:
+ return res
+
+ result = fix(openerp.netsvc.dispatch_rpc(service, method, params))
response = xmlrpclib.dumps((result,), methodresponse=1, allow_none=False, encoding=None)
except Exception, e:
if legacy_exceptions:
def __init__(self):
self.successes = 0
self.failures = 0
+ self.failures_details = []
def record_success(self):
self.successes += 1
- def record_failure(self):
+ def record_failure(self, details=None):
self.failures += 1
+ if details is not None:
+ self.failures_details.append(details)
- def record_result(self, result):
+ def record_result(self, result, details=None):
+ """Record either success or failure, with the provided details in the latter case.
+
+ :param result: a boolean
+ :param details: a dict with keys ``'module'``, ``'testfile'``, ``'msg'``, ``'msg_args'``
+ """
if result is None:
pass
elif result is True:
self.record_success()
elif result is False:
- self.record_failure()
+ self.record_failure(details=details)
def __str__(self):
res = 'Assertions report: %s successes, %s failures' % (self.successes, self.failures)
# WEB
# TODO move to web addons after MetaOption merge
group = optparse.OptionGroup(parser, "Web interface Configuration")
- group.add_option("--db-filter", dest="dbfilter", default='.*',
+ group.add_option("--db-filter", dest="dbfilter", my_default='.*',
help="Filter listed database", metavar="REGEXP")
parser.add_option_group(group)
if rec_src_count:
count = int(rec_src_count)
if len(ids) != count:
- self.assertion_report.record_failure()
msg = 'assertion "%s" failed!\n' \
' Incorrect search count:\n' \
' expected count: %d\n' \
- ' obtained count: %d\n' \
- % (rec_string, count, len(ids))
- _logger.error(msg)
+ ' obtained count: %d\n'
+ msg_args = (rec_string, count, len(ids))
+ _logger.error(msg, msg_args)
+ self.assertion_report.record_failure(details=dict(module=self.module,
+ msg=msg,
+ msg_args=msg_args))
return
assert ids is not None,\
expected_value = _eval_xml(self, test, self.pool, cr, uid, self.idref, context=context) or True
expression_value = unsafe_eval(f_expr, globals_dict)
if expression_value != expected_value: # assertion failed
- self.assertion_report.record_failure()
msg = 'assertion "%s" failed!\n' \
' xmltag: %s\n' \
' expected value: %r\n' \
- ' obtained value: %r\n' \
- % (rec_string, etree.tostring(test), expected_value, expression_value)
- _logger.error(msg)
+ ' obtained value: %r\n'
+ msg_args = (rec_string, etree.tostring(test), expected_value, expression_value)
+ self.assertion_report.record_failure(details=dict(module=self.module,
+ msg=msg,
+ msg_args=msg_args))
+ _logger.error(msg, msg_args)
return
else: # all tests were successful for this assertion tag (no break)
self.assertion_report.record_success()
if not line.startswith('module:'):
comments.append(line)
elif line.startswith('#:'):
+ # Process the `reference` comments. Each line can specify
+ # multiple targets (e.g. model, view, code, selection,
+ # ...). For each target, we will return an additional
+ # entry.
for lpart in line[2:].strip().split(' '):
trans_info = lpart.strip().split(':',2)
if trans_info and len(trans_info) == 2:
line = self.lines.pop(0).strip()
if targets and not fuzzy:
+ # Use the first target for the current entry (returned at the
+ # end of this next() call), and keep the others to generate
+ # additional entries (returned by subsequent next() calls).
trans_type, name, res_id = targets.pop(0)
for t, n, r in targets:
if t == trans_type == 'code': continue
row.setdefault('tnrs', []).append((type, name, res_id))
row.setdefault('comments', set()).update(comments)
- for src, row in grouped_rows.items():
+ for src, row in sorted(grouped_rows.items()):
if not lang:
# translation template, so no translation value
row['translation'] = ''
except (IOError, etree.XMLSyntaxError):
_logger.exception("couldn't export translation for report %s %s %s", name, report_type, fname)
+ elif model == 'ir.model':
+ model_pool = pool.get(obj.model)
+ if model_pool:
+ push_translation(module, 'code', '_description', 0, model_pool._description)
+
for field_name,field_def in obj._table._columns.items():
if field_def.translate:
name = model + "," + field_name
# lets create the language with locale information
lang_obj.load_lang(cr, SUPERUSER_ID, lang=lang, lang_name=lang_name)
+ # Parse also the POT: it will possibly provide additional targets.
+ # (Because the POT comments are correct on Launchpad but not the
+ # PO comments due to a Launchpad limitation.)
+ pot_reader = []
# now, the serious things: we read the language file
fileobj.seek(0)
elif fileformat == 'po':
reader = TinyPoFile(fileobj)
f = ['type', 'name', 'res_id', 'src', 'value', 'comments']
+
+ # Make a reader for the POT file and be somewhat defensive for the
+ # stable branch.
+ if fileobj.name.endswith('.po'):
+ try:
+ # Normally the path looks like /path/to/xxx/i18n/lang.po
+ # and we try to find the corresponding
+ # /path/to/xxx/i18n/xxx.pot file.
+ head, tail = os.path.split(fileobj.name)
+ head2, tail2 = os.path.split(head)
+ head3, tail3 = os.path.split(head2)
+ pot_handle = misc.file_open(os.path.join(head3, tail3, 'i18n', tail3 + '.pot'))
+ pot_reader = TinyPoFile(pot_handle)
+ except:
+ pass
+
else:
_logger.error('Bad file format: %s', fileformat)
raise Exception(_('Bad file format'))
+ # Read the POT `reference` comments, and keep them indexed by source
+ # string.
+ pot_targets = {}
+ for type, name, res_id, src, _, comments in pot_reader:
+ if type is not None:
+ pot_targets.setdefault(src, {'value': None, 'targets': []})
+ pot_targets[src]['targets'].append((type, name, res_id))
+
# read the rest of the file
- line = 1
irt_cursor = trans_obj._get_import_cursor(cr, SUPERUSER_ID, context=context)
- for row in reader:
- line += 1
+ def process_row(row):
+ """Process a single PO (or POT) entry."""
# skip empty rows and rows where the translation field (=last field) is empty
#if (not row) or (not row[-1]):
- # continue
+ # return
# dictionary which holds values for this line of the csv file
# {'lang': ..., 'type': ..., 'name': ..., 'res_id': ...,
for i, field in enumerate(f):
dic[field] = row[i]
+ # Get the `reference` comments from the POT.
+ src = row[3]
+ if pot_reader and src in pot_targets:
+ pot_targets[src]['targets'] = filter(lambda x: x != row[:3], pot_targets[src]['targets'])
+ pot_targets[src]['value'] = row[4]
+ if not pot_targets[src]['targets']:
+ del pot_targets[src]
+
# This would skip terms that fail to specify a res_id
if not dic.get('res_id'):
- continue
+ return
res_id = dic.pop('res_id')
if res_id and isinstance(res_id, (int, long)) \
irt_cursor.push(dic)
+ # First process the entries from the PO file (doing so also fills/removes
+ # the entries from the POT file).
+ for row in reader:
+ process_row(row)
+
+ # Then process the entries implied by the POT file (which is more
+ # correct w.r.t. the targets) if some of them remain.
+ pot_rows = []
+ for src in pot_targets:
+ value = pot_targets[src]['value']
+ for type, name, res_id in pot_targets[src]['targets']:
+ pot_rows.append((type, name, res_id, src, value, comments))
+ for row in pot_rows:
+ process_row(row)
+
irt_cursor.finish()
trans_obj.clear_caches()
if verbose:
# -*- coding: utf-8 -*-
+import os
import threading
import types
import time # used to eval time.strftime expressions
from datetime import datetime, timedelta
import logging
+from copy import deepcopy
import openerp.pooler as pooler
import openerp.sql_db as sql_db
import misc
return node
def _log_assert_failure(self, msg, *args):
- self.assertion_report.record_failure()
+ from openerp.modules import module # cannot be made before (loop)
+ basepath = module.get_module_path(self.module)
+ self.assertion_report.record_failure(
+ details=dict(module=self.module,
+ testfile=os.path.relpath(self.filename, basepath),
+ msg=msg,
+ msg_args=deepcopy(args)))
_logger.error(msg, *args)
def _get_assertion_id(self, assertion):