"""
+import babel.dates
import calendar
import collections
import copy
import psycopg2
from lxml import etree
-import warnings
import fields
import openerp
from openerp.tools.config import config
from openerp.tools.misc import CountingStream
from openerp.tools.safe_eval import safe_eval as eval
-from ast import literal_eval
from openerp.tools.translate import _
from openerp import SUPERUSER_ID
from query import Query
raise KeyError(error_msg)
# if the field is a classic one or a many2one, we'll fetch all classic and many2one fields
- if col._prefetch:
+ if col._prefetch and not col.groups:
# gen the list of "local" (ie not inherited) fields which are classic or many2one
- fields_to_fetch = filter(lambda x: x[1]._classic_write, self._table._columns.items())
+ field_filter = lambda x: x[1]._classic_write and x[1]._prefetch and not x[1].groups
+ fields_to_fetch = filter(field_filter, self._table._columns.items())
# gen the list of inherited fields
inherits = map(lambda x: (x[0], x[1][2]), self._table._inherit_fields.items())
# complete the field list with the inherited fields which are classic or many2one
- fields_to_fetch += filter(lambda x: x[1]._classic_write, inherits)
+ fields_to_fetch += filter(field_filter, inherits)
# otherwise we fetch only that field
else:
fields_to_fetch = [(name, col)]
+
ids = filter(lambda id: name not in self._data[id], self._data.keys())
# read the results
field_names = map(lambda x: x[0], fields_to_fetch)
- field_values = self._table.read(self._cr, self._uid, ids, field_names, context=self._context, load="_classic_write")
+ try:
+ field_values = self._table.read(self._cr, self._uid, ids, field_names, context=self._context, load="_classic_write")
+ except (openerp.exceptions.AccessError, except_orm):
+ if len(ids) == 1:
+ raise
+ # prefetching attempt failed, perhaps we're violating ACL restrictions involuntarily
+ _logger.info('Prefetching attempt for fields %s on %s failed for ids %s, re-trying just for id %s', field_names, self._model._name, ids, self._id)
+ ids = [self._id]
+ field_values = self._table.read(self._cr, self._uid, ids, field_names, context=self._context, load="_classic_write")
# TODO: improve this, very slow for reports
if self._fields_process:
new_data[field_name] = browse_null()
elif field_column._type in ('one2many', 'many2many') and len(result_line[field_name]):
new_data[field_name] = self._list_class([browse_record(self._cr, self._uid, id, self._table.pool.get(field_column._obj), self._cache, context=self._context, list_class=self._list_class, fields_process=self._fields_process) for id in result_line[field_name]], self._context)
- elif field_column._type in ('reference'):
+ elif field_column._type == 'reference':
if result_line[field_name]:
if isinstance(result_line[field_name], browse_record):
new_data[field_name] = result_line[field_name]
self._module = module_name
# Remember which models to instanciate for this module.
- self.module_to_models.setdefault(self._module, []).append(self)
+ if not self._custom:
+ self.module_to_models.setdefault(self._module, []).append(self)
# Definition of log access columns, automatically added to models if
_name = None
_columns = {}
_constraints = []
+ _custom = False
_defaults = {}
_rec_name = None
_parent_name = 'parent_id'
# Transience
_transient = False # True in a TransientModel
- _transient_max_count = None
- _transient_max_hours = None
- _transient_check_time = 20
# structure:
# { 'parent_model': 'm2o_field', ... }
parent_names = [parent_names]
else:
name = cls._name
- # for res.parnter.address compatiblity, should be remove in v7
- if 'res.partner.address' in parent_names:
- parent_names.pop(parent_names.index('res.partner.address'))
- parent_names.append('res.partner')
if not name:
raise TypeError('_name is mandatory in case of multiple inheritance')
else:
new.extend(cls.__dict__.get(s, []))
nattr[s] = new
+
+ # Keep links to non-inherited constraints, e.g. useful when exporting translations
+ nattr['_local_constraints'] = cls.__dict__.get('_constraints', [])
+ nattr['_local_sql_constraints'] = cls.__dict__.get('_sql_constraints', [])
+
cls = type(name, (cls, parent_class), dict(nattr, _register=False))
+ else:
+ cls._local_constraints = getattr(cls, '_constraints', [])
+ cls._local_sql_constraints = getattr(cls, '_sql_constraints', [])
+
if not getattr(cls, '_original_module', None):
cls._original_module = cls._module
obj = object.__new__(cls)
# managed by the metaclass.
module_model_list = MetaModel.module_to_models.setdefault(cls._module, [])
if cls not in module_model_list:
- module_model_list.append(cls)
+ if not cls._custom:
+ module_model_list.append(cls)
# Since we don't return an instance here, the __init__
# method won't be called.
raise except_orm('Error',
('Invalid function definition %s in object %s !\nYou must use the definition: store={object:(fnct, fields, priority, time length)}.' % (store_field, self._name)))
self.pool._store_function.setdefault(object, [])
- self.pool._store_function[object].append((self._name, store_field, fnct, tuple(fields2) if fields2 else None, order, length))
- self.pool._store_function[object].sort(lambda x, y: cmp(x[4], y[4]))
+ t = (self._name, store_field, fnct, tuple(fields2) if fields2 else None, order, length)
+ if not t in self.pool._store_function[object]:
+ self.pool._store_function[object].append((self._name, store_field, fnct, tuple(fields2) if fields2 else None, order, length))
+ self.pool._store_function[object].sort(lambda x, y: cmp(x[4], y[4]))
for (key, _, msg) in self._sql_constraints:
self.pool._sql_error[self._table+'_'+key] = msg
# Load manual fields
- cr.execute("SELECT id FROM ir_model_fields WHERE name=%s AND model=%s", ('state', 'ir.model.fields'))
- if cr.fetchone():
+ # Check whether the query has already been done for all modules, or if
+ # we need to do it ourselves.
+ if self.pool.fields_by_model is not None:
+ manual_fields = self.pool.fields_by_model.get(self._name, [])
+ else:
cr.execute('SELECT * FROM ir_model_fields WHERE model=%s AND state=%s', (self._name, 'manual'))
- for field in cr.dictfetchall():
- if field['name'] in self._columns:
- continue
- attrs = {
- 'string': field['field_description'],
- 'required': bool(field['required']),
- 'readonly': bool(field['readonly']),
- 'domain': eval(field['domain']) if field['domain'] else None,
- 'size': field['size'],
- 'ondelete': field['on_delete'],
- 'translate': (field['translate']),
- 'manual': True,
- #'select': int(field['select_level'])
- }
+ manual_fields = cr.dictfetchall()
+ for field in manual_fields:
+ if field['name'] in self._columns:
+ continue
+ attrs = {
+ 'string': field['field_description'],
+ 'required': bool(field['required']),
+ 'readonly': bool(field['readonly']),
+ 'domain': eval(field['domain']) if field['domain'] else None,
+ 'size': field['size'] or None,
+ 'ondelete': field['on_delete'],
+ 'translate': (field['translate']),
+ 'manual': True,
+ #'select': int(field['select_level'])
+ }
+
+ if field['serialization_field_id']:
+ cr.execute('SELECT name FROM ir_model_fields WHERE id=%s', (field['serialization_field_id'],))
+ attrs.update({'serialization_field': cr.fetchone()[0], 'type': field['ttype']})
+ if field['ttype'] in ['many2one', 'one2many', 'many2many']:
+ attrs.update({'relation': field['relation']})
+ self._columns[field['name']] = fields.sparse(**attrs)
+ elif field['ttype'] == 'selection':
+ self._columns[field['name']] = fields.selection(eval(field['selection']), **attrs)
+ elif field['ttype'] == 'reference':
+ self._columns[field['name']] = fields.reference(selection=eval(field['selection']), **attrs)
+ elif field['ttype'] == 'many2one':
+ self._columns[field['name']] = fields.many2one(field['relation'], **attrs)
+ elif field['ttype'] == 'one2many':
+ self._columns[field['name']] = fields.one2many(field['relation'], field['relation_field'], **attrs)
+ elif field['ttype'] == 'many2many':
+ _rel1 = field['relation'].replace('.', '_')
+ _rel2 = field['model'].replace('.', '_')
+ _rel_name = 'x_%s_%s_%s_rel' % (_rel1, _rel2, field['name'])
+ self._columns[field['name']] = fields.many2many(field['relation'], _rel_name, 'id1', 'id2', **attrs)
+ else:
+ self._columns[field['name']] = getattr(fields, field['ttype'])(**attrs)
- if field['serialization_field_id']:
- cr.execute('SELECT name FROM ir_model_fields WHERE id=%s', (field['serialization_field_id'],))
- attrs.update({'serialization_field': cr.fetchone()[0], 'type': field['ttype']})
- if field['ttype'] in ['many2one', 'one2many', 'many2many']:
- attrs.update({'relation': field['relation']})
- self._columns[field['name']] = fields.sparse(**attrs)
- elif field['ttype'] == 'selection':
- self._columns[field['name']] = fields.selection(eval(field['selection']), **attrs)
- elif field['ttype'] == 'reference':
- self._columns[field['name']] = fields.reference(selection=eval(field['selection']), **attrs)
- elif field['ttype'] == 'many2one':
- self._columns[field['name']] = fields.many2one(field['relation'], **attrs)
- elif field['ttype'] == 'one2many':
- self._columns[field['name']] = fields.one2many(field['relation'], field['relation_field'], **attrs)
- elif field['ttype'] == 'many2many':
- _rel1 = field['relation'].replace('.', '_')
- _rel2 = field['model'].replace('.', '_')
- _rel_name = 'x_%s_%s_%s_rel' % (_rel1, _rel2, field['name'])
- self._columns[field['name']] = fields.many2many(field['relation'], _rel_name, 'id1', 'id2', **attrs)
- else:
- self._columns[field['name']] = getattr(fields, field['ttype'])(**attrs)
self._inherits_check()
self._inherits_reload()
if not self._sequence:
# Validate rec_name
if self._rec_name is not None:
- assert self._rec_name in self._columns.keys() + ['id'], "Invalid rec_name %s for model %s" % (self._rec_name, self._name)
+ assert self._rec_name in self._all_columns.keys() + ['id'], "Invalid rec_name %s for model %s" % (self._rec_name, self._name)
else:
self._rec_name = 'name'
if not model_data.search(cr, uid, [('name', '=', n)]):
break
postfix += 1
- model_data.create(cr, uid, {
+ model_data.create(cr, SUPERUSER_ID, {
'name': n,
'model': self._name,
'res_id': r['id'],
messages = []
fields = map(fix_import_export_id_paths, fields)
- ModelData = self.pool['ir.model.data']
+ ModelData = self.pool['ir.model.data'].clear_caches()
+
fg = self.fields_get(cr, uid, context=context)
mode = 'init'
noupdate=noupdate, res_id=id, context=context))
cr.execute('RELEASE SAVEPOINT model_load_save')
except psycopg2.Warning, e:
- cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
messages.append(dict(info, type='warning', message=str(e)))
- except psycopg2.Error, e:
- # Failed to write, log to messages, rollback savepoint (to
- # avoid broken transaction) and keep going
cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
+ except psycopg2.Error, e:
messages.append(dict(
info, type='error',
**PGERROR_TO_OE[e.pgcode](self, fg, info, e)))
+ # Failed to write, log to messages, rollback savepoint (to
+ # avoid broken transaction) and keep going
+ cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
if any(message['type'] == 'error' for message in messages):
cr.execute('ROLLBACK TO SAVEPOINT model_load')
ids = False
field_names = dict(
(f, (Translation._get_source(cr, uid, self._name + ',' + f, 'field',
context.get('lang'))
- or column.string or f))
+ or column.string))
for f, column in columns.iteritems())
- converters = dict(
- (k, Converter.to_field(cr, uid, self, column, context=context))
- for k, column in columns.iteritems())
+
+ convert = Converter.for_model(cr, uid, self, context=context)
def _log(base, field, exception):
type = 'warning' if isinstance(exception, Warning) else 'error'
- record = dict(base, field=field, type=type,
+ # logs the logical (not human-readable) field name for automated
+ # processing of response, but injects human readable in message
+ record = dict(base, type=type, field=field,
message=unicode(exception.args[0]) % base)
if len(exception.args) > 1 and exception.args[1]:
record.update(exception.args[1])
for record, extras in stream:
dbid = False
xid = False
- converted = {}
# name_get/name_create
if None in record: pass
# xid
message=_(u"Unknown database identifier '%s'") % dbid))
dbid = False
- for field, strvalue in record.iteritems():
- if field in (None, 'id', '.id'): continue
- if not strvalue:
- converted[field] = False
- continue
-
- # In warnings and error messages, use translated string as
- # field name
- message_base = dict(
- extras, record=stream.index, field=field_names[field])
- try:
- converted[field], ws = converters[field](strvalue)
-
- for w in ws:
- if isinstance(w, basestring):
- # wrap warning string in an ImportWarning for
- # uniform handling
- w = ImportWarning(w)
- _log(message_base, field, w)
- except ValueError, e:
- _log(message_base, field, e)
+ converted = convert(record, lambda field, err:\
+ _log(dict(extras, record=stream.index, field=field_names[field]), field, err))
yield dbid, xid, converted, dict(extras, record=stream.index)
error_msgs = []
for constraint in self._constraints:
fun, msg, fields = constraint
+ # We don't pass around the context here: validation code
+ # must always yield the same results.
if not fun(self, cr, uid, ids):
# Check presence of __call__ directly instead of using
# callable() because it will be deprecated as of Python 3.0
)
self._invalids.update(fields)
if error_msgs:
- cr.rollback()
raise except_orm('ValidateError', '\n'.join(error_msgs))
else:
self._invalids.clear()
if fld_def._type == 'many2many':
obj = self.pool.get(fld_def._obj)
field_value2 = []
- for i in range(len(field_value)):
+ for i in range(len(field_value or [])):
if not obj.search(cr, uid, [('id', '=',
field_value[i])]):
continue
if fld_def._type == 'one2many':
obj = self.pool.get(fld_def._obj)
field_value2 = []
- for i in range(len(field_value)):
+ for i in range(len(field_value or [])):
field_value2.append({})
for field2 in field_value[i]:
if field2 in obj._columns.keys() and obj._columns[field2]._type == 'many2one':
views = {}
xml = "<form>"
for f in node:
- if f.tag in ('field'):
+ if f.tag == 'field':
xml += etree.tostring(f, encoding="utf-8")
xml += "</form>"
new_xml = etree.fromstring(encode(xml))
# TODO: find a way to remove this hack, by allow dynamic domains
dom = []
if column._domain and not isinstance(column._domain, basestring):
- dom = column._domain
+ dom = list(column._domain)
dom += eval(node.get('domain', '[]'), {'uid': user, 'time': time})
search_context = dict(context)
if column._context and not isinstance(column._context, basestring):
field = model_fields.get(node.get('name'))
if field:
transfer_field_to_modifiers(field, modifiers)
- #evaluate the options as python code, but send it as json to the client
- if node.get('options'):
- try:
- node.set('options', simplejson.dumps(literal_eval(node.get('options'))))
- except Exception, e:
- _logger.exception('Invalid `options´ attribute, should be a valid python expression: %r', node.get('options'))
- raise except_orm('Invalid options', 'Invalid options: %r %s' % (node.get('options'), e))
+
elif node.tag in ('form', 'tree'):
result = self.view_header_get(cr, user, False, node.tag, context)
if trans:
node.set('string', trans)
- for attr_name in ('confirm', 'sum', 'help', 'placeholder'):
+ for attr_name in ('confirm', 'sum', 'avg', 'help', 'placeholder'):
attr_value = node.get(attr_name)
if attr_value:
trans = self.pool.get('ir.translation')._get_source(cr, user, self._name, 'view', context['lang'], attr_value)
view = etree.Element('calendar', string=self._description)
etree.SubElement(view, 'field', self._rec_name_fallback(cr, user, context))
- if (self._date_name not in self._columns):
+ if self._date_name not in self._columns:
date_found = False
for dt in ['date', 'date_start', 'x_date', 'x_date_start']:
if dt in self._columns:
self._columns, 'date_delay'):
raise except_orm(
_('Invalid Object Architecture!'),
- _("Insufficient fields to generate a Calendar View for %s, missing a date_stop or a date_delay" % (self._name)))
+ _("Insufficient fields to generate a Calendar View for %s, missing a date_stop or a date_delay" % self._name))
return view
are applied
"""
- sql_inherit = self.pool.get('ir.ui.view').get_inheriting_views_arch(cr, user, inherit_id, self._name)
+ sql_inherit = self.pool.get('ir.ui.view').get_inheriting_views_arch(cr, user, inherit_id, self._name, context=context)
for (view_arch, view_id) in sql_inherit:
source = apply_inheritance_specs(source, view_arch, view_id)
source = apply_view_inheritance(cr, user, source, view_id)
if view_type == 'tree' or not action[2].get('multi')]
resprint = [clean(print_) for print_ in resprint
if view_type == 'tree' or not print_[2].get('multi')]
- resrelate = map(lambda x: x[2], resrelate)
+ # When multi="True" is set, the action is displayed only in the "More" menu of the list view
+ resrelate = [clean(action) for action in resrelate
+ if (action[2].get('multi') and view_type == 'tree') or (not action[2].get('multi') and view_type == 'form')]
for x in itertools.chain(resprint, resaction, resrelate):
x['string'] = x['name']
:rtype: tuple
:return: the :meth:`~.name_get` pair value for the newly-created record.
"""
- rec_id = self.create(cr, uid, {self._rec_name: name}, context);
+ rec_id = self.create(cr, uid, {self._rec_name: name}, context)
return self.name_get(cr, uid, [rec_id], context)[0]
# private implementation of name_search, allows passing a dedicated user for the name_get part to
try:
getattr(self, '_ormcache')
self._ormcache = {}
+ self.pool._any_cache_cleared = True
except AttributeError:
pass
groupby = group_by
for r in cr.dictfetchall():
for fld, val in r.items():
- if val == None: r[fld] = False
+ if val is None: r[fld] = False
alldata[r['id']] = r
del r['id']
order = orderby or groupby
data_ids = self.search(cr, uid, [('id', 'in', alldata.keys())], order=order, context=context)
- # the IDS of records that have groupby field value = False or '' should be sorted too
- data_ids += filter(lambda x:x not in data_ids, alldata.keys())
- data = self.read(cr, uid, data_ids, groupby and [groupby] or ['id'], context=context)
- # restore order of the search as read() uses the default _order (this is only for groups, so the size of data_read shoud be small):
- data.sort(lambda x,y: cmp(data_ids.index(x['id']), data_ids.index(y['id'])))
+
+ # the IDs of records that have groupby field value = False or '' should be included too
+ data_ids += set(alldata.keys()).difference(data_ids)
+
+ if groupby:
+ data = self.read(cr, uid, data_ids, [groupby], context=context)
+ # restore order of the search as read() uses the default _order (this is only for groups, so the footprint of data should be small):
+ data_dict = dict((d['id'], d[groupby] ) for d in data)
+ result = [{'id': i, groupby: data_dict[i]} for i in data_ids]
+ else:
+ result = [{'id': i} for i in data_ids]
- for d in data:
+ for d in result:
if groupby:
d['__domain'] = [(groupby, '=', alldata[d['id']][groupby] or False)] + domain
if not isinstance(groupby_list, (str, unicode)):
dt = datetime.datetime.strptime(alldata[d['id']][groupby][:7], '%Y-%m')
days = calendar.monthrange(dt.year, dt.month)[1]
- d[groupby] = datetime.datetime.strptime(d[groupby][:10], '%Y-%m-%d').strftime('%B %Y')
+ date_value = datetime.datetime.strptime(d[groupby][:10], '%Y-%m-%d')
+ d[groupby] = babel.dates.format_date(
+ date_value, format='MMMM yyyy', locale=context.get('lang', 'en_US'))
d['__domain'] = [(groupby, '>=', alldata[d['id']][groupby] and datetime.datetime.strptime(alldata[d['id']][groupby][:7] + '-01', '%Y-%m-%d').strftime('%Y-%m-%d') or False),\
(groupby, '<=', alldata[d['id']][groupby] and datetime.datetime.strptime(alldata[d['id']][groupby][:7] + '-' + str(days), '%Y-%m-%d').strftime('%Y-%m-%d') or False)] + domain
del alldata[d['id']][groupby]
del d['id']
if groupby and groupby in self._group_by_full:
- data = self._read_group_fill_results(cr, uid, domain, groupby, groupby_list,
- aggregated_fields, data, read_group_order=order,
- context=context)
+ result = self._read_group_fill_results(cr, uid, domain, groupby, groupby_list,
+ aggregated_fields, result, read_group_order=order,
+ context=context)
- return data
+ return result
- def _inherits_join_add(self, current_table, parent_model_name, query):
+ def _inherits_join_add(self, current_model, parent_model_name, query):
"""
Add missing table SELECT and JOIN clause to ``query`` for reaching the parent table (no duplicates)
- :param current_table: current model object
+ :param current_model: current model object
:param parent_model_name: name of the parent model for which the clauses should be added
:param query: query object on which the JOIN should be added
"""
- inherits_field = current_table._inherits[parent_model_name]
+ inherits_field = current_model._inherits[parent_model_name]
parent_model = self.pool.get(parent_model_name)
- parent_table_name = parent_model._table
- quoted_parent_table_name = '"%s"' % parent_table_name
- if quoted_parent_table_name not in query.tables:
- query.tables.append(quoted_parent_table_name)
- query.where_clause.append('(%s.%s = %s.id)' % (current_table._table, inherits_field, parent_table_name))
-
-
+ parent_alias, parent_alias_statement = query.add_join((current_model._table, parent_model._table, inherits_field, 'id', inherits_field), implicit=True)
+ return parent_alias
def _inherits_join_calc(self, field, query):
"""
:return: qualified name of field, to be used in SELECT clause
"""
current_table = self
+ parent_alias = '"%s"' % current_table._table
while field in current_table._inherit_fields and not field in current_table._columns:
parent_model_name = current_table._inherit_fields[field][0]
parent_table = self.pool.get(parent_model_name)
- self._inherits_join_add(current_table, parent_model_name, query)
+ parent_alias = self._inherits_join_add(current_table, parent_model_name, query)
current_table = parent_table
- return '"%s".%s' % (current_table._table, field)
+ return '%s."%s"' % (parent_alias, field)
def _parent_store_compute(self, cr):
if not self._parent_store:
"""
Record the creation of a constraint for this model, to make it possible
to delete it later when the module is uninstalled. Type can be either
- 'f' or 'u' depending on the constraing being a foreign key or not.
+ 'f' or 'u' depending on the constraint being a foreign key or not.
"""
+ if not self._module:
+ # no need to save constraints for custom models as they're not part
+ # of any module
+ return
assert type in ('f', 'u')
cr.execute("""
SELECT 1 FROM ir_model_constraint, ir_module_module
# usually because they could block deletion due to the FKs.
# So unless stated otherwise we default them to ondelete=cascade.
ondelete = ondelete or 'cascade'
- self._foreign_keys.append((self._table, source_field, dest_model._table, ondelete or 'set null'))
- _schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s",
- self._table, source_field, dest_model._table, ondelete)
+ fk_def = (self._table, source_field, dest_model._table, ondelete or 'set null')
+ self._foreign_keys.add(fk_def)
+ _schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s", *fk_def)
# unchecked version: for custom cases, such as m2m relationships
def _m2o_add_foreign_key_unchecked(self, source_table, source_field, dest_model, ondelete):
- self._foreign_keys.append((source_table, source_field, dest_model._table, ondelete or 'set null'))
- _schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s",
- source_table, source_field, dest_model._table, ondelete)
+ fk_def = (source_table, source_field, dest_model._table, ondelete or 'set null')
+ self._foreign_keys.add(fk_def)
+ _schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s", *fk_def)
def _drop_constraint(self, cr, source_table, constraint_name):
cr.execute("ALTER TABLE %s DROP CONSTRAINT %s" % (source_table,constraint_name))
cons, = constraints
if cons['ondelete_rule'] != POSTGRES_CONFDELTYPES.get((ondelete or 'set null').upper(), 'a')\
or cons['foreign_table'] != dest_model._table:
+ # Wrong FK: drop it and recreate
_schema.debug("Table '%s': dropping obsolete FK constraint: '%s'",
source_table, cons['constraint_name'])
self._drop_constraint(cr, source_table, cons['constraint_name'])
- self._m2o_add_foreign_key_checked(source_field, dest_model, ondelete)
- # else it's all good, nothing to do!
+ else:
+ # it's all good, nothing to do!
+ return
else:
# Multiple FKs found for the same field, drop them all, and re-create
for cons in constraints:
_schema.debug("Table '%s': dropping duplicate FK constraints: '%s'",
source_table, cons['constraint_name'])
self._drop_constraint(cr, source_table, cons['constraint_name'])
- self._m2o_add_foreign_key_checked(source_field, dest_model, ondelete)
+
+ # (re-)create the FK
+ self._m2o_add_foreign_key_checked(source_field, dest_model, ondelete)
_auto_end).
"""
- self._foreign_keys = []
+ self._foreign_keys = set()
raise_on_invalid_object_name(self._name)
if context is None:
context = {}
else:
default = self._defaults[k]
- if (default is not None):
+ if default is not None:
ss = self._columns[k]._symbol_set
query = 'UPDATE "%s" SET "%s"=%s WHERE "%s" is NULL' % (self._table, k, ss[0], k)
cr.execute(query, (ss[1](default),))
# and add constraints if needed
if isinstance(f, fields.many2one):
if not self.pool.get(f._obj):
- raise except_orm('Programming Error', ('There is no reference available for %s') % (f._obj,))
+ raise except_orm('Programming Error', 'There is no reference available for %s' % (f._obj,))
dest_model = self.pool.get(f._obj)
ref = dest_model._table
# ir_actions is inherited so foreign key doesn't work on it
def _create_table(self, cr):
- cr.execute('CREATE TABLE "%s" (id SERIAL NOT NULL, PRIMARY KEY(id)) WITHOUT OIDS' % (self._table,))
+ cr.execute('CREATE TABLE "%s" (id SERIAL NOT NULL, PRIMARY KEY(id))' % (self._table,))
cr.execute(("COMMENT ON TABLE \"%s\" IS %%s" % self._table), (self._description,))
_schema.debug("Table '%s': created", self._table)
elif not self._columns['parent_right'].select:
_logger.error('parent_right column on object %s must be indexed! Add select=1 to the field definition)',
self._table)
- if self._columns[self._parent_name].ondelete != 'cascade':
- _logger.error("The column %s on object %s must be set as ondelete='cascade'",
+ if self._columns[self._parent_name].ondelete not in ('cascade', 'restrict'):
+ _logger.error("The column %s on object %s must be set as ondelete='cascade' or 'restrict'",
self._parent_name, self._name)
cr.commit()
# TODO the condition could use fields_get_keys().
if f._fields_id not in other._columns.keys():
if f._fields_id not in other._inherit_fields.keys():
- raise except_orm('Programming Error', ("There is no reference field '%s' found for '%s'") % (f._fields_id, f._obj,))
+ raise except_orm('Programming Error', "There is no reference field '%s' found for '%s'" % (f._fields_id, f._obj,))
def _m2m_raise_or_create_relation(self, cr, f):
m2m_tbl, col1, col2 = f._sql_names(self)
cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (m2m_tbl,))
if not cr.dictfetchall():
if not self.pool.get(f._obj):
- raise except_orm('Programming Error', ('Many2Many destination model does not exist: `%s`') % (f._obj,))
+ raise except_orm('Programming Error', 'Many2Many destination model does not exist: `%s`' % (f._obj,))
dest_model = self.pool.get(f._obj)
ref = dest_model._table
- cr.execute('CREATE TABLE "%s" ("%s" INTEGER NOT NULL, "%s" INTEGER NOT NULL, UNIQUE("%s","%s")) WITH OIDS' % (m2m_tbl, col1, col2, col1, col2))
+ cr.execute('CREATE TABLE "%s" ("%s" INTEGER NOT NULL, "%s" INTEGER NOT NULL, UNIQUE("%s","%s"))' % (m2m_tbl, col1, col2, col1, col2))
# create foreign key references with ondelete=cascade, unless the targets are SQL views
cr.execute("SELECT relkind FROM pg_class WHERE relkind IN ('v') AND relname=%s", (ref,))
if not cr.fetchall():
_logger.info('Missing many2one field definition for _inherits reference "%s" in "%s", using default one.', field_name, self._name)
self._columns[field_name] = fields.many2one(table, string="Automatically created field to link to parent %s" % table,
required=True, ondelete="cascade")
- elif not self._columns[field_name].required or self._columns[field_name].ondelete.lower() != "cascade":
- _logger.warning('Field definition for _inherits reference "%s" in "%s" must be marked as "required" with ondelete="cascade", forcing it.', field_name, self._name)
+ elif not self._columns[field_name].required or self._columns[field_name].ondelete.lower() not in ("cascade", "restrict"):
+ _logger.warning('Field definition for _inherits reference "%s" in "%s" must be marked as "required" with ondelete="cascade" or "restrict", forcing it to required + cascade.', field_name, self._name)
self._columns[field_name].required = True
self._columns[field_name].ondelete = "cascade"
:param cr: database cursor
:param user: current user id
- :param fields: list of fields
+ :param allfields: list of fields
:param context: context arguments, like lang, time zone
:return: dictionary of field dictionaries, each one describing a field of the business object
:raise AccessError: * if user has no create/write rights on the requested object
return res
+ def check_field_access_rights(self, cr, user, operation, fields, context=None):
+ """
+ Check the user access rights on the given fields. This raises Access
+ Denied if the user does not have the rights. Otherwise it returns the
+ fields (as-is if ``fields`` is truthy, or the list of readable/writable
+ fields if ``fields`` is falsy).
+ """
+ def p(field_name):
+ """Predicate to test if the user has access to the given field name."""
+ # Ignore requested field if it doesn't exist. This is ugly but
+ # it seems to happen at least with 'name_alias' on res.partner.
+ if field_name not in self._all_columns:
+ return True
+ field = self._all_columns[field_name].column
+ if user != SUPERUSER_ID and field.groups:
+ return self.user_has_groups(cr, user, groups=field.groups, context=context)
+ else:
+ return True
+ if not fields:
+ fields = filter(p, self._all_columns.keys())
+ else:
+ filtered_fields = filter(lambda a: not p(a), fields)
+ if filtered_fields:
+ _logger.warning('Access Denied by ACLs for operation: %s, uid: %s, model: %s, fields: %s', operation, user, self._name, ', '.join(filtered_fields))
+ raise except_orm(
+ _('Access Denied'),
+ _('The requested operation cannot be completed due to security restrictions. '
+ 'Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
+ (self._description, operation))
+ return fields
+
def read(self, cr, user, ids, fields=None, context=None, load='_classic_read'):
""" Read records with given ids with the given fields
if not context:
context = {}
self.check_access_rights(cr, user, 'read')
- if not fields:
- fields = list(set(self._columns.keys() + self._inherit_fields.keys()))
+ fields = self.check_field_access_rights(cr, user, 'read', fields)
if isinstance(ids, (int, long)):
select = [ids]
else:
context = {}
if not ids:
return []
- if fields_to_read == None:
+ if fields_to_read is None:
fields_to_read = self._columns.keys()
# Construct a clause for the security rules.
fields_pre2 = map(convert_field, fields_pre)
order_by = self._parent_order or self._order
- select_fields = ','.join(fields_pre2 + [self._table + '.id'])
+ select_fields = ','.join(fields_pre2 + ['%s.id' % self._table])
query = 'SELECT %s FROM %s WHERE %s.id IN %%s' % (select_fields, ','.join(tables), self._table)
if rule_clause:
query += " AND " + (' OR '.join(rule_clause))
query += " ORDER BY " + order_by
for sub_ids in cr.split_for_in_conditions(ids):
- if rule_clause:
- cr.execute(query, [tuple(sub_ids)] + rule_params)
- self._check_record_rules_result_count(cr, user, sub_ids, 'read', context=context)
- else:
- cr.execute(query, (tuple(sub_ids),))
- res.extend(cr.dictfetchall())
+ cr.execute(query, [tuple(sub_ids)] + rule_params)
+ results = cr.dictfetchall()
+ result_ids = [x['id'] for x in results]
+ self._check_record_rules_result_count(cr, user, sub_ids, result_ids, 'read', context=context)
+ res.extend(results)
else:
res = map(lambda x: {'id': x}, ids)
- for f in fields_pre:
- if f == self.CONCURRENCY_CHECK_FIELD:
- continue
- if self._columns[f].translate:
- ids = [x['id'] for x in res]
- #TODO: optimize out of this loop
- res_trans = self.pool.get('ir.translation')._get_ids(cr, user, self._name+','+f, 'model', context.get('lang', False) or 'en_US', ids)
- for r in res:
- r[f] = res_trans.get(r['id'], False) or r[f]
+ if context.get('lang'):
+ for f in fields_pre:
+ if f == self.CONCURRENCY_CHECK_FIELD:
+ continue
+ if self._columns[f].translate:
+ ids = [x['id'] for x in res]
+ #TODO: optimize out of this loop
+ res_trans = self.pool.get('ir.translation')._get_ids(cr, user, self._name+','+f, 'model', context['lang'], ids)
+ for r in res:
+ r[f] = res_trans.get(r['id'], False) or r[f]
for table in self._inherits:
col = self._inherits[table]
# mention the first one only to keep the error message readable
raise except_orm('ConcurrencyException', _('A document was modified since you last viewed it (%s:%d)') % (self._description, res[0]))
- def _check_record_rules_result_count(self, cr, uid, ids, operation, context=None):
- """Verify that number of returned rows after applying record rules matches
+ def _check_record_rules_result_count(self, cr, uid, ids, result_ids, operation, context=None):
+ """Verify the returned rows after applying record rules matches
the length of `ids`, and raise an appropriate exception if it does not.
"""
- if cr.rowcount != len(ids):
+ ids, result_ids = set(ids), set(result_ids)
+ missing_ids = ids - result_ids
+ if missing_ids:
# Attempt to distinguish record rule restriction vs deleted records,
- # to provide a more specific error message
- cr.execute('SELECT id FROM ' + self._table + ' WHERE id IN %s', (tuple(ids),))
- if cr.rowcount != len(ids):
- if operation == 'unlink':
- # no need to warn about deleting an already deleted record!
+ # to provide a more specific error message - check if the missing
+ # ids are still present in the database
+ cr.execute('SELECT id FROM ' + self._table + ' WHERE id IN %s', (tuple(missing_ids),))
+ if cr.rowcount:
+ # the missing ids are (at least partially) hidden by access rules
+ if uid == SUPERUSER_ID:
+ return
+ _logger.warning('Access Denied by record rules for operation: %s, uid: %s, model: %s', operation, uid, self._name)
+ raise except_orm(_('Access Denied'),
+ _('The requested operation cannot be completed due to security restrictions. Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
+ (self._description, operation))
+ else:
+ # If we get here, the missing_ids are not in the database
+ if operation in ('read','unlink'):
+ # No need to warn about deleting an already deleted record.
+ # And no error when reading a record that was deleted, to prevent spurious
+ # errors for non-transactional search/read sequences coming from clients
return
_logger.warning('Failed operation on deleted record(s): %s, uid: %s, model: %s', operation, uid, self._name)
raise except_orm(_('Missing document(s)'),
_('One of the documents you are trying to access has been deleted, please try again after refreshing.'))
- _logger.warning('Access Denied by record rules for operation: %s, uid: %s, model: %s', operation, uid, self._name)
- raise except_orm(_('Access Denied'),
- _('The requested operation cannot be completed due to security restrictions. Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
- (self._description, operation))
+
def check_access_rights(self, cr, uid, operation, raise_exception=True): # no context on purpose.
"""Verifies that the operation given by ``operation`` is allowed for the user
cr.execute('SELECT ' + self._table + '.id FROM ' + ','.join(tables) +
' WHERE ' + self._table + '.id IN %s' + where_clause,
[sub_ids] + where_params)
- self._check_record_rules_result_count(cr, uid, sub_ids, operation, context=context)
+ returned_ids = [x['id'] for x in cr.dictfetchall()]
+ self._check_record_rules_result_count(cr, uid, sub_ids, returned_ids, operation, context=context)
def _workflow_trigger(self, cr, uid, ids, trigger, context=None):
"""Call given workflow trigger as a result of a CRUD operation"""
getattr(wf_service, trigger)(uid, self._name, res_id, cr)
def _workflow_signal(self, cr, uid, ids, signal, context=None):
- """Send given workflow signal"""
+ """Send given workflow signal and return a dict mapping ids to workflow results"""
wf_service = netsvc.LocalService("workflow")
+ # collect the per-record return value of trg_validate so callers can
+ # inspect the outcome of each workflow transition
+ result = {}
for res_id in ids:
- wf_service.trg_validate(uid, self._name, res_id, signal, cr)
+ result[res_id] = wf_service.trg_validate(uid, self._name, res_id, signal, cr)
+ return result
def unlink(self, cr, uid, ids, context=None):
"""
if isinstance(ids, (int, long)):
ids = [ids]
- result_store = self._store_get_values(cr, uid, ids, None, context)
+ result_store = self._store_get_values(cr, uid, ids, self._all_columns.keys(), context)
self._check_concurrency(cr, ids, context)
"""
readonly = None
+ self.check_field_access_rights(cr, user, 'write', vals.keys())
for field in vals.copy():
fobj = None
if field in self._columns:
del vals[self._inherits[table]]
record_id = tocreate[table].pop('id', None)
-
+
+ # When linking/creating parent records, force context without 'no_store_function' key that
+ # defers stored functions computing, as these won't be computed in batch at the end of create().
+ parent_context = dict(context)
+ parent_context.pop('no_store_function', None)
+
if record_id is None or not record_id:
- record_id = self.pool.get(table).create(cr, user, tocreate[table], context=context)
+ record_id = self.pool.get(table).create(cr, user, tocreate[table], context=parent_context)
else:
- self.pool.get(table).write(cr, user, [record_id], tocreate[table], context=context)
+ self.pool.get(table).write(cr, user, [record_id], tocreate[table], context=parent_context)
upd0 += ',' + self._inherits[table]
upd1 += ',%s'
upd0 = upd0 + ',"' + field + '"'
upd1 = upd1 + ',' + self._columns[field]._symbol_set[0]
upd2.append(self._columns[field]._symbol_set[1](vals[field]))
+ #for the function fields that receive a value, we set them directly in the database
+ #(they may be required), but we also need to trigger the _fct_inv()
+ if (hasattr(self._columns[field], '_fnct_inv')) and not isinstance(self._columns[field], fields.related):
+ #TODO: this way to special case the related fields is really creepy but it shouldn't be changed at
+ #one week of the release candidate. It seems the only good way to handle correctly this is to add an
+ #attribute to make a field `really readonly´ and thus totally ignored by the create()... otherwise
+ #if, for example, the related has a default value (for usability) then the fct_inv is called and it
+ #may raise some access rights error. Changing this is a too big change for now, and is thus postponed
+ #after the release but, definitively, the behavior shouldn't be different for related and function
+ #fields.
+ upd_todo.append(field)
else:
+ #TODO: this `if´ statement should be removed because there is no good reason to special case the fields
+ #related. See the above TODO comment for further explanations.
if not isinstance(self._columns[field], fields.related):
upd_todo.append(field)
if field in self._columns \
upd1 += ",%s,(now() at time zone 'UTC'),%s,(now() at time zone 'UTC')"
upd2.extend((user, user))
cr.execute('insert into "'+self._table+'" (id'+upd0+") values ("+str(id_new)+upd1+')', tuple(upd2))
- self.check_access_rule(cr, user, [id_new], 'create', context=context)
upd_todo.sort(lambda x, y: self._columns[x].priority-self._columns[y].priority)
if self._parent_store and not context.get('defer_parent_store_computation'):
self.name_get(cr, user, [id_new], context=context)[0][1] + \
"' " + _("created.")
self.log(cr, user, id_new, message, True, context=context)
+ self.check_access_rule(cr, user, [id_new], 'create', context=context)
self._workflow_trigger(cr, user, [id_new], 'trg_create', context=context)
return id_new
if ((not f[trigger_fields_]) or set(fields).intersection(f[trigger_fields_]))]
mapping = {}
+ fresults = {}
for function in to_compute:
- # use admin user for accessing objects having rules defined on store fields
- target_ids = [id for id in function[id_mapping_fnct_](self, cr, SUPERUSER_ID, ids, context) if id]
+ fid = id(function[id_mapping_fnct_])
+ if not fid in fresults:
+ # use admin user for accessing objects having rules defined on store fields
+ fresults[fid] = [id2 for id2 in function[id_mapping_fnct_](self, cr, SUPERUSER_ID, ids, context) if id2]
+ target_ids = fresults[fid]
# the compound key must consider the priority and model name
key = (function[priority_], function[model_name_])
functions_ids_maps = {}
# function_ids_maps =
# { (function_1_tuple, function_2_tuple) : [target_id1, target_id2, ..] }
- for id, functions in id_map.iteritems():
- functions_ids_maps.setdefault(tuple(functions), []).append(id)
+ for fid, functions in id_map.iteritems():
+ functions_ids_maps.setdefault(tuple(functions), []).append(fid)
for functions, ids in functions_ids_maps.iteritems():
call_map.setdefault((priority,model),[]).append((priority, model, ids,
[f[func_field_to_compute_] for f in functions]))
:param query: the current query object
"""
def apply_rule(added_clause, added_params, added_tables, parent_model=None, child_object=None):
+ """ :param string parent_model: string of the parent model
+ :param model child_object: model object, base of the rule application
+ """
if added_clause:
if parent_model and child_object:
# as inherited rules are being applied, we need to add the missing JOIN
# to reach the parent table (if it was not JOINed yet in the query)
- child_object._inherits_join_add(child_object, parent_model, query)
+ parent_alias = child_object._inherits_join_add(child_object, parent_model, query)
+ # inherited rules are applied on the external table -> need to get the alias and replace
+ parent_table = self.pool.get(parent_model)._table
+ added_clause = [clause.replace('"%s"' % parent_table, '"%s"' % parent_alias) for clause in added_clause]
+ # change references to parent_table to parent_alias, because we now use the alias to refer to the table
+ new_tables = []
+ for table in added_tables:
+ # table is just a table name -> switch to the full alias
+ if table == '"%s"' % parent_table:
+ new_tables.append('"%s" as "%s"' % (parent_table, parent_alias))
+ # table is already a full statement -> replace reference to the table to its alias, is correct with the way aliases are generated
+ else:
+ new_tables.append(table.replace('"%s"' % parent_table, '"%s"' % parent_alias))
+ added_tables = new_tables
query.where_clause += added_clause
query.where_clause_params += added_params
for table in added_tables:
# apply main rules on the object
rule_obj = self.pool.get('ir.rule')
- apply_rule(*rule_obj.domain_get(cr, uid, self._name, mode, context=context))
+ rule_where_clause, rule_where_clause_params, rule_tables = rule_obj.domain_get(cr, uid, self._name, mode, context=context)
+ apply_rule(rule_where_clause, rule_where_clause_params, rule_tables)
# apply ir.rules from the parents (through _inherits)
for inherited_model in self._inherits:
- kwargs = dict(parent_model=inherited_model, child_object=self) #workaround for python2.5
- apply_rule(*rule_obj.domain_get(cr, uid, inherited_model, mode, context=context), **kwargs)
+ rule_where_clause, rule_where_clause_params, rule_tables = rule_obj.domain_get(cr, uid, inherited_model, mode, context=context)
+ apply_rule(rule_where_clause, rule_where_clause_params, rule_tables,
+ parent_model=inherited_model, child_object=self)
def _generate_m2o_order_by(self, order_field, query):
"""
# extract the field names, to be able to qualify them and add desc/asc
m2o_order_list = []
for order_part in m2o_order.split(","):
- m2o_order_list.append(order_part.strip().split(" ",1)[0].strip())
+ m2o_order_list.append(order_part.strip().split(" ", 1)[0].strip())
m2o_order = m2o_order_list
# Join the dest m2o table if it's not joined yet. We use [LEFT] OUTER join here
# as we don't want to exclude results that have NULL values for the m2o
- src_table, src_field = qualified_field.replace('"','').split('.', 1)
- query.join((src_table, dest_model._table, src_field, 'id'), outer=True)
- qualify = lambda field: '"%s"."%s"' % (dest_model._table, field)
+ src_table, src_field = qualified_field.replace('"', '').split('.', 1)
+ dst_alias, dst_alias_statement = query.add_join((src_table, dest_model._table, src_field, 'id', src_field), implicit=False, outer=True)
+ qualify = lambda field: '"%s"."%s"' % (dst_alias, field)
return map(qualify, m2o_order) if isinstance(m2o_order, list) else qualify(m2o_order)
-
def _generate_order_by(self, order_spec, query):
"""
Attempt to consruct an appropriate ORDER BY clause based on order_spec, which must be
:raise" except_orm in case order_spec is malformed
"""
- order_by_clause = self._order
+ order_by_clause = ''
+ order_spec = order_spec or self._order
if order_spec:
order_by_elements = []
self._check_qorder(order_spec)
order_field = order_split[0].strip()
order_direction = order_split[1].strip() if len(order_split) == 2 else ''
inner_clause = None
- if order_field == 'id':
- order_by_clause = '"%s"."%s"' % (self._table, order_field)
+ if order_field == 'id' or (self._log_access and order_field in LOG_ACCESS_COLUMNS.keys()):
+ order_by_elements.append('"%s"."%s" %s' % (self._table, order_field, order_direction))
elif order_field in self._columns:
order_column = self._columns[order_field]
if order_column._classic_read:
elif order_column._type == 'many2one':
inner_clause = self._generate_m2o_order_by(order_field, query)
else:
- continue # ignore non-readable or "non-joinable" fields
+ continue # ignore non-readable or "non-joinable" fields
elif order_field in self._inherit_fields:
parent_obj = self.pool.get(self._inherit_fields[order_field][3])
order_column = parent_obj._columns[order_field]
elif order_column._type == 'many2one':
inner_clause = self._generate_m2o_order_by(order_field, query)
else:
- continue # ignore non-readable or "non-joinable" fields
+ continue # ignore non-readable or "non-joinable" fields
+ else:
+ raise ValueError( _("Sorting field %s not found on model %s") %( order_field, self._name))
if inner_clause:
if isinstance(inner_clause, list):
for clause in inner_clause:
return res[0][0]
cr.execute('SELECT "%s".id FROM ' % self._table + from_clause + where_str + order_by + limit_str + offset_str, where_clause_params)
res = cr.fetchall()
- return [x[0] for x in res]
+
+ # TDE note: with auto_join, we could have several lines about the same result
+ # i.e. a lead with several unread messages; we uniquify the result using
+ # a fast way to do it while preserving order (http://www.peterbe.com/plog/uniqifiers-benchmark)
+ def _uniquify_list(seq):
+ seen = set()
+ return [x for x in seq if x not in seen and not seen.add(x)]
+
+ return _uniquify_list([x[0] for x in res])
# returns the different values ever entered for one field
# this is used, for example, in the client when the user hits enter on
Copy given record's data with all its fields values
:param cr: database cursor
- :param user: current user id
+ :param uid: current user id
:param id: id of the record to copy
:param default: field values to override in the original values of the copied record
:type default: dictionary
else:
default['state'] = self._defaults['state']
- context_wo_lang = context.copy()
- if 'lang' in context:
- del context_wo_lang['lang']
- data = self.read(cr, uid, [id,], context=context_wo_lang)
+ data = self.read(cr, uid, [id,], context=context)
if data:
data = data[0]
else:
# TODO it seems fields_get can be replaced by _all_columns (no need for translation)
fields = self.fields_get(cr, uid, context=context)
- translation_records = []
for field_name, field_def in fields.items():
+ # removing the lang to compare untranslated values
+ context_wo_lang = dict(context, lang=None)
+ old_record, new_record = self.browse(cr, uid, [old_id, new_id], context=context_wo_lang)
# we must recursively copy the translations for o2o and o2m
if field_def['type'] == 'one2many':
target_obj = self.pool.get(field_def['relation'])
- old_record, new_record = self.read(cr, uid, [old_id, new_id], [field_name], context=context)
# here we rely on the order of the ids to match the translations
# as foreseen in copy_data()
- old_children = sorted(old_record[field_name])
- new_children = sorted(new_record[field_name])
+ old_children = sorted(r.id for r in old_record[field_name])
+ new_children = sorted(r.id for r in new_record[field_name])
for (old_child, new_child) in zip(old_children, new_children):
target_obj.copy_translations(cr, uid, old_child, new_child, context=context)
# and for translatable fields we keep them for copy
elif field_def.get('translate'):
- trans_name = ''
if field_name in self._columns:
trans_name = self._name + "," + field_name
+ target_id = new_id
+ source_id = old_id
elif field_name in self._inherit_fields:
trans_name = self._inherit_fields[field_name][0] + "," + field_name
- if trans_name:
- trans_ids = trans_obj.search(cr, uid, [
- ('name', '=', trans_name),
- ('res_id', '=', old_id)
- ])
- translation_records.extend(trans_obj.read(cr, uid, trans_ids, context=context))
+ # get the id of the parent record to set the translation
+ inherit_field_name = self._inherit_fields[field_name][1]
+ target_id = new_record[inherit_field_name].id
+ source_id = old_record[inherit_field_name].id
+ else:
+ continue
- for record in translation_records:
- del record['id']
- record['res_id'] = new_id
- trans_obj.create(cr, uid, record, context=context)
+ trans_ids = trans_obj.search(cr, uid, [
+ ('name', '=', trans_name),
+ ('res_id', '=', source_id)
+ ])
+ user_lang = context.get('lang')
+ for record in trans_obj.read(cr, uid, trans_ids, context=context):
+ del record['id']
+ # remove source to avoid triggering _set_src
+ del record['source']
+ record.update({'res_id': target_id})
+ if user_lang and user_lang == record['lang']:
+ # 'source' to force the call to _set_src
+ # 'value' needed if value is changed in copy(), want to see the new_value
+ record['source'] = old_record[field_name]
+ record['value'] = new_record[field_name]
+ trans_obj.create(cr, uid, record, context=context)
def copy(self, cr, uid, id, default=None, context=None):
"""
if type(ids) in (int, long):
ids = [ids]
- query = 'SELECT id FROM "%s"' % (self._table)
+ query = 'SELECT id FROM "%s"' % self._table
cr.execute(query + "WHERE ID IN %s", (tuple(ids),))
return [x[0] for x in cr.fetchall()]
:param parent: optional parent field name (default: ``self._parent_name = parent_id``)
:return: **True** if the operation can proceed safely, or **False** if an infinite loop is detected.
"""
-
if not parent:
parent = self._parent_name
- ids_parent = ids[:]
- query = 'SELECT distinct "%s" FROM "%s" WHERE id IN %%s' % (parent, self._table)
- while ids_parent:
- ids_parent2 = []
- for i in range(0, len(ids), cr.IN_MAX):
- sub_ids_parent = ids_parent[i:i+cr.IN_MAX]
- cr.execute(query, (tuple(sub_ids_parent),))
- ids_parent2.extend(filter(None, map(lambda x: x[0], cr.fetchall())))
- ids_parent = ids_parent2
- for i in ids_parent:
- if i in ids:
+
+ # must ignore 'active' flag, ir.rules, etc. => direct SQL query
+ query = 'SELECT "%s" FROM "%s" WHERE id = %%s' % (parent, self._table)
+ for id in ids:
+ current_id = id
+ # remember visited ids: a pre-existing cycle among the ancestors
+ # that does NOT pass through `id` would otherwise loop forever
+ seen = set()
+ while current_id is not None:
+ if current_id in seen:
+ # hit a cycle elsewhere in the parent chain
+ return False
+ seen.add(current_id)
+ cr.execute(query, (current_id,))
+ result = cr.fetchone()
+ current_id = result[0] if result else None
+ if current_id == id:
return False
return True
def _transient_clean_rows_older_than(self, cr, seconds):
assert self._transient, "Model %s is not transient, it cannot be vacuumed!" % self._name
- cr.execute("SELECT id FROM " + self._table + " WHERE"
- " COALESCE(write_date, create_date, (now() at time zone 'UTC'))::timestamp <"
- " ((now() at time zone 'UTC') - interval %s)", ("%s seconds" % seconds,))
+ # Never delete rows used in last 5 minutes
+ seconds = max(seconds, 300)
+ query = ("SELECT id FROM " + self._table + " WHERE"
+ " COALESCE(write_date, create_date, (now() at time zone 'UTC'))::timestamp"
+ " < ((now() at time zone 'UTC') - interval %s)")
+ cr.execute(query, ("%s seconds" % seconds,))
ids = [x[0] for x in cr.fetchall()]
+ # delete through the ORM (as superuser), not raw SQL
self.unlink(cr, SUPERUSER_ID, ids)
- def _transient_clean_old_rows(self, cr, count):
- assert self._transient, "Model %s is not transient, it cannot be vacuumed!" % self._name
- cr.execute(
- "SELECT id, COALESCE(write_date, create_date, (now() at time zone 'UTC'))::timestamp"
- " AS t FROM " + self._table +
- " ORDER BY t LIMIT %s", (count,))
- ids = [x[0] for x in cr.fetchall()]
- self.unlink(cr, SUPERUSER_ID, ids)
+ def _transient_clean_old_rows(self, cr, max_count):
+ """Vacuum the transient table when it holds more than ``max_count`` rows,
+ delegating the actual deletion to _transient_clean_rows_older_than()."""
+ # keep the transient guard the removed version had (sibling
+ # _transient_clean_rows_older_than asserts it too)
+ assert self._transient, "Model %s is not transient, it cannot be vacuumed!" % self._name
+ # Check how many rows we have in the table
+ cr.execute("SELECT count(*) AS row_count FROM " + self._table)
+ if cr.fetchone()[0] <= max_count:
+ return # max not reached, nothing to do
+ # age-based cleanup; rows used within the last 5 minutes are preserved
+ self._transient_clean_rows_older_than(cr, 300)
def _transient_vacuum(self, cr, uid, force=False):
"""Clean the transient records.
Actual cleaning will happen only once every "_transient_check_time" calls.
This means this method can be called frequently called (e.g. whenever
a new record is created).
+ Example with both max_hours and max_count active:
+ Suppose max_hours = 0.2 (e.g. 12 minutes), max_count = 20, there are 55 rows in the
+ table, 10 created/changed in the last 5 minutes, an additional 12 created/changed between
+ 5 and 10 minutes ago, the rest created/changed more than 12 minutes ago.
+ - age based vacuum will leave the 22 rows created/changed in the last 12 minutes
+ - count based vacuum will wipe out another 12 rows. Not just 2, otherwise each addition
+ would immediately cause the maximum to be reached again.
+ - the 10 rows that have been created/changed the last 5 minutes will NOT be deleted
"""
assert self._transient, "Model %s is not transient, it cannot be vacuumed!" % self._name
+ _transient_check_time = 20 # arbitrary limit on vacuum executions
self._transient_check_count += 1
- if (not force) and (self._transient_check_count % self._transient_check_time):
- self._transient_check_count = 0
- return True
+ if not force and (self._transient_check_count < _transient_check_time):
+ return True # no vacuum cleaning this time
+ self._transient_check_count = 0
# Age-based expiration
if self._transient_max_hours:
# for backward compatibility
resolve_o2m_commands_to_record_dicts = resolve_2many_commands
+ def _register_hook(self, cr):
+ """ stuff to do right after the registry is built """
+ # no-op by default; models may override to run setup that needs the
+ # fully-built registry
+ pass
+
# keep this import here, at top it will cause dependency cycle errors
import expression
"""
_auto = False # don't create any database backend for AbstractModels
_register = False # not visible in ORM registry, meant to be python-inherited only
+ _transient = False
def itemgetter_tuple(items):
""" Fixes itemgetter inconsistency (useful in some cases) of not returning
m = re.match(r'^null value in column "(?P<field>\w+)" violates '
r'not-null constraint\n',
str(e))
- if not m or m.group('field') not in fields:
+ # `m` may be None when the error message does not match the expected
+ # pattern: test it before any m.group() access (otherwise AttributeError)
+ if not m or m.group('field') not in fields:
+ return {'message': unicode(e)}
+ field_name = m.group('field')
+ message = _(u"Missing required value for the field '%s'.") % field_name
+ field = fields.get(field_name)
+ if field:
+ message = _(u"%s This might be '%s' in the current model, or a field "
+ u"of the same name in an o2m.") % (message, field['string'])
+ return {
+ 'message': message,
+ 'field': field_name,
+ }
+def convert_pgerror_23505(model, fields, info, e):
+ # Convert a unique_violation (pgcode 23505) into a readable error dict.
+ m = re.match(r'^duplicate key (?P<field>\w+) violates unique constraint',
+ str(e))
+ # `m` may be None for unexpected message formats: test it before any
+ # m.group() access (otherwise AttributeError)
+ if not m or m.group('field') not in fields:
return {'message': unicode(e)}
- field = fields[m.group('field')]
+ field_name = m.group('field')
+ message = _(u"The value for the field '%s' already exists.") % field_name
+ field = fields.get(field_name)
+ if field:
+ message = _(u"%s This might be '%s' in the current model, or a field "
+ u"of the same name in an o2m.") % (message, field['string'])
return {
- 'message': _(u"Missing required value for the field '%(field)s'") % {
- 'field': field['string']
- },
- 'field': m.group('field'),
+ 'message': message,
+ 'field': field_name,
}
PGERROR_TO_OE = collections.defaultdict(
lambda: (lambda model, fvg, info, pgerror: {'message': unicode(pgerror)}), {
# not_null_violation
'23502': convert_pgerror_23502,
+ # unique constraint error
+ '23505': convert_pgerror_23505,
})
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: