import copy
import datetime
import logging
+import warnings
import operator
import pickle
import re
class browse_record(object):
logger = netsvc.Logger()
- def __init__(self, cr, uid, id, table, cache, context=None, list_class = None, fields_process={}):
+ def __init__(self, cr, uid, id, table, cache, context=None, list_class=None, fields_process=None):
'''
table : the object (inherited from orm)
context : dictionary with an optional context
'''
- if not context:
+ if fields_process is None:
+ fields_process = {}
+ if context is None:
context = {}
self._list_class = list_class or browse_record_list
self._cr = cr
CONCURRENCY_CHECK_FIELD = '__last_update'
def log(self, cr, uid, id, message, secondary=False, context=None):
- return self.pool.get('res.log').create(cr, uid, {
- 'name': message,
- 'res_model': self._name,
- 'secondary': secondary,
- 'res_id': id},
+ return self.pool.get('res.log').create(cr, uid,
+ {
+ 'name': message,
+ 'res_model': self._name,
+ 'secondary': secondary,
+ 'res_id': id,
+ 'context': context,
+ },
context=context
)
def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None):
raise NotImplementedError(_('The read_group method is not implemented on this object !'))
- def _field_create(self, cr, context={}):
+ def _field_create(self, cr, context=None):
+ if context is None:
+ context = {}
cr.execute("SELECT id FROM ir_model WHERE model=%s", (self._name,))
if not cr.rowcount:
cr.execute('SELECT nextval(%s)', ('ir_model_id_seq',))
vals['relation'], bool(vals['view_load']),
vals['select_level'], bool(vals['readonly']), bool(vals['required']), bool(vals['selectable']), vals['relation_field'], vals['model'], vals['name']
))
- continue
+ break
cr.commit()
- def _auto_init(self, cr, context={}):
- self._field_create(cr, context)
+ def _auto_init(self, cr, context=None):
+ self._field_create(cr, context=context)
def __init__(self, cr):
if not self._name and not hasattr(self, '_inherit'):
if not self._table:
self._table = self._name.replace('.', '_')
- def browse(self, cr, uid, select, context=None, list_class=None, fields_process={}):
+ def browse(self, cr, uid, select, context=None, list_class=None, fields_process=None):
"""
Fetch records as objects allowing to use dot notation to browse fields and relations
:rtype: object or list of objects requested
"""
- if not context:
- context = {}
self._list_class = list_class or browse_record_list
cache = {}
# need to accepts ints and longs because ids coming from a method
if isinstance(select, (int, long)):
return browse_record(cr, uid, select, self, cache, context=context, list_class=self._list_class, fields_process=fields_process)
elif isinstance(select, list):
- return self._list_class([browse_record(cr, uid, id, self, cache, context=context, list_class=self._list_class, fields_process=fields_process) for id in select], context)
+ return self._list_class([browse_record(cr, uid, id, self, cache, context=context, list_class=self._list_class, fields_process=fields_process) for id in select], context=context)
else:
return browse_null()
lines = []
data = map(lambda x: '', range(len(fields)))
+ done = []
for fpos in range(len(fields)):
- done = []
f = fields[fpos]
if f:
r = row
fields2 = map(lambda x: (x[:i+1]==f[:i+1] and x[i+1:]) \
or [], fields)
if fields2 in done:
- break
+ if [x for x in fields2 if x]:
+ break
done.append(fields2)
for row2 in r:
lines2 = self.__export_row(cr, uid, row2, fields2,
for key in self.pool._sql_error.keys():
if key in e[0]:
msg = self.pool._sql_error[key]
+ if hasattr(msg, '__call__'):
+ msg = msg(cr, uid, [res_id,], context=context)
break
return (-1, res, 'Line ' + str(counter) +' : ' + msg, '')
if isinstance(e, osv.orm.except_orm):
return (done, 0, 0, 0)
def read(self, cr, user, ids, fields=None, context=None, load='_classic_read'):
+ """
+ Read records with given ids with the given fields
+
+ :param cr: database cursor
+ :param user: current user id
+ :param ids: id or list of the ids of the records to read
+ :param fields: optional list of field names to return (default: all fields would be returned)
+ :type fields: list (example ['field_name_1', ...])
+ :param context: optional context dictionary - it may contain keys for specifying certain options
+ like ``context_lang``, ``context_tz`` to alter the results of the call.
+ A special ``bin_size`` boolean flag may also be passed in the context to request the
+ value of all fields.binary columns to be returned as the size of the binary instead of its
+ contents. This can also be selectively overridden by passing a field-specific flag
+ in the form ``bin_size_XXX: True/False`` where ``XXX`` is the name of the field.
+ Note: The ``bin_size_XXX`` form is new in OpenERP v6.0.
+ :return: list of dictionaries (one dictionary per record asked) with requested field values
+ :rtype: [{'name_of_the_field': value, ...}, ...]
+ :raise AccessError: * if user has no read rights on the requested object
+ * if user tries to bypass access rules for read on the requested object
+
+ """
raise NotImplementedError(_('The read method is not implemented on this object !'))
def get_invalid_fields(self, cr, uid):
# Check presence of __call__ directly instead of using
# callable() because it will be deprecated as of Python 3.0
if hasattr(msg, '__call__'):
- txt_msg, params = msg(self, cr, uid, ids)
- tmp_msg = trans._get_source(cr, uid, self._name, 'constraint', lng, source=txt_msg) or txt_msg
- translated_msg = tmp_msg % params
+ tmp_msg = msg(self, cr, uid, ids, context=context)
+ # Why translate something that has been generated dynamically?
+ # tmp_msg = trans._get_source(cr, uid, self._name, 'constraint', lng, source=txt_msg) or txt_msg
+ if isinstance(tmp_msg, tuple):
+ tmp_msg, params = tmp_msg
+ translated_msg = tmp_msg % params
+ else:
+ translated_msg = tmp_msg
else:
translated_msg = trans._get_source(cr, uid, self._name, 'constraint', lng, source=msg) or msg
error_msgs.append(
:param fields_list: list of fields to get the default values for (example ['field1', 'field2',])
:type fields_list: list
- :param context: usual context dictionary - it may contains keys in the form ``default_XXX``,
- where XXX is a field name to set or override a default value.
+ :param context: optional context dictionary - it may contain keys for specifying certain options
+ like ``context_lang`` (language) or ``context_tz`` (timezone) to alter the results of the call.
+ It may contain keys in the form ``default_XXX`` (where XXX is a field name), to set
+ or override a default value for a field.
+ A special ``bin_size`` boolean flag may also be passed in the context to request the
+ value of all fields.binary columns to be returned as the size of the binary instead of its
+ contents. This can also be selectively overridden by passing a field-specific flag
+ in the form ``bin_size_XXX: True/False`` where ``XXX`` is the name of the field.
+ Note: The ``bin_size_XXX`` form is new in OpenERP v6.0.
:return: dictionary of the default values (set on the object model class, through user preferences, or in the context)
"""
# trigger view init hook
context = {}
result = False
fields = {}
- childs = True
+ children = True
def encode(s):
if isinstance(s, unicode):
'fields': xfields
}
attrs = {'views': views}
- view = False
fields = views.get('field', False) and views['field'].get('fields', False)
if node.get('name'):
attrs = {}
if column:
relation = self.pool.get(column._obj)
- childs = False
+ children = False
views = {}
for f in node:
if f.tag in ('form', 'tree', 'graph'):
node.set('sum', trans)
for f in node:
- if childs or (node.tag == 'field' and f.tag in ('filter','separator')):
+ if children or (node.tag == 'field' and f.tag in ('filter','separator')):
fields.update(self.__view_look_dom(cr, user, f, view_id, context))
return fields
- def __view_look_dom_arch(self, cr, user, node, view_id, context=None):
- fields_def = self.__view_look_dom(cr, user, node, view_id, context=context)
+ def _disable_workflow_buttons(self, cr, user, node):
+ if user == 1:
+ # admin user can always activate workflow buttons
+ return node
- rolesobj = self.pool.get('res.roles')
+ # TODO handle the case of more than one workflow for a model or multiple
+ # transitions with different groups and same signal
usersobj = self.pool.get('res.users')
-
buttons = (n for n in node.getiterator('button') if n.get('type') != 'object')
for button in buttons:
- can_click = True
- if user != 1: # admin user has all roles
- user_roles = usersobj.read(cr, user, [user], ['roles_id'])[0]['roles_id']
- # TODO handle the case of more than one workflow for a model
- cr.execute("""SELECT DISTINCT t.role_id
- FROM wkf
- INNER JOIN wkf_activity a ON a.wkf_id = wkf.id
- INNER JOIN wkf_transition t ON (t.act_to = a.id)
- WHERE wkf.osv = %s
- AND t.signal = %s
- """, (self._name, button.get('name'),))
- roles = cr.fetchall()
-
- # draft -> valid = signal_next (role X)
- # draft -> cancel = signal_cancel (no role)
- #
- # valid -> running = signal_next (role Y)
- # valid -> cancel = signal_cancel (role Z)
- #
- # running -> done = signal_next (role Z)
- # running -> cancel = signal_cancel (role Z)
-
- # As we don't know the object state, in this scenario,
- # the button "signal_cancel" will be always shown as there is no restriction to cancel in draft
- # the button "signal_next" will be show if the user has any of the roles (X Y or Z)
- # The verification will be made later in workflow process...
- if roles:
- can_click = any((not role) or rolesobj.check(cr, user, user_roles, role) for (role,) in roles)
-
+ user_groups = usersobj.read(cr, user, [user], ['groups_id'])[0]['groups_id']
+ cr.execute("""SELECT DISTINCT t.group_id
+ FROM wkf
+ INNER JOIN wkf_activity a ON a.wkf_id = wkf.id
+ INNER JOIN wkf_transition t ON (t.act_to = a.id)
+ WHERE wkf.osv = %s
+ AND t.signal = %s
+ AND t.group_id is NOT NULL
+ """, (self._name, button.get('name')))
+ group_ids = [x[0] for x in cr.fetchall() if x[0]]
+ can_click = not group_ids or bool(set(user_groups).intersection(group_ids))
button.set('readonly', str(int(not can_click)))
+ return node
+ def __view_look_dom_arch(self, cr, user, node, view_id, context=None):
+ fields_def = self.__view_look_dom(cr, user, node, view_id, context=context)
+ node = self._disable_workflow_buttons(cr, user, node)
arch = etree.tostring(node, encoding="utf-8").replace('\t', '')
-
fields = {}
if node.tag == 'diagram':
if node.getchildren()[0].tag == 'node':
fields = self.fields_get(cr, user, fields_def.keys(), context)
for field in fields_def:
if field == 'id':
- # sometime, the view may containt the (invisible) field 'id' needed for a domain (when 2 objects have cross references)
+ # sometime, the view may contain the (invisible) field 'id' needed for a domain (when 2 objects have cross references)
fields['id'] = {'readonly': True, 'type': 'integer', 'string': 'ID'}
elif field in fields:
fields[field].update(fields_def[field])
model = res[0][1]
res.insert(0, ("Can't find field '%s' in the following view parts composing the view of object model '%s':" % (field, model), None))
msg = "\n * ".join([r[0] for r in res])
- msg += "\n\nEither you wrongly customised this view, or some modules bringing those views are not compatible with your current data model"
+ msg += "\n\nEither you wrongly customized this view, or some modules bringing those views are not compatible with your current data model"
netsvc.Logger().notifyChannel('orm', netsvc.LOG_ERROR, msg)
raise except_orm('View error', msg)
return arch, fields
return arch
- def __get_default_search_view(self, cr, uid, context={}):
+ def __get_default_search_view(self, cr, uid, context=None):
def encode(s):
if isinstance(s, unicode):
return s.encode('utf8')
return s
- view = self.fields_view_get(cr, uid, False, 'form', context)
+ view = self.fields_view_get(cr, uid, False, 'form', context=context)
root = etree.fromstring(encode(view['arch']))
res = etree.XML("""<search string="%s"></search>""" % root.get("string", ""))
resprint = ir_values_obj.get(cr, user, 'action',
'client_print_multi', [(self._name, False)], False,
context)
- resaction = []
resaction = ir_values_obj.get(cr, user, 'action',
'client_action_multi', [(self._name, False)], False,
context)
# override defaults with the provided values, never allow the other way around
defaults = self.default_get(cr, uid, missing_defaults, context)
for dv in defaults:
- # FIXME: also handle inherited m2m
- if dv in self._columns and self._columns[dv]._type == 'many2many' \
+ if (dv in self._columns and self._columns[dv]._type == 'many2many') \
+ or (dv in self._inherit_fields and self._inherit_fields[dv][2]._type == 'many2many') \
and defaults[dv] and isinstance(defaults[dv][0], (int, long)):
defaults[dv] = [(6, 0, defaults[dv])]
+ if dv in self._columns and self._columns[dv]._type == 'one2many' \
+ or (dv in self._inherit_fields and self._inherit_fields[dv][2]._type == 'one2many') \
+ and isinstance(defaults[dv], (list, tuple)) and isinstance(defaults[dv][0], dict):
+ defaults[dv] = [(0, 0, x) for x in defaults[dv]]
defaults.update(values)
values = defaults
return values
'create_date': create_date,
'write_uid': False,
'write_date': False,
- 'id': id
+ 'id': id,
+ 'xmlid' : False,
})
return result
fget = self.fields_get(cr, uid, fields)
float_int_fields = filter(lambda x: fget[x]['type'] in ('float', 'integer'), fields)
- sum = {}
flist = ''
group_by = groupby
if groupby:
or (f in self._columns and getattr(self._columns[f], '_classic_write'))]
for f in fields_pre:
if f not in ['id', 'sequence']:
- operator = fget[f].get('group_operator', 'sum')
+ group_operator = fget[f].get('group_operator', 'sum')
if flist:
flist += ','
- flist += operator+'('+f+') as '+f
+ flist += group_operator+'('+f+') as '+f
gb = groupby and (' GROUP BY '+groupby) or ''
where += ' order by '+self._parent_order
cr.execute('SELECT id FROM '+self._table+' WHERE '+where)
pos2 = pos + 1
- childs = cr.fetchall()
- for id in childs:
+ for id in cr.fetchall():
pos2 = browse_rec(id[0], pos2)
cr.execute('update '+self._table+' set parent_left=%s, parent_right=%s where id=%s', (pos, pos2, root))
return pos2 + 1
self.__schema.debug("Table '%s': column '%s': dropped NOT NULL constraint",
self._table, column['attname'])
- def _auto_init(self, cr, context={}):
+ def _auto_init(self, cr, context=None):
+ if context is None:
+ context = {}
store_compute = False
create = False
todo_end = []
cr.execute('CREATE TABLE "%s" (id SERIAL NOT NULL, PRIMARY KEY(id)) WITHOUT OIDS' % (self._table,))
cr.execute("COMMENT ON TABLE \"%s\" IS '%s'" % (self._table, self._description.replace("'", "''")))
create = True
-
self.__schema.debug("Table '%s': created", self._table)
cr.commit()
# iterate on the "object columns"
todo_update_store = []
update_custom_fields = context.get('update_custom_fields', False)
+
+ cr.execute("SELECT c.relname,a.attname,a.attlen,a.atttypmod,a.attnotnull,a.atthasdef,t.typname,CASE WHEN a.attlen=-1 THEN a.atttypmod-4 ELSE a.attlen END as size " \
+ "FROM pg_class c,pg_attribute a,pg_type t " \
+ "WHERE c.relname=%s " \
+ "AND c.oid=a.attrelid " \
+ "AND a.atttypid=t.oid", (self._table,))
+ col_data = dict(map(lambda x: (x['attname'], x),cr.dictfetchall()))
+
+
for k in self._columns:
if k in ('id', 'write_uid', 'write_date', 'create_uid', 'create_date'):
continue
- #raise _('Can not define a column %s. Reserved keyword !') % (k,)
#Not Updating Custom fields
if k.startswith('x_') and not update_custom_fields:
continue
+
f = self._columns[k]
if isinstance(f, fields.one2many):
self.__schema.debug("Create table '%s': relation between '%s' and '%s'",
f._rel, self._table, ref)
else:
- cr.execute("SELECT c.relname,a.attname,a.attlen,a.atttypmod,a.attnotnull,a.atthasdef,t.typname,CASE WHEN a.attlen=-1 THEN a.atttypmod-4 ELSE a.attlen END as size " \
- "FROM pg_class c,pg_attribute a,pg_type t " \
- "WHERE c.relname=%s " \
- "AND a.attname=%s " \
- "AND c.oid=a.attrelid " \
- "AND a.atttypid=t.oid", (self._table, k))
- res = cr.dictfetchall()
+ res = col_data.get(k, [])
+ res = res and [res] or []
if not res and hasattr(f, 'oldname'):
cr.execute("SELECT c.relname,a.attname,a.attlen,a.atttypmod,a.attnotnull,a.atthasdef,t.typname,CASE WHEN a.attlen=-1 THEN a.atttypmod-4 ELSE a.attlen END as size " \
"FROM pg_class c,pg_attribute a,pg_type t " \
return super(orm, self).fields_get(cr, user, fields, context, write_access)
def read(self, cr, user, ids, fields=None, context=None, load='_classic_read'):
- """
- Read records with given ids with the given fields
-
- :param cr: database cursor
- :param user: current user id
- :param ids: id or list of the ids of the records to read
- :param fields: optional list of field names to return (default: all fields would be returned)
- :type fields: list (example ['field_name_1', ...])
- :param context: (optional) context arguments, like lang, time zone
- :return: list of dictionaries((dictionary per record asked)) with requested field values
- :rtype: [{‘name_of_the_field’: value, ...}, ...]
- :raise AccessError: * if user has no read rights on the requested object
- * if user tries to bypass access rules for read on the requested object
-
- """
if not context:
context = {}
self.pool.get('ir.model.access').check(cr, user, self._name, 'read', context=context)
for key, v in r.items():
if v is None:
r[key] = False
- if key in self._columns:
- column = self._columns[key]
- elif key in self._inherit_fields:
- column = self._inherit_fields[key][2]
- else:
- continue
-# TODO: removed this, it's too slow
-# if v and column._type == 'reference':
-# model_name, ref_id = v.split(',', 1)
-# model = self.pool.get(model_name)
-# if not model:
-# reset = True
-# else:
-# cr.execute('SELECT count(1) FROM "%s" WHERE id=%%s' % (model._table,), (ref_id,))
-# reset = not cr.fetchone()[0]
-# if reset:
-# if column._classic_write:
-# query = 'UPDATE "%s" SET "%s"=NULL WHERE id=%%s' % (self._table, key)
-# cr.execute(query, (r['id'],))
-# r[key] = False
if isinstance(ids, (int, long, dict)):
return result and result[0] or False
cr.execute(query, [tuple(sub_ids)] + rule_params)
if cr.rowcount != len(sub_ids):
raise except_orm(_('AccessError'),
- _('You try to bypass an access rule while reading (Document type: %s).') % self._description)
+ _('Operation prohibited by access rules, or performed on an already deleted document (Operation: read, Document type: %s).')
+ % (self._description,))
else:
cr.execute(query, (tuple(sub_ids),))
res.extend(cr.dictfetchall())
else:
res = map(lambda x: {'id': x}, ids)
- if not res:
- res = map(lambda x: {'id': x}, ids)
- for record in res:
- for f in fields_to_read:
- field_val = False
- if f in self._columns.keys():
- ftype = self._columns[f]._type
- elif f in self._inherit_fields.keys():
- ftype = self._inherit_fields[f][2]._type
- else:
- continue
- if ftype in ('one2many', 'many2many'):
- field_val = []
- record.update({f:field_val})
+# if not res:
+# res = map(lambda x: {'id': x}, ids)
+# for record in res:
+# for f in fields_to_read:
+# field_val = False
+# if f in self._columns.keys():
+# ftype = self._columns[f]._type
+# elif f in self._inherit_fields.keys():
+# ftype = self._inherit_fields[f][2]._type
+# else:
+# continue
+# if ftype in ('one2many', 'many2many'):
+# field_val = []
+# record.update({f:field_val})
for f in fields_pre:
if f == self.CONCURRENCY_CHECK_FIELD:
for pos in val:
for record in res:
if isinstance(res2[record['id']], str): res2[record['id']] = eval(res2[record['id']]) #TOCHECK : why got string instend of dict in python2.6
- record[pos] = res2[record['id']][pos]
+ multi_fields = res2.get(record['id'],{})
+ if multi_fields:
+ record[pos] = multi_fields.get(pos,[])
else:
for f in val:
res2 = self._columns[f].get(cr, self, ids, f, user, context=context, values=res)
def perm_read(self, cr, user, ids, context=None, details=True):
"""
- Read the permission for record of the given ids
+ Returns some metadata about the given records.
- :param cr: database cursor
- :param user: current user id
- :param ids: id or list of ids
- :param context: context arguments, like lang, time zone
:param details: if True, \*_uid fields are replaced with the name of the user
:return: list of ownership dictionaries for each requested record
:rtype: list of dictionaries with the following keys:
* create_date: date when the record was created
* write_uid: last user who changed the record
* write_date: date of the last change to the record
-
+ * xmlid: XML ID to use to refer to this record (if there is one), in format ``module.name``
"""
if not context:
context = {}
uniq = isinstance(ids, (int, long))
if uniq:
ids = [ids]
- fields = 'id'
+ fields = ['id']
if self._log_access:
- fields += ', create_uid, create_date, write_uid, write_date'
- query = 'SELECT %s FROM "%s" WHERE id IN %%s' % (fields, self._table)
- cr.execute(query, (tuple(ids),))
+ fields += ['create_uid', 'create_date', 'write_uid', 'write_date']
+ quoted_table = '"%s"' % self._table
+ fields_str = ",".join('%s.%s'%(quoted_table, field) for field in fields)
+ query = '''SELECT %s, __imd.module, __imd.name
+ FROM %s LEFT JOIN ir_model_data __imd
+ ON (__imd.model = %%s and __imd.res_id = %s.id)
+ WHERE %s.id IN %%s''' % (fields_str, quoted_table, quoted_table, quoted_table)
+ cr.execute(query, (self._name, tuple(ids)))
res = cr.dictfetchall()
for r in res:
for key in r:
r[key] = r[key] or False
- if key in ('write_uid', 'create_uid', 'uid') and details:
+ if details and key in ('write_uid', 'create_uid'):
if r[key]:
r[key] = self.pool.get('res.users').name_get(cr, user, [r[key]])[0]
+ r['xmlid'] = ("%(module)s.%(name)s" % r) if r['name'] else False
+ del r['name'], r['module']
if uniq:
return res[ids[0]]
return res
def _check_concurrency(self, cr, ids, context):
if not context:
return
- if context.get(self.CONCURRENCY_CHECK_FIELD) and self._log_access:
- def key(oid):
- return "%s,%s" % (self._name, oid)
- santa = "(id = %s AND %s < COALESCE(write_date, create_date, now())::timestamp)"
- for i in range(0, len(ids), cr.IN_MAX):
- sub_ids = tools.flatten(((oid, context[self.CONCURRENCY_CHECK_FIELD][key(oid)])
- for oid in ids[i:i+cr.IN_MAX]
- if key(oid) in context[self.CONCURRENCY_CHECK_FIELD]))
- if sub_ids:
- cr.execute("SELECT count(1) FROM %s WHERE %s" % (self._table, " OR ".join([santa]*(len(sub_ids)/2))), sub_ids)
- res = cr.fetchone()
- if res and res[0]:
- raise except_orm('ConcurrencyException', _('Records were modified in the meanwhile'))
+ if not (context.get(self.CONCURRENCY_CHECK_FIELD) and self._log_access):
+ return
+ check_clause = "(id = %s AND %s < COALESCE(write_date, create_date, now())::timestamp)"
+ for sub_ids in cr.split_for_in_conditions(ids):
+ ids_to_check = []
+ for id in sub_ids:
+ id_ref = "%s,%s" % (self._name, id)
+ update_date = context[self.CONCURRENCY_CHECK_FIELD].pop(id_ref, None)
+ if update_date:
+ ids_to_check.extend([id, update_date])
+ if not ids_to_check:
+ continue
+ cr.execute("SELECT id FROM %s WHERE %s" % (self._table, " OR ".join([check_clause]*(len(ids_to_check)/2))), tuple(ids_to_check))
+ res = cr.fetchone()
+ if res:
+ # mention the first one only to keep the error message readable
+ raise except_orm('ConcurrencyException', _('A document was modified since you last viewed it (%s:%d)') % (self._description, res[0]))
def check_access_rule(self, cr, uid, ids, operation, context=None):
"""Verifies that the operation given by ``operation`` is allowed for the user
[sub_ids] + where_params)
if cr.rowcount != len(sub_ids):
raise except_orm(_('AccessError'),
- _('Operation prohibited by access rules (Operation: %s, Document type: %s).')
- % (operation, self._name))
+ _('Operation prohibited by access rules, or performed on an already deleted document (Operation: %s, Document type: %s).')
+ % (operation, self._description))
def unlink(self, cr, uid, ids, context=None):
"""
for sub_ids in cr.split_for_in_conditions(ids):
cr.execute('update ' + self._table + ' set ' + ','.join(upd0) + ' ' \
'where id IN %s', upd1 + [sub_ids])
+ if cr.rowcount != len(sub_ids):
+ raise except_orm(_('AccessError'),
+ _('One of the records you are trying to modify has already been deleted (Document type: %s).') % self._description)
if totranslate:
# TODO: optimize
for val in updend:
if self._inherit_fields[val][0] == table:
v[val] = vals[val]
- self.pool.get(table).write(cr, user, nids, v, context)
+ if v:
+ self.pool.get(table).write(cr, user, nids, v, context)
self._validate(cr, user, ids, context)
else:
default['state'] = self._defaults['state']
- context_wo_lang = context
+ context_wo_lang = context.copy()
if 'lang' in context:
del context_wo_lang['lang']
- data = self.read(cr, uid, [id], context=context_wo_lang)[0]
+ data = self.read(cr, uid, [id,], context=context_wo_lang)
+ if data:
+ data = data[0]
+ else:
+ raise IndexError( _("Record #%d of %s not found, cannot copy!") %( id, self._name))
fields = self.fields_get(cr, uid, context=context)
for f in fields:
old_record, new_record = self.read(cr, uid, [old_id, new_id], [field_name], context=context)
# here we rely on the order of the ids to match the translations
# as foreseen in copy_data()
- old_childs = sorted(old_record[field_name])
- new_childs = sorted(new_record[field_name])
- for (old_child, new_child) in zip(old_childs, new_childs):
+ old_children = sorted(old_record[field_name])
+ new_children = sorted(new_record[field_name])
+ for (old_child, new_child) in zip(old_children, new_children):
# recursive copy of translations here
target_obj.copy_translations(cr, uid, old_child, new_child, context=context)
# and for translatable fields we keep them for copy
return cr.fetchone()[0] == len(ids)
def check_recursion(self, cr, uid, ids, parent=None):
+ warnings.warn("You are using deprecated %s.check_recursion(). Please use the '_check_recursion()' instead!" % \
+ self._name, DeprecationWarning, stacklevel=3)
+ assert ' ' not in (parent or '')
+ return self._check_recursion(cr, uid, ids, parent)
+
+ def _check_recursion(self, cr, uid, ids, parent=None):
"""
Verifies that there is no loop in a hierarchical structure of records,
by following the parent relationship using the **parent** field until a loop