#
##############################################################################
-#.apidoc title: Object Relational Mapping
-#.apidoc module-mods: member-order: bysource
"""
Object relational mapping to database (postgresql) module
import fields
import openerp
-import openerp.netsvc as netsvc
import openerp.tools as tools
from openerp.tools.config import config
from openerp.tools.misc import CountingStream
for field_name, field_column in fields_to_fetch:
if field_column._type == 'many2one':
if result_line[field_name]:
- obj = self._table.pool.get(field_column._obj)
+ obj = self._table.pool[field_column._obj]
if isinstance(result_line[field_name], (list, tuple)):
value = result_line[field_name][0]
else:
else:
new_data[field_name] = browse_null()
elif field_column._type in ('one2many', 'many2many') and len(result_line[field_name]):
- new_data[field_name] = self._list_class([browse_record(self._cr, self._uid, id, self._table.pool.get(field_column._obj), self._cache, context=self._context, list_class=self._list_class, fields_process=self._fields_process) for id in result_line[field_name]], self._context)
+ new_data[field_name] = self._list_class([browse_record(self._cr, self._uid, id, self._table.pool[field_column._obj], self._cache, context=self._context, list_class=self._list_class, fields_process=self._fields_process) for id in result_line[field_name]], self._context)
elif field_column._type == 'reference':
if result_line[field_name]:
if isinstance(result_line[field_name], browse_record):
ref_obj, ref_id = result_line[field_name].split(',')
ref_id = long(ref_id)
if ref_id:
- obj = self._table.pool.get(ref_obj)
+ obj = self._table.pool[ref_obj]
new_data[field_name] = browse_record(self._cr, self._uid, ref_id, obj, self._cache, context=self._context, list_class=self._list_class, fields_process=self._fields_process)
else:
new_data[field_name] = browse_null()
try:
return self[name]
except KeyError, e:
- raise AttributeError(e)
+ import sys
+ exc_info = sys.exc_info()
+ raise AttributeError, "Got %r while trying to get attribute %s on a %s record." % (e, name, self._table._name), exc_info[2]
def __contains__(self, name):
    # A record exposes ``name`` if it is a regular column, a field
    # inherited through _inherits, or any plain attribute of the model.
    return (name in self._table._columns) or (name in self._table._inherit_fields) or hasattr(self._table, name)
raise TypeError('_name is mandatory in case of multiple inheritance')
for parent_name in ((type(parent_names)==list) and parent_names or [parent_names]):
- parent_model = pool.get(parent_name)
- if not parent_model:
+ if parent_name not in pool:
raise TypeError('The model "%s" specifies an unexisting parent class "%s"\n'
'You may need to add a dependency on the parent class\' module.' % (name, parent_name))
+ parent_model = pool[parent_name]
if not getattr(cls, '_original_module', None) and name == parent_model._name:
cls._original_module = parent_model._original_module
parent_class = parent_model.__class__
'ondelete': field['on_delete'],
'translate': (field['translate']),
'manual': True,
+ '_prefetch': False,
#'select': int(field['select_level'])
}
return ''
def selection_field(in_field):
- col_obj = self.pool.get(in_field.keys()[0])
+ col_obj = self.pool[in_field.keys()[0]]
if f[i] in col_obj._columns.keys():
return col_obj._columns[f[i]]
elif f[i] in col_obj._inherits.keys():
if not data[fpos]:
dt = ''
for rr in r:
- name_relation = self.pool.get(rr._table_name)._rec_name
+ name_relation = self.pool[rr._table_name]._rec_name
if isinstance(rr[name_relation], browse_record):
rr = rr[name_relation]
- rr_name = self.pool.get(rr._table_name).name_get(cr, uid, [rr.id], context=context)
+ rr_name = self.pool[rr._table_name].name_get(cr, uid, [rr.id], context=context)
rr_name = rr_name and rr_name[0] and rr_name[0][1] or ''
dt += tools.ustr(rr_name or '') + ','
data[fpos] = dt[:-1]
i += 1
if i == len(f):
if isinstance(r, browse_record):
- r = self.pool.get(r._table_name).name_get(cr, uid, [r.id], context=context)
+ r = self.pool[r._table_name].name_get(cr, uid, [r.id], context=context)
r = r and r[0] and r[0][1] or ''
data[fpos] = tools.ustr(r or '')
return [data] + lines
# get the default values for the inherited fields
for t in self._inherits.keys():
- defaults.update(self.pool.get(t).default_get(cr, uid, fields_list,
- context))
+ defaults.update(self.pool[t].default_get(cr, uid, fields_list, context))
# get the default values defined in the object
for f in fields_list:
if field in fields_list:
fld_def = (field in self._columns) and self._columns[field] or self._inherit_fields[field][2]
if fld_def._type == 'many2one':
- obj = self.pool.get(fld_def._obj)
+ obj = self.pool[fld_def._obj]
if not obj.search(cr, uid, [('id', '=', field_value or False)]):
continue
if fld_def._type == 'many2many':
- obj = self.pool.get(fld_def._obj)
+ obj = self.pool[fld_def._obj]
field_value2 = []
for i in range(len(field_value or [])):
if not obj.search(cr, uid, [('id', '=',
field_value2.append(field_value[i])
field_value = field_value2
if fld_def._type == 'one2many':
- obj = self.pool.get(fld_def._obj)
+ obj = self.pool[fld_def._obj]
field_value2 = []
for i in range(len(field_value or [])):
field_value2.append({})
for field2 in field_value[i]:
if field2 in obj._columns.keys() and obj._columns[field2]._type == 'many2one':
- obj2 = self.pool.get(obj._columns[field2]._obj)
+ obj2 = self.pool[obj._columns[field2]._obj]
if not obj2.search(cr, uid,
[('id', '=', field_value[i][field2])]):
continue
elif field2 in obj._inherit_fields.keys() and obj._inherit_fields[field2][2]._type == 'many2one':
- obj2 = self.pool.get(obj._inherit_fields[field2][2]._obj)
+ obj2 = self.pool[obj._inherit_fields[field2][2]._obj]
if not obj2.search(cr, uid,
[('id', '=', field_value[i][field2])]):
continue
# TODO I believe this loop can be replaced by
# res.extend(self._inherit_fields.keys())
for parent in self._inherits:
- res.extend(self.pool.get(parent).fields_get_keys(cr, user, context))
+ res.extend(self.pool[parent].fields_get_keys(cr, user, context))
return res
def _rec_name_fallback(self, cr, uid, context=None):
new_xml = etree.fromstring(encode(xml))
ctx = context.copy()
ctx['base_model_name'] = self._name
- xarch, xfields = self.pool.get(node.get('object')).__view_look_dom_arch(cr, user, new_xml, view_id, ctx)
+ xarch, xfields = self.pool[node.get('object')].__view_look_dom_arch(cr, user, new_xml, view_id, ctx)
views['form'] = {
'arch': xarch,
'fields': xfields
column = False
if column:
- relation = self.pool.get(column._obj)
+ relation = self.pool[column._obj] if column._obj else None
children = False
views = {}
fields = {}
if node.tag == 'diagram':
if node.getchildren()[0].tag == 'node':
- node_model = self.pool.get(node.getchildren()[0].get('object'))
+ node_model = self.pool[node.getchildren()[0].get('object')]
node_fields = node_model.fields_get(cr, user, None, context)
fields.update(node_fields)
if not node.get("create") and not node_model.check_access_rights(cr, user, 'create', raise_exception=False):
node.set("create", 'false')
if node.getchildren()[1].tag == 'arrow':
- arrow_fields = self.pool.get(node.getchildren()[1].get('object')).fields_get(cr, user, None, context)
+ arrow_fields = self.pool[node.getchildren()[1].get('object')].fields_get(cr, user, None, context)
fields.update(arrow_fields)
else:
fields = self.fields_get(cr, user, None, context)
if view_type == 'tree' or not action[2].get('multi')]
resprint = [clean(print_) for print_ in resprint
if view_type == 'tree' or not print_[2].get('multi')]
- #When multi="True" set it will display only in More of the list view
+ #When multi="True" set it will display only in More of the list view
resrelate = [clean(action) for action in resrelate
if (action[2].get('multi') and view_type == 'tree') or (not action[2].get('multi') and view_type == 'form')]
res[lang][f] = self._columns[f].string
for table in self._inherits:
cols = intersect(self._inherit_fields.keys(), fields)
- res2 = self.pool.get(table).read_string(cr, uid, id, langs, cols, context)
+ res2 = self.pool[table].read_string(cr, uid, id, langs, cols, context)
for lang in res2:
if lang in res:
res[lang]['code'] = lang
for table in self._inherits:
cols = intersect(self._inherit_fields.keys(), vals)
if cols:
- self.pool.get(table).write_string(cr, uid, id, langs, vals, context)
+ self.pool[table].write_string(cr, uid, id, langs, vals, context)
return True
def _add_missing_default_values(self, cr, uid, values, context=None):
order = orderby or groupby
data_ids = self.search(cr, uid, [('id', 'in', alldata.keys())], order=order, context=context)
-
+
# the IDs of records that have groupby field value = False or '' should be included too
data_ids += set(alldata.keys()).difference(data_ids)
-
- if groupby:
+
+ if groupby:
data = self.read(cr, uid, data_ids, [groupby], context=context)
# restore order of the search as read() uses the default _order (this is only for groups, so the footprint of data should be small):
data_dict = dict((d['id'], d[groupby] ) for d in data)
result = [{'id': i, groupby: data_dict[i]} for i in data_ids]
else:
- result = [{'id': i} for i in data_ids]
+ result = [{'id': i} for i in data_ids]
for d in result:
if groupby:
:param query: query object on which the JOIN should be added
"""
inherits_field = current_model._inherits[parent_model_name]
- parent_model = self.pool.get(parent_model_name)
+ parent_model = self.pool[parent_model_name]
parent_alias, parent_alias_statement = query.add_join((current_model._table, parent_model._table, inherits_field, 'id', inherits_field), implicit=True)
return parent_alias
parent_alias = '"%s"' % current_table._table
while field in current_table._inherit_fields and not field in current_table._columns:
parent_model_name = current_table._inherit_fields[field][0]
- parent_table = self.pool.get(parent_model_name)
+ parent_table = self.pool[parent_model_name]
parent_alias = self._inherits_join_add(current_table, parent_model_name, query)
current_table = parent_table
return '%s."%s"' % (parent_alias, field)
return
_logger.info('Computing parent left and right for table %s...', self._table)
def browse_rec(root, pos=0):
-# TODO: set order
+ # TODO: set order
where = self._parent_name+'='+str(root)
if not root:
where = self._parent_name+' IS NULL'
_schema.debug(msg, self._table, k, f._type)
if isinstance(f, fields.many2one):
- dest_model = self.pool.get(f._obj)
+ dest_model = self.pool[f._obj]
if dest_model._table != 'ir_actions':
self._m2o_fix_foreign_key(cr, self._table, k, dest_model, f.ondelete)
# and add constraints if needed
if isinstance(f, fields.many2one):
- if not self.pool.get(f._obj):
+ if f._obj not in self.pool:
raise except_orm('Programming Error', 'There is no reference available for %s' % (f._obj,))
- dest_model = self.pool.get(f._obj)
+ dest_model = self.pool[f._obj]
ref = dest_model._table
# ir_actions is inherited so foreign key doesn't work on it
if ref != 'ir_actions':
def _o2m_raise_on_missing_reference(self, cr, f):
# TODO this check should be a method on fields.one2many.
-
- other = self.pool.get(f._obj)
- if other:
+ if f._obj in self.pool:
+ other = self.pool[f._obj]
# TODO the condition could use fields_get_keys().
if f._fields_id not in other._columns.keys():
if f._fields_id not in other._inherit_fields.keys():
self._save_relation_table(cr, m2m_tbl)
cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (m2m_tbl,))
if not cr.dictfetchall():
- if not self.pool.get(f._obj):
+ if f._obj not in self.pool:
raise except_orm('Programming Error', 'Many2Many destination model does not exist: `%s`' % (f._obj,))
- dest_model = self.pool.get(f._obj)
+ dest_model = self.pool[f._obj]
ref = dest_model._table
cr.execute('CREATE TABLE "%s" ("%s" INTEGER NOT NULL, "%s" INTEGER NOT NULL, UNIQUE("%s","%s"))' % (m2m_tbl, col1, col2, col1, col2))
# create foreign key references with ondelete=cascade, unless the targets are SQL views
"""
res = {}
for table in self._inherits:
- other = self.pool.get(table)
+ other = self.pool[table]
for col in other._columns.keys():
res[col] = (table, self._inherits[table], other._columns[col], table)
for col in other._inherit_fields.keys():
translation_obj = self.pool.get('ir.translation')
for parent in self._inherits:
- res.update(self.pool.get(parent).fields_get(cr, user, allfields, context))
+ res.update(self.pool[parent].fields_get(cr, user, allfields, context))
for f, field in self._columns.iteritems():
if (allfields and f not in allfields) or \
return res
+ def get_empty_list_help(self, cr, user, help, context=None):
+ """ Generic method giving the help message displayed when having
+ no result to display in a list or kanban view. By default it returns
+ the help given in parameter that is generally the help message
+ defined in the action.
+ """
+ return help
+
def check_field_access_rights(self, cr, user, operation, fields, context=None):
"""
Check the user access rights on the given fields. This raises Access
"""
- if not context:
- context = {}
self.check_access_rights(cr, user, 'read')
fields = self.check_field_access_rights(cr, user, 'read', fields)
if isinstance(ids, (int, long)):
select = map(lambda x: isinstance(x, dict) and x['id'] or x, select)
result = self._read_flat(cr, user, select, fields, context, load)
- for r in result:
- for key, v in r.items():
- if v is None:
- r[key] = False
-
- if isinstance(ids, (int, long, dict)):
+ if isinstance(ids, (int, long)):
return result and result[0] or False
return result
cols = [x for x in intersect(self._inherit_fields.keys(), fields_to_read) if x not in self._columns.keys()]
if not cols:
continue
- res2 = self.pool.get(table).read(cr, user, [x[col] for x in res], cols, context, load)
+ res2 = self.pool[table].read(cr, user, [x[col] for x in res], cols, context, load)
res3 = {}
for r in res2:
if field in self._columns:
fobj = self._columns[field]
- if not fobj:
- continue
- groups = fobj.read
- if groups:
- edit = False
- for group in groups:
- module = group.split(".")[0]
- grp = group.split(".")[1]
- cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name=%s and module=%s and model=%s) and uid=%s", \
- (grp, module, 'res.groups', user))
- readonly = cr.fetchall()
- if readonly[0][0] >= 1:
- edit = True
- break
- elif readonly[0][0] == 0:
- edit = False
- else:
- edit = False
-
- if not edit:
- if type(vals[field]) == type([]):
- vals[field] = []
- elif type(vals[field]) == type(0.0):
- vals[field] = 0
- elif type(vals[field]) == type(''):
- vals[field] = '=No Permission='
- else:
- vals[field] = False
+ if fobj:
+ groups = fobj.read
+ if groups:
+ edit = False
+ for group in groups:
+ module = group.split(".")[0]
+ grp = group.split(".")[1]
+ cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name=%s and module=%s and model=%s) and uid=%s", \
+ (grp, module, 'res.groups', user))
+ readonly = cr.fetchall()
+ if readonly[0][0] >= 1:
+ edit = True
+ break
+ elif readonly[0][0] == 0:
+ edit = False
+ else:
+ edit = False
+
+ if not edit:
+ if type(vals[field]) == type([]):
+ vals[field] = []
+ elif type(vals[field]) == type(0.0):
+ vals[field] = 0
+ elif type(vals[field]) == type(''):
+ vals[field] = '=No Permission='
+ else:
+ vals[field] = False
+
+ if vals[field] is None:
+ vals[field] = False
+
return res
# TODO check READ access
# Attempt to distinguish record rule restriction vs deleted records,
# to provide a more specific error message - check if the missing
cr.execute('SELECT id FROM ' + self._table + ' WHERE id IN %s', (tuple(missing_ids),))
- if cr.rowcount:
+ forbidden_ids = [x[0] for x in cr.fetchall()]
+ if forbidden_ids:
# the missing ids are (at least partially) hidden by access rules
if uid == SUPERUSER_ID:
return
- _logger.warning('Access Denied by record rules for operation: %s, uid: %s, model: %s', operation, uid, self._name)
+ _logger.warning('Access Denied by record rules for operation: %s on record ids: %r, uid: %s, model: %s', operation, forbidden_ids, uid, self._name)
raise except_orm(_('Access Denied'),
_('The requested operation cannot be completed due to security restrictions. Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
(self._description, operation))
if operation in ('read','unlink'):
# No need to warn about deleting an already deleted record.
# And no error when reading a record that was deleted, to prevent spurious
- # errors for non-transactional search/read sequences coming from clients
+ # errors for non-transactional search/read sequences coming from clients
return
_logger.warning('Failed operation on deleted record(s): %s, uid: %s, model: %s', operation, uid, self._name)
raise except_orm(_('Missing document(s)'),
returned_ids = [x['id'] for x in cr.dictfetchall()]
self._check_record_rules_result_count(cr, uid, sub_ids, returned_ids, operation, context=context)
- def _workflow_trigger(self, cr, uid, ids, trigger, context=None):
- """Call given workflow trigger as a result of a CRUD operation"""
- wf_service = netsvc.LocalService("workflow")
+ def create_workflow(self, cr, uid, ids, context=None):
+ """Create a workflow instance for each given record IDs."""
+ from openerp import workflow
+ for res_id in ids:
+ workflow.trg_create(uid, self._name, res_id, cr)
+ return True
+
+ def delete_workflow(self, cr, uid, ids, context=None):
+ """Delete the workflow instances bound to the given record IDs."""
+ from openerp import workflow
+ for res_id in ids:
+ workflow.trg_delete(uid, self._name, res_id, cr)
+ return True
+
+ def step_workflow(self, cr, uid, ids, context=None):
+ """Reevaluate the workflow instances of the given record IDs."""
+ from openerp import workflow
for res_id in ids:
- getattr(wf_service, trigger)(uid, self._name, res_id, cr)
+ workflow.trg_write(uid, self._name, res_id, cr)
+ return True
- def _workflow_signal(self, cr, uid, ids, signal, context=None):
+ def signal_workflow(self, cr, uid, ids, signal, context=None):
"""Send given workflow signal and return a dict mapping ids to workflow results"""
- wf_service = netsvc.LocalService("workflow")
+ from openerp import workflow
result = {}
for res_id in ids:
- result[res_id] = wf_service.trg_validate(uid, self._name, res_id, signal, cr)
+ result[res_id] = workflow.trg_validate(uid, self._name, res_id, signal, cr)
return result
+ def redirect_workflow(self, cr, uid, old_new_ids, context=None):
+ """ Rebind the workflow instance bound to the given 'old' record IDs to
+ the given 'new' IDs. (``old_new_ids`` is a list of pairs ``(old, new)``.
+ """
+ from openerp import workflow
+ for old_id, new_id in old_new_ids:
+ workflow.trg_redirect(uid, self._name, old_id, new_id, cr)
+ return True
+
def unlink(self, cr, uid, ids, context=None):
"""
Delete records with given ids
property_ids = ir_property.search(cr, uid, [('res_id', 'in', ['%s,%s' % (self._name, i) for i in ids])], context=context)
ir_property.unlink(cr, uid, property_ids, context=context)
- self._workflow_trigger(cr, uid, ids, 'trg_delete', context=context)
+ self.delete_workflow(cr, uid, ids, context=context)
self.check_access_rule(cr, uid, ids, 'unlink', context=context)
pool_model_data = self.pool.get('ir.model.data')
if ir_value_ids:
ir_values_obj.unlink(cr, uid, ir_value_ids, context=context)
- for order, object, store_ids, fields in result_store:
- if object != self._name:
- obj = self.pool.get(object)
+ for order, obj_name, store_ids, fields in result_store:
+ if obj_name != self._name:
+ obj = self.pool[obj_name]
cr.execute('select id from '+obj._table+' where id IN %s', (tuple(store_ids),))
rids = map(lambda x: x[0], cr.fetchall())
if rids:
# TODO: optimize
for f in direct:
if self._columns[f].translate:
- src_trans = self.pool.get(self._name).read(cr, user, ids, [f])[0][f]
+ src_trans = self.pool[self._name].read(cr, user, ids, [f])[0][f]
if not src_trans:
src_trans = vals[f]
# Inserting value to DB
v[val] = vals[val]
unknown_fields.remove(val)
if v:
- self.pool.get(table).write(cr, user, nids, v, context)
+ self.pool[table].write(cr, user, nids, v, context)
if unknown_fields:
_logger.warning(
result.sort()
done = {}
- for order, object, ids_to_update, fields_to_recompute in result:
- key = (object, tuple(fields_to_recompute))
+ for order, model_name, ids_to_update, fields_to_recompute in result:
+ key = (model_name, tuple(fields_to_recompute))
done.setdefault(key, {})
# avoid to do several times the same computation
todo = []
if id not in done[key]:
done[key][id] = True
todo.append(id)
- self.pool.get(object)._store_set_values(cr, user, todo, fields_to_recompute, context)
+ self.pool[model_name]._store_set_values(cr, user, todo, fields_to_recompute, context)
- self._workflow_trigger(cr, user, ids, 'trg_write', context=context)
+ self.step_workflow(cr, user, ids, context=context)
return True
#
del vals[self._inherits[table]]
record_id = tocreate[table].pop('id', None)
-
+
# When linking/creating parent records, force context without 'no_store_function' key that
- # defers stored functions computing, as these won't be computed in batch at the end of create().
+ # defers stored functions computing, as these won't be computed in batch at the end of create().
parent_context = dict(context)
parent_context.pop('no_store_function', None)
-
+
if record_id is None or not record_id:
- record_id = self.pool.get(table).create(cr, user, tocreate[table], context=parent_context)
+ record_id = self.pool[table].create(cr, user, tocreate[table], context=parent_context)
else:
- self.pool.get(table).write(cr, user, [record_id], tocreate[table], context=parent_context)
+ self.pool[table].write(cr, user, [record_id], tocreate[table], context=parent_context)
upd0 += ',' + self._inherits[table]
upd1 += ',%s'
upd0 = upd0 + ',"' + field + '"'
upd1 = upd1 + ',' + self._columns[field]._symbol_set[0]
upd2.append(self._columns[field]._symbol_set[1](vals[field]))
- #for the function fields that receive a value, we set them directly in the database
+ #for the function fields that receive a value, we set them directly in the database
#(they may be required), but we also need to trigger the _fct_inv()
if (hasattr(self._columns[field], '_fnct_inv')) and not isinstance(self._columns[field], fields.related):
#TODO: this way to special case the related fields is really creepy but it shouldn't be changed at
result += self._store_get_values(cr, user, [id_new], vals.keys(), context)
result.sort()
done = []
- for order, object, ids, fields2 in result:
- if not (object, ids, fields2) in done:
- self.pool.get(object)._store_set_values(cr, user, ids, fields2, context)
- done.append((object, ids, fields2))
+ for order, model_name, ids, fields2 in result:
+ if not (model_name, ids, fields2) in done:
+ self.pool[model_name]._store_set_values(cr, user, ids, fields2, context)
+ done.append((model_name, ids, fields2))
if self._log_create and not (context and context.get('no_store_function', False)):
message = self._description + \
"' " + _("created.")
self.log(cr, user, id_new, message, True, context=context)
self.check_access_rule(cr, user, [id_new], 'create', context=context)
- self._workflow_trigger(cr, user, [id_new], 'trg_create', context=context)
+ self.create_workflow(cr, user, [id_new], context=context)
return id_new
def browse(self, cr, uid, select, context=None, list_class=None, fields_process=None):
return browse_null()
def _store_get_values(self, cr, uid, ids, fields, context):
- """Returns an ordered list of fields.functions to call due to
+ """Returns an ordered list of fields.function to call due to
an update operation on ``fields`` of records with ``ids``,
- obtained by calling the 'store' functions of these fields,
+ obtained by calling the 'store' triggers of these fields,
as setup by their 'store' attribute.
:return: [(priority, model_name, [record_ids,], [function_fields,])]
stored_functions = self.pool._store_function.get(self._name, [])
# use indexed names for the details of the stored_functions:
- model_name_, func_field_to_compute_, id_mapping_fnct_, trigger_fields_, priority_ = range(5)
+ model_name_, func_field_to_compute_, target_ids_func_, trigger_fields_, priority_ = range(5)
- # only keep functions that should be triggered for the ``fields``
+ # only keep store triggers that should be triggered for the ``fields``
# being written to.
- to_compute = [f for f in stored_functions \
+ triggers_to_compute = [f for f in stored_functions \
if ((not f[trigger_fields_]) or set(fields).intersection(f[trigger_fields_]))]
- mapping = {}
- fresults = {}
- for function in to_compute:
- fid = id(function[id_mapping_fnct_])
- if not fid in fresults:
+ to_compute_map = {}
+ target_id_results = {}
+ for store_trigger in triggers_to_compute:
+ target_func_id_ = id(store_trigger[target_ids_func_])
+ if not target_func_id_ in target_id_results:
# use admin user for accessing objects having rules defined on store fields
- fresults[fid] = [id2 for id2 in function[id_mapping_fnct_](self, cr, SUPERUSER_ID, ids, context) if id2]
- target_ids = fresults[fid]
+ target_id_results[target_func_id_] = [i for i in store_trigger[target_ids_func_](self, cr, SUPERUSER_ID, ids, context) if i]
+ target_ids = target_id_results[target_func_id_]
# the compound key must consider the priority and model name
- key = (function[priority_], function[model_name_])
+ key = (store_trigger[priority_], store_trigger[model_name_])
for target_id in target_ids:
- mapping.setdefault(key, {}).setdefault(target_id,set()).add(tuple(function))
+ to_compute_map.setdefault(key, {}).setdefault(target_id,set()).add(tuple(store_trigger))
- # Here mapping looks like:
- # { (10, 'model_a') : { target_id1: [ (function_1_tuple, function_2_tuple) ], ... }
- # (20, 'model_a') : { target_id2: [ (function_3_tuple, function_4_tuple) ], ... }
- # (99, 'model_a') : { target_id1: [ (function_5_tuple, function_6_tuple) ], ... }
+ # Here to_compute_map looks like:
+ # { (10, 'model_a') : { target_id1: [ (trigger_1_tuple, trigger_2_tuple) ], ... }
+ # (20, 'model_a') : { target_id2: [ (trigger_3_tuple, trigger_4_tuple) ], ... }
+ # (99, 'model_a') : { target_id1: [ (trigger_5_tuple, trigger_6_tuple) ], ... }
# }
# Now we need to generate the batch function calls list
# call_map =
# { (10, 'model_a') : [(10, 'model_a', [record_ids,], [function_fields,])] }
call_map = {}
- for ((priority,model), id_map) in mapping.iteritems():
- functions_ids_maps = {}
+ for ((priority,model), id_map) in to_compute_map.iteritems():
+ trigger_ids_maps = {}
# trigger_ids_maps =
# { (trigger_1_tuple, trigger_2_tuple) : [target_id1, target_id2, ..] }
- for fid, functions in id_map.iteritems():
- functions_ids_maps.setdefault(tuple(functions), []).append(fid)
- for functions, ids in functions_ids_maps.iteritems():
- call_map.setdefault((priority,model),[]).append((priority, model, ids,
- [f[func_field_to_compute_] for f in functions]))
+ for target_id, triggers in id_map.iteritems():
+ trigger_ids_maps.setdefault(tuple(triggers), []).append(target_id)
+ for triggers, target_ids in trigger_ids_maps.iteritems():
+ call_map.setdefault((priority,model),[]).append((priority, model, target_ids,
+ [t[func_field_to_compute_] for t in triggers]))
ordered_keys = call_map.keys()
ordered_keys.sort()
result = []
# to reach the parent table (if it was not JOINed yet in the query)
parent_alias = child_object._inherits_join_add(child_object, parent_model, query)
# inherited rules are applied on the external table -> need to get the alias and replace
- parent_table = self.pool.get(parent_model)._table
+ parent_table = self.pool[parent_model]._table
added_clause = [clause.replace('"%s"' % parent_table, '"%s"' % parent_alias) for clause in added_clause]
# change references to parent_table to parent_alias, because we now use the alias to refer to the table
new_tables = []
return
# figure out the applicable order_by for the m2o
- dest_model = self.pool.get(order_field_column._obj)
+ dest_model = self.pool[order_field_column._obj]
m2o_order = dest_model._order
if not regex_order.match(m2o_order):
# _order is complex, can't use it here, so we default to _rec_name
else:
continue # ignore non-readable or "non-joinable" fields
elif order_field in self._inherit_fields:
- parent_obj = self.pool.get(self._inherit_fields[order_field][3])
+ parent_obj = self.pool[self._inherit_fields[order_field][3]]
order_column = parent_obj._columns[order_field]
if order_column._classic_read:
inner_clause = self._inherits_join_calc(order_field, query)
limit_str = limit and ' limit %d' % limit or ''
offset_str = offset and ' offset %d' % offset or ''
where_str = where_clause and (" WHERE %s" % where_clause) or ''
+ query_str = 'SELECT "%s".id FROM ' % self._table + from_clause + where_str + order_by + limit_str + offset_str
if count:
- cr.execute('SELECT count("%s".id) FROM ' % self._table + from_clause + where_str + limit_str + offset_str, where_clause_params)
- res = cr.fetchall()
- return res[0][0]
- cr.execute('SELECT "%s".id FROM ' % self._table + from_clause + where_str + order_by + limit_str + offset_str, where_clause_params)
+ # /!\ the main query must be executed as a subquery, otherwise
+ # offset and limit apply to the result of count()!
+ cr.execute('SELECT count(*) FROM (%s) AS count' % query_str, where_clause_params)
+ res = cr.fetchone()
+ return res[0]
+
+ cr.execute(query_str, where_clause_params)
res = cr.fetchall()
# TDE note: with auto_join, we could have several lines about the same result
if not args:
args = []
if field in self._inherit_fields:
- return self.pool.get(self._inherit_fields[field][0]).distinct_field_get(cr, uid, field, value, args, offset, limit)
+ return self.pool[self._inherit_fields[field][0]].distinct_field_get(cr, uid, field, value, args, offset, limit)
else:
return self._columns[field].search(cr, self, args, field, value, offset, limit, uid)
if field_to_other in default:
# all the fields of 'other' are given by the record: default[field_to_other],
# except the ones redefined in self
- blacklist.update(set(self.pool.get(other)._all_columns) - set(self._columns))
+ blacklist.update(set(self.pool[other]._all_columns) - set(self._columns))
else:
- blacklist_given_fields(self.pool.get(other))
+ blacklist_given_fields(self.pool[other])
blacklist_given_fields(self)
res = dict(default)
elif field._type == 'many2one':
res[f] = data[f] and data[f][0]
elif field._type == 'one2many':
- other = self.pool.get(field._obj)
+ other = self.pool[field._obj]
# duplicate following the order of the ids because we'll rely on
# it later for copying translations in copy_translation()!
lines = [other.copy_data(cr, uid, line_id, context=context) for line_id in sorted(data[f])]
for field_name, field_def in fields.items():
# we must recursively copy the translations for o2o and o2m
if field_def['type'] == 'one2many':
- target_obj = self.pool.get(field_def['relation'])
+ target_obj = self.pool[field_def['relation']]
old_record, new_record = self.read(cr, uid, [old_id, new_id], [field_name], context=context)
# here we rely on the order of the ids to match the translations
# as foreseen in copy_data()
get_xml_id = get_external_id
_get_xml_ids = _get_external_ids
+ def print_report(self, cr, uid, ids, name, data, context=None):
+ """
+ Render the report `name` for the given IDs. The report must be defined
+ for this model, not another.
+ """
+ report = self.pool['ir.actions.report.xml']._lookup_report(cr, name)
+ assert self._name == report.table
+ return report.create(cr, uid, ids, data, context)
+
# Transience
def is_transient(self):
""" Return whether the model is transient.
result, record_ids = [], list(command[2])
# read the records and apply the updates
- other_model = self.pool.get(self._all_columns[field_name].column._obj)
+ other_model = self.pool[self._all_columns[field_name].column._obj]
for record in other_model.read(cr, uid, record_ids, fields=fields, context=context):
record.update(updates.get(record['id'], {}))
result.append(record)
""" stuff to do right after the registry is built """
pass
+ def __getattr__(self, name):
+ if name.startswith('signal_'):
+ signal_name = name[len('signal_'):]
+ assert signal_name
+ return (lambda *args, **kwargs:
+ self.signal_workflow(*args, signal=signal_name, **kwargs))
+ get = getattr(super(BaseModel, self), '__getattr__', None)
+ if get is not None: return get(name)
+ raise AttributeError(
+ "'%s' object has no attribute '%s'" % (type(self).__name__, name))
+
# keep this import here, at top it will cause dependency cycle errors
import expression
message = _(u"Missing required value for the field '%s'.") % field_name
field = fields.get(field_name)
if field:
- message = _(u"%s This might be '%s' in the current model, or a field "
- u"of the same name in an o2m.") % (message, field['string'])
+ message = _(u"Missing required value for the field '%s' (%s)") % (field['string'], field_name)
return {
'message': message,
'field': field_name,