1 # -*- coding: utf-8 -*-
2 ##############################################################################
4 # OpenERP, Open Source Management Solution
5 # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
7 # This program is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU Affero General Public License as
9 # published by the Free Software Foundation, either version 3 of the
10 # License, or (at your option) any later version.
12 # This program is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU Affero General Public License for more details.
17 # You should have received a copy of the GNU Affero General Public License
18 # along with this program. If not, see <http://www.gnu.org/licenses/>.
20 ##############################################################################
24 Object Relational Mapping module:
25 * Hierarchical structure
26 * Constraints consistency and validation
27 * Object metadata depends on its status
28 * Optimised processing by complex query (multiple actions at once)
29 * Default field values
30 * Permissions optimisation
31 * Persistant object: DB postgresql
33 * Multi-level caching system
34 * Two different inheritance mechanisms
35 * Rich set of field types:
36 - classical (varchar, integer, boolean, ...)
37 - relational (one2many, many2one, many2many)
51 from collections import defaultdict, MutableMapping
52 from inspect import getmembers
55 import dateutil.relativedelta
57 from lxml import etree
60 from . import SUPERUSER_ID
63 from .api import Environment
64 from .exceptions import except_orm, AccessError, MissingError, ValidationError
65 from .osv import fields
66 from .osv.query import Query
67 from .tools import lazy_property, ormcache
68 from .tools.config import config
69 from .tools.misc import CountingStream, DEFAULT_SERVER_DATETIME_FORMAT, DEFAULT_SERVER_DATE_FORMAT
70 from .tools.safe_eval import safe_eval as eval
71 from .tools.translate import _
73 _logger = logging.getLogger(__name__)
74 _schema = logging.getLogger(__name__ + '.schema')
76 regex_order = re.compile('^( *([a-z0-9:_]+|"[a-z0-9:_]+")( *desc| *asc)?( *, *|))+$', re.I)
77 regex_object_name = re.compile(r'^[a-z0-9_.]+$')
78 onchange_v7 = re.compile(r"^(\w+)\((.*)\)$")
80 AUTOINIT_RECALCULATE_STORED_FIELDS = 1000
83 def check_object_name(name):
84 """ Check if the given name is a valid openerp object name.
86 The _name attribute in osv and osv_memory object is subject to
87 some restrictions. This function returns True or False whether
88 the given name is allowed or not.
90 TODO: this is an approximation. The goal in this approximation
91 is to disallow uppercase characters (in some places, we quote
92 table/column names and in other not, which leads to this kind
95 psycopg2.ProgrammingError: relation "xxx" does not exist).
97 The same restriction should apply to both osv and osv_memory
98 objects for consistency.
101 if regex_object_name.match(name) is None:
105 def raise_on_invalid_object_name(name):
106 if not check_object_name(name):
107 msg = "The _name attribute %s is not valid." % name
109 raise except_orm('ValueError', msg)
111 POSTGRES_CONFDELTYPES = {
119 def intersect(la, lb):
120 return filter(lambda x: x in lb, la)
123 """ Test whether functions `f` and `g` are identical or have the same name """
124 return f == g or getattr(f, '__name__', 0) == getattr(g, '__name__', 1)
126 def fix_import_export_id_paths(fieldname):
128 Fixes the id fields in import and exports, and splits field paths
131 :param str fieldname: name of the field to import/export
132 :return: split field name
135 fixed_db_id = re.sub(r'([^/])\.id', r'\1/.id', fieldname)
136 fixed_external_id = re.sub(r'([^/]):id', r'\1/id', fixed_db_id)
137 return fixed_external_id.split('/')
139 def pg_varchar(size=0):
140 """ Returns the VARCHAR declaration for the provided size:
142 * If no size (or an empty or negative size is provided) return an
144 * Otherwise return a VARCHAR(n)
146 :type int size: varchar size, optional
150 if not isinstance(size, int):
151 raise TypeError("VARCHAR parameter should be an int, got %s"
154 return 'VARCHAR(%d)' % size
157 FIELDS_TO_PGTYPES = {
158 fields.boolean: 'bool',
159 fields.integer: 'int4',
163 fields.datetime: 'timestamp',
164 fields.binary: 'bytea',
165 fields.many2one: 'int4',
166 fields.serialized: 'text',
169 def get_pg_type(f, type_override=None):
171 :param fields._column f: field to get a Postgres type for
172 :param type type_override: use the provided type for dispatching instead of the field's own type
173 :returns: (postgres_identification_type, postgres_type_specification)
176 field_type = type_override or type(f)
178 if field_type in FIELDS_TO_PGTYPES:
179 pg_type = (FIELDS_TO_PGTYPES[field_type], FIELDS_TO_PGTYPES[field_type])
180 elif issubclass(field_type, fields.float):
181 # Explicit support for "falsy" digits (0, False) to indicate a
182 # NUMERIC field with no fixed precision. The values will be saved
183 # in the database with all significant digits.
184 # FLOAT8 type is still the default when there is no precision because
185 # it is faster for most operations (sums, etc.)
186 if f.digits is not None:
187 pg_type = ('numeric', 'NUMERIC')
189 pg_type = ('float8', 'DOUBLE PRECISION')
190 elif issubclass(field_type, (fields.char, fields.reference)):
191 pg_type = ('varchar', pg_varchar(f.size))
192 elif issubclass(field_type, fields.selection):
193 if (isinstance(f.selection, list) and isinstance(f.selection[0][0], int))\
194 or getattr(f, 'size', None) == -1:
195 pg_type = ('int4', 'INTEGER')
197 pg_type = ('varchar', pg_varchar(getattr(f, 'size', None)))
198 elif issubclass(field_type, fields.function):
199 if f._type == 'selection':
200 pg_type = ('varchar', pg_varchar())
202 pg_type = get_pg_type(f, getattr(fields, f._type))
204 _logger.warning('%s type not supported!', field_type)
210 class MetaModel(api.Meta):
211 """ Metaclass for the models.
213 This class is used as the metaclass for the class :class:`BaseModel` to
214 discover the models defined in a module (without instanciating them).
215 If the automatic discovery is not needed, it is possible to set the model's
216 ``_register`` attribute to False.
220 module_to_models = {}
222 def __init__(self, name, bases, attrs):
223 if not self._register:
224 self._register = True
225 super(MetaModel, self).__init__(name, bases, attrs)
228 if not hasattr(self, '_module'):
229 # The (OpenERP) module name can be in the `openerp.addons` namespace
230 # or not. For instance, module `sale` can be imported as
231 # `openerp.addons.sale` (the right way) or `sale` (for backward
233 module_parts = self.__module__.split('.')
234 if len(module_parts) > 2 and module_parts[:2] == ['openerp', 'addons']:
235 module_name = self.__module__.split('.')[2]
237 module_name = self.__module__.split('.')[0]
238 self._module = module_name
240 # Remember which models to instanciate for this module.
242 self.module_to_models.setdefault(self._module, []).append(self)
244 # transform columns into new-style fields (enables field inheritance)
245 for name, column in self._columns.iteritems():
246 if name in self.__dict__:
247 _logger.warning("In class %s, field %r overriding an existing value", self, name)
248 setattr(self, name, column.to_field())
252 """ Pseudo-ids for new records. """
253 def __nonzero__(self):
256 IdType = (int, long, basestring, NewId)
259 # maximum number of prefetched records
262 # special columns automatically created by the ORM
263 LOG_ACCESS_COLUMNS = ['create_uid', 'create_date', 'write_uid', 'write_date']
264 MAGIC_COLUMNS = ['id'] + LOG_ACCESS_COLUMNS
266 class BaseModel(object):
267 """ Base class for OpenERP models.
269 OpenERP models are created by inheriting from this class' subclasses:
271 * :class:`Model` for regular database-persisted models
273 * :class:`TransientModel` for temporary data, stored in the database but
274 automatically vaccuumed every so often
276 * :class:`AbstractModel` for abstract super classes meant to be shared by
277 multiple inheriting model
279 The system automatically instantiates every model once per database. Those
280 instances represent the available models on each database, and depend on
281 which modules are installed on that database. The actual class of each
282 instance is built from the Python classes that create and inherit from the
285 Every model instance is a "recordset", i.e., an ordered collection of
286 records of the model. Recordsets are returned by methods like
287 :meth:`~.browse`, :meth:`~.search`, or field accesses. Records have no
288 explicit representation: a record is represented as a recordset of one
291 To create a class that should not be instantiated, the _register class
292 attribute may be set to False.
294 __metaclass__ = MetaModel
295 _auto = True # create database backend
296 _register = False # Set to false if the model shouldn't be automatically discovered.
303 _parent_name = 'parent_id'
304 _parent_store = False
305 _parent_order = False
311 _translate = True # set to False to disable translations export for this model
313 # dict of {field:method}, with method returning the (name_get of records, {id: fold})
314 # to include in the _read_group, if grouped on this field
318 _transient = False # True in a TransientModel
321 # { 'parent_model': 'm2o_field', ... }
324 # Mapping from inherits'd field name to triple (m, r, f, n) where m is the
325 # model from which it is inherits'd, r is the (local) field towards m, f
326 # is the _column object itself, and n is the original (i.e. top-most)
329 # { 'field_name': ('parent_model', 'm2o_field_to_reach_parent',
330 # field_column_obj, origina_parent_model), ... }
333 # Mapping field name/column_info object
334 # This is similar to _inherit_fields but:
335 # 1. includes self fields,
336 # 2. uses column_info instead of a triple.
337 # Warning: _all_columns is deprecated, use _fields instead
342 _sql_constraints = []
344 # model dependencies, for models backed up by sql views:
345 # {model_name: field_names, ...}
348 CONCURRENCY_CHECK_FIELD = '__last_update'
350 def log(self, cr, uid, id, message, secondary=False, context=None):
351 return _logger.warning("log() is deprecated. Please use OpenChatter notification system instead of the res.log mechanism.")
353 def view_init(self, cr, uid, fields_list, context=None):
354 """Override this method to do specific things when a view on the object is opened."""
357 def _field_create(self, cr, context=None):
358 """ Create entries in ir_model_fields for all the model's fields.
360 If necessary, also create an entry in ir_model, and if called from the
361 modules loading scheme (by receiving 'module' in the context), also
362 create entries in ir_model_data (for the model and the fields).
364 - create an entry in ir_model (if there is not already one),
365 - create an entry in ir_model_data (if there is not already one, and if
366 'module' is in the context),
367 - update ir_model_fields with the fields found in _columns
368 (TODO there is some redundancy as _columns is updated from
369 ir_model_fields in __init__).
374 cr.execute("SELECT id FROM ir_model WHERE model=%s", (self._name,))
376 cr.execute('SELECT nextval(%s)', ('ir_model_id_seq',))
377 model_id = cr.fetchone()[0]
378 cr.execute("INSERT INTO ir_model (id,model, name, info,state) VALUES (%s, %s, %s, %s, %s)", (model_id, self._name, self._description, self.__doc__, 'base'))
380 model_id = cr.fetchone()[0]
381 if 'module' in context:
382 name_id = 'model_'+self._name.replace('.', '_')
383 cr.execute('select * from ir_model_data where name=%s and module=%s', (name_id, context['module']))
385 cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, (now() at time zone 'UTC'), (now() at time zone 'UTC'), %s, %s, %s)", \
386 (name_id, context['module'], 'ir.model', model_id)
389 cr.execute("SELECT * FROM ir_model_fields WHERE model=%s", (self._name,))
391 for rec in cr.dictfetchall():
392 cols[rec['name']] = rec
394 ir_model_fields_obj = self.pool.get('ir.model.fields')
396 # sparse field should be created at the end, as it depends on its serialized field already existing
397 model_fields = sorted(self._columns.items(), key=lambda x: 1 if x[1]._type == 'sparse' else 0)
398 for (k, f) in model_fields:
400 'model_id': model_id,
403 'field_description': f.string,
405 'relation': f._obj or '',
406 'select_level': tools.ustr(int(f.select)),
407 'readonly': (f.readonly and 1) or 0,
408 'required': (f.required and 1) or 0,
409 'selectable': (f.selectable and 1) or 0,
410 'translate': (f.translate and 1) or 0,
411 'relation_field': f._fields_id if isinstance(f, fields.one2many) else '',
412 'serialization_field_id': None,
414 if getattr(f, 'serialization_field', None):
415 # resolve link to serialization_field if specified by name
416 serialization_field_id = ir_model_fields_obj.search(cr, SUPERUSER_ID, [('model','=',vals['model']), ('name', '=', f.serialization_field)])
417 if not serialization_field_id:
418 raise except_orm(_('Error'), _("Serialization field `%s` not found for sparse field `%s`!") % (f.serialization_field, k))
419 vals['serialization_field_id'] = serialization_field_id[0]
421 # When its a custom field,it does not contain f.select
422 if context.get('field_state', 'base') == 'manual':
423 if context.get('field_name', '') == k:
424 vals['select_level'] = context.get('select', '0')
425 #setting value to let the problem NOT occur next time
427 vals['select_level'] = cols[k]['select_level']
430 cr.execute('select nextval(%s)', ('ir_model_fields_id_seq',))
431 id = cr.fetchone()[0]
433 cr.execute("""INSERT INTO ir_model_fields (
434 id, model_id, model, name, field_description, ttype,
435 relation,state,select_level,relation_field, translate, serialization_field_id
437 %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s
439 id, vals['model_id'], vals['model'], vals['name'], vals['field_description'], vals['ttype'],
440 vals['relation'], 'base',
441 vals['select_level'], vals['relation_field'], bool(vals['translate']), vals['serialization_field_id']
443 if 'module' in context:
444 name1 = 'field_' + self._table + '_' + k
445 cr.execute("select name from ir_model_data where name=%s", (name1,))
447 name1 = name1 + "_" + str(id)
448 cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, (now() at time zone 'UTC'), (now() at time zone 'UTC'), %s, %s, %s)", \
449 (name1, context['module'], 'ir.model.fields', id)
452 for key, val in vals.items():
453 if cols[k][key] != vals[key]:
454 cr.execute('update ir_model_fields set field_description=%s where model=%s and name=%s', (vals['field_description'], vals['model'], vals['name']))
455 cr.execute("""UPDATE ir_model_fields SET
456 model_id=%s, field_description=%s, ttype=%s, relation=%s,
457 select_level=%s, readonly=%s ,required=%s, selectable=%s, relation_field=%s, translate=%s, serialization_field_id=%s
459 model=%s AND name=%s""", (
460 vals['model_id'], vals['field_description'], vals['ttype'],
462 vals['select_level'], bool(vals['readonly']), bool(vals['required']), bool(vals['selectable']), vals['relation_field'], bool(vals['translate']), vals['serialization_field_id'], vals['model'], vals['name']
465 self.invalidate_cache(cr, SUPERUSER_ID)
468 def _add_field(cls, name, field):
469 """ Add the given `field` under the given `name` in the class """
470 # add field as an attribute and in cls._fields (for reflection)
471 if not isinstance(getattr(cls, name, field), Field):
472 _logger.warning("In model %r, field %r overriding existing value", cls._name, name)
473 setattr(cls, name, field)
474 cls._fields[name] = field
476 # basic setup of field
477 field.set_class_name(cls, name)
479 if field.store or field.column:
480 cls._columns[name] = field.to_column()
482 # remove potential column that may be overridden by field
483 cls._columns.pop(name, None)
486 def _pop_field(cls, name):
487 """ Remove the field with the given `name` from the model.
488 This method should only be used for manual fields.
490 field = cls._fields.pop(name)
491 cls._columns.pop(name, None)
492 cls._all_columns.pop(name, None)
493 if hasattr(cls, name):
498 def _add_magic_fields(cls):
499 """ Introduce magic fields on the current class
501 * id is a "normal" field (with a specific getter)
502 * create_uid, create_date, write_uid and write_date have become
504 * $CONCURRENCY_CHECK_FIELD is a computed field with its computing
505 method defined dynamically. Uses ``str(datetime.datetime.utcnow())``
506 to get the same structure as the previous
507 ``(now() at time zone 'UTC')::timestamp``::
509 # select (now() at time zone 'UTC')::timestamp;
511 ----------------------------
512 2013-06-18 08:30:37.292809
514 >>> str(datetime.datetime.utcnow())
515 '2013-06-18 08:31:32.821177'
517 def add(name, field):
518 """ add `field` with the given `name` if it does not exist yet """
519 if name not in cls._columns and name not in cls._fields:
520 cls._add_field(name, field)
525 # this field 'id' must override any other column or field
526 cls._add_field('id', fields.Id(automatic=True))
528 add('display_name', fields.Char(string='Display Name', automatic=True,
529 compute='_compute_display_name'))
532 add('create_uid', fields.Many2one('res.users', string='Created by', automatic=True))
533 add('create_date', fields.Datetime(string='Created on', automatic=True))
534 add('write_uid', fields.Many2one('res.users', string='Last Updated by', automatic=True))
535 add('write_date', fields.Datetime(string='Last Updated on', automatic=True))
536 last_modified_name = 'compute_concurrency_field_with_access'
538 last_modified_name = 'compute_concurrency_field'
540 # this field must override any other column or field
541 cls._add_field(cls.CONCURRENCY_CHECK_FIELD, fields.Datetime(
542 string='Last Modified on', compute=last_modified_name, automatic=True))
545 def compute_concurrency_field(self):
546 self[self.CONCURRENCY_CHECK_FIELD] = \
547 datetime.datetime.utcnow().strftime(DEFAULT_SERVER_DATETIME_FORMAT)
550 @api.depends('create_date', 'write_date')
551 def compute_concurrency_field_with_access(self):
552 self[self.CONCURRENCY_CHECK_FIELD] = \
553 self.write_date or self.create_date or \
554 datetime.datetime.utcnow().strftime(DEFAULT_SERVER_DATETIME_FORMAT)
557 # Goal: try to apply inheritance at the instanciation level and
558 # put objects in the pool var
561 def _build_model(cls, pool, cr):
562 """ Instanciate a given model.
564 This class method instanciates the class of some model (i.e. a class
565 deriving from osv or osv_memory). The class might be the class passed
566 in argument or, if it inherits from another class, a class constructed
567 by combining the two classes.
571 # IMPORTANT: the registry contains an instance for each model. The class
572 # of each model carries inferred metadata that is shared among the
573 # model's instances for this registry, but not among registries. Hence
574 # we cannot use that "registry class" for combining model classes by
575 # inheritance, since it confuses the metadata inference process.
577 # Keep links to non-inherited constraints in cls; this is useful for
578 # instance when exporting translations
579 cls._local_constraints = cls.__dict__.get('_constraints', [])
580 cls._local_sql_constraints = cls.__dict__.get('_sql_constraints', [])
582 # determine inherited models
583 parents = getattr(cls, '_inherit', [])
584 parents = [parents] if isinstance(parents, basestring) else (parents or [])
586 # determine the model's name
587 name = cls._name or (len(parents) == 1 and parents[0]) or cls.__name__
589 # determine the module that introduced the model
590 original_module = pool[name]._original_module if name in parents else cls._module
592 # build the class hierarchy for the model
593 for parent in parents:
594 if parent not in pool:
595 raise TypeError('The model "%s" specifies an unexisting parent class "%s"\n'
596 'You may need to add a dependency on the parent class\' module.' % (name, parent))
597 parent_model = pool[parent]
599 # do no use the class of parent_model, since that class contains
600 # inferred metadata; use its ancestor instead
601 parent_class = type(parent_model).__base__
603 # don't inherit custom fields
604 columns = dict((key, val)
605 for key, val in parent_class._columns.iteritems()
608 columns.update(cls._columns)
610 inherits = dict(parent_class._inherits)
611 inherits.update(cls._inherits)
613 depends = dict(parent_class._depends)
614 for m, fs in cls._depends.iteritems():
615 depends[m] = depends.get(m, []) + fs
617 old_constraints = parent_class._constraints
618 new_constraints = cls._constraints
619 # filter out from old_constraints the ones overridden by a
620 # constraint with the same function name in new_constraints
621 constraints = new_constraints + [oldc
622 for oldc in old_constraints
623 if not any(newc[2] == oldc[2] and same_name(newc[0], oldc[0])
624 for newc in new_constraints)
627 sql_constraints = cls._sql_constraints + \
628 parent_class._sql_constraints
634 '_inherits': inherits,
636 '_constraints': constraints,
637 '_sql_constraints': sql_constraints,
639 cls = type(name, (cls, parent_class), attrs)
641 # introduce the "registry class" of the model;
642 # duplicate some attributes so that the ORM can modify them
646 '_columns': dict(cls._columns),
647 '_defaults': {}, # filled by Field._determine_default()
648 '_inherits': dict(cls._inherits),
649 '_depends': dict(cls._depends),
650 '_constraints': list(cls._constraints),
651 '_sql_constraints': list(cls._sql_constraints),
652 '_original_module': original_module,
654 cls = type(cls._name, (cls,), attrs)
656 # instantiate the model, and initialize it
657 model = object.__new__(cls)
658 model.__init__(pool, cr)
662 def _init_function_fields(cls, pool, cr):
663 # initialize the list of non-stored function fields for this model
664 pool._pure_function_fields[cls._name] = []
666 # process store of low-level function fields
667 for fname, column in cls._columns.iteritems():
668 if hasattr(column, 'digits_change'):
669 column.digits_change(cr)
670 # filter out existing store about this field
671 pool._store_function[cls._name] = [
673 for stored in pool._store_function.get(cls._name, [])
674 if (stored[0], stored[1]) != (cls._name, fname)
676 if not isinstance(column, fields.function):
679 # register it on the pool for invalidation
680 pool._pure_function_fields[cls._name].append(fname)
682 # process store parameter
685 get_ids = lambda self, cr, uid, ids, c={}: ids
686 store = {cls._name: (get_ids, None, column.priority, None)}
687 for model, spec in store.iteritems():
689 (fnct, fields2, order, length) = spec
691 (fnct, fields2, order) = spec
694 raise except_orm('Error',
695 ('Invalid function definition %s in object %s !\nYou must use the definition: store={object:(fnct, fields, priority, time length)}.' % (fname, cls._name)))
696 pool._store_function.setdefault(model, [])
697 t = (cls._name, fname, fnct, tuple(fields2) if fields2 else None, order, length)
698 if t not in pool._store_function[model]:
699 pool._store_function[model].append(t)
700 pool._store_function[model].sort(key=lambda x: x[4])
703 def _init_manual_fields(cls, pool, cr):
704 # Check whether the query is already done
705 if pool.fields_by_model is not None:
706 manual_fields = pool.fields_by_model.get(cls._name, [])
708 cr.execute('SELECT * FROM ir_model_fields WHERE model=%s AND state=%s', (cls._name, 'manual'))
709 manual_fields = cr.dictfetchall()
711 for field in manual_fields:
712 if field['name'] in cls._columns:
715 'string': field['field_description'],
716 'required': bool(field['required']),
717 'readonly': bool(field['readonly']),
718 'domain': eval(field['domain']) if field['domain'] else None,
719 'size': field['size'] or None,
720 'ondelete': field['on_delete'],
721 'translate': (field['translate']),
724 #'select': int(field['select_level'])
726 if field['serialization_field_id']:
727 cr.execute('SELECT name FROM ir_model_fields WHERE id=%s', (field['serialization_field_id'],))
728 attrs.update({'serialization_field': cr.fetchone()[0], 'type': field['ttype']})
729 if field['ttype'] in ['many2one', 'one2many', 'many2many']:
730 attrs.update({'relation': field['relation']})
731 cls._columns[field['name']] = fields.sparse(**attrs)
732 elif field['ttype'] == 'selection':
733 cls._columns[field['name']] = fields.selection(eval(field['selection']), **attrs)
734 elif field['ttype'] == 'reference':
735 cls._columns[field['name']] = fields.reference(selection=eval(field['selection']), **attrs)
736 elif field['ttype'] == 'many2one':
737 cls._columns[field['name']] = fields.many2one(field['relation'], **attrs)
738 elif field['ttype'] == 'one2many':
739 cls._columns[field['name']] = fields.one2many(field['relation'], field['relation_field'], **attrs)
740 elif field['ttype'] == 'many2many':
741 _rel1 = field['relation'].replace('.', '_')
742 _rel2 = field['model'].replace('.', '_')
743 _rel_name = 'x_%s_%s_%s_rel' % (_rel1, _rel2, field['name'])
744 cls._columns[field['name']] = fields.many2many(field['relation'], _rel_name, 'id1', 'id2', **attrs)
746 cls._columns[field['name']] = getattr(fields, field['ttype'])(**attrs)
749 def _init_constraints_onchanges(cls):
750 # store sql constraint error messages
751 for (key, _, msg) in cls._sql_constraints:
752 cls.pool._sql_error[cls._table + '_' + key] = msg
754 # collect constraint and onchange methods
755 cls._constraint_methods = []
756 cls._onchange_methods = defaultdict(list)
757 for attr, func in getmembers(cls, callable):
758 if hasattr(func, '_constrains'):
759 if not all(name in cls._fields for name in func._constrains):
760 _logger.warning("@constrains%r parameters must be field names", func._constrains)
761 cls._constraint_methods.append(func)
762 if hasattr(func, '_onchange'):
763 if not all(name in cls._fields for name in func._onchange):
764 _logger.warning("@onchange%r parameters must be field names", func._onchange)
765 for name in func._onchange:
766 cls._onchange_methods[name].append(func)
769 # In the past, this method was registering the model class in the server.
770 # This job is now done entirely by the metaclass MetaModel.
772 # Do not create an instance here. Model instances are created by method
776 def __init__(self, pool, cr):
777 """ Initialize a model and make it part of the given registry.
779 - copy the stored fields' functions in the registry,
780 - retrieve custom fields and add them in the model,
781 - ensure there is a many2one for each _inherits'd parent,
782 - update the children's _columns,
783 - give a chance to each field to initialize itself.
788 # link the class to the registry, and update the registry
790 cls._model = self # backward compatibility
791 pool.add(cls._name, self)
793 # determine description, table, sequence and log_access
794 if not cls._description:
795 cls._description = cls._name
797 cls._table = cls._name.replace('.', '_')
798 if not cls._sequence:
799 cls._sequence = cls._table + '_id_seq'
800 if not hasattr(cls, '_log_access'):
801 # If _log_access is not specified, it is the same value as _auto.
802 cls._log_access = cls._auto
805 if cls.is_transient():
806 cls._transient_check_count = 0
807 cls._transient_max_count = config.get('osv_memory_count_limit')
808 cls._transient_max_hours = config.get('osv_memory_age_limit')
809 assert cls._log_access, \
810 "TransientModels must have log_access turned on, " \
811 "in order to implement their access rights policy"
813 # retrieve new-style fields (from above registry class) and duplicate
814 # them (to avoid clashes with inheritance between different models)
816 above = cls.__bases__[0]
817 for attr, field in getmembers(above, Field.__instancecheck__):
818 if not field.inherited:
819 cls._add_field(attr, field.new())
821 # introduce magic fields
822 cls._add_magic_fields()
824 # register stuff about low-level function fields and custom fields
825 cls._init_function_fields(pool, cr)
826 cls._init_manual_fields(pool, cr)
829 cls._inherits_check()
830 cls._inherits_reload()
832 # register constraints and onchange methods
833 cls._init_constraints_onchanges()
836 for k in cls._defaults:
837 assert k in cls._fields, \
838 "Model %s has a default for nonexiting field %s" % (cls._name, k)
841 for column in cls._columns.itervalues():
846 assert cls._rec_name in cls._fields, \
847 "Invalid rec_name %s for model %s" % (cls._rec_name, cls._name)
848 elif 'name' in cls._fields:
849 cls._rec_name = 'name'
851 # prepare ormcache, which must be shared by all instances of the model
856 def _is_an_ordinary_table(self):
857 self.env.cr.execute("""\
861 AND relkind = %s""", [self._table, 'r'])
862 return bool(self.env.cr.fetchone())
864 def __export_xml_id(self):
865 """ Return a valid xml_id for the record `self`. """
866 if not self._is_an_ordinary_table():
868 "You can not export the column ID of model %s, because the "
869 "table %s is not an ordinary table."
870 % (self._name, self._table))
871 ir_model_data = self.sudo().env['ir.model.data']
872 data = ir_model_data.search([('model', '=', self._name), ('res_id', '=', self.id)])
875 return '%s.%s' % (data[0].module, data[0].name)
880 name = '%s_%s' % (self._table, self.id)
881 while ir_model_data.search([('module', '=', '__export__'), ('name', '=', name)]):
883 name = '%s_%s_%s' % (self._table, self.id, postfix)
884 ir_model_data.create({
887 'module': '__export__',
890 return '__export__.' + name
893 def __export_rows(self, fields):
894 """ Export fields of the records in `self`.
896 :param fields: list of lists of fields to traverse
897 :return: list of lists of corresponding values
901 # main line of record, initially empty
902 current = [''] * len(fields)
903 lines.append(current)
905 # list of primary fields followed by secondary field(s)
908 # process column by column
909 for i, path in enumerate(fields):
914 if name in primary_done:
918 current[i] = str(record.id)
920 current[i] = record.__export_xml_id()
922 field = record._fields[name]
925 # this part could be simpler, but it has to be done this way
926 # in order to reproduce the former behavior
927 if not isinstance(value, BaseModel):
928 current[i] = field.convert_to_export(value, self.env)
930 primary_done.append(name)
932 # This is a special case, its strange behavior is intended!
933 if field.type == 'many2many' and len(path) > 1 and path[1] == 'id':
934 xml_ids = [r.__export_xml_id() for r in value]
935 current[i] = ','.join(xml_ids) or False
938 # recursively export the fields that follow name
939 fields2 = [(p[1:] if p and p[0] == name else []) for p in fields]
940 lines2 = value.__export_rows(fields2)
942 # merge first line with record's main line
943 for j, val in enumerate(lines2[0]):
946 # check value of current field
948 # assign xml_ids, and forget about remaining lines
949 xml_ids = [item[1] for item in value.name_get()]
950 current[i] = ','.join(xml_ids)
952 # append the other lines at the end
960 def export_data(self, fields_to_export, raw_data=False):
961 """ Export fields for selected objects
963 :param fields_to_export: list of fields
964 :param raw_data: True to return value in native Python type
965 :rtype: dictionary with a *datas* matrix
967 This method is used when exporting data via client menu
969 fields_to_export = map(fix_import_export_id_paths, fields_to_export)
971 self = self.with_context(export_raw_data=True)
972 return {'datas': self.__export_rows(fields_to_export)}
974 def import_data(self, cr, uid, fields, datas, mode='init', current_module='', noupdate=False, context=None, filename=None):
977 Use :meth:`~load` instead
979 Import given data in given module
981 This method is used when importing data via client menu.
983 Example of fields to import for a sale.order::
986 partner_id, (=name_search)
987 order_line/.id, (=database_id)
989 order_line/product_id/id, (=xml id)
990 order_line/price_unit,
991 order_line/product_uom_qty,
992 order_line/product_uom/id (=xml_id)
994 This method returns a 4-tuple with the following structure::
996 (return_code, errored_resource, error_message, unused)
998 * The first item is a return code, it is ``-1`` in case of
999 import error, or the last imported row number in case of success
1000 * The second item contains the record data dict that failed to import
1001 in case of error, otherwise it's 0
1002 * The third item contains an error message string in case of error,
1004 * The last item is currently unused, with no specific semantics
1006 :param fields: list of fields to import
1007 :param datas: data to import
1008 :param mode: 'init' or 'update' for record creation
1009 :param current_module: module name
1010 :param noupdate: flag for record creation
1011 :param filename: optional file to store partial import state for recovery
1012 :returns: 4-tuple in the form (return_code, errored_resource, error_message, unused)
1013 :rtype: (int, dict or 0, str or 0, str or 0)
1015 context = dict(context) if context is not None else {}
1016 context['_import_current_module'] = current_module
1018 fields = map(fix_import_export_id_paths, fields)
1019 ir_model_data_obj = self.pool.get('ir.model.data')
1022 if m['type'] == 'error':
1023 raise Exception(m['message'])
1025 if config.get('import_partial') and filename:
1026 with open(config.get('import_partial'), 'rb') as partial_import_file:
1027 data = pickle.load(partial_import_file)
1028 position = data.get(filename, 0)
1032 for res_id, xml_id, res, info in self._convert_records(cr, uid,
1033 self._extract_records(cr, uid, fields, datas,
1034 context=context, log=log),
1035 context=context, log=log):
1036 ir_model_data_obj._update(cr, uid, self._name,
1037 current_module, res, mode=mode, xml_id=xml_id,
1038 noupdate=noupdate, res_id=res_id, context=context)
1039 position = info.get('rows', {}).get('to', 0) + 1
1040 if config.get('import_partial') and filename and (not (position%100)):
1041 with open(config.get('import_partial'), 'rb') as partial_import:
1042 data = pickle.load(partial_import)
1043 data[filename] = position
1044 with open(config.get('import_partial'), 'wb') as partial_import:
1045 pickle.dump(data, partial_import)
1046 if context.get('defer_parent_store_computation'):
1047 self._parent_store_compute(cr)
1049 except Exception, e:
1051 return -1, {}, 'Line %d : %s' % (position + 1, tools.ustr(e)), ''
1053 if context.get('defer_parent_store_computation'):
1054 self._parent_store_compute(cr)
1055 return position, 0, 0, 0
1057 def load(self, cr, uid, fields, data, context=None):
1059 Attempts to load the data matrix, and returns a list of ids (or
1060 ``False`` if there was an error and no id could be generated) and a
1063 The ids are those of the records created and saved (in database), in
1064 the same order they were extracted from the file. They can be passed
1065 directly to :meth:`~read`
1067 :param fields: list of fields to import, at the same index as the corresponding data
1068 :type fields: list(str)
1069 :param data: row-major matrix of data to import
1070 :type data: list(list(str))
1071 :param dict context:
1072 :returns: {ids: list(int)|False, messages: [Message]}
1074 cr.execute('SAVEPOINT model_load')
1077 fields = map(fix_import_export_id_paths, fields)
1078 ModelData = self.pool['ir.model.data'].clear_caches()
1080 fg = self.fields_get(cr, uid, context=context)
1087 for id, xid, record, info in self._convert_records(cr, uid,
1088 self._extract_records(cr, uid, fields, data,
1089 context=context, log=messages.append),
1090 context=context, log=messages.append):
1092 cr.execute('SAVEPOINT model_load_save')
1093 except psycopg2.InternalError, e:
1094 # broken transaction, exit and hope the source error was
1096 if not any(message['type'] == 'error' for message in messages):
1097 messages.append(dict(info, type='error',message=
1098 u"Unknown database error: '%s'" % e))
1101 ids.append(ModelData._update(cr, uid, self._name,
1102 current_module, record, mode=mode, xml_id=xid,
1103 noupdate=noupdate, res_id=id, context=context))
1104 cr.execute('RELEASE SAVEPOINT model_load_save')
1105 except psycopg2.Warning, e:
1106 messages.append(dict(info, type='warning', message=str(e)))
1107 cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
1108 except psycopg2.Error, e:
1109 messages.append(dict(
1111 **PGERROR_TO_OE[e.pgcode](self, fg, info, e)))
1112 # Failed to write, log to messages, rollback savepoint (to
1113 # avoid broken transaction) and keep going
1114 cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
1115 except Exception, e:
1116 message = (_('Unknown error during import:') +
1117 ' %s: %s' % (type(e), unicode(e)))
1118 moreinfo = _('Resolve other errors first')
1119 messages.append(dict(info, type='error',
1122 # Failed for some reason, perhaps due to invalid data supplied,
1123 # rollback savepoint and keep going
1124 cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
1125 if any(message['type'] == 'error' for message in messages):
1126 cr.execute('ROLLBACK TO SAVEPOINT model_load')
1128 return {'ids': ids, 'messages': messages}
1130 def _extract_records(self, cr, uid, fields_, data,
1131 context=None, log=lambda a: None):
1132 """ Generates record dicts from the data sequence.
1134 The result is a generator of dicts mapping field names to raw
1135 (unconverted, unvalidated) values.
1137 For relational fields, if sub-fields were provided the value will be
1138 a list of sub-records
1140 The following sub-fields may be set on the record (by key):
1141 * None is the name_get for the record (to use with name_create/name_search)
1142 * "id" is the External ID for the record
1143 * ".id" is the Database ID for the record
1145 from openerp.fields import Char, Integer
1146 fields = dict(self._fields)
1147 # Fake fields to avoid special cases in extractor
1148 fields[None] = Char('rec_name')
1149 fields['id'] = Char('External ID')
1150 fields['.id'] = Integer('Database ID')
1152 # m2o fields can't be on multiple lines so exclude them from the
1153 # is_relational field rows filter, but special-case it later on to
1154 # be handled with relational fields (as it can have subfields)
1155 is_relational = lambda field: fields[field].relational
1156 get_o2m_values = itemgetter_tuple(
1157 [index for index, field in enumerate(fields_)
1158 if fields[field[0]].type == 'one2many'])
1159 get_nono2m_values = itemgetter_tuple(
1160 [index for index, field in enumerate(fields_)
1161 if fields[field[0]].type != 'one2many'])
1162 # Checks if the provided row has any non-empty non-relational field
1163 def only_o2m_values(row, f=get_nono2m_values, g=get_o2m_values):
1164 return any(g(row)) and not any(f(row))
1168 if index >= len(data): return
1171 # copy non-relational fields to record dict
1172 record = dict((field[0], value)
1173 for field, value in itertools.izip(fields_, row)
1174 if not is_relational(field[0]))
1176 # Get all following rows which have relational values attached to
1177 # the current record (no non-relational values)
1178 record_span = itertools.takewhile(
1179 only_o2m_values, itertools.islice(data, index + 1, None))
1180 # stitch record row back on for relational fields
1181 record_span = list(itertools.chain([row], record_span))
1182 for relfield in set(
1183 field[0] for field in fields_
1184 if is_relational(field[0])):
1185 # FIXME: how to not use _obj without relying on fields_get?
1186 Model = self.pool[fields[relfield].comodel_name]
1188 # get only cells for this sub-field, should be strictly
1189 # non-empty, field path [None] is for name_get field
1190 indices, subfields = zip(*((index, field[1:] or [None])
1191 for index, field in enumerate(fields_)
1192 if field[0] == relfield))
1194 # return all rows which have at least one value for the
1195 # subfields of relfield
1196 relfield_data = filter(any, map(itemgetter_tuple(indices), record_span))
1197 record[relfield] = [subrecord
1198 for subrecord, _subinfo in Model._extract_records(
1199 cr, uid, subfields, relfield_data,
1200 context=context, log=log)]
1202 yield record, {'rows': {
1204 'to': index + len(record_span) - 1
1206 index += len(record_span)
1208 def _convert_records(self, cr, uid, records,
1209 context=None, log=lambda a: None):
1210 """ Converts records from the source iterable (recursive dicts of
1211 strings) into forms which can be written to the database (via
1212 self.create or (ir.model.data)._update)
1214 :returns: a list of triplets of (id, xid, record)
1215 :rtype: list((int|None, str|None, dict))
1217 if context is None: context = {}
1218 Converter = self.pool['ir.fields.converter']
1219 Translation = self.pool['ir.translation']
1220 fields = dict(self._fields)
1222 (f, (Translation._get_source(cr, uid, self._name + ',' + f, 'field',
1223 context.get('lang'))
1225 for f, field in fields.iteritems())
1227 convert = Converter.for_model(cr, uid, self, context=context)
1229 def _log(base, field, exception):
1230 type = 'warning' if isinstance(exception, Warning) else 'error'
1231 # logs the logical (not human-readable) field name for automated
1232 # processing of response, but injects human readable in message
1233 record = dict(base, type=type, field=field,
1234 message=unicode(exception.args[0]) % base)
1235 if len(exception.args) > 1 and exception.args[1]:
1236 record.update(exception.args[1])
1239 stream = CountingStream(records)
1240 for record, extras in stream:
1243 # name_get/name_create
1244 if None in record: pass
1251 dbid = int(record['.id'])
1253 # in case of overridden id column
1254 dbid = record['.id']
1255 if not self.search(cr, uid, [('id', '=', dbid)], context=context):
1258 record=stream.index,
1260 message=_(u"Unknown database identifier '%s'") % dbid))
1263 converted = convert(record, lambda field, err:\
1264 _log(dict(extras, record=stream.index, field=field_names[field]), field, err))
1266 yield dbid, xid, converted, dict(extras, record=stream.index)
1269 def _validate_fields(self, field_names):
1270 field_names = set(field_names)
1272 # old-style constraint methods
1273 trans = self.env['ir.translation']
1274 cr, uid, context = self.env.args
1277 for fun, msg, names in self._constraints:
1279 # validation must be context-independent; call `fun` without context
1280 valid = not (set(names) & field_names) or fun(self._model, cr, uid, ids)
1282 except Exception, e:
1283 _logger.debug('Exception while validating constraint', exc_info=True)
1285 extra_error = tools.ustr(e)
1288 res_msg = msg(self._model, cr, uid, ids, context=context)
1289 if isinstance(res_msg, tuple):
1290 template, params = res_msg
1291 res_msg = template % params
1293 res_msg = trans._get_source(self._name, 'constraint', self.env.lang, msg)
1295 res_msg += "\n\n%s\n%s" % (_('Error details:'), extra_error)
1297 _("Field(s) `%s` failed against a constraint: %s") %
1298 (', '.join(names), res_msg)
1301 raise ValidationError('\n'.join(errors))
1303 # new-style constraint methods
1304 for check in self._constraint_methods:
1305 if set(check._constrains) & field_names:
1308 except ValidationError, e:
1310 except Exception, e:
1311 raise ValidationError("Error while validating constraint\n\n%s" % tools.ustr(e))
1314 def default_get(self, fields_list):
1315 """ default_get(fields) -> default_values
1317 Return default values for the fields in `fields_list`. Default
1318 values are determined by the context, user defaults, and the model
1321 :param fields_list: a list of field names
1322 :return: a dictionary mapping each field name to its corresponding
1323 default value, if it has one.
1326 # trigger view init hook
1327 self.view_init(fields_list)
1330 parent_fields = defaultdict(list)
1332 for name in fields_list:
1333 # 1. look up context
1334 key = 'default_' + name
1335 if key in self._context:
1336 defaults[name] = self._context[key]
1339 # 2. look up ir_values
1340 # Note: performance is good, because get_defaults_dict is cached!
1341 ir_values_dict = self.env['ir.values'].get_defaults_dict(self._name)
1342 if name in ir_values_dict:
1343 defaults[name] = ir_values_dict[name]
1346 field = self._fields.get(name)
1348 # 3. look up property fields
1349 # TODO: get rid of this one
1350 if field and field.company_dependent:
1351 defaults[name] = self.env['ir.property'].get(name, self._name)
1354 # 4. look up field.default
1355 if field and field.default:
1356 defaults[name] = field.default(self)
1359 # 5. delegate to parent model
1360 if field and field.inherited:
1361 field = field.related_field
1362 parent_fields[field.model_name].append(field.name)
1364 # convert default values to the right format
1365 defaults = self._convert_to_cache(defaults, validate=False)
1366 defaults = self._convert_to_write(defaults)
1368 # add default values for inherited fields
1369 for model, names in parent_fields.iteritems():
1370 defaults.update(self.env[model].default_get(names))
1374 def fields_get_keys(self, cr, user, context=None):
1375 res = self._columns.keys()
1376 # TODO I believe this loop can be replace by
1377 # res.extend(self._inherit_fields.key())
1378 for parent in self._inherits:
1379 res.extend(self.pool[parent].fields_get_keys(cr, user, context))
1382 def _rec_name_fallback(self, cr, uid, context=None):
1383 rec_name = self._rec_name
1384 if rec_name not in self._columns:
1385 rec_name = self._columns.keys()[0] if len(self._columns.keys()) > 0 else "id"
1389 # Overload this method if you need a window title which depends on the context
1391 def view_header_get(self, cr, user, view_id=None, view_type='form', context=None):
1394 def user_has_groups(self, cr, uid, groups, context=None):
1395 """Return true if the user is at least member of one of the groups
1396 in groups_str. Typically used to resolve `groups` attribute
1397 in view and model definitions.
1399 :param str groups: comma-separated list of fully-qualified group
1400 external IDs, e.g.: ``base.group_user,base.group_system``
1401 :return: True if the current user is a member of one of the
1404 return any(self.pool['res.users'].has_group(cr, uid, group_ext_id)
1405 for group_ext_id in groups.split(','))
1407 def _get_default_form_view(self, cr, user, context=None):
1408 """ Generates a default single-line form view using all fields
1409 of the current model except the m2m and o2m ones.
1411 :param cr: database cursor
1412 :param int user: user id
1413 :param dict context: connection context
1414 :returns: a form view as an lxml document
1415 :rtype: etree._Element
1417 view = etree.Element('form', string=self._description)
1418 group = etree.SubElement(view, 'group', col="4")
1419 for fname, field in self._fields.iteritems():
1420 if field.automatic or field.type in ('one2many', 'many2many'):
1423 etree.SubElement(group, 'field', name=fname)
1424 if field.type == 'text':
1425 etree.SubElement(group, 'newline')
1428 def _get_default_search_view(self, cr, user, context=None):
1429 """ Generates a single-field search view, based on _rec_name.
1431 :param cr: database cursor
1432 :param int user: user id
1433 :param dict context: connection context
1434 :returns: a tree view as an lxml document
1435 :rtype: etree._Element
1437 view = etree.Element('search', string=self._description)
1438 etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))
1441 def _get_default_tree_view(self, cr, user, context=None):
1442 """ Generates a single-field tree view, based on _rec_name.
1444 :param cr: database cursor
1445 :param int user: user id
1446 :param dict context: connection context
1447 :returns: a tree view as an lxml document
1448 :rtype: etree._Element
1450 view = etree.Element('tree', string=self._description)
1451 etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))
1454 def _get_default_calendar_view(self, cr, user, context=None):
1455 """ Generates a default calendar view by trying to infer
1456 calendar fields from a number of pre-set attribute names
1458 :param cr: database cursor
1459 :param int user: user id
1460 :param dict context: connection context
1461 :returns: a calendar view
1462 :rtype: etree._Element
1464 def set_first_of(seq, in_, to):
1465 """Sets the first value of `seq` also found in `in_` to
1466 the `to` attribute of the view being closed over.
1468 Returns whether it's found a suitable value (and set it on
1469 the attribute) or not
1477 view = etree.Element('calendar', string=self._description)
1478 etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))
1480 if self._date_name not in self._columns:
1482 for dt in ['date', 'date_start', 'x_date', 'x_date_start']:
1483 if dt in self._columns:
1484 self._date_name = dt
1489 raise except_orm(_('Invalid Object Architecture!'), _("Insufficient fields for Calendar View!"))
1490 view.set('date_start', self._date_name)
1492 set_first_of(["user_id", "partner_id", "x_user_id", "x_partner_id"],
1493 self._columns, 'color')
1495 if not set_first_of(["date_stop", "date_end", "x_date_stop", "x_date_end"],
1496 self._columns, 'date_stop'):
1497 if not set_first_of(["date_delay", "planned_hours", "x_date_delay", "x_planned_hours"],
1498 self._columns, 'date_delay'):
1500 _('Invalid Object Architecture!'),
1501 _("Insufficient fields to generate a Calendar View for %s, missing a date_stop or a date_delay" % self._name))
1505 def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
1506 """ fields_view_get([view_id | view_type='form'])
1508 Get the detailed composition of the requested view like fields, model, view architecture
1510 :param view_id: id of the view or None
1511 :param view_type: type of the view to return if view_id is None ('form', tree', ...)
1512 :param toolbar: true to include contextual actions
1513 :param submenu: deprecated
1514 :return: dictionary describing the composition of the requested view (including inherited views and extensions)
1515 :raise AttributeError:
1516 * if the inherited view has unknown position to work with other than 'before', 'after', 'inside', 'replace'
1517 * if some tag other than 'position' is found in parent view
1518 :raise Invalid ArchitectureError: if there is view type other than form, tree, calendar, search etc defined on the structure
1522 View = self.pool['ir.ui.view']
1525 'model': self._name,
1526 'field_parent': False,
1529 # try to find a view_id if none provided
1531 # <view_type>_view_ref in context can be used to overrride the default view
1532 view_ref_key = view_type + '_view_ref'
1533 view_ref = context.get(view_ref_key)
1536 module, view_ref = view_ref.split('.', 1)
1537 cr.execute("SELECT res_id FROM ir_model_data WHERE model='ir.ui.view' AND module=%s AND name=%s", (module, view_ref))
1538 view_ref_res = cr.fetchone()
1540 view_id = view_ref_res[0]
1542 _logger.warning('%r requires a fully-qualified external id (got: %r for model %s). '
1543 'Please use the complete `module.view_id` form instead.', view_ref_key, view_ref,
1547 # otherwise try to find the lowest priority matching ir.ui.view
1548 view_id = View.default_view(cr, uid, self._name, view_type, context=context)
1550 # context for post-processing might be overriden
1553 # read the view with inherited views applied
1554 root_view = View.read_combined(cr, uid, view_id, fields=['id', 'name', 'field_parent', 'type', 'model', 'arch'], context=context)
1555 result['arch'] = root_view['arch']
1556 result['name'] = root_view['name']
1557 result['type'] = root_view['type']
1558 result['view_id'] = root_view['id']
1559 result['field_parent'] = root_view['field_parent']
1560 # override context fro postprocessing
1561 if root_view.get('model') != self._name:
1562 ctx = dict(context, base_model_name=root_view.get('model'))
1564 # fallback on default views methods if no ir.ui.view could be found
1566 get_func = getattr(self, '_get_default_%s_view' % view_type)
1567 arch_etree = get_func(cr, uid, context)
1568 result['arch'] = etree.tostring(arch_etree, encoding='utf-8')
1569 result['type'] = view_type
1570 result['name'] = 'default'
1571 except AttributeError:
1572 raise except_orm(_('Invalid Architecture!'), _("No default view of type '%s' could be found !") % view_type)
1574 # Apply post processing, groups and modifiers etc...
1575 xarch, xfields = View.postprocess_and_fields(cr, uid, self._name, etree.fromstring(result['arch']), view_id, context=ctx)
1576 result['arch'] = xarch
1577 result['fields'] = xfields
1579 # Add related action information if aksed
1581 toclean = ('report_sxw_content', 'report_rml_content', 'report_sxw', 'report_rml', 'report_sxw_content_data', 'report_rml_content_data')
1587 ir_values_obj = self.pool.get('ir.values')
1588 resprint = ir_values_obj.get(cr, uid, 'action', 'client_print_multi', [(self._name, False)], False, context)
1589 resaction = ir_values_obj.get(cr, uid, 'action', 'client_action_multi', [(self._name, False)], False, context)
1590 resrelate = ir_values_obj.get(cr, uid, 'action', 'client_action_relate', [(self._name, False)], False, context)
1591 resaction = [clean(action) for action in resaction if view_type == 'tree' or not action[2].get('multi')]
1592 resprint = [clean(print_) for print_ in resprint if view_type == 'tree' or not print_[2].get('multi')]
1593 #When multi="True" set it will display only in More of the list view
1594 resrelate = [clean(action) for action in resrelate
1595 if (action[2].get('multi') and view_type == 'tree') or (not action[2].get('multi') and view_type == 'form')]
1597 for x in itertools.chain(resprint, resaction, resrelate):
1598 x['string'] = x['name']
1600 result['toolbar'] = {
1602 'action': resaction,
1607 def get_formview_id(self, cr, uid, id, context=None):
1608 """ Return an view id to open the document with. This method is meant to be
1609 overridden in addons that want to give specific view ids for example.
1611 :param int id: id of the document to open
1615 def get_formview_action(self, cr, uid, id, context=None):
1616 """ Return an action to open the document. This method is meant to be
1617 overridden in addons that want to give specific view ids for example.
1619 :param int id: id of the document to open
1621 view_id = self.get_formview_id(cr, uid, id, context=context)
1623 'type': 'ir.actions.act_window',
1624 'res_model': self._name,
1625 'view_type': 'form',
1626 'view_mode': 'form',
1627 'views': [(view_id, 'form')],
1628 'target': 'current',
1632 def get_access_action(self, cr, uid, id, context=None):
1633 """ Return an action to open the document. This method is meant to be
1634 overridden in addons that want to give specific access to the document.
1635 By default it opens the formview of the document.
1637 :paramt int id: id of the document to open
1639 return self.get_formview_action(cr, uid, id, context=context)
1641 def _view_look_dom_arch(self, cr, uid, node, view_id, context=None):
1642 return self.pool['ir.ui.view'].postprocess_and_fields(
1643 cr, uid, self._name, node, view_id, context=context)
1645 def search_count(self, cr, user, args, context=None):
1646 """ search_count(args) -> int
1648 Returns the number of records in the current model matching :ref:`the
1649 provided domain <reference/orm/domains>`.
1651 res = self.search(cr, user, args, context=context, count=True)
1652 if isinstance(res, list):
1656 @api.returns('self')
1657 def search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False):
1658 """ search(args[, offset=0][, limit=None][, order=None])
1660 Searches for records based on the ``args``
1661 :ref:`search domain <reference/orm/domains>`.
1663 :param args: :ref:`A search domain <reference/orm/domains>`. Use an empty
1664 list to match all records.
1665 :param int offset: number of results to ignore (default: none)
1666 :param int limit: maximum number of records to return (default: all)
1667 :param str order: sort string
1668 :returns: at most ``limit`` records matching the search criteria
1670 :raise AccessError: * if user tries to bypass access rules for read on the requested object.
1672 return self._search(cr, user, args, offset=offset, limit=limit, order=order, context=context, count=count)
1675 # display_name, name_get, name_create, name_search
1678 @api.depends(lambda self: (self._rec_name,) if self._rec_name else ())
1679 def _compute_display_name(self):
1680 names = dict(self.name_get())
1682 record.display_name = names.get(record.id, False)
1686 """ name_get() -> [(id, name), ...]
1688 Returns a textual representation for the records in ``self``.
1689 By default this is the value of the ``display_name`` field.
1691 :return: list of pairs ``(id, text_repr)`` for each records
1695 name = self._rec_name
1696 if name in self._fields:
1697 convert = self._fields[name].convert_to_display_name
1699 result.append((record.id, convert(record[name])))
1702 result.append((record.id, "%s,%s" % (record._name, record.id)))
1707 def name_create(self, name):
1708 """ name_create(name) -> record
1710 Create a new record by calling :meth:`~.create` with only one value
1711 provided: the display name of the new record.
1713 The new record will be initialized with any default values
1714 applicable to this model, or provided through the context. The usual
1715 behavior of :meth:`~.create` applies.
1717 :param name: display name of the record to create
1719 :return: the :meth:`~.name_get` pair value of the created record
1722 record = self.create({self._rec_name: name})
1723 return record.name_get()[0]
1725 _logger.warning("Cannot execute name_create, no _rec_name defined on %s", self._name)
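# Editor's sketch of a typical call (model name and value are hypothetical);
# with the record-style API, name_create() returns the name_get() pair of
# the newly created record:
#
#   rec_id, display = env['res.partner'].name_create('Agrolait')
#   # roughly: record = create({_rec_name: 'Agrolait'}); record.name_get()[0]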
1729 def name_search(self, name='', args=None, operator='ilike', limit=100):
1730 """ name_search(name='', args=None, operator='ilike', limit=100) -> records
1732 Search for records that have a display name matching the given
1733 `name` pattern when compared with the given `operator`, while also
1734 matching the optional search domain (`args`).
1736 This is used for example to provide suggestions based on a partial
1737 value for a relational field. It is sometimes seen as the inverse
1738 function of :meth:`~.name_get`, but it is not guaranteed to be one.
1740 This method is equivalent to calling :meth:`~.search` with a search
1741 domain based on ``display_name`` and then :meth:`~.name_get` on the
1742 result of the search.
1744 :param str name: the name pattern to match
1745 :param list args: optional search domain (see :meth:`~.search` for
1746 syntax), specifying further restrictions
1747 :param str operator: domain operator for matching `name`, such as
1748 ``'like'`` or ``'='``.
1749 :param int limit: optional max number of records to return
1751 :return: list of pairs ``(id, text_repr)`` for all matching records.
1753 return self._name_search(name, args, operator, limit=limit)
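# Editor's sketch (hypothetical model, field and values): name_search() is
# what typically powers many2one autocompletion; it combines a display-name
# match with an optional extra domain:
#
#   pairs = env['res.partner'].name_search(
#       'agro', args=[('customer', '=', True)], operator='ilike', limit=8)
#   # -> [(id1, u'Agrolait'), (id2, u'Agrolait Industries'), ...]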
1755 def _name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100, name_get_uid=None):
1756 # private implementation of name_search, allows passing a dedicated user
1757 # for the name_get part to solve some access rights issues
1758 args = list(args or [])
1759 # optimize out the default criterion of ``ilike ''`` that matches everything
1760 if not self._rec_name:
1761 _logger.warning("Cannot execute name_search, no _rec_name defined on %s", self._name)
1762 elif not (name == '' and operator == 'ilike'):
1763 args += [(self._rec_name, operator, name)]
1764 access_rights_uid = name_get_uid or user
1765 ids = self._search(cr, user, args, limit=limit, context=context, access_rights_uid=access_rights_uid)
1766 res = self.name_get(cr, access_rights_uid, ids, context)
1769 def read_string(self, cr, uid, id, langs, fields=None, context=None):
1772 self.pool.get('ir.translation').check_access_rights(cr, uid, 'read')
1774 fields = self._columns.keys() + self._inherit_fields.keys()
1775 #FIXME: collect all calls to _get_source into one SQL call.
1777 res[lang] = {'code': lang}
1779 if f in self._columns:
1780 res_trans = self.pool.get('ir.translation')._get_source(cr, uid, self._name+','+f, 'field', lang)
1782 res[lang][f] = res_trans
1784 res[lang][f] = self._columns[f].string
1785 for table in self._inherits:
1786 cols = intersect(self._inherit_fields.keys(), fields)
1787 res2 = self.pool[table].read_string(cr, uid, id, langs, cols, context)
1790 res[lang]['code'] = lang
1791 for f in res2[lang]:
1792 res[lang][f] = res2[lang][f]
1795 def write_string(self, cr, uid, id, langs, vals, context=None):
1796 self.pool.get('ir.translation').check_access_rights(cr, uid, 'write')
1797 #FIXME: try to only call the translation in one SQL
1800 if field in self._columns:
1801 src = self._columns[field].string
1802 self.pool.get('ir.translation')._set_ids(cr, uid, self._name+','+field, 'field', lang, [0], vals[field], src)
1803 for table in self._inherits:
1804 cols = intersect(self._inherit_fields.keys(), vals)
1806 self.pool[table].write_string(cr, uid, id, langs, vals, context)
1809 def _add_missing_default_values(self, cr, uid, values, context=None):
1810 # avoid overriding inherited values when parent is set
1812 for tables, parent_field in self._inherits.items():
1813 if parent_field in values:
1814 avoid_tables.append(tables)
1816 # compute missing fields
1817 missing_defaults = set()
1818 for field in self._columns.keys():
1819 if field not in values:
1820 missing_defaults.add(field)
1821 for field in self._inherit_fields.keys():
1822 if (field not in values) and (self._inherit_fields[field][0] not in avoid_tables):
1823 missing_defaults.add(field)
1824 # discard magic fields
1825 missing_defaults -= set(MAGIC_COLUMNS)
1827 if missing_defaults:
1828 # override defaults with the provided values, never allow the other way around
1829 defaults = self.default_get(cr, uid, list(missing_defaults), context)
1831 if ((dv in self._columns and self._columns[dv]._type == 'many2many') \
1832 or (dv in self._inherit_fields and self._inherit_fields[dv][2]._type == 'many2many')) \
1833 and defaults[dv] and isinstance(defaults[dv][0], (int, long)):
1834 defaults[dv] = [(6, 0, defaults[dv])]
1835 if (dv in self._columns and self._columns[dv]._type == 'one2many' \
1836 or (dv in self._inherit_fields and self._inherit_fields[dv][2]._type == 'one2many')) \
1837 and isinstance(defaults[dv], (list, tuple)) and defaults[dv] and isinstance(defaults[dv][0], dict):
1838 defaults[dv] = [(0, 0, x) for x in defaults[dv]]
1839 defaults.update(values)
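# Editor's illustration of the two conversions above (field names are
# hypothetical): a many2many default given as plain ids becomes a single
# "replace" command, and a one2many default given as dicts becomes
# "create" commands:
#
#   {'tag_ids': [1, 2, 3]}        -> {'tag_ids': [(6, 0, [1, 2, 3])]}
#   {'line_ids': [{'name': 'x'}]} -> {'line_ids': [(0, 0, {'name': 'x'})]}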
1843 def clear_caches(self):
1844 """ Clear the caches
1846 This clears the caches associated with methods decorated with
1847 ``tools.ormcache`` or ``tools.ormcache_multi``.
1850 self._ormcache.clear()
1851 self.pool._any_cache_cleared = True
1852 except AttributeError:
1856 def _read_group_fill_results(self, cr, uid, domain, groupby, remaining_groupbys,
1857 aggregated_fields, count_field,
1858 read_group_result, read_group_order=None, context=None):
1859 """Helper method for filling in empty groups for all possible values of
1860 the field being grouped by"""
1862 # self._group_by_full should map groupable fields to a method that returns
1863 # a list of all aggregated values that we want to display for this field,
1864 # in the form of a m2o-like pair (key,label).
1865 # This is useful to implement kanban views for instance, where all columns
1866 # should be displayed even if they don't contain any record.
1868 # Grab the list of all groups that should be displayed, including all present groups
1869 present_group_ids = [x[groupby][0] for x in read_group_result if x[groupby]]
1870 all_groups,folded = self._group_by_full[groupby](self, cr, uid, present_group_ids, domain,
1871 read_group_order=read_group_order,
1872 access_rights_uid=openerp.SUPERUSER_ID,
1875 result_template = dict.fromkeys(aggregated_fields, False)
1876 result_template[groupby + '_count'] = 0
1877 if remaining_groupbys:
1878 result_template['__context'] = {'group_by': remaining_groupbys}
1880 # Merge the left_side (current results as dicts) with the right_side (all
1881 # possible values as m2o pairs). Both lists are supposed to be using the
1882 # same ordering, and can be merged in one pass.
1885 def append_left(left_side):
1886 grouped_value = left_side[groupby] and left_side[groupby][0]
1887 if grouped_value not in known_values:
1888 result.append(left_side)
1889 known_values[grouped_value] = left_side
1891 known_values[grouped_value].update({count_field: left_side[count_field]})
1892 def append_right(right_side):
1893 grouped_value = right_side[0]
1894 if grouped_value not in known_values:
1895 line = dict(result_template)
1896 line[groupby] = right_side
1897 line['__domain'] = [(groupby,'=',grouped_value)] + domain
1899 known_values[grouped_value] = line
1900 while read_group_result or all_groups:
1901 left_side = read_group_result[0] if read_group_result else None
1902 right_side = all_groups[0] if all_groups else None
1903 assert left_side is None or left_side[groupby] is False \
1904 or isinstance(left_side[groupby], (tuple,list)), \
1905 'M2O-like pair expected, got %r' % left_side[groupby]
1906 assert right_side is None or isinstance(right_side, (tuple,list)), \
1907 'M2O-like pair expected, got %r' % right_side
1908 if left_side is None:
1909 append_right(all_groups.pop(0))
1910 elif right_side is None:
1911 append_left(read_group_result.pop(0))
1912 elif left_side[groupby] == right_side:
1913 append_left(read_group_result.pop(0))
1914 all_groups.pop(0) # discard right_side
1915 elif not left_side[groupby] or not left_side[groupby][0]:
1916 # left side == "Undefined" entry, not present on right_side
1917 append_left(read_group_result.pop(0))
1919 append_right(all_groups.pop(0))
1923 r['__fold'] = folded.get(r[groupby] and r[groupby][0], False)
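# Editor's sketch of the merge above, with hypothetical data. The right
# side lists every group that should be displayed, the left side only the
# non-empty ones:
#
#   all_groups        = [(1, 'New'), (2, 'Won'), (3, 'Lost')]
#   read_group_result = [{'stage_id': (1, 'New'), 'stage_id_count': 4}]
#
# The merged result keeps the counted group and appends empty lines
# (count 0) for 'Won' and 'Lost', so e.g. kanban columns always appear.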
1926 def _read_group_prepare(self, orderby, aggregated_fields, annotated_groupbys, query):
1928 Prepares the GROUP BY and ORDER BY terms for the read_group method. Adds the missing JOIN clause
1929 to the query if order should be computed against m2o field.
1930 :param orderby: the orderby definition in the form "%(field)s %(order)s"
1931 :param aggregated_fields: list of aggregated fields in the query
1932 :param annotated_groupbys: list of dictionaries returned by _read_group_process_groupby
1933 These dictionaries contain the qualified name of each groupby
1934 (fully qualified SQL name for the corresponding field),
1935 and the (non-raw) field name.
1936 :param osv.Query query: the query under construction
1937 :return: (groupby_terms, orderby_terms)
1940 groupby_terms = [gb['qualified_field'] for gb in annotated_groupbys]
1941 groupby_fields = [gb['groupby'] for gb in annotated_groupbys]
1943 return groupby_terms, orderby_terms
1945 self._check_qorder(orderby)
1946 for order_part in orderby.split(','):
1947 order_split = order_part.split()
1948 order_field = order_split[0]
1949 if order_field in groupby_fields:
1951 if self._fields[order_field.split(':')[0]].type == 'many2one':
1952 order_clause = self._generate_order_by(order_part, query).replace('ORDER BY ', '')
1954 orderby_terms.append(order_clause)
1955 groupby_terms += [order_term.split()[0] for order_term in order_clause.split(',')]
1957 order = '"%s" %s' % (order_field, '' if len(order_split) == 1 else order_split[1])
1958 orderby_terms.append(order)
1959 elif order_field in aggregated_fields:
1960 orderby_terms.append(order_part)
1962 # Cannot order by a field that will not appear in the results (needs to be grouped or aggregated)
1963 _logger.warn('%s: read_group order by `%s` ignored, cannot sort on empty columns (not grouped/aggregated)',
1964 self._name, order_part)
1965 return groupby_terms, orderby_terms
1967 def _read_group_process_groupby(self, gb, query, context):
1969 Helper method to collect important information about groupbys: raw
1970 field name, type, time information, qualified name, ...
1972 split = gb.split(':')
1973 field_type = self._fields[split[0]].type
1974 gb_function = split[1] if len(split) == 2 else None
1975 temporal = field_type in ('date', 'datetime')
1976 tz_convert = field_type == 'datetime' and context.get('tz') in pytz.all_timezones
1977 qualified_field = self._inherits_join_calc(split[0], query)
1980 # Careful with week/year formats:
1981 # - yyyy (lower) must always be used, *except* for week+year formats
1982 # - YYYY (upper) must always be used for week+year format
1983 # e.g. 2006-01-01 is W52 2005 in some locales (de_DE),
1984 # and W1 2006 for others
1986 # Mixing both formats, e.g. 'MMM YYYY' would yield wrong results,
1987 # such as 2006-01-01 being formatted as "January 2005" in some locales.
1988 # Cfr: http://babel.pocoo.org/docs/dates/#date-fields
1989 'day': 'dd MMM yyyy', # yyyy = normal year
1990 'week': "'W'w YYYY", # w YYYY = ISO week-year
1991 'month': 'MMMM yyyy',
1992 'quarter': 'QQQ yyyy',
1996 'day': dateutil.relativedelta.relativedelta(days=1),
1997 'week': datetime.timedelta(days=7),
1998 'month': dateutil.relativedelta.relativedelta(months=1),
1999 'quarter': dateutil.relativedelta.relativedelta(months=3),
2000 'year': dateutil.relativedelta.relativedelta(years=1)
2003 qualified_field = "timezone('%s', timezone('UTC',%s))" % (context.get('tz', 'UTC'), qualified_field)
2004 qualified_field = "date_trunc('%s', %s)" % (gb_function or 'month', qualified_field)
2005 if field_type == 'boolean':
2006 qualified_field = "coalesce(%s,false)" % qualified_field
2011 'display_format': display_formats[gb_function or 'month'] if temporal else None,
2012 'interval': time_intervals[gb_function or 'month'] if temporal else None,
2013 'tz_convert': tz_convert,
2014 'qualified_field': qualified_field
2017 def _read_group_prepare_data(self, key, value, groupby_dict, context):
2019 Helper method to sanitize the data received by read_group. The None
2020 values are converted to False, and the date/datetime values are
2021 formatted and corrected according to the timezones.
2023 value = False if value is None else value
2024 gb = groupby_dict.get(key)
2025 if gb and gb['type'] in ('date', 'datetime') and value:
2026 if isinstance(value, basestring):
2027 dt_format = DEFAULT_SERVER_DATETIME_FORMAT if gb['type'] == 'datetime' else DEFAULT_SERVER_DATE_FORMAT
2028 value = datetime.datetime.strptime(value, dt_format)
2029 if gb['tz_convert']:
2030 value = pytz.timezone(context['tz']).localize(value)
2033 def _read_group_get_domain(self, groupby, value):
2035 Helper method to construct the domain corresponding to a groupby and
2036 a given value. This is mostly relevant for date/datetime.
2038 if groupby['type'] in ('date', 'datetime') and value:
2039 dt_format = DEFAULT_SERVER_DATETIME_FORMAT if groupby['type'] == 'datetime' else DEFAULT_SERVER_DATE_FORMAT
2040 domain_dt_begin = value
2041 domain_dt_end = value + groupby['interval']
2042 if groupby['tz_convert']:
2043 domain_dt_begin = domain_dt_begin.astimezone(pytz.utc)
2044 domain_dt_end = domain_dt_end.astimezone(pytz.utc)
2045 return [(groupby['field'], '>=', domain_dt_begin.strftime(dt_format)),
2046 (groupby['field'], '<', domain_dt_end.strftime(dt_format))]
2047 if groupby['type'] == 'many2one' and value:
2049 return [(groupby['field'], '=', value)]
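# Editor's sketch with hypothetical values: for a datetime field grouped
# by month, a group starting at 2014-04-01 yields a half-open range:
#
#   [('create_date', '>=', '2014-04-01 00:00:00'),
#    ('create_date', '<', '2014-05-01 00:00:00')]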
2051 def _read_group_format_result(self, data, annotated_groupbys, groupby, groupby_dict, domain, context):
2053 Helper method to format the data contained in the dictionary ``data`` by
2054 adding the domain corresponding to its values, the groupbys in the
2055 context and by properly formatting the date/datetime values.
2057 domain_group = [dom for gb in annotated_groupbys for dom in self._read_group_get_domain(gb, data[gb['groupby']])]
2058 for k,v in data.iteritems():
2059 gb = groupby_dict.get(k)
2060 if gb and gb['type'] in ('date', 'datetime') and v:
2061 data[k] = babel.dates.format_date(v, format=gb['display_format'], locale=context.get('lang', 'en_US'))
2063 data['__domain'] = domain_group + domain
2064 if len(groupby) - len(annotated_groupbys) >= 1:
2065 data['__context'] = { 'group_by': groupby[len(annotated_groupbys):]}
2069 def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False, lazy=True):
2071 Get the list of records in list view grouped by the given ``groupby`` fields
2073 :param cr: database cursor
2074 :param uid: current user id
2075 :param domain: list specifying search criteria [['field_name', 'operator', 'value'], ...]
2076 :param list fields: list of fields present in the list view specified on the object
2077 :param list groupby: list of groupby descriptions by which the records will be grouped.
2078 A groupby description is either a field (then it will be grouped by that field)
2079 or a string 'field:groupby_function'. Right now, the only functions supported
2080 are 'day', 'week', 'month', 'quarter' or 'year', and they only make sense for
2081 date/datetime fields.
2082 :param int offset: optional number of records to skip
2083 :param int limit: optional max number of records to return
2084 :param dict context: context arguments, like lang, time zone.
2085 :param list orderby: optional ``order by`` specification, for
2086 overriding the natural sort ordering of the
2087 groups, see also :py:meth:`~osv.osv.osv.search`
2088 (supported only for many2one fields currently)
2089 :param bool lazy: if true, the results are only grouped by the first groupby and the
2090 remaining groupbys are put in the __context key. If false, all the groupbys are
2092 :return: list of dictionaries (one dictionary for each record) containing:
2094 * the values of fields grouped by the fields in ``groupby`` argument
2095 * __domain: list of tuples specifying the search criteria
2096 * __context: dictionary with argument like ``groupby``
2097 :rtype: [{'field_name_1': value, ...}, ...]
2098 :raise AccessError: * if user has no read rights on the requested object
2099 * if user tries to bypass access rules for read on the requested object
2103 self.check_access_rights(cr, uid, 'read')
2104 query = self._where_calc(cr, uid, domain, context=context)
2105 fields = fields or self._columns.keys()
2107 groupby = [groupby] if isinstance(groupby, basestring) else groupby
2108 groupby_list = groupby[:1] if lazy else groupby
2109 annotated_groupbys = [self._read_group_process_groupby(gb, query, context)
2110 for gb in groupby_list]
2111 groupby_fields = [g['field'] for g in annotated_groupbys]
2112 order = orderby or ','.join([g for g in groupby_list])
2113 groupby_dict = {gb['groupby']: gb for gb in annotated_groupbys}
2115 self._apply_ir_rules(cr, uid, query, 'read', context=context)
2116 for gb in groupby_fields:
2117 assert gb in fields, "Fields in 'groupby' must appear in the list of fields to read (perhaps it's missing in the list view?)"
2118 groupby_def = self._columns.get(gb) or (self._inherit_fields.get(gb) and self._inherit_fields.get(gb)[2])
2119 assert groupby_def and groupby_def._classic_write, "Fields in 'groupby' must be regular database-persisted fields (no function or related fields), or function fields with store=True"
2120 if not (gb in self._fields):
2121 # Don't allow arbitrary values, as this would be a SQL injection vector!
2122 raise except_orm(_('Invalid group_by'),
2123 _('Invalid group_by specification: "%s".\nA group_by specification must be a list of valid fields.')%(gb,))
2125 aggregated_fields = [
2127 if f not in ('id', 'sequence')
2128 if f not in groupby_fields
2129 if f in self._fields
2130 if self._fields[f].type in ('integer', 'float')
2131 if getattr(self._fields[f].base_field.column, '_classic_write')
2134 field_formatter = lambda f: (self._fields[f].group_operator or 'sum', self._inherits_join_calc(f, query), f)
2135 select_terms = ["%s(%s) AS %s" % field_formatter(f) for f in aggregated_fields]
2137 for gb in annotated_groupbys:
2138 select_terms.append('%s as "%s" ' % (gb['qualified_field'], gb['groupby']))
2140 groupby_terms, orderby_terms = self._read_group_prepare(order, aggregated_fields, annotated_groupbys, query)
2141 from_clause, where_clause, where_clause_params = query.get_sql()
2142 if lazy and (len(groupby_fields) >= 2 or not context.get('group_by_no_leaf')):
2143 count_field = groupby_fields[0] if len(groupby_fields) >= 1 else '_'
2146 count_field += '_count'
2148 prefix_terms = lambda prefix, terms: (prefix + " " + ",".join(terms)) if terms else ''
2149 prefix_term = lambda prefix, term: ('%s %s' % (prefix, term)) if term else ''
2152 SELECT min(%(table)s.id) AS id, count(%(table)s.id) AS %(count_field)s %(extra_fields)s
2160 'table': self._table,
2161 'count_field': count_field,
2162 'extra_fields': prefix_terms(',', select_terms),
2163 'from': from_clause,
2164 'where': prefix_term('WHERE', where_clause),
2165 'groupby': prefix_terms('GROUP BY', groupby_terms),
2166 'orderby': prefix_terms('ORDER BY', orderby_terms),
2167 'limit': prefix_term('LIMIT', int(limit) if limit else None),
2168 'offset': prefix_term('OFFSET', int(offset) if offset else None),
2170 cr.execute(query, where_clause_params)
2171 fetched_data = cr.dictfetchall()
2173 if not groupby_fields:
2176 many2onefields = [gb['field'] for gb in annotated_groupbys if gb['type'] == 'many2one']
2178 data_ids = [r['id'] for r in fetched_data]
2179 many2onefields = list(set(many2onefields))
2180 data_dict = {d['id']: d for d in self.read(cr, uid, data_ids, many2onefields, context=context)}
2181 for d in fetched_data:
2182 d.update(data_dict[d['id']])
2184 data = map(lambda r: {k: self._read_group_prepare_data(k,v, groupby_dict, context) for k,v in r.iteritems()}, fetched_data)
2185 result = [self._read_group_format_result(d, annotated_groupbys, groupby, groupby_dict, domain, context) for d in data]
2186 if lazy and groupby_fields[0] in self._group_by_full:
2187 # Right now, read_group only fills results in lazy mode (by default).
2188 # If you need to have the empty groups in 'eager' mode, then the
2189 # method _read_group_fill_results needs to be completely reimplemented
2191 result = self._read_group_fill_results(cr, uid, domain, groupby_fields[0], groupby[len(annotated_groupbys):],
2192 aggregated_fields, count_field, result, read_group_order=order,
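# Editor's usage sketch (model, fields and domain are hypothetical):
#
#   groups = self.pool['sale.order'].read_group(
#       cr, uid, [('state', '!=', 'cancel')],
#       fields=['partner_id', 'amount_total'],
#       groupby=['partner_id'], context=context)
#   # each dict carries the aggregates plus '__domain' (to fetch the
#   # group's records) and, in lazy mode, '__context' with the
#   # remaining groupbys.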
2196 def _inherits_join_add(self, current_model, parent_model_name, query):
2198 Add missing table SELECT and JOIN clause to ``query`` for reaching the parent table (no duplicates)
2199 :param current_model: current model object
2200 :param parent_model_name: name of the parent model for which the clauses should be added
2201 :param query: query object on which the JOIN should be added
2203 inherits_field = current_model._inherits[parent_model_name]
2204 parent_model = self.pool[parent_model_name]
2205 parent_alias, parent_alias_statement = query.add_join((current_model._table, parent_model._table, inherits_field, 'id', inherits_field), implicit=True)
2208 def _inherits_join_calc(self, field, query):
2210 Adds missing table select and join clause(s) to ``query`` for reaching
2211 the field coming from an '_inherits' parent table (no duplicates).
2213 :param field: name of inherited field to reach
2214 :param query: query object on which the JOIN should be added
2215 :return: qualified name of field, to be used in SELECT clause
2217 current_table = self
2218 parent_alias = '"%s"' % current_table._table
2219 while field in current_table._inherit_fields and field not in current_table._columns:
2220 parent_model_name = current_table._inherit_fields[field][0]
2221 parent_table = self.pool[parent_model_name]
2222 parent_alias = self._inherits_join_add(current_table, parent_model_name, query)
2223 current_table = parent_table
2224 return '%s."%s"' % (parent_alias, field)
2226 def _parent_store_compute(self, cr):
2227 if not self._parent_store:
2229 _logger.info('Computing parent left and right for table %s...', self._table)
2230 def browse_rec(root, pos=0):
2232 where = self._parent_name+'='+str(root)
2234 where = self._parent_name+' IS NULL'
2235 if self._parent_order:
2236 where += ' order by '+self._parent_order
2237 cr.execute('SELECT id FROM '+self._table+' WHERE '+where)
2239 for id in cr.fetchall():
2240 pos2 = browse_rec(id[0], pos2)
2241 cr.execute('update '+self._table+' set parent_left=%s, parent_right=%s where id=%s', (pos, pos2, root))
2243 query = 'SELECT id FROM '+self._table+' WHERE '+self._parent_name+' IS NULL'
2244 if self._parent_order:
2245 query += ' order by ' + self._parent_order
2248 for (root,) in cr.fetchall():
2249 pos = browse_rec(root, pos)
2250 self.invalidate_cache(cr, SUPERUSER_ID, ['parent_left', 'parent_right'])
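# Editor's note: parent_left/parent_right implement a nested-set (interval)
# encoding of the hierarchy, so "all descendants of X" becomes a range test
# instead of a recursive query. A sketch of the kind of SQL this enables:
#
#   SELECT id FROM some_table
#    WHERE parent_left > X.parent_left AND parent_left < X.parent_right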
2253 def _update_store(self, cr, f, k):
2254 _logger.info("storing computed values of fields.function '%s'", k)
2255 ss = self._columns[k]._symbol_set
2256 update_query = 'UPDATE "%s" SET "%s"=%s WHERE id=%%s' % (self._table, k, ss[0])
2257 cr.execute('select id from '+self._table)
2258 ids_lst = map(lambda x: x[0], cr.fetchall())
2260 iids = ids_lst[:AUTOINIT_RECALCULATE_STORED_FIELDS]
2261 ids_lst = ids_lst[AUTOINIT_RECALCULATE_STORED_FIELDS:]
2262 res = f.get(cr, self, iids, k, SUPERUSER_ID, {})
2263 for key, val in res.items():
2266 # if val is a many2one, just write the ID
2267 if type(val) == tuple:
2269 if val is not False:
2270 cr.execute(update_query, (ss[1](val), key))
2273 def _check_selection_field_value(self, field, value):
2274 """ Check whether value is among the valid values for the given
2275 selection/reference field, and raise an exception if not.
2277 field = self._fields[field]
2278 field.convert_to_cache(value, self)
2280 def _check_removed_columns(self, cr, log=False):
2281 # iterate on the database columns to drop the NOT NULL constraints
2282 # of fields which were required but have been removed (or will be added by another module)
2283 columns = [c for c in self._columns if not (isinstance(self._columns[c], fields.function) and not self._columns[c].store)]
2284 columns += MAGIC_COLUMNS
2285 cr.execute("SELECT a.attname, a.attnotnull"
2286 " FROM pg_class c, pg_attribute a"
2287 " WHERE c.relname=%s"
2288 " AND c.oid=a.attrelid"
2289 " AND a.attisdropped=%s"
2290 " AND pg_catalog.format_type(a.atttypid, a.atttypmod) NOT IN ('cid', 'tid', 'oid', 'xid')"
2291 " AND a.attname NOT IN %s", (self._table, False, tuple(columns))),
2293 for column in cr.dictfetchall():
2295 _logger.debug("column %s is in the table %s but not in the corresponding object %s",
2296 column['attname'], self._table, self._name)
2297 if column['attnotnull']:
2298 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, column['attname']))
2299 _schema.debug("Table '%s': column '%s': dropped NOT NULL constraint",
2300 self._table, column['attname'])
2302 def _save_constraint(self, cr, constraint_name, type):
2304 Record the creation of a constraint for this model, to make it possible
2305 to delete it later when the module is uninstalled. Type can be either
2306 'f' (foreign key constraint) or 'u' (unique constraint).
2308 if not self._module:
2309 # no need to save constraints for custom models as they're not part
2312 assert type in ('f', 'u')
2314 SELECT 1 FROM ir_model_constraint, ir_module_module
2315 WHERE ir_model_constraint.module=ir_module_module.id
2316 AND ir_model_constraint.name=%s
2317 AND ir_module_module.name=%s
2318 """, (constraint_name, self._module))
2321 INSERT INTO ir_model_constraint
2322 (name, date_init, date_update, module, model, type)
2323 VALUES (%s, now() AT TIME ZONE 'UTC', now() AT TIME ZONE 'UTC',
2324 (SELECT id FROM ir_module_module WHERE name=%s),
2325 (SELECT id FROM ir_model WHERE model=%s), %s)""",
2326 (constraint_name, self._module, self._name, type))
2328 def _save_relation_table(self, cr, relation_table):
2330 Record the creation of a many2many for this model, to make it possible
2331 to delete it later when the module is uninstalled.
2334 SELECT 1 FROM ir_model_relation, ir_module_module
2335 WHERE ir_model_relation.module=ir_module_module.id
2336 AND ir_model_relation.name=%s
2337 AND ir_module_module.name=%s
2338 """, (relation_table, self._module))
2340 cr.execute("""INSERT INTO ir_model_relation (name, date_init, date_update, module, model)
2341 VALUES (%s, now() AT TIME ZONE 'UTC', now() AT TIME ZONE 'UTC',
2342 (SELECT id FROM ir_module_module WHERE name=%s),
2343 (SELECT id FROM ir_model WHERE model=%s))""",
2344 (relation_table, self._module, self._name))
2345 self.invalidate_cache(cr, SUPERUSER_ID)
2347 # checked version: for direct m2o starting from `self`
2348 def _m2o_add_foreign_key_checked(self, source_field, dest_model, ondelete):
2349 assert self.is_transient() or not dest_model.is_transient(), \
2350 'Many2One relationships from non-transient Model to TransientModel are forbidden'
2351 if self.is_transient() and not dest_model.is_transient():
2352 # TransientModel relationships to regular Models are annoying
2353 # usually because they could block deletion due to the FKs.
2354 # So unless stated otherwise we default them to ondelete=cascade.
2355 ondelete = ondelete or 'cascade'
2356 fk_def = (self._table, source_field, dest_model._table, ondelete or 'set null')
2357 self._foreign_keys.add(fk_def)
2358 _schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s", *fk_def)
2360 # unchecked version: for custom cases, such as m2m relationships
2361 def _m2o_add_foreign_key_unchecked(self, source_table, source_field, dest_model, ondelete):
2362 fk_def = (source_table, source_field, dest_model._table, ondelete or 'set null')
2363 self._foreign_keys.add(fk_def)
2364 _schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s", *fk_def)
2366 def _drop_constraint(self, cr, source_table, constraint_name):
2367 cr.execute("ALTER TABLE %s DROP CONSTRAINT %s" % (source_table,constraint_name))
2369 def _m2o_fix_foreign_key(self, cr, source_table, source_field, dest_model, ondelete):
2370 # Find FK constraint(s) currently established for the m2o field,
2371 # and see whether they are stale or not
2372 cr.execute("""SELECT confdeltype as ondelete_rule, conname as constraint_name,
2373 cl2.relname as foreign_table
2374 FROM pg_constraint as con, pg_class as cl1, pg_class as cl2,
2375 pg_attribute as att1, pg_attribute as att2
2376 WHERE con.conrelid = cl1.oid
2377 AND cl1.relname = %s
2378 AND con.confrelid = cl2.oid
2379 AND array_lower(con.conkey, 1) = 1
2380 AND con.conkey[1] = att1.attnum
2381 AND att1.attrelid = cl1.oid
2382 AND att1.attname = %s
2383 AND array_lower(con.confkey, 1) = 1
2384 AND con.confkey[1] = att2.attnum
2385 AND att2.attrelid = cl2.oid
2386 AND att2.attname = %s
2387 AND con.contype = 'f'""", (source_table, source_field, 'id'))
2388 constraints = cr.dictfetchall()
2390 if len(constraints) == 1:
2391 # Is it the right constraint?
2393 if cons['ondelete_rule'] != POSTGRES_CONFDELTYPES.get((ondelete or 'set null').upper(), 'a')\
2394 or cons['foreign_table'] != dest_model._table:
2395 # Wrong FK: drop it and recreate
2396 _schema.debug("Table '%s': dropping obsolete FK constraint: '%s'",
2397 source_table, cons['constraint_name'])
2398 self._drop_constraint(cr, source_table, cons['constraint_name'])
2400 # it's all good, nothing to do!
2403 # Multiple FKs found for the same field, drop them all, and re-create
2404 for cons in constraints:
2405 _schema.debug("Table '%s': dropping duplicate FK constraints: '%s'",
2406 source_table, cons['constraint_name'])
2407 self._drop_constraint(cr, source_table, cons['constraint_name'])
2409 # (re-)create the FK
2410 self._m2o_add_foreign_key_checked(source_field, dest_model, ondelete)
2413 def _set_default_value_on_column(self, cr, column_name, context=None):
2414 # ideally, we should use default_get(), but it fails due to ir.values
2418 default = self._defaults.get(column_name)
2419 if callable(default):
2420 default = default(self, cr, SUPERUSER_ID, context)
2422 column = self._columns[column_name]
2423 ss = column._symbol_set
2424 db_default = ss[1](default)
2425 # Write default if non-NULL, except for booleans for which False means
2426 # the same as NULL - this saves us an expensive query on large tables.
2427 write_default = (db_default is not None if column._type != 'boolean'
2430 _logger.debug("Table '%s': setting default value of new column %s to %r",
2431 self._table, column_name, default)
2432 query = 'UPDATE "%s" SET "%s"=%s WHERE "%s" is NULL' % (
2433 self._table, column_name, ss[0], column_name)
2434 cr.execute(query, (db_default,))
2435 # this is a disgrace
2438 def _auto_init(self, cr, context=None):
2441 Call _field_create and, unless _auto is False:
2443 - create the corresponding table in database for the model,
2444 - possibly add the parent columns in database,
2445 - possibly add the columns 'create_uid', 'create_date', 'write_uid',
2446 'write_date' in database if _log_access is True (the default),
2447 - report on database columns that no longer exist in _columns,
2448 - remove NOT NULL constraints that no longer apply,
2449 - alter existing database columns to match _columns,
2450 - create database tables to match _columns,
2451 - add database indices to match _columns,
2452 - save in self._foreign_keys a list of foreign keys to create (see
2456 self._foreign_keys = set()
2457 raise_on_invalid_object_name(self._name)
2460 store_compute = False
2461 stored_fields = [] # new-style stored fields with compute
2463 update_custom_fields = context.get('update_custom_fields', False)
2464 self._field_create(cr, context=context)
2465 create = not self._table_exist(cr)
2469 self._create_table(cr)
2472 cr.execute('SELECT 1 FROM "%s" LIMIT 1' % self._table)
2473 has_rows = cr.rowcount
2476 if self._parent_store:
2477 if not self._parent_columns_exist(cr):
2478 self._create_parent_columns(cr)
2479 store_compute = True
2481 self._check_removed_columns(cr, log=False)
2483 # iterate on the "object columns"
2484 column_data = self._select_column_data(cr)
2486 for k, f in self._columns.iteritems():
2487 if k == 'id': # FIXME: maybe id should be a regular column?
2489 # Don't update custom (also called manual) fields
2490 if f.manual and not update_custom_fields:
2493 if isinstance(f, fields.one2many):
2494 self._o2m_raise_on_missing_reference(cr, f)
2496 elif isinstance(f, fields.many2many):
2497 self._m2m_raise_or_create_relation(cr, f)
2500 res = column_data.get(k)
2502 # The field is not found as-is in database, check whether it
2503 # exists under an old name.
2504 if not res and hasattr(f, 'oldname'):
2505 res = column_data.get(f.oldname)
2507 cr.execute('ALTER TABLE "%s" RENAME "%s" TO "%s"' % (self._table, f.oldname, k))
2509 column_data[k] = res
2510 _schema.debug("Table '%s': renamed column '%s' to '%s'",
2511 self._table, f.oldname, k)
2513 # The field already exists in database. Possibly
2514 # change its type, rename it, drop it or change its
2517 f_pg_type = res['typname']
2518 f_pg_size = res['size']
2519 f_pg_notnull = res['attnotnull']
2520 if isinstance(f, fields.function) and not f.store and\
2521 not getattr(f, 'nodrop', False):
2522 _logger.info('column %s (%s) converted to a function, removed from table %s',
2523 k, f.string, self._table)
2524 cr.execute('ALTER TABLE "%s" DROP COLUMN "%s" CASCADE' % (self._table, k))
2526 _schema.debug("Table '%s': dropped column '%s' with cascade",
2530 f_obj_type = get_pg_type(f) and get_pg_type(f)[0]
2535 ('text', 'char', pg_varchar(f.size), '::%s' % pg_varchar(f.size)),
2536 ('varchar', 'text', 'TEXT', ''),
2537 ('int4', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
2538 ('date', 'datetime', 'TIMESTAMP', '::TIMESTAMP'),
2539 ('timestamp', 'date', 'date', '::date'),
2540 ('numeric', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
2541 ('float8', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
2543 if f_pg_type == 'varchar' and f._type == 'char' and f_pg_size and (f.size is None or f_pg_size < f.size):
2545 with cr.savepoint():
2546 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" TYPE %s' % (self._table, k, pg_varchar(f.size)))
2547 except psycopg2.NotSupportedError:
2548 # An in-place ALTER TABLE cannot be done because a view depends on this field.
2549 # Do a manual copy. This will drop the view (it will be recreated later).
2550 cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO temp_change_size' % (self._table, k))
2551 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, pg_varchar(f.size)))
2552 cr.execute('UPDATE "%s" SET "%s"=temp_change_size::%s' % (self._table, k, pg_varchar(f.size)))
2553 cr.execute('ALTER TABLE "%s" DROP COLUMN temp_change_size CASCADE' % (self._table,))
2555 _schema.debug("Table '%s': column '%s' (type varchar) changed size from %s to %s",
2556 self._table, k, f_pg_size or 'unlimited', f.size or 'unlimited')
2558 if (f_pg_type==c[0]) and (f._type==c[1]):
2559 if f_pg_type != f_obj_type:
2561 cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO __temp_type_cast' % (self._table, k))
2562 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, c[2]))
2563 cr.execute(('UPDATE "%s" SET "%s"= __temp_type_cast'+c[3]) % (self._table, k))
2564 cr.execute('ALTER TABLE "%s" DROP COLUMN __temp_type_cast CASCADE' % (self._table,))
2566 _schema.debug("Table '%s': column '%s' changed type from %s to %s",
2567 self._table, k, c[0], c[1])
2570 if f_pg_type != f_obj_type:
2574 newname = k + '_moved' + str(i)
2575 cr.execute("SELECT count(1) FROM pg_class c,pg_attribute a " \
2576 "WHERE c.relname=%s " \
2577 "AND a.attname=%s " \
2578 "AND c.oid=a.attrelid ", (self._table, newname))
2579 if not cr.fetchone()[0]:
2583 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, k))
2584 cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO "%s"' % (self._table, k, newname))
2585 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, get_pg_type(f)[1]))
2586 cr.execute("COMMENT ON COLUMN %s.\"%s\" IS %%s" % (self._table, k), (f.string,))
2587 _schema.debug("Table '%s': column '%s' has changed type (DB=%s, def=%s), data moved to column %s !",
2588 self._table, k, f_pg_type, f._type, newname)
2590 # if the field is required and hasn't got a NOT NULL constraint
2591 if f.required and f_pg_notnull == 0:
2593 self._set_default_value_on_column(cr, k, context=context)
2594 # add the NOT NULL constraint
2596 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k), log_exceptions=False)
2598 _schema.debug("Table '%s': column '%s': added NOT NULL constraint",
2601 msg = "Table '%s': unable to set a NOT NULL constraint on column '%s' !\n"\
2602 "If you want to have it, you should update the records and execute manually:\n"\
2603 "ALTER TABLE %s ALTER COLUMN %s SET NOT NULL"
2604 _schema.warning(msg, self._table, k, self._table, k)
2606 elif not f.required and f_pg_notnull == 1:
2607 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, k))
2609 _schema.debug("Table '%s': column '%s': dropped NOT NULL constraint",
2612 indexname = '%s_%s_index' % (self._table, k)
2613 cr.execute("SELECT indexname FROM pg_indexes WHERE indexname = %s and tablename = %s", (indexname, self._table))
2614 res2 = cr.dictfetchall()
2615 if not res2 and f.select:
2616 cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (self._table, k, self._table, k))
2618 if f._type == 'text':
2619 # FIXME: for fields.text columns we should try creating GIN indexes instead (seems most suitable for an ERP context)
2620 msg = "Table '%s': Adding (b-tree) index for %s column '%s'."\
2621 "This is probably useless (does not work for fulltext search) and prevents INSERTs of long texts"\
2622 " because there is a length limit for indexable btree values!\n"\
2623 "Use a search view instead if you simply want to make the field searchable."
2624 _schema.warning(msg, self._table, f._type, k)
2625 if res2 and not f.select:
2626 cr.execute('DROP INDEX "%s_%s_index"' % (self._table, k))
2628 msg = "Table '%s': dropping index for column '%s' of type '%s' as it is not required anymore"
2629 _schema.debug(msg, self._table, k, f._type)
2631 if isinstance(f, fields.many2one) or (isinstance(f, fields.function) and f._type == 'many2one' and f.store):
2632 dest_model = self.pool[f._obj]
2633 if dest_model._auto and dest_model._table != 'ir_actions':
2634 self._m2o_fix_foreign_key(cr, self._table, k, dest_model, f.ondelete)
2636 # The field doesn't exist in database. Create it if necessary.
2638 if not isinstance(f, fields.function) or f.store:
2639 # add the missing field
2640 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, get_pg_type(f)[1]))
2641 cr.execute("COMMENT ON COLUMN %s.\"%s\" IS %%s" % (self._table, k), (f.string,))
2642 _schema.debug("Table '%s': added column '%s' with definition=%s",
2643 self._table, k, get_pg_type(f)[1])
2647 self._set_default_value_on_column(cr, k, context=context)
2649 # remember the functions to call for the stored fields
2650 if isinstance(f, fields.function):
2652 if f.store is not True: # i.e. if f.store is a dict
2653 order = f.store[f.store.keys()[0]][2]
2654 todo_end.append((order, self._update_store, (f, k)))
2656 # remember new-style stored fields with compute method
2657 if k in self._fields and self._fields[k].depends:
2658 stored_fields.append(self._fields[k])
2660 # and add constraints if needed
2661 if isinstance(f, fields.many2one) or (isinstance(f, fields.function) and f._type == 'many2one' and f.store):
2662 if f._obj not in self.pool:
2663 raise except_orm('Programming Error', 'There is no reference available for %s' % (f._obj,))
2664 dest_model = self.pool[f._obj]
2665 ref = dest_model._table
2666 # ir_actions is inherited so foreign key doesn't work on it
2667 if dest_model._auto and ref != 'ir_actions':
2668 self._m2o_add_foreign_key_checked(k, dest_model, f.ondelete)
2670 cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (self._table, k, self._table, k))
2674 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k))
2675 _schema.debug("Table '%s': column '%s': added a NOT NULL constraint",
2678 msg = "WARNING: unable to set column %s of table %s not null !\n"\
2679 "Try to re-run: openerp-server --update=module\n"\
2680 "If it doesn't work, update records and execute manually:\n"\
2681 "ALTER TABLE %s ALTER COLUMN %s SET NOT NULL"
2682 _logger.warning(msg, k, self._table, self._table, k, exc_info=True)
2686 cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (self._table,))
2687 create = not bool(cr.fetchone())
2689 cr.commit() # start a new transaction
2692 self._add_sql_constraints(cr)
2695 self._execute_sql(cr)
2698 self._parent_store_compute(cr)
2702 # trigger computation of new-style stored fields with a compute
2704 _logger.info("Storing computed values of %s fields %s",
2705 self._name, ', '.join(sorted(f.name for f in stored_fields)))
2706 recs = self.browse(cr, SUPERUSER_ID, [], {'active_test': False})
2707 recs = recs.search([])
2709 map(recs._recompute_todo, stored_fields)
2712 todo_end.append((1000, func, ()))
2716 def _auto_end(self, cr, context=None):
2717 """ Create the foreign keys recorded by _auto_init. """
2718 for t, k, r, d in self._foreign_keys:
2719 cr.execute('ALTER TABLE "%s" ADD FOREIGN KEY ("%s") REFERENCES "%s" ON DELETE %s' % (t, k, r, d))
2720 self._save_constraint(cr, "%s_%s_fkey" % (t, k), 'f')
2722 del self._foreign_keys
2725 def _table_exist(self, cr):
2726 cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (self._table,))
2730 def _create_table(self, cr):
2731 cr.execute('CREATE TABLE "%s" (id SERIAL NOT NULL, PRIMARY KEY(id))' % (self._table,))
2732 cr.execute(("COMMENT ON TABLE \"%s\" IS %%s" % self._table), (self._description,))
2733 _schema.debug("Table '%s': created", self._table)
2736 def _parent_columns_exist(self, cr):
2737 cr.execute("""SELECT c.relname
2738 FROM pg_class c, pg_attribute a
2739 WHERE c.relname=%s AND a.attname=%s AND c.oid=a.attrelid
2740 """, (self._table, 'parent_left'))
2744 def _create_parent_columns(self, cr):
2745 cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_left" INTEGER' % (self._table,))
2746 cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_right" INTEGER' % (self._table,))
2747 if 'parent_left' not in self._columns:
2748 _logger.error('create a column parent_left on object %s: fields.integer(\'Left Parent\', select=1)',
2750 _schema.debug("Table '%s': added column '%s' with definition=%s",
2751 self._table, 'parent_left', 'INTEGER')
2752 elif not self._columns['parent_left'].select:
2753 _logger.error('parent_left column on object %s must be indexed! Add select=1 to the field definition',
2755 if 'parent_right' not in self._columns:
2756 _logger.error('create a column parent_right on object %s: fields.integer(\'Right Parent\', select=1)',
2758 _schema.debug("Table '%s': added column '%s' with definition=%s",
2759 self._table, 'parent_right', 'INTEGER')
2760 elif not self._columns['parent_right'].select:
2761 _logger.error('parent_right column on object %s must be indexed! Add select=1 to the field definition',
2763 if self._columns[self._parent_name].ondelete not in ('cascade', 'restrict'):
2764 _logger.error("The column %s on object %s must be set as ondelete='cascade' or 'restrict'",
2765 self._parent_name, self._name)
2770 def _select_column_data(self, cr):
2771 # attlen is the number of bytes necessary to represent the type when
2772 # the type has a fixed size. If the type has a varying size attlen is
2773 # -1 and atttypmod is the size limit + 4, or -1 if there is no limit.
2774 cr.execute("SELECT c.relname,a.attname,a.attlen,a.atttypmod,a.attnotnull,a.atthasdef,t.typname,CASE WHEN a.attlen=-1 THEN (CASE WHEN a.atttypmod=-1 THEN 0 ELSE a.atttypmod-4 END) ELSE a.attlen END as size " \
2775 "FROM pg_class c,pg_attribute a,pg_type t " \
2776 "WHERE c.relname=%s " \
2777 "AND c.oid=a.attrelid " \
2778 "AND a.atttypid=t.oid", (self._table,))
2779 return dict(map(lambda x: (x['attname'], x), cr.dictfetchall()))
2782 def _o2m_raise_on_missing_reference(self, cr, f):
2783 # TODO this check should be a method on fields.one2many.
2784 if f._obj in self.pool:
2785 other = self.pool[f._obj]
2786 # TODO the condition could use fields_get_keys().
2787 if f._fields_id not in other._columns.keys():
2788 if f._fields_id not in other._inherit_fields.keys():
2789 raise except_orm('Programming Error', "There is no reference field '%s' found for '%s'" % (f._fields_id, f._obj,))
2791 def _m2m_raise_or_create_relation(self, cr, f):
2792 m2m_tbl, col1, col2 = f._sql_names(self)
2793 # do not create relations for custom fields as they do not belong to a module
2794 # they will be automatically removed when dropping the corresponding ir.model.field
2795 # table names for custom relations all start with x_, see __init__
2796 if not m2m_tbl.startswith('x_'):
2797 self._save_relation_table(cr, m2m_tbl)
2798 cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (m2m_tbl,))
2799 if not cr.dictfetchall():
2800 if f._obj not in self.pool:
2801 raise except_orm('Programming Error', 'Many2Many destination model does not exist: `%s`' % (f._obj,))
2802 dest_model = self.pool[f._obj]
2803 ref = dest_model._table
2804 cr.execute('CREATE TABLE "%s" ("%s" INTEGER NOT NULL, "%s" INTEGER NOT NULL, UNIQUE("%s","%s"))' % (m2m_tbl, col1, col2, col1, col2))
2805 # create foreign key references with ondelete=cascade, unless the targets are SQL views
2806 cr.execute("SELECT relkind FROM pg_class WHERE relkind IN ('v') AND relname=%s", (ref,))
2807 if not cr.fetchall():
2808 self._m2o_add_foreign_key_unchecked(m2m_tbl, col2, dest_model, 'cascade')
2809 cr.execute("SELECT relkind FROM pg_class WHERE relkind IN ('v') AND relname=%s", (self._table,))
2810 if not cr.fetchall():
2811 self._m2o_add_foreign_key_unchecked(m2m_tbl, col1, self, 'cascade')
2813 cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (m2m_tbl, col1, m2m_tbl, col1))
2814 cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (m2m_tbl, col2, m2m_tbl, col2))
2815 cr.execute("COMMENT ON TABLE \"%s\" IS 'RELATION BETWEEN %s AND %s'" % (m2m_tbl, self._table, ref))
2817 _schema.debug("Create table '%s': m2m relation between '%s' and '%s'", m2m_tbl, self._table, ref)
2820 def _add_sql_constraints(self, cr):
2823 Modify this model's database table constraints so they match the ones in
2827 def unify_cons_text(txt):
2828 return txt.lower().replace(', ',',').replace(' (','(')
2830 for (key, con, _) in self._sql_constraints:
2831 conname = '%s_%s' % (self._table, key)
2833 self._save_constraint(cr, conname, 'u')
2834 cr.execute("SELECT conname, pg_catalog.pg_get_constraintdef(oid, true) as condef FROM pg_constraint where conname=%s", (conname,))
2835 existing_constraints = cr.dictfetchall()
2839 'query': 'ALTER TABLE "%s" DROP CONSTRAINT "%s"' % (self._table, conname, ),
2840 'msg_ok': "Table '%s': dropped constraint '%s'. Reason: its definition changed from '%%s' to '%s'" % (
2841 self._table, conname, con),
2842 'msg_err': "Table '%s': unable to drop \'%s\' constraint !" % (self._table, con),
2847 'query': 'ALTER TABLE "%s" ADD CONSTRAINT "%s" %s' % (self._table, conname, con,),
2848 'msg_ok': "Table '%s': added constraint '%s' with definition=%s" % (self._table, conname, con),
2849 'msg_err': "Table '%s': unable to add \'%s\' constraint !\n If you want to have it, you should update the records and execute manually:\n%%s" % (
2855 if not existing_constraints:
2856 # constraint does not exist:
2857 sql_actions['add']['execute'] = True
2858 sql_actions['add']['msg_err'] = sql_actions['add']['msg_err'] % (sql_actions['add']['query'], )
2859 elif unify_cons_text(con) not in [unify_cons_text(item['condef']) for item in existing_constraints]:
2860 # constraint exists but its definition has changed:
2861 sql_actions['drop']['execute'] = True
2862 sql_actions['drop']['msg_ok'] = sql_actions['drop']['msg_ok'] % (existing_constraints[0]['condef'].lower(), )
2863 sql_actions['add']['execute'] = True
2864 sql_actions['add']['msg_err'] = sql_actions['add']['msg_err'] % (sql_actions['add']['query'], )
2866 # apply the selected constraint actions, in order:
2867 sql_actions = [item for item in sql_actions.values()]
2868 sql_actions.sort(key=lambda x: x['order'])
2869 for sql_action in [action for action in sql_actions if action['execute']]:
2871 cr.execute(sql_action['query'])
2873 _schema.debug(sql_action['msg_ok'])
2875 _schema.warning(sql_action['msg_err'])
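# Editor's sketch of the model-side declaration consumed by this method
# (constraint name, definition and message are hypothetical):
#
#   _sql_constraints = [
#       ('name_uniq', 'unique(name)', 'The name must be unique!'),
#   ]
#   # -> materialized as constraint "<table>_name_uniq" on the model's table.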
2879 def _execute_sql(self, cr):
2880 """ Execute the SQL code from the _sql attribute (if any)."""
2881 if hasattr(self, "_sql"):
2882 for line in self._sql.split(';'):
2883 line2 = line.replace('\n', '').strip()
2889 # Update objects that use this one to update their _inherits fields
2893 def _inherits_reload_src(cls):
2894 """ Recompute the _inherit_fields mapping on each _inherits'd child model."""
2895 for model in cls.pool.values():
2896 if cls._name in model._inherits:
2897 model._inherits_reload()
2900 def _inherits_reload(cls):
2901 """ Recompute the _inherit_fields mapping.
2903 This will also call itself on each inherits'd child model.
2907 for table in cls._inherits:
2908 other = cls.pool[table]
2909 for col in other._columns.keys():
2910 res[col] = (table, cls._inherits[table], other._columns[col], table)
2911 for col in other._inherit_fields.keys():
2912 res[col] = (table, cls._inherits[table], other._inherit_fields[col][2], other._inherit_fields[col][3])
2913 cls._inherit_fields = res
2914 cls._all_columns = cls._get_column_infos()
2916 # interface columns with new-style fields
2917 for attr, column in cls._columns.items():
2918 if attr not in cls._fields:
2919 cls._add_field(attr, column.to_field())
2921 # interface inherited fields with new-style fields (note that the
2922 # reverse order is for being consistent with _all_columns above)
2923 for parent_model, parent_field in reversed(cls._inherits.items()):
2924 for attr, field in cls.pool[parent_model]._fields.iteritems():
2925 if attr not in cls._fields:
2926 cls._add_field(attr, field.new(
2928 related=(parent_field, attr),
2932 cls._inherits_reload_src()
2935 def _get_column_infos(cls):
2936 """Returns a dict mapping all fields names (direct fields and
2937 inherited field via _inherits) to a ``column_info`` struct
2938 giving detailed columns """
2940 # do not invert the for loops, since local fields may hide inherited ones!
2941 for k, (parent, m2o, col, original_parent) in cls._inherit_fields.iteritems():
2942 result[k] = fields.column_info(k, col, parent, m2o, original_parent)
2943 for k, col in cls._columns.iteritems():
2944 result[k] = fields.column_info(k, col)
2948 def _inherits_check(cls):
2949 for table, field_name in cls._inherits.items():
2950 if field_name not in cls._columns:
2951 _logger.info('Missing many2one field definition for _inherits reference "%s" in "%s", using default one.', field_name, cls._name)
2952 cls._columns[field_name] = fields.many2one(table, string="Automatically created field to link to parent %s" % table,
2953 required=True, ondelete="cascade")
2954 elif not cls._columns[field_name].required or cls._columns[field_name].ondelete.lower() not in ("cascade", "restrict"):
2955 _logger.warning('Field definition for _inherits reference "%s" in "%s" must be marked as "required" with ondelete="cascade" or "restrict", forcing it to required + cascade.', field_name, cls._name)
2956 cls._columns[field_name].required = True
2957 cls._columns[field_name].ondelete = "cascade"
2959 # reflect fields with delegate=True in dictionary cls._inherits
2960 for field in cls._fields.itervalues():
2961 if field.type == 'many2one' and not field.related and field.delegate:
2962 if not field.required:
2963 _logger.warning("Field %s with delegate=True must be required.", field)
2964 field.required = True
2965 if field.ondelete.lower() not in ('cascade', 'restrict'):
2966 field.ondelete = 'cascade'
2967 cls._inherits[field.comodel_name] = field.name
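# Editor's sketch: the field-level equivalent of declaring _inherits is a
# required, cascading many2one with delegate=True (model names are
# hypothetical):
#
#   class Car(models.Model):
#       _name = 'example.car'
#       vehicle_id = openerp.fields.Many2one(
#           'example.vehicle', required=True,
#           ondelete='cascade', delegate=True)
#   # equivalent to: _inherits = {'example.vehicle': 'vehicle_id'}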
2970 def _prepare_setup_fields(self):
2971 """ Prepare the setup of fields once the models have been loaded. """
2972 for field in self._fields.itervalues():
2976 def _setup_fields(self, partial=False):
2977 """ Setup the fields (dependency triggers, etc). """
2978 for field in self._fields.itervalues():
2980 field.setup(self.env)
2985 # update columns (fields may have changed), and column_infos
2986 for name, field in self._fields.iteritems():
2988 self._columns[name] = field.to_column()
2989 self._inherits_reload()
2991 # group fields by compute to determine field.computed_fields
2992 fields_by_compute = defaultdict(list)
2993 for field in self._fields.itervalues():
2995 field.computed_fields = fields_by_compute[field.compute]
2996 field.computed_fields.append(field)
2998 field.computed_fields = []
3000 def fields_get(self, cr, user, allfields=None, context=None, write_access=True):
3001 """ fields_get([fields])
3003 Return the definition of each field.
3005 The returned value is a dictionary (indexed by field name) of
3006 dictionaries. The _inherits'd fields are included. The string, help,
3007 and selection (if present) attributes are translated.
3009 :param cr: database cursor
3010 :param user: current user id
3011 :param allfields: list of fields
3012 :param context: context arguments, like lang, time zone
3013 :return: dictionary of field dictionaries, each one describing a field of the business object
3014 :raise AccessError: * if user has no create/write rights on the requested object
3017 recs = self.browse(cr, user, [], context)
3020 for fname, field in self._fields.iteritems():
3021 if allfields and fname not in allfields:
3023 if not field.setup_done:
3025 if field.groups and not recs.user_has_groups(field.groups):
3027 res[fname] = field.get_description(recs.env)
3029 # if user cannot create or modify records, make all fields readonly
3030 has_access = functools.partial(recs.check_access_rights, raise_exception=False)
3031 if not (has_access('write') or has_access('create')):
3032 for description in res.itervalues():
3033 description['readonly'] = True
3034 description['states'] = {}
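# Editor's usage sketch (hypothetical model/field): the result maps field
# names to their description dictionaries:
#
#   descs = self.pool['res.partner'].fields_get(cr, uid, ['name'], context)
#   # descs['name'] -> {'type': 'char', 'string': 'Name', ...}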
3038 def get_empty_list_help(self, cr, user, help, context=None):
3039 """ Generic method giving the help message displayed when having
3040 no result to display in a list or kanban view. By default it returns
3041 the help given in parameter that is generally the help message
3042 defined in the action.
3046 def check_field_access_rights(self, cr, user, operation, fields, context=None):
3048 Check the user access rights on the given fields. This raises Access
3049 Denied if the user does not have the rights. Otherwise it returns the
3050 fields (as-is if ``fields`` is not falsy, or the readable/writable
3051 fields otherwise).
3053 if user == SUPERUSER_ID:
3054 return fields or list(self._fields)
3057 """ determine whether user has access to field `fname` """
3058 field = self._fields.get(fname)
3059 if field and field.groups:
3060 return self.user_has_groups(cr, user, groups=field.groups, context=context)
3065 fields = filter(valid, self._fields)
3067 invalid_fields = set(filter(lambda name: not valid(name), fields))
3069 _logger.warning('Access Denied by ACLs for operation: %s, uid: %s, model: %s, fields: %s',
3070 operation, user, self._name, ', '.join(invalid_fields))
3072 _('The requested operation cannot be completed due to security restrictions. '
3073 'Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
3074 (self._description, operation))
3078 # add explicit old-style implementation to read()
3080 def read(self, cr, user, ids, fields=None, context=None, load='_classic_read'):
3081 records = self.browse(cr, user, ids, context)
3082 result = BaseModel.read(records, fields, load=load)
3083 return result if isinstance(ids, list) else (bool(result) and result[0])
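# Editor's sketch of the old-style call dispatched above (hypothetical
# model/fields): a list of ids returns a list of dicts, a single id
# returns one dict (or False):
#
#   vals = self.pool['res.partner'].read(cr, uid, [1, 2], ['name'])
#   # -> [{'id': 1, 'name': ...}, {'id': 2, 'name': ...}]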
3085 # new-style implementation of read()
3087 def read(self, fields=None, load='_classic_read'):
3090 Reads the requested fields for the records in `self`, low-level/RPC
3091 method. In Python code, prefer :meth:`~.browse`.
3093 :param fields: list of field names to return (default is all fields)
3094 :return: a list of dictionaries mapping field names to their values,
3095 with one dictionary per record
3096 :raise AccessError: if user has no read rights on some of the given
3099 # check access rights
3100 self.check_access_rights('read')
3101 fields = self.check_field_access_rights('read', fields)
3103 # split fields into stored and computed fields
3104 stored, computed = [], []
3106 if name in self._columns:
3108 elif name in self._fields:
3109 computed.append(name)
3111 _logger.warning("%s.read() with unknown field '%s'", self._name, name)
3113 # fetch stored fields from the database to the cache
3114 self._read_from_database(stored)
3116 # retrieve results from records; this takes values from the cache and
3117 # computes remaining fields
3119 name_fields = [(name, self._fields[name]) for name in (stored + computed)]
3120 use_name_get = (load == '_classic_read')
3123 values = {'id': record.id}
3124 for name, field in name_fields:
3125 values[name] = field.convert_to_read(record[name], use_name_get)
3126 result.append(values)
3127 except MissingError:
3133 def _prefetch_field(self, field):
3134 """ Read from the database in order to fetch `field` (:class:`Field`
3135 instance) for `self` in cache.
3137 # fetch the records of this model without field_name in their cache
3138 records = self._in_cache_without(field)
3140 if len(records) > PREFETCH_MAX:
3141 records = records[:PREFETCH_MAX] | self
3143 # determine which fields can be prefetched
3144 if not self.env.in_draft and \
3145 self._context.get('prefetch_fields', True) and \
3146 self._columns[field.name]._prefetch:
3147 # prefetch all classic and many2one fields that the user can access
3149 for fname, fcolumn in self._columns.iteritems()
3150 if fcolumn._prefetch
3151 if not fcolumn.groups or self.user_has_groups(fcolumn.groups)
3154 fnames = {field.name}
3156 # important: never prefetch fields to recompute!
3157 get_recs_todo = self.env.field_todo
3158 for fname in list(fnames):
3159 if get_recs_todo(self._fields[fname]):
3160 if fname == field.name:
3161 records -= get_recs_todo(field)
3163 fnames.discard(fname)
3165 # fetch records with read()
3166 assert self in records and field.name in fnames
3169 result = records.read(list(fnames), load='_classic_write')
3173 # check the cache, and update it if necessary
3174 if not self._cache.contains(field):
3175 for values in result:
3176 record = self.browse(values.pop('id'))
3177 record._cache.update(record._convert_to_cache(values, validate=False))
3178 if not self._cache.contains(field):
3179 e = AccessError("No value found for %s.%s" % (self, field.name))
3180 self._cache[field] = FailedValue(e)
3183 def _read_from_database(self, field_names):
3184 """ Read the given fields of the records in `self` from the database,
3185 and store them in cache. Access errors are also stored in cache.
3188 cr, user, context = env.args
3190 # FIXME: The query construction needs to be rewritten using the internal Query
3191 # object, as in search(), to avoid ambiguous column references when
3192 # reading/sorting on a table that is auto_joined to another table with
3193 # common columns (e.g. the magical columns)
3195 # Construct a clause for the security rules.
3196 # 'tables' holds the list of tables necessary for the SELECT, including
3197 # the ir.rule clauses, and contains at least self._table.
3198 rule_clause, rule_params, tables = env['ir.rule'].domain_get(self._name, 'read')
3200 # determine the fields that are stored as columns in self._table
3201 fields_pre = [f for f in field_names if self._columns[f]._classic_write]
3203 # we need fully-qualified column names in case len(tables) > 1
3205 if isinstance(self._columns.get(f), fields.binary) and \
3206 context.get('bin_size_%s' % f, context.get('bin_size')):
3207 # PG 9.2 introduces conflicting pg_size_pretty(numeric) -> need ::cast
3208 return 'pg_size_pretty(length(%s."%s")::bigint) as "%s"' % (self._table, f, f)
3210 return '%s."%s"' % (self._table, f)
3211 qual_names = map(qualify, set(fields_pre + ['id']))
3213 query = """ SELECT %(qual_names)s FROM %(tables)s
3214 WHERE %(table)s.id IN %%s AND (%(extra)s)
3217 'qual_names': ",".join(qual_names),
3218 'tables': ",".join(tables),
3219 'table': self._table,
3220 'extra': " OR ".join(rule_clause) if rule_clause else "TRUE",
3221 'order': self._parent_order or self._order,
3225 for sub_ids in cr.split_for_in_conditions(self.ids):
3226 cr.execute(query, [tuple(sub_ids)] + rule_params)
3227 result.extend(cr.dictfetchall())
3229 ids = [vals['id'] for vals in result]
3232 # translate the fields if necessary
3233 if context.get('lang'):
3234 ir_translation = env['ir.translation']
3235 for f in fields_pre:
3236 if self._columns[f].translate:
3237 #TODO: optimize out of this loop
3238 res_trans = ir_translation._get_ids(
3239 '%s,%s' % (self._name, f), 'model', context['lang'], ids)
3241 vals[f] = res_trans.get(vals['id'], False) or vals[f]
3243 # apply the symbol_get functions of the fields we just read
3244 for f in fields_pre:
3245 symbol_get = self._columns[f]._symbol_get
3248 vals[f] = symbol_get(vals[f])
3250 # store result in cache for POST fields
3252 record = self.browse(vals['id'])
3253 record._cache.update(record._convert_to_cache(vals, validate=False))
3255 # determine the fields that must be processed now
3256 fields_post = [f for f in field_names if not self._columns[f]._classic_write]
3258 # Compute POST fields, grouped by multi
3259 by_multi = defaultdict(list)
3260 for f in fields_post:
3261 by_multi[self._columns[f]._multi].append(f)
3263 for multi, fs in by_multi.iteritems():
3265 res2 = self._columns[fs[0]].get(cr, self._model, ids, fs, user, context=context, values=result)
3266 assert res2 is not None, \
3267 'The function field "%s" on the "%s" model returned None\n' \
3268 '(a dictionary was expected).' % (fs[0], self._name)
3270 # TOCHECK : why got string instend of dict in python2.6
3271 # if isinstance(res2[vals['id']], str): res2[vals['id']] = eval(res2[vals['id']])
3272 multi_fields = res2.get(vals['id'], {})
3275 vals[f] = multi_fields.get(f, [])
3278 res2 = self._columns[f].get(cr, self._model, ids, f, user, context=context, values=result)
3281 vals[f] = res2[vals['id']]
3285 # Warn about deprecated fields now that fields_pre and fields_post are computed
3286 for f in field_names:
3287 column = self._columns[f]
3288 if column.deprecated:
3289 _logger.warning('Field %s.%s is deprecated: %s', self._name, f, column.deprecated)
3291 # store result in cache
3293 record = self.browse(vals.pop('id'))
3294 record._cache.update(record._convert_to_cache(vals, validate=False))
3296 # store failed values in cache for the records that could not be read
3297 fetched = self.browse(ids)
3298 missing = self - fetched
3300 extras = fetched - self
3303 _("Database fetch misses ids ({}) and has extra ids ({}), may be caused by a type incoherence in a previous request").format(
3304 ', '.join(map(repr, missing._ids)),
3305 ', '.join(map(repr, extras._ids)),
3307 # store an access error exception in existing records
3309 _('The requested operation cannot be completed due to security restrictions. Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
3310 (self._name, 'read')
3312 forbidden = missing.exists()
3313 forbidden._cache.update(FailedValue(exc))
3314 # store a missing error exception in non-existing records
3316 _('One of the documents you are trying to access has been deleted, please try again after refreshing.')
3318 (missing - forbidden)._cache.update(FailedValue(exc))
3321 def get_metadata(self):
3323 Returns some metadata about the given records.
3325 :return: list of ownership dictionaries for each requested record
3326 :rtype: list of dictionaries with the following keys:
3329 * create_uid: user who created the record
3330 * create_date: date when the record was created
3331 * write_uid: last user who changed the record
3332 * write_date: date of the last change to the record
3333 * xmlid: XML ID to use to refer to this record (if there is one), in format ``module.name``
3336 if self._log_access:
3337 fields += ['create_uid', 'create_date', 'write_uid', 'write_date']
3338 quoted_table = '"%s"' % self._table
3339 fields_str = ",".join('%s.%s' % (quoted_table, field) for field in fields)
3340 query = '''SELECT %s, __imd.module, __imd.name
3341 FROM %s LEFT JOIN ir_model_data __imd
3342 ON (__imd.model = %%s and __imd.res_id = %s.id)
3343 WHERE %s.id IN %%s''' % (fields_str, quoted_table, quoted_table, quoted_table)
3344 self._cr.execute(query, (self._name, tuple(self.ids)))
3345 res = self._cr.dictfetchall()
3347 uids = set(r[k] for r in res for k in ['write_uid', 'create_uid'] if r.get(k))
3348 names = dict(self.env['res.users'].browse(uids).name_get())
3352 value = r[key] = r[key] or False
3353 if key in ('write_uid', 'create_uid') and value in names:
3354 r[key] = (value, names[value])
3355 r['xmlid'] = ("%(module)s.%(name)s" % r) if r['name'] else False
3356 del r['name'], r['module']
3359 def _check_concurrency(self, cr, ids, context):
3362 if not (context.get(self.CONCURRENCY_CHECK_FIELD) and self._log_access):
3364 check_clause = "(id = %s AND %s < COALESCE(write_date, create_date, (now() at time zone 'UTC'))::timestamp)"
3365 for sub_ids in cr.split_for_in_conditions(ids):
3368 id_ref = "%s,%s" % (self._name, id)
3369 update_date = context[self.CONCURRENCY_CHECK_FIELD].pop(id_ref, None)
3371 ids_to_check.extend([id, update_date])
3372 if not ids_to_check:
3374 cr.execute("SELECT id FROM %s WHERE %s" % (self._table, " OR ".join([check_clause]*(len(ids_to_check)/2))), tuple(ids_to_check))
3377 # mention the first one only to keep the error message readable
3378 raise except_orm('ConcurrencyException', _('A document was modified since you last viewed it (%s:%d)') % (self._description, res[0]))
3380 def _check_record_rules_result_count(self, cr, uid, ids, result_ids, operation, context=None):
3381 """Verify the returned rows after applying record rules matches
3382 the length of `ids`, and raise an appropriate exception if it does not.
3386 ids, result_ids = set(ids), set(result_ids)
3387 missing_ids = ids - result_ids
3389 # Attempt to distinguish record rule restriction vs deleted records,
3390 # to provide a more specific error message - check if the missinf
3391 cr.execute('SELECT id FROM ' + self._table + ' WHERE id IN %s', (tuple(missing_ids),))
3392 forbidden_ids = [x[0] for x in cr.fetchall()]
3394 # the missing ids are (at least partially) hidden by access rules
3395 if uid == SUPERUSER_ID:
3397 _logger.warning('Access Denied by record rules for operation: %s on record ids: %r, uid: %s, model: %s', operation, forbidden_ids, uid, self._name)
3398 raise except_orm(_('Access Denied'),
3399 _('The requested operation cannot be completed due to security restrictions. Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
3400 (self._description, operation))
3402 # If we get here, the missing_ids are not in the database
3403 if operation in ('read','unlink'):
3404 # No need to warn about deleting an already deleted record.
3405 # And no error when reading a record that was deleted, to prevent spurious
3406 # errors for non-transactional search/read sequences coming from clients
3408 _logger.warning('Failed operation on deleted record(s): %s, uid: %s, model: %s', operation, uid, self._name)
3409 raise except_orm(_('Missing document(s)'),
3410 _('One of the documents you are trying to access has been deleted, please try again after refreshing.'))
3413 def check_access_rights(self, cr, uid, operation, raise_exception=True): # no context on purpose.
3414 """Verifies that the operation given by ``operation`` is allowed for the user
3415 according to the access rights."""
3416 return self.pool.get('ir.model.access').check(cr, uid, self._name, operation, raise_exception)
3418 def check_access_rule(self, cr, uid, ids, operation, context=None):
3419 """Verifies that the operation given by ``operation`` is allowed for the user
3420 according to ir.rules.
3422 :param operation: one of ``write``, ``unlink``
3423 :raise except_orm: * if current ir.rules do not permit this operation.
3424 :return: None if the operation is allowed
3426 if uid == SUPERUSER_ID:
3429 if self.is_transient():
3430 # Only one single implicit access rule for transient models: owner only!
3431 # This is ok to hardcode because we assert that TransientModels always
3432 # have log_access enabled so that the create_uid column is always there.
3433 # And even with _inherits, these fields are always present in the local
3434 # table too, so no need for JOINs.
3435 cr.execute("""SELECT distinct create_uid
3437 WHERE id IN %%s""" % self._table, (tuple(ids),))
3438 uids = [x[0] for x in cr.fetchall()]
3439 if len(uids) != 1 or uids[0] != uid:
3440 raise except_orm(_('Access Denied'),
3441 _('For this kind of document, you may only access records you created yourself.\n\n(Document type: %s)') % (self._description,))
3443 where_clause, where_params, tables = self.pool.get('ir.rule').domain_get(cr, uid, self._name, operation, context=context)
3445 where_clause = ' and ' + ' and '.join(where_clause)
3446 for sub_ids in cr.split_for_in_conditions(ids):
3447 cr.execute('SELECT ' + self._table + '.id FROM ' + ','.join(tables) +
3448 ' WHERE ' + self._table + '.id IN %s' + where_clause,
3449 [sub_ids] + where_params)
3450 returned_ids = [x['id'] for x in cr.dictfetchall()]
3451 self._check_record_rules_result_count(cr, uid, sub_ids, returned_ids, operation, context=context)
3453 def create_workflow(self, cr, uid, ids, context=None):
3454 """Create a workflow instance for each given record IDs."""
3455 from openerp import workflow
3457 workflow.trg_create(uid, self._name, res_id, cr)
3458 # self.invalidate_cache(cr, uid, context=context) ?
3461 def delete_workflow(self, cr, uid, ids, context=None):
3462 """Delete the workflow instances bound to the given record IDs."""
3463 from openerp import workflow
3465 workflow.trg_delete(uid, self._name, res_id, cr)
3466 self.invalidate_cache(cr, uid, context=context)
3469 def step_workflow(self, cr, uid, ids, context=None):
3470 """Reevaluate the workflow instances of the given record IDs."""
3471 from openerp import workflow
3473 workflow.trg_write(uid, self._name, res_id, cr)
3474 # self.invalidate_cache(cr, uid, context=context) ?
3477 def signal_workflow(self, cr, uid, ids, signal, context=None):
3478 """Send given workflow signal and return a dict mapping ids to workflow results"""
3479 from openerp import workflow
3482 result[res_id] = workflow.trg_validate(uid, self._name, res_id, signal, cr)
3483 # self.invalidate_cache(cr, uid, context=context) ?
3486 def redirect_workflow(self, cr, uid, old_new_ids, context=None):
3487 """ Rebind the workflow instance bound to the given 'old' record IDs to
3488 the given 'new' IDs. (``old_new_ids`` is a list of pairs ``(old, new)``.
3490 from openerp import workflow
3491 for old_id, new_id in old_new_ids:
3492 workflow.trg_redirect(uid, self._name, old_id, new_id, cr)
3493 self.invalidate_cache(cr, uid, context=context)
3496 def unlink(self, cr, uid, ids, context=None):
3499 Deletes the records of the current set
3501 :raise AccessError: * if user has no unlink rights on the requested object
3502 * if user tries to bypass access rules for unlink on the requested object
3503 :raise UserError: if the record is default property for other records
3508 if isinstance(ids, (int, long)):
3511 result_store = self._store_get_values(cr, uid, ids, self._fields.keys(), context)
3513 # for recomputing new-style fields
3514 recs = self.browse(cr, uid, ids, context)
3515 recs.modified(self._fields)
3517 self._check_concurrency(cr, ids, context)
3519 self.check_access_rights(cr, uid, 'unlink')
3521 ir_property = self.pool.get('ir.property')
3523 # Check if the records are used as default properties.
3524 domain = [('res_id', '=', False),
3525 ('value_reference', 'in', ['%s,%s' % (self._name, i) for i in ids]),
3527 if ir_property.search(cr, uid, domain, context=context):
3528 raise except_orm(_('Error'), _('Unable to delete this document because it is used as a default property'))
3530 # Delete the records' properties.
3531 property_ids = ir_property.search(cr, uid, [('res_id', 'in', ['%s,%s' % (self._name, i) for i in ids])], context=context)
3532 ir_property.unlink(cr, uid, property_ids, context=context)
3534 self.delete_workflow(cr, uid, ids, context=context)
3536 self.check_access_rule(cr, uid, ids, 'unlink', context=context)
3537 pool_model_data = self.pool.get('ir.model.data')
3538 ir_values_obj = self.pool.get('ir.values')
3539 ir_attachment_obj = self.pool.get('ir.attachment')
3540 for sub_ids in cr.split_for_in_conditions(ids):
3541 cr.execute('delete from ' + self._table + ' ' \
3542 'where id IN %s', (sub_ids,))
3544 # Removing the ir_model_data reference if the record being deleted is a record created by xml/csv file,
3545 # as these are not connected with real database foreign keys, and would be dangling references.
3546 # Note: following steps performed as admin to avoid access rights restrictions, and with no context
3547 # to avoid possible side-effects during admin calls.
3548 # Step 1. Calling unlink of ir_model_data only for the affected IDS
3549 reference_ids = pool_model_data.search(cr, SUPERUSER_ID, [('res_id','in',list(sub_ids)),('model','=',self._name)])
3550 # Step 2. Marching towards the real deletion of referenced records
3552 pool_model_data.unlink(cr, SUPERUSER_ID, reference_ids)
3554 # For the same reason, removing the record relevant to ir_values
3555 ir_value_ids = ir_values_obj.search(cr, uid,
3556 ['|',('value','in',['%s,%s' % (self._name, sid) for sid in sub_ids]),'&',('res_id','in',list(sub_ids)),('model','=',self._name)],
3559 ir_values_obj.unlink(cr, uid, ir_value_ids, context=context)
3561 # For the same reason, removing the record relevant to ir_attachment
3562 # The search is performed with sql as the search method of ir_attachment is overridden to hide attachments of deleted records
3563 cr.execute('select id from ir_attachment where res_model = %s and res_id in %s', (self._name, sub_ids))
3564 ir_attachment_ids = [ir_attachment[0] for ir_attachment in cr.fetchall()]
3565 if ir_attachment_ids:
3566 ir_attachment_obj.unlink(cr, uid, ir_attachment_ids, context=context)
3568 # invalidate the *whole* cache, since the orm does not handle all
3569 # changes made in the database, like cascading delete!
3570 recs.invalidate_cache()
3572 for order, obj_name, store_ids, fields in result_store:
3573 if obj_name == self._name:
3574 effective_store_ids = set(store_ids) - set(ids)
3576 effective_store_ids = store_ids
3577 if effective_store_ids:
3578 obj = self.pool[obj_name]
3579 cr.execute('select id from '+obj._table+' where id IN %s', (tuple(effective_store_ids),))
3580 rids = map(lambda x: x[0], cr.fetchall())
3582 obj._store_set_values(cr, uid, rids, fields, context)
3584 # recompute new-style fields
3593 def write(self, vals):
3596 Updates all records in the current set with the provided values.
3598 :param dict vals: fields to update and the value to set on them e.g::
3600 {'foo': 1, 'bar': "Qux"}
3602 will set the field ``foo`` to ``1`` and the field ``bar`` to
3603 ``"Qux"`` if those are valid (otherwise it will trigger an error).
3605 :raise AccessError: * if user has no write rights on the requested object
3606 * if user tries to bypass access rules for write on the requested object
3607 :raise ValidateError: if user tries to enter invalid value for a field that is not in selection
3608 :raise UserError: if a loop would be created in a hierarchy of objects a result of the operation (such as setting an object as its own parent)
3610 .. _openerp/models/relationals/format:
3612 .. note:: Relational fields use a special "commands" format to manipulate their values
3614 This format is a list of command triplets executed sequentially,
3615 possible command triplets are:
3617 ``(0, _, values: dict)``
3618 links to a new record created from the provided values
3619 ``(1, id, values: dict)``
3620 updates the already-linked record of id ``id`` with the
3623 unlinks and deletes the linked record of id ``id``
3625 unlinks the linked record of id ``id`` without deleting it
3627 links to an existing record of id ``id``
3629 unlinks all records in the relation, equivalent to using
3630 the command ``3`` on every linked record
3632 replaces the existing list of linked records by the provoded
3633 ones, equivalent to using ``5`` then ``4`` for each id in
3636 (in command triplets, ``_`` values are ignored and can be
3637 anything, generally ``0`` or ``False``)
3639 Any command can be used on :class:`~openerp.fields.Many2many`,
3640 only ``0``, ``1`` and ``2`` can be used on
3641 :class:`~openerp.fields.One2many`.
3646 self._check_concurrency(self._ids)
3647 self.check_access_rights('write')
3649 # No user-driven update of these columns
3650 for field in itertools.chain(MAGIC_COLUMNS, ('parent_left', 'parent_right')):
3651 vals.pop(field, None)
3653 # split up fields into old-style and pure new-style ones
3654 old_vals, new_vals, unknown = {}, {}, []
3655 for key, val in vals.iteritems():
3656 field = self._fields.get(key)
3658 if field.column or field.inherited:
3660 if field.inverse and not field.inherited:
3666 _logger.warning("%s.write() with unknown fields: %s", self._name, ', '.join(sorted(unknown)))
3668 # write old-style fields with (low-level) method _write
3670 self._write(old_vals)
3672 # put the values of pure new-style fields into cache, and inverse them
3675 record._cache.update(record._convert_to_cache(new_vals, update=True))
3676 for key in new_vals:
3677 self._fields[key].determine_inverse(self)
3681 def _write(self, cr, user, ids, vals, context=None):
3682 # low-level implementation of write()
3687 self.check_field_access_rights(cr, user, 'write', vals.keys())
3688 deleted_related = defaultdict(list)
3689 for field in vals.keys():
3691 if field in self._columns:
3692 fobj = self._columns[field]
3693 elif field in self._inherit_fields:
3694 fobj = self._inherit_fields[field][2]
3697 if fobj._type in ['one2many', 'many2many'] and vals[field]:
3698 for wtuple in vals[field]:
3699 if isinstance(wtuple, (tuple, list)) and wtuple[0] == 2:
3700 deleted_related[fobj._obj].append(wtuple[1])
3705 for group in groups:
3706 module = group.split(".")[0]
3707 grp = group.split(".")[1]
3708 cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name=%s and module=%s and model=%s) and uid=%s", \
3709 (grp, module, 'res.groups', user))
3710 readonly = cr.fetchall()
3711 if readonly[0][0] >= 1:
3718 result = self._store_get_values(cr, user, ids, vals.keys(), context) or []
3720 # for recomputing new-style fields
3721 recs = self.browse(cr, user, ids, context)
3722 modified_fields = list(vals)
3723 if self._log_access:
3724 modified_fields += ['write_date', 'write_uid']
3725 recs.modified(modified_fields)
3727 parents_changed = []
3728 parent_order = self._parent_order or self._order
3729 if self._parent_store and (self._parent_name in vals) and not context.get('defer_parent_store_computation'):
3730 # The parent_left/right computation may take up to
3731 # 5 seconds. No need to recompute the values if the
3732 # parent is the same.
3733 # Note: to respect parent_order, nodes must be processed in
3734 # order, so ``parents_changed`` must be ordered properly.
3735 parent_val = vals[self._parent_name]
3737 query = "SELECT id FROM %s WHERE id IN %%s AND (%s != %%s OR %s IS NULL) ORDER BY %s" % \
3738 (self._table, self._parent_name, self._parent_name, parent_order)
3739 cr.execute(query, (tuple(ids), parent_val))
3741 query = "SELECT id FROM %s WHERE id IN %%s AND (%s IS NOT NULL) ORDER BY %s" % \
3742 (self._table, self._parent_name, parent_order)
3743 cr.execute(query, (tuple(ids),))
3744 parents_changed = map(operator.itemgetter(0), cr.fetchall())
3751 totranslate = context.get('lang', False) and (context['lang'] != 'en_US')
3753 ffield = self._fields.get(field)
3754 if ffield and ffield.deprecated:
3755 _logger.warning('Field %s.%s is deprecated: %s', self._name, field, ffield.deprecated)
3756 if field in self._columns:
3757 if self._columns[field]._classic_write and not (hasattr(self._columns[field], '_fnct_inv')):
3758 if (not totranslate) or not self._columns[field].translate:
3759 upd0.append('"'+field+'"='+self._columns[field]._symbol_set[0])
3760 upd1.append(self._columns[field]._symbol_set[1](vals[field]))
3761 direct.append(field)
3763 upd_todo.append(field)
3765 updend.append(field)
3766 if field in self._columns \
3767 and hasattr(self._columns[field], 'selection') \
3769 self._check_selection_field_value(cr, user, field, vals[field], context=context)
3771 if self._log_access:
3772 upd0.append('write_uid=%s')
3773 upd0.append("write_date=(now() at time zone 'UTC')")
3775 direct.append('write_uid')
3776 direct.append('write_date')
3779 self.check_access_rule(cr, user, ids, 'write', context=context)
3780 for sub_ids in cr.split_for_in_conditions(ids):
3781 cr.execute('update ' + self._table + ' set ' + ','.join(upd0) + ' ' \
3782 'where id IN %s', upd1 + [sub_ids])
3783 if cr.rowcount != len(sub_ids):
3784 raise MissingError(_('One of the records you are trying to modify has already been deleted (Document type: %s).') % self._description)
3789 if self._columns[f].translate:
3790 src_trans = self.pool[self._name].read(cr, user, ids, [f])[0][f]
3793 # Inserting value to DB
3794 context_wo_lang = dict(context, lang=None)
3795 self.write(cr, user, ids, {f: vals[f]}, context=context_wo_lang)
3796 self.pool.get('ir.translation')._set_ids(cr, user, self._name+','+f, 'model', context['lang'], ids, vals[f], src_trans)
3798 # invalidate and mark new-style fields to recompute; do this before
3799 # setting other fields, because it can require the value of computed
3800 # fields, e.g., a one2many checking constraints on records
3801 recs.modified(direct)
3803 # call the 'set' method of fields which are not classic_write
3804 upd_todo.sort(lambda x, y: self._columns[x].priority-self._columns[y].priority)
3806 # default element in context must be removed when call a one2many or many2many
3807 rel_context = context.copy()
3808 for c in context.items():
3809 if c[0].startswith('default_'):
3810 del rel_context[c[0]]
3812 for field in upd_todo:
3814 result += self._columns[field].set(cr, self, id, field, vals[field], user, context=rel_context) or []
3816 # for recomputing new-style fields
3817 recs.modified(upd_todo)
3819 unknown_fields = updend[:]
3820 for table in self._inherits:
3821 col = self._inherits[table]
3823 for sub_ids in cr.split_for_in_conditions(ids):
3824 cr.execute('select distinct "'+col+'" from "'+self._table+'" ' \
3825 'where id IN %s', (sub_ids,))
3826 nids.extend([x[0] for x in cr.fetchall()])
3830 if self._inherit_fields[val][0] == table:
3832 unknown_fields.remove(val)
3834 self.pool[table].write(cr, user, nids, v, context)
3838 'No such field(s) in model %s: %s.',
3839 self._name, ', '.join(unknown_fields))
3841 # check Python constraints
3842 recs._validate_fields(vals)
3844 # TODO: use _order to set dest at the right position and not first node of parent
3845 # We can't defer parent_store computation because the stored function
3846 # fields that are computer may refer (directly or indirectly) to
3847 # parent_left/right (via a child_of domain)
3850 self.pool._init_parent[self._name] = True
3852 order = self._parent_order or self._order
3853 parent_val = vals[self._parent_name]
3855 clause, params = '%s=%%s' % (self._parent_name,), (parent_val,)
3857 clause, params = '%s IS NULL' % (self._parent_name,), ()
3859 for id in parents_changed:
3860 cr.execute('SELECT parent_left, parent_right FROM %s WHERE id=%%s' % (self._table,), (id,))
3861 pleft, pright = cr.fetchone()
3862 distance = pright - pleft + 1
3864 # Positions of current siblings, to locate proper insertion point;
3865 # this can _not_ be fetched outside the loop, as it needs to be refreshed
3866 # after each update, in case several nodes are sequentially inserted one
3867 # next to the other (i.e computed incrementally)
3868 cr.execute('SELECT parent_right, id FROM %s WHERE %s ORDER BY %s' % (self._table, clause, parent_order), params)
3869 parents = cr.fetchall()
3871 # Find Position of the element
3873 for (parent_pright, parent_id) in parents:
3876 position = parent_pright and parent_pright + 1 or 1
3878 # It's the first node of the parent
3883 cr.execute('select parent_left from '+self._table+' where id=%s', (parent_val,))
3884 position = cr.fetchone()[0] + 1
3886 if pleft < position <= pright:
3887 raise except_orm(_('UserError'), _('Recursivity Detected.'))
3889 if pleft < position:
3890 cr.execute('update '+self._table+' set parent_left=parent_left+%s where parent_left>=%s', (distance, position))
3891 cr.execute('update '+self._table+' set parent_right=parent_right+%s where parent_right>=%s', (distance, position))
3892 cr.execute('update '+self._table+' set parent_left=parent_left+%s, parent_right=parent_right+%s where parent_left>=%s and parent_left<%s', (position-pleft, position-pleft, pleft, pright))
3894 cr.execute('update '+self._table+' set parent_left=parent_left+%s where parent_left>=%s', (distance, position))
3895 cr.execute('update '+self._table+' set parent_right=parent_right+%s where parent_right>=%s', (distance, position))
3896 cr.execute('update '+self._table+' set parent_left=parent_left-%s, parent_right=parent_right-%s where parent_left>=%s and parent_left<%s', (pleft-position+distance, pleft-position+distance, pleft+distance, pright+distance))
3897 recs.invalidate_cache(['parent_left', 'parent_right'])
3899 result += self._store_get_values(cr, user, ids, vals.keys(), context)
3903 for order, model_name, ids_to_update, fields_to_recompute in result:
3904 key = (model_name, tuple(fields_to_recompute))
3905 done.setdefault(key, {})
3906 # avoid to do several times the same computation
3908 for id in ids_to_update:
3909 if id not in done[key]:
3910 done[key][id] = True
3911 if id not in deleted_related[model_name]:
3913 self.pool[model_name]._store_set_values(cr, user, todo, fields_to_recompute, context)
3915 # recompute new-style fields
3916 if context.get('recompute', True):
3919 self.step_workflow(cr, user, ids, context=context)
3923 # TODO: Should set perm to user.xxx
3926 @api.returns('self', lambda value: value.id)
3927 def create(self, vals):
3928 """ create(vals) -> record
3930 Creates a new record for the model.
3932 The new record is initialized using the values from ``vals`` and
3933 if necessary those from :meth:`~.default_get`.
3936 values for the model's fields, as a dictionary::
3938 {'field_name': field_value, ...}
3940 see :meth:`~.write` for details
3941 :return: new record created
3942 :raise AccessError: * if user has no create rights on the requested object
3943 * if user tries to bypass access rules for create on the requested object
3944 :raise ValidateError: if user tries to enter invalid value for a field that is not in selection
3945 :raise UserError: if a loop would be created in a hierarchy of objects a result of the operation (such as setting an object as its own parent)
3947 self.check_access_rights('create')
3949 # add missing defaults, and drop fields that may not be set by user
3950 vals = self._add_missing_default_values(vals)
3951 for field in itertools.chain(MAGIC_COLUMNS, ('parent_left', 'parent_right')):
3952 vals.pop(field, None)
3954 # split up fields into old-style and pure new-style ones
3955 old_vals, new_vals, unknown = {}, {}, []
3956 for key, val in vals.iteritems():
3957 field = self._fields.get(key)
3959 if field.column or field.inherited:
3961 if field.inverse and not field.inherited:
3967 _logger.warning("%s.create() with unknown fields: %s", self._name, ', '.join(sorted(unknown)))
3969 # create record with old-style fields
3970 record = self.browse(self._create(old_vals))
3972 # put the values of pure new-style fields into cache, and inverse them
3973 record._cache.update(record._convert_to_cache(new_vals))
3974 for key in new_vals:
3975 self._fields[key].determine_inverse(record)
3979 def _create(self, cr, user, vals, context=None):
3980 # low-level implementation of create()
3984 if self.is_transient():
3985 self._transient_vacuum(cr, user)
3988 for v in self._inherits:
3989 if self._inherits[v] not in vals:
3992 tocreate[v] = {'id': vals[self._inherits[v]]}
3995 # list of column assignments defined as tuples like:
3996 # (column_name, format_string, column_value)
3997 # (column_name, sql_formula)
3998 # Those tuples will be used by the string formatting for the INSERT
4000 ('id', "nextval('%s')" % self._sequence),
4005 for v in vals.keys():
4006 if v in self._inherit_fields and v not in self._columns:
4007 (table, col, col_detail, original_parent) = self._inherit_fields[v]
4008 tocreate[table][v] = vals[v]
4011 if (v not in self._inherit_fields) and (v not in self._columns):
4013 unknown_fields.append(v)
4016 'No such field(s) in model %s: %s.',
4017 self._name, ', '.join(unknown_fields))
4019 for table in tocreate:
4020 if self._inherits[table] in vals:
4021 del vals[self._inherits[table]]
4023 record_id = tocreate[table].pop('id', None)
4025 if record_id is None or not record_id:
4026 record_id = self.pool[table].create(cr, user, tocreate[table], context=context)
4028 self.pool[table].write(cr, user, [record_id], tocreate[table], context=context)
4030 updates.append((self._inherits[table], '%s', record_id))
4032 #Start : Set bool fields to be False if they are not touched(to make search more powerful)
4033 bool_fields = [x for x in self._columns.keys() if self._columns[x]._type=='boolean']
4035 for bool_field in bool_fields:
4036 if bool_field not in vals:
4037 vals[bool_field] = False
4039 for field in vals.keys():
4041 if field in self._columns:
4042 fobj = self._columns[field]
4044 fobj = self._inherit_fields[field][2]
4050 for group in groups:
4051 module = group.split(".")[0]
4052 grp = group.split(".")[1]
4053 cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name='%s' and module='%s' and model='%s') and uid=%s" % \
4054 (grp, module, 'res.groups', user))
4055 readonly = cr.fetchall()
4056 if readonly[0][0] >= 1:
4059 elif readonly[0][0] == 0:
4067 current_field = self._columns[field]
4068 if current_field._classic_write:
4069 updates.append((field, '%s', current_field._symbol_set[1](vals[field])))
4071 #for the function fields that receive a value, we set them directly in the database
4072 #(they may be required), but we also need to trigger the _fct_inv()
4073 if (hasattr(current_field, '_fnct_inv')) and not isinstance(current_field, fields.related):
4074 #TODO: this way to special case the related fields is really creepy but it shouldn't be changed at
4075 #one week of the release candidate. It seems the only good way to handle correctly this is to add an
4076 #attribute to make a field `really readonly´ and thus totally ignored by the create()... otherwise
4077 #if, for example, the related has a default value (for usability) then the fct_inv is called and it
4078 #may raise some access rights error. Changing this is a too big change for now, and is thus postponed
4079 #after the release but, definitively, the behavior shouldn't be different for related and function
4081 upd_todo.append(field)
4083 #TODO: this `if´ statement should be removed because there is no good reason to special case the fields
4084 #related. See the above TODO comment for further explanations.
4085 if not isinstance(current_field, fields.related):
4086 upd_todo.append(field)
4087 if field in self._columns \
4088 and hasattr(current_field, 'selection') \
4090 self._check_selection_field_value(cr, user, field, vals[field], context=context)
4091 if self._log_access:
4092 updates.append(('create_uid', '%s', user))
4093 updates.append(('write_uid', '%s', user))
4094 updates.append(('create_date', "(now() at time zone 'UTC')"))
4095 updates.append(('write_date', "(now() at time zone 'UTC')"))
4097 # the list of tuples used in this formatting corresponds to
4098 # tuple(field_name, format, value)
4099 # In some case, for example (id, create_date, write_date) we does not
4100 # need to read the third value of the tuple, because the real value is
4101 # encoded in the second value (the format).
4103 """INSERT INTO "%s" (%s) VALUES(%s) RETURNING id""" % (
4105 ', '.join('"%s"' % u[0] for u in updates),
4106 ', '.join(u[1] for u in updates)
4108 tuple([u[2] for u in updates if len(u) > 2])
4111 id_new, = cr.fetchone()
4112 recs = self.browse(cr, user, id_new, context)
4114 if self._parent_store and not context.get('defer_parent_store_computation'):
4116 self.pool._init_parent[self._name] = True
4118 parent = vals.get(self._parent_name, False)
4120 cr.execute('select parent_right from '+self._table+' where '+self._parent_name+'=%s order by '+(self._parent_order or self._order), (parent,))
4122 result_p = cr.fetchall()
4123 for (pleft,) in result_p:
4128 cr.execute('select parent_left from '+self._table+' where id=%s', (parent,))
4129 pleft_old = cr.fetchone()[0]
4132 cr.execute('select max(parent_right) from '+self._table)
4133 pleft = cr.fetchone()[0] or 0
4134 cr.execute('update '+self._table+' set parent_left=parent_left+2 where parent_left>%s', (pleft,))
4135 cr.execute('update '+self._table+' set parent_right=parent_right+2 where parent_right>%s', (pleft,))
4136 cr.execute('update '+self._table+' set parent_left=%s,parent_right=%s where id=%s', (pleft+1, pleft+2, id_new))
4137 recs.invalidate_cache(['parent_left', 'parent_right'])
4139 # invalidate and mark new-style fields to recompute; do this before
4140 # setting other fields, because it can require the value of computed
4141 # fields, e.g., a one2many checking constraints on records
4142 recs.modified([u[0] for u in updates])
4144 # call the 'set' method of fields which are not classic_write
4145 upd_todo.sort(lambda x, y: self._columns[x].priority-self._columns[y].priority)
4147 # default element in context must be remove when call a one2many or many2many
4148 rel_context = context.copy()
4149 for c in context.items():
4150 if c[0].startswith('default_'):
4151 del rel_context[c[0]]
4154 for field in upd_todo:
4155 result += self._columns[field].set(cr, self, id_new, field, vals[field], user, rel_context) or []
4157 # for recomputing new-style fields
4158 recs.modified(upd_todo)
4160 # check Python constraints
4161 recs._validate_fields(vals)
4163 if context.get('recompute', True):
4164 result += self._store_get_values(cr, user, [id_new],
4165 list(set(vals.keys() + self._inherits.values())),
4169 for order, model_name, ids, fields2 in result:
4170 if not (model_name, ids, fields2) in done:
4171 self.pool[model_name]._store_set_values(cr, user, ids, fields2, context)
4172 done.append((model_name, ids, fields2))
4173 # recompute new-style fields
4176 if self._log_create and context.get('recompute', True):
4177 message = self._description + \
4179 self.name_get(cr, user, [id_new], context=context)[0][1] + \
4180 "' " + _("created.")
4181 self.log(cr, user, id_new, message, True, context=context)
4183 self.check_access_rule(cr, user, [id_new], 'create', context=context)
4184 self.create_workflow(cr, user, [id_new], context=context)
4187 def _store_get_values(self, cr, uid, ids, fields, context):
4188 """Returns an ordered list of fields.function to call due to
4189 an update operation on ``fields`` of records with ``ids``,
4190 obtained by calling the 'store' triggers of these fields,
4191 as setup by their 'store' attribute.
4193 :return: [(priority, model_name, [record_ids,], [function_fields,])]
4195 if fields is None: fields = []
4196 stored_functions = self.pool._store_function.get(self._name, [])
4198 # use indexed names for the details of the stored_functions:
4199 model_name_, func_field_to_compute_, target_ids_func_, trigger_fields_, priority_ = range(5)
4201 # only keep store triggers that should be triggered for the ``fields``
4203 triggers_to_compute = (
4204 f for f in stored_functions
4205 if not f[trigger_fields_] or set(fields).intersection(f[trigger_fields_])
4209 target_id_results = {}
4210 for store_trigger in triggers_to_compute:
4211 target_func_id_ = id(store_trigger[target_ids_func_])
4212 if target_func_id_ not in target_id_results:
4213 # use admin user for accessing objects having rules defined on store fields
4214 target_id_results[target_func_id_] = [i for i in store_trigger[target_ids_func_](self, cr, SUPERUSER_ID, ids, context) if i]
4215 target_ids = target_id_results[target_func_id_]
4217 # the compound key must consider the priority and model name
4218 key = (store_trigger[priority_], store_trigger[model_name_])
4219 for target_id in target_ids:
4220 to_compute_map.setdefault(key, {}).setdefault(target_id,set()).add(tuple(store_trigger))
4222 # Here to_compute_map looks like:
4223 # { (10, 'model_a') : { target_id1: [ (trigger_1_tuple, trigger_2_tuple) ], ... }
4224 # (20, 'model_a') : { target_id2: [ (trigger_3_tuple, trigger_4_tuple) ], ... }
4225 # (99, 'model_a') : { target_id1: [ (trigger_5_tuple, trigger_6_tuple) ], ... }
4228 # Now we need to generate the batch function calls list
4230 # { (10, 'model_a') : [(10, 'model_a', [record_ids,], [function_fields,])] }
4232 for ((priority,model), id_map) in to_compute_map.iteritems():
4233 trigger_ids_maps = {}
4234 # function_ids_maps =
4235 # { (function_1_tuple, function_2_tuple) : [target_id1, target_id2, ..] }
4236 for target_id, triggers in id_map.iteritems():
4237 trigger_ids_maps.setdefault(tuple(triggers), []).append(target_id)
4238 for triggers, target_ids in trigger_ids_maps.iteritems():
4239 call_map.setdefault((priority,model),[]).append((priority, model, target_ids,
4240 [t[func_field_to_compute_] for t in triggers]))
4243 result = reduce(operator.add, (call_map[k] for k in sorted(call_map)))
4246 def _store_set_values(self, cr, uid, ids, fields, context):
4247 """Calls the fields.function's "implementation function" for all ``fields``, on records with ``ids`` (taking care of
4248 respecting ``multi`` attributes), and stores the resulting values in the database directly."""
4253 if self._log_access:
4254 cr.execute('select id,write_date from '+self._table+' where id IN %s', (tuple(ids),))
4258 field_dict.setdefault(r[0], [])
4259 res_date = time.strptime((r[1])[:19], '%Y-%m-%d %H:%M:%S')
4260 write_date = datetime.datetime.fromtimestamp(time.mktime(res_date))
4261 for i in self.pool._store_function.get(self._name, []):
4263 up_write_date = write_date + datetime.timedelta(hours=i[5])
4264 if datetime.datetime.now() < up_write_date:
4266 field_dict[r[0]].append(i[1])
4272 if self._columns[f]._multi not in keys:
4273 keys.append(self._columns[f]._multi)
4274 todo.setdefault(self._columns[f]._multi, [])
4275 todo[self._columns[f]._multi].append(f)
4279 # use admin user for accessing objects having rules defined on store fields
4280 result = self._columns[val[0]].get(cr, self, ids, val, SUPERUSER_ID, context=context)
4281 for id, value in result.items():
4283 for f in value.keys():
4284 if f in field_dict[id]:
4291 if self._columns[v]._type == 'many2one':
4293 value[v] = value[v][0]
4296 upd0.append('"'+v+'"='+self._columns[v]._symbol_set[0])
4297 upd1.append(self._columns[v]._symbol_set[1](value[v]))
4300 cr.execute('update "' + self._table + '" set ' + \
4301 ','.join(upd0) + ' where id = %s', upd1)
4305 # use admin user for accessing objects having rules defined on store fields
4306 result = self._columns[f].get(cr, self, ids, f, SUPERUSER_ID, context=context)
4307 for r in result.keys():
4309 if r in field_dict.keys():
4310 if f in field_dict[r]:
4312 for id, value in result.items():
4313 if self._columns[f]._type == 'many2one':
4318 cr.execute('update "' + self._table + '" set ' + \
4319 '"'+f+'"='+self._columns[f]._symbol_set[0] + ' where id = %s', (self._columns[f]._symbol_set[1](value), id))
4321 # invalidate and mark new-style fields to recompute
4322 self.browse(cr, uid, ids, context).modified(fields)
4326 # TODO: ameliorer avec NULL
4327 def _where_calc(self, cr, user, domain, active_test=True, context=None):
4328 """Computes the WHERE clause needed to implement an OpenERP domain.
4329 :param domain: the domain to compute
4331 :param active_test: whether the default filtering of records with ``active``
4332 field set to ``False`` should be applied.
4333 :return: the query expressing the given domain as provided in domain
4334 :rtype: osv.query.Query
4339 # if the object has a field named 'active', filter out all inactive
4340 # records unless they were explicitely asked for
4341 if 'active' in self._fields and active_test and context.get('active_test', True):
4343 # the item[0] trick below works for domain items and '&'/'|'/'!'
4345 if not any(item[0] == 'active' for item in domain):
4346 domain.insert(0, ('active', '=', 1))
4348 domain = [('active', '=', 1)]
4351 e = expression.expression(cr, user, domain, self, context)
4352 tables = e.get_tables()
4353 where_clause, where_params = e.to_sql()
4354 where_clause = where_clause and [where_clause] or []
4356 where_clause, where_params, tables = [], [], ['"%s"' % self._table]
4358 return Query(tables, where_clause, where_params)
4360 def _check_qorder(self, word):
4361 if not regex_order.match(word):
4362 raise except_orm(_('AccessError'), _('Invalid "order" specified. A valid "order" specification is a comma-separated list of valid field names (optionally followed by asc/desc for the direction)'))
4365 def _apply_ir_rules(self, cr, uid, query, mode='read', context=None):
4366 """Add what's missing in ``query`` to implement all appropriate ir.rules
4367 (using the ``model_name``'s rules or the current model's rules if ``model_name`` is None)
4369 :param query: the current query object
4371 if uid == SUPERUSER_ID:
4374 def apply_rule(added_clause, added_params, added_tables, parent_model=None):
4375 """ :param parent_model: name of the parent model, if the added
4376 clause comes from a parent model
4380 # as inherited rules are being applied, we need to add the missing JOIN
4381 # to reach the parent table (if it was not JOINed yet in the query)
4382 parent_alias = self._inherits_join_add(self, parent_model, query)
4383 # inherited rules are applied on the external table -> need to get the alias and replace
4384 parent_table = self.pool[parent_model]._table
4385 added_clause = [clause.replace('"%s"' % parent_table, '"%s"' % parent_alias) for clause in added_clause]
4386 # change references to parent_table to parent_alias, because we now use the alias to refer to the table
4388 for table in added_tables:
4389 # table is just a table name -> switch to the full alias
4390 if table == '"%s"' % parent_table:
4391 new_tables.append('"%s" as "%s"' % (parent_table, parent_alias))
4392 # table is already a full statement -> replace reference to the table to its alias, is correct with the way aliases are generated
4394 new_tables.append(table.replace('"%s"' % parent_table, '"%s"' % parent_alias))
4395 added_tables = new_tables
4396 query.where_clause += added_clause
4397 query.where_clause_params += added_params
4398 for table in added_tables:
4399 if table not in query.tables:
4400 query.tables.append(table)
4404 # apply main rules on the object
4405 rule_obj = self.pool.get('ir.rule')
4406 rule_where_clause, rule_where_clause_params, rule_tables = rule_obj.domain_get(cr, uid, self._name, mode, context=context)
4407 apply_rule(rule_where_clause, rule_where_clause_params, rule_tables)
4409 # apply ir.rules from the parents (through _inherits)
4410 for inherited_model in self._inherits:
4411 rule_where_clause, rule_where_clause_params, rule_tables = rule_obj.domain_get(cr, uid, inherited_model, mode, context=context)
4412 apply_rule(rule_where_clause, rule_where_clause_params, rule_tables,
4413 parent_model=inherited_model)
4415 def _generate_m2o_order_by(self, order_field, query):
4417 Add possibly missing JOIN to ``query`` and generate the ORDER BY clause for m2o fields,
4418 either native m2o fields or function/related fields that are stored, including
4419 intermediate JOINs for inheritance if required.
4421 :return: the qualified field name to use in an ORDER BY clause to sort by ``order_field``
4423 if order_field not in self._columns and order_field in self._inherit_fields:
4424 # also add missing joins for reaching the table containing the m2o field
4425 qualified_field = self._inherits_join_calc(order_field, query)
4426 order_field_column = self._inherit_fields[order_field][2]
4428 qualified_field = '"%s"."%s"' % (self._table, order_field)
4429 order_field_column = self._columns[order_field]
4431 assert order_field_column._type == 'many2one', 'Invalid field passed to _generate_m2o_order_by()'
4432 if not order_field_column._classic_write and not getattr(order_field_column, 'store', False):
4433 _logger.debug("Many2one function/related fields must be stored " \
4434 "to be used as ordering fields! Ignoring sorting for %s.%s",
4435 self._name, order_field)
4438 # figure out the applicable order_by for the m2o
4439 dest_model = self.pool[order_field_column._obj]
4440 m2o_order = dest_model._order
4441 if not regex_order.match(m2o_order):
4442 # _order is complex, can't use it here, so we default to _rec_name
4443 m2o_order = dest_model._rec_name
4445 # extract the field names, to be able to qualify them and add desc/asc
4447 for order_part in m2o_order.split(","):
4448 m2o_order_list.append(order_part.strip().split(" ", 1)[0].strip())
4449 m2o_order = m2o_order_list
4451 # Join the dest m2o table if it's not joined yet. We use [LEFT] OUTER join here
4452 # as we don't want to exclude results that have NULL values for the m2o
4453 src_table, src_field = qualified_field.replace('"', '').split('.', 1)
4454 dst_alias, dst_alias_statement = query.add_join((src_table, dest_model._table, src_field, 'id', src_field), implicit=False, outer=True)
4455 qualify = lambda field: '"%s"."%s"' % (dst_alias, field)
4456 return map(qualify, m2o_order) if isinstance(m2o_order, list) else qualify(m2o_order)
4458 def _generate_order_by(self, order_spec, query):
4460 Attempt to consruct an appropriate ORDER BY clause based on order_spec, which must be
4461 a comma-separated list of valid field names, optionally followed by an ASC or DESC direction.
4463 :raise" except_orm in case order_spec is malformed
4465 order_by_clause = ''
4466 order_spec = order_spec or self._order
4468 order_by_elements = []
4469 self._check_qorder(order_spec)
4470 for order_part in order_spec.split(','):
4471 order_split = order_part.strip().split(' ')
4472 order_field = order_split[0].strip()
4473 order_direction = order_split[1].strip() if len(order_split) == 2 else ''
4476 if order_field == 'id':
4477 order_by_elements.append('"%s"."%s" %s' % (self._table, order_field, order_direction))
4478 elif order_field in self._columns:
4479 order_column = self._columns[order_field]
4480 if order_column._classic_read:
4481 inner_clause = '"%s"."%s"' % (self._table, order_field)
4482 elif order_column._type == 'many2one':
4483 inner_clause = self._generate_m2o_order_by(order_field, query)
4485 continue # ignore non-readable or "non-joinable" fields
4486 elif order_field in self._inherit_fields:
4487 parent_obj = self.pool[self._inherit_fields[order_field][3]]
4488 order_column = parent_obj._columns[order_field]
4489 if order_column._classic_read:
4490 inner_clause = self._inherits_join_calc(order_field, query)
4491 elif order_column._type == 'many2one':
4492 inner_clause = self._generate_m2o_order_by(order_field, query)
4494 continue # ignore non-readable or "non-joinable" fields
4496 raise ValueError( _("Sorting field %s not found on model %s") %( order_field, self._name))
4497 if order_column and order_column._type == 'boolean':
4498 inner_clause = "COALESCE(%s, false)" % inner_clause
4500 if isinstance(inner_clause, list):
4501 for clause in inner_clause:
4502 order_by_elements.append("%s %s" % (clause, order_direction))
4504 order_by_elements.append("%s %s" % (inner_clause, order_direction))
4505 if order_by_elements:
4506 order_by_clause = ",".join(order_by_elements)
4508 return order_by_clause and (' ORDER BY %s ' % order_by_clause) or ''
4510 def _search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False, access_rights_uid=None):
4512 Private implementation of search() method, allowing specifying the uid to use for the access right check.
4513 This is useful for example when filling in the selection list for a drop-down and avoiding access rights errors,
4514 by specifying ``access_rights_uid=1`` to bypass access rights check, but not ir.rules!
4515 This is ok at the security level because this method is private and not callable through XML-RPC.
4517 :param access_rights_uid: optional user ID to use when checking access rights
4518 (not for ir.rules, this is only for ir.model.access)
4522 self.check_access_rights(cr, access_rights_uid or user, 'read')
4524 # For transient models, restrict acces to the current user, except for the super-user
4525 if self.is_transient() and self._log_access and user != SUPERUSER_ID:
4526 args = expression.AND(([('create_uid', '=', user)], args or []))
4528 query = self._where_calc(cr, user, args, context=context)
4529 self._apply_ir_rules(cr, user, query, 'read', context=context)
4530 order_by = self._generate_order_by(order, query)
4531 from_clause, where_clause, where_clause_params = query.get_sql()
4533 where_str = where_clause and (" WHERE %s" % where_clause) or ''
4536 # Ignore order, limit and offset when just counting, they don't make sense and could
4538 query_str = 'SELECT count(1) FROM ' + from_clause + where_str
4539 cr.execute(query_str, where_clause_params)
4543 limit_str = limit and ' limit %d' % limit or ''
4544 offset_str = offset and ' offset %d' % offset or ''
4545 query_str = 'SELECT "%s".id FROM ' % self._table + from_clause + where_str + order_by + limit_str + offset_str
4546 cr.execute(query_str, where_clause_params)
4549 # TDE note: with auto_join, we could have several lines about the same result
4550 # i.e. a lead with several unread messages; we uniquify the result using
4551 # a fast way to do it while preserving order (http://www.peterbe.com/plog/uniqifiers-benchmark)
4552 def _uniquify_list(seq):
4554 return [x for x in seq if x not in seen and not seen.add(x)]
4556 return _uniquify_list([x[0] for x in res])
4558 # returns the different values ever entered for one field
4559 # this is used, for example, in the client when the user hits enter on
4561 def distinct_field_get(self, cr, uid, field, value, args=None, offset=0, limit=None):
4564 if field in self._inherit_fields:
4565 return self.pool[self._inherit_fields[field][0]].distinct_field_get(cr, uid, field, value, args, offset, limit)
4567 return self._columns[field].search(cr, self, args, field, value, offset, limit, uid)
4569 def copy_data(self, cr, uid, id, default=None, context=None):
4571 Copy given record's data with all its fields values
4573 :param cr: database cursor
4574 :param uid: current user id
4575 :param id: id of the record to copy
4576 :param default: field values to override in the original values of the copied record
4577 :type default: dictionary
4578 :param context: context arguments, like lang, time zone
4579 :type context: dictionary
4580 :return: dictionary containing all the field values
4586 # avoid recursion through already copied records in case of circular relationship
4587 seen_map = context.setdefault('__copy_data_seen', {})
4588 if id in seen_map.setdefault(self._name, []):
4590 seen_map[self._name].append(id)
4594 if 'state' not in default:
4595 if 'state' in self._defaults:
4596 if callable(self._defaults['state']):
4597 default['state'] = self._defaults['state'](self, cr, uid, context)
4599 default['state'] = self._defaults['state']
4601 # build a black list of fields that should not be copied
4602 blacklist = set(MAGIC_COLUMNS + ['parent_left', 'parent_right'])
4603 whitelist = set(name for name, field in self._fields.iteritems() if not field.inherited)
4605 def blacklist_given_fields(obj):
4606 # blacklist the fields that are given by inheritance
4607 for other, field_to_other in obj._inherits.items():
4608 blacklist.add(field_to_other)
4609 if field_to_other in default:
4610 # all the fields of 'other' are given by the record: default[field_to_other],
4611 # except the ones redefined in self
4612 blacklist.update(set(self.pool[other]._fields) - whitelist)
4614 blacklist_given_fields(self.pool[other])
4615 # blacklist deprecated fields
4616 for name, field in obj._fields.iteritems():
4617 if field.deprecated:
4620 blacklist_given_fields(self)
4623 fields_to_copy = dict((f,fi) for f, fi in self._fields.iteritems()
4626 if f not in blacklist)
4628 data = self.read(cr, uid, [id], fields_to_copy.keys(), context=context)
4632 raise IndexError( _("Record #%d of %s not found, cannot copy!") %( id, self._name))
4635 for f, field in fields_to_copy.iteritems():
4636 if field.type == 'many2one':
4637 res[f] = data[f] and data[f][0]
4638 elif field.type == 'one2many':
4639 other = self.pool[field.comodel_name]
4640 # duplicate following the order of the ids because we'll rely on
4641 # it later for copying translations in copy_translation()!
4642 lines = [other.copy_data(cr, uid, line_id, context=context) for line_id in sorted(data[f])]
4643 # the lines are duplicated using the wrong (old) parent, but then
4644 # are reassigned to the correct one thanks to the (0, 0, ...)
4645 res[f] = [(0, 0, line) for line in lines if line]
4646 elif field.type == 'many2many':
4647 res[f] = [(6, 0, data[f])]
4653 def copy_translations(self, cr, uid, old_id, new_id, context=None):
4657 # avoid recursion through already copied records in case of circular relationship
4658 seen_map = context.setdefault('__copy_translations_seen',{})
4659 if old_id in seen_map.setdefault(self._name,[]):
4661 seen_map[self._name].append(old_id)
4663 trans_obj = self.pool.get('ir.translation')
4665 for field_name, field in self._fields.iteritems():
4666 # removing the lang to compare untranslated values
4667 context_wo_lang = dict(context, lang=None)
4668 old_record, new_record = self.browse(cr, uid, [old_id, new_id], context=context_wo_lang)
4669 # we must recursively copy the translations for o2o and o2m
4670 if field.type == 'one2many':
4671 target_obj = self.pool[field.comodel_name]
4672 # here we rely on the order of the ids to match the translations
4673 # as foreseen in copy_data()
4674 old_children = sorted(r.id for r in old_record[field_name])
4675 new_children = sorted(r.id for r in new_record[field_name])
4676 for (old_child, new_child) in zip(old_children, new_children):
4677 target_obj.copy_translations(cr, uid, old_child, new_child, context=context)
4678 # and for translatable fields we keep them for copy
4679 elif getattr(field, 'translate', False):
4680 if field_name in self._columns:
4681 trans_name = self._name + "," + field_name
4684 elif field_name in self._inherit_fields:
4685 trans_name = self._inherit_fields[field_name][0] + "," + field_name
4686 # get the id of the parent record to set the translation
4687 inherit_field_name = self._inherit_fields[field_name][1]
4688 target_id = new_record[inherit_field_name].id
4689 source_id = old_record[inherit_field_name].id
4693 trans_ids = trans_obj.search(cr, uid, [
4694 ('name', '=', trans_name),
4695 ('res_id', '=', source_id)
4697 user_lang = context.get('lang')
4698 for record in trans_obj.read(cr, uid, trans_ids, context=context):
4700 # remove source to avoid triggering _set_src
4701 del record['source']
4702 record.update({'res_id': target_id})
4703 if user_lang and user_lang == record['lang']:
4704 # 'source' to force the call to _set_src
4705 # 'value' needed if value is changed in copy(), want to see the new_value
4706 record['source'] = old_record[field_name]
4707 record['value'] = new_record[field_name]
4708 trans_obj.create(cr, uid, record, context=context)
4710 @api.returns('self', lambda value: value.id)
4711 def copy(self, cr, uid, id, default=None, context=None):
4712 """ copy(default=None)
4714 Duplicate record with given id updating it with default values
4716 :param dict default: dictionary of field values to override in the
4717 original values of the copied record, e.g: ``{'field_name': overriden_value, ...}``
4718 :returns: new record
4723 context = context.copy()
4724 data = self.copy_data(cr, uid, id, default, context)
4725 new_id = self.create(cr, uid, data, context)
4726 self.copy_translations(cr, uid, id, new_id, context)
4730 @api.returns('self')
4732 """ exists() -> records
4734 Returns the subset of records in `self` that exist, and marks deleted
4735 records as such in cache. It can be used as a test on records::
4740 By convention, new records are returned as existing.
4742 ids = filter(None, self._ids) # ids to check in database
4745 query = """SELECT id FROM "%s" WHERE id IN %%s""" % self._table
4746 self._cr.execute(query, (ids,))
4747 ids = ([r[0] for r in self._cr.fetchall()] + # ids in database
4748 [id for id in self._ids if not id]) # new ids
4749 existing = self.browse(ids)
4750 if len(existing) < len(self):
4751 # mark missing records in cache with a failed value
4752 exc = MissingError(_("Record does not exist or has been deleted."))
4753 (self - existing)._cache.update(FailedValue(exc))
4756 def check_recursion(self, cr, uid, ids, context=None, parent=None):
4757 _logger.warning("You are using deprecated %s.check_recursion(). Please use the '_check_recursion()' instead!" % \
4759 assert parent is None or parent in self._columns or parent in self._inherit_fields,\
4760 "The 'parent' parameter passed to check_recursion() must be None or a valid field name"
4761 return self._check_recursion(cr, uid, ids, context, parent)
4763 def _check_recursion(self, cr, uid, ids, context=None, parent=None):
4765 Verifies that there is no loop in a hierarchical structure of records,
4766 by following the parent relationship using the **parent** field until a loop
4767 is detected or until a top-level record is found.
4769 :param cr: database cursor
4770 :param uid: current user id
4771 :param ids: list of ids of records to check
4772 :param parent: optional parent field name (default: ``self._parent_name = parent_id``)
4773 :return: **True** if the operation can proceed safely, or **False** if an infinite loop is detected.
4776 parent = self._parent_name
4778 # must ignore 'active' flag, ir.rules, etc. => direct SQL query
4779 query = 'SELECT "%s" FROM "%s" WHERE id = %%s' % (parent, self._table)
4782 while current_id is not None:
4783 cr.execute(query, (current_id,))
4784 result = cr.fetchone()
4785 current_id = result[0] if result else None
4786 if current_id == id:
4790 def _check_m2m_recursion(self, cr, uid, ids, field_name):
4792 Verifies that there is no loop in a hierarchical structure of records,
4793 by following the parent relationship using the **parent** field until a loop
4794 is detected or until a top-level record is found.
4796 :param cr: database cursor
4797 :param uid: current user id
4798 :param ids: list of ids of records to check
4799 :param field_name: field to check
4800 :return: **True** if the operation can proceed safely, or **False** if an infinite loop is detected.
4803 field = self._fields.get(field_name)
4804 if not (field and field.type == 'many2many' and
4805 field.comodel_name == self._name and field.store):
4806 # field must be a many2many on itself
4807 raise ValueError('invalid field_name: %r' % (field_name,))
4809 query = 'SELECT distinct "%s" FROM "%s" WHERE "%s" IN %%s' % \
4810 (field.column2, field.relation, field.column1)
4814 for i in range(0, len(ids_parent), cr.IN_MAX):
4816 sub_ids_parent = ids_parent[i:j]
4817 cr.execute(query, (tuple(sub_ids_parent),))
4818 ids_parent2.extend(filter(None, map(lambda x: x[0], cr.fetchall())))
4819 ids_parent = ids_parent2
4820 for i in ids_parent:
4825 def _get_external_ids(self, cr, uid, ids, *args, **kwargs):
4826 """Retrieve the External ID(s) of any database record.
4828 **Synopsis**: ``_get_xml_ids(cr, uid, ids) -> { 'id': ['module.xml_id'] }``
4830 :return: map of ids to the list of their fully qualified External IDs
4831 in the form ``module.key``, or an empty list when there's no External
4832 ID for a record, e.g.::
4834 { 'id': ['module.ext_id', 'module.ext_id_bis'],
4837 ir_model_data = self.pool.get('ir.model.data')
4838 data_ids = ir_model_data.search(cr, uid, [('model', '=', self._name), ('res_id', 'in', ids)])
4839 data_results = ir_model_data.read(cr, uid, data_ids, ['module', 'name', 'res_id'])
4842 # can't use dict.fromkeys() as the list would be shared!
4844 for record in data_results:
4845 result[record['res_id']].append('%(module)s.%(name)s' % record)
4848 def get_external_id(self, cr, uid, ids, *args, **kwargs):
4849 """Retrieve the External ID of any database record, if there
4850 is one. This method works as a possible implementation
4851 for a function field, to be able to add it to any
4852 model object easily, referencing it as ``Model.get_external_id``.
4854 When multiple External IDs exist for a record, only one
4855 of them is returned (randomly).
4857 :return: map of ids to their fully qualified XML ID,
4858 defaulting to an empty string when there's none
4859 (to be usable as a function field),
4862 { 'id': 'module.ext_id',
4865 results = self._get_xml_ids(cr, uid, ids)
4866 for k, v in results.iteritems():
4873 # backwards compatibility
4874 get_xml_id = get_external_id
4875 _get_xml_ids = _get_external_ids
4877 def print_report(self, cr, uid, ids, name, data, context=None):
4879 Render the report `name` for the given IDs. The report must be defined
4880 for this model, not another.
4882 report = self.pool['ir.actions.report.xml']._lookup_report(cr, name)
4883 assert self._name == report.table
4884 return report.create(cr, uid, ids, data, context)
4888 def is_transient(cls):
4889 """ Return whether the model is transient.
4891 See :class:`TransientModel`.
4894 return cls._transient
4896 def _transient_clean_rows_older_than(self, cr, seconds):
4897 assert self._transient, "Model %s is not transient, it cannot be vacuumed!" % self._name
4898 # Never delete rows used in last 5 minutes
4899 seconds = max(seconds, 300)
4900 query = ("SELECT id FROM " + self._table + " WHERE"
4901 " COALESCE(write_date, create_date, (now() at time zone 'UTC'))::timestamp"
4902 " < ((now() at time zone 'UTC') - interval %s)")
4903 cr.execute(query, ("%s seconds" % seconds,))
4904 ids = [x[0] for x in cr.fetchall()]
4905 self.unlink(cr, SUPERUSER_ID, ids)
4907 def _transient_clean_old_rows(self, cr, max_count):
4908 # Check how many rows we have in the table
4909 cr.execute("SELECT count(*) AS row_count FROM " + self._table)
4911 if res[0][0] <= max_count:
4912 return # max not reached, nothing to do
4913 self._transient_clean_rows_older_than(cr, 300)
4915 def _transient_vacuum(self, cr, uid, force=False):
4916 """Clean the transient records.
4918 This unlinks old records from the transient model tables whenever the
4919 "_transient_max_count" or "_max_age" conditions (if any) are reached.
4920 Actual cleaning will happen only once every "_transient_check_time" calls.
4921 This means this method can be called frequently called (e.g. whenever
4922 a new record is created).
4923 Example with both max_hours and max_count active:
4924 Suppose max_hours = 0.2 (e.g. 12 minutes), max_count = 20, there are 55 rows in the
4925 table, 10 created/changed in the last 5 minutes, an additional 12 created/changed between
4926 5 and 10 minutes ago, the rest created/changed more then 12 minutes ago.
4927 - age based vacuum will leave the 22 rows created/changed in the last 12 minutes
4928 - count based vacuum will wipe out another 12 rows. Not just 2, otherwise each addition
4929 would immediately cause the maximum to be reached again.
4930 - the 10 rows that have been created/changed the last 5 minutes will NOT be deleted
4932 assert self._transient, "Model %s is not transient, it cannot be vacuumed!" % self._name
4933 _transient_check_time = 20 # arbitrary limit on vacuum executions
4934 self._transient_check_count += 1
4935 if not force and (self._transient_check_count < _transient_check_time):
4936 return True # no vacuum cleaning this time
4937 self._transient_check_count = 0
4939 # Age-based expiration
4940 if self._transient_max_hours:
4941 self._transient_clean_rows_older_than(cr, self._transient_max_hours * 60 * 60)
4943 # Count-based expiration
4944 if self._transient_max_count:
4945 self._transient_clean_old_rows(cr, self._transient_max_count)
4949 def resolve_2many_commands(self, cr, uid, field_name, commands, fields=None, context=None):
4950 """ Serializes one2many and many2many commands into record dictionaries
4951 (as if all the records came from the database via a read()). This
4952 method is aimed at onchange methods on one2many and many2many fields.
4954 Because commands might be creation commands, not all record dicts
4955 will contain an ``id`` field. Commands matching an existing record
4956 will have an ``id``.
4958 :param field_name: name of the one2many or many2many field matching the commands
4959 :type field_name: str
4960 :param commands: one2many or many2many commands to execute on ``field_name``
4961 :type commands: list((int|False, int|False, dict|False))
4962 :param fields: list of fields to read from the database, when applicable
4963 :type fields: list(str)
4964 :returns: records in a shape similar to that returned by ``read()``
4965 (except records may be missing the ``id`` field if they don't exist in db)
4968 result = [] # result (list of dict)
4969 record_ids = [] # ids of records to read
4970 updates = {} # {id: dict} of updates on particular records
4972 for command in commands or []:
4973 if not isinstance(command, (list, tuple)):
4974 record_ids.append(command)
4975 elif command[0] == 0:
4976 result.append(command[2])
4977 elif command[0] == 1:
4978 record_ids.append(command[1])
4979 updates.setdefault(command[1], {}).update(command[2])
4980 elif command[0] in (2, 3):
4981 record_ids = [id for id in record_ids if id != command[1]]
4982 elif command[0] == 4:
4983 record_ids.append(command[1])
4984 elif command[0] == 5:
4985 result, record_ids = [], []
4986 elif command[0] == 6:
4987 result, record_ids = [], list(command[2])
4989 # read the records and apply the updates
4990 other_model = self.pool[self._fields[field_name].comodel_name]
4991 for record in other_model.read(cr, uid, record_ids, fields=fields, context=context):
4992 record.update(updates.get(record['id'], {}))
4993 result.append(record)
4997 # for backward compatibility
4998 resolve_o2m_commands_to_record_dicts = resolve_2many_commands
5000 def search_read(self, cr, uid, domain=None, fields=None, offset=0, limit=None, order=None, context=None):
5002 Performs a ``search()`` followed by a ``read()``.
5004 :param cr: database cursor
5005 :param user: current user id
5006 :param domain: Search domain, see ``args`` parameter in ``search()``. Defaults to an empty domain that will match all records.
5007 :param fields: List of fields to read, see ``fields`` parameter in ``read()``. Defaults to all fields.
5008 :param offset: Number of records to skip, see ``offset`` parameter in ``search()``. Defaults to 0.
5009 :param limit: Maximum number of records to return, see ``limit`` parameter in ``search()``. Defaults to no limit.
5010 :param order: Columns to sort result, see ``order`` parameter in ``search()``. Defaults to no sort.
5011 :param context: context arguments.
5012 :return: List of dictionaries containing the asked fields.
5013 :rtype: List of dictionaries.
5016 record_ids = self.search(cr, uid, domain or [], offset=offset, limit=limit, order=order, context=context)
5020 if fields and fields == ['id']:
5021 # shortcut read if we only want the ids
5022 return [{'id': id} for id in record_ids]
5024 # read() ignores active_test, but it would forward it to any downstream search call
5025 # (e.g. for x2m or function fields), and this is not the desired behavior, the flag
5026 # was presumably only meant for the main search().
5027 # TODO: Move this to read() directly?
5028 read_ctx = dict(context or {})
5029 read_ctx.pop('active_test', None)
5031 result = self.read(cr, uid, record_ids, fields, context=read_ctx)
5032 if len(result) <= 1:
5036 index = dict((r['id'], r) for r in result)
5037 return [index[x] for x in record_ids if x in index]
5039 def _register_hook(self, cr):
5040 """ stuff to do right after the registry is built """
5044 def _patch_method(cls, name, method):
5045 """ Monkey-patch a method for all instances of this model. This replaces
5046 the method called `name` by `method` in the given class.
5047 The original method is then accessible via ``method.origin``, and it
5048 can be restored with :meth:`~._revert_method`.
5053 def do_write(self, values):
5054 # do stuff, and call the original method
5055 return do_write.origin(self, values)
5057 # patch method write of model
5058 model._patch_method('write', do_write)
5060 # this will call do_write
5061 records = model.search([...])
5064 # restore the original method
5065 model._revert_method('write')
5067 origin = getattr(cls, name)
5068 method.origin = origin
5069 # propagate decorators from origin to method, and apply api decorator
5070 wrapped = api.guess(api.propagate(origin, method))
5071 wrapped.origin = origin
5072 setattr(cls, name, wrapped)
5075 def _revert_method(cls, name):
5076 """ Revert the original method called `name` in the given class.
5077 See :meth:`~._patch_method`.
5079 method = getattr(cls, name)
5080 setattr(cls, name, method.origin)
5085 # An instance represents an ordered collection of records in a given
5086 # execution environment. The instance object refers to the environment, and
5087 # the records themselves are represented by their cache dictionary. The 'id'
5088 # of each record is found in its corresponding cache dictionary.
5090 # This design has the following advantages:
5091 # - cache access is direct and thus fast;
5092 # - one can consider records without an 'id' (see new records);
5093 # - the global cache is only an index to "resolve" a record 'id'.
5097 def _browse(cls, env, ids):
5098 """ Create an instance attached to `env`; `ids` is a tuple of record
5101 records = object.__new__(cls)
5104 env.prefetch[cls._name].update(ids)
5108 def browse(self, cr, uid, arg=None, context=None):
5109 ids = _normalize_ids(arg)
5110 #assert all(isinstance(id, IdType) for id in ids), "Browsing invalid ids: %s" % ids
5111 return self._browse(Environment(cr, uid, context or {}), ids)
5114 def browse(self, arg=None):
5115 """ browse([ids]) -> records
5117 Returns a recordset for the ids provided as parameter in the current
5120 Can take no ids, a single id or a sequence of ids.
5122 ids = _normalize_ids(arg)
5123 #assert all(isinstance(id, IdType) for id in ids), "Browsing invalid ids: %s" % ids
5124 return self._browse(self.env, ids)
5127 # Internal properties, for manipulating the instance's implementation
5132 """ List of actual record ids in this recordset (ignores placeholder
5133 ids for records to create)
5135 return filter(None, list(self._ids))
5137 # backward-compatibility with former browse records
5138 _cr = property(lambda self: self.env.cr)
5139 _uid = property(lambda self: self.env.uid)
5140 _context = property(lambda self: self.env.context)
5143 # Conversion methods
5146 def ensure_one(self):
5147 """ Verifies that the current recorset holds a single record. Raises
5148 an exception otherwise.
5152 raise except_orm("ValueError", "Expected singleton: %s" % self)
5154 def with_env(self, env):
5155 """ Returns a new version of this recordset attached to the provided
5158 :type env: :class:`~openerp.api.Environment`
5160 return self._browse(env, self._ids)
5162 def sudo(self, user=SUPERUSER_ID):
5163 """ sudo([user=SUPERUSER])
5165 Returns a new version of this recordset attached to the provided
5168 return self.with_env(self.env(user=user))
5170 def with_context(self, *args, **kwargs):
5171 """ with_context([context][, **overrides]) -> records
5173 Returns a new version of this recordset attached to an extended
5176 The extended context is either the provided ``context`` in which
5177 ``overrides`` are merged or the *current* context in which
5178 ``overrides`` are merged e.g.::
5180 # current context is {'key1': True}
5181 r2 = records.with_context({}, key2=True)
5182 # -> r2._context is {'key2': True}
5183 r2 = records.with_context(key2=True)
5184 # -> r2._context is {'key1': True, 'key2': True}
5186 context = dict(args[0] if args else self._context, **kwargs)
5187 return self.with_env(self.env(context=context))
5189 def _convert_to_cache(self, values, update=False, validate=True):
5190 """ Convert the `values` dictionary into cached values.
5192 :param update: whether the conversion is made for updating `self`;
5193 this is necessary for interpreting the commands of *2many fields
5194 :param validate: whether values must be checked
5196 fields = self._fields
5197 target = self if update else self.browse()
5199 name: fields[name].convert_to_cache(value, target, validate=validate)
5200 for name, value in values.iteritems()
5204 def _convert_to_write(self, values):
5205 """ Convert the `values` dictionary into the format of :meth:`write`. """
5206 fields = self._fields
5208 for name, value in values.iteritems():
5210 value = fields[name].convert_to_write(value)
5211 if not isinstance(value, NewId):
5212 result[name] = value
5216 # Record traversal and update
5219 def _mapped_func(self, func):
5220 """ Apply function `func` on all records in `self`, and return the
5221 result as a list or a recordset (if `func` return recordsets).
5223 vals = [func(rec) for rec in self]
5224 val0 = vals[0] if vals else func(self)
5225 if isinstance(val0, BaseModel):
5226 return reduce(operator.or_, vals, val0)
5229 def mapped(self, func):
5230 """ Apply `func` on all records in `self`, and return the result as a
5231 list or a recordset (if `func` return recordsets). In the latter
5232 case, the order of the returned recordset is arbritrary.
5234 :param func: a function or a dot-separated sequence of field names
5236 if isinstance(func, basestring):
5238 for name in func.split('.'):
5239 recs = recs._mapped_func(operator.itemgetter(name))
5242 return self._mapped_func(func)
5244 def _mapped_cache(self, name_seq):
5245 """ Same as `~.mapped`, but `name_seq` is a dot-separated sequence of
5246 field names, and only cached values are used.
5249 for name in name_seq.split('.'):
5250 field = recs._fields[name]
5251 null = field.null(self.env)
5252 recs = recs.mapped(lambda rec: rec._cache.get(field, null))
5255 def filtered(self, func):
5256 """ Select the records in `self` such that `func(rec)` is true, and
5257 return them as a recordset.
5259 :param func: a function or a dot-separated sequence of field names
5261 if isinstance(func, basestring):
5263 func = lambda rec: filter(None, rec.mapped(name))
5264 return self.browse([rec.id for rec in self if func(rec)])
5266 def sorted(self, key=None):
5267 """ Return the recordset `self` ordered by `key` """
5269 return self.search([('id', 'in', self.ids)])
5271 return self.browse(map(int, sorted(self, key=key)))
5273 def update(self, values):
5274 """ Update record `self[0]` with `values`. """
5275 for name, value in values.iteritems():
5279 # New records - represent records that do not exist in the database yet;
5280 # they are used to perform onchanges.
5284 def new(self, values={}):
5285 """ new([values]) -> record
5287 Return a new record instance attached to the current environment and
5288 initialized with the provided ``value``. The record is *not* created
5289 in database, it only exists in memory.
5291 record = self.browse([NewId()])
5292 record._cache.update(record._convert_to_cache(values, update=True))
5294 if record.env.in_onchange:
5295 # The cache update does not set inverse fields, so do it manually.
5296 # This is useful for computing a function field on secondary
5297 # records, if that field depends on the main record.
5299 field = self._fields.get(name)
5301 for invf in field.inverse_fields:
5302 invf._update(record[name], record)
5307 # Dirty flag, to mark records modified (in draft mode)
5312 """ Return whether any record in `self` is dirty. """
5313 dirty = self.env.dirty
5314 return any(record in dirty for record in self)
5317 def _dirty(self, value):
5318 """ Mark the records in `self` as dirty. """
5320 map(self.env.dirty.add, self)
5322 map(self.env.dirty.discard, self)
5328 def __nonzero__(self):
5329 """ Test whether `self` is nonempty. """
5330 return bool(getattr(self, '_ids', True))
5333 """ Return the size of `self`. """
5334 return len(self._ids)
5337 """ Return an iterator over `self`. """
5338 for id in self._ids:
5339 yield self._browse(self.env, (id,))
5341 def __contains__(self, item):
5342 """ Test whether `item` (record or field name) is an element of `self`.
5343 In the first case, the test is fully equivalent to::
5345 any(item == record for record in self)
5347 if isinstance(item, BaseModel) and self._name == item._name:
5348 return len(item) == 1 and item.id in self._ids
5349 elif isinstance(item, basestring):
5350 return item in self._fields
5352 raise except_orm("ValueError", "Mixing apples and oranges: %s in %s" % (item, self))
5354 def __add__(self, other):
5355 """ Return the concatenation of two recordsets. """
5356 if not isinstance(other, BaseModel) or self._name != other._name:
5357 raise except_orm("ValueError", "Mixing apples and oranges: %s + %s" % (self, other))
5358 return self.browse(self._ids + other._ids)
5360 def __sub__(self, other):
5361 """ Return the recordset of all the records in `self` that are not in `other`. """
5362 if not isinstance(other, BaseModel) or self._name != other._name:
5363 raise except_orm("ValueError", "Mixing apples and oranges: %s - %s" % (self, other))
5364 other_ids = set(other._ids)
5365 return self.browse([id for id in self._ids if id not in other_ids])
5367 def __and__(self, other):
5368 """ Return the intersection of two recordsets.
5369 Note that recordset order is not preserved.
5371 if not isinstance(other, BaseModel) or self._name != other._name:
5372 raise except_orm("ValueError", "Mixing apples and oranges: %s & %s" % (self, other))
5373 return self.browse(set(self._ids) & set(other._ids))
5375 def __or__(self, other):
5376 """ Return the union of two recordsets.
5377 Note that recordset order is not preserved.
5379 if not isinstance(other, BaseModel) or self._name != other._name:
5380 raise except_orm("ValueError", "Mixing apples and oranges: %s | %s" % (self, other))
5381 return self.browse(set(self._ids) | set(other._ids))
5383 def __eq__(self, other):
5384 """ Test whether two recordsets are equivalent (up to reordering). """
5385 if not isinstance(other, BaseModel):
5387 _logger.warning("Comparing apples and oranges: %s == %s", self, other)
5389 return self._name == other._name and set(self._ids) == set(other._ids)
5391 def __ne__(self, other):
5392 return not self == other
5394 def __lt__(self, other):
5395 if not isinstance(other, BaseModel) or self._name != other._name:
5396 raise except_orm("ValueError", "Mixing apples and oranges: %s < %s" % (self, other))
5397 return set(self._ids) < set(other._ids)
5399 def __le__(self, other):
5400 if not isinstance(other, BaseModel) or self._name != other._name:
5401 raise except_orm("ValueError", "Mixing apples and oranges: %s <= %s" % (self, other))
5402 return set(self._ids) <= set(other._ids)
5404 def __gt__(self, other):
5405 if not isinstance(other, BaseModel) or self._name != other._name:
5406 raise except_orm("ValueError", "Mixing apples and oranges: %s > %s" % (self, other))
5407 return set(self._ids) > set(other._ids)
5409 def __ge__(self, other):
5410 if not isinstance(other, BaseModel) or self._name != other._name:
5411 raise except_orm("ValueError", "Mixing apples and oranges: %s >= %s" % (self, other))
5412 return set(self._ids) >= set(other._ids)
5418 return "%s%s" % (self._name, getattr(self, '_ids', ""))
5420 def __unicode__(self):
5421 return unicode(str(self))
5426 if hasattr(self, '_ids'):
5427 return hash((self._name, frozenset(self._ids)))
5429 return hash(self._name)
5431 def __getitem__(self, key):
5432 """ If `key` is an integer or a slice, return the corresponding record
5433 selection as an instance (attached to `self.env`).
5434 Otherwise read the field `key` of the first record in `self`.
5438 inst = model.search(dom) # inst is a recordset
5439 r4 = inst[3] # fourth record in inst
5440 rs = inst[10:20] # subset of inst
5441 nm = rs['name'] # name of first record in inst
5443 if isinstance(key, basestring):
5444 # important: one must call the field's getter
5445 return self._fields[key].__get__(self, type(self))
5446 elif isinstance(key, slice):
5447 return self._browse(self.env, self._ids[key])
5449 return self._browse(self.env, (self._ids[key],))
5451 def __setitem__(self, key, value):
5452 """ Assign the field `key` to `value` in record `self`. """
5453 # important: one must call the field's setter
5454 return self._fields[key].__set__(self, value)
5457 # Cache and recomputation management
5462 """ Return the cache of `self`, mapping field names to values. """
5463 return RecordCache(self)
5466 def _in_cache_without(self, field):
5467 """ Make sure `self` is present in cache (for prefetching), and return
5468 the records of model `self` in cache that have no value for `field`
5469 (:class:`Field` instance).
5472 prefetch_ids = env.prefetch[self._name]
5473 prefetch_ids.update(self._ids)
5474 ids = filter(None, prefetch_ids - set(env.cache[field]))
5475 return self.browse(ids)
5479 """ Clear the records cache.
5482 The record cache is automatically invalidated.
5484 self.invalidate_cache()
5487 def invalidate_cache(self, fnames=None, ids=None):
5488 """ Invalidate the record caches after some records have been modified.
5489 If both `fnames` and `ids` are ``None``, the whole cache is cleared.
5491 :param fnames: the list of modified fields, or ``None`` for all fields
5492 :param ids: the list of modified record ids, or ``None`` for all
5496 return self.env.invalidate_all()
5497 fields = self._fields.values()
5499 fields = map(self._fields.__getitem__, fnames)
5501 # invalidate fields and inverse fields, too
5502 spec = [(f, ids) for f in fields] + \
5503 [(invf, None) for f in fields for invf in f.inverse_fields]
5504 self.env.invalidate(spec)
5507 def modified(self, fnames):
5508 """ Notify that fields have been modified on `self`. This invalidates
5509 the cache, and prepares the recomputation of stored function fields
5510 (new-style fields only).
5512 :param fnames: iterable of field names that have been modified on
5515 # each field knows what to invalidate and recompute
5517 for fname in fnames:
5518 spec += self._fields[fname].modified(self)
5522 for env in self.env.all
5523 for field in env.cache
5525 # invalidate non-stored fields.function which are currently cached
5526 spec += [(f, None) for f in self.pool.pure_function_fields
5527 if f in cached_fields]
5529 self.env.invalidate(spec)
5531 def _recompute_check(self, field):
5532 """ If `field` must be recomputed on some record in `self`, return the
5533 corresponding records that must be recomputed.
5535 return self.env.check_todo(field, self)
5537 def _recompute_todo(self, field):
5538 """ Mark `field` to be recomputed. """
5539 self.env.add_todo(field, self)
5541 def _recompute_done(self, field):
5542 """ Mark `field` as recomputed. """
5543 self.env.remove_todo(field, self)
5546 def recompute(self):
5547 """ Recompute stored function fields. The fields and records to
5548 recompute have been determined by method :meth:`modified`.
5550 while self.env.has_todo():
5551 field, recs = self.env.get_todo()
5552 # evaluate the fields to recompute, and save them to database
5553 for rec, rec1 in zip(recs, recs.with_context(recompute=False)):
5555 values = rec._convert_to_write({
5556 f.name: rec[f.name] for f in field.computed_fields
5559 except MissingError:
5561 # mark the computed fields as done
5562 map(recs._recompute_done, field.computed_fields)
5565 # Generic onchange method
5568 def _has_onchange(self, field, other_fields):
5569 """ Return whether `field` should trigger an onchange event in the
5570 presence of `other_fields`.
5572 # test whether self has an onchange method for field, or field is a
5573 # dependency of any field in other_fields
5574 return field.name in self._onchange_methods or \
5575 any(dep in other_fields for dep in field.dependents)
5578 def _onchange_spec(self, view_info=None):
5579 """ Return the onchange spec from a view description; if not given, the
5580 result of ``self.fields_view_get()`` is used.
5584 # for traversing the XML arch and populating result
5585 def process(node, info, prefix):
5586 if node.tag == 'field':
5587 name = node.attrib['name']
5588 names = "%s.%s" % (prefix, name) if prefix else name
5589 if not result.get(names):
5590 result[names] = node.attrib.get('on_change')
5591 # traverse the subviews included in relational fields
5592 for subinfo in info['fields'][name].get('views', {}).itervalues():
5593 process(etree.fromstring(subinfo['arch']), subinfo, names)
5596 process(child, info, prefix)
5598 if view_info is None:
5599 view_info = self.fields_view_get()
5600 process(etree.fromstring(view_info['arch']), view_info, '')
5603 def _onchange_eval(self, field_name, onchange, result):
5604 """ Apply onchange method(s) for field `field_name` with spec `onchange`
5605 on record `self`. Value assignments are applied on `self`, while
5606 domain and warning messages are put in dictionary `result`.
5608 onchange = onchange.strip()
5611 if onchange in ("1", "true"):
5612 for method in self._onchange_methods.get(field_name, ()):
5613 method_res = method(self)
5616 if 'domain' in method_res:
5617 result.setdefault('domain', {}).update(method_res['domain'])
5618 if 'warning' in method_res:
5619 result['warning'] = method_res['warning']
5623 match = onchange_v7.match(onchange)
5625 method, params = match.groups()
5627 # evaluate params -> tuple
5628 global_vars = {'context': self._context, 'uid': self._uid}
5629 if self._context.get('field_parent'):
5630 class RawRecord(object):
5631 def __init__(self, record):
5632 self._record = record
5633 def __getattr__(self, name):
5634 field = self._record._fields[name]
5635 value = self._record[name]
5636 return field.convert_to_onchange(value)
5637 record = self[self._context['field_parent']]
5638 global_vars['parent'] = RawRecord(record)
5640 key: self._fields[key].convert_to_onchange(val)
5641 for key, val in self._cache.iteritems()
5643 params = eval("[%s]" % params, global_vars, field_vars)
5645 # call onchange method
5646 args = (self._cr, self._uid, self._origin.ids) + tuple(params)
5647 method_res = getattr(self._model, method)(*args)
5648 if not isinstance(method_res, dict):
5650 if 'value' in method_res:
5651 method_res['value'].pop('id', None)
5652 self.update(self._convert_to_cache(method_res['value'], validate=False))
5653 if 'domain' in method_res:
5654 result.setdefault('domain', {}).update(method_res['domain'])
5655 if 'warning' in method_res:
5656 result['warning'] = method_res['warning']
5659 def onchange(self, values, field_name, field_onchange):
5660 """ Perform an onchange on the given field.
5662 :param values: dictionary mapping field names to values, giving the
5663 current state of modification
5664 :param field_name: name of the modified field_name
5665 :param field_onchange: dictionary mapping field names to their
5670 if field_name and field_name not in self._fields:
5673 # determine subfields for field.convert_to_write() below
5675 subfields = defaultdict(set)
5676 for dotname in field_onchange:
5678 secondary.append(dotname)
5679 name, subname = dotname.split('.')
5680 subfields[name].add(subname)
5682 # create a new record with values, and attach `self` to it
5683 with env.do_in_onchange():
5684 record = self.new(values)
5685 values = dict(record._cache)
5686 # attach `self` with a different context (for cache consistency)
5687 record._origin = self.with_context(__onchange=True)
5689 # determine which field should be triggered an onchange
5690 todo = set([field_name]) if field_name else set(values)
5693 # dummy assignment: trigger invalidations on the record
5695 value = record[name]
5696 field = self._fields[name]
5697 if not field_name and field.type == 'many2one' and field.delegate and not value:
5698 # do not nullify all fields of parent record for new records
5700 record[name] = value
5702 result = {'value': {}}
5710 with env.do_in_onchange():
5711 # apply field-specific onchange methods
5712 if field_onchange.get(name):
5713 record._onchange_eval(name, field_onchange[name], result)
5715 # force re-evaluation of function fields on secondary records
5716 for field_seq in secondary:
5717 record.mapped(field_seq)
5719 # determine which fields have been modified
5720 for name, oldval in values.iteritems():
5721 field = self._fields[name]
5722 newval = record[name]
5723 if field.type in ('one2many', 'many2many'):
5724 if newval != oldval or newval._dirty:
5725 # put new value in result
5726 result['value'][name] = field.convert_to_write(
5727 newval, record._origin, subfields.get(name),
5731 # keep result: newval may have been dirty before
5734 if newval != oldval:
5735 # put new value in result
5736 result['value'][name] = field.convert_to_write(
5737 newval, record._origin, subfields.get(name),
5741 # clean up result to not return another value
5742 result['value'].pop(name, None)
5744 # At the moment, the client does not support updates on a *2many field
5745 # while this one is modified by the user.
5746 if field_name and self._fields[field_name].type in ('one2many', 'many2many'):
5747 result['value'].pop(field_name, None)
5752 class RecordCache(MutableMapping):
5753 """ Implements a proxy dictionary to read/update the cache of a record.
5754 Upon iteration, it looks like a dictionary mapping field names to
5755 values. However, fields may be used as keys as well.
5757 def __init__(self, records):
5758 self._recs = records
5760 def contains(self, field):
5761 """ Return whether `records[0]` has a value for `field` in cache. """
5762 if isinstance(field, basestring):
5763 field = self._recs._fields[field]
5764 return self._recs.id in self._recs.env.cache[field]
5766 def __contains__(self, field):
5767 """ Return whether `records[0]` has a regular value for `field` in cache. """
5768 if isinstance(field, basestring):
5769 field = self._recs._fields[field]
5770 dummy = SpecialValue(None)
5771 value = self._recs.env.cache[field].get(self._recs.id, dummy)
5772 return not isinstance(value, SpecialValue)
5774 def __getitem__(self, field):
5775 """ Return the cached value of `field` for `records[0]`. """
5776 if isinstance(field, basestring):
5777 field = self._recs._fields[field]
5778 value = self._recs.env.cache[field][self._recs.id]
5779 return value.get() if isinstance(value, SpecialValue) else value
5781 def __setitem__(self, field, value):
5782 """ Assign the cached value of `field` for all records in `records`. """
5783 if isinstance(field, basestring):
5784 field = self._recs._fields[field]
5785 values = dict.fromkeys(self._recs._ids, value)
5786 self._recs.env.cache[field].update(values)
5788 def update(self, *args, **kwargs):
5789 """ Update the cache of all records in `records`. If the argument is a
5790 `SpecialValue`, update all fields (except "magic" columns).
5792 if args and isinstance(args[0], SpecialValue):
5793 values = dict.fromkeys(self._recs._ids, args[0])
5794 for name, field in self._recs._fields.iteritems():
5796 self._recs.env.cache[field].update(values)
5798 return super(RecordCache, self).update(*args, **kwargs)
5800 def __delitem__(self, field):
5801 """ Remove the cached value of `field` for all `records`. """
5802 if isinstance(field, basestring):
5803 field = self._recs._fields[field]
5804 field_cache = self._recs.env.cache[field]
5805 for id in self._recs._ids:
5806 field_cache.pop(id, None)
5809 """ Iterate over the field names with a regular value in cache. """
5810 cache, id = self._recs.env.cache, self._recs.id
5811 dummy = SpecialValue(None)
5812 for name, field in self._recs._fields.iteritems():
5813 if name != 'id' and not isinstance(cache[field].get(id, dummy), SpecialValue):
5817 """ Return the number of fields with a regular value in cache. """
5818 return sum(1 for name in self)
5820 class Model(BaseModel):
5821 """Main super-class for regular database-persisted OpenERP models.
5823 OpenERP models are created by inheriting from this class::
5828 The system will later instantiate the class once per database (on
5829 which the class' module is installed).
5832 _register = False # not visible in ORM registry, meant to be python-inherited only
5833 _transient = False # True in a TransientModel
5835 class TransientModel(BaseModel):
5836 """Model super-class for transient records, meant to be temporarily
5837 persisted, and regularly vaccuum-cleaned.
5839 A TransientModel has a simplified access rights management,
5840 all users can create new records, and may only access the
5841 records they created. The super-user has unrestricted access
5842 to all TransientModel records.
5845 _register = False # not visible in ORM registry, meant to be python-inherited only
5848 class AbstractModel(BaseModel):
5849 """Abstract Model super-class for creating an abstract class meant to be
5850 inherited by regular models (Models or TransientModels) but not meant to
5851 be usable on its own, or persisted.
5853 Technical note: we don't want to make AbstractModel the super-class of
5854 Model or BaseModel because it would not make sense to put the main
5855 definition of persistence methods such as create() in it, and still we
5856 should be able to override them within an AbstractModel.
5858 _auto = False # don't create any database backend for AbstractModels
5859 _register = False # not visible in ORM registry, meant to be python-inherited only
5862 def itemgetter_tuple(items):
5863 """ Fixes itemgetter inconsistency (useful in some cases) of not returning
5864 a tuple if len(items) == 1: always returns an n-tuple where n = len(items)
5869 return lambda gettable: (gettable[items[0]],)
5870 return operator.itemgetter(*items)
5872 def convert_pgerror_23502(model, fields, info, e):
5873 m = re.match(r'^null value in column "(?P<field>\w+)" violates '
5874 r'not-null constraint\n',
5876 field_name = m and m.group('field')
5877 if not m or field_name not in fields:
5878 return {'message': unicode(e)}
5879 message = _(u"Missing required value for the field '%s'.") % field_name
5880 field = fields.get(field_name)
5882 message = _(u"Missing required value for the field '%s' (%s)") % (field['string'], field_name)
5885 'field': field_name,
5888 def convert_pgerror_23505(model, fields, info, e):
5889 m = re.match(r'^duplicate key (?P<field>\w+) violates unique constraint',
5891 field_name = m and m.group('field')
5892 if not m or field_name not in fields:
5893 return {'message': unicode(e)}
5894 message = _(u"The value for the field '%s' already exists.") % field_name
5895 field = fields.get(field_name)
5897 message = _(u"%s This might be '%s' in the current model, or a field "
5898 u"of the same name in an o2m.") % (message, field['string'])
5901 'field': field_name,
5904 PGERROR_TO_OE = defaultdict(
5905 # shape of mapped converters
5906 lambda: (lambda model, fvg, info, pgerror: {'message': unicode(pgerror)}), {
5907 # not_null_violation
5908 '23502': convert_pgerror_23502,
5909 # unique constraint error
5910 '23505': convert_pgerror_23505,
5913 def _normalize_ids(arg, atoms={int, long, str, unicode, NewId}):
5914 """ Normalizes the ids argument for ``browse`` (v7 and v8) to a tuple.
5916 Various implementations were tested on the corpus of all browse() calls
5917 performed during a full crawler run (after having installed all website_*
5918 modules) and this one was the most efficient overall.
5920 A possible bit of correctness was sacrificed by not doing any test on
5921 Iterable and just assuming that any non-atomic type was an iterable of
5926 # much of the corpus is falsy objects (empty list, tuple or set, None)
5930 # `type in set` is significantly faster (because more restrictive) than
5931 # isinstance(arg, set) or issubclass(type, set); and for new-style classes
5932 # obj.__class__ is equivalent to but faster than type(obj). Not relevant
5933 # (and looks much worse) in most cases, but over millions of calls it
5934 # does have a very minor effect.
5935 if arg.__class__ in atoms:
5940 # keep those imports here to avoid dependency cycle errors
5941 from .osv import expression
5942 from .fields import Field, SpecialValue, FailedValue
5944 # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: