# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

"""
    Object Relational Mapping module:
     * Hierarchical structure
     * Constraints consistency and validation
     * Object metadata depends on its status
     * Optimised processing by complex query (multiple actions at once)
     * Default field values
     * Permissions optimisation
     * Persistent object: DB postgresql
     * Data conversion
     * Multi-level caching system
     * Two different inheritance mechanisms
     * Rich set of field types:
          - classical (varchar, integer, boolean, ...)
          - relational (one2many, many2one, many2many)
          - functional
"""

import datetime
import itertools
import logging
import pickle
import re
from collections import defaultdict, MutableMapping
from inspect import getmembers

import dateutil.relativedelta
import psycopg2
from lxml import etree

import openerp
from . import SUPERUSER_ID
from . import api
from . import tools
from .api import Environment
from .exceptions import except_orm, AccessError, MissingError, ValidationError
from .osv import fields
from .osv.query import Query
from .tools import lazy_property, ormcache
from .tools.config import config
from .tools.misc import CountingStream, DEFAULT_SERVER_DATETIME_FORMAT, DEFAULT_SERVER_DATE_FORMAT
from .tools.safe_eval import safe_eval as eval
from .tools.translate import _

_logger = logging.getLogger(__name__)
_schema = logging.getLogger(__name__ + '.schema')

regex_order = re.compile('^( *([a-z0-9:_]+|"[a-z0-9:_]+")( *desc| *asc)?( *, *|))+$', re.I)
regex_object_name = re.compile(r'^[a-z0-9_.]+$')
onchange_v7 = re.compile(r"^(\w+)\((.*)\)$")
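
# Illustrative sketch (not part of the original source): `onchange_v7` splits
# an old-style onchange specification into a method name and its raw argument
# string, e.g.:
#
#     >>> onchange_v7.match("onchange_partner_id(partner_id, company_id)").groups()
#     ('onchange_partner_id', 'partner_id, company_id')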

AUTOINIT_RECALCULATE_STORED_FIELDS = 1000

def check_object_name(name):
    """ Check if the given name is a valid openerp object name.

        The _name attribute in osv and osv_memory objects is subject to
        some restrictions. This function returns True if the given name
        is allowed, and False otherwise.

        TODO: this is an approximation. The goal in this approximation
        is to disallow uppercase characters (in some places, we quote
        table/column names and in others not, which leads to this kind
        of error:

            psycopg2.ProgrammingError: relation "xxx" does not exist).

        The same restriction should apply to both osv and osv_memory
        objects for consistency.
    """
    if regex_object_name.match(name) is None:
        return False
    return True
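
# Illustrative examples (assumed, based on regex_object_name above):
#
#     >>> check_object_name('res.partner')
#     True
#     >>> check_object_name('Res.Partner')    # uppercase is not allowed
#     False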

def raise_on_invalid_object_name(name):
    if not check_object_name(name):
        msg = "The _name attribute %s is not valid." % name
        raise except_orm('ValueError', msg)

POSTGRES_CONFDELTYPES = {
    'RESTRICT': 'r',
    'NO ACTION': 'a',
    'CASCADE': 'c',
    'SET NULL': 'n',
    'SET DEFAULT': 'd',
}

def intersect(la, lb):
    return filter(lambda x: x in lb, la)

def same_name(f, g):
    """ Test whether functions `f` and `g` are identical or have the same name """
    return f == g or getattr(f, '__name__', 0) == getattr(g, '__name__', 1)

def fix_import_export_id_paths(fieldname):
    """
    Fixes the id fields in import and exports, and splits field paths
    on '/'.

    :param str fieldname: name of the field to import/export
    :return: split field name
    :rtype: list of str
    """
    fixed_db_id = re.sub(r'([^/])\.id', r'\1/.id', fieldname)
    fixed_external_id = re.sub(r'([^/]):id', r'\1/id', fixed_db_id)
    return fixed_external_id.split('/')
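
# Illustrative examples (assumed, derived from the substitutions above):
#
#     >>> fix_import_export_id_paths('order_line.id')
#     ['order_line', '.id']
#     >>> fix_import_export_id_paths('partner_id:id')
#     ['partner_id', 'id']
#     >>> fix_import_export_id_paths('order_line/product_id/id')
#     ['order_line', 'product_id', 'id']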

def pg_varchar(size=0):
    """ Returns the VARCHAR declaration for the provided size:

    * If no size (or an empty or negative size) is provided, return an
      'infinite' VARCHAR
    * Otherwise return a VARCHAR(n)

    :type int size: varchar size, optional
    :rtype: str
    """
    if size:
        if not isinstance(size, int):
            raise TypeError("VARCHAR parameter should be an int, got %s"
                            % type(size))
        if size > 0:
            return 'VARCHAR(%d)' % size
    return 'VARCHAR'
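
# Illustrative examples (assumed):
#
#     >>> pg_varchar(16)
#     'VARCHAR(16)'
#     >>> pg_varchar()       # no size: "infinite" varchar
#     'VARCHAR'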

FIELDS_TO_PGTYPES = {
    fields.boolean: 'bool',
    fields.integer: 'int4',
    fields.text: 'text',
    fields.html: 'text',
    fields.date: 'date',
    fields.datetime: 'timestamp',
    fields.binary: 'bytea',
    fields.many2one: 'int4',
    fields.serialized: 'text',
}

def get_pg_type(f, type_override=None):
    """
    :param fields._column f: field to get a Postgres type for
    :param type type_override: use the provided type for dispatching instead of the field's own type
    :returns: (postgres_identification_type, postgres_type_specification)
    :rtype: (str, str)
    """
    field_type = type_override or type(f)

    if field_type in FIELDS_TO_PGTYPES:
        pg_type = (FIELDS_TO_PGTYPES[field_type], FIELDS_TO_PGTYPES[field_type])
    elif issubclass(field_type, fields.float):
        # Explicit support for "falsy" digits (0, False) to indicate a
        # NUMERIC field with no fixed precision. The values will be saved
        # in the database with all significant digits.
        # FLOAT8 type is still the default when there is no precision because
        # it is faster for most operations (sums, etc.)
        if f.digits is not None:
            pg_type = ('numeric', 'NUMERIC')
        else:
            pg_type = ('float8', 'DOUBLE PRECISION')
    elif issubclass(field_type, (fields.char, fields.reference)):
        pg_type = ('varchar', pg_varchar(f.size))
    elif issubclass(field_type, fields.selection):
        if (isinstance(f.selection, list) and isinstance(f.selection[0][0], int))\
                or getattr(f, 'size', None) == -1:
            pg_type = ('int4', 'INTEGER')
        else:
            pg_type = ('varchar', pg_varchar(getattr(f, 'size', None)))
    elif issubclass(field_type, fields.function):
        if f._type == 'selection':
            pg_type = ('varchar', pg_varchar())
        else:
            pg_type = get_pg_type(f, getattr(fields, f._type))
    else:
        _logger.warning('%s type not supported!', field_type)
        pg_type = None

    return pg_type
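
# Illustrative examples (assumed; fields.char and fields.float shown with
# minimal constructor arguments):
#
#     >>> get_pg_type(fields.char(size=64))
#     ('varchar', 'VARCHAR(64)')
#     >>> get_pg_type(fields.float())        # no digits: double precision
#     ('float8', 'DOUBLE PRECISION')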


class MetaModel(api.Meta):
    """ Metaclass for the models.

    This class is used as the metaclass for the class :class:`BaseModel` to
    discover the models defined in a module (without instantiating them).
    If the automatic discovery is not needed, it is possible to set the model's
    ``_register`` attribute to False.
    """

    module_to_models = {}

    def __init__(self, name, bases, attrs):
        if not self._register:
            self._register = True
            super(MetaModel, self).__init__(name, bases, attrs)
            return

        if not hasattr(self, '_module'):
            # The (OpenERP) module name can be in the `openerp.addons` namespace
            # or not. For instance, module `sale` can be imported as
            # `openerp.addons.sale` (the right way) or `sale` (for backward
            # compatibility).
            module_parts = self.__module__.split('.')
            if len(module_parts) > 2 and module_parts[:2] == ['openerp', 'addons']:
                module_name = self.__module__.split('.')[2]
            else:
                module_name = self.__module__.split('.')[0]
            self._module = module_name

        # Remember which models to instantiate for this module.
        self.module_to_models.setdefault(self._module, []).append(self)

        # transform columns into new-style fields (enables field inheritance)
        for name, column in self._columns.iteritems():
            if name in self.__dict__:
                _logger.warning("In class %s, field %r overriding an existing value", self, name)
            setattr(self, name, column.to_field())
252 """ Pseudo-ids for new records. """
253 def __nonzero__(self):
256 IdType = (int, long, basestring, NewId)
259 # maximum number of prefetched records
262 # special columns automatically created by the ORM
263 LOG_ACCESS_COLUMNS = ['create_uid', 'create_date', 'write_uid', 'write_date']
264 MAGIC_COLUMNS = ['id'] + LOG_ACCESS_COLUMNS


class BaseModel(object):
    """ Base class for OpenERP models.

    OpenERP models are created by inheriting from this class' subclasses:

    * :class:`Model` for regular database-persisted models

    * :class:`TransientModel` for temporary data, stored in the database but
      automatically vacuumed every so often

    * :class:`AbstractModel` for abstract super classes meant to be shared by
      multiple inheriting models

    The system automatically instantiates every model once per database. Those
    instances represent the available models on each database, and depend on
    which modules are installed on that database. The actual class of each
    instance is built from the Python classes that create and inherit from the
    model.

    Every model instance is a "recordset", i.e., an ordered collection of
    records of the model. Recordsets are returned by methods like
    :meth:`~.browse`, :meth:`~.search`, or field accesses. Records have no
    explicit representation: a record is represented as a recordset of one
    record.

    To create a class that should not be instantiated, the _register class
    attribute may be set to False.
    """
    __metaclass__ = MetaModel
    _auto = True  # create database backend
    _register = False  # set to False if the model shouldn't be automatically discovered
    _name = None
    _columns = {}
    _constraints = []
    _custom = False
    _defaults = {}
    _rec_name = None
    _parent_name = 'parent_id'
    _parent_store = False
    _parent_order = False
    _date_name = 'date'
    _order = 'id'
    _sequence = None
    _description = None
    _needaction = False
    _translate = True  # set to False to disable translations export for this model

    # dict of {field: method}, with method returning the (name_get of records, {id: fold})
    # to include in the _read_group, if grouped on this field
    _group_by_full = {}

    # Transience
    _transient = False  # True in a TransientModel

    # structure:
    #   { 'parent_model': 'm2o_field', ... }
    _inherits = {}

    # Mapping from inherits'd field name to triple (m, r, f, n) where m is the
    # model from which it is inherits'd, r is the (local) field towards m, f
    # is the _column object itself, and n is the original (i.e. top-most)
    # parent model.
    # Example:
    #   { 'field_name': ('parent_model', 'm2o_field_to_reach_parent',
    #                    field_column_obj, original_parent_model), ... }
    _inherit_fields = {}

    # Mapping field name/column_info object
    # This is similar to _inherit_fields but:
    # 1. includes self fields,
    # 2. uses column_info instead of a triple.
    # Warning: _all_columns is deprecated, use _fields instead
    _all_columns = {}

    _table = None
    _log_create = False
    _sql_constraints = []

    # model dependencies, for models backed up by sql views:
    # {model_name: field_names, ...}
    _depends = {}

    CONCURRENCY_CHECK_FIELD = '__last_update'

    def log(self, cr, uid, id, message, secondary=False, context=None):
        return _logger.warning("log() is deprecated. Please use the OpenChatter notification system instead of the res.log mechanism.")

    def view_init(self, cr, uid, fields_list, context=None):
        """Override this method to do specific things when a view on the object is opened."""
        pass

    def _field_create(self, cr, context=None):
        """ Create entries in ir_model_fields for all the model's fields.

        If necessary, also create an entry in ir_model, and if called from the
        modules loading scheme (by receiving 'module' in the context), also
        create entries in ir_model_data (for the model and the fields).

        - create an entry in ir_model (if there is not already one),
        - create an entry in ir_model_data (if there is not already one, and if
          'module' is in the context),
        - update ir_model_fields with the fields found in _columns
          (TODO there is some redundancy as _columns is updated from
          ir_model_fields in __init__).

        """
        if context is None:
            context = {}
        cr.execute("SELECT id FROM ir_model WHERE model=%s", (self._name,))
        if not cr.rowcount:
            cr.execute('SELECT nextval(%s)', ('ir_model_id_seq',))
            model_id = cr.fetchone()[0]
            cr.execute("INSERT INTO ir_model (id, model, name, info, state) VALUES (%s, %s, %s, %s, %s)", (model_id, self._name, self._description, self.__doc__, 'base'))
        else:
            model_id = cr.fetchone()[0]
        if 'module' in context:
            name_id = 'model_' + self._name.replace('.', '_')
            cr.execute('SELECT * FROM ir_model_data WHERE name=%s AND module=%s', (name_id, context['module']))
            if not cr.rowcount:
                cr.execute("INSERT INTO ir_model_data (name, date_init, date_update, module, model, res_id) VALUES (%s, (now() at time zone 'UTC'), (now() at time zone 'UTC'), %s, %s, %s)",
                    (name_id, context['module'], 'ir.model', model_id)
                )

        cr.execute("SELECT * FROM ir_model_fields WHERE model=%s", (self._name,))
        cols = {}
        for rec in cr.dictfetchall():
            cols[rec['name']] = rec

        ir_model_fields_obj = self.pool.get('ir.model.fields')

        # sparse fields should be created at the end, as they depend on their serialized field already existing
        model_fields = sorted(self._columns.items(), key=lambda x: 1 if x[1]._type == 'sparse' else 0)
        for (k, f) in model_fields:
            vals = {
                'model_id': model_id,
                'model': self._name,
                'name': k,
                'field_description': f.string,
                'ttype': f._type,
                'relation': f._obj or '',
                'select_level': tools.ustr(int(f.select)),
                'readonly': (f.readonly and 1) or 0,
                'required': (f.required and 1) or 0,
                'selectable': (f.selectable and 1) or 0,
                'translate': (f.translate and 1) or 0,
                'relation_field': f._fields_id if isinstance(f, fields.one2many) else '',
                'serialization_field_id': None,
            }
            if getattr(f, 'serialization_field', None):
                # resolve link to serialization_field if specified by name
                serialization_field_id = ir_model_fields_obj.search(cr, SUPERUSER_ID, [('model', '=', vals['model']), ('name', '=', f.serialization_field)])
                if not serialization_field_id:
                    raise except_orm(_('Error'), _("Serialization field `%s` not found for sparse field `%s`!") % (f.serialization_field, k))
                vals['serialization_field_id'] = serialization_field_id[0]

            # When it's a custom field, it does not contain f.select
            if context.get('field_state', 'base') == 'manual':
                if context.get('field_name', '') == k:
                    vals['select_level'] = context.get('select', '0')
                # setting the value so that the problem does NOT occur next time
                elif k in cols:
                    vals['select_level'] = cols[k]['select_level']

            if k not in cols:
                cr.execute('SELECT nextval(%s)', ('ir_model_fields_id_seq',))
                id = cr.fetchone()[0]
                vals['id'] = id
                cr.execute("""INSERT INTO ir_model_fields (
                    id, model_id, model, name, field_description, ttype,
                    relation, state, select_level, relation_field, translate, serialization_field_id
                ) VALUES (
                    %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s
                )""", (
                    id, vals['model_id'], vals['model'], vals['name'], vals['field_description'], vals['ttype'],
                    vals['relation'], 'base',
                    vals['select_level'], vals['relation_field'], bool(vals['translate']), vals['serialization_field_id']
                ))
                if 'module' in context:
                    name1 = 'field_' + self._table + '_' + k
                    cr.execute("SELECT name FROM ir_model_data WHERE name=%s", (name1,))
                    if cr.fetchone():
                        name1 = name1 + "_" + str(id)
                    cr.execute("INSERT INTO ir_model_data (name, date_init, date_update, module, model, res_id) VALUES (%s, (now() at time zone 'UTC'), (now() at time zone 'UTC'), %s, %s, %s)",
                        (name1, context['module'], 'ir.model.fields', id)
                    )
            else:
                for key, val in vals.items():
                    if cols[k][key] != vals[key]:
                        cr.execute('UPDATE ir_model_fields SET field_description=%s WHERE model=%s AND name=%s', (vals['field_description'], vals['model'], vals['name']))
                        cr.execute("""UPDATE ir_model_fields SET
                            model_id=%s, field_description=%s, ttype=%s, relation=%s,
                            select_level=%s, readonly=%s, required=%s, selectable=%s, relation_field=%s, translate=%s, serialization_field_id=%s
                        WHERE
                            model=%s AND name=%s""", (
                                vals['model_id'], vals['field_description'], vals['ttype'], vals['relation'],
                                vals['select_level'], bool(vals['readonly']), bool(vals['required']), bool(vals['selectable']), vals['relation_field'], bool(vals['translate']), vals['serialization_field_id'], vals['model'], vals['name']
                            ))
                        break
        self.invalidate_cache(cr, SUPERUSER_ID)

    @classmethod
    def _add_field(cls, name, field):
        """ Add the given `field` under the given `name` in the class """
        # add field as an attribute and in cls._fields (for reflection)
        if not isinstance(getattr(cls, name, field), Field):
            _logger.warning("In model %r, field %r overriding existing value", cls._name, name)
        setattr(cls, name, field)
        cls._fields[name] = field

        # basic setup of field
        field.set_class_name(cls, name)

        if field.store or field.column:
            cls._columns[name] = field.to_column()
        else:
            # remove potential column that may be overridden by field
            cls._columns.pop(name, None)

    @classmethod
    def _pop_field(cls, name):
        """ Remove the field with the given `name` from the model.
            This method should only be used for manual fields.
        """
        field = cls._fields.pop(name)
        cls._columns.pop(name, None)
        cls._all_columns.pop(name, None)
        if hasattr(cls, name):
            delattr(cls, name)
        return field

    @classmethod
    def _add_magic_fields(cls):
        """ Introduce magic fields on the current class

        * id is a "normal" field (with a specific getter)
        * create_uid, create_date, write_uid and write_date have become
          "normal" fields
        * $CONCURRENCY_CHECK_FIELD is a computed field with its computing
          method defined dynamically. Uses ``str(datetime.datetime.utcnow())``
          to get the same structure as the previous
          ``(now() at time zone 'UTC')::timestamp``::

              # select (now() at time zone 'UTC')::timestamp;
                        timezone
              ----------------------------
               2013-06-18 08:30:37.292809

              >>> str(datetime.datetime.utcnow())
              '2013-06-18 08:31:32.821177'
        """
        def add(name, field):
            """ add `field` with the given `name` if it does not exist yet """
            if name not in cls._fields:
                cls._add_field(name, field)

        # cyclic import
        from . import fields

        # this field 'id' must override any other column or field
        cls._add_field('id', fields.Id(automatic=True))

        add('display_name', fields.Char(string='Display Name', automatic=True,
            compute='_compute_display_name'))

        if cls._log_access:
            add('create_uid', fields.Many2one('res.users', string='Created by', automatic=True))
            add('create_date', fields.Datetime(string='Created on', automatic=True))
            add('write_uid', fields.Many2one('res.users', string='Last Updated by', automatic=True))
            add('write_date', fields.Datetime(string='Last Updated on', automatic=True))
            last_modified_name = 'compute_concurrency_field_with_access'
        else:
            last_modified_name = 'compute_concurrency_field'

        # this field must override any other column or field
        cls._add_field(cls.CONCURRENCY_CHECK_FIELD, fields.Datetime(
            string='Last Modified on', compute=last_modified_name, automatic=True))

    @api.one
    def compute_concurrency_field(self):
        self[self.CONCURRENCY_CHECK_FIELD] = \
            datetime.datetime.utcnow().strftime(DEFAULT_SERVER_DATETIME_FORMAT)

    @api.one
    @api.depends('create_date', 'write_date')
    def compute_concurrency_field_with_access(self):
        self[self.CONCURRENCY_CHECK_FIELD] = \
            self.write_date or self.create_date or \
            datetime.datetime.utcnow().strftime(DEFAULT_SERVER_DATETIME_FORMAT)

    #
    # Goal: try to apply inheritance at the instantiation level and
    #       put objects in the pool var
    #
    @classmethod
    def _build_model(cls, pool, cr):
        """ Instantiate a given model.

        This class method instantiates the class of some model (i.e. a class
        deriving from osv or osv_memory). The class might be the class passed
        in argument or, if it inherits from another class, a class constructed
        by combining the two classes.

        """

        # IMPORTANT: the registry contains an instance for each model. The class
        # of each model carries inferred metadata that is shared among the
        # model's instances for this registry, but not among registries. Hence
        # we cannot use that "registry class" for combining model classes by
        # inheritance, since it confuses the metadata inference process.

        # Keep links to non-inherited constraints in cls; this is useful for
        # instance when exporting translations
        cls._local_constraints = cls.__dict__.get('_constraints', [])
        cls._local_sql_constraints = cls.__dict__.get('_sql_constraints', [])

        # determine inherited models
        parents = getattr(cls, '_inherit', [])
        parents = [parents] if isinstance(parents, basestring) else (parents or [])

        # determine the model's name
        name = cls._name or (len(parents) == 1 and parents[0]) or cls.__name__

        # determine the module that introduced the model
        original_module = pool[name]._original_module if name in parents else cls._module

        # build the class hierarchy for the model
        for parent in parents:
            if parent not in pool:
                raise TypeError('The model "%s" specifies a non-existing parent class "%s"\n'
                                'You may need to add a dependency on the parent class\' module.' % (name, parent))
            parent_model = pool[parent]

            # do not use the class of parent_model, since that class contains
            # inferred metadata; use its ancestor instead
            parent_class = type(parent_model).__base__

            # don't inherit custom fields
            columns = dict((key, val)
                           for key, val in parent_class._columns.iteritems()
                           if not val.manual)
            columns.update(cls._columns)

            inherits = dict(parent_class._inherits)
            inherits.update(cls._inherits)

            depends = dict(parent_class._depends)
            for m, fs in cls._depends.iteritems():
                depends[m] = depends.get(m, []) + fs

            old_constraints = parent_class._constraints
            new_constraints = cls._constraints
            # filter out from old_constraints the ones overridden by a
            # constraint with the same function name in new_constraints
            constraints = new_constraints + [oldc
                for oldc in old_constraints
                if not any(newc[2] == oldc[2] and same_name(newc[0], oldc[0])
                           for newc in new_constraints)
            ]

            sql_constraints = cls._sql_constraints + \
                              parent_class._sql_constraints

            attrs = {
                '_name': name,
                '_register': False,
                '_columns': columns,
                '_inherits': inherits,
                '_depends': depends,
                '_constraints': constraints,
                '_sql_constraints': sql_constraints,
            }
            cls = type(name, (cls, parent_class), attrs)

        # introduce the "registry class" of the model;
        # duplicate some attributes so that the ORM can modify them
        attrs = {
            '_register': False,
            '_columns': dict(cls._columns),
            '_defaults': {},            # filled by Field._determine_default()
            '_inherits': dict(cls._inherits),
            '_depends': dict(cls._depends),
            '_constraints': list(cls._constraints),
            '_sql_constraints': list(cls._sql_constraints),
            '_original_module': original_module,
        }
        cls = type(cls._name, (cls,), attrs)

        # instantiate the model, and initialize it
        model = object.__new__(cls)
        model.__init__(pool, cr)
        return model

    @classmethod
    def _init_function_fields(cls, pool, cr):
        # initialize the list of non-stored function fields for this model
        pool._pure_function_fields[cls._name] = []

        # process store of low-level function fields
        for fname, column in cls._columns.iteritems():
            if hasattr(column, 'digits_change'):
                column.digits_change(cr)
            # filter out existing store about this field
            pool._store_function[cls._name] = [
                stored
                for stored in pool._store_function.get(cls._name, [])
                if (stored[0], stored[1]) != (cls._name, fname)
            ]
            if not isinstance(column, fields.function):
                continue
            if not column.store:
                # register it on the pool for invalidation
                pool._pure_function_fields[cls._name].append(fname)
                continue
            # process store parameter
            store = column.store
            if store is True:
                get_ids = lambda self, cr, uid, ids, c={}: ids
                store = {cls._name: (get_ids, None, column.priority, None)}
            for model, spec in store.iteritems():
                if len(spec) == 4:
                    (fnct, fields2, order, length) = spec
                elif len(spec) == 3:
                    (fnct, fields2, order) = spec
                    length = None
                else:
                    raise except_orm('Error',
                        ('Invalid function definition %s in object %s !\nYou must use the definition: store={object: (fnct, fields, priority, time length)}.' % (fname, cls._name)))
                pool._store_function.setdefault(model, [])
                t = (cls._name, fname, fnct, tuple(fields2) if fields2 else None, order, length)
                if t not in pool._store_function[model]:
                    pool._store_function[model].append(t)
                    pool._store_function[model].sort(key=lambda x: x[4])

    @classmethod
    def _init_manual_fields(cls, cr):
        # Check whether the query is already done
        if cls.pool.fields_by_model is not None:
            manual_fields = cls.pool.fields_by_model.get(cls._name, [])
        else:
            cr.execute('SELECT * FROM ir_model_fields WHERE model=%s AND state=%s', (cls._name, 'manual'))
            manual_fields = cr.dictfetchall()

        for field in manual_fields:
            if field['name'] in cls._fields:
                continue
            attrs = {
                'manual': True,
                'string': field['field_description'],
                'required': bool(field['required']),
                'readonly': bool(field['readonly']),
            }
            # FIXME: ignore field['serialization_field_id']
            if field['ttype'] in ('char', 'text', 'html'):
                attrs['translate'] = bool(field['translate'])
                attrs['size'] = field['size'] or None
            elif field['ttype'] in ('selection', 'reference'):
                attrs['selection'] = eval(field['selection'])
            elif field['ttype'] == 'many2one':
                attrs['comodel_name'] = field['relation']
                attrs['ondelete'] = field['on_delete']
                attrs['domain'] = eval(field['domain']) if field['domain'] else None
            elif field['ttype'] == 'one2many':
                attrs['comodel_name'] = field['relation']
                attrs['inverse_name'] = field['relation_field']
                attrs['domain'] = eval(field['domain']) if field['domain'] else None
            elif field['ttype'] == 'many2many':
                attrs['comodel_name'] = field['relation']
                _rel1 = field['relation'].replace('.', '_')
                _rel2 = field['model'].replace('.', '_')
                attrs['relation'] = 'x_%s_%s_%s_rel' % (_rel1, _rel2, field['name'])
                attrs['column1'] = 'id1'
                attrs['column2'] = 'id2'
                attrs['domain'] = eval(field['domain']) if field['domain'] else None
            cls._add_field(field['name'], Field.by_type[field['ttype']](**attrs))

    @classmethod
    def _init_constraints_onchanges(cls):
        # store sql constraint error messages
        for (key, _, msg) in cls._sql_constraints:
            cls.pool._sql_error[cls._table + '_' + key] = msg

        # collect constraint and onchange methods
        cls._constraint_methods = []
        cls._onchange_methods = defaultdict(list)
        for attr, func in getmembers(cls, callable):
            if hasattr(func, '_constrains'):
                cls._constraint_methods.append(func)
            if hasattr(func, '_onchange'):
                for name in func._onchange:
                    cls._onchange_methods[name].append(func)

    def __new__(cls):
        # In the past, this method was registering the model class in the server.
        # This job is now done entirely by the metaclass MetaModel.
        #
        # Do not create an instance here. Model instances are created by method
        # instanciate() of a registry.
        return None

    def __init__(self, pool, cr):
        """ Initialize a model and make it part of the given registry.

        - copy the stored fields' functions in the registry,
        - retrieve custom fields and add them in the model,
        - ensure there is a many2one for each _inherits'd parent,
        - update the children's _columns,
        - give a chance to each field to initialize itself.

        """
        cls = type(self)

        # link the class to the registry, and update the registry
        cls.pool = pool
        cls._model = self              # backward compatibility
        pool.add(cls._name, self)

        # determine description, table, sequence and log_access
        if not cls._description:
            cls._description = cls._name
        if not cls._table:
            cls._table = cls._name.replace('.', '_')
        if not cls._sequence:
            cls._sequence = cls._table + '_id_seq'
        if not hasattr(cls, '_log_access'):
            # If _log_access is not specified, it is the same value as _auto.
            cls._log_access = cls._auto

        # Transience
        if cls.is_transient():
            cls._transient_check_count = 0
            cls._transient_max_count = config.get('osv_memory_count_limit')
            cls._transient_max_hours = config.get('osv_memory_age_limit')
            assert cls._log_access, \
                "TransientModels must have log_access turned on, " \
                "in order to implement their access rights policy"

        # retrieve new-style fields (from above registry class) and duplicate
        # them (to avoid clashes with inheritance between different models)
        cls._fields = {}
        above = cls.__bases__[0]
        for attr, field in getmembers(above, Field.__instancecheck__):
            cls._add_field(attr, field.new())

        # introduce magic fields
        cls._add_magic_fields()

        # register stuff about low-level function fields and custom fields
        cls._init_function_fields(pool, cr)
        cls._init_manual_fields(cr)

        # register constraints and onchange methods
        cls._init_constraints_onchanges()

        # prepare ormcache, which must be shared by all instances of the model
        cls._ormcache = {}

    @api.model
    @ormcache()
    def _is_an_ordinary_table(self):
        self.env.cr.execute("""\
            SELECT  1
            FROM    pg_class
            WHERE   relname = %s
            AND     relkind = %s""", [self._table, 'r'])
        return bool(self.env.cr.fetchone())

    def __export_xml_id(self):
        """ Return a valid xml_id for the record `self`. """
        if not self._is_an_ordinary_table():
            raise Exception(
                "You can not export the column ID of model %s, because the "
                "table %s is not an ordinary table."
                % (self._name, self._table))
        ir_model_data = self.sudo().env['ir.model.data']
        data = ir_model_data.search([('model', '=', self._name), ('res_id', '=', self.id)])
        if data:
            if data[0].module:
                return '%s.%s' % (data[0].module, data[0].name)
            else:
                return data[0].name
        else:
            postfix = 0
            name = '%s_%s' % (self._table, self.id)
            while ir_model_data.search([('module', '=', '__export__'), ('name', '=', name)]):
                postfix += 1
                name = '%s_%s_%s' % (self._table, self.id, postfix)
            ir_model_data.create({
                'model': self._name,
                'res_id': self.id,
                'module': '__export__',
                'name': name,
            })
            return '__export__.' + name
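
    # Illustrative note (assumed example values): for a res.partner record with
    # database id 7 and no pre-existing ir.model.data entry, the generated
    # external id would be '__export__.res_partner_7', with a numeric postfix
    # appended on name collision.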

    def __export_rows(self, fields):
        """ Export fields of the records in `self`.

            :param fields: list of lists of fields to traverse
            :return: list of lists of corresponding values
        """
        lines = []
        for record in self:
            # main line of record, initially empty
            current = [''] * len(fields)
            lines.append(current)

            # list of primary fields followed by secondary field(s)
            primary_done = []

            # process column by column
            for i, path in enumerate(fields):
                if not path:
                    continue

                name = path[0]
                if name in primary_done:
                    continue

                if name == '.id':
                    current[i] = str(record.id)
                elif name == 'id':
                    current[i] = record.__export_xml_id()
                else:
                    field = record._fields[name]
                    value = record[name]

                    # this part could be simpler, but it has to be done this way
                    # in order to reproduce the former behavior
                    if not isinstance(value, BaseModel):
                        current[i] = field.convert_to_export(value, self.env)
                        continue

                    primary_done.append(name)

                    # This is a special case, its strange behavior is intended!
                    if field.type == 'many2many' and len(path) > 1 and path[1] == 'id':
                        xml_ids = [r.__export_xml_id() for r in value]
                        current[i] = ','.join(xml_ids) or False
                        continue

                    # recursively export the fields that follow name
                    fields2 = [(p[1:] if p and p[0] == name else []) for p in fields]
                    lines2 = value.__export_rows(fields2)
                    if lines2:
                        # merge first line with record's main line
                        for j, val in enumerate(lines2[0]):
                            if val:
                                current[j] = val
                        # check value of current field
                        if not current[i]:
                            # assign xml_ids, and forget about remaining lines
                            xml_ids = [item[1] for item in value.name_get()]
                            current[i] = ','.join(xml_ids)
                        else:
                            # append the other lines at the end
                            lines += lines2[1:]
                    else:
                        current[i] = False

        return lines

    @api.multi
    def export_data(self, fields_to_export, raw_data=False):
        """ Export fields for selected objects

            :param fields_to_export: list of fields
            :param raw_data: True to return values in native Python types
            :rtype: dictionary with a *datas* matrix

            This method is used when exporting data via client menu
        """
        fields_to_export = map(fix_import_export_id_paths, fields_to_export)
        if raw_data:
            self = self.with_context(export_raw_data=True)
        return {'datas': self.__export_rows(fields_to_export)}
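
    # Illustrative usage sketch (assumed model and field names):
    #
    #     partners = env['res.partner'].search([])
    #     result = partners.export_data(['id', 'name', 'child_ids/name'])
    #     # result['datas'] is a row-major matrix: one main row per partner,
    #     # plus one continuation row per additional child record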

    def import_data(self, cr, uid, fields, datas, mode='init', current_module='', noupdate=False, context=None, filename=None):
        """
        .. deprecated:: 7.0
            Use :meth:`~load` instead

        Import given data in given module

        This method is used when importing data via client menu.

        Example of fields to import for a sale.order::

            .id,                         (=database_id)
            partner_id,                  (=name_search)
            order_line/.id,              (=database_id)
            order_line/name,
            order_line/product_id/id,    (=xml id)
            order_line/price_unit,
            order_line/product_uom_qty,
            order_line/product_uom/id    (=xml_id)

        This method returns a 4-tuple with the following structure::

            (return_code, errored_resource, error_message, unused)

        * The first item is a return code, it is ``-1`` in case of
          import error, or the last imported row number in case of success
        * The second item contains the record data dict that failed to import
          in case of error, otherwise it's 0
        * The third item contains an error message string in case of error,
          otherwise it's 0
        * The last item is currently unused, with no specific semantics

        :param fields: list of fields to import
        :param datas: data to import
        :param mode: 'init' or 'update' for record creation
        :param current_module: module name
        :param noupdate: flag for record creation
        :param filename: optional file to store partial import state for recovery
        :returns: 4-tuple in the form (return_code, errored_resource, error_message, unused)
        :rtype: (int, dict or 0, str or 0, str or 0)
        """
        context = dict(context) if context is not None else {}
        context['_import_current_module'] = current_module

        fields = map(fix_import_export_id_paths, fields)
        ir_model_data_obj = self.pool.get('ir.model.data')

        def log(m):
            if m['type'] == 'error':
                raise Exception(m['message'])

        position = 0
        if config.get('import_partial') and filename:
            with open(config.get('import_partial'), 'rb') as partial_import_file:
                data = pickle.load(partial_import_file)
                position = data.get(filename, 0)

        try:
            for res_id, xml_id, res, info in self._convert_records(cr, uid,
                            self._extract_records(cr, uid, fields, datas,
                                                  context=context, log=log),
                            context=context, log=log):
                ir_model_data_obj._update(cr, uid, self._name,
                     current_module, res, mode=mode, xml_id=xml_id,
                     noupdate=noupdate, res_id=res_id, context=context)
                position = info.get('rows', {}).get('to', 0) + 1
                if config.get('import_partial') and filename and (not (position % 100)):
                    with open(config.get('import_partial'), 'rb') as partial_import:
                        data = pickle.load(partial_import)
                    data[filename] = position
                    with open(config.get('import_partial'), 'wb') as partial_import:
                        pickle.dump(data, partial_import)
                    if context.get('defer_parent_store_computation'):
                        self._parent_store_compute(cr)
                    cr.commit()
        except Exception, e:
            cr.rollback()
            return -1, {}, 'Line %d : %s' % (position + 1, tools.ustr(e)), ''

        if context.get('defer_parent_store_computation'):
            self._parent_store_compute(cr)
        return position, 0, 0, 0

    def load(self, cr, uid, fields, data, context=None):
        """
        Attempts to load the data matrix, and returns a list of ids (or
        ``False`` if there was an error and no id could be generated) and a
        list of messages.

        The ids are those of the records created and saved (in database), in
        the same order they were extracted from the file. They can be passed
        directly to :meth:`~read`.

        :param fields: list of fields to import, at the same index as the corresponding data
        :type fields: list(str)
        :param data: row-major matrix of data to import
        :type data: list(list(str))
        :param dict context:
        :returns: {ids: list(int)|False, messages: [Message]}
        """
        cr.execute('SAVEPOINT model_load')
        messages = []

        fields = map(fix_import_export_id_paths, fields)
        ModelData = self.pool['ir.model.data'].clear_caches()
        fg = self.fields_get(cr, uid, context=context)

        mode = 'init'
        current_module = ''
        noupdate = False

        ids = []
        for id, xid, record, info in self._convert_records(cr, uid,
                self._extract_records(cr, uid, fields, data,
                                      context=context, log=messages.append),
                context=context, log=messages.append):
            try:
                cr.execute('SAVEPOINT model_load_save')
            except psycopg2.InternalError, e:
                # broken transaction, exit and hope the source error was
                # already logged
                if not any(message['type'] == 'error' for message in messages):
                    messages.append(dict(info, type='error', message=
                        u"Unknown database error: '%s'" % e))
                break
            try:
                ids.append(ModelData._update(cr, uid, self._name,
                     current_module, record, mode=mode, xml_id=xid,
                     noupdate=noupdate, res_id=id, context=context))
                cr.execute('RELEASE SAVEPOINT model_load_save')
            except psycopg2.Warning, e:
                messages.append(dict(info, type='warning', message=str(e)))
                cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
            except psycopg2.Error, e:
                messages.append(dict(
                    info, type='error',
                    **PGERROR_TO_OE[e.pgcode](self, fg, info, e)))
                # Failed to write, log to messages, rollback savepoint (to
                # avoid broken transaction) and keep going
                cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
            except Exception, e:
                message = (_('Unknown error during import:') +
                           ' %s: %s' % (type(e), unicode(e)))
                moreinfo = _('Resolve other errors first')
                messages.append(dict(info, type='error',
                                     message=message, moreinfo=moreinfo))
                # Failed for some reason, perhaps due to invalid data supplied,
                # rollback savepoint and keep going
                cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
        if any(message['type'] == 'error' for message in messages):
            cr.execute('ROLLBACK TO SAVEPOINT model_load')
            ids = False
        return {'ids': ids, 'messages': messages}
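
    # Illustrative usage sketch (assumed field names and values):
    #
    #     result = self.pool['res.partner'].load(cr, uid,
    #         ['name', 'email'],
    #         [['Acme', 'contact@acme.example'],
    #          ['Duff', 'sales@duff.example']])
    #     # result == {'ids': [<id1>, <id2>], 'messages': []} on success,
    #     # or {'ids': False, 'messages': [...]} if any row failed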

    def _extract_records(self, cr, uid, fields_, data,
                         context=None, log=lambda a: None):
        """ Generates record dicts from the data sequence.

        The result is a generator of dicts mapping field names to raw
        (unconverted, unvalidated) values.

        For relational fields, if sub-fields were provided the value will be
        a list of sub-records.

        The following sub-fields may be set on the record (by key):

        * None is the name_get for the record (to use with name_create/name_search)
        * "id" is the External ID for the record
        * ".id" is the Database ID for the record
        """
        from openerp.fields import Char, Integer
        fields = dict(self._fields)
        # Fake fields to avoid special cases in extractor
        fields[None] = Char('rec_name')
        fields['id'] = Char('External ID')
        fields['.id'] = Integer('Database ID')

        # m2o fields can't be on multiple lines so exclude them from the
        # is_relational field rows filter, but special-case it later on to
        # be handled with relational fields (as it can have subfields)
        is_relational = lambda field: fields[field].relational
        get_o2m_values = itemgetter_tuple(
            [index for index, field in enumerate(fields_)
             if fields[field[0]].type == 'one2many'])
        get_nono2m_values = itemgetter_tuple(
            [index for index, field in enumerate(fields_)
             if fields[field[0]].type != 'one2many'])
        # Checks if the provided row has any non-empty non-relational field
        def only_o2m_values(row, f=get_nono2m_values, g=get_o2m_values):
            return any(g(row)) and not any(f(row))

        index = 0
        while True:
            if index >= len(data): return

            row = data[index]
            # copy non-relational fields to record dict
            record = dict((field[0], value)
                          for field, value in itertools.izip(fields_, row)
                          if not is_relational(field[0]))

            # Get all following rows which have relational values attached to
            # the current record (no non-relational values)
            record_span = itertools.takewhile(
                only_o2m_values, itertools.islice(data, index + 1, None))
            # stitch record row back on for relational fields
            record_span = list(itertools.chain([row], record_span))
            for relfield in set(
                    field[0] for field in fields_
                    if is_relational(field[0])):
                # FIXME: how to not use _obj without relying on fields_get?
                Model = self.pool[fields[relfield].comodel_name]

                # get only cells for this sub-field, should be strictly
                # non-empty, field path [None] is for name_get field
                indices, subfields = zip(*((index, field[1:] or [None])
                                           for index, field in enumerate(fields_)
                                           if field[0] == relfield))

                # return all rows which have at least one value for the
                # subfields of relfield
                relfield_data = filter(any, map(itemgetter_tuple(indices), record_span))
                record[relfield] = [subrecord
                                    for subrecord, _subinfo in Model._extract_records(
                                        cr, uid, subfields, relfield_data,
                                        context=context, log=log)]

            yield record, {'rows': {
                'from': index,
                'to': index + len(record_span) - 1,
            }}
            index += len(record_span)
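
    # Illustrative sketch (assumed field names and data): a record with a
    # one2many field spreads its sub-records over continuation rows whose
    # non-relational cells are empty:
    #
    #     fields_ = [['name'], ['order_line', 'name'], ['order_line', 'qty']]
    #     data = [['SO1', 'Line 1', '1'],
    #             ['',    'Line 2', '2']]
    #     # yields a single record:
    #     # {'name': 'SO1',
    #     #  'order_line': [{'name': 'Line 1', 'qty': '1'},
    #     #                 {'name': 'Line 2', 'qty': '2'}]}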

    def _convert_records(self, cr, uid, records,
                         context=None, log=lambda a: None):
        """ Converts records from the source iterable (recursive dicts of
        strings) into forms which can be written to the database (via
        self.create or (ir.model.data)._update)

        :returns: a list of triplets of (id, xid, record)
        :rtype: list((int|None, str|None, dict))
        """
        if context is None: context = {}
        Converter = self.pool['ir.fields.converter']
        Translation = self.pool['ir.translation']
        fields = dict(self._fields)
        field_names = dict(
            (f, (Translation._get_source(cr, uid, self._name + ',' + f, 'field',
                                         context.get('lang'))
                 or field.string))
            for f, field in fields.iteritems())

        convert = Converter.for_model(cr, uid, self, context=context)

        def _log(base, field, exception):
            type = 'warning' if isinstance(exception, Warning) else 'error'
            # logs the logical (not human-readable) field name for automated
            # processing of response, but injects human readable in message
            record = dict(base, type=type, field=field,
                          message=unicode(exception.args[0]) % base)
            if len(exception.args) > 1 and exception.args[1]:
                record.update(exception.args[1])
            log(record)

        stream = CountingStream(records)
        for record, extras in stream:
            dbid = False
            xid = False
            # name_get/name_create
            if None in record: pass
            # xid
            if 'id' in record:
                xid = record['id']
            # dbid
            if '.id' in record:
                try:
                    dbid = int(record['.id'])
                except ValueError:
                    # in case of overridden id column
                    dbid = record['.id']
                if not self.search(cr, uid, [('id', '=', dbid)], context=context):
                    log(dict(extras,
                             type='error',
                             record=stream.index,
                             field='.id',
                             message=_(u"Unknown database identifier '%s'") % dbid))
                    dbid = False

            converted = convert(record, lambda field, err:
                _log(dict(extras, record=stream.index, field=field_names[field]), field, err))

            yield dbid, xid, converted, dict(extras, record=stream.index)

    @api.multi
    def _validate_fields(self, field_names):
        field_names = set(field_names)

        # old-style constraint methods
        trans = self.env['ir.translation']
        cr, uid, context = self.env.args
        ids = self.ids
        errors = []
        for fun, msg, names in self._constraints:
            try:
                # validation must be context-independent; call `fun` without context
                valid = not (set(names) & field_names) or fun(self._model, cr, uid, ids)
                extra_error = None
            except Exception, e:
                _logger.debug('Exception while validating constraint', exc_info=True)
                valid = False
                extra_error = tools.ustr(e)
            if not valid:
                if callable(msg):
                    res_msg = msg(self._model, cr, uid, ids, context=context)
                    if isinstance(res_msg, tuple):
                        template, params = res_msg
                        res_msg = template % params
                else:
                    res_msg = trans._get_source(self._name, 'constraint', self.env.lang, msg)
                if extra_error:
                    res_msg += "\n\n%s\n%s" % (_('Error details:'), extra_error)
                errors.append(
                    _("Field(s) `%s` failed against a constraint: %s") %
                    (', '.join(names), res_msg)
                )
        if errors:
            raise ValidationError('\n'.join(errors))

        # new-style constraint methods
        for check in self._constraint_methods:
            if set(check._constrains) & field_names:
                try:
                    check(self)
                except ValidationError, e:
                    raise
                except Exception, e:
                    raise ValidationError("Error while validating constraint\n\n%s" % tools.ustr(e))

    @api.model
    def default_get(self, fields_list):
        """ default_get(fields) -> default_values

        Return default values for the fields in `fields_list`. Default
        values are determined by the context, user defaults, and the model
        itself.

        :param fields_list: a list of field names
        :return: a dictionary mapping each field name to its corresponding
            default value, if it has one.
        """
        # trigger view init hook
        self.view_init(fields_list)

        defaults = {}
        parent_fields = defaultdict(list)

        for name in fields_list:
            # 1. look up context
            key = 'default_' + name
            if key in self._context:
                defaults[name] = self._context[key]
                continue

            # 2. look up ir_values
            #    Note: performance is good, because get_defaults_dict is cached!
            ir_values_dict = self.env['ir.values'].get_defaults_dict(self._name)
            if name in ir_values_dict:
                defaults[name] = ir_values_dict[name]
                continue

            field = self._fields.get(name)

            # 3. look up property fields
            #    TODO: get rid of this one
            if field and field.company_dependent:
                defaults[name] = self.env['ir.property'].get(name, self._name)
                continue

            # 4. look up field.default
            if field and field.default:
                defaults[name] = field.default(self)
                continue

            # 5. delegate to parent model
            if field and field.inherited:
                field = field.related_field
                parent_fields[field.model_name].append(field.name)

        # convert default values to the right format
        defaults = self._convert_to_cache(defaults, validate=False)
        defaults = self._convert_to_write(defaults)

        # add default values for inherited fields
        for model, names in parent_fields.iteritems():
            defaults.update(self.env[model].default_get(names))

        return defaults
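
    # Illustrative sketch (assumed field name): a 'default_<field>' key in the
    # context takes precedence over ir.values and over the field's own default:
    #
    #     self.with_context(default_name='Foo').default_get(['name'])
    #     # -> {'name': 'Foo'}, whatever the field-level default is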

    def fields_get_keys(self, cr, user, context=None):
        res = self._columns.keys()
        # TODO I believe this loop can be replaced by
        # res.extend(self._inherit_fields.keys())
        for parent in self._inherits:
            res.extend(self.pool[parent].fields_get_keys(cr, user, context))
        return res

    def _rec_name_fallback(self, cr, uid, context=None):
        rec_name = self._rec_name
        if rec_name not in self._columns:
            rec_name = self._columns.keys()[0] if len(self._columns.keys()) > 0 else "id"
        return rec_name

    #
    # Override this method if you need a window title that depends on the context
    #
    def view_header_get(self, cr, user, view_id=None, view_type='form', context=None):
        return False

    def user_has_groups(self, cr, uid, groups, context=None):
        """Return True if the user is at least a member of one of the groups
           in `groups`. Typically used to resolve the `groups` attribute
           in view and model definitions.

        :param str groups: comma-separated list of fully-qualified group
            external IDs, e.g.: ``base.group_user,base.group_system``
        :return: True if the current user is a member of one of the
            given groups
        """
        return any(self.pool['res.users'].has_group(cr, uid, group_ext_id)
                   for group_ext_id in groups.split(','))

    def _get_default_form_view(self, cr, user, context=None):
        """ Generates a default single-line form view using all fields
        of the current model except the m2m and o2m ones.

        :param cr: database cursor
        :param int user: user id
        :param dict context: connection context
        :returns: a form view as an lxml document
        :rtype: etree._Element
        """
        view = etree.Element('form', string=self._description)
        group = etree.SubElement(view, 'group', col="4")
        for fname, field in self._fields.iteritems():
            if field.automatic or field.type in ('one2many', 'many2many'):
                continue

            etree.SubElement(group, 'field', name=fname)
            if field.type == 'text':
                etree.SubElement(group, 'newline')
        return view
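
    # Illustrative sketch (assumed field names): for a model with two char
    # fields 'name' and 'ref', the generated default form arch would look like:
    #
    #     <form string="...">
    #         <group col="4">
    #             <field name="name"/>
    #             <field name="ref"/>
    #         </group>
    #     </form>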

    def _get_default_search_view(self, cr, user, context=None):
        """ Generates a single-field search view, based on _rec_name.

        :param cr: database cursor
        :param int user: user id
        :param dict context: connection context
        :returns: a search view as an lxml document
        :rtype: etree._Element
        """
        view = etree.Element('search', string=self._description)
        etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))
        return view

    def _get_default_tree_view(self, cr, user, context=None):
        """ Generates a single-field tree view, based on _rec_name.

        :param cr: database cursor
        :param int user: user id
        :param dict context: connection context
        :returns: a tree view as an lxml document
        :rtype: etree._Element
        """
        view = etree.Element('tree', string=self._description)
        etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))
        return view

    def _get_default_calendar_view(self, cr, user, context=None):
        """ Generates a default calendar view by trying to infer
        calendar fields from a number of pre-set attribute names.

        :param cr: database cursor
        :param int user: user id
        :param dict context: connection context
        :returns: a calendar view
        :rtype: etree._Element
        """
        def set_first_of(seq, in_, to):
            """Sets the first value of `seq` also found in `in_` to
            the `to` attribute of the view being closed over.

            Returns whether it found a suitable value (and set it on
            the attribute) or not.
            """
            for item in seq:
                if item in in_:
                    view.set(to, item)
                    return True
            return False

        view = etree.Element('calendar', string=self._description)
        etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))

        if self._date_name not in self._columns:
            date_found = False
            for dt in ['date', 'date_start', 'x_date', 'x_date_start']:
                if dt in self._columns:
                    self._date_name = dt
                    date_found = True
                    break

            if not date_found:
                raise except_orm(_('Invalid Object Architecture!'), _("Insufficient fields for Calendar View!"))
        view.set('date_start', self._date_name)

        set_first_of(["user_id", "partner_id", "x_user_id", "x_partner_id"],
                     self._columns, 'color')

        if not set_first_of(["date_stop", "date_end", "x_date_stop", "x_date_end"],
                            self._columns, 'date_stop'):
            if not set_first_of(["date_delay", "planned_hours", "x_date_delay", "x_planned_hours"],
                                self._columns, 'date_delay'):
                raise except_orm(
                    _('Invalid Object Architecture!'),
                    _("Insufficient fields to generate a Calendar View for %s, missing a date_stop or a date_delay") % self._name)

        return view

    def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
        """ fields_view_get([view_id | view_type='form'])

        Get the detailed composition of the requested view, like fields, model, and view architecture.

        :param view_id: id of the view or None
        :param view_type: type of the view to return if view_id is None ('form', 'tree', ...)
        :param toolbar: true to include contextual actions
        :param submenu: deprecated
        :return: dictionary describing the composition of the requested view (including inherited views and extensions)
        :raise AttributeError:
            * if the inherited view has an unknown position to work with, other than 'before', 'after', 'inside', 'replace'
            * if some tag other than 'position' is found in the parent view
        :raise Invalid ArchitectureError: if there is a view type other than form, tree, calendar, search etc. defined on the structure
        """
        if context is None:
            context = {}
        View = self.pool['ir.ui.view']

        result = {
            'model': self._name,
            'field_parent': False,
        }

        # try to find a view_id if none provided
        if not view_id:
            # <view_type>_view_ref in context can be used to override the default view
            view_ref_key = view_type + '_view_ref'
            view_ref = context.get(view_ref_key)
            if view_ref:
                if '.' in view_ref:
                    module, view_ref = view_ref.split('.', 1)
                    cr.execute("SELECT res_id FROM ir_model_data WHERE model='ir.ui.view' AND module=%s AND name=%s", (module, view_ref))
                    view_ref_res = cr.fetchone()
                    if view_ref_res:
                        view_id = view_ref_res[0]
                else:
                    _logger.warning('%r requires a fully-qualified external id (got: %r for model %s). '
                        'Please use the complete `module.view_id` form instead.', view_ref_key, view_ref,
                        self._name)

            if not view_id:
                # otherwise try to find the lowest priority matching ir.ui.view
                view_id = View.default_view(cr, uid, self._name, view_type, context=context)

        # context for post-processing might be overridden
        ctx = context
        if view_id:
            # read the view with inherited views applied
            root_view = View.read_combined(cr, uid, view_id, fields=['id', 'name', 'field_parent', 'type', 'model', 'arch'], context=context)
            result['arch'] = root_view['arch']
            result['name'] = root_view['name']
            result['type'] = root_view['type']
            result['view_id'] = root_view['id']
            result['field_parent'] = root_view['field_parent']
            # override context for postprocessing
            if root_view.get('model') != self._name:
                ctx = dict(context, base_model_name=root_view.get('model'))
        else:
            # fallback on default views methods if no ir.ui.view could be found
            try:
                get_func = getattr(self, '_get_default_%s_view' % view_type)
                arch_etree = get_func(cr, uid, context)
                result['arch'] = etree.tostring(arch_etree, encoding='utf-8')
                result['type'] = view_type
                result['name'] = 'default'
            except AttributeError:
                raise except_orm(_('Invalid Architecture!'), _("No default view of type '%s' could be found !") % view_type)

        # Apply post processing, groups and modifiers etc...
        xarch, xfields = View.postprocess_and_fields(cr, uid, self._name, etree.fromstring(result['arch']), view_id, context=ctx)
        result['arch'] = xarch
        result['fields'] = xfields

        # Add related action information if asked
        if toolbar:
            toclean = ('report_sxw_content', 'report_rml_content', 'report_sxw', 'report_rml', 'report_sxw_content_data', 'report_rml_content_data')
            def clean(x):
                x = x[2]
                for key in toclean:
                    x.pop(key, None)
                return x
            ir_values_obj = self.pool.get('ir.values')
            resprint = ir_values_obj.get(cr, uid, 'action', 'client_print_multi', [(self._name, False)], False, context)
            resaction = ir_values_obj.get(cr, uid, 'action', 'client_action_multi', [(self._name, False)], False, context)
            resrelate = ir_values_obj.get(cr, uid, 'action', 'client_action_relate', [(self._name, False)], False, context)
            resaction = [clean(action) for action in resaction if view_type == 'tree' or not action[2].get('multi')]
            resprint = [clean(print_) for print_ in resprint if view_type == 'tree' or not print_[2].get('multi')]
            # When multi="True" is set, the action will only show up in the More menu of the list view
            resrelate = [clean(action) for action in resrelate
                         if (action[2].get('multi') and view_type == 'tree') or (not action[2].get('multi') and view_type == 'form')]

            for x in itertools.chain(resprint, resaction, resrelate):
                x['string'] = x['name']

            result['toolbar'] = {
                'print': resprint,
                'action': resaction,
                'relate': resrelate,
            }
        return result

    def get_formview_id(self, cr, uid, id, context=None):
        """ Return a view id to open the document with. This method is meant to be
            overridden in addons that want to give specific view ids, for example.

            :param int id: id of the document to open
        """
        return False

    def get_formview_action(self, cr, uid, id, context=None):
        """ Return an action to open the document. This method is meant to be
            overridden in addons that want to give specific view ids, for example.

            :param int id: id of the document to open
        """
        view_id = self.get_formview_id(cr, uid, id, context=context)
        return {
            'type': 'ir.actions.act_window',
            'res_model': self._name,
            'view_type': 'form',
            'view_mode': 'form',
            'views': [(view_id, 'form')],
            'target': 'current',
            'res_id': id,
        }

    def get_access_action(self, cr, uid, id, context=None):
        """ Return an action to open the document. This method is meant to be
            overridden in addons that want to give specific access to the document.
            By default it opens the formview of the document.

            :param int id: id of the document to open
        """
        return self.get_formview_action(cr, uid, id, context=context)

    def _view_look_dom_arch(self, cr, uid, node, view_id, context=None):
        return self.pool['ir.ui.view'].postprocess_and_fields(
            cr, uid, self._name, node, view_id, context=context)

    def search_count(self, cr, user, args, context=None):
        """ search_count(args) -> int

        Returns the number of records in the current model matching :ref:`the
        provided domain <reference/orm/domains>`.
        """
        res = self.search(cr, user, args, context=context, count=True)
        if isinstance(res, list):
            return len(res)
        return res

    @api.returns('self')
    def search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False):
        """ search(args[, offset=0][, limit=None][, order=None])

        Searches for records based on the ``args``
        :ref:`search domain <reference/orm/domains>`.

        :param args: :ref:`A search domain <reference/orm/domains>`. Use an empty
                     list to match all records.
        :param int offset: number of results to ignore (default: none)
        :param int limit: maximum number of records to return (default: all)
        :param str order: sort string
        :returns: at most ``limit`` records matching the search criteria

        :raise AccessError: if the user tries to bypass access rules for read on the requested object
        """
        return self._search(cr, user, args, offset=offset, limit=limit, order=order, context=context, count=count)

    #
    # display_name, name_get, name_create, name_search
    #

    @api.depends(lambda self: (self._rec_name,) if self._rec_name else ())
    def _compute_display_name(self):
        names = dict(self.name_get())
        for record in self:
            record.display_name = names.get(record.id, False)
1656 """ name_get() -> [(id, name), ...]
1658 Returns a textual representation for the records in ``self``.
1659 By default this is the value of the ``display_name`` field.
1661 :return: list of pairs ``(id, text_repr)`` for each records
1665 name = self._rec_name
1666 if name in self._fields:
1667 convert = self._fields[name].convert_to_display_name
1669 result.append((record.id, convert(record[name])))
1672 result.append((record.id, "%s,%s" % (record._name, record.id)))
1677 def name_create(self, name):
1678 """ name_create(name) -> record
1680 Create a new record by calling :meth:`~.create` with only one value
1681 provided: the display name of the new record.
1683 The new record will be initialized with any default values
1684 applicable to this model, or provided through the context. The usual
1685 behavior of :meth:`~.create` applies.
1687 :param name: display name of the record to create
1689 :return: the :meth:`~.name_get` pair value of the created record
1692 record = self.create({self._rec_name: name})
1693 return record.name_get()[0]
1695 _logger.warning("Cannot execute name_create, no _rec_name defined on %s", self._name)
1699 def name_search(self, name='', args=None, operator='ilike', limit=100):
1700 """ name_search(name='', args=None, operator='ilike', limit=100) -> records
1702 Search for records that have a display name matching the given
1703 `name` pattern when compared with the given `operator`, while also
1704 matching the optional search domain (`args`).
1706 This is used for example to provide suggestions based on a partial
1707 value for a relational field. It is sometimes seen as the inverse
1708 function of :meth:`~.name_get`, but it is not guaranteed to be one.
1710 This method is equivalent to calling :meth:`~.search` with a search
1711 domain based on ``display_name`` and then :meth:`~.name_get` on the
1712 result of the search.
1714 :param str name: the name pattern to match
1715 :param list args: optional search domain (see :meth:`~.search` for
1716 syntax), specifying further restrictions
1717 :param str operator: domain operator for matching `name`, such as
1718 ``'like'`` or ``'='``.
1719 :param int limit: optional max number of records to return
1721 :return: list of pairs ``(id, text_repr)`` for all matching records.
1723 return self._name_search(name, args, operator, limit=limit)
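#
# Usage sketch: `name` is matched against the display name with `operator`,
# while `args` further restricts the candidates (model and field names are
# illustrative):
#
#     results = env['res.users'].name_search(
#         name='adm',                    # e.g. matches 'Administrator'
#         args=[('active', '=', True)],  # extra domain restriction
#         operator='ilike',
#         limit=8)
#     # -> list of (id, text_repr) pairs, as returned by name_get()
#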
1725 def _name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100, name_get_uid=None):
1726 # private implementation of name_search, allows passing a dedicated user
1727 # for the name_get part to solve some access rights issues
1728 args = list(args or [])
1729 # optimize out the default criterion of ``ilike ''`` that matches everything
1730 if not self._rec_name:
1731 _logger.warning("Cannot execute name_search, no _rec_name defined on %s", self._name)
1732 elif not (name == '' and operator == 'ilike'):
1733 args += [(self._rec_name, operator, name)]
1734 access_rights_uid = name_get_uid or user
1735 ids = self._search(cr, user, args, limit=limit, context=context, access_rights_uid=access_rights_uid)
1736 res = self.name_get(cr, access_rights_uid, ids, context)
1737 return res
1739 def read_string(self, cr, uid, id, langs, fields=None, context=None):
1740 res = {}
1741 res2 = {}
1742 self.pool.get('ir.translation').check_access_rights(cr, uid, 'read')
1743 if not fields:
1744 fields = self._columns.keys() + self._inherit_fields.keys()
1745 #FIXME: collect all calls to _get_source into one SQL call.
1746 for lang in langs:
1747 res[lang] = {'code': lang}
1748 for f in fields:
1749 if f in self._columns:
1750 res_trans = self.pool.get('ir.translation')._get_source(cr, uid, self._name+','+f, 'field', lang)
1751 if res_trans:
1752 res[lang][f] = res_trans
1753 else:
1754 res[lang][f] = self._columns[f].string
1755 for table in self._inherits:
1756 cols = intersect(self._inherit_fields.keys(), fields)
1757 res2 = self.pool[table].read_string(cr, uid, id, langs, cols, context)
1758 for lang in langs:
1759 if lang in res2:
1760 res[lang]['code'] = lang
1761 for f in res2[lang]:
1762 res[lang][f] = res2[lang][f]
1763 return res
1765 def write_string(self, cr, uid, id, langs, vals, context=None):
1766 self.pool.get('ir.translation').check_access_rights(cr, uid, 'write')
1767 #FIXME: try to only call the translation in one SQL
1768 for lang in langs:
1769 for field in vals:
1770 if field in self._columns:
1771 src = self._columns[field].string
1772 self.pool.get('ir.translation')._set_ids(cr, uid, self._name+','+field, 'field', lang, [0], vals[field], src)
1773 for table in self._inherits:
1774 cols = intersect(self._inherit_fields.keys(), vals)
1775 if cols:
1776 self.pool[table].write_string(cr, uid, id, langs, vals, context)
1777 return True
1779 def _add_missing_default_values(self, cr, uid, values, context=None):
1780 # avoid overriding inherited values when parent is set
1781 avoid_tables = []
1782 for tables, parent_field in self._inherits.items():
1783 if parent_field in values:
1784 avoid_tables.append(tables)
1786 # compute missing fields
1787 missing_defaults = set()
1788 for field in self._columns.keys():
1789 if not field in values:
1790 missing_defaults.add(field)
1791 for field in self._inherit_fields.keys():
1792 if (field not in values) and (self._inherit_fields[field][0] not in avoid_tables):
1793 missing_defaults.add(field)
1794 # discard magic fields
1795 missing_defaults -= set(MAGIC_COLUMNS)
1797 if missing_defaults:
1798 # override defaults with the provided values, never allow the other way around
1799 defaults = self.default_get(cr, uid, list(missing_defaults), context)
1800 for dv in defaults:
1801 if ((dv in self._columns and self._columns[dv]._type == 'many2many') \
1802 or (dv in self._inherit_fields and self._inherit_fields[dv][2]._type == 'many2many')) \
1803 and defaults[dv] and isinstance(defaults[dv][0], (int, long)):
1804 defaults[dv] = [(6, 0, defaults[dv])]
1805 if (dv in self._columns and self._columns[dv]._type == 'one2many' \
1806 or (dv in self._inherit_fields and self._inherit_fields[dv][2]._type == 'one2many')) \
1807 and isinstance(defaults[dv], (list, tuple)) and defaults[dv] and isinstance(defaults[dv][0], dict):
1808 defaults[dv] = [(0, 0, x) for x in defaults[dv]]
1809 defaults.update(values)
1810 values = defaults
1811 return values
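#
# For reference, the normalizations above produce the standard x2many command
# format (field names are illustrative):
#
#     {'tag_ids': [(6, 0, [1, 2])]}                    # many2many: replace ids
#     {'line_ids': [(0, 0, {'name': 'first line'}),
#                   (0, 0, {'name': 'second line'})]}  # one2many: create rows
#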
1813 def clear_caches(self):
1814 """ Clear the caches
1816 This clears the caches associated to methods decorated with
1817 ``tools.ormcache`` or ``tools.ormcache_multi``.
1818 """
1819 try:
1820 self._ormcache.clear()
1821 self.pool._any_cache_cleared = True
1822 except AttributeError:
1823 pass
1826 def _read_group_fill_results(self, cr, uid, domain, groupby, remaining_groupbys,
1827 aggregated_fields, count_field,
1828 read_group_result, read_group_order=None, context=None):
1829 """Helper method for filling in empty groups for all possible values of
1830 the field being grouped by"""
1832 # self._group_by_full should map groupable fields to a method that returns
1833 # a list of all aggregated values that we want to display for this field,
1834 # in the form of a m2o-like pair (key,label).
1835 # This is useful to implement kanban views for instance, where all columns
1836 # should be displayed even if they don't contain any record.
1838 # Grab the list of all groups that should be displayed, including all present groups
1839 present_group_ids = [x[groupby][0] for x in read_group_result if x[groupby]]
1840 all_groups,folded = self._group_by_full[groupby](self, cr, uid, present_group_ids, domain,
1841 read_group_order=read_group_order,
1842 access_rights_uid=openerp.SUPERUSER_ID,
1843 context=context)
1845 result_template = dict.fromkeys(aggregated_fields, False)
1846 result_template[groupby + '_count'] = 0
1847 if remaining_groupbys:
1848 result_template['__context'] = {'group_by': remaining_groupbys}
1850 # Merge the left_side (current results as dicts) with the right_side (all
1851 # possible values as m2o pairs). Both lists are supposed to be using the
1852 # same ordering, and can be merged in one pass.
1853 result = []
1854 known_values = {}
1855 def append_left(left_side):
1856 grouped_value = left_side[groupby] and left_side[groupby][0]
1857 if not grouped_value in known_values:
1858 result.append(left_side)
1859 known_values[grouped_value] = left_side
1860 else:
1861 known_values[grouped_value].update({count_field: left_side[count_field]})
1862 def append_right(right_side):
1863 grouped_value = right_side[0]
1864 if not grouped_value in known_values:
1865 line = dict(result_template)
1866 line[groupby] = right_side
1867 line['__domain'] = [(groupby,'=',grouped_value)] + domain
1868 result.append(line)
1869 known_values[grouped_value] = line
1870 while read_group_result or all_groups:
1871 left_side = read_group_result[0] if read_group_result else None
1872 right_side = all_groups[0] if all_groups else None
1873 assert left_side is None or left_side[groupby] is False \
1874 or isinstance(left_side[groupby], (tuple,list)), \
1875 'M2O-like pair expected, got %r' % left_side[groupby]
1876 assert right_side is None or isinstance(right_side, (tuple,list)), \
1877 'M2O-like pair expected, got %r' % right_side
1878 if left_side is None:
1879 append_right(all_groups.pop(0))
1880 elif right_side is None:
1881 append_left(read_group_result.pop(0))
1882 elif left_side[groupby] == right_side:
1883 append_left(read_group_result.pop(0))
1884 all_groups.pop(0) # discard right_side
1885 elif not left_side[groupby] or not left_side[groupby][0]:
1886 # left side == "Undefined" entry, not present on right_side
1887 append_left(read_group_result.pop(0))
1888 else:
1889 append_right(all_groups.pop(0))
1891 if folded:
1892 for r in result:
1893 r['__fold'] = folded.get(r[groupby] and r[groupby][0], False)
1894 return result
1896 def _read_group_prepare(self, orderby, aggregated_fields, annotated_groupbys, query):
1898 Prepares the GROUP BY and ORDER BY terms for the read_group method. Adds the missing JOIN clause
1899 to the query if the order should be computed against a many2one field.
1900 :param orderby: the orderby definition in the form "%(field)s %(order)s"
1901 :param aggregated_fields: list of aggregated fields in the query
1902 :param annotated_groupbys: list of dictionaries returned by _read_group_process_groupby
1903 These dictionaries contain the qualified name of each groupby
1904 (fully qualified SQL name for the corresponding field),
1905 and the (non-raw) field name.
1906 :param osv.Query query: the query under construction
1907 :return: (groupby_terms, orderby_terms)
1909 orderby_terms = []
1910 groupby_terms = [gb['qualified_field'] for gb in annotated_groupbys]
1911 groupby_fields = [gb['groupby'] for gb in annotated_groupbys]
1912 if not orderby:
1913 return groupby_terms, orderby_terms
1915 self._check_qorder(orderby)
1916 for order_part in orderby.split(','):
1917 order_split = order_part.split()
1918 order_field = order_split[0]
1919 if order_field in groupby_fields:
1921 if self._fields[order_field.split(':')[0]].type == 'many2one':
1922 order_clause = self._generate_order_by(order_part, query).replace('ORDER BY ', '')
1923 if order_clause:
1924 orderby_terms.append(order_clause)
1925 groupby_terms += [order_term.split()[0] for order_term in order_clause.split(',')]
1926 else:
1927 order = '"%s" %s' % (order_field, '' if len(order_split) == 1 else order_split[1])
1928 orderby_terms.append(order)
1929 elif order_field in aggregated_fields:
1930 orderby_terms.append(order_part)
1931 else:
1932 # Cannot order by a field that will not appear in the results (needs to be grouped or aggregated)
1933 _logger.warn('%s: read_group order by `%s` ignored, cannot sort on empty columns (not grouped/aggregated)',
1934 self._name, order_part)
1935 return groupby_terms, orderby_terms
1937 def _read_group_process_groupby(self, gb, query, context):
1939 Helper method to collect important information about groupbys: raw
1940 field name, type, time information, qualified name, ...
1942 split = gb.split(':')
1943 field_type = self._fields[split[0]].type
1944 gb_function = split[1] if len(split) == 2 else None
1945 temporal = field_type in ('date', 'datetime')
1946 tz_convert = field_type == 'datetime' and context.get('tz') in pytz.all_timezones
1947 qualified_field = self._inherits_join_calc(split[0], query)
1948 if temporal:
1949 display_formats = {
1950 # Careful with week/year formats:
1951 # - yyyy (lower) must always be used, *except* for week+year formats
1952 # - YYYY (upper) must always be used for week+year format
1953 # e.g. 2006-01-01 is W52 2005 in some locales (de_DE),
1954 # and W1 2006 for others
1956 # Mixing both formats, e.g. 'MMM YYYY' would yield wrong results,
1957 # such as 2006-01-01 being formatted as "January 2005" in some locales.
1958 # Cfr: http://babel.pocoo.org/docs/dates/#date-fields
1959 'day': 'dd MMM yyyy', # yyyy = normal year
1960 'week': "'W'w YYYY", # w YYYY = ISO week-year
1961 'month': 'MMMM yyyy',
1962 'quarter': 'QQQ yyyy',
1963 'year': 'yyyy',
1964 }
1965 time_intervals = {
1966 'day': dateutil.relativedelta.relativedelta(days=1),
1967 'week': datetime.timedelta(days=7),
1968 'month': dateutil.relativedelta.relativedelta(months=1),
1969 'quarter': dateutil.relativedelta.relativedelta(months=3),
1970 'year': dateutil.relativedelta.relativedelta(years=1)
1971 }
1972 if tz_convert:
1973 qualified_field = "timezone('%s', timezone('UTC',%s))" % (context.get('tz', 'UTC'), qualified_field)
1974 qualified_field = "date_trunc('%s', %s)" % (gb_function or 'month', qualified_field)
1975 if field_type == 'boolean':
1976 qualified_field = "coalesce(%s,false)" % qualified_field
1977 return {
1978 'field': split[0],
1979 'groupby': gb,
1980 'type': field_type,
1981 'display_format': display_formats[gb_function or 'month'] if temporal else None,
1982 'interval': time_intervals[gb_function or 'month'] if temporal else None,
1983 'tz_convert': tz_convert,
1984 'qualified_field': qualified_field
1985 }
1987 def _read_group_prepare_data(self, key, value, groupby_dict, context):
1989 Helper method to sanitize the data received by read_group: None
1990 values are converted to False, and date/datetime values are formatted
1991 and corrected according to the timezones.
1992 """
1993 value = False if value is None else value
1994 gb = groupby_dict.get(key)
1995 if gb and gb['type'] in ('date', 'datetime') and value:
1996 if isinstance(value, basestring):
1997 dt_format = DEFAULT_SERVER_DATETIME_FORMAT if gb['type'] == 'datetime' else DEFAULT_SERVER_DATE_FORMAT
1998 value = datetime.datetime.strptime(value, dt_format)
1999 if gb['tz_convert']:
2000 value = pytz.timezone(context['tz']).localize(value)
2001 return value
2003 def _read_group_get_domain(self, groupby, value):
2005 Helper method to construct the domain corresponding to a groupby and
2006 a given value. This is mostly relevant for date/datetime.
2008 if groupby['type'] in ('date', 'datetime') and value:
2009 dt_format = DEFAULT_SERVER_DATETIME_FORMAT if groupby['type'] == 'datetime' else DEFAULT_SERVER_DATE_FORMAT
2010 domain_dt_begin = value
2011 domain_dt_end = value + groupby['interval']
2012 if groupby['tz_convert']:
2013 domain_dt_begin = domain_dt_begin.astimezone(pytz.utc)
2014 domain_dt_end = domain_dt_end.astimezone(pytz.utc)
2015 return [(groupby['field'], '>=', domain_dt_begin.strftime(dt_format)),
2016 (groupby['field'], '<', domain_dt_end.strftime(dt_format))]
2017 if groupby['type'] == 'many2one' and value:
2018 value = value[0]
2019 return [(groupby['field'], '=', value)]
2021 def _read_group_format_result(self, data, annotated_groupbys, groupby, groupby_dict, domain, context):
2023 Helper method to format the data contained in the dictionary data by
2024 adding the domain corresponding to its values, the groupbys in the
2025 context and by properly formatting the date/datetime values.
2027 domain_group = [dom for gb in annotated_groupbys for dom in self._read_group_get_domain(gb, data[gb['groupby']])]
2028 for k,v in data.iteritems():
2029 gb = groupby_dict.get(k)
2030 if gb and gb['type'] in ('date', 'datetime') and v:
2031 data[k] = babel.dates.format_date(v, format=gb['display_format'], locale=context.get('lang', 'en_US'))
2033 data['__domain'] = domain_group + domain
2034 if len(groupby) - len(annotated_groupbys) >= 1:
2035 data['__context'] = { 'group_by': groupby[len(annotated_groupbys):]}
2036 return data
2039 def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False, lazy=True):
2041 Get the list of records in list view grouped by the given ``groupby`` fields
2043 :param cr: database cursor
2044 :param uid: current user id
2045 :param domain: list specifying search criteria [['field_name', 'operator', 'value'], ...]
2046 :param list fields: list of fields present in the list view specified on the object
2047 :param list groupby: list of groupby descriptions by which the records will be grouped.
2048 A groupby description is either a field (then it will be grouped by that field)
2049 or a string 'field:groupby_function'. Right now, the only functions supported
2050 are 'day', 'week', 'month', 'quarter' or 'year', and they only make sense for
2051 date/datetime fields.
2052 :param int offset: optional number of records to skip
2053 :param int limit: optional max number of records to return
2054 :param dict context: context arguments, like lang, time zone.
2055 :param list orderby: optional ``order by`` specification, for
2056 overriding the natural sort ordering of the
2057 groups, see also :py:meth:`~osv.osv.osv.search`
2058 (supported only for many2one fields currently)
2059 :param bool lazy: if true, the results are only grouped by the first groupby and the
2060 remaining groupbys are put in the __context key. If false, all the groupbys are
2061 done in one call.
2062 :return: list of dictionaries (one dictionary for each record) containing:
2064 * the values of fields grouped by the fields in ``groupby`` argument
2065 * __domain: list of tuples specifying the search criteria
2066 * __context: dictionary with argument like ``groupby``
2067 :rtype: [{'field_name_1': value, ...}, ...]
2068 :raise AccessError: * if user has no read rights on the requested object
2069 * if user tries to bypass access rules for read on the requested object
2073 self.check_access_rights(cr, uid, 'read')
2074 query = self._where_calc(cr, uid, domain, context=context)
2075 fields = fields or self._columns.keys()
2077 groupby = [groupby] if isinstance(groupby, basestring) else groupby
2078 groupby_list = groupby[:1] if lazy else groupby
2079 annotated_groupbys = [self._read_group_process_groupby(gb, query, context)
2080 for gb in groupby_list]
2081 groupby_fields = [g['field'] for g in annotated_groupbys]
2082 order = orderby or ','.join([g for g in groupby_list])
2083 groupby_dict = {gb['groupby']: gb for gb in annotated_groupbys}
2085 self._apply_ir_rules(cr, uid, query, 'read', context=context)
2086 for gb in groupby_fields:
2087 assert gb in fields, "Fields in 'groupby' must appear in the list of fields to read (perhaps it's missing in the list view?)"
2088 groupby_def = self._columns.get(gb) or (self._inherit_fields.get(gb) and self._inherit_fields.get(gb)[2])
2089 assert groupby_def and groupby_def._classic_write, "Fields in 'groupby' must be regular database-persisted fields (no function or related fields), or function fields with store=True"
2090 if not (gb in self._fields):
2091 # Don't allow arbitrary values, as this would be a SQL injection vector!
2092 raise except_orm(_('Invalid group_by'),
2093 _('Invalid group_by specification: "%s".\nA group_by specification must be a list of valid fields.')%(gb,))
2095 aggregated_fields = [
2096 f for f in fields
2097 if f not in ('id', 'sequence')
2098 if f not in groupby_fields
2099 if f in self._fields
2100 if self._fields[f].type in ('integer', 'float')
2101 if getattr(self._fields[f].base_field.column, '_classic_write')
2102 ]
2104 field_formatter = lambda f: (self._fields[f].group_operator or 'sum', self._inherits_join_calc(f, query), f)
2105 select_terms = ["%s(%s) AS %s" % field_formatter(f) for f in aggregated_fields]
2107 for gb in annotated_groupbys:
2108 select_terms.append('%s as "%s" ' % (gb['qualified_field'], gb['groupby']))
2110 groupby_terms, orderby_terms = self._read_group_prepare(order, aggregated_fields, annotated_groupbys, query)
2111 from_clause, where_clause, where_clause_params = query.get_sql()
2112 if lazy and (len(groupby_fields) >= 2 or not context.get('group_by_no_leaf')):
2113 count_field = groupby_fields[0] if len(groupby_fields) >= 1 else '_'
2114 else:
2115 count_field = '_'
2116 count_field += '_count'
2118 prefix_terms = lambda prefix, terms: (prefix + " " + ",".join(terms)) if terms else ''
2119 prefix_term = lambda prefix, term: ('%s %s' % (prefix, term)) if term else ''
2121 query = """
2122 SELECT min(%(table)s.id) AS id, count(%(table)s.id) AS %(count_field)s %(extra_fields)s
2123 FROM %(from)s
2124 %(where)s
2125 %(groupby)s
2126 %(orderby)s
2127 %(limit)s
2128 %(offset)s
2129 """ % {
2130 'table': self._table,
2131 'count_field': count_field,
2132 'extra_fields': prefix_terms(',', select_terms),
2133 'from': from_clause,
2134 'where': prefix_term('WHERE', where_clause),
2135 'groupby': prefix_terms('GROUP BY', groupby_terms),
2136 'orderby': prefix_terms('ORDER BY', orderby_terms),
2137 'limit': prefix_term('LIMIT', int(limit) if limit else None),
2138 'offset': prefix_term('OFFSET', int(offset) if limit else None),
2139 }
2140 cr.execute(query, where_clause_params)
2141 fetched_data = cr.dictfetchall()
2143 if not groupby_fields:
2144 return fetched_data
2146 many2onefields = [gb['field'] for gb in annotated_groupbys if gb['type'] == 'many2one']
2147 if many2onefields:
2148 data_ids = [r['id'] for r in fetched_data]
2149 many2onefields = list(set(many2onefields))
2150 data_dict = {d['id']: d for d in self.read(cr, uid, data_ids, many2onefields, context=context)}
2151 for d in fetched_data:
2152 d.update(data_dict[d['id']])
2154 data = map(lambda r: {k: self._read_group_prepare_data(k,v, groupby_dict, context) for k,v in r.iteritems()}, fetched_data)
2155 result = [self._read_group_format_result(d, annotated_groupbys, groupby, groupby_dict, domain, context) for d in data]
2156 if lazy and groupby_fields[0] in self._group_by_full:
2157 # Right now, read_group only fills results in lazy mode (by default).
2158 # If you need to have the empty groups in 'eager' mode, then the
2159 # method _read_group_fill_results needs to be completely reimplemented.
2161 result = self._read_group_fill_results(cr, uid, domain, groupby_fields[0], groupby[len(annotated_groupbys):],
2162 aggregated_fields, count_field, result, read_group_order=order,
2163 context=context)
2164 return result
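#
# Usage sketch for read_group (model and field names are illustrative): group
# sale orders by month and sum their totals.
#
#     groups = self.pool['sale.order'].read_group(
#         cr, uid, [('state', '!=', 'cancel')],
#         fields=['date_order', 'amount_total'],
#         groupby=['date_order:month'], context=context)
#     # each group dict carries the aggregated values, a '__domain' selecting
#     # the underlying records and, in lazy mode, a '__context' with the
#     # remaining groupbys
#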
2166 def _inherits_join_add(self, current_model, parent_model_name, query):
2168 Add missing table SELECT and JOIN clause to ``query`` for reaching the parent table (no duplicates)
2169 :param current_model: current model object
2170 :param parent_model_name: name of the parent model for which the clauses should be added
2171 :param query: query object on which the JOIN should be added
2173 inherits_field = current_model._inherits[parent_model_name]
2174 parent_model = self.pool[parent_model_name]
2175 parent_alias, parent_alias_statement = query.add_join((current_model._table, parent_model._table, inherits_field, 'id', inherits_field), implicit=True)
2176 return parent_alias
2178 def _inherits_join_calc(self, field, query):
2180 Adds missing table select and join clause(s) to ``query`` for reaching
2181 the field coming from an '_inherits' parent table (no duplicates).
2183 :param field: name of inherited field to reach
2184 :param query: query object on which the JOIN should be added
2185 :return: qualified name of field, to be used in SELECT clause
2187 current_table = self
2188 parent_alias = '"%s"' % current_table._table
2189 while field in current_table._inherit_fields and not field in current_table._columns:
2190 parent_model_name = current_table._inherit_fields[field][0]
2191 parent_table = self.pool[parent_model_name]
2192 parent_alias = self._inherits_join_add(current_table, parent_model_name, query)
2193 current_table = parent_table
2194 return '%s."%s"' % (parent_alias, field)
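#
# Illustration, assuming the standard res.users -> res.partner _inherits
# setup: _inherits_join_calc('name', query) on res.users walks up one level,
# registers the implicit JOIN through the 'partner_id' link field, and
# returns the qualified name '"res_partner"."name"' for the SELECT clause.
#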
2196 def _parent_store_compute(self, cr):
2197 if not self._parent_store:
2198 return
2199 _logger.info('Computing parent left and right for table %s...', self._table)
2200 def browse_rec(root, pos=0):
2202 where = self._parent_name+'='+str(root)
2203 if not root:
2204 where = self._parent_name+' IS NULL'
2205 if self._parent_order:
2206 where += ' order by '+self._parent_order
2207 cr.execute('SELECT id FROM '+self._table+' WHERE '+where)
2208 pos2 = pos + 1
2209 for id in cr.fetchall():
2210 pos2 = browse_rec(id[0], pos2)
2211 cr.execute('update '+self._table+' set parent_left=%s, parent_right=%s where id=%s', (pos, pos2, root))
2212 return pos2 + 1
2213 query = 'SELECT id FROM '+self._table+' WHERE '+self._parent_name+' IS NULL'
2214 if self._parent_order:
2215 query += ' order by ' + self._parent_order
2216 pos = 0
2217 cr.execute(query)
2218 for (root,) in cr.fetchall():
2219 pos = browse_rec(root, pos)
2220 self.invalidate_cache(cr, SUPERUSER_ID, ['parent_left', 'parent_right'])
2221 return True
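#
# Worked illustration of the nested-set values computed by browse_rec above,
# for a small hierarchy (ids arbitrary):
#
#     A (parent_left=0, parent_right=5)
#     |-- B (parent_left=1, parent_right=2)
#     |-- C (parent_left=3, parent_right=4)
#
# child_of searches then reduce to range tests: the descendants of A are the
# rows with parent_left > 0 AND parent_left < 5.
#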
2223 def _update_store(self, cr, f, k):
2224 _logger.info("storing computed values of fields.function '%s'", k)
2225 ss = self._columns[k]._symbol_set
2226 update_query = 'UPDATE "%s" SET "%s"=%s WHERE id=%%s' % (self._table, k, ss[0])
2227 cr.execute('select id from '+self._table)
2228 ids_lst = map(lambda x: x[0], cr.fetchall())
2229 while ids_lst:
2230 iids = ids_lst[:AUTOINIT_RECALCULATE_STORED_FIELDS]
2231 ids_lst = ids_lst[AUTOINIT_RECALCULATE_STORED_FIELDS:]
2232 res = f.get(cr, self, iids, k, SUPERUSER_ID, {})
2233 for key, val in res.items():
2236 # if val is a many2one, just write the ID
2237 if type(val) == tuple:
2238 val = val[0]
2239 if val is not False:
2240 cr.execute(update_query, (ss[1](val), key))
2243 def _check_selection_field_value(self, field, value):
2244 """ Check whether value is among the valid values for the given
2245 selection/reference field, and raise an exception if not.
2247 field = self._fields[field]
2248 field.convert_to_cache(value, self)
2250 def _check_removed_columns(self, cr, log=False):
2251 # iterate on the database columns to drop the NOT NULL constraints
2252 # of fields which were required but have been removed (or will be added by another module)
2253 columns = [c for c in self._columns if not (isinstance(self._columns[c], fields.function) and not self._columns[c].store)]
2254 columns += MAGIC_COLUMNS
2255 cr.execute("SELECT a.attname, a.attnotnull"
2256 " FROM pg_class c, pg_attribute a"
2257 " WHERE c.relname=%s"
2258 " AND c.oid=a.attrelid"
2259 " AND a.attisdropped=%s"
2260 " AND pg_catalog.format_type(a.atttypid, a.atttypmod) NOT IN ('cid', 'tid', 'oid', 'xid')"
2261 " AND a.attname NOT IN %s", (self._table, False, tuple(columns))),
2263 for column in cr.dictfetchall():
2264 if log:
2265 _logger.debug("column %s is in the table %s but not in the corresponding object %s",
2266 column['attname'], self._table, self._name)
2267 if column['attnotnull']:
2268 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, column['attname']))
2269 _schema.debug("Table '%s': column '%s': dropped NOT NULL constraint",
2270 self._table, column['attname'])
2272 def _save_constraint(self, cr, constraint_name, type, definition):
2274 Record the creation of a constraint for this model, to make it possible
2275 to delete it later when the module is uninstalled. Type can be either
2276 'f' or 'u' depending on the constraint being a foreign key or not.
2278 if not self._module:
2279 # no need to save constraints for custom models as they're not part
2280 # of any module
2281 return
2282 assert type in ('f', 'u')
2283 cr.execute("""
2284 SELECT type, definition FROM ir_model_constraint, ir_module_module
2285 WHERE ir_model_constraint.module=ir_module_module.id
2286 AND ir_model_constraint.name=%s
2287 AND ir_module_module.name=%s
2288 """, (constraint_name, self._module))
2289 constraints = cr.dictfetchone()
2290 if not constraints:
2291 cr.execute("""
2292 INSERT INTO ir_model_constraint
2293 (name, date_init, date_update, module, model, type, definition)
2294 VALUES (%s, now() AT TIME ZONE 'UTC', now() AT TIME ZONE 'UTC',
2295 (SELECT id FROM ir_module_module WHERE name=%s),
2296 (SELECT id FROM ir_model WHERE model=%s), %s, %s)""",
2297 (constraint_name, self._module, self._name, type, definition))
2298 elif constraints['type'] != type or (definition and constraints['definition'] != definition):
2299 cr.execute("""
2300 UPDATE ir_model_constraint
2301 SET date_update=now() AT TIME ZONE 'UTC', type=%s, definition=%s
2302 WHERE name=%s AND module = (SELECT id FROM ir_module_module WHERE name=%s)""",
2303 (type, definition, constraint_name, self._module))
2305 def _save_relation_table(self, cr, relation_table):
2307 Record the creation of a many2many relation table for this model, to make it possible
2308 to delete it later when the module is uninstalled.
2309 """
2310 cr.execute("""
2311 SELECT 1 FROM ir_model_relation, ir_module_module
2312 WHERE ir_model_relation.module=ir_module_module.id
2313 AND ir_model_relation.name=%s
2314 AND ir_module_module.name=%s
2315 """, (relation_table, self._module))
2316 if not cr.rowcount:
2317 cr.execute("""INSERT INTO ir_model_relation (name, date_init, date_update, module, model)
2318 VALUES (%s, now() AT TIME ZONE 'UTC', now() AT TIME ZONE 'UTC',
2319 (SELECT id FROM ir_module_module WHERE name=%s),
2320 (SELECT id FROM ir_model WHERE model=%s))""",
2321 (relation_table, self._module, self._name))
2322 self.invalidate_cache(cr, SUPERUSER_ID)
2324 # checked version: for direct m2o starting from `self`
2325 def _m2o_add_foreign_key_checked(self, source_field, dest_model, ondelete):
2326 assert self.is_transient() or not dest_model.is_transient(), \
2327 'Many2One relationships from non-transient Model to TransientModel are forbidden'
2328 if self.is_transient() and not dest_model.is_transient():
2329 # TransientModel relationships to regular Models are annoying
2330 # usually because they could block deletion due to the FKs.
2331 # So unless stated otherwise we default them to ondelete=cascade.
2332 ondelete = ondelete or 'cascade'
2333 fk_def = (self._table, source_field, dest_model._table, ondelete or 'set null')
2334 self._foreign_keys.add(fk_def)
2335 _schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s", *fk_def)
2337 # unchecked version: for custom cases, such as m2m relationships
2338 def _m2o_add_foreign_key_unchecked(self, source_table, source_field, dest_model, ondelete):
2339 fk_def = (source_table, source_field, dest_model._table, ondelete or 'set null')
2340 self._foreign_keys.add(fk_def)
2341 _schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s", *fk_def)
2343 def _drop_constraint(self, cr, source_table, constraint_name):
2344 cr.execute("ALTER TABLE %s DROP CONSTRAINT %s" % (source_table,constraint_name))
2346 def _m2o_fix_foreign_key(self, cr, source_table, source_field, dest_model, ondelete):
2347 # Find FK constraint(s) currently established for the m2o field,
2348 # and see whether they are stale or not
2349 cr.execute("""SELECT confdeltype as ondelete_rule, conname as constraint_name,
2350 cl2.relname as foreign_table
2351 FROM pg_constraint as con, pg_class as cl1, pg_class as cl2,
2352 pg_attribute as att1, pg_attribute as att2
2353 WHERE con.conrelid = cl1.oid
2354 AND cl1.relname = %s
2355 AND con.confrelid = cl2.oid
2356 AND array_lower(con.conkey, 1) = 1
2357 AND con.conkey[1] = att1.attnum
2358 AND att1.attrelid = cl1.oid
2359 AND att1.attname = %s
2360 AND array_lower(con.confkey, 1) = 1
2361 AND con.confkey[1] = att2.attnum
2362 AND att2.attrelid = cl2.oid
2363 AND att2.attname = %s
2364 AND con.contype = 'f'""", (source_table, source_field, 'id'))
2365 constraints = cr.dictfetchall()
2366 if constraints:
2367 if len(constraints) == 1:
2368 # Is it the right constraint?
2369 cons, = constraints
2370 if cons['ondelete_rule'] != POSTGRES_CONFDELTYPES.get((ondelete or 'set null').upper(), 'a')\
2371 or cons['foreign_table'] != dest_model._table:
2372 # Wrong FK: drop it and recreate
2373 _schema.debug("Table '%s': dropping obsolete FK constraint: '%s'",
2374 source_table, cons['constraint_name'])
2375 self._drop_constraint(cr, source_table, cons['constraint_name'])
2376 else:
2377 # it's all good, nothing to do!
2378 return
2379 else:
2380 # Multiple FKs found for the same field, drop them all, and re-create
2381 for cons in constraints:
2382 _schema.debug("Table '%s': dropping duplicate FK constraints: '%s'",
2383 source_table, cons['constraint_name'])
2384 self._drop_constraint(cr, source_table, cons['constraint_name'])
2386 # (re-)create the FK
2387 self._m2o_add_foreign_key_checked(source_field, dest_model, ondelete)
2390 def _set_default_value_on_column(self, cr, column_name, context=None):
2391 # ideally, we should use default_get(), but it fails due to ir.values
2392 # not being ready
2395 default = self._defaults.get(column_name)
2396 if callable(default):
2397 default = default(self, cr, SUPERUSER_ID, context)
2399 column = self._columns[column_name]
2400 ss = column._symbol_set
2401 db_default = ss[1](default)
2402 # Write default if non-NULL, except for booleans for which False means
2403 # the same as NULL - this saves us an expensive query on large tables.
2404 write_default = (db_default is not None if column._type != 'boolean'
2405 else db_default)
2406 if write_default:
2407 _logger.debug("Table '%s': setting default value of new column %s to %r",
2408 self._table, column_name, default)
2409 query = 'UPDATE "%s" SET "%s"=%s WHERE "%s" is NULL' % (
2410 self._table, column_name, ss[0], column_name)
2411 cr.execute(query, (db_default,))
2412 # this is a disgrace
2413 cr.commit()
2415 def _auto_init(self, cr, context=None):
2418 Call _field_create and, unless _auto is False:
2420 - create the corresponding table in database for the model,
2421 - possibly add the parent columns in database,
2422 - possibly add the columns 'create_uid', 'create_date', 'write_uid',
2423 'write_date' in database if _log_access is True (the default),
2424 - report on database columns no more existing in _columns,
2425 - remove no more existing not null constraints,
2426 - alter existing database columns to match _columns,
2427 - create database tables to match _columns,
2428 - add database indices to match _columns,
2429 - save in self._foreign_keys a list of foreign keys to create (see
2430 _auto_end).
2433 self._foreign_keys = set()
2434 raise_on_invalid_object_name(self._name)
2435 if context is None:
2436 context = {}
2437 store_compute = False
2438 stored_fields = [] # new-style stored fields with compute
2439 todo_end = []
2440 update_custom_fields = context.get('update_custom_fields', False)
2441 self._field_create(cr, context=context)
2442 create = not self._table_exist(cr)
2443 if self._auto:
2445 if create:
2446 self._create_table(cr)
2447 has_rows = False
2448 else:
2449 cr.execute('SELECT 1 FROM "%s" LIMIT 1' % self._table)
2450 has_rows = cr.rowcount
2453 if self._parent_store:
2454 if not self._parent_columns_exist(cr):
2455 self._create_parent_columns(cr)
2456 store_compute = True
2458 self._check_removed_columns(cr, log=False)
2460 # iterate on the "object columns"
2461 column_data = self._select_column_data(cr)
2463 for k, f in self._columns.iteritems():
2464 if k == 'id': # FIXME: maybe id should be a regular column?
2465 continue
2466 # Don't update custom (also called manual) fields
2467 if f.manual and not update_custom_fields:
2468 continue
2470 if isinstance(f, fields.one2many):
2471 self._o2m_raise_on_missing_reference(cr, f)
2473 elif isinstance(f, fields.many2many):
2474 self._m2m_raise_or_create_relation(cr, f)
2477 res = column_data.get(k)
2479 # The field is not found as-is in database, try if it
2480 # exists with an old name.
2481 if not res and hasattr(f, 'oldname'):
2482 res = column_data.get(f.oldname)
2483 if res:
2484 cr.execute('ALTER TABLE "%s" RENAME "%s" TO "%s"' % (self._table, f.oldname, k))
2485 res['attname'] = k
2486 column_data[k] = res
2487 _schema.debug("Table '%s': renamed column '%s' to '%s'",
2488 self._table, f.oldname, k)
2490 # The field already exists in database. Possibly
2491 # change its type, rename it, drop it or change its
2492 # constraints.
2493 if res:
2494 f_pg_type = res['typname']
2495 f_pg_size = res['size']
2496 f_pg_notnull = res['attnotnull']
2497 if isinstance(f, fields.function) and not f.store and\
2498 not getattr(f, 'nodrop', False):
2499 _logger.info('column %s (%s) converted to a function, removed from table %s',
2500 k, f.string, self._table)
2501 cr.execute('ALTER TABLE "%s" DROP COLUMN "%s" CASCADE' % (self._table, k))
2503 _schema.debug("Table '%s': dropped column '%s' with cascade",
2504 self._table, k)
2505 f_obj_type = None
2506 else:
2507 f_obj_type = get_pg_type(f) and get_pg_type(f)[0]
2509 if f_obj_type:
2510 ok = False
2511 casts = [
2512 ('text', 'char', pg_varchar(f.size), '::%s' % pg_varchar(f.size)),
2513 ('varchar', 'text', 'TEXT', ''),
2514 ('int4', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
2515 ('date', 'datetime', 'TIMESTAMP', '::TIMESTAMP'),
2516 ('timestamp', 'date', 'date', '::date'),
2517 ('numeric', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
2518 ('float8', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
2519 ]
2520 if f_pg_type == 'varchar' and f._type == 'char' and f_pg_size and (f.size is None or f_pg_size < f.size):
2521 try:
2522 with cr.savepoint():
2523 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" TYPE %s' % (self._table, k, pg_varchar(f.size)))
2524 except psycopg2.NotSupportedError:
2525 # In place alter table cannot be done because a view is depending of this field.
2526 # Do a manual copy. This will drop the view (that will be recreated later)
2527 cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO temp_change_size' % (self._table, k))
2528 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, pg_varchar(f.size)))
2529 cr.execute('UPDATE "%s" SET "%s"=temp_change_size::%s' % (self._table, k, pg_varchar(f.size)))
2530 cr.execute('ALTER TABLE "%s" DROP COLUMN temp_change_size CASCADE' % (self._table,))
2532 _schema.debug("Table '%s': column '%s' (type varchar) changed size from %s to %s",
2533 self._table, k, f_pg_size or 'unlimited', f.size or 'unlimited')
2534 for c in casts:
2535 if (f_pg_type==c[0]) and (f._type==c[1]):
2536 if f_pg_type != f_obj_type:
2537 ok = True
2538 cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO __temp_type_cast' % (self._table, k))
2539 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, c[2]))
2540 cr.execute(('UPDATE "%s" SET "%s"= __temp_type_cast'+c[3]) % (self._table, k))
2541 cr.execute('ALTER TABLE "%s" DROP COLUMN __temp_type_cast CASCADE' % (self._table,))
2543 _schema.debug("Table '%s': column '%s' changed type from %s to %s",
2544 self._table, k, c[0], c[1])
2545 break
2547 if f_pg_type != f_obj_type:
2548 if not ok:
2549 i = 0
2550 while True:
2551 newname = k + '_moved' + str(i)
2552 cr.execute("SELECT count(1) FROM pg_class c,pg_attribute a " \
2553 "WHERE c.relname=%s " \
2554 "AND a.attname=%s " \
2555 "AND c.oid=a.attrelid ", (self._table, newname))
2556 if not cr.fetchone()[0]:
2557 break
2558 i += 1
2559 if f_pg_notnull:
2560 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, k))
2561 cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO "%s"' % (self._table, k, newname))
2562 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, get_pg_type(f)[1]))
2563 cr.execute("COMMENT ON COLUMN %s.\"%s\" IS %%s" % (self._table, k), (f.string,))
2564 _schema.debug("Table '%s': column '%s' has changed type (DB=%s, def=%s), data moved to column %s !",
2565 self._table, k, f_pg_type, f._type, newname)
2567 # if the field is required and hasn't got a NOT NULL constraint
2568 if f.required and f_pg_notnull == 0:
2569 if has_rows:
2570 self._set_default_value_on_column(cr, k, context=context)
2571 # add the NOT NULL constraint
2572 try:
2573 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k), log_exceptions=False)
2574 cr.commit()
2575 _schema.debug("Table '%s': column '%s': added NOT NULL constraint",
2576 self._table, k)
2578 msg = "Table '%s': unable to set a NOT NULL constraint on column '%s' !\n"\
2579 "If you want to have it, you should update the records and execute manually:\n"\
2580 "ALTER TABLE %s ALTER COLUMN %s SET NOT NULL"
2581 _schema.warning(msg, self._table, k, self._table, k)
2583 elif not f.required and f_pg_notnull == 1:
2584 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, k))
2585 cr.commit()
2586 _schema.debug("Table '%s': column '%s': dropped NOT NULL constraint",
2587 self._table, k)
2589 indexname = '%s_%s_index' % (self._table, k)
2590 cr.execute("SELECT indexname FROM pg_indexes WHERE indexname = %s and tablename = %s", (indexname, self._table))
2591 res2 = cr.dictfetchall()
2592 if not res2 and f.select:
2593 cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (self._table, k, self._table, k))
2595 if f._type == 'text':
2596 # FIXME: for fields.text columns we should try creating GIN indexes instead (seems most suitable for an ERP context)
2597 msg = "Table '%s': Adding (b-tree) index for %s column '%s'."\
2598 "This is probably useless (does not work for fulltext search) and prevents INSERTs of long texts"\
2599 " because there is a length limit for indexable btree values!\n"\
2600 "Use a search view instead if you simply want to make the field searchable."
2601 _schema.warning(msg, self._table, f._type, k)
2602 if res2 and not f.select:
2603 cr.execute('DROP INDEX "%s_%s_index"' % (self._table, k))
2605 msg = "Table '%s': dropping index for column '%s' of type '%s' as it is not required anymore"
2606 _schema.debug(msg, self._table, k, f._type)
2608 if isinstance(f, fields.many2one) or (isinstance(f, fields.function) and f._type == 'many2one' and f.store):
2609 dest_model = self.pool[f._obj]
2610 if dest_model._auto and dest_model._table != 'ir_actions':
2611 self._m2o_fix_foreign_key(cr, self._table, k, dest_model, f.ondelete)
2613 # The field doesn't exist in database. Create it if necessary.
2614 else:
2615 if not isinstance(f, fields.function) or f.store:
2616 # add the missing field
2617 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, get_pg_type(f)[1]))
2618 cr.execute("COMMENT ON COLUMN %s.\"%s\" IS %%s" % (self._table, k), (f.string,))
2619 _schema.debug("Table '%s': added column '%s' with definition=%s",
2620 self._table, k, get_pg_type(f)[1])
2623 if has_rows:
2624 self._set_default_value_on_column(cr, k, context=context)
2626 # remember the functions to call for the stored fields
2627 if isinstance(f, fields.function):
2628 order = 10
2629 if f.store is not True: # i.e. if f.store is a dict
2630 order = f.store[f.store.keys()[0]][2]
2631 todo_end.append((order, self._update_store, (f, k)))
2633 # remember new-style stored fields with compute method
2634 if k in self._fields and self._fields[k].depends:
2635 stored_fields.append(self._fields[k])
2637 # and add constraints if needed
2638 if isinstance(f, fields.many2one) or (isinstance(f, fields.function) and f._type == 'many2one' and f.store):
2639 if f._obj not in self.pool:
2640 raise except_orm('Programming Error', 'There is no reference available for %s' % (f._obj,))
2641 dest_model = self.pool[f._obj]
2642 ref = dest_model._table
2643 # ir_actions is inherited so foreign key doesn't work on it
2644 if dest_model._auto and ref != 'ir_actions':
2645 self._m2o_add_foreign_key_checked(k, dest_model, f.ondelete)
2646 if f.select:
2647 cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (self._table, k, self._table, k))
2649 if f.required:
2650 try:
2651 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k))
2652 _schema.debug("Table '%s': column '%s': added a NOT NULL constraint",
2653 self._table, k)
2655 msg = "WARNING: unable to set column %s of table %s not null !\n"\
2656 "Try to re-run: openerp-server --update=module\n"\
2657 "If it doesn't work, update records and execute manually:\n"\
2658 "ALTER TABLE %s ALTER COLUMN %s SET NOT NULL"
2659 _logger.warning(msg, k, self._table, self._table, k, exc_info=True)
2662 else:
2663 cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (self._table,))
2664 create = not bool(cr.fetchone())
2666 cr.commit() # start a new transaction
2668 if self._auto:
2669 self._add_sql_constraints(cr)
2671 if create:
2672 self._execute_sql(cr)
2674 if store_compute:
2675 self._parent_store_compute(cr)
2676 cr.commit()
2678 if stored_fields:
2679 # trigger computation of new-style stored fields with a compute
2680 def func(cr):
2681 _logger.info("Storing computed values of %s fields %s",
2682 self._name, ', '.join(sorted(f.name for f in stored_fields)))
2683 recs = self.browse(cr, SUPERUSER_ID, [], {'active_test': False})
2684 recs = recs.search([])
2685 if recs:
2686 map(recs._recompute_todo, stored_fields)
2687 recs.recompute()
2689 todo_end.append((1000, func, ()))
2690 return todo_end
2693 def _auto_end(self, cr, context=None):
2694 """ Create the foreign keys recorded by _auto_init. """
2695 for t, k, r, d in self._foreign_keys:
2696 cr.execute('ALTER TABLE "%s" ADD FOREIGN KEY ("%s") REFERENCES "%s" ON DELETE %s' % (t, k, r, d))
2697 self._save_constraint(cr, "%s_%s_fkey" % (t, k), 'f', False)
2698 cr.commit()
2699 del self._foreign_keys
2702 def _table_exist(self, cr):
2703 cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (self._table,))
2704 return cr.rowcount
2707 def _create_table(self, cr):
2708 cr.execute('CREATE TABLE "%s" (id SERIAL NOT NULL, PRIMARY KEY(id))' % (self._table,))
2709 cr.execute(("COMMENT ON TABLE \"%s\" IS %%s" % self._table), (self._description,))
2710 _schema.debug("Table '%s': created", self._table)
2713 def _parent_columns_exist(self, cr):
2714 cr.execute("""SELECT c.relname
2715 FROM pg_class c, pg_attribute a
2716 WHERE c.relname=%s AND a.attname=%s AND c.oid=a.attrelid
2717 """, (self._table, 'parent_left'))
2721 def _create_parent_columns(self, cr):
2722 cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_left" INTEGER' % (self._table,))
2723 cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_right" INTEGER' % (self._table,))
2724 if 'parent_left' not in self._columns:
2725 _logger.error('create a column parent_left on object %s: fields.integer(\'Left Parent\', select=1)',
2726 self._table)
2727 _schema.debug("Table '%s': added column '%s' with definition=%s",
2728 self._table, 'parent_left', 'INTEGER')
2729 elif not self._columns['parent_left'].select:
2730 _logger.error('parent_left column on object %s must be indexed! Add select=1 to the field definition)',
2731 self._table)
2732 if 'parent_right' not in self._columns:
2733 _logger.error('create a column parent_right on object %s: fields.integer(\'Right Parent\', select=1)',
2734 self._table)
2735 _schema.debug("Table '%s': added column '%s' with definition=%s",
2736 self._table, 'parent_right', 'INTEGER')
2737 elif not self._columns['parent_right'].select:
2738 _logger.error('parent_right column on object %s must be indexed! Add select=1 to the field definition)',
2739 self._table)
2740 if self._columns[self._parent_name].ondelete not in ('cascade', 'restrict'):
2741 _logger.error("The column %s on object %s must be set as ondelete='cascade' or 'restrict'",
2742 self._parent_name, self._name)
2747 def _select_column_data(self, cr):
2748 # attlen is the number of bytes necessary to represent the type when
2749 # the type has a fixed size. If the type has a varying size attlen is
2750 # -1 and atttypmod is the size limit + 4, or -1 if there is no limit.
2751 cr.execute("SELECT c.relname,a.attname,a.attlen,a.atttypmod,a.attnotnull,a.atthasdef,t.typname,CASE WHEN a.attlen=-1 THEN (CASE WHEN a.atttypmod=-1 THEN 0 ELSE a.atttypmod-4 END) ELSE a.attlen END as size " \
2752 "FROM pg_class c,pg_attribute a,pg_type t " \
2753 "WHERE c.relname=%s " \
2754 "AND c.oid=a.attrelid " \
2755 "AND a.atttypid=t.oid", (self._table,))
2756 return dict(map(lambda x: (x['attname'], x),cr.dictfetchall()))
2759 def _o2m_raise_on_missing_reference(self, cr, f):
2760 # TODO this check should be a method on fields.one2many.
2761 if f._obj in self.pool:
2762 other = self.pool[f._obj]
2763 # TODO the condition could use fields_get_keys().
2764 if f._fields_id not in other._columns.keys():
2765 if f._fields_id not in other._inherit_fields.keys():
2766 raise except_orm('Programming Error', "There is no reference field '%s' found for '%s'" % (f._fields_id, f._obj,))
2768 def _m2m_raise_or_create_relation(self, cr, f):
2769 m2m_tbl, col1, col2 = f._sql_names(self)
2770 # do not create relations for custom fields as they do not belong to a module
2771 # they will be automatically removed when dropping the corresponding ir.model.field
2772 # table name for custom relation all starts with x_, see __init__
2773 if not m2m_tbl.startswith('x_'):
2774 self._save_relation_table(cr, m2m_tbl)
2775 cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (m2m_tbl,))
2776 if not cr.dictfetchall():
2777 if f._obj not in self.pool:
2778 raise except_orm('Programming Error', 'Many2Many destination model does not exist: `%s`' % (f._obj,))
2779 dest_model = self.pool[f._obj]
2780 ref = dest_model._table
2781 cr.execute('CREATE TABLE "%s" ("%s" INTEGER NOT NULL, "%s" INTEGER NOT NULL, UNIQUE("%s","%s"))' % (m2m_tbl, col1, col2, col1, col2))
2782 # create foreign key references with ondelete=cascade, unless the targets are SQL views
2783 cr.execute("SELECT relkind FROM pg_class WHERE relkind IN ('v') AND relname=%s", (ref,))
2784 if not cr.fetchall():
2785 self._m2o_add_foreign_key_unchecked(m2m_tbl, col2, dest_model, 'cascade')
2786 cr.execute("SELECT relkind FROM pg_class WHERE relkind IN ('v') AND relname=%s", (self._table,))
2787 if not cr.fetchall():
2788 self._m2o_add_foreign_key_unchecked(m2m_tbl, col1, self, 'cascade')
2790 cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (m2m_tbl, col1, m2m_tbl, col1))
2791 cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (m2m_tbl, col2, m2m_tbl, col2))
2792 cr.execute("COMMENT ON TABLE \"%s\" IS 'RELATION BETWEEN %s AND %s'" % (m2m_tbl, self._table, ref))
2793 cr.commit()
2794 _schema.debug("Create table '%s': m2m relation between '%s' and '%s'", m2m_tbl, self._table, ref)
2797 def _add_sql_constraints(self, cr):
2800 Modify this model's database table constraints so they match the ones in
2801 _sql_constraints.
2802 """
2804 def unify_cons_text(txt):
2805 return txt.lower().replace(', ',',').replace(' (','(')
2807 for (key, con, _) in self._sql_constraints:
2808 conname = '%s_%s' % (self._table, key)
2810 # using 1 to get result if no imc but one pgc
2811 cr.execute("""SELECT definition, 1
2812 FROM ir_model_constraint imc
2813 RIGHT JOIN pg_constraint pgc
2814 ON (pgc.conname = imc.name)
2815 WHERE pgc.conname=%s
2816 """, (conname,))
2817 existing_constraints = cr.dictfetchone()
2818 sql_actions = {
2819 'drop': {
2820 'execute': False,
2821 'query': 'ALTER TABLE "%s" DROP CONSTRAINT "%s"' % (self._table, conname, ),
2822 'msg_ok': "Table '%s': dropped constraint '%s'. Reason: its definition changed from '%%s' to '%s'" % (
2823 self._table, conname, con),
2824 'msg_err': "Table '%s': unable to drop \'%s\' constraint !" % (self._table, con),
2825 'order': 1,
2826 },
2827 'add': {
2828 'execute': False,
2829 'query': 'ALTER TABLE "%s" ADD CONSTRAINT "%s" %s' % (self._table, conname, con,),
2830 'msg_ok': "Table '%s': added constraint '%s' with definition=%s" % (self._table, conname, con),
2831 'msg_err': "Table '%s': unable to add \'%s\' constraint !\n If you want to have it, you should update the records and execute manually:\n%%s" % (
2832 self._table, con),
2833 'order': 2,
2834 },
2835 }
2837 if not existing_constraints:
2838 # constraint does not exist:
2839 sql_actions['add']['execute'] = True
2840 sql_actions['add']['msg_err'] = sql_actions['add']['msg_err'] % (sql_actions['add']['query'], )
2841 elif unify_cons_text(con) != existing_constraints['definition']:
2842 # constraint exists but its definition has changed:
2843 sql_actions['drop']['execute'] = True
2844 sql_actions['drop']['msg_ok'] = sql_actions['drop']['msg_ok'] % (existing_constraints['definition'] or '', )
2845 sql_actions['add']['execute'] = True
2846 sql_actions['add']['msg_err'] = sql_actions['add']['msg_err'] % (sql_actions['add']['query'], )
2848 # we need to add the constraint:
2849 self._save_constraint(cr, conname, 'u', unify_cons_text(con))
2850 sql_actions = [item for item in sql_actions.values()]
2851 sql_actions.sort(key=lambda x: x['order'])
2852 for sql_action in [action for action in sql_actions if action['execute']]:
2853 try:
2854 cr.execute(sql_action['query'])
2855 cr.commit()
2856 _schema.debug(sql_action['msg_ok'])
2857 except:
2858 _schema.warning(sql_action['msg_err'])
2859 cr.rollback()
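#
# For reference, the _sql_constraints synchronized here are declared on
# models as (key, sql_definition, error_message) triples; the resulting pg
# constraints are named '<table>_<key>' as computed above. An illustrative
# declaration (field names are assumptions):
#
#     _sql_constraints = [
#         ('name_uniq', 'unique(name)', 'The name must be unique!'),
#         ('credit_positive', 'CHECK(credit_limit >= 0)',
#          'The credit limit must be positive!'),
#     ]
#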
2862 def _execute_sql(self, cr):
2863 """ Execute the SQL code from the _sql attribute (if any)."""
2864 if hasattr(self, "_sql"):
2865 for line in self._sql.split(';'):
2866 line2 = line.replace('\n', '').strip()
2867 if line2:
2868 cr.execute(line2)
2869 cr.commit()
2872 # Update objects that use this one to update their _inherits fields
2875 @classmethod
2876 def _inherits_reload(cls):
2877 """ Recompute the _inherit_fields mapping, and inherited fields. """
2878 struct = {}
2879 fields = {}
2880 for parent_model, parent_field in cls._inherits.iteritems():
2881 parent = cls.pool[parent_model]
2882 # old-api struct for _inherit_fields
2883 for name, column in parent._columns.iteritems():
2884 struct[name] = (parent_model, parent_field, column, parent_model)
2885 for name, source in parent._inherit_fields.iteritems():
2886 struct[name] = (parent_model, parent_field, source[2], source[3])
2887 # new-api fields for _fields
2888 for name, field in parent._fields.iteritems():
2889 fields[name] = field.new(
2891 related=(parent_field, name),
2892 )
2896 cls._inherit_fields = struct
2897 cls._all_columns = cls._get_column_infos()
2899 # add inherited fields that are not redefined locally
2900 for name, field in fields.iteritems():
2901 if name not in cls._fields:
2902 cls._add_field(name, field)
2904 @classmethod
2905 def _get_column_infos(cls):
2906 """Returns a dict mapping all fields names (direct fields and
2907 inherited field via _inherits) to a ``column_info`` struct
2908 giving detailed columns """
2909 result = {}
2910 # do not inverse for loops, since local fields may hide inherited ones!
2911 for k, (parent, m2o, col, original_parent) in cls._inherit_fields.iteritems():
2912 result[k] = fields.column_info(k, col, parent, m2o, original_parent)
2913 for k, col in cls._columns.iteritems():
2914 result[k] = fields.column_info(k, col)
2915 return result
2918 def _inherits_check(cls):
2919 for table, field_name in cls._inherits.items():
2920 if field_name not in cls._columns:
2921 _logger.info('Missing many2one field definition for _inherits reference "%s" in "%s", using default one.', field_name, cls._name)
2922 cls._columns[field_name] = fields.many2one(table, string="Automatically created field to link to parent %s" % table,
2923 required=True, ondelete="cascade")
2924 elif not cls._columns[field_name].required or cls._columns[field_name].ondelete.lower() not in ("cascade", "restrict"):
2925 _logger.warning('Field definition for _inherits reference "%s" in "%s" must be marked as "required" with ondelete="cascade" or "restrict", forcing it to required + cascade.', field_name, cls._name)
2926 cls._columns[field_name].required = True
2927 cls._columns[field_name].ondelete = "cascade"
2929 # reflect fields with delegate=True in dictionary cls._inherits
2930 for field in cls._fields.itervalues():
2931 if field.type == 'many2one' and not field.related and field.delegate:
2932 if not field.required:
2933 _logger.warning("Field %s with delegate=True must be required.", field)
2934 field.required = True
2935 if field.ondelete.lower() not in ('cascade', 'restrict'):
2936 field.ondelete = 'cascade'
2937 cls._inherits[field.comodel_name] = field.name
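#
# For reference, the _inherits mapping checked above can come from an
# explicit declaration (model names follow the standard res.users ->
# res.partner delegation; the column options shown are the ones enforced by
# the checks above):
#
#     class res_users(osv.osv):
#         _name = 'res.users'
#         _inherits = {'res.partner': 'partner_id'}
#         _columns = {
#             'partner_id': fields.many2one('res.partner', 'Related Partner',
#                                           required=True, ondelete='cascade'),
#         }
#
# or, equivalently, from a new-API many2one field declared with delegate=True.
#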
2940 def _prepare_setup_fields(self):
2941 """ Prepare the setup of fields once the models have been loaded. """
2942 type(self)._setup_done = False
2943 for name, field in self._fields.items():
2944 if field.inherited:
2945 del self._fields[name]
2950 def _setup_fields(self):
2951 """ Setup the fields (dependency triggers, etc). """
2952 cls = type(self)
2953 if cls._setup_done:
2954 return
2955 cls._setup_done = True
2957 # first make sure that parent models are all set up
2958 for parent in self._inherits:
2959 self.env[parent]._setup_fields()
2961 # retrieve custom fields
2962 if not self._context.get('_setup_fields_partial'):
2963 cls._init_manual_fields(self._cr)
2965 # retrieve inherited fields
2966 cls._inherits_check()
2967 cls._inherits_reload()
2970 for field in cls._fields.itervalues():
2971 field.setup(self.env)
2973 # update columns (fields may have changed)
2974 for name, field in cls._fields.iteritems():
2976 cls._columns[name] = field.to_column()
2978 # group fields by compute to determine field.computed_fields
2979 fields_by_compute = defaultdict(list)
2980 for field in cls._fields.itervalues():
2981 if field.compute:
2982 field.computed_fields = fields_by_compute[field.compute]
2983 field.computed_fields.append(field)
2984 else:
2985 field.computed_fields = []
2988 for func in cls._constraint_methods:
2989 if not all(name in cls._fields for name in func._constrains):
2990 _logger.warning("@constrains%r parameters must be field names", func._constrains)
2991 for name in cls._onchange_methods:
2992 if name not in cls._fields:
2993 func = cls._onchange_methods[name]
2994 _logger.warning("@onchange%r parameters must be field names", func._onchange)
2997 for name in cls._defaults:
2998 assert name in cls._fields, \
2999 "Model %s has a default for nonexiting field %s" % (cls._name, name)
3002 if cls._rec_name:
3003 assert cls._rec_name in cls._fields, \
3004 "Invalid rec_name %s for model %s" % (cls._rec_name, cls._name)
3005 elif 'name' in cls._fields:
3006 cls._rec_name = 'name'
3007 elif 'x_name' in cls._fields:
3008 cls._rec_name = 'x_name'
3010 def fields_get(self, cr, user, allfields=None, context=None, write_access=True, attributes=None):
3011 """ fields_get([fields][, attributes])
3013 Return the definition of each field.
3015 The returned value is a dictionary (indexed by field name) of
3016 dictionaries. The _inherits'd fields are included. The string, help,
3017 and selection (if present) attributes are translated.
3019 :param allfields: list of fields to document, all if empty or not provided
3020 :param attributes: list of description attributes to return for each field, all if empty or not provided
3022 recs = self.browse(cr, user, [], context)
3024 has_access = functools.partial(recs.check_access_rights, raise_exception=False)
3025 readonly = not (has_access('write') or has_access('create'))
3027 res = {}
3028 for fname, field in self._fields.iteritems():
3029 if allfields and fname not in allfields:
3030 continue
3031 if not field.setup_done:
3032 continue
3033 if field.groups and not recs.user_has_groups(field.groups):
3034 continue
3036 description = field.get_description(recs.env)
3037 if readonly:
3038 description['readonly'] = True
3039 description['states'] = {}
3040 if attributes:
3041 description = {k: v for k, v in description.iteritems()
3042 if k in attributes}
3043 res[fname] = description
3044 return res
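#
# Usage sketch (model and field names are illustrative):
#
#     defs = self.pool['res.partner'].fields_get(
#         cr, uid, allfields=['name', 'lang'],
#         attributes=['string', 'type'], context=context)
#     # -> {'name': {'string': 'Name', 'type': 'char'},
#     #     'lang': {'string': ..., 'type': 'selection'}}
#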
3047 def get_empty_list_help(self, cr, user, help, context=None):
3048 """ Generic method giving the help message displayed when having
3049 no result to display in a list or kanban view. By default it returns
3050 the help given in parameter that is generally the help message
3051 defined in the action.
3052 """
3053 return help
3055 def check_field_access_rights(self, cr, user, operation, fields, context=None):
3057 Check the user access rights on the given fields. This raises Access
3058 Denied if the user does not have the rights. Otherwise it returns the
3059 fields (as is if `fields` is not falsy, or the readable/writable
3060 fields if `fields` is falsy).
3061 """
3062 if user == SUPERUSER_ID:
3063 return fields or list(self._fields)
3066 """ determine whether user has access to field `fname` """
3067 field = self._fields.get(fname)
3068 if field and field.groups:
3069 return self.user_has_groups(cr, user, groups=field.groups, context=context)
3070 else:
3071 return True
3073 if not fields:
3074 fields = filter(valid, self._fields)
3075 else:
3076 invalid_fields = set(filter(lambda name: not valid(name), fields))
3077 if invalid_fields:
3078 _logger.warning('Access Denied by ACLs for operation: %s, uid: %s, model: %s, fields: %s',
3079 operation, user, self._name, ', '.join(invalid_fields))
3080 raise AccessError(
3081 _('The requested operation cannot be completed due to security restrictions. '
3082 'Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
3083 (self._description, operation))
3085 return fields
3087 # add explicit old-style implementation to read()
3088 @api.v7
3089 def read(self, cr, user, ids, fields=None, context=None, load='_classic_read'):
3090 records = self.browse(cr, user, ids, context)
3091 result = BaseModel.read(records, fields, load=load)
3092 return result if isinstance(ids, list) else (bool(result) and result[0])
3094 # new-style implementation of read()
3095 @api.v8
3096 def read(self, fields=None, load='_classic_read'):
3099 Reads the requested fields for the records in `self`, low-level/RPC
3100 method. In Python code, prefer :meth:`~.browse`.
3102 :param fields: list of field names to return (default is all fields)
3103 :return: a list of dictionaries mapping field names to their values,
3104 with one dictionary per record
3105 :raise AccessError: if user has no read rights on some of the given records
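A minimal usage sketch (the record contents are illustrative
assumptions)::

    # new-style call on a record set of two records
    vals = records.read(['name'])
    # vals could look like:
    # [{'id': 1, 'name': 'Foo'}, {'id': 2, 'name': 'Bar'}]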
3108 # check access rights
3109 self.check_access_rights('read')
3110 fields = self.check_field_access_rights('read', fields)
3112 # split fields into stored and computed fields
3113 stored, computed = [], []
3114 for name in fields:
3115 if name in self._columns:
3116 stored.append(name)
3117 elif name in self._fields:
3118 computed.append(name)
3119 else:
3120 _logger.warning("%s.read() with unknown field '%s'", self._name, name)
3122 # fetch stored fields from the database to the cache
3123 self._read_from_database(stored)
3125 # retrieve results from records; this takes values from the cache and
3126 # computes remaining fields
3128 name_fields = [(name, self._fields[name]) for name in (stored + computed)]
3129 use_name_get = (load == '_classic_read')
3130 for record in self:
3131 try:
3132 values = {'id': record.id}
3133 for name, field in name_fields:
3134 values[name] = field.convert_to_read(record[name], use_name_get)
3135 result.append(values)
3136 except MissingError:
3142 def _prefetch_field(self, field):
3143 """ Read from the database in order to fetch `field` (:class:`Field`
3144 instance) for `self` in cache.
3146 # fetch the records of this model without field_name in their cache
3147 records = self._in_cache_without(field)
3149 if len(records) > PREFETCH_MAX:
3150 records = records[:PREFETCH_MAX] | self
3152 # determine which fields can be prefetched
3153 if not self.env.in_draft and \
3154 self._context.get('prefetch_fields', True) and \
3155 self._columns[field.name]._prefetch:
3156 # prefetch all classic and many2one fields that the user can access
3157 fnames = {fname
3158 for fname, fcolumn in self._columns.iteritems()
3159 if fcolumn._prefetch
3160 if not fcolumn.groups or self.user_has_groups(fcolumn.groups)
3161 }
3163 fnames = {field.name}
3165 # important: never prefetch fields to recompute!
3166 get_recs_todo = self.env.field_todo
3167 for fname in list(fnames):
3168 if get_recs_todo(self._fields[fname]):
3169 if fname == field.name:
3170 records -= get_recs_todo(field)
3171 else:
3172 fnames.discard(fname)
3174 # fetch records with read()
3175 assert self in records and field.name in fnames
3178 result = records.read(list(fnames), load='_classic_write')
3182 # check the cache, and update it if necessary
3183 if not self._cache.contains(field):
3184 for values in result:
3185 record = self.browse(values.pop('id'))
3186 record._cache.update(record._convert_to_cache(values, validate=False))
3187 if not self._cache.contains(field):
3188 e = AccessError("No value found for %s.%s" % (self, field.name))
3189 self._cache[field] = FailedValue(e)
3192 def _read_from_database(self, field_names):
3193 """ Read the given fields of the records in `self` from the database,
3194 and store them in cache. Access errors are also stored in cache.
3197 cr, user, context = env.args
3199 # FIXME: The query construction needs to be rewritten using the internal Query
3200 # object, as in search(), to avoid ambiguous column references when
3201 # reading/sorting on a table that is auto_joined to another table with
3202 # common columns (e.g. the magical columns)
3204 # Construct a clause for the security rules.
3205 # 'tables' holds the list of tables necessary for the SELECT, including
3206 # the ir.rule clauses, and contains at least self._table.
3207 rule_clause, rule_params, tables = env['ir.rule'].domain_get(self._name, 'read')
3209 # determine the fields that are stored as columns in self._table
3210 fields_pre = [f for f in field_names if self._columns[f]._classic_write]
3212 # we need fully-qualified column names in case len(tables) > 1
3213 def qualify(f):
3214 if isinstance(self._columns.get(f), fields.binary) and \
3215 context.get('bin_size_%s' % f, context.get('bin_size')):
3216 # PG 9.2 introduces conflicting pg_size_pretty(numeric) -> need ::cast
3217 return 'pg_size_pretty(length(%s."%s")::bigint) as "%s"' % (self._table, f, f)
3219 return '%s."%s"' % (self._table, f)
3220 qual_names = map(qualify, set(fields_pre + ['id']))
3222 query = """ SELECT %(qual_names)s FROM %(tables)s
3223 WHERE %(table)s.id IN %%s AND (%(extra)s)
3226 'qual_names': ",".join(qual_names),
3227 'tables': ",".join(tables),
3228 'table': self._table,
3229 'extra': " OR ".join(rule_clause) if rule_clause else "TRUE",
3230 'order': self._parent_order or self._order,
3234 for sub_ids in cr.split_for_in_conditions(self.ids):
3235 cr.execute(query, [tuple(sub_ids)] + rule_params)
3236 result.extend(cr.dictfetchall())
3238 ids = [vals['id'] for vals in result]
3241 # translate the fields if necessary
3242 if context.get('lang'):
3243 ir_translation = env['ir.translation']
3244 for f in fields_pre:
3245 if self._columns[f].translate:
3246 #TODO: optimize out of this loop
3247 res_trans = ir_translation._get_ids(
3248 '%s,%s' % (self._name, f), 'model', context['lang'], ids)
3249 for vals in result:
3250 vals[f] = res_trans.get(vals['id'], False) or vals[f]
3252 # apply the symbol_get functions of the fields we just read
3253 for f in fields_pre:
3254 symbol_get = self._columns[f]._symbol_get
3255 if symbol_get:
3256 for vals in result:
3257 vals[f] = symbol_get(vals[f])
3259 # store result in cache for POST fields
3260 for vals in result:
3261 record = self.browse(vals['id'])
3262 record._cache.update(record._convert_to_cache(vals, validate=False))
3264 # determine the fields that must be processed now
3265 fields_post = [f for f in field_names if not self._columns[f]._classic_write]
3267 # Compute POST fields, grouped by multi
3268 by_multi = defaultdict(list)
3269 for f in fields_post:
3270 by_multi[self._columns[f]._multi].append(f)
3272 for multi, fs in by_multi.iteritems():
3274 res2 = self._columns[fs[0]].get(cr, self._model, ids, fs, user, context=context, values=result)
3275 assert res2 is not None, \
3276 'The function field "%s" on the "%s" model returned None\n' \
3277 '(a dictionary was expected).' % (fs[0], self._name)
3279 # TOCHECK: why we got a string instead of a dict in python 2.6
3280 # if isinstance(res2[vals['id']], str): res2[vals['id']] = eval(res2[vals['id']])
3281 multi_fields = res2.get(vals['id'], {})
3284 vals[f] = multi_fields.get(f, [])
3287 res2 = self._columns[f].get(cr, self._model, ids, f, user, context=context, values=result)
3290 vals[f] = res2[vals['id']]
3294 # Warn about deprecated fields now that fields_pre and fields_post are computed
3295 for f in field_names:
3296 column = self._columns[f]
3297 if column.deprecated:
3298 _logger.warning('Field %s.%s is deprecated: %s', self._name, f, column.deprecated)
3300 # store result in cache
3301 for vals in result:
3302 record = self.browse(vals.pop('id'))
3303 record._cache.update(record._convert_to_cache(vals, validate=False))
3305 # store failed values in cache for the records that could not be read
3306 fetched = self.browse(ids)
3307 missing = self - fetched
3309 extras = fetched - self
3312 _("Database fetch misses ids ({}) and has extra ids ({}), may be caused by a type incoherence in a previous request").format(
3313 ', '.join(map(repr, missing._ids)),
3314 ', '.join(map(repr, extras._ids)),
3316 # store an access error exception in existing records
3318 _('The requested operation cannot be completed due to security restrictions. Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
3319 (self._name, 'read')
3321 forbidden = missing.exists()
3322 forbidden._cache.update(FailedValue(exc))
3323 # store a missing error exception in non-existing records
3325 _('One of the documents you are trying to access has been deleted, please try again after refreshing.')
3327 (missing - forbidden)._cache.update(FailedValue(exc))
3330 def get_metadata(self):
3332 Returns some metadata about the given records.
3334 :return: list of ownership dictionaries for each requested record
3335 :rtype: list of dictionaries with the following keys:
3338 * create_uid: user who created the record
3339 * create_date: date when the record was created
3340 * write_uid: last user who changed the record
3341 * write_date: date of the last change to the record
3342 * xmlid: XML ID to use to refer to this record (if there is one), in format ``module.name``
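A sketch of a possible return value (ids, dates and names are
illustrative assumptions)::

    [{'id': 7,
      'create_uid': (1, 'Administrator'),
      'create_date': '2014-01-01 10:00:00',
      'write_uid': (1, 'Administrator'),
      'write_date': '2014-01-02 11:30:00',
      'xmlid': 'base.partner_root'}]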
3345 if self._log_access:
3346 fields += ['create_uid', 'create_date', 'write_uid', 'write_date']
3347 quoted_table = '"%s"' % self._table
3348 fields_str = ",".join('%s.%s' % (quoted_table, field) for field in fields)
3349 query = '''SELECT %s, __imd.module, __imd.name
3350 FROM %s LEFT JOIN ir_model_data __imd
3351 ON (__imd.model = %%s and __imd.res_id = %s.id)
3352 WHERE %s.id IN %%s''' % (fields_str, quoted_table, quoted_table, quoted_table)
3353 self._cr.execute(query, (self._name, tuple(self.ids)))
3354 res = self._cr.dictfetchall()
3356 uids = set(r[k] for r in res for k in ['write_uid', 'create_uid'] if r.get(k))
3357 names = dict(self.env['res.users'].browse(uids).name_get())
3361 value = r[key] = r[key] or False
3362 if key in ('write_uid', 'create_uid') and value in names:
3363 r[key] = (value, names[value])
3364 r['xmlid'] = ("%(module)s.%(name)s" % r) if r['name'] else False
3365 del r['name'], r['module']
3368 def _check_concurrency(self, cr, ids, context):
3371 if not (context.get(self.CONCURRENCY_CHECK_FIELD) and self._log_access):
3373 check_clause = "(id = %s AND %s < COALESCE(write_date, create_date, (now() at time zone 'UTC'))::timestamp)"
3374 for sub_ids in cr.split_for_in_conditions(ids):
3377 id_ref = "%s,%s" % (self._name, id)
3378 update_date = context[self.CONCURRENCY_CHECK_FIELD].pop(id_ref, None)
3380 ids_to_check.extend([id, update_date])
3381 if not ids_to_check:
3383 cr.execute("SELECT id FROM %s WHERE %s" % (self._table, " OR ".join([check_clause]*(len(ids_to_check)/2))), tuple(ids_to_check))
3386 # mention the first one only to keep the error message readable
3387 raise except_orm('ConcurrencyException', _('A document was modified since you last viewed it (%s:%d)') % (self._description, res[0]))
3389 def _check_record_rules_result_count(self, cr, uid, ids, result_ids, operation, context=None):
3390 """Verify the returned rows after applying record rules matches
3391 the length of `ids`, and raise an appropriate exception if it does not.
3395 ids, result_ids = set(ids), set(result_ids)
3396 missing_ids = ids - result_ids
3398 # Attempt to distinguish record rule restriction vs deleted records,
3399 # to provide a more specific error message - check if the missing ids still exist in the database
3400 cr.execute('SELECT id FROM ' + self._table + ' WHERE id IN %s', (tuple(missing_ids),))
3401 forbidden_ids = [x[0] for x in cr.fetchall()]
3403 # the missing ids are (at least partially) hidden by access rules
3404 if uid == SUPERUSER_ID:
3406 _logger.warning('Access Denied by record rules for operation: %s on record ids: %r, uid: %s, model: %s', operation, forbidden_ids, uid, self._name)
3407 raise except_orm(_('Access Denied'),
3408 _('The requested operation cannot be completed due to security restrictions. Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
3409 (self._description, operation))
3411 # If we get here, the missing_ids are not in the database
3412 if operation in ('read','unlink'):
3413 # No need to warn about deleting an already deleted record.
3414 # And no error when reading a record that was deleted, to prevent spurious
3415 # errors for non-transactional search/read sequences coming from clients
3417 _logger.warning('Failed operation on deleted record(s): %s, uid: %s, model: %s', operation, uid, self._name)
3418 raise except_orm(_('Missing document(s)'),
3419 _('One of the documents you are trying to access has been deleted, please try again after refreshing.'))
3422 def check_access_rights(self, cr, uid, operation, raise_exception=True): # no context on purpose.
3423 """Verifies that the operation given by ``operation`` is allowed for the user
3424 according to the access rights."""
3425 return self.pool.get('ir.model.access').check(cr, uid, self._name, operation, raise_exception)
3427 def check_access_rule(self, cr, uid, ids, operation, context=None):
3428 """Verifies that the operation given by ``operation`` is allowed for the user
3429 according to ir.rules.
3431 :param operation: one of ``write``, ``unlink``
3432 :raise except_orm: * if current ir.rules do not permit this operation.
3433 :return: None if the operation is allowed
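A minimal usage sketch (the ids are illustrative assumptions)::

    # raises except_orm if some of the given records may not be written
    model.check_access_rule(cr, uid, [1, 2], 'write', context=context)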
3435 if uid == SUPERUSER_ID:
3438 if self.is_transient():
3439 # Only one single implicit access rule for transient models: owner only!
3440 # This is ok to hardcode because we assert that TransientModels always
3441 # have log_access enabled so that the create_uid column is always there.
3442 # And even with _inherits, these fields are always present in the local
3443 # table too, so no need for JOINs.
3444 cr.execute("""SELECT distinct create_uid
3446 WHERE id IN %%s""" % self._table, (tuple(ids),))
3447 uids = [x[0] for x in cr.fetchall()]
3448 if len(uids) != 1 or uids[0] != uid:
3449 raise except_orm(_('Access Denied'),
3450 _('For this kind of document, you may only access records you created yourself.\n\n(Document type: %s)') % (self._description,))
3452 where_clause, where_params, tables = self.pool.get('ir.rule').domain_get(cr, uid, self._name, operation, context=context)
3454 where_clause = ' and ' + ' and '.join(where_clause)
3455 for sub_ids in cr.split_for_in_conditions(ids):
3456 cr.execute('SELECT ' + self._table + '.id FROM ' + ','.join(tables) +
3457 ' WHERE ' + self._table + '.id IN %s' + where_clause,
3458 [sub_ids] + where_params)
3459 returned_ids = [x['id'] for x in cr.dictfetchall()]
3460 self._check_record_rules_result_count(cr, uid, sub_ids, returned_ids, operation, context=context)
3462 def create_workflow(self, cr, uid, ids, context=None):
3463 """Create a workflow instance for each given record IDs."""
3464 from openerp import workflow
3465 for res_id in ids:
3466 workflow.trg_create(uid, self._name, res_id, cr)
3467 # self.invalidate_cache(cr, uid, context=context) ?
3470 def delete_workflow(self, cr, uid, ids, context=None):
3471 """Delete the workflow instances bound to the given record IDs."""
3472 from openerp import workflow
3473 for res_id in ids:
3474 workflow.trg_delete(uid, self._name, res_id, cr)
3475 self.invalidate_cache(cr, uid, context=context)
3478 def step_workflow(self, cr, uid, ids, context=None):
3479 """Reevaluate the workflow instances of the given record IDs."""
3480 from openerp import workflow
3481 for res_id in ids:
3482 workflow.trg_write(uid, self._name, res_id, cr)
3483 # self.invalidate_cache(cr, uid, context=context) ?
3486 def signal_workflow(self, cr, uid, ids, signal, context=None):
3487 """Send given workflow signal and return a dict mapping ids to workflow results"""
3488 from openerp import workflow
3489 result = {}
3490 for res_id in ids:
3491 result[res_id] = workflow.trg_validate(uid, self._name, res_id, signal, cr)
3492 # self.invalidate_cache(cr, uid, context=context) ?
3495 def redirect_workflow(self, cr, uid, old_new_ids, context=None):
3496 """ Rebind the workflow instance bound to the given 'old' record IDs to
3497 the given 'new' IDs. (``old_new_ids`` is a list of pairs ``(old, new)``.)
3499 from openerp import workflow
3500 for old_id, new_id in old_new_ids:
3501 workflow.trg_redirect(uid, self._name, old_id, new_id, cr)
3502 self.invalidate_cache(cr, uid, context=context)
3505 def unlink(self, cr, uid, ids, context=None):
3508 Deletes the records of the current set
3510 :raise AccessError: * if user has no unlink rights on the requested object
3511 * if user tries to bypass access rules for unlink on the requested object
3512 :raise UserError: if the record is a default property for other records
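A minimal usage sketch (the ids are illustrative assumptions)::

    # old-style call; deletes the records with ids 7 and 8
    model.unlink(cr, uid, [7, 8], context=context)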
3517 if isinstance(ids, (int, long)):
3520 result_store = self._store_get_values(cr, uid, ids, self._fields.keys(), context)
3522 # for recomputing new-style fields
3523 recs = self.browse(cr, uid, ids, context)
3524 recs.modified(self._fields)
3526 self._check_concurrency(cr, ids, context)
3528 self.check_access_rights(cr, uid, 'unlink')
3530 ir_property = self.pool.get('ir.property')
3532 # Check if the records are used as default properties.
3533 domain = [('res_id', '=', False),
3534 ('value_reference', 'in', ['%s,%s' % (self._name, i) for i in ids]),
3536 if ir_property.search(cr, uid, domain, context=context):
3537 raise except_orm(_('Error'), _('Unable to delete this document because it is used as a default property'))
3539 # Delete the records' properties.
3540 property_ids = ir_property.search(cr, uid, [('res_id', 'in', ['%s,%s' % (self._name, i) for i in ids])], context=context)
3541 ir_property.unlink(cr, uid, property_ids, context=context)
3543 self.delete_workflow(cr, uid, ids, context=context)
3545 self.check_access_rule(cr, uid, ids, 'unlink', context=context)
3546 pool_model_data = self.pool.get('ir.model.data')
3547 ir_values_obj = self.pool.get('ir.values')
3548 ir_attachment_obj = self.pool.get('ir.attachment')
3549 for sub_ids in cr.split_for_in_conditions(ids):
3550 cr.execute('delete from ' + self._table + ' ' \
3551 'where id IN %s', (sub_ids,))
3553 # Removing the ir_model_data reference if the record being deleted is a record created by xml/csv file,
3554 # as these are not connected with real database foreign keys, and would be dangling references.
3555 # Note: following steps performed as admin to avoid access rights restrictions, and with no context
3556 # to avoid possible side-effects during admin calls.
3557 # Step 1. Calling unlink of ir_model_data only for the affected IDs
3558 reference_ids = pool_model_data.search(cr, SUPERUSER_ID, [('res_id','in',list(sub_ids)),('model','=',self._name)])
3559 # Step 2. Marching towards the real deletion of referenced records
3561 pool_model_data.unlink(cr, SUPERUSER_ID, reference_ids)
3563 # For the same reason, removing the record relevant to ir_values
3564 ir_value_ids = ir_values_obj.search(cr, uid,
3565 ['|',('value','in',['%s,%s' % (self._name, sid) for sid in sub_ids]),'&',('res_id','in',list(sub_ids)),('model','=',self._name)],
3568 ir_values_obj.unlink(cr, uid, ir_value_ids, context=context)
3570 # For the same reason, removing the record relevant to ir_attachment
3571 # The search is performed with SQL, as the search method of ir_attachment is overridden to hide attachments of deleted records
3572 cr.execute('select id from ir_attachment where res_model = %s and res_id in %s', (self._name, sub_ids))
3573 ir_attachment_ids = [ir_attachment[0] for ir_attachment in cr.fetchall()]
3574 if ir_attachment_ids:
3575 ir_attachment_obj.unlink(cr, uid, ir_attachment_ids, context=context)
3577 # invalidate the *whole* cache, since the orm does not handle all
3578 # changes made in the database, like cascading delete!
3579 recs.invalidate_cache()
3581 for order, obj_name, store_ids, fields in result_store:
3582 if obj_name == self._name:
3583 effective_store_ids = set(store_ids) - set(ids)
3584 else:
3585 effective_store_ids = store_ids
3586 if effective_store_ids:
3587 obj = self.pool[obj_name]
3588 cr.execute('select id from '+obj._table+' where id IN %s', (tuple(effective_store_ids),))
3589 rids = map(lambda x: x[0], cr.fetchall())
3590 if rids:
3591 obj._store_set_values(cr, uid, rids, fields, context)
3593 # recompute new-style fields
3602 def write(self, vals):
3605 Updates all records in the current set with the provided values.
3607 :param dict vals: fields to update and the value to set on them e.g::
3609 {'foo': 1, 'bar': "Qux"}
3611 will set the field ``foo`` to ``1`` and the field ``bar`` to
3612 ``"Qux"`` if those are valid (otherwise it will trigger an error).
3614 :raise AccessError: * if user has no write rights on the requested object
3615 * if user tries to bypass access rules for write on the requested object
3616 :raise ValidateError: if user tries to enter an invalid value for a selection field
3617 :raise UserError: if a loop would be created in a hierarchy of objects as a result of the operation (such as setting an object as its own parent)
3619 * For numeric fields (:class:`~openerp.fields.Integer`,
3620 :class:`~openerp.fields.Float`) the value should be of the
3622 * For :class:`~openerp.fields.Boolean`, the value should be a
3623 :class:`python:bool`
3624 * For :class:`~openerp.fields.Selection`, the value should match the
3625 selection values (generally :class:`python:str`, sometimes
3626 :class:`python:int`)
3627 * For :class:`~openerp.fields.Many2one`, the value should be the
3628 database identifier of the record to set
3629 * Other non-relational fields use a string for value
3633 for historical and compatibility reasons,
3634 :class:`~openerp.fields.Date` and
3635 :class:`~openerp.fields.Datetime` fields use strings as values
3636 (written and read) rather than :class:`~python:datetime.date` or
3637 :class:`~python:datetime.datetime`. These date strings are
3638 UTC-only and formatted according to
3639 :const:`openerp.tools.misc.DEFAULT_SERVER_DATE_FORMAT` and
3640 :const:`openerp.tools.misc.DEFAULT_SERVER_DATETIME_FORMAT`
3641 * .. _openerp/models/relationals/format:
3643 :class:`~openerp.fields.One2many` and
3644 :class:`~openerp.fields.Many2many` use a special "commands" format to
3645 manipulate the set of records stored in/associated with the field.
3647 This format is a list of triplets executed sequentially, where each
3648 triplet is a command to execute on the set of records. Not all
3649 commands apply in all situations. Possible commands are:
3652 adds a new record created from the provided ``value`` dict.
3654 updates an existing record of id ``id`` with the values in
3655 ``values``. Can not be used in :meth:`~.create`.
3657 removes the record of id ``id`` from the set, then deletes it
3658 (from the database). Can not be used in :meth:`~.create`.
3660 removes the record of id ``id`` from the set, but does not
3661 delete it. Can not be used on
3662 :class:`~openerp.fields.One2many`. Can not be used in
3665 adds an existing record of id ``id`` to the set. Can not be
3666 used on :class:`~openerp.fields.One2many`.
3668 removes all records from the set, equivalent to using the
3669 command ``3`` on every record explicitly. Can not be used on
3670 :class:`~openerp.fields.One2many`. Can not be used in
3673 replaces all existing records in the set by the ``ids`` list,
3674 equivalent to using the command ``5`` followed by a command
3675 ``4`` for each ``id`` in ``ids``. Can not be used on
3676 :class:`~openerp.fields.One2many`.
3678 .. note:: Values marked as ``_`` in the list above are ignored and
3679 can be anything, generally ``0`` or ``False``.
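A minimal sketch of this commands format (the field names and ids are
illustrative assumptions)::

    # assumes a one2many field 'child_ids' and a many2many
    # field 'category_id' on the record
    record.write({
        'name': "New name",
        'child_ids': [
            (0, 0, {'name': "created line"}),   # create and link a new line
            (1, 42, {'name': "updated line"}),  # update the line with id 42
            (2, 43),                            # unlink and delete line 43
        ],
        'category_id': [(6, 0, [1, 2, 3])],     # replace the whole set
    })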
3684 self._check_concurrency(self._ids)
3685 self.check_access_rights('write')
3687 # No user-driven update of these columns
3688 for field in itertools.chain(MAGIC_COLUMNS, ('parent_left', 'parent_right')):
3689 vals.pop(field, None)
3691 # split up fields into old-style and pure new-style ones
3692 old_vals, new_vals, unknown = {}, {}, []
3693 for key, val in vals.iteritems():
3694 field = self._fields.get(key)
3696 if field.column or field.inherited:
3697 old_vals[key] = val
3698 if field.inverse and not field.inherited:
3699 new_vals[key] = val
3704 _logger.warning("%s.write() with unknown fields: %s", self._name, ', '.join(sorted(unknown)))
3706 # write old-style fields with (low-level) method _write
3708 self._write(old_vals)
3710 # put the values of pure new-style fields into cache, and inverse them
3713 record._cache.update(record._convert_to_cache(new_vals, update=True))
3714 for key in new_vals:
3715 self._fields[key].determine_inverse(self)
3719 def _write(self, cr, user, ids, vals, context=None):
3720 # low-level implementation of write()
3725 self.check_field_access_rights(cr, user, 'write', vals.keys())
3726 deleted_related = defaultdict(list)
3727 for field in vals.keys():
3729 if field in self._columns:
3730 fobj = self._columns[field]
3731 elif field in self._inherit_fields:
3732 fobj = self._inherit_fields[field][2]
3735 if fobj._type in ['one2many', 'many2many'] and vals[field]:
3736 for wtuple in vals[field]:
3737 if isinstance(wtuple, (tuple, list)) and wtuple[0] == 2:
3738 deleted_related[fobj._obj].append(wtuple[1])
3743 for group in groups:
3744 module = group.split(".")[0]
3745 grp = group.split(".")[1]
3746 cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name=%s and module=%s and model=%s) and uid=%s", \
3747 (grp, module, 'res.groups', user))
3748 readonly = cr.fetchall()
3749 if readonly[0][0] >= 1:
3756 result = self._store_get_values(cr, user, ids, vals.keys(), context) or []
3758 # for recomputing new-style fields
3759 recs = self.browse(cr, user, ids, context)
3760 modified_fields = list(vals)
3761 if self._log_access:
3762 modified_fields += ['write_date', 'write_uid']
3763 recs.modified(modified_fields)
3765 parents_changed = []
3766 parent_order = self._parent_order or self._order
3767 if self._parent_store and (self._parent_name in vals) and not context.get('defer_parent_store_computation'):
3768 # The parent_left/right computation may take up to
3769 # 5 seconds. No need to recompute the values if the
3770 # parent is the same.
3771 # Note: to respect parent_order, nodes must be processed in
3772 # order, so ``parents_changed`` must be ordered properly.
3773 parent_val = vals[self._parent_name]
3775 query = "SELECT id FROM %s WHERE id IN %%s AND (%s != %%s OR %s IS NULL) ORDER BY %s" % \
3776 (self._table, self._parent_name, self._parent_name, parent_order)
3777 cr.execute(query, (tuple(ids), parent_val))
3779 query = "SELECT id FROM %s WHERE id IN %%s AND (%s IS NOT NULL) ORDER BY %s" % \
3780 (self._table, self._parent_name, parent_order)
3781 cr.execute(query, (tuple(ids),))
3782 parents_changed = map(operator.itemgetter(0), cr.fetchall())
3784 updates = [] # list of (column, expr) or (column, pattern, value)
3788 totranslate = context.get('lang', False) and (context['lang'] != 'en_US')
3790 ffield = self._fields.get(field)
3791 if ffield and ffield.deprecated:
3792 _logger.warning('Field %s.%s is deprecated: %s', self._name, field, ffield.deprecated)
3793 if field in self._columns:
3794 column = self._columns[field]
3795 if hasattr(column, 'selection') and vals[field]:
3796 self._check_selection_field_value(cr, user, field, vals[field], context=context)
3797 if column._classic_write and not hasattr(column, '_fnct_inv'):
3798 if (not totranslate) or not column.translate:
3799 updates.append((field, '%s', column._symbol_set[1](vals[field])))
3800 direct.append(field)
3802 upd_todo.append(field)
3804 updend.append(field)
3806 if self._log_access:
3807 updates.append(('write_uid', '%s', user))
3808 updates.append(('write_date', "(now() at time zone 'UTC')"))
3809 direct.append('write_uid')
3810 direct.append('write_date')
3813 self.check_access_rule(cr, user, ids, 'write', context=context)
3814 query = 'UPDATE "%s" SET %s WHERE id IN %%s' % (
3815 self._table, ','.join('"%s"=%s' % u[:2] for u in updates),
3817 params = tuple(u[2] for u in updates if len(u) > 2)
3818 for sub_ids in cr.split_for_in_conditions(ids):
3819 cr.execute(query, params + (sub_ids,))
3820 if cr.rowcount != len(sub_ids):
3821 raise MissingError(_('One of the records you are trying to modify has already been deleted (Document type: %s).') % self._description)
3826 if self._columns[f].translate:
3827 src_trans = self.pool[self._name].read(cr, user, ids, [f])[0][f]
3830 # Inserting value to DB
3831 context_wo_lang = dict(context, lang=None)
3832 self.write(cr, user, ids, {f: vals[f]}, context=context_wo_lang)
3833 self.pool.get('ir.translation')._set_ids(cr, user, self._name+','+f, 'model', context['lang'], ids, vals[f], src_trans)
3835 # invalidate and mark new-style fields to recompute; do this before
3836 # setting other fields, because it can require the value of computed
3837 # fields, e.g., a one2many checking constraints on records
3838 recs.modified(direct)
3840 # call the 'set' method of fields which are not classic_write
3841 upd_todo.sort(lambda x, y: self._columns[x].priority-self._columns[y].priority)
3843 # the default elements in context must be removed when calling a one2many or many2many
3844 rel_context = context.copy()
3845 for c in context.items():
3846 if c[0].startswith('default_'):
3847 del rel_context[c[0]]
3849 for field in upd_todo:
3851 result += self._columns[field].set(cr, self, id, field, vals[field], user, context=rel_context) or []
3853 # for recomputing new-style fields
3854 recs.modified(upd_todo)
3856 unknown_fields = updend[:]
3857 for table in self._inherits:
3858 col = self._inherits[table]
3860 for sub_ids in cr.split_for_in_conditions(ids):
3861 cr.execute('select distinct "'+col+'" from "'+self._table+'" ' \
3862 'where id IN %s', (sub_ids,))
3863 nids.extend([x[0] for x in cr.fetchall()])
3867 if self._inherit_fields[val][0] == table:
3869 unknown_fields.remove(val)
3871 self.pool[table].write(cr, user, nids, v, context)
3875 'No such field(s) in model %s: %s.',
3876 self._name, ', '.join(unknown_fields))
3878 # check Python constraints
3879 recs._validate_fields(vals)
3881 # TODO: use _order to set dest at the right position and not first node of parent
3882 # We can't defer parent_store computation because the stored function
3883 # fields that are computed may refer (directly or indirectly) to
3884 # parent_left/right (via a child_of domain)
3887 self.pool._init_parent[self._name] = True
3889 order = self._parent_order or self._order
3890 parent_val = vals[self._parent_name]
3892 clause, params = '%s=%%s' % (self._parent_name,), (parent_val,)
3894 clause, params = '%s IS NULL' % (self._parent_name,), ()
3896 for id in parents_changed:
3897 cr.execute('SELECT parent_left, parent_right FROM %s WHERE id=%%s' % (self._table,), (id,))
3898 pleft, pright = cr.fetchone()
3899 distance = pright - pleft + 1
3901 # Positions of current siblings, to locate proper insertion point;
3902 # this can _not_ be fetched outside the loop, as it needs to be refreshed
3903 # after each update, in case several nodes are sequentially inserted one
3904 # next to the other (i.e. computed incrementally)
3905 cr.execute('SELECT parent_right, id FROM %s WHERE %s ORDER BY %s' % (self._table, clause, parent_order), params)
3906 parents = cr.fetchall()
3908 # Find Position of the element
3910 for (parent_pright, parent_id) in parents:
3913 position = parent_pright and parent_pright + 1 or 1
3915 # It's the first node of the parent
3920 cr.execute('select parent_left from '+self._table+' where id=%s', (parent_val,))
3921 position = cr.fetchone()[0] + 1
3923 if pleft < position <= pright:
3924 raise except_orm(_('UserError'), _('Recursivity Detected.'))
3926 if pleft < position:
3927 cr.execute('update '+self._table+' set parent_left=parent_left+%s where parent_left>=%s', (distance, position))
3928 cr.execute('update '+self._table+' set parent_right=parent_right+%s where parent_right>=%s', (distance, position))
3929 cr.execute('update '+self._table+' set parent_left=parent_left+%s, parent_right=parent_right+%s where parent_left>=%s and parent_left<%s', (position-pleft, position-pleft, pleft, pright))
3931 cr.execute('update '+self._table+' set parent_left=parent_left+%s where parent_left>=%s', (distance, position))
3932 cr.execute('update '+self._table+' set parent_right=parent_right+%s where parent_right>=%s', (distance, position))
3933 cr.execute('update '+self._table+' set parent_left=parent_left-%s, parent_right=parent_right-%s where parent_left>=%s and parent_left<%s', (pleft-position+distance, pleft-position+distance, pleft+distance, pright+distance))
3934 recs.invalidate_cache(['parent_left', 'parent_right'])
3936 result += self._store_get_values(cr, user, ids, vals.keys(), context)
3940 for order, model_name, ids_to_update, fields_to_recompute in result:
3941 key = (model_name, tuple(fields_to_recompute))
3942 done.setdefault(key, {})
3943 # avoid doing the same computation several times
3944 todo = []
3945 for id in ids_to_update:
3946 if id not in done[key]:
3947 done[key][id] = True
3948 if id not in deleted_related[model_name]:
3949 todo.append(id)
3950 self.pool[model_name]._store_set_values(cr, user, todo, fields_to_recompute, context)
3952 # recompute new-style fields
3953 if context.get('recompute', True):
3956 self.step_workflow(cr, user, ids, context=context)
3960 # TODO: Should set perm to user.xxx
3963 @api.returns('self', lambda value: value.id)
3964 def create(self, vals):
3965 """ create(vals) -> record
3967 Creates a new record for the model.
3969 The new record is initialized using the values from ``vals`` and
3970 if necessary those from :meth:`~.default_get`.
3973 values for the model's fields, as a dictionary::
3975 {'field_name': field_value, ...}
3977 see :meth:`~.write` for details
3978 :return: new record created
3979 :raise AccessError: * if user has no create rights on the requested object
3980 * if user tries to bypass access rules for create on the requested object
3981 :raise ValidateError: if user tries to enter an invalid value for a selection field
3982 :raise UserError: if a loop would be created in a hierarchy of objects as a result of the operation (such as setting an object as its own parent)
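A minimal usage sketch (the model name and values are illustrative
assumptions)::

    # new-style call; assumes the model has 'name' and 'parent_id' fields
    record = self.env['res.partner'].create({
        'name': "A new partner",
        'parent_id': 1,
    })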
3984 self.check_access_rights('create')
3986 # add missing defaults, and drop fields that may not be set by user
3987 vals = self._add_missing_default_values(vals)
3988 for field in itertools.chain(MAGIC_COLUMNS, ('parent_left', 'parent_right')):
3989 vals.pop(field, None)
3991 # split up fields into old-style and pure new-style ones
3992 old_vals, new_vals, unknown = {}, {}, []
3993 for key, val in vals.iteritems():
3994 field = self._fields.get(key)
3996 if field.column or field.inherited:
3997 old_vals[key] = val
3998 if field.inverse and not field.inherited:
3999 new_vals[key] = val
4004 _logger.warning("%s.create() with unknown fields: %s", self._name, ', '.join(sorted(unknown)))
4006 # create record with old-style fields
4007 record = self.browse(self._create(old_vals))
4009 # put the values of pure new-style fields into cache, and inverse them
4010 record._cache.update(record._convert_to_cache(new_vals))
4011 for key in new_vals:
4012 self._fields[key].determine_inverse(record)
4016 def _create(self, cr, user, vals, context=None):
4017 # low-level implementation of create()
4021 if self.is_transient():
4022 self._transient_vacuum(cr, user)
4025 for v in self._inherits:
4026 if self._inherits[v] not in vals:
4029 tocreate[v] = {'id': vals[self._inherits[v]]}
4032 # list of column assignments defined as tuples like:
4033 # (column_name, format_string, column_value)
4034 # (column_name, sql_formula)
4035 # Those tuples will be used by the string formatting for the INSERT
4037 ('id', "nextval('%s')" % self._sequence),
4042 for v in vals.keys():
4043 if v in self._inherit_fields and v not in self._columns:
4044 (table, col, col_detail, original_parent) = self._inherit_fields[v]
4045 tocreate[table][v] = vals[v]
4048 if (v not in self._inherit_fields) and (v not in self._columns):
4050 unknown_fields.append(v)
4053 'No such field(s) in model %s: %s.',
4054 self._name, ', '.join(unknown_fields))
4056 for table in tocreate:
4057 if self._inherits[table] in vals:
4058 del vals[self._inherits[table]]
4060 record_id = tocreate[table].pop('id', None)
4062 if record_id is None or not record_id:
4063 record_id = self.pool[table].create(cr, user, tocreate[table], context=context)
4065 self.pool[table].write(cr, user, [record_id], tocreate[table], context=context)
4067 updates.append((self._inherits[table], '%s', record_id))
4069 # Start: Set bool fields to be False if they are not touched (to make search more powerful)
4070 bool_fields = [x for x in self._columns.keys() if self._columns[x]._type=='boolean']
4072 for bool_field in bool_fields:
4073 if bool_field not in vals:
4074 vals[bool_field] = False
4076 for field in vals.keys():
4078 if field in self._columns:
4079 fobj = self._columns[field]
4081 fobj = self._inherit_fields[field][2]
4087 for group in groups:
4088 module = group.split(".")[0]
4089 grp = group.split(".")[1]
4090 cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name='%s' and module='%s' and model='%s') and uid=%s" % \
4091 (grp, module, 'res.groups', user))
4092 readonly = cr.fetchall()
4093 if readonly[0][0] >= 1:
4096 elif readonly[0][0] == 0:
4104 current_field = self._columns[field]
4105 if current_field._classic_write:
4106 updates.append((field, '%s', current_field._symbol_set[1](vals[field])))
4108 #for the function fields that receive a value, we set them directly in the database
4109 #(they may be required), but we also need to trigger the _fnct_inv()
4110 if (hasattr(current_field, '_fnct_inv')) and not isinstance(current_field, fields.related):
4111 #TODO: this way of special-casing the related fields is really creepy, but it shouldn't be changed
4112 #one week before the release candidate. It seems the only good way to handle this correctly is to add an
4113 #attribute to make a field `really readonly` and thus totally ignored by create()... otherwise
4114 #if, for example, the related field has a default value (for usability) then the _fnct_inv is called and it
4115 #may raise an access rights error. Changing this is too big a change for now, and is thus postponed
4116 #until after the release; but, definitely, the behavior shouldn't be different for related and function
4118 upd_todo.append(field)
4120 #TODO: this `if` statement should be removed because there is no good reason to special-case the
4121 #related fields. See the above TODO comment for further explanations.
4122 if not isinstance(current_field, fields.related):
4123 upd_todo.append(field)
4124 if field in self._columns \
4125 and hasattr(current_field, 'selection') \
4127 self._check_selection_field_value(cr, user, field, vals[field], context=context)
4128 if self._log_access:
4129 updates.append(('create_uid', '%s', user))
4130 updates.append(('write_uid', '%s', user))
4131 updates.append(('create_date', "(now() at time zone 'UTC')"))
4132 updates.append(('write_date', "(now() at time zone 'UTC')"))
4134 # the list of tuples used in this formatting corresponds to
4135 # tuple(field_name, format, value)
4136 # In some cases, for example (id, create_date, write_date), we do not
4137 # need to read the third value of the tuple, because the real value is
4138 # encoded in the second value (the format).
4140 """INSERT INTO "%s" (%s) VALUES(%s) RETURNING id""" % (
4142 ', '.join('"%s"' % u[0] for u in updates),
4143 ', '.join(u[1] for u in updates)
4145 tuple([u[2] for u in updates if len(u) > 2])
4148 id_new, = cr.fetchone()
4149 recs = self.browse(cr, user, id_new, context)
4151 if self._parent_store and not context.get('defer_parent_store_computation'):
4153 self.pool._init_parent[self._name] = True
4155 parent = vals.get(self._parent_name, False)
4157 cr.execute('select parent_right from '+self._table+' where '+self._parent_name+'=%s order by '+(self._parent_order or self._order), (parent,))
4159 result_p = cr.fetchall()
4160 for (pleft,) in result_p:
4165 cr.execute('select parent_left from '+self._table+' where id=%s', (parent,))
4166 pleft_old = cr.fetchone()[0]
4169 cr.execute('select max(parent_right) from '+self._table)
4170 pleft = cr.fetchone()[0] or 0
4171 cr.execute('update '+self._table+' set parent_left=parent_left+2 where parent_left>%s', (pleft,))
4172 cr.execute('update '+self._table+' set parent_right=parent_right+2 where parent_right>%s', (pleft,))
4173 cr.execute('update '+self._table+' set parent_left=%s,parent_right=%s where id=%s', (pleft+1, pleft+2, id_new))
4174 recs.invalidate_cache(['parent_left', 'parent_right'])
4176 # invalidate and mark new-style fields to recompute; do this before
4177 # setting other fields, because it can require the value of computed
4178 # fields, e.g., a one2many checking constraints on records
4179 recs.modified([u[0] for u in updates])
4181 # call the 'set' method of fields which are not classic_write
4182 upd_todo.sort(lambda x, y: self._columns[x].priority-self._columns[y].priority)
4184 # the default elements in context must be removed when calling a one2many or many2many
4185 rel_context = context.copy()
4186 for c in context.items():
4187 if c[0].startswith('default_'):
4188 del rel_context[c[0]]
4191 for field in upd_todo:
4192 result += self._columns[field].set(cr, self, id_new, field, vals[field], user, rel_context) or []
4194 # for recomputing new-style fields
4195 recs.modified(upd_todo)
4197 # check Python constraints
4198 recs._validate_fields(vals)
4200 if context.get('recompute', True):
4201 result += self._store_get_values(cr, user, [id_new],
4202 list(set(vals.keys() + self._inherits.values())),
4206 for order, model_name, ids, fields2 in result:
4207 if not (model_name, ids, fields2) in done:
4208 self.pool[model_name]._store_set_values(cr, user, ids, fields2, context)
4209 done.append((model_name, ids, fields2))
4210 # recompute new-style fields
4213 if self._log_create and context.get('recompute', True):
4214 message = self._description + \
4216 self.name_get(cr, user, [id_new], context=context)[0][1] + \
4217 "' " + _("created.")
4218 self.log(cr, user, id_new, message, True, context=context)
4220 self.check_access_rule(cr, user, [id_new], 'create', context=context)
4221 self.create_workflow(cr, user, [id_new], context=context)
4224 def _store_get_values(self, cr, uid, ids, fields, context):
4225 """Returns an ordered list of fields.function to call due to
4226 an update operation on ``fields`` of records with ``ids``,
4227 obtained by calling the 'store' triggers of these fields,
4228 as set up by their 'store' attribute.
4230 :return: [(priority, model_name, [record_ids,], [function_fields,])]
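For instance, a possible return value (the model and field names are
illustrative assumptions)::

    [(10, 'account.move', [1, 2], ['amount_total'])]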
4232 if fields is None: fields = []
4233 stored_functions = self.pool._store_function.get(self._name, [])
4235 # use indexed names for the details of the stored_functions:
4236 model_name_, func_field_to_compute_, target_ids_func_, trigger_fields_, priority_ = range(5)
4238 # only keep store triggers that should be triggered for the ``fields``
4240 triggers_to_compute = (
4241 f for f in stored_functions
4242 if not f[trigger_fields_] or set(fields).intersection(f[trigger_fields_])
4246 target_id_results = {}
4247 for store_trigger in triggers_to_compute:
4248 target_func_id_ = id(store_trigger[target_ids_func_])
4249 if target_func_id_ not in target_id_results:
4250 # use admin user for accessing objects having rules defined on store fields
4251 target_id_results[target_func_id_] = [i for i in store_trigger[target_ids_func_](self, cr, SUPERUSER_ID, ids, context) if i]
4252 target_ids = target_id_results[target_func_id_]
4254 # the compound key must consider the priority and model name
4255 key = (store_trigger[priority_], store_trigger[model_name_])
4256 for target_id in target_ids:
4257 to_compute_map.setdefault(key, {}).setdefault(target_id,set()).add(tuple(store_trigger))
4259 # Here to_compute_map looks like:
4260 # { (10, 'model_a') : { target_id1: [ (trigger_1_tuple, trigger_2_tuple) ], ... }
4261 # (20, 'model_a') : { target_id2: [ (trigger_3_tuple, trigger_4_tuple) ], ... }
4262 # (99, 'model_a') : { target_id1: [ (trigger_5_tuple, trigger_6_tuple) ], ... }
4265 # Now we need to generate the batch function calls list
4267 # { (10, 'model_a') : [(10, 'model_a', [record_ids,], [function_fields,])] }
4269 for ((priority,model), id_map) in to_compute_map.iteritems():
4270 trigger_ids_maps = {}
4271 # trigger_ids_maps =
4272 # { (function_1_tuple, function_2_tuple) : [target_id1, target_id2, ..] }
4273 for target_id, triggers in id_map.iteritems():
4274 trigger_ids_maps.setdefault(tuple(triggers), []).append(target_id)
4275 for triggers, target_ids in trigger_ids_maps.iteritems():
4276 call_map.setdefault((priority,model),[]).append((priority, model, target_ids,
4277 [t[func_field_to_compute_] for t in triggers]))
4280 result = reduce(operator.add, (call_map[k] for k in sorted(call_map)))
4283 def _store_set_values(self, cr, uid, ids, fields, context):
4284 """Calls the fields.function's "implementation function" for all ``fields``, on records with ``ids`` (taking care of
4285 respecting ``multi`` attributes), and stores the resulting values in the database directly."""
4290 if self._log_access:
4291 cr.execute('select id,write_date from '+self._table+' where id IN %s', (tuple(ids),))
4295 field_dict.setdefault(r[0], [])
4296 res_date = time.strptime((r[1])[:19], '%Y-%m-%d %H:%M:%S')
4297 write_date = datetime.datetime.fromtimestamp(time.mktime(res_date))
4298 for i in self.pool._store_function.get(self._name, []):
4300 up_write_date = write_date + datetime.timedelta(hours=i[5])
4301 if datetime.datetime.now() < up_write_date:
4303 field_dict[r[0]].append(i[1])
4309 if self._columns[f]._multi not in keys:
4310 keys.append(self._columns[f]._multi)
4311 todo.setdefault(self._columns[f]._multi, [])
4312 todo[self._columns[f]._multi].append(f)
4316 # use admin user for accessing objects having rules defined on store fields
4317 result = self._columns[val[0]].get(cr, self, ids, val, SUPERUSER_ID, context=context)
4318 for id, value in result.items():
4320 for f in value.keys():
4321 if f in field_dict[id]:
4323 updates = [] # list of (column, pattern, value)
4327 column = self._columns[v]
4328 if column._type == 'many2one':
4330 value[v] = value[v][0]
4333 updates.append((v, '%s', column._symbol_set[1](value[v])))
4335 query = 'UPDATE "%s" SET %s WHERE id = %%s' % (
4336 self._table, ','.join('"%s"=%s' % u[:2] for u in updates),
4338 params = tuple(u[2] for u in updates)
4339 cr.execute(query, params + (id,))
4343 column = self._columns[f]
4344 # use admin user for accessing objects having rules defined on store fields
4345 result = column.get(cr, self, ids, f, SUPERUSER_ID, context=context)
4346 for r in result.keys():
4348 if r in field_dict.keys():
4349 if f in field_dict[r]:
4351 for id, value in result.items():
4352 if column._type == 'many2one':
4357 query = 'UPDATE "%s" SET "%s"=%%s WHERE id = %%s' % (
4360 cr.execute(query, (column._symbol_set[1](value), id))
4362 # invalidate and mark new-style fields to recompute
4363 self.browse(cr, uid, ids, context).modified(fields)
4367 # TODO: improve with NULL
4368 def _where_calc(self, cr, user, domain, active_test=True, context=None):
4369 """Computes the WHERE clause needed to implement an OpenERP domain.
4370 :param domain: the domain to compute
4372 :param active_test: whether the default filtering of records with ``active``
4373 field set to ``False`` should be applied.
4374 :return: the query expressing the given domain
4375 :rtype: osv.query.Query
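A minimal usage sketch (the domain is an illustrative assumption)::

    query = self._where_calc(cr, uid, [('name', '=', 'Foo')])
    from_clause, where_clause, where_params = query.get_sql()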
4380 # if the object has a field named 'active', filter out all inactive
4381 # records unless they were explicitly asked for
4382 if 'active' in self._fields and active_test and context.get('active_test', True):
4384 # the item[0] trick below works for domain items and '&'/'|'/'!'
4386 if not any(item[0] == 'active' for item in domain):
4387 domain.insert(0, ('active', '=', 1))
4389 domain = [('active', '=', 1)]
4392 e = expression.expression(cr, user, domain, self, context)
4393 tables = e.get_tables()
4394 where_clause, where_params = e.to_sql()
4395 where_clause = where_clause and [where_clause] or []
4397 where_clause, where_params, tables = [], [], ['"%s"' % self._table]
4399 return Query(tables, where_clause, where_params)
4401 def _check_qorder(self, word):
4402 if not regex_order.match(word):
4403 raise except_orm(_('AccessError'), _('Invalid "order" specified. A valid "order" specification is a comma-separated list of valid field names (optionally followed by asc/desc for the direction)'))
4406 def _apply_ir_rules(self, cr, uid, query, mode='read', context=None):
4407 """Add what's missing in ``query`` to implement all appropriate ir.rules
4408 (using the ``model_name``'s rules or the current model's rules if ``model_name`` is None)
4410 :param query: the current query object
4412 if uid == SUPERUSER_ID:
4415 def apply_rule(added_clause, added_params, added_tables, parent_model=None):
4416 """ :param parent_model: name of the parent model, if the added
4417 clause comes from a parent model
4421 # as inherited rules are being applied, we need to add the missing JOIN
4422 # to reach the parent table (if it was not JOINed yet in the query)
4423 parent_alias = self._inherits_join_add(self, parent_model, query)
4424 # inherited rules are applied on the external table -> need to get the alias and replace
4425 parent_table = self.pool[parent_model]._table
4426 added_clause = [clause.replace('"%s"' % parent_table, '"%s"' % parent_alias) for clause in added_clause]
4427 # change references to parent_table to parent_alias, because we now use the alias to refer to the table
4429 for table in added_tables:
4430 # table is just a table name -> switch to the full alias
4431 if table == '"%s"' % parent_table:
4432 new_tables.append('"%s" as "%s"' % (parent_table, parent_alias))
4433 # table is already a full statement -> replace the reference to the table with its alias, which is correct given the way aliases are generated
4435 new_tables.append(table.replace('"%s"' % parent_table, '"%s"' % parent_alias))
4436 added_tables = new_tables
4437 query.where_clause += added_clause
4438 query.where_clause_params += added_params
4439 for table in added_tables:
4440 if table not in query.tables:
4441 query.tables.append(table)
4445 # apply main rules on the object
4446 rule_obj = self.pool.get('ir.rule')
4447 rule_where_clause, rule_where_clause_params, rule_tables = rule_obj.domain_get(cr, uid, self._name, mode, context=context)
4448 apply_rule(rule_where_clause, rule_where_clause_params, rule_tables)
4450 # apply ir.rules from the parents (through _inherits)
4451 for inherited_model in self._inherits:
4452 rule_where_clause, rule_where_clause_params, rule_tables = rule_obj.domain_get(cr, uid, inherited_model, mode, context=context)
4453 apply_rule(rule_where_clause, rule_where_clause_params, rule_tables,
4454 parent_model=inherited_model)
4456 def _generate_m2o_order_by(self, order_field, query):
4458 Add possibly missing JOIN to ``query`` and generate the ORDER BY clause for m2o fields,
4459 either native m2o fields or function/related fields that are stored, including
4460 intermediate JOINs for inheritance if required.
4462 :return: the qualified field name to use in an ORDER BY clause to sort by ``order_field``
4464 if order_field not in self._columns and order_field in self._inherit_fields:
4465 # also add missing joins for reaching the table containing the m2o field
4466 qualified_field = self._inherits_join_calc(order_field, query)
4467 order_field_column = self._inherit_fields[order_field][2]
4469 qualified_field = '"%s"."%s"' % (self._table, order_field)
4470 order_field_column = self._columns[order_field]
4472 assert order_field_column._type == 'many2one', 'Invalid field passed to _generate_m2o_order_by()'
4473 if not order_field_column._classic_write and not getattr(order_field_column, 'store', False):
4474 _logger.debug("Many2one function/related fields must be stored " \
4475 "to be used as ordering fields! Ignoring sorting for %s.%s",
4476 self._name, order_field)
4479 # figure out the applicable order_by for the m2o
4480 dest_model = self.pool[order_field_column._obj]
4481 m2o_order = dest_model._order
4482 if not regex_order.match(m2o_order):
4483 # _order is complex, can't use it here, so we default to _rec_name
4484 m2o_order = dest_model._rec_name
4486 # extract the field names, to be able to qualify them and add desc/asc
4488 for order_part in m2o_order.split(","):
4489 m2o_order_list.append(order_part.strip().split(" ", 1)[0].strip())
4490 m2o_order = m2o_order_list
4492 # Join the dest m2o table if it's not joined yet. We use [LEFT] OUTER join here
4493 # as we don't want to exclude results that have NULL values for the m2o
4494 src_table, src_field = qualified_field.replace('"', '').split('.', 1)
4495 dst_alias, dst_alias_statement = query.add_join((src_table, dest_model._table, src_field, 'id', src_field), implicit=False, outer=True)
4496 qualify = lambda field: '"%s"."%s"' % (dst_alias, field)
4497 return map(qualify, m2o_order) if isinstance(m2o_order, list) else qualify(m2o_order)
4499 def _generate_order_by(self, order_spec, query):
4501 Attempt to construct an appropriate ORDER BY clause based on order_spec, which must be
4502 a comma-separated list of valid field names, optionally followed by an ASC or DESC direction.
4504 :raise" except_orm in case order_spec is malformed
4506 order_by_clause = ''
4507 order_spec = order_spec or self._order
4509 order_by_elements = []
4510 self._check_qorder(order_spec)
4511 for order_part in order_spec.split(','):
4512 order_split = order_part.strip().split(' ')
4513 order_field = order_split[0].strip()
4514 order_direction = order_split[1].strip() if len(order_split) == 2 else ''
4517 if order_field == 'id':
4518 order_by_elements.append('"%s"."%s" %s' % (self._table, order_field, order_direction))
4519 elif order_field in self._columns:
4520 order_column = self._columns[order_field]
4521 if order_column._classic_read:
4522 inner_clause = '"%s"."%s"' % (self._table, order_field)
4523 elif order_column._type == 'many2one':
4524 inner_clause = self._generate_m2o_order_by(order_field, query)
4526 continue # ignore non-readable or "non-joinable" fields
4527 elif order_field in self._inherit_fields:
4528 parent_obj = self.pool[self._inherit_fields[order_field][3]]
4529 order_column = parent_obj._columns[order_field]
4530 if order_column._classic_read:
4531 inner_clause = self._inherits_join_calc(order_field, query)
4532 elif order_column._type == 'many2one':
4533 inner_clause = self._generate_m2o_order_by(order_field, query)
4535 continue # ignore non-readable or "non-joinable" fields
4537 raise ValueError(_("Sorting field %s not found on model %s") % (order_field, self._name))
4538 if order_column and order_column._type == 'boolean':
4539 inner_clause = "COALESCE(%s, false)" % inner_clause
4541 if isinstance(inner_clause, list):
4542 for clause in inner_clause:
4543 order_by_elements.append("%s %s" % (clause, order_direction))
4545 order_by_elements.append("%s %s" % (inner_clause, order_direction))
4546 if order_by_elements:
4547 order_by_clause = ",".join(order_by_elements)
4549 return order_by_clause and (' ORDER BY %s ' % order_by_clause) or ''
4551 def _search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False, access_rights_uid=None):
4553 Private implementation of search() method, allowing specifying the uid to use for the access right check.
4554 This is useful for example when filling in the selection list for a drop-down and avoiding access rights errors,
4555 by specifying ``access_rights_uid=1`` to bypass access rights check, but not ir.rules!
4556 This is ok at the security level because this method is private and not callable through XML-RPC.
4558 :param access_rights_uid: optional user ID to use when checking access rights
4559 (not for ir.rules, this is only for ir.model.access)
4563 self.check_access_rights(cr, access_rights_uid or user, 'read')
4565 # For transient models, restrict access to the current user, except for the super-user
4566 if self.is_transient() and self._log_access and user != SUPERUSER_ID:
4567 args = expression.AND(([('create_uid', '=', user)], args or []))
4569 query = self._where_calc(cr, user, args, context=context)
4570 self._apply_ir_rules(cr, user, query, 'read', context=context)
4571 order_by = self._generate_order_by(order, query)
4572 from_clause, where_clause, where_clause_params = query.get_sql()
4574 where_str = where_clause and (" WHERE %s" % where_clause) or ''
4577 # Ignore order, limit and offset when just counting; they don't make sense and could hurt performance
4579 query_str = 'SELECT count(1) FROM ' + from_clause + where_str
4580 cr.execute(query_str, where_clause_params)
4584 limit_str = limit and ' limit %d' % limit or ''
4585 offset_str = offset and ' offset %d' % offset or ''
4586 query_str = 'SELECT "%s".id FROM ' % self._table + from_clause + where_str + order_by + limit_str + offset_str
4587 cr.execute(query_str, where_clause_params)
4590 # TDE note: with auto_join, we could have several lines about the same result
4591 # i.e. a lead with several unread messages; we uniquify the result using
4592 # a fast way to do it while preserving order (http://www.peterbe.com/plog/uniqifiers-benchmark)
4593 def _uniquify_list(seq):
4594 seen = set()
4595 return [x for x in seq if x not in seen and not seen.add(x)]
4597 return _uniquify_list([x[0] for x in res])
4599 # returns the different values ever entered for one field
4600 # this is used, for example, in the client when the user hits enter on a char field
4602 def distinct_field_get(self, cr, uid, field, value, args=None, offset=0, limit=None):
4605 if field in self._inherit_fields:
4606 return self.pool[self._inherit_fields[field][0]].distinct_field_get(cr, uid, field, value, args, offset, limit)
4608 return self._columns[field].search(cr, self, args, field, value, offset, limit, uid)
4610 def copy_data(self, cr, uid, id, default=None, context=None):
4612 Copy the given record's data with all its field values
4614 :param cr: database cursor
4615 :param uid: current user id
4616 :param id: id of the record to copy
4617 :param default: field values to override in the original values of the copied record
4618 :type default: dictionary
4619 :param context: context arguments, like lang, time zone
4620 :type context: dictionary
4621 :return: dictionary containing all the field values
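
        A minimal usage sketch (the id and field name are illustrative)::

            values = model.copy_data(cr, uid, 42, default={'name': 'Copy'})
            new_id = model.create(cr, uid, values)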
        """
        if context is None:
            context = {}

        # avoid recursion through already copied records in case of circular relationship
4628 seen_map = context.setdefault('__copy_data_seen', {})
        if id in seen_map.setdefault(self._name, []):
            return
        seen_map[self._name].append(id)

        if default is None:
            default = {}
        if 'state' not in default:
4636 if 'state' in self._defaults:
4637 if callable(self._defaults['state']):
                    default['state'] = self._defaults['state'](self, cr, uid, context)
                else:
                    default['state'] = self._defaults['state']
4642 # build a black list of fields that should not be copied
4643 blacklist = set(MAGIC_COLUMNS + ['parent_left', 'parent_right'])
4644 whitelist = set(name for name, field in self._fields.iteritems() if not field.inherited)
4646 def blacklist_given_fields(obj):
4647 # blacklist the fields that are given by inheritance
4648 for other, field_to_other in obj._inherits.items():
4649 blacklist.add(field_to_other)
4650 if field_to_other in default:
4651 # all the fields of 'other' are given by the record: default[field_to_other],
4652 # except the ones redefined in self
4653 blacklist.update(set(self.pool[other]._fields) - whitelist)
                else:
                    blacklist_given_fields(self.pool[other])
4656 # blacklist deprecated fields
4657 for name, field in obj._fields.iteritems():
                if field.deprecated:
                    blacklist.add(name)
4661 blacklist_given_fields(self)
        fields_to_copy = dict((f, fi) for f, fi in self._fields.iteritems()
                              if fi.copy
                              if f not in blacklist)
4669 data = self.read(cr, uid, [id], fields_to_copy.keys(), context=context)
        if data:
            data = data[0]
        else:
            raise IndexError(_("Record #%d of %s not found, cannot copy!") % (id, self._name))

        res = dict(default)
4676 for f, field in fields_to_copy.iteritems():
4677 if field.type == 'many2one':
4678 res[f] = data[f] and data[f][0]
4679 elif field.type == 'one2many':
4680 other = self.pool[field.comodel_name]
4681 # duplicate following the order of the ids because we'll rely on
4682 # it later for copying translations in copy_translation()!
4683 lines = [other.copy_data(cr, uid, line_id, context=context) for line_id in sorted(data[f])]
4684 # the lines are duplicated using the wrong (old) parent, but then
4685 # are reassigned to the correct one thanks to the (0, 0, ...)
4686 res[f] = [(0, 0, line) for line in lines if line]
4687 elif field.type == 'many2many':
                res[f] = [(6, 0, data[f])]

        del data['id']
        return res
4694 def copy_translations(self, cr, uid, old_id, new_id, context=None):
        if context is None:
            context = {}

        # avoid recursion through already copied records in case of circular relationship
        seen_map = context.setdefault('__copy_translations_seen', {})
        if old_id in seen_map.setdefault(self._name, []):
            return
        seen_map[self._name].append(old_id)
4704 trans_obj = self.pool.get('ir.translation')
        for field_name, field in self._fields.iteritems():
            if not field.copy:
                continue
4709 # removing the lang to compare untranslated values
4710 context_wo_lang = dict(context, lang=None)
4711 old_record, new_record = self.browse(cr, uid, [old_id, new_id], context=context_wo_lang)
4712 # we must recursively copy the translations for o2o and o2m
4713 if field.type == 'one2many':
4714 target_obj = self.pool[field.comodel_name]
4715 # here we rely on the order of the ids to match the translations
4716 # as foreseen in copy_data()
4717 old_children = sorted(r.id for r in old_record[field_name])
4718 new_children = sorted(r.id for r in new_record[field_name])
4719 for (old_child, new_child) in zip(old_children, new_children):
4720 target_obj.copy_translations(cr, uid, old_child, new_child, context=context)
4721 # and for translatable fields we keep them for copy
4722 elif getattr(field, 'translate', False):
4723 if field_name in self._columns:
                    trans_name = self._name + "," + field_name
                    target_id = new_id
                    source_id = old_id
4727 elif field_name in self._inherit_fields:
4728 trans_name = self._inherit_fields[field_name][0] + "," + field_name
4729 # get the id of the parent record to set the translation
4730 inherit_field_name = self._inherit_fields[field_name][1]
4731 target_id = new_record[inherit_field_name].id
4732 source_id = old_record[inherit_field_name].id
4736 trans_ids = trans_obj.search(cr, uid, [
4737 ('name', '=', trans_name),
                    ('res_id', '=', source_id)
                ])
4740 user_lang = context.get('lang')
                for record in trans_obj.read(cr, uid, trans_ids, context=context):
                    del record['id']
4743 # remove source to avoid triggering _set_src
4744 del record['source']
4745 record.update({'res_id': target_id})
4746 if user_lang and user_lang == record['lang']:
4747 # 'source' to force the call to _set_src
4748 # 'value' needed if value is changed in copy(), want to see the new_value
4749 record['source'] = old_record[field_name]
4750 record['value'] = new_record[field_name]
4751 trans_obj.create(cr, uid, record, context=context)
4753 @api.returns('self', lambda value: value.id)
4754 def copy(self, cr, uid, id, default=None, context=None):
4755 """ copy(default=None)
4757 Duplicate record with given id updating it with default values
4759 :param dict default: dictionary of field values to override in the
            original values of the copied record, e.g: ``{'field_name': overridden_value, ...}``
4761 :returns: new record
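
        A short usage sketch (the id and field name are illustrative)::

            new_record = model.copy(cr, uid, 42, default={'name': 'Copy of record'})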
        """
        if context is None:
            context = {}
        context = context.copy()
4767 data = self.copy_data(cr, uid, id, default, context)
4768 new_id = self.create(cr, uid, data, context)
        self.copy_translations(cr, uid, id, new_id, context)
        return new_id
4773 @api.returns('self')
    def exists(self):
        """ exists() -> records
4777 Returns the subset of records in `self` that exist, and marks deleted
        records as such in cache. It can be used as a test on records::

            if record.exists():
                ...

        By convention, new records are returned as existing.
        """
        ids = filter(None, self._ids)           # ids to check in database
        if not ids:
            return self
4788 query = """SELECT id FROM "%s" WHERE id IN %%s""" % self._table
4789 self._cr.execute(query, (ids,))
4790 ids = ([r[0] for r in self._cr.fetchall()] + # ids in database
4791 [id for id in self._ids if not id]) # new ids
4792 existing = self.browse(ids)
4793 if len(existing) < len(self):
4794 # mark missing records in cache with a failed value
4795 exc = MissingError(_("Record does not exist or has been deleted."))
            (self - existing)._cache.update(FailedValue(exc))
        return existing
4799 def check_recursion(self, cr, uid, ids, context=None, parent=None):
        _logger.warning("You are using deprecated %s.check_recursion(); please use _check_recursion() instead." % \
                        self._name)
4802 assert parent is None or parent in self._columns or parent in self._inherit_fields,\
4803 "The 'parent' parameter passed to check_recursion() must be None or a valid field name"
4804 return self._check_recursion(cr, uid, ids, context, parent)
4806 def _check_recursion(self, cr, uid, ids, context=None, parent=None):
4808 Verifies that there is no loop in a hierarchical structure of records,
4809 by following the parent relationship using the **parent** field until a loop
4810 is detected or until a top-level record is found.
4812 :param cr: database cursor
4813 :param uid: current user id
4814 :param ids: list of ids of records to check
4815 :param parent: optional parent field name (default: ``self._parent_name = parent_id``)
4816 :return: **True** if the operation can proceed safely, or **False** if an infinite loop is detected.
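
        A sketch of typical usage from a constraint checker (the method name
        is illustrative)::

            def _check_parent_recursion(self, cr, uid, ids, context=None):
                return self._check_recursion(cr, uid, ids, context=context)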
        """
        if not parent:
            parent = self._parent_name
4821 # must ignore 'active' flag, ir.rules, etc. => direct SQL query
4822 query = 'SELECT "%s" FROM "%s" WHERE id = %%s' % (parent, self._table)
        for id in ids:
            current_id = id
            while current_id is not None:
                cr.execute(query, (current_id,))
                result = cr.fetchone()
                current_id = result[0] if result else None
                if current_id == id:
                    return False
        return True
4833 def _check_m2m_recursion(self, cr, uid, ids, field_name):
        Verifies that there is no loop in a directed graph of records, by
        following the links of the many2many field `field_name` until a loop
        is detected or until all reachable records have been visited.
4839 :param cr: database cursor
4840 :param uid: current user id
4841 :param ids: list of ids of records to check
4842 :param field_name: field to check
4843 :return: **True** if the operation can proceed safely, or **False** if an infinite loop is detected.
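
        A sketch of typical usage (``category_ids`` is an illustrative
        many2many field on the same model)::

            if not self._check_m2m_recursion(cr, uid, ids, 'category_ids'):
                raise ValidationError(_('Error! You cannot create recursive categories.'))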
        """
        field = self._fields.get(field_name)
4847 if not (field and field.type == 'many2many' and
4848 field.comodel_name == self._name and field.store):
4849 # field must be a many2many on itself
4850 raise ValueError('invalid field_name: %r' % (field_name,))
4852 query = 'SELECT distinct "%s" FROM "%s" WHERE "%s" IN %%s' % \
4853 (field.column2, field.relation, field.column1)
        ids_parent = ids[:]
        while ids_parent:
            ids_parent2 = []
            for i in range(0, len(ids_parent), cr.IN_MAX):
                j = i + cr.IN_MAX
                sub_ids_parent = ids_parent[i:j]
                cr.execute(query, (tuple(sub_ids_parent),))
                ids_parent2.extend(filter(None, map(lambda x: x[0], cr.fetchall())))
            ids_parent = ids_parent2
            for i in ids_parent:
                if i in ids:
                    return False
        return True
4868 def _get_external_ids(self, cr, uid, ids, *args, **kwargs):
4869 """Retrieve the External ID(s) of any database record.
4871 **Synopsis**: ``_get_xml_ids(cr, uid, ids) -> { 'id': ['module.xml_id'] }``
4873 :return: map of ids to the list of their fully qualified External IDs
4874 in the form ``module.key``, or an empty list when there's no External
4875 ID for a record, e.g.::
               { 'id': ['module.ext_id', 'module.ext_id_bis'],
                 'id2': [] }
        """
4880 ir_model_data = self.pool.get('ir.model.data')
4881 data_ids = ir_model_data.search(cr, uid, [('model', '=', self._name), ('res_id', 'in', ids)])
4882 data_results = ir_model_data.read(cr, uid, data_ids, ['module', 'name', 'res_id'])
        result = {}
        for id in ids:
            # can't use dict.fromkeys() as the list would be shared!
            result[id] = []
4887 for record in data_results:
            result[record['res_id']].append('%(module)s.%(name)s' % record)
        return result
4891 def get_external_id(self, cr, uid, ids, *args, **kwargs):
4892 """Retrieve the External ID of any database record, if there
4893 is one. This method works as a possible implementation
4894 for a function field, to be able to add it to any
4895 model object easily, referencing it as ``Model.get_external_id``.
4897 When multiple External IDs exist for a record, only one
4898 of them is returned (randomly).
4900 :return: map of ids to their fully qualified XML ID,
4901 defaulting to an empty string when there's none
4902 (to be usable as a function field),
                  { 'id': 'module.ext_id',
                    'id2': '' }
        """
4908 results = self._get_xml_ids(cr, uid, ids)
        for k, v in results.iteritems():
            if results[k]:
                results[k] = v[0]
            else:
                results[k] = ''
        return results
4916 # backwards compatibility
4917 get_xml_id = get_external_id
4918 _get_xml_ids = _get_external_ids
4920 def print_report(self, cr, uid, ids, name, data, context=None):
        """
        Render the report `name` for the given IDs. The report must be defined
        for this model, not another.
        """
        report = self.pool['ir.actions.report.xml']._lookup_report(cr, name)
4926 assert self._name == report.table
4927 return report.create(cr, uid, ids, data, context)
    @classmethod
    def is_transient(cls):
        """ Return whether the model is transient.

        See :class:`TransientModel`.
        """
        return cls._transient
4939 def _transient_clean_rows_older_than(self, cr, seconds):
4940 assert self._transient, "Model %s is not transient, it cannot be vacuumed!" % self._name
4941 # Never delete rows used in last 5 minutes
4942 seconds = max(seconds, 300)
4943 query = ("SELECT id FROM " + self._table + " WHERE"
4944 " COALESCE(write_date, create_date, (now() at time zone 'UTC'))::timestamp"
4945 " < ((now() at time zone 'UTC') - interval %s)")
4946 cr.execute(query, ("%s seconds" % seconds,))
4947 ids = [x[0] for x in cr.fetchall()]
4948 self.unlink(cr, SUPERUSER_ID, ids)
4950 def _transient_clean_old_rows(self, cr, max_count):
4951 # Check how many rows we have in the table
4952 cr.execute("SELECT count(*) AS row_count FROM " + self._table)
        res = cr.fetchall()
        if res[0][0] <= max_count:
4955 return # max not reached, nothing to do
4956 self._transient_clean_rows_older_than(cr, 300)
4958 def _transient_vacuum(self, cr, uid, force=False):
4959 """Clean the transient records.
4961 This unlinks old records from the transient model tables whenever the
4962 "_transient_max_count" or "_max_age" conditions (if any) are reached.
4963 Actual cleaning will happen only once every "_transient_check_time" calls.
        This means this method can be called frequently (e.g. whenever a new
        record is created).
4966 Example with both max_hours and max_count active:
4967 Suppose max_hours = 0.2 (e.g. 12 minutes), max_count = 20, there are 55 rows in the
4968 table, 10 created/changed in the last 5 minutes, an additional 12 created/changed between
        5 and 10 minutes ago, the rest created/changed more than 12 minutes ago.
4970 - age based vacuum will leave the 22 rows created/changed in the last 12 minutes
4971 - count based vacuum will wipe out another 12 rows. Not just 2, otherwise each addition
4972 would immediately cause the maximum to be reached again.
4973 - the 10 rows that have been created/changed the last 5 minutes will NOT be deleted
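
        A sketch of the model attributes that drive this method (the model
        name and values are illustrative)::

            class my_wizard(TransientModel):
                _name = 'my.wizard'
                _transient_max_count = 200    # count-based vacuum threshold
                _transient_max_hours = 1.0    # age-based vacuum threshold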
        """
        assert self._transient, "Model %s is not transient, it cannot be vacuumed!" % self._name
4976 _transient_check_time = 20 # arbitrary limit on vacuum executions
4977 self._transient_check_count += 1
4978 if not force and (self._transient_check_count < _transient_check_time):
4979 return True # no vacuum cleaning this time
4980 self._transient_check_count = 0
4982 # Age-based expiration
4983 if self._transient_max_hours:
4984 self._transient_clean_rows_older_than(cr, self._transient_max_hours * 60 * 60)
4986 # Count-based expiration
        if self._transient_max_count:
            self._transient_clean_old_rows(cr, self._transient_max_count)

        return True
4992 def resolve_2many_commands(self, cr, uid, field_name, commands, fields=None, context=None):
4993 """ Serializes one2many and many2many commands into record dictionaries
4994 (as if all the records came from the database via a read()). This
4995 method is aimed at onchange methods on one2many and many2many fields.
4997 Because commands might be creation commands, not all record dicts
4998 will contain an ``id`` field. Commands matching an existing record
4999 will have an ``id``.
5001 :param field_name: name of the one2many or many2many field matching the commands
5002 :type field_name: str
5003 :param commands: one2many or many2many commands to execute on ``field_name``
5004 :type commands: list((int|False, int|False, dict|False))
5005 :param fields: list of fields to read from the database, when applicable
5006 :type fields: list(str)
5007 :returns: records in a shape similar to that returned by ``read()``
5008 (except records may be missing the ``id`` field if they don't exist in db)
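
        A sketch of the expected behavior (the field name and values are
        illustrative)::

            commands = [(0, 0, {'name': 'new line'}), (4, 7, False)]
            records = self.resolve_2many_commands(cr, uid, 'line_ids', commands, ['name'])
            # -> [{'name': 'new line'}, {'id': 7, 'name': <name read from db>}]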
        """
        result = []             # result (list of dict)
5012 record_ids = [] # ids of records to read
5013 updates = {} # {id: dict} of updates on particular records
5015 for command in commands or []:
5016 if not isinstance(command, (list, tuple)):
5017 record_ids.append(command)
5018 elif command[0] == 0:
5019 result.append(command[2])
5020 elif command[0] == 1:
5021 record_ids.append(command[1])
5022 updates.setdefault(command[1], {}).update(command[2])
5023 elif command[0] in (2, 3):
5024 record_ids = [id for id in record_ids if id != command[1]]
5025 elif command[0] == 4:
5026 record_ids.append(command[1])
5027 elif command[0] == 5:
5028 result, record_ids = [], []
5029 elif command[0] == 6:
5030 result, record_ids = [], list(command[2])
5032 # read the records and apply the updates
5033 other_model = self.pool[self._fields[field_name].comodel_name]
5034 for record in other_model.read(cr, uid, record_ids, fields=fields, context=context):
5035 record.update(updates.get(record['id'], {}))
5036 result.append(record)
5040 # for backward compatibility
5041 resolve_o2m_commands_to_record_dicts = resolve_2many_commands
5043 def search_read(self, cr, uid, domain=None, fields=None, offset=0, limit=None, order=None, context=None):
5045 Performs a ``search()`` followed by a ``read()``.
5047 :param cr: database cursor
        :param uid: current user id
5049 :param domain: Search domain, see ``args`` parameter in ``search()``. Defaults to an empty domain that will match all records.
5050 :param fields: List of fields to read, see ``fields`` parameter in ``read()``. Defaults to all fields.
5051 :param offset: Number of records to skip, see ``offset`` parameter in ``search()``. Defaults to 0.
5052 :param limit: Maximum number of records to return, see ``limit`` parameter in ``search()``. Defaults to no limit.
5053 :param order: Columns to sort result, see ``order`` parameter in ``search()``. Defaults to no sort.
5054 :param context: context arguments.
5055 :return: List of dictionaries containing the asked fields.
5056 :rtype: List of dictionaries.
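
        A minimal usage sketch (the domain and field list are illustrative)::

            results = model.search_read(cr, uid, [('active', '=', True)],
                                        ['name'], limit=5)
            # -> [{'id': 1, 'name': '...'}, ...]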
        """
        record_ids = self.search(cr, uid, domain or [], offset=offset, limit=limit, order=order, context=context)
        if not record_ids:
            return []
5063 if fields and fields == ['id']:
5064 # shortcut read if we only want the ids
5065 return [{'id': id} for id in record_ids]
5067 # read() ignores active_test, but it would forward it to any downstream search call
5068 # (e.g. for x2m or function fields), and this is not the desired behavior, the flag
5069 # was presumably only meant for the main search().
5070 # TODO: Move this to read() directly?
5071 read_ctx = dict(context or {})
5072 read_ctx.pop('active_test', None)
5074 result = self.read(cr, uid, record_ids, fields, context=read_ctx)
        if len(result) <= 1:
            return result

        # reorder read
        index = dict((r['id'], r) for r in result)
5080 return [index[x] for x in record_ids if x in index]
5082 def _register_hook(self, cr):
        """ Stuff to do right after the registry is built. """
        pass
    @classmethod
    def _patch_method(cls, name, method):
5088 """ Monkey-patch a method for all instances of this model. This replaces
5089 the method called `name` by `method` in the given class.
5090 The original method is then accessible via ``method.origin``, and it
5091 can be restored with :meth:`~._revert_method`.
        Example::

            @api.multi
            def do_write(self, values):
5097 # do stuff, and call the original method
5098 return do_write.origin(self, values)
5100 # patch method write of model
5101 model._patch_method('write', do_write)
5103 # this will call do_write
            records = model.search([...])
            records.write(...)
5107 # restore the original method
5108 model._revert_method('write')
        """
        origin = getattr(cls, name)
5111 method.origin = origin
5112 # propagate decorators from origin to method, and apply api decorator
5113 wrapped = api.guess(api.propagate(origin, method))
5114 wrapped.origin = origin
5115 setattr(cls, name, wrapped)
    @classmethod
    def _revert_method(cls, name):
5119 """ Revert the original method called `name` in the given class.
5120 See :meth:`~._patch_method`.
        """
        method = getattr(cls, name)
5123 setattr(cls, name, method.origin)
5128 # An instance represents an ordered collection of records in a given
5129 # execution environment. The instance object refers to the environment, and
5130 # the records themselves are represented by their cache dictionary. The 'id'
5131 # of each record is found in its corresponding cache dictionary.
5133 # This design has the following advantages:
5134 # - cache access is direct and thus fast;
5135 # - one can consider records without an 'id' (see new records);
5136 # - the global cache is only an index to "resolve" a record 'id'.
    @classmethod
    def _browse(cls, env, ids):
        """ Create an instance attached to `env`; `ids` is a tuple of record
            ids.
        """
        records = object.__new__(cls)
        records.env = env
        records._ids = ids
        env.prefetch[cls._name].update(ids)
        return records
    @api.v7
    def browse(self, cr, uid, arg=None, context=None):
5152 ids = _normalize_ids(arg)
5153 #assert all(isinstance(id, IdType) for id in ids), "Browsing invalid ids: %s" % ids
5154 return self._browse(Environment(cr, uid, context or {}), ids)
    @api.v8
    def browse(self, arg=None):
5158 """ browse([ids]) -> records
        Returns a recordset for the ids provided as parameter in the current
        environment.

        Can take no ids, a single id or a sequence of ids.
        """
5165 ids = _normalize_ids(arg)
5166 #assert all(isinstance(id, IdType) for id in ids), "Browsing invalid ids: %s" % ids
5167 return self._browse(self.env, ids)
5170 # Internal properties, for manipulating the instance's implementation
    @property
    def ids(self):
        """ List of actual record ids in this recordset (ignores placeholder
        ids for records to create)
        """
        return filter(None, list(self._ids))
5180 # backward-compatibility with former browse records
5181 _cr = property(lambda self: self.env.cr)
5182 _uid = property(lambda self: self.env.uid)
5183 _context = property(lambda self: self.env.context)
5186 # Conversion methods
5189 def ensure_one(self):
        """ Verifies that the current recordset holds a single record. Raises
        an exception otherwise.
        """
        if len(self) == 1:
            return self
        raise except_orm("ValueError", "Expected singleton: %s" % self)
5197 def with_env(self, env):
        """ Returns a new version of this recordset attached to the provided
        environment.

        :type env: :class:`~openerp.api.Environment`
        """
        return self._browse(env, self._ids)
5205 def sudo(self, user=SUPERUSER_ID):
5206 """ sudo([user=SUPERUSER])
        Returns a new version of this recordset attached to the provided
        user.
        """
        return self.with_env(self.env(user=user))
5213 def with_context(self, *args, **kwargs):
5214 """ with_context([context][, **overrides]) -> records
        Returns a new version of this recordset attached to an extended
        context.

        The extended context is either the provided ``context`` in which
5220 ``overrides`` are merged or the *current* context in which
5221 ``overrides`` are merged e.g.::
5223 # current context is {'key1': True}
5224 r2 = records.with_context({}, key2=True)
5225 # -> r2._context is {'key2': True}
5226 r2 = records.with_context(key2=True)
5227 # -> r2._context is {'key1': True, 'key2': True}
        """
        context = dict(args[0] if args else self._context, **kwargs)
5230 return self.with_env(self.env(context=context))
5232 def _convert_to_cache(self, values, update=False, validate=True):
5233 """ Convert the `values` dictionary into cached values.
5235 :param update: whether the conversion is made for updating `self`;
5236 this is necessary for interpreting the commands of *2many fields
5237 :param validate: whether values must be checked
        """
        fields = self._fields
5240 target = self if update else self.browse()
        return {
            name: fields[name].convert_to_cache(value, target, validate=validate)
            for name, value in values.iteritems()
            if name in fields
        }
5247 def _convert_to_write(self, values):
5248 """ Convert the `values` dictionary into the format of :meth:`write`. """
        fields = self._fields
        result = {}
        for name, value in values.iteritems():
            if name in fields:
                value = fields[name].convert_to_write(value)
                if not isinstance(value, NewId):
                    result[name] = value
        return result
5259 # Record traversal and update
5262 def _mapped_func(self, func):
5263 """ Apply function `func` on all records in `self`, and return the
5264 result as a list or a recordset (if `func` returns recordsets).
        """
        if self:
            vals = [func(rec) for rec in self]
            return reduce(operator.or_, vals) if isinstance(vals[0], BaseModel) else vals
        else:
            vals = func(self)
            return vals if isinstance(vals, BaseModel) else []
5273 def mapped(self, func):
        """ Apply `func` on all records in `self`, and return the result as a
        list or a recordset (if `func` returns recordsets). In the latter
        case, the order of the returned recordset is arbitrary.
5278 :param func: a function or a dot-separated sequence of field names
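
        A short sketch (the dotted field names are illustrative)::

            # returns a list of names
            records.mapped('name')
            # returns a recordset of partners, duplicates removed
            records.mapped('message_ids.partner_id')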
        """
        if isinstance(func, basestring):
            recs = self
            for name in func.split('.'):
                recs = recs._mapped_func(operator.itemgetter(name))
            return recs
        else:
            return self._mapped_func(func)
5288 def _mapped_cache(self, name_seq):
5289 """ Same as `~.mapped`, but `name_seq` is a dot-separated sequence of
5290 field names, and only cached values are used.
        """
        recs = self
        for name in name_seq.split('.'):
5294 field = recs._fields[name]
5295 null = field.null(self.env)
5296 recs = recs.mapped(lambda rec: rec._cache.get(field, null))
5299 def filtered(self, func):
5300 """ Select the records in `self` such that `func(rec)` is true, and
5301 return them as a recordset.
5303 :param func: a function or a dot-separated sequence of field names
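
        A short sketch (the field names are illustrative)::

            # keep only the records whose 'active' field is True
            records.filtered(lambda r: r.active)
            # same, using a dotted sequence of field names
            records.filtered('partner_id.is_company')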
        """
        if isinstance(func, basestring):
            name = func
            func = lambda rec: filter(None, rec.mapped(name))
5308 return self.browse([rec.id for rec in self if func(rec)])
5310 def sorted(self, key=None, reverse=False):
5311 """ Return the recordset `self` ordered by `key`.
5313 :param key: either a function of one argument that returns a
5314 comparison key for each record, or ``None``, in which case
            records are ordered according to the model's default order
5317 :param reverse: if ``True``, return the result in reverse order
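
        A short sketch (the field name is illustrative)::

            # sort records by name, in reverse order
            records.sorted(key=lambda r: r.name, reverse=True)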
        """
        if key is None:
            recs = self.search([('id', 'in', self.ids)])
            return self.browse(reversed(recs._ids)) if reverse else recs
        else:
            return self.browse(map(int, sorted(self, key=key, reverse=reverse)))
5325 def update(self, values):
5326 """ Update record `self[0]` with `values`. """
        for name, value in values.iteritems():
            self[name] = value
5331 # New records - represent records that do not exist in the database yet;
5332 # they are used to perform onchanges.
5336 def new(self, values={}):
5337 """ new([values]) -> record
        Return a new record instance attached to the current environment and
        initialized with the provided ``values``. The record is *not* created
        in database, it only exists in memory.
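
        A short sketch, as typically used by onchange methods (the field
        values are illustrative)::

            virtual = model.new({'name': 'Draft'})
            # nothing has been written to the database at this point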
        """
        record = self.browse([NewId()])
5344 record._cache.update(record._convert_to_cache(values, update=True))
5346 if record.env.in_onchange:
5347 # The cache update does not set inverse fields, so do it manually.
5348 # This is useful for computing a function field on secondary
5349 # records, if that field depends on the main record.
            for name in values:
                field = self._fields.get(name)
                if field:
                    for invf in field.inverse_fields:
                        invf._update(record[name], record)

        return record
5359 # Dirty flags, to mark record fields modified (in draft mode)
5362 def _is_dirty(self):
5363 """ Return whether any record in `self` is dirty. """
5364 dirty = self.env.dirty
5365 return any(record in dirty for record in self)
5367 def _get_dirty(self):
5368 """ Return the list of field names for which `self` is dirty. """
5369 dirty = self.env.dirty
5370 return list(dirty.get(self, ()))
5372 def _set_dirty(self, field_name):
5373 """ Mark the records in `self` as dirty for the given `field_name`. """
5374 dirty = self.env.dirty
        for record in self:
            dirty[record].add(field_name)
5382 def __nonzero__(self):
5383 """ Test whether `self` is nonempty. """
5384 return bool(getattr(self, '_ids', True))
    def __len__(self):
        """ Return the size of `self`. """
5388 return len(self._ids)
    def __iter__(self):
        """ Return an iterator over `self`. """
5392 for id in self._ids:
5393 yield self._browse(self.env, (id,))
5395 def __contains__(self, item):
5396 """ Test whether `item` (record or field name) is an element of `self`.
5397 In the first case, the test is fully equivalent to::
5399 any(item == record for record in self)
5401 if isinstance(item, BaseModel) and self._name == item._name:
5402 return len(item) == 1 and item.id in self._ids
5403 elif isinstance(item, basestring):
            return item in self._fields
        else:
            raise except_orm("ValueError", "Mixing apples and oranges: %s in %s" % (item, self))
5408 def __add__(self, other):
5409 """ Return the concatenation of two recordsets. """
5410 if not isinstance(other, BaseModel) or self._name != other._name:
5411 raise except_orm("ValueError", "Mixing apples and oranges: %s + %s" % (self, other))
5412 return self.browse(self._ids + other._ids)
5414 def __sub__(self, other):
5415 """ Return the recordset of all the records in `self` that are not in `other`. """
5416 if not isinstance(other, BaseModel) or self._name != other._name:
5417 raise except_orm("ValueError", "Mixing apples and oranges: %s - %s" % (self, other))
5418 other_ids = set(other._ids)
5419 return self.browse([id for id in self._ids if id not in other_ids])
5421 def __and__(self, other):
5422 """ Return the intersection of two recordsets.
5423 Note that recordset order is not preserved.
5425 if not isinstance(other, BaseModel) or self._name != other._name:
5426 raise except_orm("ValueError", "Mixing apples and oranges: %s & %s" % (self, other))
5427 return self.browse(set(self._ids) & set(other._ids))
5429 def __or__(self, other):
5430 """ Return the union of two recordsets.
5431 Note that recordset order is not preserved.
5433 if not isinstance(other, BaseModel) or self._name != other._name:
5434 raise except_orm("ValueError", "Mixing apples and oranges: %s | %s" % (self, other))
5435 return self.browse(set(self._ids) | set(other._ids))
5437 def __eq__(self, other):
5438 """ Test whether two recordsets are equivalent (up to reordering). """
5439 if not isinstance(other, BaseModel):
            if other:
                _logger.warning("Comparing apples and oranges: %s == %s", self, other)
            return False
5443 return self._name == other._name and set(self._ids) == set(other._ids)
5445 def __ne__(self, other):
5446 return not self == other
5448 def __lt__(self, other):
5449 if not isinstance(other, BaseModel) or self._name != other._name:
5450 raise except_orm("ValueError", "Mixing apples and oranges: %s < %s" % (self, other))
5451 return set(self._ids) < set(other._ids)
5453 def __le__(self, other):
5454 if not isinstance(other, BaseModel) or self._name != other._name:
5455 raise except_orm("ValueError", "Mixing apples and oranges: %s <= %s" % (self, other))
5456 return set(self._ids) <= set(other._ids)
5458 def __gt__(self, other):
5459 if not isinstance(other, BaseModel) or self._name != other._name:
5460 raise except_orm("ValueError", "Mixing apples and oranges: %s > %s" % (self, other))
5461 return set(self._ids) > set(other._ids)
5463 def __ge__(self, other):
5464 if not isinstance(other, BaseModel) or self._name != other._name:
5465 raise except_orm("ValueError", "Mixing apples and oranges: %s >= %s" % (self, other))
5466 return set(self._ids) >= set(other._ids)
    def __str__(self):
        return "%s%s" % (self._name, getattr(self, '_ids', ""))
5474 def __unicode__(self):
5475 return unicode(str(self))
    __repr__ = __str__

    def __hash__(self):
        if hasattr(self, '_ids'):
            return hash((self._name, frozenset(self._ids)))
        else:
            return hash(self._name)
5485 def __getitem__(self, key):
5486 """ If `key` is an integer or a slice, return the corresponding record
5487 selection as an instance (attached to `self.env`).
5488 Otherwise read the field `key` of the first record in `self`.
        Examples::

            inst = model.search(dom)    # inst is a recordset
5493 r4 = inst[3] # fourth record in inst
5494 rs = inst[10:20] # subset of inst
5495 nm = rs['name'] # name of first record in inst
5497 if isinstance(key, basestring):
5498 # important: one must call the field's getter
5499 return self._fields[key].__get__(self, type(self))
5500 elif isinstance(key, slice):
5501 return self._browse(self.env, self._ids[key])
        else:
            return self._browse(self.env, (self._ids[key],))
5505 def __setitem__(self, key, value):
5506 """ Assign the field `key` to `value` in record `self`. """
5507 # important: one must call the field's setter
5508 return self._fields[key].__set__(self, value)
5511 # Cache and recomputation management
    @property
    def _cache(self):
        """ Return the cache of `self`, mapping field names to values. """
5517 return RecordCache(self)
5520 def _in_cache_without(self, field):
5521 """ Make sure `self` is present in cache (for prefetching), and return
5522 the records of model `self` in cache that have no value for `field`
5523 (:class:`Field` instance).
        """
        env = self.env
        prefetch_ids = env.prefetch[self._name]
5527 prefetch_ids.update(self._ids)
5528 ids = filter(None, prefetch_ids - set(env.cache[field]))
5529 return self.browse(ids)
    @api.multi
    def refresh(self):
        """ Clear the records cache.

            .. deprecated:: 8.0
                The record cache is automatically invalidated.
        """
        self.invalidate_cache()
    @api.model
    def invalidate_cache(self, fnames=None, ids=None):
5542 """ Invalidate the record caches after some records have been modified.
5543 If both `fnames` and `ids` are ``None``, the whole cache is cleared.
5545 :param fnames: the list of modified fields, or ``None`` for all fields
5546 :param ids: the list of modified record ids, or ``None`` for all
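
        A short sketch (the field name and ids are illustrative)::

            # invalidate the cached 'name' of records 1 and 2 only
            self.invalidate_cache(fnames=['name'], ids=[1, 2])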
        """
        if fnames is None:
            if ids is None:
                return self.env.invalidate_all()
            fields = self._fields.values()
        else:
            fields = map(self._fields.__getitem__, fnames)
5555 # invalidate fields and inverse fields, too
5556 spec = [(f, ids) for f in fields] + \
5557 [(invf, None) for f in fields for invf in f.inverse_fields]
5558 self.env.invalidate(spec)
    @api.multi
    def modified(self, fnames):
5562 """ Notify that fields have been modified on `self`. This invalidates
5563 the cache, and prepares the recomputation of stored function fields
5564 (new-style fields only).
        :param fnames: iterable of field names that have been modified on
            records `self`
        """
        # each field knows what to invalidate and recompute
        spec = []
        for fname in fnames:
            spec += self._fields[fname].modified(self)

        cached_fields = {
            field
            for env in self.env.all
            for field in env.cache
        }
        # invalidate non-stored fields.function which are currently cached
        spec += [(f, None) for f in self.pool.pure_function_fields
                 if f in cached_fields]

        self.env.invalidate(spec)
5585 def _recompute_check(self, field):
5586 """ If `field` must be recomputed on some record in `self`, return the
5587 corresponding records that must be recomputed.
5589 return self.env.check_todo(field, self)
5591 def _recompute_todo(self, field):
5592 """ Mark `field` to be recomputed. """
5593 self.env.add_todo(field, self)
5595 def _recompute_done(self, field):
5596 """ Mark `field` as recomputed. """
5597 self.env.remove_todo(field, self)
    @api.model
    def recompute(self):
5601 """ Recompute stored function fields. The fields and records to
            recompute have been determined by method :meth:`modified`.
        """
        while self.env.has_todo():
5605 field, recs = self.env.get_todo()
5606 # evaluate the fields to recompute, and save them to database
            for rec, rec1 in zip(recs, recs.with_context(recompute=False)):
                try:
                    values = rec._convert_to_write({
                        f.name: rec[f.name] for f in field.computed_fields
                    })
                    rec1._write(values)
                except MissingError:
                    pass
            # mark the computed fields as done
            map(recs._recompute_done, field.computed_fields)
5619 # Generic onchange method
5622 def _has_onchange(self, field, other_fields):
5623 """ Return whether `field` should trigger an onchange event in the
5624 presence of `other_fields`.
        """
        # test whether self has an onchange method for field, or field is a
5627 # dependency of any field in other_fields
5628 return field.name in self._onchange_methods or \
5629 any(dep in other_fields for dep in field.dependents)
    @api.model
    def _onchange_spec(self, view_info=None):
5633 """ Return the onchange spec from a view description; if not given, the
            result of ``self.fields_view_get()`` is used.
        """
        result = {}

        # for traversing the XML arch and populating result
5639 def process(node, info, prefix):
5640 if node.tag == 'field':
5641 name = node.attrib['name']
5642 names = "%s.%s" % (prefix, name) if prefix else name
5643 if not result.get(names):
5644 result[names] = node.attrib.get('on_change')
5645 # traverse the subviews included in relational fields
5646 for subinfo in info['fields'][name].get('views', {}).itervalues():
5647 process(etree.fromstring(subinfo['arch']), subinfo, names)
            else:
                for child in node:
                    process(child, info, prefix)
5652 if view_info is None:
5653 view_info = self.fields_view_get()
        process(etree.fromstring(view_info['arch']), view_info, '')
        return result
5657 def _onchange_eval(self, field_name, onchange, result):
5658 """ Apply onchange method(s) for field `field_name` with spec `onchange`
5659 on record `self`. Value assignments are applied on `self`, while
5660 domain and warning messages are put in dictionary `result`.
        """
        onchange = onchange.strip()

        # onchange V8
5665 if onchange in ("1", "true"):
5666 for method in self._onchange_methods.get(field_name, ()):
                method_res = method(self)
                if not isinstance(method_res, dict):
                    continue
                if 'domain' in method_res:
5671 result.setdefault('domain', {}).update(method_res['domain'])
5672 if 'warning' in method_res:
                if 'warning' in method_res:
                    result['warning'] = method_res['warning']
            return

        # onchange V7
        match = onchange_v7.match(onchange)
        if match:
            method, params = match.groups()
5681 # evaluate params -> tuple
5682 global_vars = {'context': self._context, 'uid': self._uid}
5683 if self._context.get('field_parent'):
5684 class RawRecord(object):
5685 def __init__(self, record):
5686 self._record = record
5687 def __getattr__(self, name):
5688 field = self._record._fields[name]
5689 value = self._record[name]
5690 return field.convert_to_onchange(value)
5691 record = self[self._context['field_parent']]
5692 global_vars['parent'] = RawRecord(record)
            field_vars = {
                key: self._fields[key].convert_to_onchange(val)
                for key, val in self._cache.iteritems()
            }
            params = eval("[%s]" % params, global_vars, field_vars)
5699 # call onchange method
5700 args = (self._cr, self._uid, self._origin.ids) + tuple(params)
5701 method_res = getattr(self._model, method)(*args)
            if not isinstance(method_res, dict):
                return
            if 'value' in method_res:
5705 method_res['value'].pop('id', None)
5706 self.update(self._convert_to_cache(method_res['value'], validate=False))
5707 if 'domain' in method_res:
5708 result.setdefault('domain', {}).update(method_res['domain'])
5709 if 'warning' in method_res:
5710 result['warning'] = method_res['warning']
    @api.multi
    def onchange(self, values, field_name, field_onchange):
5714 """ Perform an onchange on the given field.
5716 :param values: dictionary mapping field names to values, giving the
5717 current state of modification
5718 :param field_name: name of the modified field_name
        :param field_onchange: dictionary mapping field names to their
            on_change attribute
        """
        env = self.env

        if field_name and field_name not in self._fields:
            return {}
5727 # determine subfields for field.convert_to_write() below
        secondary = []
        subfields = defaultdict(set)
5730 for dotname in field_onchange:
            if '.' in dotname:
                secondary.append(dotname)
5733 name, subname = dotname.split('.')
5734 subfields[name].add(subname)
5736 # create a new record with values, and attach `self` to it
5737 with env.do_in_onchange():
5738 record = self.new(values)
5739 values = dict(record._cache)
5740 # attach `self` with a different context (for cache consistency)
5741 record._origin = self.with_context(__onchange=True)
5743 # determine which field should be triggered an onchange
        todo = set([field_name]) if field_name else set(values)
        done = set()

        # dummy assignment: trigger invalidations on the record
        for name in todo:
            value = record[name]
5750 field = self._fields[name]
            if not field_name and field.type == 'many2one' and field.delegate and not value:
                # do not nullify all fields of parent record for new records
                continue
            record[name] = value
        result = {'value': {}}

        while todo:
            name = todo.pop()
            if name in done:
                continue
            done.add(name)

            with env.do_in_onchange():
5765 # apply field-specific onchange methods
5766 if field_onchange.get(name):
5767 record._onchange_eval(name, field_onchange[name], result)
5769 # force re-evaluation of function fields on secondary records
5770 for field_seq in secondary:
5771 record.mapped(field_seq)
5773 # determine which fields have been modified
5774 for name, oldval in values.iteritems():
5775 field = self._fields[name]
5776 newval = record[name]
5777 if field.type in ('one2many', 'many2many'):
5778 if newval != oldval or newval._is_dirty():
5779 # put new value in result
                        result['value'][name] = field.convert_to_write(
                            newval, record._origin, subfields.get(name),
                        )
                        todo.add(name)
                    else:
                        # keep result: newval may have been dirty before
                        pass
                else:
                    if newval != oldval:
                        # put new value in result
                        result['value'][name] = field.convert_to_write(
                            newval, record._origin, subfields.get(name),
                        )
                        todo.add(name)
                    else:
                        # clean up result to not return another value
                        result['value'].pop(name, None)
5798 # At the moment, the client does not support updates on a *2many field
5799 # while this one is modified by the user.
5800 if field_name and self._fields[field_name].type in ('one2many', 'many2many'):
            result['value'].pop(field_name, None)

        return result
5806 class RecordCache(MutableMapping):
5807 """ Implements a proxy dictionary to read/update the cache of a record.
5808 Upon iteration, it looks like a dictionary mapping field names to
5809 values. However, fields may be used as keys as well.
    """
    def __init__(self, records):
5812 self._recs = records
5814 def contains(self, field):
5815 """ Return whether `records[0]` has a value for `field` in cache. """
5816 if isinstance(field, basestring):
5817 field = self._recs._fields[field]
5818 return self._recs.id in self._recs.env.cache[field]
5820 def __contains__(self, field):
5821 """ Return whether `records[0]` has a regular value for `field` in cache. """
5822 if isinstance(field, basestring):
5823 field = self._recs._fields[field]
5824 dummy = SpecialValue(None)
5825 value = self._recs.env.cache[field].get(self._recs.id, dummy)
5826 return not isinstance(value, SpecialValue)
5828 def __getitem__(self, field):
5829 """ Return the cached value of `field` for `records[0]`. """
5830 if isinstance(field, basestring):
5831 field = self._recs._fields[field]
5832 value = self._recs.env.cache[field][self._recs.id]
5833 return value.get() if isinstance(value, SpecialValue) else value
5835 def __setitem__(self, field, value):
5836 """ Assign the cached value of `field` for all records in `records`. """
5837 if isinstance(field, basestring):
5838 field = self._recs._fields[field]
5839 values = dict.fromkeys(self._recs._ids, value)
5840 self._recs.env.cache[field].update(values)
5842 def update(self, *args, **kwargs):
5843 """ Update the cache of all records in `records`. If the argument is a
5844 `SpecialValue`, update all fields (except "magic" columns).
5846 if args and isinstance(args[0], SpecialValue):
5847 values = dict.fromkeys(self._recs._ids, args[0])
5848 for name, field in self._recs._fields.iteritems():
                if name != 'id':
                    self._recs.env.cache[field].update(values)
        else:
            return super(RecordCache, self).update(*args, **kwargs)
5854 def __delitem__(self, field):
5855 """ Remove the cached value of `field` for all `records`. """
5856 if isinstance(field, basestring):
5857 field = self._recs._fields[field]
5858 field_cache = self._recs.env.cache[field]
5859 for id in self._recs._ids:
5860 field_cache.pop(id, None)
    def __iter__(self):
        """ Iterate over the field names with a regular value in cache. """
5864 cache, id = self._recs.env.cache, self._recs.id
5865 dummy = SpecialValue(None)
5866 for name, field in self._recs._fields.iteritems():
            if name != 'id' and not isinstance(cache[field].get(id, dummy), SpecialValue):
                yield name

    def __len__(self):
        """ Return the number of fields with a regular value in cache. """
5872 return sum(1 for name in self)
5874 class Model(BaseModel):
5875 """Main super-class for regular database-persisted OpenERP models.
    OpenERP models are created by inheriting from this class::

        class user(Model):
            ...
5882 The system will later instantiate the class once per database (on
    which the class' module is installed).
    """
5886 _register = False # not visible in ORM registry, meant to be python-inherited only
5887 _transient = False # True in a TransientModel
5889 class TransientModel(BaseModel):
5890 """Model super-class for transient records, meant to be temporarily
    persisted, and regularly vacuum-cleaned.
5893 A TransientModel has a simplified access rights management,
5894 all users can create new records, and may only access the
5895 records they created. The super-user has unrestricted access
    to all TransientModel records.
    """
5899 _register = False # not visible in ORM registry, meant to be python-inherited only
5902 class AbstractModel(BaseModel):
5903 """Abstract Model super-class for creating an abstract class meant to be
5904 inherited by regular models (Models or TransientModels) but not meant to
5905 be usable on its own, or persisted.
5907 Technical note: we don't want to make AbstractModel the super-class of
5908 Model or BaseModel because it would not make sense to put the main
5909 definition of persistence methods such as create() in it, and still we
    should be able to override them within an AbstractModel.
    """
5912 _auto = False # don't create any database backend for AbstractModels
5913 _register = False # not visible in ORM registry, meant to be python-inherited only
5916 def itemgetter_tuple(items):
5917 """ Fixes itemgetter inconsistency (useful in some cases) of not returning
5918 a tuple if len(items) == 1: always returns an n-tuple where n = len(items)
    """
    if len(items) == 0:
        return lambda a: ()
    if len(items) == 1:
        return lambda gettable: (gettable[items[0]],)
5924 return operator.itemgetter(*items)
5926 def convert_pgerror_23502(model, fields, info, e):
5927 m = re.match(r'^null value in column "(?P<field>\w+)" violates '
                 r'not-null constraint\n',
                 str(e))
5930 field_name = m and m.group('field')
5931 if not m or field_name not in fields:
5932 return {'message': unicode(e)}
5933 message = _(u"Missing required value for the field '%s'.") % field_name
    field = fields.get(field_name)
    if field:
        message = _(u"Missing required value for the field '%s' (%s)") % (field['string'], field_name)
    return {
        'message': message,
        'field': field_name,
    }
5942 def convert_pgerror_23505(model, fields, info, e):
5943 m = re.match(r'^duplicate key (?P<field>\w+) violates unique constraint',
                 str(e))
    field_name = m and m.group('field')
5946 if not m or field_name not in fields:
5947 return {'message': unicode(e)}
5948 message = _(u"The value for the field '%s' already exists.") % field_name
    field = fields.get(field_name)
    if field:
        message = _(u"%s This might be '%s' in the current model, or a field "
                    u"of the same name in an o2m.") % (message, field['string'])
    return {
        'message': message,
        'field': field_name,
    }
5958 PGERROR_TO_OE = defaultdict(
5959 # shape of mapped converters
5960 lambda: (lambda model, fvg, info, pgerror: {'message': unicode(pgerror)}), {
5961 # not_null_violation
5962 '23502': convert_pgerror_23502,
5963 # unique constraint error
    '23505': convert_pgerror_23505,
})
5967 def _normalize_ids(arg, atoms={int, long, str, unicode, NewId}):
5968 """ Normalizes the ids argument for ``browse`` (v7 and v8) to a tuple.
5970 Various implementations were tested on the corpus of all browse() calls
5971 performed during a full crawler run (after having installed all website_*
5972 modules) and this one was the most efficient overall.
5974 A possible bit of correctness was sacrificed by not doing any test on
    Iterable and just assuming that any non-atomic type was an iterable of
    some kind.

    :rtype: tuple
    """
    # much of the corpus is falsy objects (empty list, tuple or set, None)
    if not arg:
        return ()
5984 # `type in set` is significantly faster (because more restrictive) than
5985 # isinstance(arg, set) or issubclass(type, set); and for new-style classes
5986 # obj.__class__ is equivalent to but faster than type(obj). Not relevant
5987 # (and looks much worse) in most cases, but over millions of calls it
5988 # does have a very minor effect.
    if arg.__class__ in atoms:
        return arg,

    return tuple(arg)
5994 # keep those imports here to avoid dependency cycle errors
5995 from .osv import expression
5996 from .fields import Field, SpecialValue, FailedValue
5998 # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: