# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

"""
    Object Relational Mapping module:
     * Hierarchical structure
     * Constraints consistency and validation
     * Object metadata depends on its status
     * Optimised processing by complex query (multiple actions at once)
     * Default field values
     * Permissions optimisation
     * Persistent object: DB postgresql
     * Multi-level caching system
     * Two different inheritance mechanisms
     * Rich set of field types:
          - classical (varchar, integer, boolean, ...)
          - relational (one2many, many2one, many2many)
"""

import datetime
import itertools
import logging
import pickle
import re

from collections import defaultdict, MutableMapping
from inspect import getmembers

import dateutil.relativedelta
import psycopg2
from lxml import etree

from . import SUPERUSER_ID
from . import api
from . import tools
from .api import Environment
from .exceptions import except_orm, AccessError, MissingError, ValidationError
from .fields import Field
from .osv import fields
from .osv.query import Query
from .tools import lazy_property, ormcache
from .tools.config import config
from .tools.misc import CountingStream, DEFAULT_SERVER_DATETIME_FORMAT, DEFAULT_SERVER_DATE_FORMAT
from .tools.safe_eval import safe_eval as eval
from .tools.translate import _

_logger = logging.getLogger(__name__)
_schema = logging.getLogger(__name__ + '.schema')

regex_order = re.compile('^( *([a-z0-9:_]+|"[a-z0-9:_]+")( *desc| *asc)?( *, *|))+$', re.I)
regex_object_name = re.compile(r'^[a-z0-9_.]+$')
onchange_v7 = re.compile(r"^(\w+)\((.*)\)$")

AUTOINIT_RECALCULATE_STORED_FIELDS = 1000

def check_object_name(name):
    """ Check if the given name is a valid openerp object name.

        The _name attribute in osv and osv_memory objects is subject to
        some restrictions. This function returns True if the given name
        is allowed, and False otherwise.

        TODO: this is an approximation. The goal of this approximation
        is to disallow uppercase characters (in some places we quote
        table/column names and in others not, which leads to this kind
        of errors:

            psycopg2.ProgrammingError: relation "xxx" does not exist).

        The same restriction should apply to both osv and osv_memory
        objects for consistency.
    """
    if regex_object_name.match(name) is None:
        return False
    return True
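
# For instance (a sketch of the intended behaviour, given the regex above):
#   check_object_name('res.partner')  -> True
#   check_object_name('Res.Partner')  -> False (uppercase is not allowed)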

def raise_on_invalid_object_name(name):
    if not check_object_name(name):
        msg = "The _name attribute %s is not valid." % name
        raise except_orm('ValueError', msg)

POSTGRES_CONFDELTYPES = {
    'RESTRICT': 'r',
    'NO ACTION': 'a',
    'CASCADE': 'c',
    'SET NULL': 'n',
    'SET DEFAULT': 'd',
}

def intersect(la, lb):
    return filter(lambda x: x in lb, la)
123 """ Test whether functions `f` and `g` are identical or have the same name """
124 return f == g or getattr(f, '__name__', 0) == getattr(g, '__name__', 1)

def fix_import_export_id_paths(fieldname):
    """
    Fixes the id fields in import and exports, and splits field paths
    on '/'.

    :param str fieldname: name of the field to import/export
    :return: split field name
    :rtype: list of str
    """
    fixed_db_id = re.sub(r'([^/])\.id', r'\1/.id', fieldname)
    fixed_external_id = re.sub(r'([^/]):id', r'\1/id', fixed_db_id)
    return fixed_external_id.split('/')
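
# For instance (behaviour implied by the two regexes above):
#   fix_import_export_id_paths('partner_id:id')  -> ['partner_id', 'id']
#   fix_import_export_id_paths('line_ids.id')    -> ['line_ids', '.id']
#   fix_import_export_id_paths('line_ids/qty')   -> ['line_ids', 'qty']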

def pg_varchar(size=0):
    """ Returns the VARCHAR declaration for the provided size:

    * If no size (or an empty or negative size) is provided, return an
      'infinite' VARCHAR
    * Otherwise return a VARCHAR(n)

    :param int size: varchar size, optional
    :rtype: str
    """
    if size:
        if not isinstance(size, int):
            raise TypeError("VARCHAR parameter should be an int, got %s"
                            % type(size))
        if size > 0:
            return 'VARCHAR(%d)' % size
    return 'VARCHAR'
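
# E.g. pg_varchar(16) -> 'VARCHAR(16)'; pg_varchar() -> 'VARCHAR'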

FIELDS_TO_PGTYPES = {
    fields.boolean: 'bool',
    fields.integer: 'int4',
    fields.text: 'text',
    fields.html: 'text',
    fields.date: 'date',
    fields.datetime: 'timestamp',
    fields.binary: 'bytea',
    fields.many2one: 'int4',
    fields.serialized: 'text',
}

def get_pg_type(f, type_override=None):
    """
    :param fields._column f: field to get a Postgres type for
    :param type type_override: use the provided type for dispatching instead of the field's own type
    :returns: (postgres_identification_type, postgres_type_specification)
    :rtype: (str, str)
    """
    field_type = type_override or type(f)

    if field_type in FIELDS_TO_PGTYPES:
        pg_type = (FIELDS_TO_PGTYPES[field_type], FIELDS_TO_PGTYPES[field_type])
    elif issubclass(field_type, fields.float):
        # Explicit support for "falsy" digits (0, False) to indicate a
        # NUMERIC field with no fixed precision. The values will be saved
        # in the database with all significant digits.
        # FLOAT8 type is still the default when there is no precision because
        # it is faster for most operations (sums, etc.)
        if f.digits is not None:
            pg_type = ('numeric', 'NUMERIC')
        else:
            pg_type = ('float8', 'DOUBLE PRECISION')
    elif issubclass(field_type, (fields.char, fields.reference)):
        pg_type = ('varchar', pg_varchar(f.size))
    elif issubclass(field_type, fields.selection):
        if (isinstance(f.selection, list) and isinstance(f.selection[0][0], int))\
                or getattr(f, 'size', None) == -1:
            pg_type = ('int4', 'INTEGER')
        else:
            pg_type = ('varchar', pg_varchar(getattr(f, 'size', None)))
    elif issubclass(field_type, fields.function):
        if f._type == 'selection':
            pg_type = ('varchar', pg_varchar())
        else:
            pg_type = get_pg_type(f, getattr(fields, f._type))
    else:
        pg_type = None
        _logger.warning('%s type not supported!', field_type)

    return pg_type
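
# A minimal usage sketch (assuming an old-API char column of size 64):
#   get_pg_type(fields.char('Name', size=64))  -> ('varchar', 'VARCHAR(64)')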


class MetaModel(api.Meta):
    """ Metaclass for the models.

    This class is used as the metaclass for the class :class:`BaseModel` to
    discover the models defined in a module (without instantiating them).
    If the automatic discovery is not needed, it is possible to set the
    model's ``_register`` attribute to False.
    """

    module_to_models = {}

    def __init__(self, name, bases, attrs):
        if not self._register:
            self._register = True
            super(MetaModel, self).__init__(name, bases, attrs)
            return

        if not hasattr(self, '_module'):
            # The (OpenERP) module name can be in the `openerp.addons` namespace
            # or not. For instance, module `sale` can be imported as
            # `openerp.addons.sale` (the right way) or `sale` (for backward
            # compatibility).
            module_parts = self.__module__.split('.')
            if len(module_parts) > 2 and module_parts[:2] == ['openerp', 'addons']:
                module_name = self.__module__.split('.')[2]
            else:
                module_name = self.__module__.split('.')[0]
            self._module = module_name

        # Remember which models to instantiate for this module.
        self.module_to_models.setdefault(self._module, []).append(self)

        # check for a new-api conversion error: a comma left after a field definition
        for key, val in attrs.iteritems():
            if type(val) is tuple and len(val) == 1 and isinstance(val[0], Field):
                _logger.error("Trailing comma after field definition: %s.%s", self, key)

        # transform columns into new-style fields (enables field inheritance)
        for name, column in self._columns.iteritems():
            if name in self.__dict__:
                _logger.warning("In class %s, field %r overriding an existing value", self, name)
            setattr(self, name, column.to_field())
257 """ Pseudo-ids for new records. """
258 def __nonzero__(self):
261 IdType = (int, long, basestring, NewId)


# maximum number of prefetched records
PREFETCH_MAX = 200

# special columns automatically created by the ORM
LOG_ACCESS_COLUMNS = ['create_uid', 'create_date', 'write_uid', 'write_date']
MAGIC_COLUMNS = ['id'] + LOG_ACCESS_COLUMNS


class BaseModel(object):
    """ Base class for OpenERP models.

    OpenERP models are created by inheriting from this class' subclasses:

    * :class:`Model` for regular database-persisted models

    * :class:`TransientModel` for temporary data, stored in the database but
      automatically vacuumed every so often

    * :class:`AbstractModel` for abstract super classes meant to be shared by
      multiple inheriting models

    The system automatically instantiates every model once per database. Those
    instances represent the available models on each database, and depend on
    which modules are installed on that database. The actual class of each
    instance is built from the Python classes that create and inherit from the
    model.

    Every model instance is a "recordset", i.e., an ordered collection of
    records of the model. Recordsets are returned by methods like
    :meth:`~.browse`, :meth:`~.search`, or field accesses. Records have no
    explicit representation: a record is represented as a recordset of one
    record.

    To create a class that should not be instantiated, the _register class
    attribute may be set to False.
    """
    __metaclass__ = MetaModel
    _auto = True # create database backend
    _register = False # Set to False if the model should not be automatically discovered.
    _name = None
    _columns = {}
    _constraints = []
    _custom = False
    _defaults = {}
    _rec_name = None
    _parent_name = 'parent_id'
    _parent_store = False
    _parent_order = False
    _date_name = 'date'
    _order = 'id'
    _sequence = None
    _description = None
    _needaction = False
    _translate = True # set to False to disable translations export for this model

    # dict of {field: method}, with the method returning the (name_get of
    # records, {id: fold}) to include in the _read_group, if grouped on this field
    _group_by_full = {}

    _transient = False # True in a TransientModel

    # structure:
    #   { 'parent_model': 'm2o_field', ... }
    _inherits = {}

    # Mapping from inherits'd field name to the 4-tuple (m, r, f, n) where m is
    # the model from which it is inherits'd, r is the (local) field towards m,
    # f is the _column object itself, and n is the original (i.e. top-most)
    # parent model.
    # Example:
    #   { 'field_name': ('parent_model', 'm2o_field_to_reach_parent',
    #                    field_column_obj, original_parent_model), ... }
    _inherit_fields = {}

    # Mapping from field name to column_info object.
    # This is similar to _inherit_fields but:
    # 1. includes self fields,
    # 2. uses column_info instead of a triple.
    # Warning: _all_columns is deprecated, use _fields instead
    _all_columns = {}

    _table = None
    _log_create = False
    _sql_constraints = []

    # model dependencies, for models backed up by sql views:
    # {model_name: field_names, ...}
    _depends = {}

    CONCURRENCY_CHECK_FIELD = '__last_update'

    def log(self, cr, uid, id, message, secondary=False, context=None):
        return _logger.warning("log() is deprecated. Please use the OpenChatter notification system instead of the res.log mechanism.")

    def view_init(self, cr, uid, fields_list, context=None):
        """Override this method to do specific things when a view on the object is opened."""
        pass

    def _field_create(self, cr, context=None):
        """ Create entries in ir_model_fields for all the model's fields.

        If necessary, also create an entry in ir_model, and if called from the
        modules loading scheme (by receiving 'module' in the context), also
        create entries in ir_model_data (for the model and the fields).

        - create an entry in ir_model (if there is not already one),
        - create an entry in ir_model_data (if there is not already one, and if
          'module' is in the context),
        - update ir_model_fields with the fields found in _columns
          (TODO there is some redundancy as _columns is updated from
          ir_model_fields in __init__).
        """
        if context is None:
            context = {}
        cr.execute("SELECT id FROM ir_model WHERE model=%s", (self._name,))
        if not cr.rowcount:
            cr.execute('SELECT nextval(%s)', ('ir_model_id_seq',))
            model_id = cr.fetchone()[0]
            cr.execute("INSERT INTO ir_model (id, model, name, info, state) VALUES (%s, %s, %s, %s, %s)",
                       (model_id, self._name, self._description, self.__doc__, 'base'))
        else:
            model_id = cr.fetchone()[0]
        if 'module' in context:
            name_id = 'model_' + self._name.replace('.', '_')
            cr.execute('SELECT * FROM ir_model_data WHERE name=%s AND module=%s', (name_id, context['module']))
            if not cr.rowcount:
                cr.execute("INSERT INTO ir_model_data (name, date_init, date_update, module, model, res_id) "
                           "VALUES (%s, (now() at time zone 'UTC'), (now() at time zone 'UTC'), %s, %s, %s)",
                           (name_id, context['module'], 'ir.model', model_id))

        cr.execute("SELECT * FROM ir_model_fields WHERE model=%s", (self._name,))
        cols = {}
        for rec in cr.dictfetchall():
            cols[rec['name']] = rec

        ir_model_fields_obj = self.pool.get('ir.model.fields')

        # sparse fields should be created at the end, as they depend on their
        # serialized fields already existing
        model_fields = sorted(self._columns.items(), key=lambda x: 1 if x[1]._type == 'sparse' else 0)
        for (k, f) in model_fields:
            vals = {
                'model_id': model_id,
                'model': self._name,
                'name': k,
                'field_description': f.string,
                'ttype': f._type,
                'relation': f._obj or '',
                'select_level': tools.ustr(int(f.select)),
                'readonly': (f.readonly and 1) or 0,
                'required': (f.required and 1) or 0,
                'selectable': (f.selectable and 1) or 0,
                'translate': (f.translate and 1) or 0,
                'relation_field': f._fields_id if isinstance(f, fields.one2many) else '',
                'serialization_field_id': None,
            }
            if getattr(f, 'serialization_field', None):
                # resolve the link to the serialization field if specified by name
                serialization_field_id = ir_model_fields_obj.search(cr, SUPERUSER_ID, [('model', '=', vals['model']), ('name', '=', f.serialization_field)])
                if not serialization_field_id:
                    raise except_orm(_('Error'), _("Serialization field `%s` not found for sparse field `%s`!") % (f.serialization_field, k))
                vals['serialization_field_id'] = serialization_field_id[0]

            # When it is a custom field, it does not contain f.select
            if context.get('field_state', 'base') == 'manual':
                if context.get('field_name', '') == k:
                    vals['select_level'] = context.get('select', '0')
                # set the value so that the problem does NOT occur next time
                elif k in cols:
                    vals['select_level'] = cols[k]['select_level']

            if k not in cols:
                cr.execute('SELECT nextval(%s)', ('ir_model_fields_id_seq',))
                id = cr.fetchone()[0]
                vals['id'] = id
                cr.execute("""INSERT INTO ir_model_fields (
                    id, model_id, model, name, field_description, ttype,
                    relation, state, select_level, relation_field, translate, serialization_field_id
                ) VALUES (
                    %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s
                )""", (
                    id, vals['model_id'], vals['model'], vals['name'], vals['field_description'], vals['ttype'],
                    vals['relation'], 'base',
                    vals['select_level'], vals['relation_field'], bool(vals['translate']), vals['serialization_field_id']
                ))
                if 'module' in context:
                    name1 = 'field_' + self._table + '_' + k
                    cr.execute("SELECT name FROM ir_model_data WHERE name=%s", (name1,))
                    if cr.fetchone():
                        name1 = name1 + "_" + str(id)
                    cr.execute("INSERT INTO ir_model_data (name, date_init, date_update, module, model, res_id) "
                               "VALUES (%s, (now() at time zone 'UTC'), (now() at time zone 'UTC'), %s, %s, %s)",
                               (name1, context['module'], 'ir.model.fields', id))
            else:
                for key, val in vals.items():
                    if cols[k][key] != vals[key]:
                        cr.execute('UPDATE ir_model_fields SET field_description=%s WHERE model=%s AND name=%s',
                                   (vals['field_description'], vals['model'], vals['name']))
                        cr.execute("""UPDATE ir_model_fields SET
                            model_id=%s, field_description=%s, ttype=%s, relation=%s,
                            select_level=%s, readonly=%s, required=%s, selectable=%s, relation_field=%s, translate=%s, serialization_field_id=%s
                        WHERE
                            model=%s AND name=%s""", (
                                vals['model_id'], vals['field_description'], vals['ttype'],
                                vals['relation'],
                                vals['select_level'], bool(vals['readonly']), bool(vals['required']), bool(vals['selectable']), vals['relation_field'], bool(vals['translate']), vals['serialization_field_id'], vals['model'], vals['name']
                            ))
                        break
        self.invalidate_cache(cr, SUPERUSER_ID)

    @classmethod
    def _add_field(cls, name, field):
        """ Add the given `field` under the given `name` in the class """
        # add field as an attribute and in cls._fields (for reflection)
        if not isinstance(getattr(cls, name, field), Field):
            _logger.warning("In model %r, field %r overriding existing value", cls._name, name)
        setattr(cls, name, field)
        cls._fields[name] = field

        # basic setup of field
        field.set_class_name(cls, name)

        if field.store or field.column:
            cls._columns[name] = field.to_column()
        else:
            # remove potential column that may be overridden by field
            cls._columns.pop(name, None)

    @classmethod
    def _pop_field(cls, name):
        """ Remove the field with the given `name` from the model.
            This method should only be used for manual fields.
        """
        field = cls._fields.pop(name)
        cls._columns.pop(name, None)
        cls._all_columns.pop(name, None)
        if hasattr(cls, name):
            delattr(cls, name)
        return field

    @classmethod
    def _add_magic_fields(cls):
        """ Introduce magic fields on the current class

        * id is a "normal" field (with a specific getter)
        * create_uid, create_date, write_uid and write_date have become
          "normal" fields
        * $CONCURRENCY_CHECK_FIELD is a computed field with its computing
          method defined dynamically. Uses ``str(datetime.datetime.utcnow())``
          to get the same structure as the previous
          ``(now() at time zone 'UTC')::timestamp``::

              # select (now() at time zone 'UTC')::timestamp;
                        timezone
              ----------------------------
               2013-06-18 08:30:37.292809

              >>> str(datetime.datetime.utcnow())
              '2013-06-18 08:31:32.821177'
        """
        def add(name, field):
            """ add `field` with the given `name` if it does not exist yet """
            if name not in cls._fields:
                cls._add_field(name, field)

        # cyclic import: the new-api fields module (Id, Char, Many2one, ...)
        from . import fields

        # this field 'id' must override any other column or field
        cls._add_field('id', fields.Id(automatic=True))

        add('display_name', fields.Char(string='Display Name', automatic=True,
            compute='_compute_display_name'))

        if cls._log_access:
            add('create_uid', fields.Many2one('res.users', string='Created by', automatic=True))
            add('create_date', fields.Datetime(string='Created on', automatic=True))
            add('write_uid', fields.Many2one('res.users', string='Last Updated by', automatic=True))
            add('write_date', fields.Datetime(string='Last Updated on', automatic=True))
            last_modified_name = 'compute_concurrency_field_with_access'
        else:
            last_modified_name = 'compute_concurrency_field'

        # this field must override any other column or field
        cls._add_field(cls.CONCURRENCY_CHECK_FIELD, fields.Datetime(
            string='Last Modified on', compute=last_modified_name, automatic=True))

    @api.one
    def compute_concurrency_field(self):
        self[self.CONCURRENCY_CHECK_FIELD] = \
            datetime.datetime.utcnow().strftime(DEFAULT_SERVER_DATETIME_FORMAT)

    @api.one
    @api.depends('create_date', 'write_date')
    def compute_concurrency_field_with_access(self):
        self[self.CONCURRENCY_CHECK_FIELD] = \
            self.write_date or self.create_date or \
            datetime.datetime.utcnow().strftime(DEFAULT_SERVER_DATETIME_FORMAT)

    #
    # Goal: try to apply inheritance at the instantiation level and
    #       put objects in the pool var
    #
    @classmethod
    def _build_model(cls, pool, cr):
        """ Instantiate a given model.

        This class method instantiates the class of some model (i.e. a class
        deriving from osv or osv_memory). The class might be the class passed
        in argument or, if it inherits from another class, a class constructed
        by combining the two classes.
        """

        # IMPORTANT: the registry contains an instance for each model. The class
        # of each model carries inferred metadata that is shared among the
        # model's instances for this registry, but not among registries. Hence
        # we cannot use that "registry class" for combining model classes by
        # inheritance, since it confuses the metadata inference process.

        # Keep links to non-inherited constraints in cls; this is useful for
        # instance when exporting translations
        cls._local_constraints = cls.__dict__.get('_constraints', [])
        cls._local_sql_constraints = cls.__dict__.get('_sql_constraints', [])

        # determine inherited models
        parents = getattr(cls, '_inherit', [])
        parents = [parents] if isinstance(parents, basestring) else (parents or [])

        # determine the model's name
        name = cls._name or (len(parents) == 1 and parents[0]) or cls.__name__

        # determine the module that introduced the model
        original_module = pool[name]._original_module if name in parents else cls._module

        # build the class hierarchy for the model
        for parent in parents:
            if parent not in pool:
                raise TypeError('The model "%s" specifies an unexisting parent class "%s"\n'
                                'You may need to add a dependency on the parent class\' module.' % (name, parent))
            parent_model = pool[parent]

            # do not use the class of parent_model, since that class contains
            # inferred metadata; use its ancestor instead
            parent_class = type(parent_model).__base__

            # don't inherit custom fields
            columns = dict((key, val)
                           for key, val in parent_class._columns.iteritems()
                           if not val.manual)
            columns.update(cls._columns)

            inherits = dict(parent_class._inherits)
            inherits.update(cls._inherits)

            depends = dict(parent_class._depends)
            for m, fs in cls._depends.iteritems():
                depends[m] = depends.get(m, []) + fs

            old_constraints = parent_class._constraints
            new_constraints = cls._constraints
            # filter out from old_constraints the ones overridden by a
            # constraint with the same function name in new_constraints
            constraints = new_constraints + [oldc
                for oldc in old_constraints
                if not any(newc[2] == oldc[2] and same_name(newc[0], oldc[0])
                           for newc in new_constraints)
            ]

            sql_constraints = cls._sql_constraints + \
                              parent_class._sql_constraints

            attrs = {
                '_name': name,
                '_register': False,
                '_columns': columns,
                '_inherits': inherits,
                '_depends': depends,
                '_constraints': constraints,
                '_sql_constraints': sql_constraints,
            }
            cls = type(name, (cls, parent_class), attrs)

        # introduce the "registry class" of the model;
        # duplicate some attributes so that the ORM can modify them
        attrs = {
            '_name': name,
            '_register': False,
            '_columns': dict(cls._columns),
            '_defaults': {},            # filled by Field._determine_default()
            '_inherits': dict(cls._inherits),
            '_depends': dict(cls._depends),
            '_constraints': list(cls._constraints),
            '_sql_constraints': list(cls._sql_constraints),
            '_original_module': original_module,
        }
        cls = type(cls._name, (cls,), attrs)

        # instantiate the model, and initialize it
        model = object.__new__(cls)
        model.__init__(pool, cr)
        return model

    @classmethod
    def _init_function_fields(cls, pool, cr):
        # initialize the list of non-stored function fields for this model
        pool._pure_function_fields[cls._name] = []

        # process store of low-level function fields
        for fname, column in cls._columns.iteritems():
            if hasattr(column, 'digits_change'):
                column.digits_change(cr)
            # filter out existing store about this field
            pool._store_function[cls._name] = [
                stored
                for stored in pool._store_function.get(cls._name, [])
                if (stored[0], stored[1]) != (cls._name, fname)
            ]
            if not isinstance(column, fields.function):
                continue

            # register it on the pool for invalidation
            pool._pure_function_fields[cls._name].append(fname)

            # process store parameter
            store = column.store
            if store is True:
                get_ids = lambda self, cr, uid, ids, c={}: ids
                store = {cls._name: (get_ids, None, column.priority, None)}
            for model, spec in store.iteritems():
                if len(spec) == 4:
                    (fnct, fields2, order, length) = spec
                elif len(spec) == 3:
                    (fnct, fields2, order) = spec
                    length = None
                else:
                    raise except_orm('Error',
                        ('Invalid function definition %s in object %s !\nYou must use the definition: store={object:(fnct, fields, priority, time length)}.' % (fname, cls._name)))
                pool._store_function.setdefault(model, [])
                t = (cls._name, fname, fnct, tuple(fields2) if fields2 else None, order, length)
                if t not in pool._store_function[model]:
                    pool._store_function[model].append(t)
                    pool._store_function[model].sort(key=lambda x: x[4])

    @classmethod
    def _init_manual_fields(cls, cr):
        # Check whether the query has already been done
        if cls.pool.fields_by_model is not None:
            manual_fields = cls.pool.fields_by_model.get(cls._name, [])
        else:
            cr.execute('SELECT * FROM ir_model_fields WHERE model=%s AND state=%s', (cls._name, 'manual'))
            manual_fields = cr.dictfetchall()

        for field in manual_fields:
            if field['name'] in cls._fields:
                continue
            attrs = {
                'manual': True,
                'string': field['field_description'],
                'required': bool(field['required']),
                'readonly': bool(field['readonly']),
            }
            # FIXME: ignore field['serialization_field_id']
            if field['ttype'] in ('char', 'text', 'html'):
                attrs['translate'] = bool(field['translate'])
                attrs['size'] = field['size'] or None
            elif field['ttype'] in ('selection', 'reference'):
                attrs['selection'] = eval(field['selection'])
            elif field['ttype'] == 'many2one':
                attrs['comodel_name'] = field['relation']
                attrs['ondelete'] = field['on_delete']
                attrs['domain'] = eval(field['domain']) if field['domain'] else None
            elif field['ttype'] == 'one2many':
                attrs['comodel_name'] = field['relation']
                attrs['inverse_name'] = field['relation_field']
                attrs['domain'] = eval(field['domain']) if field['domain'] else None
            elif field['ttype'] == 'many2many':
                attrs['comodel_name'] = field['relation']
                _rel1 = field['relation'].replace('.', '_')
                _rel2 = field['model'].replace('.', '_')
                attrs['relation'] = 'x_%s_%s_%s_rel' % (_rel1, _rel2, field['name'])
                attrs['column1'] = 'id1'
                attrs['column2'] = 'id2'
                attrs['domain'] = eval(field['domain']) if field['domain'] else None
            cls._add_field(field['name'], Field.by_type[field['ttype']](**attrs))

    @classmethod
    def _init_constraints_onchanges(cls):
        # store sql constraint error messages
        for (key, _, msg) in cls._sql_constraints:
            cls.pool._sql_error[cls._table + '_' + key] = msg

        # collect constraint and onchange methods
        cls._constraint_methods = []
        cls._onchange_methods = defaultdict(list)
        for attr, func in getmembers(cls, callable):
            if hasattr(func, '_constrains'):
                cls._constraint_methods.append(func)
            if hasattr(func, '_onchange'):
                for name in func._onchange:
                    cls._onchange_methods[name].append(func)

    def __new__(cls):
        # In the past, this method was registering the model class in the server.
        # This job is now done entirely by the metaclass MetaModel.
        #
        # Do not create an instance here. Model instances are created by method
        # _build_model().
        return None

    def __init__(self, pool, cr):
        """ Initialize a model and make it part of the given registry.

        - copy the stored fields' functions in the registry,
        - retrieve custom fields and add them in the model,
        - ensure there is a many2one for each _inherits'd parent,
        - update the children's _columns,
        - give a chance to each field to initialize itself.
        """
        cls = type(self)

        # link the class to the registry, and update the registry
        cls.pool = pool
        cls._model = self  # backward compatibility
        pool.add(cls._name, self)

        # determine description, table, sequence and log_access
        if not cls._description:
            cls._description = cls._name
        if not cls._table:
            cls._table = cls._name.replace('.', '_')
        if not cls._sequence:
            cls._sequence = cls._table + '_id_seq'
        if not hasattr(cls, '_log_access'):
            # If _log_access is not specified, it is the same value as _auto.
            cls._log_access = cls._auto

        # Transience
        if cls.is_transient():
            cls._transient_check_count = 0
            cls._transient_max_count = config.get('osv_memory_count_limit')
            cls._transient_max_hours = config.get('osv_memory_age_limit')
            assert cls._log_access, \
                "TransientModels must have log_access turned on, " \
                "in order to implement their access rights policy"

        # retrieve new-style fields (from the above registry class) and duplicate
        # them (to avoid clashes with inheritance between different models)
        cls._fields = {}
        above = cls.__bases__[0]
        for attr, field in getmembers(above, Field.__instancecheck__):
            cls._add_field(attr, field.new())

        # introduce magic fields
        cls._add_magic_fields()

        # register stuff about low-level function fields and custom fields
        cls._init_function_fields(pool, cr)
        cls._init_manual_fields(cr)

        # register constraints and onchange methods
        cls._init_constraints_onchanges()

        # prepare ormcache, which must be shared by all instances of the model
        cls._ormcache = {}

    @api.model
    @ormcache()
    def _is_an_ordinary_table(self):
        # a table is "ordinary" when pg_class.relkind is 'r' (regular table)
        self.env.cr.execute("""\
            SELECT 1
            FROM pg_class
            WHERE relname = %s
            AND relkind = %s""", [self._table, 'r'])
        return bool(self.env.cr.fetchone())

    def __export_xml_id(self):
        """ Return a valid xml_id for the record `self`. """
        if not self._is_an_ordinary_table():
            raise Exception(
                "You can not export the column ID of model %s, because the "
                "table %s is not an ordinary table."
                % (self._name, self._table))
        ir_model_data = self.sudo().env['ir.model.data']
        data = ir_model_data.search([('model', '=', self._name), ('res_id', '=', self.id)])
        if data:
            if data[0].module:
                return '%s.%s' % (data[0].module, data[0].name)
            else:
                return data[0].name
        else:
            postfix = 0
            name = '%s_%s' % (self._table, self.id)
            while ir_model_data.search([('module', '=', '__export__'), ('name', '=', name)]):
                postfix += 1
                name = '%s_%s_%s' % (self._table, self.id, postfix)
            ir_model_data.create({
                'model': self._name,
                'res_id': self.id,
                'module': '__export__',
                'name': name,
            })
            return '__export__.' + name

    def __export_rows(self, fields):
        """ Export fields of the records in `self`.

            :param fields: list of lists of fields to traverse
            :return: list of lists of corresponding values
        """
        lines = []
        for record in self:
            # main line of record, initially empty
            current = [''] * len(fields)
            lines.append(current)

            # list of primary fields followed by secondary field(s)
            primary_done = []

            # process column by column
            for i, path in enumerate(fields):
                if not path:
                    continue

                name = path[0]
                if name in primary_done:
                    continue

                if name == '.id':
                    current[i] = str(record.id)
                elif name == 'id':
                    current[i] = record.__export_xml_id()
                else:
                    field = record._fields[name]
                    value = record[name]

                    # this part could be simpler, but it has to be done this way
                    # in order to reproduce the former behavior
                    if not isinstance(value, BaseModel):
                        current[i] = field.convert_to_export(value, self.env)
                    else:
                        primary_done.append(name)

                        # This is a special case, its strange behavior is intended!
                        if field.type == 'many2many' and len(path) > 1 and path[1] == 'id':
                            xml_ids = [r.__export_xml_id() for r in value]
                            current[i] = ','.join(xml_ids) or False
                            continue

                        # recursively export the fields that follow name
                        fields2 = [(p[1:] if p and p[0] == name else []) for p in fields]
                        lines2 = value.__export_rows(fields2)
                        if lines2:
                            # merge first line with record's main line
                            for j, val in enumerate(lines2[0]):
                                if val or isinstance(val, bool):
                                    current[j] = val
                            # check value of current field
                            if not current[i] and not isinstance(current[i], bool):
                                # assign xml_ids, and forget about remaining lines
                                xml_ids = [item[1] for item in value.name_get()]
                                current[i] = ','.join(xml_ids)
                            else:
                                # append the other lines at the end
                                lines += lines2[1:]
                        else:
                            record._cache[name] = False

        return lines

    @api.multi
    def export_data(self, fields_to_export, raw_data=False):
        """ Export fields for selected objects

            :param fields_to_export: list of fields
            :param raw_data: True to return value in native Python type
            :rtype: dictionary with a *datas* matrix

            This method is used when exporting data via client menu
        """
        fields_to_export = map(fix_import_export_id_paths, fields_to_export)
        if raw_data:
            self = self.with_context(export_raw_data=True)
        return {'datas': self.__export_rows(fields_to_export)}
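
    # Usage sketch (new API; model, fields and returned values are illustrative):
    #   partners = env['res.partner'].search([], limit=2)
    #   partners.export_data(['id', 'name'])
    #   -> {'datas': [[u'__export__.res_partner_7', u'Agrolait'], ...]}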

    def import_data(self, cr, uid, fields, datas, mode='init', current_module='', noupdate=False, context=None, filename=None):
        """
        .. deprecated:: 7.0
            Use :meth:`~load` instead

        Import given data in given module

        This method is used when importing data via client menu.

        Example of fields to import for a sale.order::

            .id,                         (=database_id)
            partner_id,                  (=name_search)
            order_line/.id,              (=database_id)
            order_line/name,
            order_line/product_id/id,    (=xml id)
            order_line/price_unit,
            order_line/product_uom_qty,
            order_line/product_uom/id    (=xml_id)

        This method returns a 4-tuple with the following structure::

            (return_code, errored_resource, error_message, unused)

        * The first item is a return code, it is ``-1`` in case of
          import error, or the last imported row number in case of success
        * The second item contains the record data dict that failed to import
          in case of error, otherwise it's 0
        * The third item contains an error message string in case of error,
          otherwise it's 0
        * The last item is currently unused, with no specific semantics

        :param fields: list of fields to import
        :param datas: data to import
        :param mode: 'init' or 'update' for record creation
        :param current_module: module name
        :param noupdate: flag for record creation
        :param filename: optional file to store partial import state for recovery
        :returns: 4-tuple in the form (return_code, errored_resource, error_message, unused)
        :rtype: (int, dict or 0, str or 0, str or 0)
        """
        context = dict(context) if context is not None else {}
        context['_import_current_module'] = current_module

        fields = map(fix_import_export_id_paths, fields)
        ir_model_data_obj = self.pool.get('ir.model.data')

        def log(m):
            if m['type'] == 'error':
                raise Exception(m['message'])

        position = 0
        if config.get('import_partial') and filename:
            with open(config.get('import_partial'), 'rb') as partial_import_file:
                data = pickle.load(partial_import_file)
                position = data.get(filename, 0)

        try:
            for res_id, xml_id, res, info in self._convert_records(cr, uid,
                            self._extract_records(cr, uid, fields, datas,
                                                  context=context, log=log),
                            context=context, log=log):
                ir_model_data_obj._update(cr, uid, self._name,
                     current_module, res, mode=mode, xml_id=xml_id,
                     noupdate=noupdate, res_id=res_id, context=context)
                position = info.get('rows', {}).get('to', 0) + 1
                if config.get('import_partial') and filename and (not (position % 100)):
                    with open(config.get('import_partial'), 'rb') as partial_import:
                        data = pickle.load(partial_import)
                    data[filename] = position
                    with open(config.get('import_partial'), 'wb') as partial_import:
                        pickle.dump(data, partial_import)
                    if context.get('defer_parent_store_computation'):
                        self._parent_store_compute(cr)
                    cr.commit()
        except Exception, e:
            cr.rollback()
            return -1, {}, 'Line %d : %s' % (position + 1, tools.ustr(e)), ''

        if context.get('defer_parent_store_computation'):
            self._parent_store_compute(cr)
        return position, 0, 0, 0

    def load(self, cr, uid, fields, data, context=None):
        """
        Attempts to load the data matrix, and returns a list of ids (or
        ``False`` if there was an error and no id could be generated) and a
        list of messages.

        The ids are those of the records created and saved (in database), in
        the same order they were extracted from the file. They can be passed
        directly to :meth:`~read`.

        :param fields: list of fields to import, at the same index as the corresponding data
        :type fields: list(str)
        :param data: row-major matrix of data to import
        :type data: list(list(str))
        :param dict context:
        :returns: {ids: list(int)|False, messages: [Message]}
        """
        cr.execute('SAVEPOINT model_load')
        messages = []

        fields = map(fix_import_export_id_paths, fields)
        ModelData = self.pool['ir.model.data']
        ModelData.clear_caches()
        fg = self.fields_get(cr, uid, context=context)

        mode = 'init'
        current_module = ''
        noupdate = False

        ids = []
        for id, xid, record, info in self._convert_records(cr, uid,
                self._extract_records(cr, uid, fields, data,
                                      context=context, log=messages.append),
                context=context, log=messages.append):
            try:
                cr.execute('SAVEPOINT model_load_save')
            except psycopg2.InternalError, e:
                # broken transaction, exit and hope the source error was
                # already logged
                if not any(message['type'] == 'error' for message in messages):
                    messages.append(dict(info, type='error', message=
                        u"Unknown database error: '%s'" % e))
                break
            try:
                ids.append(ModelData._update(cr, uid, self._name,
                     current_module, record, mode=mode, xml_id=xid,
                     noupdate=noupdate, res_id=id, context=context))
                cr.execute('RELEASE SAVEPOINT model_load_save')
            except psycopg2.Warning, e:
                messages.append(dict(info, type='warning', message=str(e)))
                cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
            except psycopg2.Error, e:
                messages.append(dict(
                    info, type='error',
                    **PGERROR_TO_OE[e.pgcode](self, fg, info, e)))
                # Failed to write, log to messages, rollback savepoint (to
                # avoid broken transaction) and keep going
                cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
            except Exception, e:
                message = (_('Unknown error during import:') +
                           ' %s: %s' % (type(e), unicode(e)))
                moreinfo = _('Resolve other errors first')
                messages.append(dict(info, type='error',
                                     message=message, moreinfo=moreinfo))
                # Failed for some reason, perhaps due to invalid data supplied,
                # rollback savepoint and keep going
                cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
        if any(message['type'] == 'error' for message in messages):
            cr.execute('ROLLBACK TO SAVEPOINT model_load')
            ids = False
        return {'ids': ids, 'messages': messages}
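
    # Usage sketch (old-style API; model and values are illustrative, assuming
    # a one2many field `line_ids` with a `name` sub-field):
    #   fields = ['name', 'line_ids/name']
    #   data = [['Order 1', 'Line A'],
    #           ['',        'Line B']]   # continuation row: only o2m cells set
    #   result = self.pool['my.model'].load(cr, uid, fields, data)
    #   result['ids']       # [new_id] or False on error
    #   result['messages']  # list of warning/error dicts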

    def _extract_records(self, cr, uid, fields_, data,
                         context=None, log=lambda a: None):
        """ Generates record dicts from the data sequence.

        The result is a generator of dicts mapping field names to raw
        (unconverted, unvalidated) values.

        For relational fields, if sub-fields were provided the value will be
        a list of sub-records

        The following sub-fields may be set on the record (by key):

        * None is the name_get for the record (to use with name_create/name_search)
        * "id" is the External ID for the record
        * ".id" is the Database ID for the record
        """
        from openerp.fields import Char, Integer
        fields = dict(self._fields)
        # Fake fields to avoid special cases in extractor
        fields[None] = Char('rec_name')
        fields['id'] = Char('External ID')
        fields['.id'] = Integer('Database ID')

        # m2o fields can't be on multiple lines so exclude them from the
        # is_relational field rows filter, but special-case it later on to
        # be handled with relational fields (as it can have subfields)
        is_relational = lambda field: fields[field].relational
        get_o2m_values = itemgetter_tuple(
            [index for index, field in enumerate(fields_)
             if fields[field[0]].type == 'one2many'])
        get_nono2m_values = itemgetter_tuple(
            [index for index, field in enumerate(fields_)
             if fields[field[0]].type != 'one2many'])

        # Checks if the provided row has any non-empty non-relational field
        def only_o2m_values(row, f=get_nono2m_values, g=get_o2m_values):
            return any(g(row)) and not any(f(row))

        index = 0
        while True:
            if index >= len(data):
                return

            row = data[index]

            # copy non-relational fields to record dict
            record = dict((field[0], value)
                          for field, value in itertools.izip(fields_, row)
                          if not is_relational(field[0]))

            # Get all following rows which have relational values attached to
            # the current record (no non-relational values)
            record_span = itertools.takewhile(
                only_o2m_values, itertools.islice(data, index + 1, None))
            # stitch the record row back on for relational fields
            record_span = list(itertools.chain([row], record_span))
            for relfield in set(
                    field[0] for field in fields_
                    if is_relational(field[0])):
                # FIXME: how to not use _obj without relying on fields_get?
                Model = self.pool[fields[relfield].comodel_name]

                # get only cells for this sub-field, should be strictly
                # non-empty, field path [None] is for name_get field
                indices, subfields = zip(*((index, field[1:] or [None])
                                           for index, field in enumerate(fields_)
                                           if field[0] == relfield))

                # return all rows which have at least one value for the
                # subfields of relfield
                relfield_data = filter(any, map(itemgetter_tuple(indices), record_span))
                record[relfield] = [subrecord
                                    for subrecord, _subinfo in Model._extract_records(
                                        cr, uid, subfields, relfield_data,
                                        context=context, log=log)]

            yield record, {'rows': {
                'from': index,
                'to': index + len(record_span) - 1,
            }}
            index += len(record_span)
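
    # Extraction sketch (values illustrative, assuming `line_ids` is a
    # one2many): with fields_ = [['name'], ['line_ids', 'name']] and
    # data = [['Order 1', 'Line A'], ['', 'Line B']], the second row carries
    # only o2m cells, so both rows collapse into one yielded item:
    #   ({'name': 'Order 1',
    #     'line_ids': [{'name': 'Line A'}, {'name': 'Line B'}]},
    #    {'rows': {'from': 0, 'to': 1}})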

    def _convert_records(self, cr, uid, records,
                         context=None, log=lambda a: None):
        """ Converts records from the source iterable (recursive dicts of
        strings) into forms which can be written to the database (via
        self.create or (ir.model.data)._update)

        :returns: a list of triplets of (id, xid, record)
        :rtype: list((int|None, str|None, dict))
        """
        if context is None:
            context = {}
        Converter = self.pool['ir.fields.converter']
        Translation = self.pool['ir.translation']
        fields = dict(self._fields)
        field_names = dict(
            (f, (Translation._get_source(cr, uid, self._name + ',' + f, 'field',
                                         context.get('lang'))
                 or field.string))
            for f, field in fields.iteritems())

        convert = Converter.for_model(cr, uid, self, context=context)

        def _log(base, field, exception):
            type = 'warning' if isinstance(exception, Warning) else 'error'
            # log the logical (not human-readable) field name for automated
            # processing of the response, but inject the human-readable name
            # in the message
            record = dict(base, type=type, field=field,
                          message=unicode(exception.args[0]) % base)
            if len(exception.args) > 1 and exception.args[1]:
                record.update(exception.args[1])
            log(record)

        stream = CountingStream(records)
        for record, extras in stream:
            dbid = False
            xid = False
            # name_get/name_create
            if None in record:
                pass
            # xid
            if 'id' in record:
                xid = record['id']
            # dbid
            if '.id' in record:
                try:
                    dbid = int(record['.id'])
                except ValueError:
                    # in case of overridden id column
                    dbid = record['.id']
                if not self.search(cr, uid, [('id', '=', dbid)], context=context):
                    log(dict(extras,
                             type='error',
                             record=stream.index,
                             field='.id',
                             message=_(u"Unknown database identifier '%s'") % dbid))
                    dbid = False

            converted = convert(record, lambda field, err:
                _log(dict(extras, record=stream.index, field=field_names[field]), field, err))

            yield dbid, xid, converted, dict(extras, record=stream.index)

    @api.multi
    def _validate_fields(self, field_names):
        field_names = set(field_names)

        # old-style constraint methods
        trans = self.env['ir.translation']
        cr, uid, context = self.env.args
        ids = self.ids
        errors = []
        for fun, msg, names in self._constraints:
            try:
                # validation must be context-independent; call `fun` without context
                valid = not (set(names) & field_names) or fun(self._model, cr, uid, ids)
                extra_error = None
            except Exception, e:
                _logger.debug('Exception while validating constraint', exc_info=True)
                valid = False
                extra_error = tools.ustr(e)
            if not valid:
                if callable(msg):
                    res_msg = msg(self._model, cr, uid, ids, context=context)
                    if isinstance(res_msg, tuple):
                        template, params = res_msg
                        res_msg = template % params
                else:
                    res_msg = trans._get_source(self._name, 'constraint', self.env.lang, msg)
                if extra_error:
                    res_msg += "\n\n%s\n%s" % (_('Error details:'), extra_error)
                errors.append(
                    _("Field(s) `%s` failed against a constraint: %s") %
                    (', '.join(names), res_msg)
                )
        if errors:
            raise ValidationError('\n'.join(errors))

        # new-style constraint methods
        for check in self._constraint_methods:
            if set(check._constrains) & field_names:
                try:
                    check(self)
                except ValidationError, e:
                    raise
                except Exception, e:
                    raise ValidationError("Error while validating constraint\n\n%s" % tools.ustr(e))

    @api.model
    def default_get(self, fields_list):
        """ default_get(fields) -> default_values

        Return default values for the fields in `fields_list`. Default
        values are determined by the context, user defaults, and the model
        itself.

        :param fields_list: a list of field names
        :return: a dictionary mapping each field name to its corresponding
            default value, if it has one.
        """
        # trigger view init hook
        self.view_init(fields_list)

        defaults = {}
        parent_fields = defaultdict(list)

        for name in fields_list:
            # 1. look up context
            key = 'default_' + name
            if key in self._context:
                defaults[name] = self._context[key]
                continue

            # 2. look up ir_values
            #    Note: performance is good, because get_defaults_dict is cached!
            ir_values_dict = self.env['ir.values'].get_defaults_dict(self._name)
            if name in ir_values_dict:
                defaults[name] = ir_values_dict[name]
                continue

            field = self._fields.get(name)

            # 3. look up property fields
            #    TODO: get rid of this one
            if field and field.company_dependent:
                defaults[name] = self.env['ir.property'].get(name, self._name)
                continue

            # 4. look up field.default
            if field and field.default:
                defaults[name] = field.default(self)
                continue

            # 5. delegate to parent model
            if field and field.inherited:
                field = field.related_field
                parent_fields[field.model_name].append(field.name)

        # convert default values to the right format
        defaults = self._convert_to_cache(defaults, validate=False)
        defaults = self._convert_to_write(defaults)

        # add default values for inherited fields
        for model, names in parent_fields.iteritems():
            defaults.update(self.env[model].default_get(names))

        return defaults
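
    # Usage sketch (new API; field names and values are illustrative):
    #   self.env['res.partner'].default_get(['active', 'lang'])
    #   -> {'active': True, 'lang': u'en_US'}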

    def fields_get_keys(self, cr, user, context=None):
        res = self._columns.keys()
        # TODO I believe this loop can be replaced by
        # res.extend(self._inherit_fields.keys())
        for parent in self._inherits:
            res.extend(self.pool[parent].fields_get_keys(cr, user, context))
        return res

    def _rec_name_fallback(self, cr, uid, context=None):
        rec_name = self._rec_name
        if rec_name not in self._columns:
            rec_name = self._columns.keys()[0] if len(self._columns.keys()) > 0 else "id"
        return rec_name

    #
    # Override this method if you need a window title that depends on the context
    #
    def view_header_get(self, cr, user, view_id=None, view_type='form', context=None):
        return False

    def user_has_groups(self, cr, uid, groups, context=None):
        """Return True if the user is at least a member of one of the groups
           in `groups`. Typically used to resolve the `groups` attribute
           in view and model definitions.

           :param str groups: comma-separated list of fully-qualified group
                              external IDs, e.g.: ``base.group_user,base.group_system``
           :return: True if the current user is a member of one of the
                    given groups
        """
        return any(self.pool['res.users'].has_group(cr, uid, group_ext_id)
                   for group_ext_id in groups.split(','))
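
    # E.g. (old-style API):
    #   self.user_has_groups(cr, uid, 'base.group_user,base.group_system')
    # returns True if `uid` belongs to at least one of the two groups.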

    def _get_default_form_view(self, cr, user, context=None):
        """ Generates a default single-line form view using all fields
        of the current model except the m2m and o2m ones.

        :param cr: database cursor
        :param int user: user id
        :param dict context: connection context
        :returns: a form view as an lxml document
        :rtype: etree._Element
        """
        view = etree.Element('form', string=self._description)
        group = etree.SubElement(view, 'group', col="4")
        for fname, field in self._fields.iteritems():
            if field.automatic or field.type in ('one2many', 'many2many'):
                continue

            etree.SubElement(group, 'field', name=fname)
            if field.type == 'text':
                etree.SubElement(group, 'newline')
        return view

    def _get_default_search_view(self, cr, user, context=None):
        """ Generates a single-field search view, based on _rec_name.

        :param cr: database cursor
        :param int user: user id
        :param dict context: connection context
        :returns: a search view as an lxml document
        :rtype: etree._Element
        """
        view = etree.Element('search', string=self._description)
        etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))
        return view

    def _get_default_tree_view(self, cr, user, context=None):
        """ Generates a single-field tree view, based on _rec_name.

        :param cr: database cursor
        :param int user: user id
        :param dict context: connection context
        :returns: a tree view as an lxml document
        :rtype: etree._Element
        """
        view = etree.Element('tree', string=self._description)
        etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))
        return view

    def _get_default_calendar_view(self, cr, user, context=None):
        """ Generates a default calendar view by trying to infer
        calendar fields from a number of pre-set attribute names

        :param cr: database cursor
        :param int user: user id
        :param dict context: connection context
        :returns: a calendar view
        :rtype: etree._Element
        """
        def set_first_of(seq, in_, to):
            """Sets the first value of `seq` also found in `in_` to
            the `to` attribute of the view being closed over.

            Returns whether it found a suitable value (and set it on
            the attribute) or not
            """
            for item in seq:
                if item in in_:
                    view.set(to, item)
                    return True
            return False

        view = etree.Element('calendar', string=self._description)
        etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))

        if self._date_name not in self._columns:
            date_found = False
            for dt in ['date', 'date_start', 'x_date', 'x_date_start']:
                if dt in self._columns:
                    self._date_name = dt
                    date_found = True
                    break

            if not date_found:
                raise except_orm(_('Invalid Object Architecture!'), _("Insufficient fields for Calendar View!"))
        view.set('date_start', self._date_name)

        set_first_of(["user_id", "partner_id", "x_user_id", "x_partner_id"],
                     self._columns, 'color')

        if not set_first_of(["date_stop", "date_end", "x_date_stop", "x_date_end"],
                            self._columns, 'date_stop'):
            if not set_first_of(["date_delay", "planned_hours", "x_date_delay", "x_planned_hours"],
                                self._columns, 'date_delay'):
                raise except_orm(
                    _('Invalid Object Architecture!'),
                    _("Insufficient fields to generate a Calendar View for %s, missing a date_stop or a date_delay") % self._name)

        return view

    def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
        """ fields_view_get([view_id | view_type='form'])

        Get the detailed composition of the requested view, like fields, model,
        view architecture.

        :param view_id: id of the view or None
        :param view_type: type of the view to return if view_id is None ('form', 'tree', ...)
        :param toolbar: True to include contextual actions
        :param submenu: deprecated
        :return: dictionary describing the composition of the requested view (including inherited views and extensions)
        :raise AttributeError:
            * if the inherited view has an unknown position to work with, other than 'before', 'after', 'inside' or 'replace'
            * if some tag other than 'position' is found in the parent view
        :raise Invalid ArchitectureError: if there is a view type other than form, tree, calendar, search etc. defined on the structure
        """
        if context is None:
            context = {}
        View = self.pool['ir.ui.view']

        result = {
            'model': self._name,
            'field_parent': False,
        }

        # try to find a view_id if none provided
        if not view_id:
            # <view_type>_view_ref in context can be used to override the default view
            view_ref_key = view_type + '_view_ref'
            view_ref = context.get(view_ref_key)
            if view_ref:
                if '.' in view_ref:
                    module, view_ref = view_ref.split('.', 1)
                    cr.execute("SELECT res_id FROM ir_model_data WHERE model='ir.ui.view' AND module=%s AND name=%s", (module, view_ref))
                    view_ref_res = cr.fetchone()
                    if view_ref_res:
                        view_id = view_ref_res[0]
                else:
                    _logger.warning('%r requires a fully-qualified external id (got: %r for model %s). '
                                    'Please use the complete `module.view_id` form instead.', view_ref_key, view_ref,
                                    self._name)

        if not view_id:
            # otherwise try to find the lowest priority matching ir.ui.view
            view_id = View.default_view(cr, uid, self._name, view_type, context=context)

        # context for post-processing might be overridden
        ctx = context
        if view_id:
            # read the view with inherited views applied
            root_view = View.read_combined(cr, uid, view_id, fields=['id', 'name', 'field_parent', 'type', 'model', 'arch'], context=context)
            result['arch'] = root_view['arch']
            result['name'] = root_view['name']
            result['type'] = root_view['type']
            result['view_id'] = root_view['id']
            result['field_parent'] = root_view['field_parent']
            # override context for post-processing
            if root_view.get('model') != self._name:
                ctx = dict(context, base_model_name=root_view.get('model'))
        else:
            # fallback on default views methods if no ir.ui.view could be found
            try:
                get_func = getattr(self, '_get_default_%s_view' % view_type)
                arch_etree = get_func(cr, uid, context)
                result['arch'] = etree.tostring(arch_etree, encoding='utf-8')
                result['type'] = view_type
                result['name'] = 'default'
            except AttributeError:
                raise except_orm(_('Invalid Architecture!'), _("No default view of type '%s' could be found!") % view_type)

        # Apply post-processing, groups and modifiers etc...
        xarch, xfields = View.postprocess_and_fields(cr, uid, self._name, etree.fromstring(result['arch']), view_id, context=ctx)
        result['arch'] = xarch
        result['fields'] = xfields

        # Add related action information if asked
        if toolbar:
            toclean = ('report_sxw_content', 'report_rml_content', 'report_sxw', 'report_rml', 'report_sxw_content_data', 'report_rml_content_data')
            def clean(x):
                x = x[2]
                for key in toclean:
                    x.pop(key, None)
                return x
            ir_values_obj = self.pool.get('ir.values')
            resprint = ir_values_obj.get(cr, uid, 'action', 'client_print_multi', [(self._name, False)], False, context)
            resaction = ir_values_obj.get(cr, uid, 'action', 'client_action_multi', [(self._name, False)], False, context)
            resrelate = ir_values_obj.get(cr, uid, 'action', 'client_action_relate', [(self._name, False)], False, context)
            resaction = [clean(action) for action in resaction if view_type == 'tree' or not action[2].get('multi')]
            resprint = [clean(print_) for print_ in resprint if view_type == 'tree' or not print_[2].get('multi')]
            # When multi="True" is set, the action is displayed only in the
            # More menu of the list view
            resrelate = [clean(action) for action in resrelate
                         if (action[2].get('multi') and view_type == 'tree') or (not action[2].get('multi') and view_type == 'form')]

            for x in itertools.chain(resprint, resaction, resrelate):
                x['string'] = x['name']

            result['toolbar'] = {
                'print': resprint,
                'action': resaction,
                'relate': resrelate,
            }
        return result

    def get_formview_id(self, cr, uid, id, context=None):
        """ Return a view id to open the document with. This method is meant to be
            overridden in addons that want to give specific view ids, for example.

            :param int id: id of the document to open
        """
        return False

    def get_formview_action(self, cr, uid, id, context=None):
        """ Return an action to open the document. This method is meant to be
            overridden in addons that want to give specific view ids, for example.

            :param int id: id of the document to open
        """
        view_id = self.get_formview_id(cr, uid, id, context=context)
        return {
            'type': 'ir.actions.act_window',
            'res_model': self._name,
            'view_type': 'form',
            'view_mode': 'form',
            'views': [(view_id, 'form')],
            'target': 'current',
            'res_id': id,
        }

    def get_access_action(self, cr, uid, id, context=None):
        """ Return an action to open the document. This method is meant to be
            overridden in addons that want to give specific access to the document.
            By default it opens the form view of the document.

            :param int id: id of the document to open
        """
        return self.get_formview_action(cr, uid, id, context=context)

    def _view_look_dom_arch(self, cr, uid, node, view_id, context=None):
        return self.pool['ir.ui.view'].postprocess_and_fields(
            cr, uid, self._name, node, view_id, context=context)

    def search_count(self, cr, user, args, context=None):
        """ search_count(args) -> int

        Returns the number of records in the current model matching :ref:`the
        provided domain <reference/orm/domains>`.
        """
        res = self.search(cr, user, args, context=context, count=True)
        if isinstance(res, list):
            return len(res)
        return res
1632 def search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False):
1633 """ search(args[, offset=0][, limit=None][, order=None])
1635 Searches for records based on the ``args``
1636 :ref:`search domain <reference/orm/domains>`.
1638 :param args: :ref:`A search domain <reference/orm/domains>`. Use an empty
1639 list to match all records.
1640 :param int offset: number of results to ignore (default: none)
1641 :param int limit: maximum number of records to return (default: all)
1642 :param str order: sort string
1643 :returns: at most ``limit`` records matching the search criteria
1645 :raise AccessError: * if user tries to bypass access rules for read on the requested object.
1647 return self._search(cr, user, args, offset=offset, limit=limit, order=order, context=context, count=count)
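
    # E.g. (old-style API; model and domain are illustrative): ids of at most
    # 10 company partners
    #   ids = self.pool['res.partner'].search(cr, uid,
    #           [('is_company', '=', True)], limit=10)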

    #
    # display_name, name_get, name_create, name_search
    #

    @api.depends(lambda self: (self._rec_name,) if self._rec_name else ())
    def _compute_display_name(self):
        names = dict(self.name_get())
        for record in self:
            record.display_name = names.get(record.id, False)
1661 """ name_get() -> [(id, name), ...]
1663 Returns a textual representation for the records in ``self``.
1664 By default this is the value of the ``display_name`` field.
1666 :return: list of pairs ``(id, text_repr)`` for each records
1670 name = self._rec_name
1671 if name in self._fields:
1672 convert = self._fields[name].convert_to_display_name
1674 result.append((record.id, convert(record[name])))
1677 result.append((record.id, "%s,%s" % (record._name, record.id)))

    @api.model
    def name_create(self, name):
        """ name_create(name) -> record

        Create a new record by calling :meth:`~.create` with only one value
        provided: the display name of the new record.

        The new record will be initialized with any default values
        applicable to this model, or provided through the context. The usual
        behavior of :meth:`~.create` applies.

        :param name: display name of the record to create
        :return: the :meth:`~.name_get` pair value of the created record
        """
        if self._rec_name:
            record = self.create({self._rec_name: name})
            return record.name_get()[0]
        else:
            _logger.warning("Cannot execute name_create, no _rec_name defined on %s", self._name)
            return False
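
    # Usage sketch (assuming a model whose _rec_name is 'name'; values are
    # illustrative):
    #   self.env['res.partner'].name_create('Agrolait')
    #   -> (new_id, u'Agrolait')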
1704 def name_search(self, name='', args=None, operator='ilike', limit=100):
1705 """ name_search(name='', args=None, operator='ilike', limit=100) -> records
1707 Search for records that have a display name matching the given
1708 `name` pattern when compared with the given `operator`, while also
1709 matching the optional search domain (`args`).
        This is used for example to provide suggestions based on a partial
        value for a relational field. It may be seen as the inverse of
        :meth:`~.name_get`, but it is not guaranteed to be one.
1715 This method is equivalent to calling :meth:`~.search` with a search
1716 domain based on ``display_name`` and then :meth:`~.name_get` on the
1717 result of the search.
1719 :param str name: the name pattern to match
1720 :param list args: optional search domain (see :meth:`~.search` for
1721 syntax), specifying further restrictions
1722 :param str operator: domain operator for matching `name`, such as
1723 ``'like'`` or ``'='``.
1724 :param int limit: optional max number of records to return
        :return: list of pairs ``(id, text_repr)`` for all matching records.
        """
1728 return self._name_search(name, args, operator, limit=limit)
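    # Usage sketch (hypothetical model and pattern):
    #
    #   matches = self.env['res.country'].name_search('bel', limit=5)
    #   # matches looks like [(20, 'Belgium'), (23, 'Belize')]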
1730 def _name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100, name_get_uid=None):
1731 # private implementation of name_search, allows passing a dedicated user
1732 # for the name_get part to solve some access rights issues
1733 args = list(args or [])
1734 # optimize out the default criterion of ``ilike ''`` that matches everything
1735 if not self._rec_name:
1736 _logger.warning("Cannot execute name_search, no _rec_name defined on %s", self._name)
1737 elif not (name == '' and operator == 'ilike'):
1738 args += [(self._rec_name, operator, name)]
1739 access_rights_uid = name_get_uid or user
1740 ids = self._search(cr, user, args, limit=limit, context=context, access_rights_uid=access_rights_uid)
        res = self.name_get(cr, access_rights_uid, ids, context)
        return res
    def read_string(self, cr, uid, id, langs, fields=None, context=None):
        res = {}
        res2 = {}
        self.pool.get('ir.translation').check_access_rights(cr, uid, 'read')
        if not fields:
            fields = self._columns.keys() + self._inherit_fields.keys()
        #FIXME: collect all calls to _get_source into one SQL call.
        for lang in langs:
            res[lang] = {'code': lang}
            for f in fields:
                if f in self._columns:
                    res_trans = self.pool.get('ir.translation')._get_source(cr, uid, self._name+','+f, 'field', lang)
                    if res_trans:
                        res[lang][f] = res_trans
                    else:
                        res[lang][f] = self._columns[f].string
        for table in self._inherits:
            cols = intersect(self._inherit_fields.keys(), fields)
            res2 = self.pool[table].read_string(cr, uid, id, langs, cols, context)
        for lang in res2:
            if lang in res:
                res[lang]['code'] = lang
            for f in res2[lang]:
                res[lang][f] = res2[lang][f]
        return res
    def write_string(self, cr, uid, id, langs, vals, context=None):
        self.pool.get('ir.translation').check_access_rights(cr, uid, 'write')
        #FIXME: try to only call the translation in one SQL
        for lang in langs:
            for field in vals:
                if field in self._columns:
                    src = self._columns[field].string
                    self.pool.get('ir.translation')._set_ids(cr, uid, self._name+','+field, 'field', lang, [0], vals[field], src)
        for table in self._inherits:
            cols = intersect(self._inherit_fields.keys(), vals)
            if cols:
                self.pool[table].write_string(cr, uid, id, langs, vals, context)
        return True
1784 def _add_missing_default_values(self, cr, uid, values, context=None):
        # avoid overriding inherited values when parent is set
        avoid_tables = []
        for tables, parent_field in self._inherits.items():
1788 if parent_field in values:
1789 avoid_tables.append(tables)
1791 # compute missing fields
1792 missing_defaults = set()
1793 for field in self._columns.keys():
1794 if not field in values:
1795 missing_defaults.add(field)
1796 for field in self._inherit_fields.keys():
1797 if (field not in values) and (self._inherit_fields[field][0] not in avoid_tables):
1798 missing_defaults.add(field)
1799 # discard magic fields
1800 missing_defaults -= set(MAGIC_COLUMNS)
1802 if missing_defaults:
1803 # override defaults with the provided values, never allow the other way around
            defaults = self.default_get(cr, uid, list(missing_defaults), context)
            for dv in defaults:
                if ((dv in self._columns and self._columns[dv]._type == 'many2many') \
1807 or (dv in self._inherit_fields and self._inherit_fields[dv][2]._type == 'many2many')) \
1808 and defaults[dv] and isinstance(defaults[dv][0], (int, long)):
1809 defaults[dv] = [(6, 0, defaults[dv])]
1810 if (dv in self._columns and self._columns[dv]._type == 'one2many' \
1811 or (dv in self._inherit_fields and self._inherit_fields[dv][2]._type == 'one2many')) \
1812 and isinstance(defaults[dv], (list, tuple)) and defaults[dv] and isinstance(defaults[dv][0], dict):
                    defaults[dv] = [(0, 0, x) for x in defaults[dv]]
            defaults.update(values)
            values = defaults
        return values
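    # Conversion sketch (hypothetical default values): a many2many default
    # given as a plain id list becomes a single "replace" command, and a
    # one2many default given as dicts becomes "create" commands:
    #
    #   [10, 52]              ->  [(6, 0, [10, 52])]
    #   [{'name': 'line 1'}]  ->  [(0, 0, {'name': 'line 1'})]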
1818 def clear_caches(self):
1819 """ Clear the caches
1821 This clears the caches associated to methods decorated with
        ``tools.ormcache`` or ``tools.ormcache_multi``.
        """
        try:
            self._ormcache.clear()
            self.pool._any_cache_cleared = True
        except AttributeError:
            pass
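    # Usage sketch: typically called after bypassing the ORM with raw SQL, so
    # that ormcache'd results are not served stale, e.g.
    #
    #   self.pool['ir.model.data'].clear_caches()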
1831 def _read_group_fill_results(self, cr, uid, domain, groupby, remaining_groupbys,
1832 aggregated_fields, count_field,
1833 read_group_result, read_group_order=None, context=None):
1834 """Helper method for filling in empty groups for all possible values of
1835 the field being grouped by"""
1837 # self._group_by_full should map groupable fields to a method that returns
1838 # a list of all aggregated values that we want to display for this field,
1839 # in the form of a m2o-like pair (key,label).
1840 # This is useful to implement kanban views for instance, where all columns
1841 # should be displayed even if they don't contain any record.
1843 # Grab the list of all groups that should be displayed, including all present groups
1844 present_group_ids = [x[groupby][0] for x in read_group_result if x[groupby]]
        all_groups, folded = self._group_by_full[groupby](self, cr, uid, present_group_ids, domain,
                                                          read_group_order=read_group_order,
                                                          access_rights_uid=openerp.SUPERUSER_ID,
                                                          context=context)
1850 result_template = dict.fromkeys(aggregated_fields, False)
1851 result_template[groupby + '_count'] = 0
1852 if remaining_groupbys:
1853 result_template['__context'] = {'group_by': remaining_groupbys}
        # Merge the left_side (current results as dicts) with the right_side (all
        # possible values as m2o pairs). Both lists are supposed to be using the
        # same ordering, and can be merged in one pass.
        result = []
        known_values = {}
1860 def append_left(left_side):
1861 grouped_value = left_side[groupby] and left_side[groupby][0]
1862 if not grouped_value in known_values:
1863 result.append(left_side)
                known_values[grouped_value] = left_side
            else:
                known_values[grouped_value].update({count_field: left_side[count_field]})
1867 def append_right(right_side):
1868 grouped_value = right_side[0]
1869 if not grouped_value in known_values:
1870 line = dict(result_template)
1871 line[groupby] = right_side
                line['__domain'] = [(groupby,'=',grouped_value)] + domain
                result.append(line)
                known_values[grouped_value] = line
1875 while read_group_result or all_groups:
1876 left_side = read_group_result[0] if read_group_result else None
1877 right_side = all_groups[0] if all_groups else None
1878 assert left_side is None or left_side[groupby] is False \
1879 or isinstance(left_side[groupby], (tuple,list)), \
1880 'M2O-like pair expected, got %r' % left_side[groupby]
1881 assert right_side is None or isinstance(right_side, (tuple,list)), \
1882 'M2O-like pair expected, got %r' % right_side
1883 if left_side is None:
1884 append_right(all_groups.pop(0))
1885 elif right_side is None:
1886 append_left(read_group_result.pop(0))
1887 elif left_side[groupby] == right_side:
1888 append_left(read_group_result.pop(0))
1889 all_groups.pop(0) # discard right_side
            elif not left_side[groupby] or not left_side[groupby][0]:
                # left side == "Undefined" entry, not present on right_side
                append_left(read_group_result.pop(0))
            else:
                append_right(all_groups.pop(0))
        if folded:
            for r in result:
                r['__fold'] = folded.get(r[groupby] and r[groupby][0], False)
        return result
1901 def _read_group_prepare(self, orderby, aggregated_fields, annotated_groupbys, query):
        Prepares the GROUP BY and ORDER BY terms for the read_group method. Adds the missing JOIN clause
        to the query if the order must be computed against a m2o field.
1905 :param orderby: the orderby definition in the form "%(field)s %(order)s"
1906 :param aggregated_fields: list of aggregated fields in the query
1907 :param annotated_groupbys: list of dictionaries returned by _read_group_process_groupby
            Each dictionary contains the qualified name of the groupby
            (fully qualified SQL name of the corresponding field)
            and the (non-raw) field name.
1911 :param osv.Query query: the query under construction
        :return: (groupby_terms, orderby_terms)
        """
        orderby_terms = []
        groupby_terms = [gb['qualified_field'] for gb in annotated_groupbys]
        groupby_fields = [gb['groupby'] for gb in annotated_groupbys]
        if not orderby:
            return groupby_terms, orderby_terms
1920 self._check_qorder(orderby)
1921 for order_part in orderby.split(','):
1922 order_split = order_part.split()
1923 order_field = order_split[0]
1924 if order_field in groupby_fields:
                if self._fields[order_field.split(':')[0]].type == 'many2one':
                    order_clause = self._generate_order_by(order_part, query).replace('ORDER BY ', '')
                    if order_clause:
                        orderby_terms.append(order_clause)
                        groupby_terms += [order_term.split()[0] for order_term in order_clause.split(',')]
                else:
                    order = '"%s" %s' % (order_field, '' if len(order_split) == 1 else order_split[1])
                    orderby_terms.append(order)
1934 elif order_field in aggregated_fields:
1935 orderby_terms.append(order_part)
            else:
                # Cannot order by a field that will not appear in the results (needs to be grouped or aggregated)
1938 _logger.warn('%s: read_group order by `%s` ignored, cannot sort on empty columns (not grouped/aggregated)',
1939 self._name, order_part)
1940 return groupby_terms, orderby_terms
1942 def _read_group_process_groupby(self, gb, query, context):
        Helper method to collect important information about groupbys: raw
        field name, type, time information, qualified name, ...
        """
1947 split = gb.split(':')
1948 field_type = self._fields[split[0]].type
1949 gb_function = split[1] if len(split) == 2 else None
1950 temporal = field_type in ('date', 'datetime')
1951 tz_convert = field_type == 'datetime' and context.get('tz') in pytz.all_timezones
1952 qualified_field = self._inherits_join_calc(split[0], query)
        if temporal:
            display_formats = {
                # Careful with week/year formats:
                #  - yyyy (lower) must always be used, *except* for week+year formats
                #  - YYYY (upper) must always be used for week+year format
                #    e.g. 2006-01-01 is W52 2005 in some locales (de_DE),
                #    and W1 2006 for others
                #
                # Mixing both formats, e.g. 'MMM YYYY' would yield wrong results,
                # such as 2006-01-01 being formatted as "January 2005" in some locales.
                # Cfr: http://babel.pocoo.org/docs/dates/#date-fields
                'day': 'dd MMM yyyy', # yyyy = normal year
                'week': "'W'w YYYY",  # w YYYY = ISO week-year
                'month': 'MMMM yyyy',
                'quarter': 'QQQ yyyy',
                'year': 'yyyy',
            }
            time_intervals = {
                'day': dateutil.relativedelta.relativedelta(days=1),
                'week': datetime.timedelta(days=7),
                'month': dateutil.relativedelta.relativedelta(months=1),
                'quarter': dateutil.relativedelta.relativedelta(months=3),
                'year': dateutil.relativedelta.relativedelta(years=1)
            }
            if tz_convert:
                qualified_field = "timezone('%s', timezone('UTC',%s))" % (context.get('tz', 'UTC'), qualified_field)
            qualified_field = "date_trunc('%s', %s)" % (gb_function or 'month', qualified_field)
        if field_type == 'boolean':
            qualified_field = "coalesce(%s,false)" % qualified_field
        return {
            'field': split[0],
            'groupby': gb,
            'type': field_type,
            'display_format': display_formats[gb_function or 'month'] if temporal else None,
            'interval': time_intervals[gb_function or 'month'] if temporal else None,
            'tz_convert': tz_convert,
            'qualified_field': qualified_field
        }
1992 def _read_group_prepare_data(self, key, value, groupby_dict, context):
        Helper method to sanitize the data received by read_group. The None
        values are converted to False, and the date/datetime values are
        formatted and corrected according to the timezones.
        """
1998 value = False if value is None else value
1999 gb = groupby_dict.get(key)
2000 if gb and gb['type'] in ('date', 'datetime') and value:
2001 if isinstance(value, basestring):
2002 dt_format = DEFAULT_SERVER_DATETIME_FORMAT if gb['type'] == 'datetime' else DEFAULT_SERVER_DATE_FORMAT
2003 value = datetime.datetime.strptime(value, dt_format)
2004 if gb['tz_convert']:
                value = pytz.timezone(context['tz']).localize(value)
        return value
2008 def _read_group_get_domain(self, groupby, value):
2010 Helper method to construct the domain corresponding to a groupby and
        a given value. This is mostly relevant for date/datetime.
        """
2013 if groupby['type'] in ('date', 'datetime') and value:
2014 dt_format = DEFAULT_SERVER_DATETIME_FORMAT if groupby['type'] == 'datetime' else DEFAULT_SERVER_DATE_FORMAT
2015 domain_dt_begin = value
2016 domain_dt_end = value + groupby['interval']
2017 if groupby['tz_convert']:
2018 domain_dt_begin = domain_dt_begin.astimezone(pytz.utc)
2019 domain_dt_end = domain_dt_end.astimezone(pytz.utc)
2020 return [(groupby['field'], '>=', domain_dt_begin.strftime(dt_format)),
2021 (groupby['field'], '<', domain_dt_end.strftime(dt_format))]
        if groupby['type'] == 'many2one' and value:
            value = value[0]
        return [(groupby['field'], '=', value)]
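    # Result sketch (hypothetical groupby values): a date field grouped by
    # month yields a half-open range, while a many2one group keeps only the
    # id of its (id, name) pair:
    #
    #   [('date_order', '>=', '2014-03-01'), ('date_order', '<', '2014-04-01')]
    #   [('partner_id', '=', 7)]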
2026 def _read_group_format_result(self, data, annotated_groupbys, groupby, groupby_dict, domain, context):
        Helper method to format the data contained in the dictionary data by
        adding the domain corresponding to its values, the groupbys in the
        context and by properly formatting the date/datetime values.
        """
2032 domain_group = [dom for gb in annotated_groupbys for dom in self._read_group_get_domain(gb, data[gb['groupby']])]
2033 for k,v in data.iteritems():
2034 gb = groupby_dict.get(k)
2035 if gb and gb['type'] in ('date', 'datetime') and v:
2036 data[k] = babel.dates.format_date(v, format=gb['display_format'], locale=context.get('lang', 'en_US'))
        del data['id']
        data['__domain'] = domain_group + domain
        if len(groupby) - len(annotated_groupbys) >= 1:
            data['__context'] = {'group_by': groupby[len(annotated_groupbys):]}
        return data
2044 def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False, lazy=True):
2046 Get the list of records in list view grouped by the given ``groupby`` fields
2048 :param cr: database cursor
2049 :param uid: current user id
2050 :param domain: list specifying search criteria [['field_name', 'operator', 'value'], ...]
2051 :param list fields: list of fields present in the list view specified on the object
2052 :param list groupby: list of groupby descriptions by which the records will be grouped.
2053 A groupby description is either a field (then it will be grouped by that field)
2054 or a string 'field:groupby_function'. Right now, the only functions supported
2055 are 'day', 'week', 'month', 'quarter' or 'year', and they only make sense for
2056 date/datetime fields.
2057 :param int offset: optional number of records to skip
2058 :param int limit: optional max number of records to return
2059 :param dict context: context arguments, like lang, time zone.
2060 :param list orderby: optional ``order by`` specification, for
2061 overriding the natural sort ordering of the
2062 groups, see also :py:meth:`~osv.osv.osv.search`
2063 (supported only for many2one fields currently)
        :param bool lazy: if true, the results are only grouped by the first groupby and the
                remaining groupbys are put in the __context key.  If false, all the groupbys are
                done in one call.
        :return: list of dictionaries (one dictionary for each record) containing:
2069 * the values of fields grouped by the fields in ``groupby`` argument
2070 * __domain: list of tuples specifying the search criteria
2071 * __context: dictionary with argument like ``groupby``
        :rtype: [{'field_name_1': value, ...}, ...]
        :raise AccessError: * if user has no read rights on the requested object
                            * if user tries to bypass access rules for read on the requested object
        """
        if context is None:
            context = {}
        self.check_access_rights(cr, uid, 'read')
        query = self._where_calc(cr, uid, domain, context=context)
2080 fields = fields or self._columns.keys()
2082 groupby = [groupby] if isinstance(groupby, basestring) else groupby
2083 groupby_list = groupby[:1] if lazy else groupby
2084 annotated_groupbys = [self._read_group_process_groupby(gb, query, context)
2085 for gb in groupby_list]
2086 groupby_fields = [g['field'] for g in annotated_groupbys]
2087 order = orderby or ','.join([g for g in groupby_list])
2088 groupby_dict = {gb['groupby']: gb for gb in annotated_groupbys}
2090 self._apply_ir_rules(cr, uid, query, 'read', context=context)
2091 for gb in groupby_fields:
2092 assert gb in fields, "Fields in 'groupby' must appear in the list of fields to read (perhaps it's missing in the list view?)"
2093 groupby_def = self._columns.get(gb) or (self._inherit_fields.get(gb) and self._inherit_fields.get(gb)[2])
2094 assert groupby_def and groupby_def._classic_write, "Fields in 'groupby' must be regular database-persisted fields (no function or related fields), or function fields with store=True"
2095 if not (gb in self._fields):
2096 # Don't allow arbitrary values, as this would be a SQL injection vector!
2097 raise except_orm(_('Invalid group_by'),
2098 _('Invalid group_by specification: "%s".\nA group_by specification must be a list of valid fields.')%(gb,))
        aggregated_fields = [
            f for f in fields
            if f not in ('id', 'sequence')
            if f not in groupby_fields
            if f in self._fields
            if self._fields[f].type in ('integer', 'float')
            if getattr(self._fields[f].base_field.column, '_classic_write')
        ]
2109 field_formatter = lambda f: (self._fields[f].group_operator or 'sum', self._inherits_join_calc(f, query), f)
2110 select_terms = ["%s(%s) AS %s" % field_formatter(f) for f in aggregated_fields]
2112 for gb in annotated_groupbys:
2113 select_terms.append('%s as "%s" ' % (gb['qualified_field'], gb['groupby']))
2115 groupby_terms, orderby_terms = self._read_group_prepare(order, aggregated_fields, annotated_groupbys, query)
2116 from_clause, where_clause, where_clause_params = query.get_sql()
        if lazy and (len(groupby_fields) >= 2 or not context.get('group_by_no_leaf')):
            count_field = groupby_fields[0] if len(groupby_fields) >= 1 else '_'
        else:
            count_field = '_'
        count_field += '_count'
2123 prefix_terms = lambda prefix, terms: (prefix + " " + ",".join(terms)) if terms else ''
2124 prefix_term = lambda prefix, term: ('%s %s' % (prefix, term)) if term else ''
        query = """
            SELECT min(%(table)s.id) AS id, count(%(table)s.id) AS %(count_field)s %(extra_fields)s
            FROM %(from)s
            %(where)s
            %(groupby)s
            %(orderby)s
            %(limit)s
            %(offset)s
        """ % {
            'table': self._table,
            'count_field': count_field,
            'extra_fields': prefix_terms(',', select_terms),
            'from': from_clause,
            'where': prefix_term('WHERE', where_clause),
            'groupby': prefix_terms('GROUP BY', groupby_terms),
            'orderby': prefix_terms('ORDER BY', orderby_terms),
            'limit': prefix_term('LIMIT', int(limit) if limit else None),
            'offset': prefix_term('OFFSET', int(offset) if offset else None),
        }
        cr.execute(query, where_clause_params)
2146 fetched_data = cr.dictfetchall()
        if not groupby_fields:
            return fetched_data
        many2onefields = [gb['field'] for gb in annotated_groupbys if gb['type'] == 'many2one']
        if many2onefields:
            data_ids = [r['id'] for r in fetched_data]
            many2onefields = list(set(many2onefields))
            data_dict = {d['id']: d for d in self.read(cr, uid, data_ids, many2onefields, context=context)}
            for d in fetched_data:
                d.update(data_dict[d['id']])
2159 data = map(lambda r: {k: self._read_group_prepare_data(k,v, groupby_dict, context) for k,v in r.iteritems()}, fetched_data)
2160 result = [self._read_group_format_result(d, annotated_groupbys, groupby, groupby_dict, domain, context) for d in data]
2161 if lazy and groupby_fields[0] in self._group_by_full:
            # Right now, read_group only fills results in lazy mode (by default).
            # If you need the empty groups in 'eager' mode, then the method
            # _read_group_fill_results needs to be completely reimplemented
            # in a sane way
            result = self._read_group_fill_results(cr, uid, domain, groupby_fields[0], groupby[len(annotated_groupbys):],
                                                   aggregated_fields, count_field, result, read_group_order=order,
                                                   context=context)
        return result
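    # Usage sketch (hypothetical model, fields and domain): monthly totals,
    # one dictionary per group.
    #
    #   groups = self.pool['sale.order'].read_group(cr, uid,
    #           [('state', '=', 'done')], ['amount_total'], ['date_order:month'])
    #   # each group looks like:
    #   #   {'date_order:month': 'March 2014', 'amount_total': 1234.0,
    #   #    'date_order_count': 3, '__domain': [...], ...}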
2171 def _inherits_join_add(self, current_model, parent_model_name, query):
2173 Add missing table SELECT and JOIN clause to ``query`` for reaching the parent table (no duplicates)
2174 :param current_model: current model object
2175 :param parent_model_name: name of the parent model for which the clauses should be added
        :param query: query object on which the JOIN should be added
        """
        inherits_field = current_model._inherits[parent_model_name]
        parent_model = self.pool[parent_model_name]
        parent_alias, parent_alias_statement = query.add_join((current_model._table, parent_model._table, inherits_field, 'id', inherits_field), implicit=True)
        return parent_alias
2183 def _inherits_join_calc(self, field, query):
2185 Adds missing table select and join clause(s) to ``query`` for reaching
2186 the field coming from an '_inherits' parent table (no duplicates).
2188 :param field: name of inherited field to reach
2189 :param query: query object on which the JOIN should be added
        :return: qualified name of field, to be used in SELECT clause
        """
2192 current_table = self
2193 parent_alias = '"%s"' % current_table._table
2194 while field in current_table._inherit_fields and not field in current_table._columns:
2195 parent_model_name = current_table._inherit_fields[field][0]
2196 parent_table = self.pool[parent_model_name]
2197 parent_alias = self._inherits_join_add(current_table, parent_model_name, query)
2198 current_table = parent_table
2199 return '%s."%s"' % (parent_alias, field)
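    # Sketch (hypothetical models): if res.users _inherits from res.partner
    # through its partner_id field, asking for the inherited field 'name'
    # adds the users -> partner JOIN to the query and returns a qualified
    # name such as '"res_partner"."name"'.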
2201 def _parent_store_compute(self, cr):
        if not self._parent_store:
            return
        _logger.info('Computing parent left and right for table %s...', self._table)
        def browse_rec(root, pos=0):
            where = self._parent_name+'='+str(root)
            if not root:
                where = self._parent_name+' IS NULL'
            if self._parent_order:
                where += ' order by '+self._parent_order
            cr.execute('SELECT id FROM '+self._table+' WHERE '+where)
            pos2 = pos + 1
            for id in cr.fetchall():
                pos2 = browse_rec(id[0], pos2)
            cr.execute('update '+self._table+' set parent_left=%s, parent_right=%s where id=%s', (pos, pos2, root))
            return pos2 + 1
        query = 'SELECT id FROM '+self._table+' WHERE '+self._parent_name+' IS NULL'
        if self._parent_order:
            query += ' order by ' + self._parent_order
        pos = 0
        cr.execute(query)
        for (root,) in cr.fetchall():
            pos = browse_rec(root, pos)
        self.invalidate_cache(cr, SUPERUSER_ID, ['parent_left', 'parent_right'])
        return True
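    # Nested-set sketch (hypothetical tree): browse_rec gives each node an
    # interval [parent_left, parent_right] that encloses the intervals of all
    # of its descendants, e.g.
    #
    #   A [0, 5]
    #   +-- B [1, 2]
    #   +-- C [3, 4]
    #
    # so "records below A" is simply: parent_left > 0 AND parent_left < 5.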
2228 def _update_store(self, cr, f, k):
2229 _logger.info("storing computed values of fields.function '%s'", k)
2230 ss = self._columns[k]._symbol_set
2231 update_query = 'UPDATE "%s" SET "%s"=%s WHERE id=%%s' % (self._table, k, ss[0])
2232 cr.execute('select id from '+self._table)
2233 ids_lst = map(lambda x: x[0], cr.fetchall())
        while ids_lst:
            iids = ids_lst[:AUTOINIT_RECALCULATE_STORED_FIELDS]
            ids_lst = ids_lst[AUTOINIT_RECALCULATE_STORED_FIELDS:]
            res = f.get(cr, self, iids, k, SUPERUSER_ID, {})
            for key, val in res.items():
                if f._multi:
                    val = val[k]
                # if val is a many2one, just write the ID
                if type(val) == tuple:
                    val = val[0]
                if val is not False:
                    cr.execute(update_query, (ss[1](val), key))
2248 def _check_selection_field_value(self, field, value):
2249 """ Check whether value is among the valid values for the given
        selection/reference field, and raise an exception if not.
        """
2252 field = self._fields[field]
2253 field.convert_to_cache(value, self)
2255 def _check_removed_columns(self, cr, log=False):
2256 # iterate on the database columns to drop the NOT NULL constraints
2257 # of fields which were required but have been removed (or will be added by another module)
2258 columns = [c for c in self._columns if not (isinstance(self._columns[c], fields.function) and not self._columns[c].store)]
2259 columns += MAGIC_COLUMNS
2260 cr.execute("SELECT a.attname, a.attnotnull"
2261 " FROM pg_class c, pg_attribute a"
2262 " WHERE c.relname=%s"
2263 " AND c.oid=a.attrelid"
2264 " AND a.attisdropped=%s"
2265 " AND pg_catalog.format_type(a.atttypid, a.atttypmod) NOT IN ('cid', 'tid', 'oid', 'xid')"
2266 " AND a.attname NOT IN %s", (self._table, False, tuple(columns))),
        for column in cr.dictfetchall():
            if log:
                _logger.debug("column %s is in the table %s but not in the corresponding object %s",
                              column['attname'], self._table, self._name)
2272 if column['attnotnull']:
2273 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, column['attname']))
2274 _schema.debug("Table '%s': column '%s': dropped NOT NULL constraint",
2275 self._table, column['attname'])
2277 def _save_constraint(self, cr, constraint_name, type, definition):
2279 Record the creation of a constraint for this model, to make it possible
        to delete it later when the module is uninstalled. Type can be either
        'f' (foreign key) or 'u' (unique constraint).
        """
2283 if not self._module:
            # no need to save constraints for custom models as they're not part
            # of any module
            return
        assert type in ('f', 'u')
        cr.execute("""
            SELECT type, definition FROM ir_model_constraint, ir_module_module
2290 WHERE ir_model_constraint.module=ir_module_module.id
2291 AND ir_model_constraint.name=%s
2292 AND ir_module_module.name=%s
2293 """, (constraint_name, self._module))
        constraints = cr.dictfetchone()
        if not constraints:
            cr.execute("""
                INSERT INTO ir_model_constraint
2298 (name, date_init, date_update, module, model, type, definition)
2299 VALUES (%s, now() AT TIME ZONE 'UTC', now() AT TIME ZONE 'UTC',
2300 (SELECT id FROM ir_module_module WHERE name=%s),
2301 (SELECT id FROM ir_model WHERE model=%s), %s, %s)""",
2302 (constraint_name, self._module, self._name, type, definition))
        elif constraints['type'] != type or (definition and constraints['definition'] != definition):
            cr.execute("""
                UPDATE ir_model_constraint
2306 SET date_update=now() AT TIME ZONE 'UTC', type=%s, definition=%s
2307 WHERE name=%s AND module = (SELECT id FROM ir_module_module WHERE name=%s)""",
2308 (type, definition, constraint_name, self._module))
2310 def _save_relation_table(self, cr, relation_table):
2312 Record the creation of a many2many for this model, to make it possible
        to delete it later when the module is uninstalled.
        """
        cr.execute("""
            SELECT 1 FROM ir_model_relation, ir_module_module
2317 WHERE ir_model_relation.module=ir_module_module.id
2318 AND ir_model_relation.name=%s
2319 AND ir_module_module.name=%s
2320 """, (relation_table, self._module))
2322 cr.execute("""INSERT INTO ir_model_relation (name, date_init, date_update, module, model)
2323 VALUES (%s, now() AT TIME ZONE 'UTC', now() AT TIME ZONE 'UTC',
2324 (SELECT id FROM ir_module_module WHERE name=%s),
2325 (SELECT id FROM ir_model WHERE model=%s))""",
2326 (relation_table, self._module, self._name))
2327 self.invalidate_cache(cr, SUPERUSER_ID)
2329 # checked version: for direct m2o starting from `self`
2330 def _m2o_add_foreign_key_checked(self, source_field, dest_model, ondelete):
2331 assert self.is_transient() or not dest_model.is_transient(), \
2332 'Many2One relationships from non-transient Model to TransientModel are forbidden'
2333 if self.is_transient() and not dest_model.is_transient():
2334 # TransientModel relationships to regular Models are annoying
2335 # usually because they could block deletion due to the FKs.
2336 # So unless stated otherwise we default them to ondelete=cascade.
2337 ondelete = ondelete or 'cascade'
2338 fk_def = (self._table, source_field, dest_model._table, ondelete or 'set null')
2339 self._foreign_keys.add(fk_def)
2340 _schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s", *fk_def)
2342 # unchecked version: for custom cases, such as m2m relationships
2343 def _m2o_add_foreign_key_unchecked(self, source_table, source_field, dest_model, ondelete):
2344 fk_def = (source_table, source_field, dest_model._table, ondelete or 'set null')
2345 self._foreign_keys.add(fk_def)
2346 _schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s", *fk_def)
2348 def _drop_constraint(self, cr, source_table, constraint_name):
2349 cr.execute("ALTER TABLE %s DROP CONSTRAINT %s" % (source_table,constraint_name))
2351 def _m2o_fix_foreign_key(self, cr, source_table, source_field, dest_model, ondelete):
2352 # Find FK constraint(s) currently established for the m2o field,
2353 # and see whether they are stale or not
2354 cr.execute("""SELECT confdeltype as ondelete_rule, conname as constraint_name,
2355 cl2.relname as foreign_table
2356 FROM pg_constraint as con, pg_class as cl1, pg_class as cl2,
2357 pg_attribute as att1, pg_attribute as att2
2358 WHERE con.conrelid = cl1.oid
2359 AND cl1.relname = %s
2360 AND con.confrelid = cl2.oid
2361 AND array_lower(con.conkey, 1) = 1
2362 AND con.conkey[1] = att1.attnum
2363 AND att1.attrelid = cl1.oid
2364 AND att1.attname = %s
2365 AND array_lower(con.confkey, 1) = 1
2366 AND con.confkey[1] = att2.attnum
2367 AND att2.attrelid = cl2.oid
2368 AND att2.attname = %s
2369 AND con.contype = 'f'""", (source_table, source_field, 'id'))
2370 constraints = cr.dictfetchall()
2372 if len(constraints) == 1:
            # Is it the right constraint?
            cons, = constraints
            if cons['ondelete_rule'] != POSTGRES_CONFDELTYPES.get((ondelete or 'set null').upper(), 'a')\
2376 or cons['foreign_table'] != dest_model._table:
2377 # Wrong FK: drop it and recreate
2378 _schema.debug("Table '%s': dropping obsolete FK constraint: '%s'",
2379 source_table, cons['constraint_name'])
                self._drop_constraint(cr, source_table, cons['constraint_name'])
            else:
                # it's all good, nothing to do!
                return
        else:
            # Multiple FKs found for the same field, drop them all, and re-create
2386 for cons in constraints:
2387 _schema.debug("Table '%s': dropping duplicate FK constraints: '%s'",
2388 source_table, cons['constraint_name'])
2389 self._drop_constraint(cr, source_table, cons['constraint_name'])
2391 # (re-)create the FK
2392 self._m2o_add_foreign_key_checked(source_field, dest_model, ondelete)
2395 def _set_default_value_on_column(self, cr, column_name, context=None):
        # ideally, we should use default_get(), but it fails due to ir.values
        # not being ready
        # get the default value for the column
        default = self._defaults.get(column_name)
2401 if callable(default):
2402 default = default(self, cr, SUPERUSER_ID, context)
2404 column = self._columns[column_name]
2405 ss = column._symbol_set
2406 db_default = ss[1](default)
2407 # Write default if non-NULL, except for booleans for which False means
2408 # the same as NULL - this saves us an expensive query on large tables.
        write_default = (db_default is not None if column._type != 'boolean'
                         else db_default)
        if write_default:
            _logger.debug("Table '%s': setting default value of new column %s to %r",
2413 self._table, column_name, default)
2414 query = 'UPDATE "%s" SET "%s"=%s WHERE "%s" is NULL' % (
2415 self._table, column_name, ss[0], column_name)
2416 cr.execute(query, (db_default,))
2417 # this is a disgrace
2420 def _auto_init(self, cr, context=None):
2423 Call _field_create and, unless _auto is False:
2425 - create the corresponding table in database for the model,
2426 - possibly add the parent columns in database,
2427 - possibly add the columns 'create_uid', 'create_date', 'write_uid',
2428 'write_date' in database if _log_access is True (the default),
2429 - report on database columns no more existing in _columns,
2430 - remove no more existing not null constraints,
2431 - alter existing database columns to match _columns,
2432 - create database tables to match _columns,
        - add database indices to match _columns,
        - save in self._foreign_keys a list of foreign keys to create (see
          _auto_end).
        """
2438 self._foreign_keys = set()
2439 raise_on_invalid_object_name(self._name)
2442 store_compute = False
2443 stored_fields = [] # new-style stored fields with compute
2445 update_custom_fields = context.get('update_custom_fields', False)
2446 self._field_create(cr, context=context)
        create = not self._table_exist(cr)
        if self._auto:
            if create:
                self._create_table(cr)
                has_rows = False
            else:
                cr.execute('SELECT 1 FROM "%s" LIMIT 1' % self._table)
                has_rows = cr.rowcount
2458 if self._parent_store:
2459 if not self._parent_columns_exist(cr):
2460 self._create_parent_columns(cr)
2461 store_compute = True
2463 self._check_removed_columns(cr, log=False)
2465 # iterate on the "object columns"
2466 column_data = self._select_column_data(cr)
2468 for k, f in self._columns.iteritems():
            if k == 'id': # FIXME: maybe id should be a regular column?
                continue
            # Don't update custom (also called manual) fields
            if f.manual and not update_custom_fields:
                continue
2475 if isinstance(f, fields.one2many):
2476 self._o2m_raise_on_missing_reference(cr, f)
2478 elif isinstance(f, fields.many2many):
2479 self._m2m_raise_or_create_relation(cr, f)
2482 res = column_data.get(k)
2484 # The field is not found as-is in database, try if it
2485 # exists with an old name.
2486 if not res and hasattr(f, 'oldname'):
                    res = column_data.get(f.oldname)
                    if res:
                        cr.execute('ALTER TABLE "%s" RENAME "%s" TO "%s"' % (self._table, f.oldname, k))
                        res['attname'] = k
                        column_data[k] = res
2492 _schema.debug("Table '%s': renamed column '%s' to '%s'",
2493 self._table, f.oldname, k)
2495 # The field already exists in database. Possibly
                # change its type, rename it, drop it or change its
                # constraints.
                if res:
                    f_pg_type = res['typname']
2500 f_pg_size = res['size']
2501 f_pg_notnull = res['attnotnull']
2502 if isinstance(f, fields.function) and not f.store and\
2503 not getattr(f, 'nodrop', False):
2504 _logger.info('column %s (%s) converted to a function, removed from table %s',
2505 k, f.string, self._table)
2506 cr.execute('ALTER TABLE "%s" DROP COLUMN "%s" CASCADE' % (self._table, k))
                        _schema.debug("Table '%s': dropped column '%s' with cascade", self._table, k)
                        f_obj_type = None
                    else:
                        f_obj_type = get_pg_type(f) and get_pg_type(f)[0]
                    if f_obj_type:
                        ok = False
                        casts = [
                            ('text', 'char', pg_varchar(f.size), '::%s' % pg_varchar(f.size)),
                            ('varchar', 'text', 'TEXT', ''),
                            ('int4', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
                            ('date', 'datetime', 'TIMESTAMP', '::TIMESTAMP'),
                            ('timestamp', 'date', 'date', '::date'),
                            ('numeric', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
                            ('float8', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
                        ]
2525 if f_pg_type == 'varchar' and f._type == 'char' and f_pg_size and (f.size is None or f_pg_size < f.size):
                            try:
                                with cr.savepoint():
                                    cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" TYPE %s' % (self._table, k, pg_varchar(f.size)))
2529 except psycopg2.NotSupportedError:
2530 # In place alter table cannot be done because a view is depending of this field.
2531 # Do a manual copy. This will drop the view (that will be recreated later)
2532 cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO temp_change_size' % (self._table, k))
2533 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, pg_varchar(f.size)))
2534 cr.execute('UPDATE "%s" SET "%s"=temp_change_size::%s' % (self._table, k, pg_varchar(f.size)))
2535 cr.execute('ALTER TABLE "%s" DROP COLUMN temp_change_size CASCADE' % (self._table,))
2537 _schema.debug("Table '%s': column '%s' (type varchar) changed size from %s to %s",
2538 self._table, k, f_pg_size or 'unlimited', f.size or 'unlimited')
                        for c in casts:
                            if (f_pg_type==c[0]) and (f._type==c[1]):
                                if f_pg_type != f_obj_type:
                                    ok = True
                                    cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO __temp_type_cast' % (self._table, k))
                                    cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, c[2]))
                                    cr.execute(('UPDATE "%s" SET "%s"= __temp_type_cast'+c[3]) % (self._table, k))
                                    cr.execute('ALTER TABLE "%s" DROP COLUMN  __temp_type_cast CASCADE' % (self._table,))
                                    _schema.debug("Table '%s': column '%s' changed type from %s to %s",
                                                  self._table, k, c[0], c[1])
                                break
                        if f_pg_type != f_obj_type:
                            if not ok:
                                i = 0
                                while True:
                                    newname = k + '_moved' + str(i)
                                    cr.execute("SELECT count(1) FROM pg_class c,pg_attribute a " \
                                        "WHERE c.relname=%s " \
                                        "AND a.attname=%s " \
                                        "AND c.oid=a.attrelid ", (self._table, newname))
                                    if not cr.fetchone()[0]:
                                        break
                                    i += 1
                                if f_pg_notnull:
                                    cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, k))
                                cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO "%s"' % (self._table, k, newname))
                                cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, get_pg_type(f)[1]))
                                cr.execute("COMMENT ON COLUMN %s.\"%s\" IS %%s" % (self._table, k), (f.string,))
                                _schema.debug("Table '%s': column '%s' has changed type (DB=%s, def=%s), data moved to column %s !",
                                              self._table, k, f_pg_type, f._type, newname)
2572 # if the field is required and hasn't got a NOT NULL constraint
                        if f.required and f_pg_notnull == 0:
                            if has_rows:
                                self._set_default_value_on_column(cr, k, context=context)
                            # add the NOT NULL constraint
                            try:
                                cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k), log_exceptions=False)
                                cr.commit()
                                _schema.debug("Table '%s': column '%s': added NOT NULL constraint",
                                              self._table, k)
                            except Exception:
                                msg = "Table '%s': unable to set a NOT NULL constraint on column '%s' !\n"\
                                    "If you want to have it, you should update the records and execute manually:\n"\
                                    "ALTER TABLE %s ALTER COLUMN %s SET NOT NULL"
                                _schema.warning(msg, self._table, k, self._table, k)
                            cr.commit()
                        elif not f.required and f_pg_notnull == 1:
                            cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, k))
                            cr.commit()
                            _schema.debug("Table '%s': column '%s': dropped NOT NULL constraint",
                                          self._table, k)
2594 indexname = '%s_%s_index' % (self._table, k)
2595 cr.execute("SELECT indexname FROM pg_indexes WHERE indexname = %s and tablename = %s", (indexname, self._table))
2596 res2 = cr.dictfetchall()
2597 if not res2 and f.select:
2598 cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (self._table, k, self._table, k))
2600 if f._type == 'text':
2601 # FIXME: for fields.text columns we should try creating GIN indexes instead (seems most suitable for an ERP context)
2602 msg = "Table '%s': Adding (b-tree) index for %s column '%s'."\
2603 "This is probably useless (does not work for fulltext search) and prevents INSERTs of long texts"\
2604 " because there is a length limit for indexable btree values!\n"\
2605 "Use a search view instead if you simply want to make the field searchable."
2606 _schema.warning(msg, self._table, f._type, k)
2607 if res2 and not f.select:
2608 cr.execute('DROP INDEX "%s_%s_index"' % (self._table, k))
2610 msg = "Table '%s': dropping index for column '%s' of type '%s' as it is not required anymore"
2611 _schema.debug(msg, self._table, k, f._type)
2613 if isinstance(f, fields.many2one) or (isinstance(f, fields.function) and f._type == 'many2one' and f.store):
2614 dest_model = self.pool[f._obj]
2615 if dest_model._auto and dest_model._table != 'ir_actions':
2616 self._m2o_fix_foreign_key(cr, self._table, k, dest_model, f.ondelete)
                else:
                    # The field doesn't exist in database. Create it if necessary.
                    if not isinstance(f, fields.function) or f.store:
2621 # add the missing field
2622 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, get_pg_type(f)[1]))
2623 cr.execute("COMMENT ON COLUMN %s.\"%s\" IS %%s" % (self._table, k), (f.string,))
2624 _schema.debug("Table '%s': added column '%s' with definition=%s",
                                      self._table, k, get_pg_type(f)[1])
                        # initialize it
                        if has_rows:
                            self._set_default_value_on_column(cr, k, context=context)
2631 # remember the functions to call for the stored fields
                        if isinstance(f, fields.function):
                            order = 10
                            if f.store is not True: # i.e. if f.store is a dict
                                order = f.store[f.store.keys()[0]][2]
                            todo_end.append((order, self._update_store, (f, k)))
2638 # remember new-style stored fields with compute method
2639 if k in self._fields and self._fields[k].depends:
2640 stored_fields.append(self._fields[k])
2642 # and add constraints if needed
2643 if isinstance(f, fields.many2one) or (isinstance(f, fields.function) and f._type == 'many2one' and f.store):
2644 if f._obj not in self.pool:
2645 raise except_orm('Programming Error', 'There is no reference available for %s' % (f._obj,))
2646 dest_model = self.pool[f._obj]
2647 ref = dest_model._table
2648 # ir_actions is inherited so foreign key doesn't work on it
2649 if dest_model._auto and ref != 'ir_actions':
2650 self._m2o_add_foreign_key_checked(k, dest_model, f.ondelete)
                        if f.select:
                            cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (self._table, k, self._table, k))
                        if f.required:
                            try:
                                cr.commit()
                                cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k))
                                _schema.debug("Table '%s': column '%s': added a NOT NULL constraint",
                                              self._table, k)
                            except Exception:
                                msg = "WARNING: unable to set column %s of table %s not null !\n"\
                                    "Try to re-run: openerp-server --update=module\n"\
                                    "If it doesn't work, update records and execute manually:\n"\
                                    "ALTER TABLE %s ALTER COLUMN %s SET NOT NULL"
                                _logger.warning(msg, k, self._table, self._table, k, exc_info=True)
                            cr.commit()
2668 cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (self._table,))
2669 create = not bool(cr.fetchone())
2671 cr.commit() # start a new transaction
2674 self._add_sql_constraints(cr)
2677 self._execute_sql(cr)
2680 self._parent_store_compute(cr)
        if stored_fields:
            # trigger computation of new-style stored fields with a compute method
            def func(cr):
                _logger.info("Storing computed values of %s fields %s",
                             self._name, ', '.join(sorted(f.name for f in stored_fields)))
                recs = self.browse(cr, SUPERUSER_ID, [], {'active_test': False})
                recs = recs.search([])
                if recs:
                    map(recs._recompute_todo, stored_fields)
                    recs.recompute()
            todo_end.append((1000, func, ()))
        return todo_end
2698 def _auto_end(self, cr, context=None):
2699 """ Create the foreign keys recorded by _auto_init. """
2700 for t, k, r, d in self._foreign_keys:
2701 cr.execute('ALTER TABLE "%s" ADD FOREIGN KEY ("%s") REFERENCES "%s" ON DELETE %s' % (t, k, r, d))
            self._save_constraint(cr, "%s_%s_fkey" % (t, k), 'f', False)
        cr.commit()
        del self._foreign_keys
2707 def _table_exist(self, cr):
2708 cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (self._table,))
2712 def _create_table(self, cr):
2713 cr.execute('CREATE TABLE "%s" (id SERIAL NOT NULL, PRIMARY KEY(id))' % (self._table,))
2714 cr.execute(("COMMENT ON TABLE \"%s\" IS %%s" % self._table), (self._description,))
2715 _schema.debug("Table '%s': created", self._table)
2718 def _parent_columns_exist(self, cr):
2719 cr.execute("""SELECT c.relname
2720 FROM pg_class c, pg_attribute a
2721 WHERE c.relname=%s AND a.attname=%s AND c.oid=a.attrelid
2722 """, (self._table, 'parent_left'))
2726 def _create_parent_columns(self, cr):
2727 cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_left" INTEGER' % (self._table,))
2728 cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_right" INTEGER' % (self._table,))
2729 if 'parent_left' not in self._columns:
            _logger.error('create a column parent_left on object %s: fields.integer(\'Left Parent\', select=1)',
                          self._name)
2732 _schema.debug("Table '%s': added column '%s' with definition=%s",
2733 self._table, 'parent_left', 'INTEGER')
2734 elif not self._columns['parent_left'].select:
            _logger.error('parent_left column on object %s must be indexed! Add select=1 to the field definition)',
                          self._name)
2737 if 'parent_right' not in self._columns:
            _logger.error('create a column parent_right on object %s: fields.integer(\'Right Parent\', select=1)',
                          self._name)
2740 _schema.debug("Table '%s': added column '%s' with definition=%s",
2741 self._table, 'parent_right', 'INTEGER')
2742 elif not self._columns['parent_right'].select:
            _logger.error('parent_right column on object %s must be indexed! Add select=1 to the field definition)',
                          self._name)
2745 if self._columns[self._parent_name].ondelete not in ('cascade', 'restrict'):
2746 _logger.error("The column %s on object %s must be set as ondelete='cascade' or 'restrict'",
2747 self._parent_name, self._name)
2752 def _select_column_data(self, cr):
2753 # attlen is the number of bytes necessary to represent the type when
2754 # the type has a fixed size. If the type has a varying size attlen is
2755 # -1 and atttypmod is the size limit + 4, or -1 if there is no limit.
2756 cr.execute("SELECT c.relname,a.attname,a.attlen,a.atttypmod,a.attnotnull,a.atthasdef,t.typname,CASE WHEN a.attlen=-1 THEN (CASE WHEN a.atttypmod=-1 THEN 0 ELSE a.atttypmod-4 END) ELSE a.attlen END as size " \
2757 "FROM pg_class c,pg_attribute a,pg_type t " \
2758 "WHERE c.relname=%s " \
2759 "AND c.oid=a.attrelid " \
2760 "AND a.atttypid=t.oid", (self._table,))
2761 return dict(map(lambda x: (x['attname'], x),cr.dictfetchall()))
2764 def _o2m_raise_on_missing_reference(self, cr, f):
2765 # TODO this check should be a method on fields.one2many.
2766 if f._obj in self.pool:
2767 other = self.pool[f._obj]
2768 # TODO the condition could use fields_get_keys().
2769 if f._fields_id not in other._columns.keys():
2770 if f._fields_id not in other._inherit_fields.keys():
2771 raise except_orm('Programming Error', "There is no reference field '%s' found for '%s'" % (f._fields_id, f._obj,))
2773 def _m2m_raise_or_create_relation(self, cr, f):
2774 m2m_tbl, col1, col2 = f._sql_names(self)
2775 # do not create relations for custom fields as they do not belong to a module
2776 # they will be automatically removed when dropping the corresponding ir.model.field
2777 # table name for custom relation all starts with x_, see __init__
2778 if not m2m_tbl.startswith('x_'):
2779 self._save_relation_table(cr, m2m_tbl)
2780 cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (m2m_tbl,))
2781 if not cr.dictfetchall():
2782 if f._obj not in self.pool:
2783 raise except_orm('Programming Error', 'Many2Many destination model does not exist: `%s`' % (f._obj,))
2784 dest_model = self.pool[f._obj]
2785 ref = dest_model._table
2786 cr.execute('CREATE TABLE "%s" ("%s" INTEGER NOT NULL, "%s" INTEGER NOT NULL, UNIQUE("%s","%s"))' % (m2m_tbl, col1, col2, col1, col2))
2787 # create foreign key references with ondelete=cascade, unless the targets are SQL views
2788 cr.execute("SELECT relkind FROM pg_class WHERE relkind IN ('v') AND relname=%s", (ref,))
2789 if not cr.fetchall():
2790 self._m2o_add_foreign_key_unchecked(m2m_tbl, col2, dest_model, 'cascade')
2791 cr.execute("SELECT relkind FROM pg_class WHERE relkind IN ('v') AND relname=%s", (self._table,))
2792 if not cr.fetchall():
2793 self._m2o_add_foreign_key_unchecked(m2m_tbl, col1, self, 'cascade')
2795 cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (m2m_tbl, col1, m2m_tbl, col1))
2796 cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (m2m_tbl, col2, m2m_tbl, col2))
2797 cr.execute("COMMENT ON TABLE \"%s\" IS 'RELATION BETWEEN %s AND %s'" % (m2m_tbl, self._table, ref))
2799 _schema.debug("Create table '%s': m2m relation between '%s' and '%s'", m2m_tbl, self._table, ref)
2802 def _add_sql_constraints(self, cr):
        Modify this model's database table constraints so they match the ones
        in _sql_constraints.
        """
        def unify_cons_text(txt):
2810 return txt.lower().replace(', ',',').replace(' (','(')
2812 for (key, con, _) in self._sql_constraints:
2813 conname = '%s_%s' % (self._table, key)
            # using 1 to get result if no imc but one pgc
            cr.execute("""SELECT definition, 1
                          FROM ir_model_constraint imc
                          RIGHT JOIN pg_constraint pgc
                          ON (pgc.conname = imc.name)
                          WHERE pgc.conname=%s
                          """, (conname,))
            existing_constraints = cr.dictfetchone()
            sql_actions = {
                'drop': {
                    'execute': False,
                    'query': 'ALTER TABLE "%s" DROP CONSTRAINT "%s"' % (self._table, conname, ),
                    'msg_ok': "Table '%s': dropped constraint '%s'. Reason: its definition changed from '%%s' to '%s'" % (
                        self._table, conname, con),
                    'msg_err': "Table '%s': unable to drop \'%s\' constraint !" % (self._table, con),
                    'order': 1,
                },
                'add': {
                    'execute': False,
                    'query': 'ALTER TABLE "%s" ADD CONSTRAINT "%s" %s' % (self._table, conname, con,),
                    'msg_ok': "Table '%s': added constraint '%s' with definition=%s" % (self._table, conname, con),
                    'msg_err': "Table '%s': unable to add \'%s\' constraint !\n If you want to have it, you should update the records and execute manually:\n%%s" % (
                        self._table, con),
                    'order': 2,
                },
            }
2842 if not existing_constraints:
                # constraint does not exist:
2844 sql_actions['add']['execute'] = True
2845 sql_actions['add']['msg_err'] = sql_actions['add']['msg_err'] % (sql_actions['add']['query'], )
2846 elif unify_cons_text(con) != existing_constraints['definition']:
2847 # constraint exists but its definition has changed:
2848 sql_actions['drop']['execute'] = True
2849 sql_actions['drop']['msg_ok'] = sql_actions['drop']['msg_ok'] % (existing_constraints['definition'] or '', )
2850 sql_actions['add']['execute'] = True
2851 sql_actions['add']['msg_err'] = sql_actions['add']['msg_err'] % (sql_actions['add']['query'], )
2853 # we need to add the constraint:
2854 self._save_constraint(cr, conname, 'u', unify_cons_text(con))
2855 sql_actions = [item for item in sql_actions.values()]
2856 sql_actions.sort(key=lambda x: x['order'])
2857 for sql_action in [action for action in sql_actions if action['execute']]:
            try:
                cr.execute(sql_action['query'])
                cr.commit()
                _schema.debug(sql_action['msg_ok'])
            except:
                _schema.warning(sql_action['msg_err'])
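    # Declaration sketch (hypothetical model): _add_sql_constraints consumes
    # _sql_constraints tuples of (key, SQL definition, error message), e.g.
    #
    #   _sql_constraints = [
    #       ('name_uniq', 'unique(name)', 'The name must be unique!'),
    #       ('amount_pos', 'check(amount >= 0)', 'The amount must be positive.'),
    #   ]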
2867 def _execute_sql(self, cr):
2868 """ Execute the SQL code from the _sql attribute (if any)."""
2869 if hasattr(self, "_sql"):
            for line in self._sql.split(';'):
                line2 = line.replace('\n', '').strip()
                if line2:
                    cr.execute(line2)
    # Update objects that use this one to update their _inherits fields
2881 def _inherits_reload(cls):
2882 """ Recompute the _inherit_fields mapping, and inherited fields. """
2885 for parent_model, parent_field in cls._inherits.iteritems():
2886 parent = cls.pool[parent_model]
2887 # old-api struct for _inherit_fields
2888 for name, column in parent._columns.iteritems():
2889 struct[name] = (parent_model, parent_field, column, parent_model)
2890 for name, source in parent._inherit_fields.iteritems():
2891 struct[name] = (parent_model, parent_field, source[2], source[3])
2892 # new-api fields for _fields
2893 for name, field in parent._fields.iteritems():
                fields[name] = field.new(
                    inherited=True,
                    related=(parent_field, name),
                    related_sudo=False,
                )
        cls._inherit_fields = struct
2902 cls._all_columns = cls._get_column_infos()
2904 # add inherited fields that are not redefined locally
2905 for name, field in fields.iteritems():
2906 if name not in cls._fields:
2907 cls._add_field(name, field)
2910 def _get_column_infos(cls):
2911 """Returns a dict mapping all fields names (direct fields and
2912 inherited field via _inherits) to a ``column_info`` struct
2913 giving detailed columns """
2915 # do not inverse for loops, since local fields may hide inherited ones!
2916 for k, (parent, m2o, col, original_parent) in cls._inherit_fields.iteritems():
2917 result[k] = fields.column_info(k, col, parent, m2o, original_parent)
2918 for k, col in cls._columns.iteritems():
            result[k] = fields.column_info(k, col)
        return result
2923 def _inherits_check(cls):
2924 for table, field_name in cls._inherits.items():
2925 if field_name not in cls._columns:
2926 _logger.info('Missing many2one field definition for _inherits reference "%s" in "%s", using default one.', field_name, cls._name)
2927 cls._columns[field_name] = fields.many2one(table, string="Automatically created field to link to parent %s" % table,
2928 required=True, ondelete="cascade")
2929 elif not cls._columns[field_name].required or cls._columns[field_name].ondelete.lower() not in ("cascade", "restrict"):
2930 _logger.warning('Field definition for _inherits reference "%s" in "%s" must be marked as "required" with ondelete="cascade" or "restrict", forcing it to required + cascade.', field_name, cls._name)
2931 cls._columns[field_name].required = True
2932 cls._columns[field_name].ondelete = "cascade"
2934 # reflect fields with delegate=True in dictionary cls._inherits
2935 for field in cls._fields.itervalues():
2936 if field.type == 'many2one' and not field.related and field.delegate:
2937 if not field.required:
2938 _logger.warning("Field %s with delegate=True must be required.", field)
2939 field.required = True
2940 if field.ondelete.lower() not in ('cascade', 'restrict'):
2941 field.ondelete = 'cascade'
2942 cls._inherits[field.comodel_name] = field.name
2945 def _prepare_setup_fields(self):
2946 """ Prepare the setup of fields once the models have been loaded. """
2947 type(self)._setup_done = False
        for name, field in self._fields.items():
            if field.inherited:
                del self._fields[name]
2955 def _setup_fields(self):
2956 """ Setup the fields (dependency triggers, etc). """
        cls = type(self)
        cls._setup_done = True
2962 # first make sure that parent models are all set up
2963 for parent in self._inherits:
2964 self.env[parent]._setup_fields()
2966 # retrieve custom fields
2967 if not self._context.get('_setup_fields_partial'):
2968 cls._init_manual_fields(self._cr)
2970 # retrieve inherited fields
2971 cls._inherits_check()
2972 cls._inherits_reload()
2975 for field in cls._fields.itervalues():
2976 field.setup(self.env)
2978 # update columns (fields may have changed)
2979 for name, field in cls._fields.iteritems():
2981 cls._columns[name] = field.to_column()
2983 # group fields by compute to determine field.computed_fields
2984 fields_by_compute = defaultdict(list)
        for field in cls._fields.itervalues():
            if field.compute:
                field.computed_fields = fields_by_compute[field.compute]
                field.computed_fields.append(field)
            else:
                field.computed_fields = []
2993 for func in cls._constraint_methods:
2994 if not all(name in cls._fields for name in func._constrains):
2995 _logger.warning("@constrains%r parameters must be field names", func._constrains)
2996 for name in cls._onchange_methods:
2997 if name not in cls._fields:
2998 func = cls._onchange_methods[name]
2999 _logger.warning("@onchange%r parameters must be field names", func._onchange)
3002 for name in cls._defaults:
3003 assert name in cls._fields, \
3004 "Model %s has a default for nonexiting field %s" % (cls._name, name)
        if cls._rec_name:
            assert cls._rec_name in cls._fields, \
                "Invalid rec_name %s for model %s" % (cls._rec_name, cls._name)
3010 elif 'name' in cls._fields:
3011 cls._rec_name = 'name'
3012 elif 'x_name' in cls._fields:
3013 cls._rec_name = 'x_name'
3015 def fields_get(self, cr, user, allfields=None, context=None, write_access=True, attributes=None):
3016 """ fields_get([fields][, attributes])
3018 Return the definition of each field.
        The returned value is a dictionary (indexed by field name) of
        dictionaries. The _inherits'd fields are included. The string, help,
        and selection (if present) attributes are translated.
3024 :param allfields: list of fields to document, all if empty or not provided
        :param attributes: list of description attributes to return for each field, all if empty or not provided
        """
        recs = self.browse(cr, user, [], context)
        res = {}
3029 has_access = functools.partial(recs.check_access_rights, raise_exception=False)
3030 readonly = not (has_access('write') or has_access('create'))
3033 for fname, field in self._fields.iteritems():
            if allfields and fname not in allfields:
                continue
            if not field.setup_done:
                continue
            if field.groups and not recs.user_has_groups(field.groups):
                continue
            description = field.get_description(recs.env)
            if readonly:
                description['readonly'] = True
                description['states'] = {}
            if attributes:
                description = {k: v for k, v in description.iteritems()
                               if k in attributes}
            res[fname] = description

        return res
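    # Usage sketch (hypothetical model and field):
    #
    #   defs = self.pool['res.partner'].fields_get(cr, uid, ['name'])
    #   # defs['name'] looks like {'type': 'char', 'string': 'Name',
    #   #                          'required': True, ...}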
3052 def get_empty_list_help(self, cr, user, help, context=None):
3053 """ Generic method giving the help message displayed when having
3054 no result to display in a list or kanban view. By default it returns
        the help message given as parameter, which is generally the help
        message defined in the action.
        """
        return help
3060 def check_field_access_rights(self, cr, user, operation, fields, context=None):
3062 Check the user access rights on the given fields. This raises Access
3063 Denied if the user does not have the rights. Otherwise it returns the
        fields (as-is if `fields` is not falsy, or the readable/writable
        fields if `fields` is falsy).
        """
3067 if user == SUPERUSER_ID:
3068 return fields or list(self._fields)
3071 """ determine whether user has access to field `fname` """
3072 field = self._fields.get(fname)
3073 if field and field.groups:
                return self.user_has_groups(cr, user, groups=field.groups, context=context)
            else:
                return True

        if not fields:
            fields = filter(valid, self._fields)
        else:
            invalid_fields = set(filter(lambda name: not valid(name), fields))
            if invalid_fields:
                _logger.warning('Access Denied by ACLs for operation: %s, uid: %s, model: %s, fields: %s',
                                operation, user, self._name, ', '.join(invalid_fields))
                raise AccessError(
                    _('The requested operation cannot be completed due to security restrictions. '
                      'Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
                    (self._description, operation))
        return fields
3092 # add explicit old-style implementation to read()
3094 def read(self, cr, user, ids, fields=None, context=None, load='_classic_read'):
3095 records = self.browse(cr, user, ids, context)
3096 result = BaseModel.read(records, fields, load=load)
3097 return result if isinstance(ids, list) else (bool(result) and result[0])
3099 # new-style implementation of read()
    def read(self, fields=None, load='_classic_read'):
        """ read([fields])
3104 Reads the requested fields for the records in `self`, low-level/RPC
3105 method. In Python code, prefer :meth:`~.browse`.
3107 :param fields: list of field names to return (default is all fields)
3108 :return: a list of dictionaries mapping field names to their values,
3109 with one dictionary per record
3110 :raise AccessError: if user has no read rights on some of the given records
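        Example, assuming a model with a ``name`` field (illustrative)::

            for vals in records.read(['name']):
                # vals is a dict like {'id': 42, 'name': 'Agrolait'}
                print vals['id'], vals['name']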
3113 # check access rights
3114 self.check_access_rights('read')
3115 fields = self.check_field_access_rights('read', fields)
3117 # split fields into stored and computed fields
3118 stored, computed = [], []
3120 if name in self._columns:
3122 elif name in self._fields:
3123 computed.append(name)
3125 _logger.warning("%s.read() with unknown field '%s'", self._name, name)
3127 # fetch stored fields from the database to the cache
3128 self._read_from_database(stored)
3130 # retrieve results from records; this takes values from the cache and
3131 # computes remaining fields
3133 name_fields = [(name, self._fields[name]) for name in (stored + computed)]
3134 use_name_get = (load == '_classic_read')
3137 values = {'id': record.id}
3138 for name, field in name_fields:
3139 values[name] = field.convert_to_read(record[name], use_name_get)
3140 result.append(values)
3141 except MissingError:
3147 def _prefetch_field(self, field):
3148 """ Read from the database in order to fetch `field` (:class:`Field`
3149 instance) for `self` in cache.
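        Illustrative access pattern (model and field names are examples)::

            records = env['res.partner'].browse(ids)
            for record in records:
                record.name    # the first access triggers one read() that
                               # prefetches the field for the whole recordset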
3151 # fetch the records of this model without field_name in their cache
3152 records = self._in_cache_without(field)
3154 if len(records) > PREFETCH_MAX:
3155 records = records[:PREFETCH_MAX] | self
3157 # determine which fields can be prefetched
3158 if not self.env.in_draft and \
3159 self._context.get('prefetch_fields', True) and \
3160 self._columns[field.name]._prefetch:
3161 # prefetch all classic and many2one fields that the user can access
3163 for fname, fcolumn in self._columns.iteritems()
3164 if fcolumn._prefetch
3165 if not fcolumn.groups or self.user_has_groups(fcolumn.groups)
3168 fnames = {field.name}
3170 # important: never prefetch fields to recompute!
3171 get_recs_todo = self.env.field_todo
3172 for fname in list(fnames):
3173 if get_recs_todo(self._fields[fname]):
3174 if fname == field.name:
3175 records -= get_recs_todo(field)
3177 fnames.discard(fname)
3179 # fetch records with read()
3180 assert self in records and field.name in fnames
3183 result = records.read(list(fnames), load='_classic_write')
3187 # check the cache, and update it if necessary
3188 if not self._cache.contains(field):
3189 for values in result:
3190 record = self.browse(values.pop('id'))
3191 record._cache.update(record._convert_to_cache(values, validate=False))
3192 if not self._cache.contains(field):
3193 e = AccessError("No value found for %s.%s" % (self, field.name))
3194 self._cache[field] = FailedValue(e)
3197 def _read_from_database(self, field_names):
3198 """ Read the given fields of the records in `self` from the database,
3199 and store them in cache. Access errors are also stored in cache.
3202 cr, user, context = env.args
3204 # FIXME: The query construction needs to be rewritten using the internal Query
3205 # object, as in search(), to avoid ambiguous column references when
3206 # reading/sorting on a table that is auto_joined to another table with
3207 # common columns (e.g. the magical columns)
3209 # Construct a clause for the security rules.
3210 # 'tables' holds the list of tables necessary for the SELECT, including
3211 # the ir.rule clauses, and contains at least self._table.
3212 rule_clause, rule_params, tables = env['ir.rule'].domain_get(self._name, 'read')
3214 # determine the fields that are stored as columns in self._table
3215 fields_pre = [f for f in field_names if self._columns[f]._classic_write]
3217 # we need fully-qualified column names in case len(tables) > 1
3219 if isinstance(self._columns.get(f), fields.binary) and \
3220 context.get('bin_size_%s' % f, context.get('bin_size')):
3221 # PG 9.2 introduces conflicting pg_size_pretty(numeric) -> need an explicit ::bigint cast
3222 return 'pg_size_pretty(length(%s."%s")::bigint) as "%s"' % (self._table, f, f)
3224 return '%s."%s"' % (self._table, f)
3225 qual_names = map(qualify, set(fields_pre + ['id']))
3227 query = """ SELECT %(qual_names)s FROM %(tables)s
3228 WHERE %(table)s.id IN %%s AND (%(extra)s)
3231 'qual_names': ",".join(qual_names),
3232 'tables': ",".join(tables),
3233 'table': self._table,
3234 'extra': " OR ".join(rule_clause) if rule_clause else "TRUE",
3235 'order': self._parent_order or self._order,
3239 for sub_ids in cr.split_for_in_conditions(self.ids):
3240 cr.execute(query, [tuple(sub_ids)] + rule_params)
3241 result.extend(cr.dictfetchall())
3243 ids = [vals['id'] for vals in result]
3246 # translate the fields if necessary
3247 if context.get('lang'):
3248 ir_translation = env['ir.translation']
3249 for f in fields_pre:
3250 if self._columns[f].translate:
3251 #TODO: optimize out of this loop
3252 res_trans = ir_translation._get_ids(
3253 '%s,%s' % (self._name, f), 'model', context['lang'], ids)
3255 vals[f] = res_trans.get(vals['id'], False) or vals[f]
3257 # apply the symbol_get functions of the fields we just read
3258 for f in fields_pre:
3259 symbol_get = self._columns[f]._symbol_get
3262 vals[f] = symbol_get(vals[f])
3264 # store result in cache for POST fields
3266 record = self.browse(vals['id'])
3267 record._cache.update(record._convert_to_cache(vals, validate=False))
3269 # determine the fields that must be processed now
3270 fields_post = [f for f in field_names if not self._columns[f]._classic_write]
3272 # Compute POST fields, grouped by multi
3273 by_multi = defaultdict(list)
3274 for f in fields_post:
3275 by_multi[self._columns[f]._multi].append(f)
3277 for multi, fs in by_multi.iteritems():
3279 res2 = self._columns[fs[0]].get(cr, self._model, ids, fs, user, context=context, values=result)
3280 assert res2 is not None, \
3281 'The function field "%s" on the "%s" model returned None\n' \
3282 '(a dictionary was expected).' % (fs[0], self._name)
3284 # TOCHECK: why we got a string instead of a dict in Python 2.6
3285 # if isinstance(res2[vals['id']], str): res2[vals['id']] = eval(res2[vals['id']])
3286 multi_fields = res2.get(vals['id'], {})
3289 vals[f] = multi_fields.get(f, [])
3292 res2 = self._columns[f].get(cr, self._model, ids, f, user, context=context, values=result)
3295 vals[f] = res2[vals['id']]
3299 # Warn about deprecated fields now that fields_pre and fields_post are computed
3300 for f in field_names:
3301 column = self._columns[f]
3302 if column.deprecated:
3303 _logger.warning('Field %s.%s is deprecated: %s', self._name, f, column.deprecated)
3305 # store result in cache
3307 record = self.browse(vals.pop('id'))
3308 record._cache.update(record._convert_to_cache(vals, validate=False))
3310 # store failed values in cache for the records that could not be read
3311 fetched = self.browse(ids)
3312 missing = self - fetched
3314 extras = fetched - self
3317 _("Database fetch misses ids ({}) and has extra ids ({}), may be caused by a type incoherence in a previous request").format(
3318 ', '.join(map(repr, missing._ids)),
3319 ', '.join(map(repr, extras._ids)),
3321 # store an access error exception in existing records
3323 _('The requested operation cannot be completed due to security restrictions. Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
3324 (self._name, 'read')
3326 forbidden = missing.exists()
3327 forbidden._cache.update(FailedValue(exc))
3328 # store a missing error exception in non-existing records
3330 _('One of the documents you are trying to access has been deleted, please try again after refreshing.')
3332 (missing - forbidden)._cache.update(FailedValue(exc))
3335 def get_metadata(self):
3337 Returns some metadata about the given records.
3339 :return: list of ownership dictionaries for each requested record
3340 :rtype: list of dictionaries with the following keys:
3343 * create_uid: user who created the record
3344 * create_date: date when the record was created
3345 * write_uid: last user who changed the record
3346 * write_date: date of the last change to the record
3347 * xmlid: XML ID to use to refer to this record (if there is one), in format ``module.name``
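        An illustrative return value (all values are examples)::

            [{'id': 42,
              'create_uid': (1, 'Administrator'),
              'create_date': '2014-01-01 10:00:00',
              'write_uid': (1, 'Administrator'),
              'write_date': '2014-01-02 10:00:00',
              'xmlid': 'base.partner_root'}]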
3350 if self._log_access:
3351 fields += ['create_uid', 'create_date', 'write_uid', 'write_date']
3352 quoted_table = '"%s"' % self._table
3353 fields_str = ",".join('%s.%s' % (quoted_table, field) for field in fields)
3354 query = '''SELECT %s, __imd.module, __imd.name
3355 FROM %s LEFT JOIN ir_model_data __imd
3356 ON (__imd.model = %%s and __imd.res_id = %s.id)
3357 WHERE %s.id IN %%s''' % (fields_str, quoted_table, quoted_table, quoted_table)
3358 self._cr.execute(query, (self._name, tuple(self.ids)))
3359 res = self._cr.dictfetchall()
3361 uids = set(r[k] for r in res for k in ['write_uid', 'create_uid'] if r.get(k))
3362 names = dict(self.env['res.users'].browse(uids).name_get())
3366 value = r[key] = r[key] or False
3367 if key in ('write_uid', 'create_uid') and value in names:
3368 r[key] = (value, names[value])
3369 r['xmlid'] = ("%(module)s.%(name)s" % r) if r['name'] else False
3370 del r['name'], r['module']
3373 def _check_concurrency(self, cr, ids, context):
3376 if not (context.get(self.CONCURRENCY_CHECK_FIELD) and self._log_access):
3378 check_clause = "(id = %s AND %s < COALESCE(write_date, create_date, (now() at time zone 'UTC'))::timestamp)"
3379 for sub_ids in cr.split_for_in_conditions(ids):
3382 id_ref = "%s,%s" % (self._name, id)
3383 update_date = context[self.CONCURRENCY_CHECK_FIELD].pop(id_ref, None)
3385 ids_to_check.extend([id, update_date])
3386 if not ids_to_check:
3388 cr.execute("SELECT id FROM %s WHERE %s" % (self._table, " OR ".join([check_clause]*(len(ids_to_check)/2))), tuple(ids_to_check))
3391 # mention the first one only to keep the error message readable
3392 raise except_orm('ConcurrencyException', _('A document was modified since you last viewed it (%s:%d)') % (self._description, res[0]))
3394 def _check_record_rules_result_count(self, cr, uid, ids, result_ids, operation, context=None):
3395 """Verify the returned rows after applying record rules matches
3396 the length of `ids`, and raise an appropriate exception if it does not.
3400 ids, result_ids = set(ids), set(result_ids)
3401 missing_ids = ids - result_ids
3403 # Attempt to distinguish record rule restriction vs deleted records,
3404 # to provide a more specific error message - check if the missing ids are hidden by record rules or actually deleted
3405 cr.execute('SELECT id FROM ' + self._table + ' WHERE id IN %s', (tuple(missing_ids),))
3406 forbidden_ids = [x[0] for x in cr.fetchall()]
3408 # the missing ids are (at least partially) hidden by access rules
3409 if uid == SUPERUSER_ID:
3411 _logger.warning('Access Denied by record rules for operation: %s on record ids: %r, uid: %s, model: %s', operation, forbidden_ids, uid, self._name)
3412 raise except_orm(_('Access Denied'),
3413 _('The requested operation cannot be completed due to security restrictions. Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
3414 (self._description, operation))
3416 # If we get here, the missing_ids are not in the database
3417 if operation in ('read','unlink'):
3418 # No need to warn about deleting an already deleted record.
3419 # And no error when reading a record that was deleted, to prevent spurious
3420 # errors for non-transactional search/read sequences coming from clients
3422 _logger.warning('Failed operation on deleted record(s): %s, uid: %s, model: %s', operation, uid, self._name)
3423 raise except_orm(_('Missing document(s)'),
3424 _('One of the documents you are trying to access has been deleted, please try again after refreshing.'))
3427 def check_access_rights(self, cr, uid, operation, raise_exception=True): # no context on purpose.
3428 """Verifies that the operation given by ``operation`` is allowed for the user
3429 according to the access rights."""
3430 return self.pool.get('ir.model.access').check(cr, uid, self._name, operation, raise_exception)
3432 def check_access_rule(self, cr, uid, ids, operation, context=None):
3433 """Verifies that the operation given by ``operation`` is allowed for the user
3434 according to ir.rules.
3436 :param operation: one of ``write``, ``unlink``
3437 :raise except_orm: * if current ir.rules do not permit this operation.
3438 :return: None if the operation is allowed
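        Typical usage, e.g. guarding a manual SQL update (sketch)::

            model.check_access_rule(cr, uid, ids, 'write', context=context)
            # raises except_orm if the current ir.rules forbid writing
            # to some of the records in ``ids``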
3440 if uid == SUPERUSER_ID:
3443 if self.is_transient():
3444 # Only a single implicit access rule for transient models: owner only!
3445 # This is ok to hardcode because we assert that TransientModels always
3446 # have log_access enabled so that the create_uid column is always there.
3447 # And even with _inherits, these fields are always present in the local
3448 # table too, so no need for JOINs.
3449 cr.execute("""SELECT distinct create_uid
3451 WHERE id IN %%s""" % self._table, (tuple(ids),))
3452 uids = [x[0] for x in cr.fetchall()]
3453 if len(uids) != 1 or uids[0] != uid:
3454 raise except_orm(_('Access Denied'),
3455 _('For this kind of document, you may only access records you created yourself.\n\n(Document type: %s)') % (self._description,))
3457 where_clause, where_params, tables = self.pool.get('ir.rule').domain_get(cr, uid, self._name, operation, context=context)
3459 where_clause = ' and ' + ' and '.join(where_clause)
3460 for sub_ids in cr.split_for_in_conditions(ids):
3461 cr.execute('SELECT ' + self._table + '.id FROM ' + ','.join(tables) +
3462 ' WHERE ' + self._table + '.id IN %s' + where_clause,
3463 [sub_ids] + where_params)
3464 returned_ids = [x['id'] for x in cr.dictfetchall()]
3465 self._check_record_rules_result_count(cr, uid, sub_ids, returned_ids, operation, context=context)
3467 def create_workflow(self, cr, uid, ids, context=None):
3468 """Create a workflow instance for each given record IDs."""
3469 from openerp import workflow
3471 workflow.trg_create(uid, self._name, res_id, cr)
3472 # self.invalidate_cache(cr, uid, context=context) ?
3475 def delete_workflow(self, cr, uid, ids, context=None):
3476 """Delete the workflow instances bound to the given record IDs."""
3477 from openerp import workflow
3479 workflow.trg_delete(uid, self._name, res_id, cr)
3480 self.invalidate_cache(cr, uid, context=context)
3483 def step_workflow(self, cr, uid, ids, context=None):
3484 """Reevaluate the workflow instances of the given record IDs."""
3485 from openerp import workflow
3487 workflow.trg_write(uid, self._name, res_id, cr)
3488 # self.invalidate_cache(cr, uid, context=context) ?
3491 def signal_workflow(self, cr, uid, ids, signal, context=None):
3492 """Send given workflow signal and return a dict mapping ids to workflow results"""
3493 from openerp import workflow
3496 result[res_id] = workflow.trg_validate(uid, self._name, res_id, signal, cr)
3497 # self.invalidate_cache(cr, uid, context=context) ?
3500 def redirect_workflow(self, cr, uid, old_new_ids, context=None):
3501 """ Rebind the workflow instance bound to the given 'old' record IDs to
3502 the given 'new' IDs (``old_new_ids`` is a list of pairs ``(old, new)``).
3504 from openerp import workflow
3505 for old_id, new_id in old_new_ids:
3506 workflow.trg_redirect(uid, self._name, old_id, new_id, cr)
3507 self.invalidate_cache(cr, uid, context=context)
3510 def unlink(self, cr, uid, ids, context=None):
3513 Deletes the records of the current set
3515 :raise AccessError: * if user has no unlink rights on the requested object
3516 * if user tries to bypass access rules for unlink on the requested object
3517 :raise UserError: if the record is a default property for other records
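        For example (old-style API, record ids are illustrative)::

            model.unlink(cr, uid, [7, 42], context=context)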
3522 if isinstance(ids, (int, long)):
3525 result_store = self._store_get_values(cr, uid, ids, self._fields.keys(), context)
3527 # for recomputing new-style fields
3528 recs = self.browse(cr, uid, ids, context)
3529 recs.modified(self._fields)
3531 self._check_concurrency(cr, ids, context)
3533 self.check_access_rights(cr, uid, 'unlink')
3535 ir_property = self.pool.get('ir.property')
3537 # Check if the records are used as default properties.
3538 domain = [('res_id', '=', False),
3539 ('value_reference', 'in', ['%s,%s' % (self._name, i) for i in ids]),
3541 if ir_property.search(cr, uid, domain, context=context):
3542 raise except_orm(_('Error'), _('Unable to delete this document because it is used as a default property'))
3544 # Delete the records' properties.
3545 property_ids = ir_property.search(cr, uid, [('res_id', 'in', ['%s,%s' % (self._name, i) for i in ids])], context=context)
3546 ir_property.unlink(cr, uid, property_ids, context=context)
3548 self.delete_workflow(cr, uid, ids, context=context)
3550 self.check_access_rule(cr, uid, ids, 'unlink', context=context)
3551 pool_model_data = self.pool.get('ir.model.data')
3552 ir_values_obj = self.pool.get('ir.values')
3553 ir_attachment_obj = self.pool.get('ir.attachment')
3554 for sub_ids in cr.split_for_in_conditions(ids):
3555 cr.execute('delete from ' + self._table + ' ' \
3556 'where id IN %s', (sub_ids,))
3558 # Removing the ir_model_data reference if the record being deleted was created from an xml/csv file,
3559 # as these are not connected with real database foreign keys, and would be dangling references.
3560 # Note: following steps performed as admin to avoid access rights restrictions, and with no context
3561 # to avoid possible side-effects during admin calls.
3562 # Step 1. Calling unlink of ir_model_data only for the affected IDS
3563 reference_ids = pool_model_data.search(cr, SUPERUSER_ID, [('res_id','in',list(sub_ids)),('model','=',self._name)])
3564 # Step 2. Perform the actual deletion of the referenced records
3566 pool_model_data.unlink(cr, SUPERUSER_ID, reference_ids)
3568 # For the same reason, removing the record relevant to ir_values
3569 ir_value_ids = ir_values_obj.search(cr, uid,
3570 ['|',('value','in',['%s,%s' % (self._name, sid) for sid in sub_ids]),'&',('res_id','in',list(sub_ids)),('model','=',self._name)],
3573 ir_values_obj.unlink(cr, uid, ir_value_ids, context=context)
3575 # For the same reason, removing the record relevant to ir_attachment
3576 # The search is performed with sql as the search method of ir_attachment is overridden to hide attachments of deleted records
3577 cr.execute('select id from ir_attachment where res_model = %s and res_id in %s', (self._name, sub_ids))
3578 ir_attachment_ids = [ir_attachment[0] for ir_attachment in cr.fetchall()]
3579 if ir_attachment_ids:
3580 ir_attachment_obj.unlink(cr, uid, ir_attachment_ids, context=context)
3582 # invalidate the *whole* cache, since the orm does not handle all
3583 # changes made in the database, like cascading delete!
3584 recs.invalidate_cache()
3586 for order, obj_name, store_ids, fields in result_store:
3587 if obj_name == self._name:
3588 effective_store_ids = set(store_ids) - set(ids)
3590 effective_store_ids = store_ids
3591 if effective_store_ids:
3592 obj = self.pool[obj_name]
3593 cr.execute('select id from '+obj._table+' where id IN %s', (tuple(effective_store_ids),))
3594 rids = map(lambda x: x[0], cr.fetchall())
3596 obj._store_set_values(cr, uid, rids, fields, context)
3598 # recompute new-style fields
3607 def write(self, vals):
3610 Updates all records in the current set with the provided values.
3612 :param dict vals: fields to update and the value to set on them e.g::
3614 {'foo': 1, 'bar': "Qux"}
3616 will set the field ``foo`` to ``1`` and the field ``bar`` to
3617 ``"Qux"`` if those are valid (otherwise it will trigger an error).
3619 :raise AccessError: * if user has no write rights on the requested object
3620 * if user tries to bypass access rules for write on the requested object
3621 :raise ValidateError: if user tries to enter an invalid value for a selection field
3622 :raise UserError: if a loop would be created in a hierarchy of objects as a result of the operation (such as setting an object as its own parent)
3624 * For numeric fields (:class:`~openerp.fields.Integer`,
3625 :class:`~openerp.fields.Float`) the value should be of the
3627 * For :class:`~openerp.fields.Boolean`, the value should be a
3628 :class:`python:bool`
3629 * For :class:`~openerp.fields.Selection`, the value should match the
3630 selection values (generally :class:`python:str`, sometimes
3631 :class:`python:int`)
3632 * For :class:`~openerp.fields.Many2one`, the value should be the
3633 database identifier of the record to set
3634 * Other non-relational fields use a string for value
3638 for historical and compatibility reasons,
3639 :class:`~openerp.fields.Date` and
3640 :class:`~openerp.fields.Datetime` fields use strings as values
3641 (written and read) rather than :class:`~python:datetime.date` or
3642 :class:`~python:datetime.datetime`. These date strings are
3643 UTC-only and formatted according to
3644 :const:`openerp.tools.misc.DEFAULT_SERVER_DATE_FORMAT` and
3645 :const:`openerp.tools.misc.DEFAULT_SERVER_DATETIME_FORMAT`
3646 * .. _openerp/models/relationals/format:
3648 :class:`~openerp.fields.One2many` and
3649 :class:`~openerp.fields.Many2many` use a special "commands" format to
3650 manipulate the set of records stored in/associated with the field.
3652 This format is a list of triplets executed sequentially, where each
3653 triplet is a command to execute on the set of records. Not all
3654 commands apply in all situations. Possible commands are:
3657 adds a new record created from the provided ``value`` dict.
3659 updates an existing record of id ``id`` with the values in
3660 ``values``. Can not be used in :meth:`~.create`.
3662 removes the record of id ``id`` from the set, then deletes it
3663 (from the database). Can not be used in :meth:`~.create`.
3665 removes the record of id ``id`` from the set, but does not
3666 delete it. Can not be used on
3667 :class:`~openerp.fields.One2many`. Can not be used in
3670 adds an existing record of id ``id`` to the set. Can not be
3671 used on :class:`~openerp.fields.One2many`.
3673 removes all records from the set, equivalent to using the
3674 command ``3`` on every record explicitly. Can not be used on
3675 :class:`~openerp.fields.One2many`. Can not be used in
3678 replaces all existing records in the set by the ``ids`` list,
3679 equivalent to using the command ``5`` followed by a command
3680 ``4`` for each ``id`` in ``ids``. Can not be used on
3681 :class:`~openerp.fields.One2many`.
3683 .. note:: Values marked as ``_`` in the list above are ignored and
3684 can be anything, generally ``0`` or ``False``.
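        For example, assuming a model with a many2many field ``tag_ids``
        and a one2many field ``line_ids`` (illustrative field names)::

            record.write({
                'name': 'New name',
                'tag_ids': [(6, 0, [tag1_id, tag2_id])],    # replace the set
                'line_ids': [(0, 0, {'note': 'new line'}),  # create a line
                             (2, old_line_id, False)],      # delete a line
            })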
3689 self._check_concurrency(self._ids)
3690 self.check_access_rights('write')
3692 # No user-driven update of these columns
3693 for field in itertools.chain(MAGIC_COLUMNS, ('parent_left', 'parent_right')):
3694 vals.pop(field, None)
3696 # split up fields into old-style and pure new-style ones
3697 old_vals, new_vals, unknown = {}, {}, []
3698 for key, val in vals.iteritems():
3699 field = self._fields.get(key)
3701 if field.column or field.inherited:
3703 if field.inverse and not field.inherited:
3709 _logger.warning("%s.write() with unknown fields: %s", self._name, ', '.join(sorted(unknown)))
3711 # write old-style fields with (low-level) method _write
3713 self._write(old_vals)
3715 # put the values of pure new-style fields into cache, and inverse them
3718 record._cache.update(record._convert_to_cache(new_vals, update=True))
3719 for key in new_vals:
3720 self._fields[key].determine_inverse(self)
3724 def _write(self, cr, user, ids, vals, context=None):
3725 # low-level implementation of write()
3730 self.check_field_access_rights(cr, user, 'write', vals.keys())
3731 deleted_related = defaultdict(list)
3732 for field in vals.keys():
3734 if field in self._columns:
3735 fobj = self._columns[field]
3736 elif field in self._inherit_fields:
3737 fobj = self._inherit_fields[field][2]
3740 if fobj._type in ['one2many', 'many2many'] and vals[field]:
3741 for wtuple in vals[field]:
3742 if isinstance(wtuple, (tuple, list)) and wtuple[0] == 2:
3743 deleted_related[fobj._obj].append(wtuple[1])
3748 for group in groups:
3749 module = group.split(".")[0]
3750 grp = group.split(".")[1]
3751 cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name=%s and module=%s and model=%s) and uid=%s", \
3752 (grp, module, 'res.groups', user))
3753 readonly = cr.fetchall()
3754 if readonly[0][0] >= 1:
3761 result = self._store_get_values(cr, user, ids, vals.keys(), context) or []
3763 # for recomputing new-style fields
3764 recs = self.browse(cr, user, ids, context)
3765 modified_fields = list(vals)
3766 if self._log_access:
3767 modified_fields += ['write_date', 'write_uid']
3768 recs.modified(modified_fields)
3770 parents_changed = []
3771 parent_order = self._parent_order or self._order
3772 if self._parent_store and (self._parent_name in vals) and not context.get('defer_parent_store_computation'):
3773 # The parent_left/right computation may take up to
3774 # 5 seconds. No need to recompute the values if the
3775 # parent is the same.
3776 # Note: to respect parent_order, nodes must be processed in
3777 # order, so ``parents_changed`` must be ordered properly.
3778 parent_val = vals[self._parent_name]
3780 query = "SELECT id FROM %s WHERE id IN %%s AND (%s != %%s OR %s IS NULL) ORDER BY %s" % \
3781 (self._table, self._parent_name, self._parent_name, parent_order)
3782 cr.execute(query, (tuple(ids), parent_val))
3784 query = "SELECT id FROM %s WHERE id IN %%s AND (%s IS NOT NULL) ORDER BY %s" % \
3785 (self._table, self._parent_name, parent_order)
3786 cr.execute(query, (tuple(ids),))
3787 parents_changed = map(operator.itemgetter(0), cr.fetchall())
3789 updates = [] # list of (column, expr) or (column, pattern, value)
3793 totranslate = context.get('lang', False) and (context['lang'] != 'en_US')
3795 ffield = self._fields.get(field)
3796 if ffield and ffield.deprecated:
3797 _logger.warning('Field %s.%s is deprecated: %s', self._name, field, ffield.deprecated)
3798 if field in self._columns:
3799 column = self._columns[field]
3800 if hasattr(column, 'selection') and vals[field]:
3801 self._check_selection_field_value(cr, user, field, vals[field], context=context)
3802 if column._classic_write and not hasattr(column, '_fnct_inv'):
3803 if (not totranslate) or not column.translate:
3804 updates.append((field, '%s', column._symbol_set[1](vals[field])))
3805 direct.append(field)
3807 upd_todo.append(field)
3809 updend.append(field)
3811 if self._log_access:
3812 updates.append(('write_uid', '%s', user))
3813 updates.append(('write_date', "(now() at time zone 'UTC')"))
3814 direct.append('write_uid')
3815 direct.append('write_date')
3818 self.check_access_rule(cr, user, ids, 'write', context=context)
3819 query = 'UPDATE "%s" SET %s WHERE id IN %%s' % (
3820 self._table, ','.join('"%s"=%s' % u[:2] for u in updates),
3822 params = tuple(u[2] for u in updates if len(u) > 2)
3823 for sub_ids in cr.split_for_in_conditions(ids):
3824 cr.execute(query, params + (sub_ids,))
3825 if cr.rowcount != len(sub_ids):
3826 raise MissingError(_('One of the records you are trying to modify has already been deleted (Document type: %s).') % self._description)
3831 if self._columns[f].translate:
3832 src_trans = self.pool[self._name].read(cr, user, ids, [f])[0][f]
3835 # Insert the value into the DB
3836 context_wo_lang = dict(context, lang=None)
3837 self.write(cr, user, ids, {f: vals[f]}, context=context_wo_lang)
3838 self.pool.get('ir.translation')._set_ids(cr, user, self._name+','+f, 'model', context['lang'], ids, vals[f], src_trans)
3840 # invalidate and mark new-style fields to recompute; do this before
3841 # setting other fields, because it can require the value of computed
3842 # fields, e.g., a one2many checking constraints on records
3843 recs.modified(direct)
3845 # call the 'set' method of fields which are not classic_write
3846 upd_todo.sort(lambda x, y: self._columns[x].priority-self._columns[y].priority)
3848 # default elements in context must be removed when calling a one2many or many2many
3849 rel_context = context.copy()
3850 for c in context.items():
3851 if c[0].startswith('default_'):
3852 del rel_context[c[0]]
3854 for field in upd_todo:
3856 result += self._columns[field].set(cr, self, id, field, vals[field], user, context=rel_context) or []
3858 # for recomputing new-style fields
3859 recs.modified(upd_todo)
3861 unknown_fields = updend[:]
3862 for table in self._inherits:
3863 col = self._inherits[table]
3865 for sub_ids in cr.split_for_in_conditions(ids):
3866 cr.execute('select distinct "'+col+'" from "'+self._table+'" ' \
3867 'where id IN %s', (sub_ids,))
3868 nids.extend([x[0] for x in cr.fetchall()])
3872 if self._inherit_fields[val][0] == table:
3874 unknown_fields.remove(val)
3876 self.pool[table].write(cr, user, nids, v, context)
3880 'No such field(s) in model %s: %s.',
3881 self._name, ', '.join(unknown_fields))
3883 # check Python constraints
3884 recs._validate_fields(vals)
3886 # TODO: use _order to set dest at the right position and not first node of parent
3887 # We can't defer parent_store computation because the stored function
3888 # fields that are computed may refer (directly or indirectly) to
3889 # parent_left/right (via a child_of domain)
3892 self.pool._init_parent[self._name] = True
3894 order = self._parent_order or self._order
3895 parent_val = vals[self._parent_name]
3897 clause, params = '%s=%%s' % (self._parent_name,), (parent_val,)
3899 clause, params = '%s IS NULL' % (self._parent_name,), ()
3901 for id in parents_changed:
3902 cr.execute('SELECT parent_left, parent_right FROM %s WHERE id=%%s' % (self._table,), (id,))
3903 pleft, pright = cr.fetchone()
3904 distance = pright - pleft + 1
3906 # Positions of current siblings, to locate proper insertion point;
3907 # this can _not_ be fetched outside the loop, as it needs to be refreshed
3908 # after each update, in case several nodes are sequentially inserted one
3909 # next to the other (i.e. computed incrementally)
3910 cr.execute('SELECT parent_right, id FROM %s WHERE %s ORDER BY %s' % (self._table, clause, parent_order), params)
3911 parents = cr.fetchall()
3913 # Find the position of the element
3915 for (parent_pright, parent_id) in parents:
3918 position = parent_pright and parent_pright + 1 or 1
3920 # It's the first node of the parent
3925 cr.execute('select parent_left from '+self._table+' where id=%s', (parent_val,))
3926 position = cr.fetchone()[0] + 1
3928 if pleft < position <= pright:
3929 raise except_orm(_('UserError'), _('Recursivity Detected.'))
3931 if pleft < position:
3932 cr.execute('update '+self._table+' set parent_left=parent_left+%s where parent_left>=%s', (distance, position))
3933 cr.execute('update '+self._table+' set parent_right=parent_right+%s where parent_right>=%s', (distance, position))
3934 cr.execute('update '+self._table+' set parent_left=parent_left+%s, parent_right=parent_right+%s where parent_left>=%s and parent_left<%s', (position-pleft, position-pleft, pleft, pright))
3936 cr.execute('update '+self._table+' set parent_left=parent_left+%s where parent_left>=%s', (distance, position))
3937 cr.execute('update '+self._table+' set parent_right=parent_right+%s where parent_right>=%s', (distance, position))
3938 cr.execute('update '+self._table+' set parent_left=parent_left-%s, parent_right=parent_right-%s where parent_left>=%s and parent_left<%s', (pleft-position+distance, pleft-position+distance, pleft+distance, pright+distance))
3939 recs.invalidate_cache(['parent_left', 'parent_right'])
3941 result += self._store_get_values(cr, user, ids, vals.keys(), context)
3945 for order, model_name, ids_to_update, fields_to_recompute in result:
3946 key = (model_name, tuple(fields_to_recompute))
3947 done.setdefault(key, {})
3948 # avoid doing the same computation several times
3950 for id in ids_to_update:
3951 if id not in done[key]:
3952 done[key][id] = True
3953 if id not in deleted_related[model_name]:
3955 self.pool[model_name]._store_set_values(cr, user, todo, fields_to_recompute, context)
3957 # recompute new-style fields
3958 if context.get('recompute', True):
3961 self.step_workflow(cr, user, ids, context=context)
3965 # TODO: Should set perm to user.xxx
3968 @api.returns('self', lambda value: value.id)
3969 def create(self, vals):
3970 """ create(vals) -> record
3972 Creates a new record for the model.
3974 The new record is initialized using the values from ``vals`` and
3975 if necessary those from :meth:`~.default_get`.
3978 values for the model's fields, as a dictionary::
3980 {'field_name': field_value, ...}
3982 see :meth:`~.write` for details
3983 :return: new record created
3984 :raise AccessError: * if user has no create rights on the requested object
3985 * if user tries to bypass access rules for create on the requested object
3986 :raise ValidateError: if user tries to enter an invalid value for a selection field
3987 :raise UserError: if a loop would be created in a hierarchy of objects as a result of the operation (such as setting an object as its own parent)
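        For example (illustrative model and values)::

            record = self.env['res.partner'].create({'name': 'Agrolait'})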
3989 self.check_access_rights('create')
3991 # add missing defaults, and drop fields that may not be set by user
3992 vals = self._add_missing_default_values(vals)
3993 for field in itertools.chain(MAGIC_COLUMNS, ('parent_left', 'parent_right')):
3994 vals.pop(field, None)
3996 # split up fields into old-style and pure new-style ones
3997 old_vals, new_vals, unknown = {}, {}, []
3998 for key, val in vals.iteritems():
3999 field = self._fields.get(key)
4001 if field.column or field.inherited:
4003 if field.inverse and not field.inherited:
4009 _logger.warning("%s.create() with unknown fields: %s", self._name, ', '.join(sorted(unknown)))
4011 # create record with old-style fields
4012 record = self.browse(self._create(old_vals))
4014 # put the values of pure new-style fields into cache, and inverse them
4015 record._cache.update(record._convert_to_cache(new_vals))
4016 for key in new_vals:
4017 self._fields[key].determine_inverse(record)
4021 def _create(self, cr, user, vals, context=None):
4022 # low-level implementation of create()
4026 if self.is_transient():
4027 self._transient_vacuum(cr, user)
4030 for v in self._inherits:
4031 if self._inherits[v] not in vals:
4034 tocreate[v] = {'id': vals[self._inherits[v]]}
4037 # list of column assignments defined as tuples like:
4038 # (column_name, format_string, column_value)
4039 # (column_name, sql_formula)
4040 # Those tuples will be used by the string formatting for the INSERT
4042 ('id', "nextval('%s')" % self._sequence),
4047 for v in vals.keys():
4048 if v in self._inherit_fields and v not in self._columns:
4049 (table, col, col_detail, original_parent) = self._inherit_fields[v]
4050 tocreate[table][v] = vals[v]
4053 if (v not in self._inherit_fields) and (v not in self._columns):
4055 unknown_fields.append(v)
4058 'No such field(s) in model %s: %s.',
4059 self._name, ', '.join(unknown_fields))
4061 for table in tocreate:
4062 if self._inherits[table] in vals:
4063 del vals[self._inherits[table]]
4065 record_id = tocreate[table].pop('id', None)
4067 if record_id is None or not record_id:
4068 record_id = self.pool[table].create(cr, user, tocreate[table], context=context)
4070 self.pool[table].write(cr, user, [record_id], tocreate[table], context=context)
4072 updates.append((self._inherits[table], '%s', record_id))
4074 # Set boolean fields to False if they are not set (to make searching on them more reliable)
4075 bool_fields = [x for x in self._columns.keys() if self._columns[x]._type=='boolean']
4077 for bool_field in bool_fields:
4078 if bool_field not in vals:
4079 vals[bool_field] = False
4081 for field in vals.keys():
4083 if field in self._columns:
4084 fobj = self._columns[field]
4086 fobj = self._inherit_fields[field][2]
4092 for group in groups:
4093 module = group.split(".")[0]
4094 grp = group.split(".")[1]
4095 cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name='%s' and module='%s' and model='%s') and uid=%s" % \
4096 (grp, module, 'res.groups', user))
4097 readonly = cr.fetchall()
4098 if readonly[0][0] >= 1:
4101 elif readonly[0][0] == 0:
4109 current_field = self._columns[field]
4110 if current_field._classic_write:
4111 updates.append((field, '%s', current_field._symbol_set[1](vals[field])))
4113 #for the function fields that receive a value, we set them directly in the database
4114 #(they may be required), but we also need to trigger the _fnct_inv()
4115 if (hasattr(current_field, '_fnct_inv')) and not isinstance(current_field, fields.related):
4116 #TODO: special-casing related fields this way is really creepy, but it shouldn't be changed
4117 #one week before the release candidate. The only clean way to handle this correctly seems to be
4118 #adding an attribute marking a field as 'really readonly', so that it is entirely ignored by
4119 #create()... otherwise, if for example the related field has a default value (for usability),
4120 #its _fnct_inv is called and may raise an access rights error. That is too big a change for now,
4121 #and is thus postponed until after the release; but the behavior should definitely not differ between related and function fields.
4123 upd_todo.append(field)
4125 #TODO: this 'if' statement should be removed, because there is no good reason to special-case
4126 #related fields. See the TODO comment above for further explanations.
4127 if not isinstance(current_field, fields.related):
4128 upd_todo.append(field)
4129 if field in self._columns \
4130 and hasattr(current_field, 'selection') \
4132 self._check_selection_field_value(cr, user, field, vals[field], context=context)
4133 if self._log_access:
4134 updates.append(('create_uid', '%s', user))
4135 updates.append(('write_uid', '%s', user))
4136 updates.append(('create_date', "(now() at time zone 'UTC')"))
4137 updates.append(('write_date', "(now() at time zone 'UTC')"))
4139 # the list of tuples used in this formatting corresponds to
4140 # tuple(field_name, format, value)
4141 # In some cases, for example (id, create_date, write_date), we do not
4142 # need to read the third value of the tuple, because the real value is
4143 # encoded in the second value (the format).
4145 """INSERT INTO "%s" (%s) VALUES(%s) RETURNING id""" % (
4147 ', '.join('"%s"' % u[0] for u in updates),
4148 ', '.join(u[1] for u in updates)
4150 tuple([u[2] for u in updates if len(u) > 2])
4153 id_new, = cr.fetchone()
4154 recs = self.browse(cr, user, id_new, context)
4156 if self._parent_store and not context.get('defer_parent_store_computation'):
4158 self.pool._init_parent[self._name] = True
4160 parent = vals.get(self._parent_name, False)
4162 cr.execute('select parent_right from '+self._table+' where '+self._parent_name+'=%s order by '+(self._parent_order or self._order), (parent,))
4164 result_p = cr.fetchall()
4165 for (pleft,) in result_p:
4170 cr.execute('select parent_left from '+self._table+' where id=%s', (parent,))
4171 pleft_old = cr.fetchone()[0]
4174 cr.execute('select max(parent_right) from '+self._table)
4175 pleft = cr.fetchone()[0] or 0
4176 cr.execute('update '+self._table+' set parent_left=parent_left+2 where parent_left>%s', (pleft,))
4177 cr.execute('update '+self._table+' set parent_right=parent_right+2 where parent_right>%s', (pleft,))
4178 cr.execute('update '+self._table+' set parent_left=%s,parent_right=%s where id=%s', (pleft+1, pleft+2, id_new))
4179 recs.invalidate_cache(['parent_left', 'parent_right'])
4181 # invalidate and mark new-style fields to recompute; do this before
4182 # setting other fields, because it can require the value of computed
4183 # fields, e.g., a one2many checking constraints on records
4184 recs.modified([u[0] for u in updates])
4186 # call the 'set' method of fields which are not classic_write
4187 upd_todo.sort(lambda x, y: self._columns[x].priority-self._columns[y].priority)
4189 # default elements in context must be removed when calling a one2many or many2many
4190 rel_context = context.copy()
4191 for c in context.items():
4192 if c[0].startswith('default_'):
4193 del rel_context[c[0]]
4196 for field in upd_todo:
4197 result += self._columns[field].set(cr, self, id_new, field, vals[field], user, rel_context) or []
4199 # for recomputing new-style fields
4200 recs.modified(upd_todo)
4202 # check Python constraints
4203 recs._validate_fields(vals)
4205 if context.get('recompute', True):
4206 result += self._store_get_values(cr, user, [id_new],
4207 list(set(vals.keys() + self._inherits.values())),
4211 for order, model_name, ids, fields2 in result:
4212 if not (model_name, ids, fields2) in done:
4213 self.pool[model_name]._store_set_values(cr, user, ids, fields2, context)
4214 done.append((model_name, ids, fields2))
4215 # recompute new-style fields
4218 if self._log_create and context.get('recompute', True):
4219 message = self._description + \
4221 self.name_get(cr, user, [id_new], context=context)[0][1] + \
4222 "' " + _("created.")
4223 self.log(cr, user, id_new, message, True, context=context)
4225 self.check_access_rule(cr, user, [id_new], 'create', context=context)
4226 self.create_workflow(cr, user, [id_new], context=context)
4229 def _store_get_values(self, cr, uid, ids, fields, context):
4230 """Returns an ordered list of fields.function to call due to
4231 an update operation on ``fields`` of records with ``ids``,
4232 obtained by calling the 'store' triggers of these fields,
4233 as set up by their 'store' attribute.
4235 :return: [(priority, model_name, [record_ids,], [function_fields,])]
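        An illustrative return value (model and field names are examples)::

            [(10, 'account.move', [1, 2], ['amount_total']),
             (20, 'account.move.line', [5, 6], ['balance'])]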
4237 if fields is None: fields = []
4238 stored_functions = self.pool._store_function.get(self._name, [])
4240 # use indexed names for the details of the stored_functions:
4241 model_name_, func_field_to_compute_, target_ids_func_, trigger_fields_, priority_ = range(5)
4243 # only keep store triggers that should be triggered for the ``fields``
4245 triggers_to_compute = (
4246 f for f in stored_functions
4247 if not f[trigger_fields_] or set(fields).intersection(f[trigger_fields_])
4251 target_id_results = {}
4252 for store_trigger in triggers_to_compute:
4253 target_func_id_ = id(store_trigger[target_ids_func_])
4254 if target_func_id_ not in target_id_results:
4255 # use admin user for accessing objects having rules defined on store fields
4256 target_id_results[target_func_id_] = [i for i in store_trigger[target_ids_func_](self, cr, SUPERUSER_ID, ids, context) if i]
4257 target_ids = target_id_results[target_func_id_]
4259 # the compound key must consider the priority and model name
4260 key = (store_trigger[priority_], store_trigger[model_name_])
4261 for target_id in target_ids:
4262 to_compute_map.setdefault(key, {}).setdefault(target_id,set()).add(tuple(store_trigger))
4264 # Here to_compute_map looks like:
4265 # { (10, 'model_a') : { target_id1: [ (trigger_1_tuple, trigger_2_tuple) ], ... }
4266 # (20, 'model_a') : { target_id2: [ (trigger_3_tuple, trigger_4_tuple) ], ... }
4267 # (99, 'model_a') : { target_id1: [ (trigger_5_tuple, trigger_6_tuple) ], ... }
4270 # Now we need to generate the batch function calls list
4272 # { (10, 'model_a') : [(10, 'model_a', [record_ids,], [function_fields,])] }
4274 for ((priority,model), id_map) in to_compute_map.iteritems():
4275 trigger_ids_maps = {}
4276 # function_ids_maps =
4277 # { (function_1_tuple, function_2_tuple) : [target_id1, target_id2, ..] }
4278 for target_id, triggers in id_map.iteritems():
4279 trigger_ids_maps.setdefault(tuple(triggers), []).append(target_id)
4280 for triggers, target_ids in trigger_ids_maps.iteritems():
4281 call_map.setdefault((priority,model),[]).append((priority, model, target_ids,
4282 [t[func_field_to_compute_] for t in triggers]))
4285 result = reduce(operator.add, (call_map[k] for k in sorted(call_map)))
4288 def _store_set_values(self, cr, uid, ids, fields, context):
4289 """Calls the fields.function's "implementation function" for all ``fields``, on records with ``ids`` (taking care of
4290 respecting ``multi`` attributes), and stores the resulting values in the database directly."""
4295 if self._log_access:
4296 cr.execute('select id,write_date from '+self._table+' where id IN %s', (tuple(ids),))
4300 field_dict.setdefault(r[0], [])
4301 res_date = time.strptime((r[1])[:19], '%Y-%m-%d %H:%M:%S')
4302 write_date = datetime.datetime.fromtimestamp(time.mktime(res_date))
4303 for i in self.pool._store_function.get(self._name, []):
4305 up_write_date = write_date + datetime.timedelta(hours=i[5])
4306 if datetime.datetime.now() < up_write_date:
4308 field_dict[r[0]].append(i[1])
4314 if self._columns[f]._multi not in keys:
4315 keys.append(self._columns[f]._multi)
4316 todo.setdefault(self._columns[f]._multi, [])
4317 todo[self._columns[f]._multi].append(f)
4321 # use admin user for accessing objects having rules defined on store fields
4322 result = self._columns[val[0]].get(cr, self, ids, val, SUPERUSER_ID, context=context)
4323 for id, value in result.items():
4325 for f in value.keys():
4326 if f in field_dict[id]:
4328 updates = [] # list of (column, pattern, value)
4332 column = self._columns[v]
4333 if column._type == 'many2one':
4335 value[v] = value[v][0]
4338 updates.append((v, '%s', column._symbol_set[1](value[v])))
4340 query = 'UPDATE "%s" SET %s WHERE id = %%s' % (
4341 self._table, ','.join('"%s"=%s' % u[:2] for u in updates),
4343 params = tuple(u[2] for u in updates)
4344 cr.execute(query, params + (id,))
4348 column = self._columns[f]
4349 # use admin user for accessing objects having rules defined on store fields
4350 result = column.get(cr, self, ids, f, SUPERUSER_ID, context=context)
4351 for r in result.keys():
4353 if r in field_dict.keys():
4354 if f in field_dict[r]:
4356 for id, value in result.items():
4357 if column._type == 'many2one':
4362 query = 'UPDATE "%s" SET "%s"=%%s WHERE id = %%s' % (
4365 cr.execute(query, (column._symbol_set[1](value), id))
4367 # invalidate and mark new-style fields to recompute
4368 self.browse(cr, uid, ids, context).modified(fields)
4372 # TODO: improve handling of NULL
4373 def _where_calc(self, cr, user, domain, active_test=True, context=None):
4374 """Computes the WHERE clause needed to implement an OpenERP domain.
4375 :param domain: the domain to compute
4377 :param active_test: whether the default filtering of records with ``active``
4378 field set to ``False`` should be applied.
4379 :return: the query expressing the given domain
4380 :rtype: osv.query.Query
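        For instance (illustrative domain)::

            query = self._where_calc(cr, uid, [('name', 'like', 'A%')])
            from_clause, where_clause, params = query.get_sql()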
4385 # if the object has a field named 'active', filter out all inactive
4386 # records unless they were explicitly asked for
4387 if 'active' in self._fields and active_test and context.get('active_test', True):
4389 # the item[0] trick below works for domain items and '&'/'|'/'!'
4391 if not any(item[0] == 'active' for item in domain):
4392 domain.insert(0, ('active', '=', 1))
4394 domain = [('active', '=', 1)]
4397 e = expression.expression(cr, user, domain, self, context)
4398 tables = e.get_tables()
4399 where_clause, where_params = e.to_sql()
4400 where_clause = where_clause and [where_clause] or []
4402 where_clause, where_params, tables = [], [], ['"%s"' % self._table]
4404 return Query(tables, where_clause, where_params)
4406 def _check_qorder(self, word):
4407 if not regex_order.match(word):
4408 raise except_orm(_('AccessError'), _('Invalid "order" specified. A valid "order" specification is a comma-separated list of valid field names (optionally followed by asc/desc for the direction)'))
4411 def _apply_ir_rules(self, cr, uid, query, mode='read', context=None):
4412 """Add what's missing in ``query`` to implement all appropriate ir.rules
4413 (using the ``model_name``'s rules or the current model's rules if ``model_name`` is None)
4415 :param query: the current query object
4417 if uid == SUPERUSER_ID:
4420 def apply_rule(added_clause, added_params, added_tables, parent_model=None):
4421 """ :param parent_model: name of the parent model, if the added
4422 clause comes from a parent model
4426 # as inherited rules are being applied, we need to add the missing JOIN
4427 # to reach the parent table (if it was not JOINed yet in the query)
4428 parent_alias = self._inherits_join_add(self, parent_model, query)
4429 # inherited rules are applied on the external table -> need to get the alias and replace
4430 parent_table = self.pool[parent_model]._table
4431 added_clause = [clause.replace('"%s"' % parent_table, '"%s"' % parent_alias) for clause in added_clause]
4432 # change references to parent_table to parent_alias, because we now use the alias to refer to the table
4434 for table in added_tables:
4435 # table is just a table name -> switch to the full alias
4436 if table == '"%s"' % parent_table:
4437 new_tables.append('"%s" as "%s"' % (parent_table, parent_alias))
4438 # table is already a full statement -> replace references to the table with its alias; this is correct given the way aliases are generated
4440 new_tables.append(table.replace('"%s"' % parent_table, '"%s"' % parent_alias))
4441 added_tables = new_tables
4442 query.where_clause += added_clause
4443 query.where_clause_params += added_params
4444 for table in added_tables:
4445 if table not in query.tables:
4446 query.tables.append(table)
4450 # apply main rules on the object
4451 rule_obj = self.pool.get('ir.rule')
4452 rule_where_clause, rule_where_clause_params, rule_tables = rule_obj.domain_get(cr, uid, self._name, mode, context=context)
4453 apply_rule(rule_where_clause, rule_where_clause_params, rule_tables)
4455 # apply ir.rules from the parents (through _inherits)
4456 for inherited_model in self._inherits:
4457 rule_where_clause, rule_where_clause_params, rule_tables = rule_obj.domain_get(cr, uid, inherited_model, mode, context=context)
4458 apply_rule(rule_where_clause, rule_where_clause_params, rule_tables,
4459 parent_model=inherited_model)
4461 def _generate_m2o_order_by(self, order_field, query):
4463 Add possibly missing JOIN to ``query`` and generate the ORDER BY clause for m2o fields,
4464 either native m2o fields or function/related fields that are stored, including
4465 intermediate JOINs for inheritance if required.
4467 :return: the qualified field name to use in an ORDER BY clause to sort by ``order_field``
4469 if order_field not in self._columns and order_field in self._inherit_fields:
4470 # also add missing joins for reaching the table containing the m2o field
4471 qualified_field = self._inherits_join_calc(order_field, query)
4472 order_field_column = self._inherit_fields[order_field][2]
4474 qualified_field = '"%s"."%s"' % (self._table, order_field)
4475 order_field_column = self._columns[order_field]
4477 assert order_field_column._type == 'many2one', 'Invalid field passed to _generate_m2o_order_by()'
4478 if not order_field_column._classic_write and not getattr(order_field_column, 'store', False):
4479 _logger.debug("Many2one function/related fields must be stored " \
4480 "to be used as ordering fields! Ignoring sorting for %s.%s",
4481 self._name, order_field)
4484 # figure out the applicable order_by for the m2o
4485 dest_model = self.pool[order_field_column._obj]
4486 m2o_order = dest_model._order
4487 if not regex_order.match(m2o_order):
4488 # _order is complex, can't use it here, so we default to _rec_name
4489 m2o_order = dest_model._rec_name
4491 # extract the field names, to be able to qualify them and add desc/asc
4493 for order_part in m2o_order.split(","):
4494 m2o_order_list.append(order_part.strip().split(" ", 1)[0].strip())
4495 m2o_order = m2o_order_list
4497 # Join the dest m2o table if it's not joined yet. We use [LEFT] OUTER join here
4498 # as we don't want to exclude results that have NULL values for the m2o
4499 src_table, src_field = qualified_field.replace('"', '').split('.', 1)
4500 dst_alias, dst_alias_statement = query.add_join((src_table, dest_model._table, src_field, 'id', src_field), implicit=False, outer=True)
4501 qualify = lambda field: '"%s"."%s"' % (dst_alias, field)
4502 return map(qualify, m2o_order) if isinstance(m2o_order, list) else qualify(m2o_order)
4504 def _generate_order_by(self, order_spec, query):
4506 Attempt to construct an appropriate ORDER BY clause based on order_spec, which must be
4507 a comma-separated list of valid field names, optionally followed by an ASC or DESC direction.
4509 :raise: except_orm in case order_spec is malformed
4511 order_by_clause = ''
4512 order_spec = order_spec or self._order
4514 order_by_elements = []
4515 self._check_qorder(order_spec)
4516 for order_part in order_spec.split(','):
4517 order_split = order_part.strip().split(' ')
4518 order_field = order_split[0].strip()
4519 order_direction = order_split[1].strip() if len(order_split) == 2 else ''
4522 if order_field == 'id':
4523 order_by_elements.append('"%s"."%s" %s' % (self._table, order_field, order_direction))
4524 elif order_field in self._columns:
4525 order_column = self._columns[order_field]
4526 if order_column._classic_read:
4527 inner_clause = '"%s"."%s"' % (self._table, order_field)
4528 elif order_column._type == 'many2one':
4529 inner_clause = self._generate_m2o_order_by(order_field, query)
4531 continue # ignore non-readable or "non-joinable" fields
4532 elif order_field in self._inherit_fields:
4533 parent_obj = self.pool[self._inherit_fields[order_field][3]]
4534 order_column = parent_obj._columns[order_field]
4535 if order_column._classic_read:
4536 inner_clause = self._inherits_join_calc(order_field, query)
4537 elif order_column._type == 'many2one':
4538 inner_clause = self._generate_m2o_order_by(order_field, query)
4540 continue # ignore non-readable or "non-joinable" fields
4542 raise ValueError(_("Sorting field %s not found on model %s") % (order_field, self._name))
4543 if order_column and order_column._type == 'boolean':
4544 inner_clause = "COALESCE(%s, false)" % inner_clause
4546 if isinstance(inner_clause, list):
4547 for clause in inner_clause:
4548 order_by_elements.append("%s %s" % (clause, order_direction))
4550 order_by_elements.append("%s %s" % (inner_clause, order_direction))
4551 if order_by_elements:
4552 order_by_clause = ",".join(order_by_elements)
4554 return order_by_clause and (' ORDER BY %s ' % order_by_clause) or ''
4556 def _search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False, access_rights_uid=None):
4558 Private implementation of the search() method, which allows specifying the uid to use for the access-rights check.
4559 This is useful for example when filling in the selection list for a drop-down and avoiding access rights errors,
4560 by specifying ``access_rights_uid=1`` to bypass access rights check, but not ir.rules!
4561 This is ok at the security level because this method is private and not callable through XML-RPC.
4563 :param access_rights_uid: optional user ID to use when checking access rights
4564 (not for ir.rules, this is only for ir.model.access)
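        For example (illustrative domain)::

            ids = model._search(cr, uid, [('active', '=', True)],
                                limit=10, order='id desc',
                                access_rights_uid=SUPERUSER_ID)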
4568 self.check_access_rights(cr, access_rights_uid or user, 'read')
4570 # For transient models, restrict access to the current user, except for the super-user
4571 if self.is_transient() and self._log_access and user != SUPERUSER_ID:
4572 args = expression.AND(([('create_uid', '=', user)], args or []))
4574 query = self._where_calc(cr, user, args, context=context)
4575 self._apply_ir_rules(cr, user, query, 'read', context=context)
4576 order_by = self._generate_order_by(order, query)
4577 from_clause, where_clause, where_clause_params = query.get_sql()
4579 where_str = where_clause and (" WHERE %s" % where_clause) or ''
4582 # Ignore order, limit and offset when just counting; they don't make sense and could hurt performance
4584 query_str = 'SELECT count(1) FROM ' + from_clause + where_str
4585 cr.execute(query_str, where_clause_params)
4589 limit_str = limit and ' limit %d' % limit or ''
4590 offset_str = offset and ' offset %d' % offset or ''
4591 query_str = 'SELECT "%s".id FROM ' % self._table + from_clause + where_str + order_by + limit_str + offset_str
4592 cr.execute(query_str, where_clause_params)
4595 # TDE note: with auto_join, we could have several lines about the same result
4596 # i.e. a lead with several unread messages; we uniquify the result using
4597 # a fast way to do it while preserving order (http://www.peterbe.com/plog/uniqifiers-benchmark)
4598 def _uniquify_list(seq):
4600 return [x for x in seq if x not in seen and not seen.add(x)]
4602 return _uniquify_list([x[0] for x in res])
4604 # returns the different values ever entered for one field
4605 # this is used, for example, in the client when the user hits enter on a char field
4607 def distinct_field_get(self, cr, uid, field, value, args=None, offset=0, limit=None):
4610 if field in self._inherit_fields:
4611 return self.pool[self._inherit_fields[field][0]].distinct_field_get(cr, uid, field, value, args, offset, limit)
4613 return self._columns[field].search(cr, self, args, field, value, offset, limit, uid)

    def copy_data(self, cr, uid, id, default=None, context=None):
        """
        Copy given record's data with all its fields values

        :param cr: database cursor
        :param uid: current user id
        :param id: id of the record to copy
        :param default: field values to override in the original values of the copied record
        :type default: dictionary
        :param context: context arguments, like lang, time zone
        :type context: dictionary
        :return: dictionary containing all the field values
        """
        if context is None:
            context = {}

        # avoid recursion through already copied records in case of circular relationship
        seen_map = context.setdefault('__copy_data_seen', {})
        if id in seen_map.setdefault(self._name, []):
            return
        seen_map[self._name].append(id)

        if default is None:
            default = {}
        if 'state' not in default:
            if 'state' in self._defaults:
                if callable(self._defaults['state']):
                    default['state'] = self._defaults['state'](self, cr, uid, context)
                else:
                    default['state'] = self._defaults['state']

        # build a black list of fields that should not be copied
        blacklist = set(MAGIC_COLUMNS + ['parent_left', 'parent_right'])
        whitelist = set(name for name, field in self._fields.iteritems() if not field.inherited)

        def blacklist_given_fields(obj):
            # blacklist the fields that are given by inheritance
            for other, field_to_other in obj._inherits.items():
                blacklist.add(field_to_other)
                if field_to_other in default:
                    # all the fields of 'other' are given by the record: default[field_to_other],
                    # except the ones redefined in self
                    blacklist.update(set(self.pool[other]._fields) - whitelist)
                else:
                    blacklist_given_fields(self.pool[other])
            # blacklist deprecated fields
            for name, field in obj._fields.iteritems():
                if field.deprecated:
                    blacklist.add(name)

        blacklist_given_fields(self)

        fields_to_copy = dict((f, fi) for f, fi in self._fields.iteritems()
                              if fi.copy
                              if f not in default
                              if f not in blacklist)

        data = self.read(cr, uid, [id], fields_to_copy.keys(), context=context)
        if data:
            data = data[0]
        else:
            raise IndexError(_("Record #%d of %s not found, cannot copy!") % (id, self._name))

        res = dict(default)
        for f, field in fields_to_copy.iteritems():
            if field.type == 'many2one':
                res[f] = data[f] and data[f][0]
            elif field.type == 'one2many':
                other = self.pool[field.comodel_name]
                # duplicate following the order of the ids because we'll rely on
                # it later for copying translations in copy_translation()!
                lines = [other.copy_data(cr, uid, line_id, context=context) for line_id in sorted(data[f])]
                # the lines are duplicated using the wrong (old) parent, but then
                # are reassigned to the correct one thanks to the (0, 0, ...)
                res[f] = [(0, 0, line) for line in lines if line]
            elif field.type == 'many2many':
                res[f] = [(6, 0, data[f])]
            else:
                res[f] = data[f]

        return res
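
    # Illustrative sketch (not executed), assuming a hypothetical model with a
    # many2one 'partner_id', a one2many 'line_ids' and a many2many 'tag_ids':
    #
    #     data = model.copy_data(cr, uid, 42, default={'name': 'Copy'})
    #     # -> {'name': 'Copy',
    #     #     'partner_id': 7,                   # m2o flattened to its id
    #     #     'line_ids': [(0, 0, {...})],       # o2m lines re-created
    #     #     'tag_ids': [(6, 0, [1, 2, 3])]}    # m2m replaced as a whole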

    def copy_translations(self, cr, uid, old_id, new_id, context=None):
        if context is None:
            context = {}

        # avoid recursion through already copied records in case of circular relationship
        seen_map = context.setdefault('__copy_translations_seen', {})
        if old_id in seen_map.setdefault(self._name, []):
            return
        seen_map[self._name].append(old_id)

        trans_obj = self.pool.get('ir.translation')

        for field_name, field in self._fields.iteritems():
            if not field.copy:
                continue
            # removing the lang to compare untranslated values
            context_wo_lang = dict(context, lang=None)
            old_record, new_record = self.browse(cr, uid, [old_id, new_id], context=context_wo_lang)
            # we must recursively copy the translations for o2o and o2m
            if field.type == 'one2many':
                target_obj = self.pool[field.comodel_name]
                # here we rely on the order of the ids to match the translations
                # as foreseen in copy_data()
                old_children = sorted(r.id for r in old_record[field_name])
                new_children = sorted(r.id for r in new_record[field_name])
                for (old_child, new_child) in zip(old_children, new_children):
                    target_obj.copy_translations(cr, uid, old_child, new_child, context=context)
            # and for translatable fields we keep them for copy
            elif getattr(field, 'translate', False):
                if field_name in self._columns:
                    trans_name = self._name + "," + field_name
                    target_id = new_id
                    source_id = old_id
                elif field_name in self._inherit_fields:
                    trans_name = self._inherit_fields[field_name][0] + "," + field_name
                    # get the id of the parent record to set the translation
                    inherit_field_name = self._inherit_fields[field_name][1]
                    target_id = new_record[inherit_field_name].id
                    source_id = old_record[inherit_field_name].id
                else:
                    continue

                trans_ids = trans_obj.search(cr, uid, [
                        ('name', '=', trans_name),
                        ('res_id', '=', source_id)
                ])
                user_lang = context.get('lang')
                for record in trans_obj.read(cr, uid, trans_ids, context=context):
                    del record['id']
                    # remove source to avoid triggering _set_src
                    del record['source']
                    record.update({'res_id': target_id})
                    if user_lang and user_lang == record['lang']:
                        # 'source' to force the call to _set_src
                        # 'value' needed if value is changed in copy(), want to see the new_value
                        record['source'] = old_record[field_name]
                        record['value'] = new_record[field_name]
                    trans_obj.create(cr, uid, record, context=context)

    @api.returns('self', lambda value: value.id)
    def copy(self, cr, uid, id, default=None, context=None):
        """ copy(default=None)

        Duplicate record with given id updating it with default values

        :param dict default: dictionary of field values to override in the
            original values of the copied record, e.g: ``{'field_name': overridden_value, ...}``
        :returns: new record
        """
        if context is None:
            context = {}
        context = context.copy()
        data = self.copy_data(cr, uid, id, default, context)
        new_id = self.create(cr, uid, data, context)
        self.copy_translations(cr, uid, id, new_id, context)
        return new_id
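
    # Usage sketch (not executed); 'name' is a hypothetical field used only
    # for illustration:
    #
    #     new_id = model.copy(cr, uid, 42, default={'name': 'Copy of Foo'})
    #
    # copy_data(), create() and copy_translations() run in that order, so the
    # duplicate carries both the overridden values and the translations.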

    @api.returns('self')
    def exists(self):
        """ exists() -> records

        Returns the subset of records in `self` that exist, and marks deleted
        records as such in cache. It can be used as a test on records::

            if record.exists():
                ...

        By convention, new records are returned as existing.
        """
        ids, new_ids = [], []
        for i in self._ids:
            (ids if isinstance(i, (int, long)) else new_ids).append(i)
        if not ids:
            return self
        query = """SELECT id FROM "%s" WHERE id IN %%s""" % self._table
        self._cr.execute(query, [tuple(ids)])
        ids = [r[0] for r in self._cr.fetchall()]
        existing = self.browse(ids + new_ids)
        if len(existing) < len(self):
            # mark missing records in cache with a failed value
            exc = MissingError(_("Record does not exist or has been deleted."))
            (self - existing)._cache.update(FailedValue(exc))
        return existing

    def check_recursion(self, cr, uid, ids, context=None, parent=None):
        _logger.warning("You are using deprecated %s.check_recursion(). Please use the '_check_recursion()' instead!" % \
                        self._name)
        assert parent is None or parent in self._columns or parent in self._inherit_fields,\
            "The 'parent' parameter passed to check_recursion() must be None or a valid field name"
        return self._check_recursion(cr, uid, ids, context, parent)

    def _check_recursion(self, cr, uid, ids, context=None, parent=None):
        """
        Verifies that there is no loop in a hierarchical structure of records,
        by following the parent relationship using the **parent** field until a loop
        is detected or until a top-level record is found.

        :param cr: database cursor
        :param uid: current user id
        :param ids: list of ids of records to check
        :param parent: optional parent field name (default: ``self._parent_name = parent_id``)
        :return: **True** if the operation can proceed safely, or **False** if an infinite loop is detected.
        """
        if not parent:
            parent = self._parent_name

        # must ignore 'active' flag, ir.rules, etc. => direct SQL query
        query = 'SELECT "%s" FROM "%s" WHERE id = %%s' % (parent, self._table)
        for id in ids:
            current_id = id
            while current_id is not None:
                cr.execute(query, (current_id,))
                result = cr.fetchone()
                current_id = result[0] if result else None
                if current_id == id:
                    return False
        return True
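
    # Usage sketch (not executed), assuming a hypothetical hierarchical model
    # using the default 'parent_id' field; a constraint would typically guard
    # against cycles like 1 -> 2 -> 3 -> 1:
    #
    #     _constraints = [
    #         (lambda self, cr, uid, ids, context=None:
    #              self._check_recursion(cr, uid, ids),
    #          'Error! You cannot create recursive records.', ['parent_id']),
    #     ]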

    def _check_m2m_recursion(self, cr, uid, ids, field_name):
        """
        Verifies that there is no loop in a directed graph of records, by
        following a many2many relationship of the model with itself through
        the given field.

        :param cr: database cursor
        :param uid: current user id
        :param ids: list of ids of records to check
        :param field_name: field to check
        :return: **True** if the operation can proceed safely, or **False** if an infinite loop is detected.
        """
        field = self._fields.get(field_name)
        if not (field and field.type == 'many2many' and
                field.comodel_name == self._name and field.store):
            # field must be a many2many on itself
            raise ValueError('invalid field_name: %r' % (field_name,))

        query = 'SELECT distinct "%s" FROM "%s" WHERE "%s" IN %%s' % \
                    (field.column2, field.relation, field.column1)
        ids_parent = ids[:]
        while ids_parent:
            ids_parent2 = []
            for i in range(0, len(ids_parent), cr.IN_MAX):
                j = i + cr.IN_MAX
                sub_ids_parent = ids_parent[i:j]
                cr.execute(query, (tuple(sub_ids_parent),))
                ids_parent2.extend(filter(None, map(lambda x: x[0], cr.fetchall())))
            ids_parent = ids_parent2
            for i in ids_parent:
                if i in ids:
                    return False
        return True

    def _get_external_ids(self, cr, uid, ids, *args, **kwargs):
        """Retrieve the External ID(s) of any database record.

        **Synopsis**: ``_get_xml_ids(cr, uid, ids) -> { 'id': ['module.xml_id'] }``

        :return: map of ids to the list of their fully qualified External IDs
                 in the form ``module.key``, or an empty list when there's no External
                 ID for a record, e.g.::

                     { 'id': ['module.ext_id', 'module.ext_id_bis'],
                       'id2': [] }
        """
        ir_model_data = self.pool.get('ir.model.data')
        data_ids = ir_model_data.search(cr, uid, [('model', '=', self._name), ('res_id', 'in', ids)])
        data_results = ir_model_data.read(cr, uid, data_ids, ['module', 'name', 'res_id'])
        result = {}
        for id in ids:
            # can't use dict.fromkeys() as the list would be shared!
            result[id] = []
        for record in data_results:
            result[record['res_id']].append('%(module)s.%(name)s' % record)
        return result

    def get_external_id(self, cr, uid, ids, *args, **kwargs):
        """Retrieve the External ID of any database record, if there
        is one. This method works as a possible implementation
        for a function field, to be able to add it to any
        model object easily, referencing it as ``Model.get_external_id``.

        When multiple External IDs exist for a record, only one
        of them is returned (randomly).

        :return: map of ids to their fully qualified XML ID,
                 defaulting to an empty string when there's none
                 (to be usable as a function field),
                 e.g.::

                     { 'id': 'module.ext_id',
                       'id2': '' }
        """
        results = self._get_xml_ids(cr, uid, ids)
        for k, v in results.iteritems():
            if results[k]:
                results[k] = v[0]
            else:
                results[k] = ''
        return results

    # backwards compatibility
    get_xml_id = get_external_id
    _get_xml_ids = _get_external_ids

    def print_report(self, cr, uid, ids, name, data, context=None):
        """
        Render the report `name` for the given IDs. The report must be defined
        for this model, not another.
        """
        report = self.pool['ir.actions.report.xml']._lookup_report(cr, name)
        assert self._name == report.table
        return report.create(cr, uid, ids, data, context)

    @classmethod
    def is_transient(cls):
        """ Return whether the model is transient.

        See :class:`TransientModel`.
        """
        return cls._transient

    def _transient_clean_rows_older_than(self, cr, seconds):
        assert self._transient, "Model %s is not transient, it cannot be vacuumed!" % self._name
        # Never delete rows used in last 5 minutes
        seconds = max(seconds, 300)
        query = ("SELECT id FROM " + self._table + " WHERE"
                 " COALESCE(write_date, create_date, (now() at time zone 'UTC'))::timestamp"
                 " < ((now() at time zone 'UTC') - interval %s)")
        cr.execute(query, ("%s seconds" % seconds,))
        ids = [x[0] for x in cr.fetchall()]
        self.unlink(cr, SUPERUSER_ID, ids)

    def _transient_clean_old_rows(self, cr, max_count):
        # Check how many rows we have in the table
        cr.execute("SELECT count(*) AS row_count FROM " + self._table)
        res = cr.fetchall()
        if res[0][0] <= max_count:
            return  # max not reached, nothing to do
        self._transient_clean_rows_older_than(cr, 300)

    def _transient_vacuum(self, cr, uid, force=False):
        """Clean the transient records.

        This unlinks old records from the transient model tables whenever the
        "_transient_max_count" or "_max_age" conditions (if any) are reached.
        Actual cleaning will happen only once every "_transient_check_time" calls.
        This means this method can be called frequently (e.g. whenever a new
        record is created).
        Example with both max_hours and max_count active:
        Suppose max_hours = 0.2 (e.g. 12 minutes), max_count = 20, there are 55 rows in the
        table, 10 created/changed in the last 5 minutes, an additional 12 created/changed between
        5 and 10 minutes ago, the rest created/changed more than 12 minutes ago.
        - age based vacuum will leave the 22 rows created/changed in the last 12 minutes
        - count based vacuum will wipe out another 12 rows. Not just 2, otherwise each addition
          would immediately cause the maximum to be reached again.
        - the 10 rows that have been created/changed in the last 5 minutes will NOT be deleted
        """
        assert self._transient, "Model %s is not transient, it cannot be vacuumed!" % self._name
        _transient_check_time = 20          # arbitrary limit on vacuum executions
        self._transient_check_count += 1
        if not force and (self._transient_check_count < _transient_check_time):
            return True  # no vacuum cleaning this time
        self._transient_check_count = 0

        # Age-based expiration
        if self._transient_max_hours:
            self._transient_clean_rows_older_than(cr, self._transient_max_hours * 60 * 60)

        # Count-based expiration
        if self._transient_max_count:
            self._transient_clean_old_rows(cr, self._transient_max_count)

        return True
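
    # Configuration sketch (not executed): a transient model opting into both
    # expiration policies; the attribute values are only examples.
    #
    #     class my_wizard(TransientModel):
    #         _name = 'my.wizard'
    #         _transient_max_count = 200     # count-based vacuum threshold
    #         _transient_max_hours = 1.0     # age-based vacuum threshold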

    def resolve_2many_commands(self, cr, uid, field_name, commands, fields=None, context=None):
        """ Serializes one2many and many2many commands into record dictionaries
            (as if all the records came from the database via a read()). This
            method is aimed at onchange methods on one2many and many2many fields.

            Because commands might be creation commands, not all record dicts
            will contain an ``id`` field. Commands matching an existing record
            will have an ``id``.

            :param field_name: name of the one2many or many2many field matching the commands
            :type field_name: str
            :param commands: one2many or many2many commands to execute on ``field_name``
            :type commands: list((int|False, int|False, dict|False))
            :param fields: list of fields to read from the database, when applicable
            :type fields: list(str)
            :returns: records in a shape similar to that returned by ``read()``
                (except records may be missing the ``id`` field if they don't exist in db)
            :rtype: list(dict)
        """
        result = []             # result (list of dict)
        record_ids = []         # ids of records to read
        updates = {}            # {id: dict} of updates on particular records

        for command in commands or []:
            if not isinstance(command, (list, tuple)):
                record_ids.append(command)
            elif command[0] == 0:
                result.append(command[2])
            elif command[0] == 1:
                record_ids.append(command[1])
                updates.setdefault(command[1], {}).update(command[2])
            elif command[0] in (2, 3):
                record_ids = [id for id in record_ids if id != command[1]]
            elif command[0] == 4:
                record_ids.append(command[1])
            elif command[0] == 5:
                result, record_ids = [], []
            elif command[0] == 6:
                result, record_ids = [], list(command[2])

        # read the records and apply the updates
        other_model = self.pool[self._fields[field_name].comodel_name]
        for record in other_model.read(cr, uid, record_ids, fields=fields, context=context):
            record.update(updates.get(record['id'], {}))
            result.append(record)

        return result

    # for backward compatibility
    resolve_o2m_commands_to_record_dicts = resolve_2many_commands
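
    # Illustrative sketch (not executed), assuming a one2many 'line_ids' whose
    # comodel has a 'name' field; the ids and values are made up:
    #
    #     commands = [(0, 0, {'name': 'new line'}),    # create -> kept as-is
    #                 (4, 7),                          # link   -> read from db
    #                 (1, 8, {'name': 'renamed'})]     # update -> read, then merged
    #     model.resolve_2many_commands(cr, uid, 'line_ids', commands, ['name'])
    #     # -> [{'name': 'new line'},
    #     #     {'id': 7, 'name': 'seven'},
    #     #     {'id': 8, 'name': 'renamed'}]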

    def search_read(self, cr, uid, domain=None, fields=None, offset=0, limit=None, order=None, context=None):
        """
        Performs a ``search()`` followed by a ``read()``.

        :param cr: database cursor
        :param user: current user id
        :param domain: Search domain, see ``args`` parameter in ``search()``. Defaults to an empty domain that will match all records.
        :param fields: List of fields to read, see ``fields`` parameter in ``read()``. Defaults to all fields.
        :param offset: Number of records to skip, see ``offset`` parameter in ``search()``. Defaults to 0.
        :param limit: Maximum number of records to return, see ``limit`` parameter in ``search()``. Defaults to no limit.
        :param order: Columns to sort result, see ``order`` parameter in ``search()``. Defaults to no sort.
        :param context: context arguments.
        :return: List of dictionaries containing the asked fields.
        :rtype: List of dictionaries.
        """
        record_ids = self.search(cr, uid, domain or [], offset=offset, limit=limit, order=order, context=context)
        if not record_ids:
            return []

        if fields and fields == ['id']:
            # shortcut read if we only want the ids
            return [{'id': id} for id in record_ids]

        # read() ignores active_test, but it would forward it to any downstream search call
        # (e.g. for x2m or function fields), and this is not the desired behavior, the flag
        # was presumably only meant for the main search().
        # TODO: Move this to read() directly?
        read_ctx = dict(context or {})
        read_ctx.pop('active_test', None)

        result = self.read(cr, uid, record_ids, fields, context=read_ctx)
        if len(result) <= 1:
            return result

        # reorder read
        index = dict((r['id'], r) for r in result)
        return [index[x] for x in record_ids if x in index]
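
    # Usage sketch (not executed); 'customer' and 'name' are hypothetical
    # field names:
    #
    #     rows = model.search_read(cr, uid, [('customer', '=', True)],
    #                              ['name'], limit=5, order='name')
    #     # -> [{'id': 12, 'name': 'Agrolait'}, ...] in search order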

    def _register_hook(self, cr):
        """ stuff to do right after the registry is built """
        pass

    @classmethod
    def _patch_method(cls, name, method):
        """ Monkey-patch a method for all instances of this model. This replaces
            the method called `name` by `method` in the given class.
            The original method is then accessible via ``method.origin``, and it
            can be restored with :meth:`~._revert_method`.

            Example::

                @api.multi
                def do_write(self, values):
                    # do stuff, and call the original method
                    return do_write.origin(self, values)

                # patch method write of model
                model._patch_method('write', do_write)

                # this will call do_write
                records = model.search([...])
                records.write(...)

                # restore the original method
                model._revert_method('write')
        """
        origin = getattr(cls, name)
        method.origin = origin
        # propagate decorators from origin to method, and apply api decorator
        wrapped = api.guess(api.propagate(origin, method))
        wrapped.origin = origin
        setattr(cls, name, wrapped)

    @classmethod
    def _revert_method(cls, name):
        """ Revert the original method called `name` in the given class.
            See :meth:`~._patch_method`.
        """
        method = getattr(cls, name)
        setattr(cls, name, method.origin)

    #
    # Instance creation
    #
    # An instance represents an ordered collection of records in a given
    # execution environment. The instance object refers to the environment, and
    # the records themselves are represented by their cache dictionary. The 'id'
    # of each record is found in its corresponding cache dictionary.
    #
    # This design has the following advantages:
    #  - cache access is direct and thus fast;
    #  - one can consider records without an 'id' (see new records);
    #  - the global cache is only an index to "resolve" a record 'id'.
    #

    @classmethod
    def _browse(cls, env, ids):
        """ Create an instance attached to `env`; `ids` is a tuple of record
            ids.
        """
        records = object.__new__(cls)
        records.env = env
        records._ids = ids
        env.prefetch[cls._name].update(ids)
        return records

    @api.v7
    def browse(self, cr, uid, arg=None, context=None):
        ids = _normalize_ids(arg)
        #assert all(isinstance(id, IdType) for id in ids), "Browsing invalid ids: %s" % ids
        return self._browse(Environment(cr, uid, context or {}), ids)

    @api.v8
    def browse(self, arg=None):
        """ browse([ids]) -> records

        Returns a recordset for the ids provided as parameter in the current
        environment.

        Can take no ids, a single id or a sequence of ids.
        """
        ids = _normalize_ids(arg)
        #assert all(isinstance(id, IdType) for id in ids), "Browsing invalid ids: %s" % ids
        return self._browse(self.env, ids)

    #
    # Internal properties, for manipulating the instance's implementation
    #

    @property
    def ids(self):
        """ List of actual record ids in this recordset (ignores placeholder
            ids for records to create)
        """
        return filter(None, list(self._ids))

    # backward-compatibility with former browse records
    _cr = property(lambda self: self.env.cr)
    _uid = property(lambda self: self.env.uid)
    _context = property(lambda self: self.env.context)

    #
    # Conversion methods
    #

    def ensure_one(self):
        """ Verifies that the current recordset holds a single record. Raises
            an exception otherwise.
        """
        if len(self) == 1:
            return self
        raise except_orm("ValueError", "Expected singleton: %s" % self)

    def with_env(self, env):
        """ Returns a new version of this recordset attached to the provided
            environment

            :type env: :class:`~openerp.api.Environment`
        """
        return self._browse(env, self._ids)

    def sudo(self, user=SUPERUSER_ID):
        """ sudo([user=SUPERUSER])

        Returns a new version of this recordset attached to the provided
        user.
        """
        return self.with_env(self.env(user=user))

    def with_context(self, *args, **kwargs):
        """ with_context([context][, **overrides]) -> records

        Returns a new version of this recordset attached to an extended
        context.

        The extended context is either the provided ``context`` in which
        ``overrides`` are merged or the *current* context in which
        ``overrides`` are merged e.g.::

            # current context is {'key1': True}
            r2 = records.with_context({}, key2=True)
            # -> r2._context is {'key2': True}
            r2 = records.with_context(key2=True)
            # -> r2._context is {'key1': True, 'key2': True}
        """
        context = dict(args[0] if args else self._context, **kwargs)
        return self.with_env(self.env(context=context))

    def _convert_to_cache(self, values, update=False, validate=True):
        """ Convert the `values` dictionary into cached values.

            :param update: whether the conversion is made for updating `self`;
                this is necessary for interpreting the commands of *2many fields
            :param validate: whether values must be checked
        """
        fields = self._fields
        target = self if update else self.browse()
        return {
            name: fields[name].convert_to_cache(value, target, validate=validate)
            for name, value in values.iteritems()
            if name in fields
        }

    def _convert_to_write(self, values):
        """ Convert the `values` dictionary into the format of :meth:`write`. """
        fields = self._fields
        result = {}
        for name, value in values.iteritems():
            if name in fields:
                value = fields[name].convert_to_write(value)
                if not isinstance(value, NewId):
                    result[name] = value
        return result

    #
    # Record traversal and update
    #

    def _mapped_func(self, func):
        """ Apply function `func` on all records in `self`, and return the
            result as a list or a recordset (if `func` returns recordsets).
        """
        if self:
            vals = [func(rec) for rec in self]
            return reduce(operator.or_, vals) if isinstance(vals[0], BaseModel) else vals
        else:
            vals = func(self)
            return vals if isinstance(vals, BaseModel) else []

    def mapped(self, func):
        """ Apply `func` on all records in `self`, and return the result as a
            list or a recordset (if `func` returns recordsets). In the latter
            case, the order of the returned recordset is arbitrary.

            :param func: a function or a dot-separated sequence of field names
        """
        if isinstance(func, basestring):
            recs = self
            for name in func.split('.'):
                recs = recs._mapped_func(operator.itemgetter(name))
            return recs
        else:
            return self._mapped_func(func)
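
    # Usage sketch (not executed); 'partner_id' and 'name' are hypothetical
    # field names:
    #
    #     records.mapped('name')              # -> list of names
    #     records.mapped('partner_id')        # -> recordset of partners
    #     records.mapped('partner_id.name')   # -> list of partner names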

    def _mapped_cache(self, name_seq):
        """ Same as `~.mapped`, but `name_seq` is a dot-separated sequence of
            field names, and only cached values are used.
        """
        recs = self
        for name in name_seq.split('.'):
            field = recs._fields[name]
            null = field.null(self.env)
            recs = recs.mapped(lambda rec: rec._cache.get(field, null))
        return recs

    def filtered(self, func):
        """ Select the records in `self` such that `func(rec)` is true, and
            return them as a recordset.

            :param func: a function or a dot-separated sequence of field names
        """
        if isinstance(func, basestring):
            name = func
            func = lambda rec: filter(None, rec.mapped(name))
        return self.browse([rec.id for rec in self if func(rec)])

    def sorted(self, key=None, reverse=False):
        """ Return the recordset `self` ordered by `key`.

            :param key: either a function of one argument that returns a
                comparison key for each record, or ``None``, in which case
                records are ordered according to the model's default order

            :param reverse: if ``True``, return the result in reverse order
        """
        if key is None:
            recs = self.search([('id', 'in', self.ids)])
            return self.browse(reversed(recs._ids)) if reverse else recs
        else:
            return self.browse(map(int, sorted(self, key=key, reverse=reverse)))

    def update(self, values):
        """ Update record `self[0]` with `values`. """
        for name, value in values.iteritems():
            self[name] = value

    #
    # New records - represent records that do not exist in the database yet;
    # they are used to perform onchanges.
    #

    @api.model
    def new(self, values={}):
        """ new([values]) -> record

        Return a new record instance attached to the current environment and
        initialized with the provided ``values``. The record is *not* created
        in database, it only exists in memory.
        """
        record = self.browse([NewId()])
        record._cache.update(record._convert_to_cache(values, update=True))

        if record.env.in_onchange:
            # The cache update does not set inverse fields, so do it manually.
            # This is useful for computing a function field on secondary
            # records, if that field depends on the main record.
            for name in values:
                field = self._fields.get(name)
                if field:
                    for invf in field.inverse_fields:
                        invf._update(record[name], record)

        return record

    #
    # Dirty flags, to mark record fields modified (in draft mode)
    #

    def _is_dirty(self):
        """ Return whether any record in `self` is dirty. """
        dirty = self.env.dirty
        return any(record in dirty for record in self)

    def _get_dirty(self):
        """ Return the list of field names for which `self` is dirty. """
        dirty = self.env.dirty
        return list(dirty.get(self, ()))

    def _set_dirty(self, field_name):
        """ Mark the records in `self` as dirty for the given `field_name`. """
        dirty = self.env.dirty
        for record in self:
            dirty[record].add(field_name)

    def __nonzero__(self):
        """ Test whether `self` is nonempty. """
        return bool(getattr(self, '_ids', True))

    def __len__(self):
        """ Return the size of `self`. """
        return len(self._ids)

    def __iter__(self):
        """ Return an iterator over `self`. """
        for id in self._ids:
            yield self._browse(self.env, (id,))

    def __contains__(self, item):
        """ Test whether `item` (record or field name) is an element of `self`.
            In the first case, the test is fully equivalent to::

                any(item == record for record in self)
        """
        if isinstance(item, BaseModel) and self._name == item._name:
            return len(item) == 1 and item.id in self._ids
        elif isinstance(item, basestring):
            return item in self._fields
        else:
            raise except_orm("ValueError", "Mixing apples and oranges: %s in %s" % (item, self))

    def __add__(self, other):
        """ Return the concatenation of two recordsets. """
        if not isinstance(other, BaseModel) or self._name != other._name:
            raise except_orm("ValueError", "Mixing apples and oranges: %s + %s" % (self, other))
        return self.browse(self._ids + other._ids)

    def __sub__(self, other):
        """ Return the recordset of all the records in `self` that are not in `other`. """
        if not isinstance(other, BaseModel) or self._name != other._name:
            raise except_orm("ValueError", "Mixing apples and oranges: %s - %s" % (self, other))
        other_ids = set(other._ids)
        return self.browse([id for id in self._ids if id not in other_ids])

    def __and__(self, other):
        """ Return the intersection of two recordsets.
            Note that recordset order is not preserved.
        """
        if not isinstance(other, BaseModel) or self._name != other._name:
            raise except_orm("ValueError", "Mixing apples and oranges: %s & %s" % (self, other))
        return self.browse(set(self._ids) & set(other._ids))

    def __or__(self, other):
        """ Return the union of two recordsets.
            Note that recordset order is not preserved.
        """
        if not isinstance(other, BaseModel) or self._name != other._name:
            raise except_orm("ValueError", "Mixing apples and oranges: %s | %s" % (self, other))
        return self.browse(set(self._ids) | set(other._ids))

    def __eq__(self, other):
        """ Test whether two recordsets are equivalent (up to reordering). """
        if not isinstance(other, BaseModel):
            if other:
                _logger.warning("Comparing apples and oranges: %s == %s", self, other)
            return False
        return self._name == other._name and set(self._ids) == set(other._ids)

    def __ne__(self, other):
        return not self == other

    def __lt__(self, other):
        if not isinstance(other, BaseModel) or self._name != other._name:
            raise except_orm("ValueError", "Mixing apples and oranges: %s < %s" % (self, other))
        return set(self._ids) < set(other._ids)

    def __le__(self, other):
        if not isinstance(other, BaseModel) or self._name != other._name:
            raise except_orm("ValueError", "Mixing apples and oranges: %s <= %s" % (self, other))
        return set(self._ids) <= set(other._ids)

    def __gt__(self, other):
        if not isinstance(other, BaseModel) or self._name != other._name:
            raise except_orm("ValueError", "Mixing apples and oranges: %s > %s" % (self, other))
        return set(self._ids) > set(other._ids)

    def __ge__(self, other):
        if not isinstance(other, BaseModel) or self._name != other._name:
            raise except_orm("ValueError", "Mixing apples and oranges: %s >= %s" % (self, other))
        return set(self._ids) >= set(other._ids)
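
    # Summary sketch (not executed) of the recordset algebra defined above,
    # where a and b are recordsets of the same model:
    #
    #     a + b    # concatenation, order preserved, duplicates possible
    #     a - b    # records of a not in b, order of a preserved
    #     a & b    # intersection, order not preserved
    #     a | b    # union, order not preserved
    #     a < b    # strict subset test (set semantics, ignores order)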

    def __str__(self):
        return "%s%s" % (self._name, getattr(self, '_ids', ""))

    def __unicode__(self):
        return unicode(str(self))

    __repr__ = __str__

    def __hash__(self):
        if hasattr(self, '_ids'):
            return hash((self._name, frozenset(self._ids)))
        else:
            return hash(self._name)

    def __getitem__(self, key):
        """ If `key` is an integer or a slice, return the corresponding record
            selection as an instance (attached to `self.env`).
            Otherwise read the field `key` of the first record in `self`.

            Examples::

                inst = model.search(dom)    # inst is a recordset
                r4 = inst[3]                # fourth record in inst
                rs = inst[10:20]            # subset of inst
                nm = rs['name']             # name of first record in inst
        """
        if isinstance(key, basestring):
            # important: one must call the field's getter
            return self._fields[key].__get__(self, type(self))
        elif isinstance(key, slice):
            return self._browse(self.env, self._ids[key])
        else:
            return self._browse(self.env, (self._ids[key],))

    def __setitem__(self, key, value):
        """ Assign the field `key` to `value` in record `self`. """
        # important: one must call the field's setter
        return self._fields[key].__set__(self, value)

    #
    # Cache and recomputation management
    #

    @property
    def _cache(self):
        """ Return the cache of `self`, mapping field names to values. """
        return RecordCache(self)

    @api.model
    def _in_cache_without(self, field):
        """ Make sure `self` is present in cache (for prefetching), and return
            the records of model `self` in cache that have no value for `field`
            (:class:`Field` instance).
        """
        env = self.env
        prefetch_ids = env.prefetch[self._name]
        prefetch_ids.update(self._ids)
        ids = filter(None, prefetch_ids - set(env.cache[field]))
        return self.browse(ids)

    @api.multi
    def refresh(self):
        """ Clear the records cache.

            .. deprecated:: 8.0
                The record cache is automatically invalidated.
        """
        self.invalidate_cache()

    @api.model
    def invalidate_cache(self, fnames=None, ids=None):
        """ Invalidate the record caches after some records have been modified.
            If both `fnames` and `ids` are ``None``, the whole cache is cleared.

            :param fnames: the list of modified fields, or ``None`` for all fields
            :param ids: the list of modified record ids, or ``None`` for all
        """
        if fnames is None:
            if ids is None:
                return self.env.invalidate_all()
            fields = self._fields.values()
        else:
            fields = map(self._fields.__getitem__, fnames)

        # invalidate fields and inverse fields, too
        spec = [(f, ids) for f in fields] + \
               [(invf, None) for f in fields for invf in f.inverse_fields]
        self.env.invalidate(spec)

    @api.multi
    def modified(self, fnames):
        """ Notify that fields have been modified on `self`. This invalidates
            the cache, and prepares the recomputation of stored function fields
            (new-style fields only).

            :param fnames: iterable of field names that have been modified on
                records `self`
        """
        # each field knows what to invalidate and recompute
        spec = []
        for fname in fnames:
            spec += self._fields[fname].modified(self)

        cached_fields = {
            field
            for env in self.env.all
            for field in env.cache
        }
        # invalidate non-stored fields.function which are currently cached
        spec += [(f, None) for f in self.pool.pure_function_fields
                 if f in cached_fields]

        self.env.invalidate(spec)

    def _recompute_check(self, field):
        """ If `field` must be recomputed on some record in `self`, return the
            corresponding records that must be recomputed.
        """
        return self.env.check_todo(field, self)

    def _recompute_todo(self, field):
        """ Mark `field` to be recomputed. """
        self.env.add_todo(field, self)

    def _recompute_done(self, field):
        """ Mark `field` as recomputed. """
        self.env.remove_todo(field, self)

    @api.model
    def recompute(self):
        """ Recompute stored function fields. The fields and records to
            recompute have been determined by method :meth:`modified`.
        """
        while self.env.has_todo():
            field, recs = self.env.get_todo()
            # evaluate the fields to recompute, and save them to database
            for rec, rec1 in zip(recs, recs.with_context(recompute=False)):
                try:
                    values = rec._convert_to_write({
                        f.name: rec[f.name] for f in field.computed_fields
                    })
                    rec1._write(values)
                except MissingError:
                    pass
            # mark the computed fields as done
            map(recs._recompute_done, field.computed_fields)

    #
    # Generic onchange method
    #

    def _has_onchange(self, field, other_fields):
        """ Return whether `field` should trigger an onchange event in the
            presence of `other_fields`.
        """
        # test whether self has an onchange method for field, or field is a
        # dependency of any field in other_fields
        return field.name in self._onchange_methods or \
            any(dep in other_fields for dep in field.dependents)

    @api.model
    def _onchange_spec(self, view_info=None):
        """ Return the onchange spec from a view description; if not given, the
            result of ``self.fields_view_get()`` is used.
        """
        result = {}

        # for traversing the XML arch and populating result
        def process(node, info, prefix):
            if node.tag == 'field':
                name = node.attrib['name']
                names = "%s.%s" % (prefix, name) if prefix else name
                if not result.get(names):
                    result[names] = node.attrib.get('on_change')
                # traverse the subviews included in relational fields
                for subinfo in info['fields'][name].get('views', {}).itervalues():
                    process(etree.fromstring(subinfo['arch']), subinfo, names)
            else:
                for child in node:
                    process(child, info, prefix)

        if view_info is None:
            view_info = self.fields_view_get()
        process(etree.fromstring(view_info['arch']), view_info, '')
        return result

    def _onchange_eval(self, field_name, onchange, result):
        """ Apply onchange method(s) for field `field_name` with spec `onchange`
            on record `self`. Value assignments are applied on `self`, while
            domain and warning messages are put in dictionary `result`.
        """
        onchange = onchange.strip()

        # onchange V8
        if onchange in ("1", "true"):
            for method in self._onchange_methods.get(field_name, ()):
                method_res = method(self)
                if not method_res:
                    continue
                if 'domain' in method_res:
                    result.setdefault('domain', {}).update(method_res['domain'])
                if 'warning' in method_res:
                    result['warning'] = method_res['warning']
            return

        # onchange V7
        match = onchange_v7.match(onchange)
        if match:
            method, params = match.groups()

            # evaluate params -> tuple
            global_vars = {'context': self._context, 'uid': self._uid}
            if self._context.get('field_parent'):
                class RawRecord(object):
                    def __init__(self, record):
                        self._record = record
                    def __getattr__(self, name):
                        field = self._record._fields[name]
                        value = self._record[name]
                        return field.convert_to_onchange(value)
                record = self[self._context['field_parent']]
                global_vars['parent'] = RawRecord(record)
            field_vars = {
                key: self._fields[key].convert_to_onchange(val)
                for key, val in self._cache.iteritems()
            }
            params = eval("[%s]" % params, global_vars, field_vars)

            # call onchange method
            args = (self._cr, self._uid, self._origin.ids) + tuple(params)
            method_res = getattr(self._model, method)(*args)
            if not isinstance(method_res, dict):
                return
            if 'value' in method_res:
                method_res['value'].pop('id', None)
                self.update(self._convert_to_cache(method_res['value'], validate=False))
            if 'domain' in method_res:
                result.setdefault('domain', {}).update(method_res['domain'])
            if 'warning' in method_res:
                result['warning'] = method_res['warning']

    @api.multi
    def onchange(self, values, field_name, field_onchange):
        """ Perform an onchange on the given field.

            :param values: dictionary mapping field names to values, giving the
                current state of modification
            :param field_name: name of the modified field
            :param field_onchange: dictionary mapping field names to their
                on_change attribute
        """
        env = self.env

        if field_name and field_name not in self._fields:
            return {}

        # determine subfields for field.convert_to_write() below
        secondary = []
        subfields = defaultdict(set)
        for dotname in field_onchange:
            if '.' in dotname:
                secondary.append(dotname)
                name, subname = dotname.split('.')
                subfields[name].add(subname)

        # create a new record with values, and attach `self` to it
        with env.do_in_onchange():
            record = self.new(values)
            values = dict(record._cache)
            # attach `self` with a different context (for cache consistency)
            record._origin = self.with_context(__onchange=True)

        # determine which fields should trigger an onchange
        todo = set([field_name]) if field_name else set(values)
        done = set()

        # dummy assignment: trigger invalidations on the record
        for name in todo:
            value = record[name]
            field = self._fields[name]
            if not field_name and field.type == 'many2one' and field.delegate and not value:
                # do not nullify all fields of parent record for new records
                continue
            record[name] = value

        result = {'value': {}}

        while todo:
            name = todo.pop()
            if name in done:
                continue
            done.add(name)

            with env.do_in_onchange():
                # apply field-specific onchange methods
                if field_onchange.get(name):
                    record._onchange_eval(name, field_onchange[name], result)

                # force re-evaluation of function fields on secondary records
                for field_seq in secondary:
                    record.mapped(field_seq)

                # determine which fields have been modified
                for name, oldval in values.iteritems():
                    field = self._fields[name]
                    newval = record[name]
                    if field.type in ('one2many', 'many2many'):
                        if newval != oldval or newval._is_dirty():
                            # put new value in result
                            result['value'][name] = field.convert_to_write(
                                newval, record._origin, subfields.get(name),
                            )
                            todo.add(name)
                        else:
                            # keep result: newval may have been dirty before
                            pass
                    else:
                        if newval != oldval:
                            # put new value in result
                            result['value'][name] = field.convert_to_write(
                                newval, record._origin, subfields.get(name),
                            )
                            todo.add(name)
                        else:
                            # clean up result to not return another value
                            result['value'].pop(name, None)

        # At the moment, the client does not support updates on a *2many field
        # while this one is modified by the user.
        if field_name and self._fields[field_name].type in ('one2many', 'many2many'):
            result['value'].pop(field_name, None)

        return result
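
    # Illustrative sketch (not executed) of the structure returned above:
    #
    #     {'value':   {'field': new_value, ...},              # assignments
    #      'domain':  {'field': [...], ...},                  # domain updates
    #      'warning': {'title': '...', 'message': '...'}}     # user warning
    #
    # Only modified fields appear in 'value'; the field being edited is itself
    # stripped from it when it is a one2many or many2many, as explained above.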


class RecordCache(MutableMapping):
    """ Implements a proxy dictionary to read/update the cache of a record.
        Upon iteration, it looks like a dictionary mapping field names to
        values. However, fields may be used as keys as well.
    """
    def __init__(self, records):
        self._recs = records

    def contains(self, field):
        """ Return whether `records[0]` has a value for `field` in cache. """
        if isinstance(field, basestring):
            field = self._recs._fields[field]
        return self._recs.id in self._recs.env.cache[field]

    def __contains__(self, field):
        """ Return whether `records[0]` has a regular value for `field` in cache. """
        if isinstance(field, basestring):
            field = self._recs._fields[field]
        dummy = SpecialValue(None)
        value = self._recs.env.cache[field].get(self._recs.id, dummy)
        return not isinstance(value, SpecialValue)

    def __getitem__(self, field):
        """ Return the cached value of `field` for `records[0]`. """
        if isinstance(field, basestring):
            field = self._recs._fields[field]
        value = self._recs.env.cache[field][self._recs.id]
        return value.get() if isinstance(value, SpecialValue) else value

    def __setitem__(self, field, value):
        """ Assign the cached value of `field` for all records in `records`. """
        if isinstance(field, basestring):
            field = self._recs._fields[field]
        values = dict.fromkeys(self._recs._ids, value)
        self._recs.env.cache[field].update(values)

    def update(self, *args, **kwargs):
        """ Update the cache of all records in `records`. If the argument is a
            `SpecialValue`, update all fields (except "magic" columns).
        """
        if args and isinstance(args[0], SpecialValue):
            values = dict.fromkeys(self._recs._ids, args[0])
            for name, field in self._recs._fields.iteritems():
                if name != 'id':
                    self._recs.env.cache[field].update(values)
        else:
            return super(RecordCache, self).update(*args, **kwargs)

    def __delitem__(self, field):
        """ Remove the cached value of `field` for all `records`. """
        if isinstance(field, basestring):
            field = self._recs._fields[field]
        field_cache = self._recs.env.cache[field]
        for id in self._recs._ids:
            field_cache.pop(id, None)

    def __iter__(self):
        """ Iterate over the field names with a regular value in cache. """
        cache, id = self._recs.env.cache, self._recs.id
        dummy = SpecialValue(None)
        for name, field in self._recs._fields.iteritems():
            if name != 'id' and not isinstance(cache[field].get(id, dummy), SpecialValue):
                yield name

    def __len__(self):
        """ Return the number of fields with a regular value in cache. """
        return sum(1 for name in self)


class Model(BaseModel):
    """Main super-class for regular database-persisted OpenERP models.

    OpenERP models are created by inheriting from this class::

        class user(Model):
            ...

    The system will later instantiate the class once per database (on
    which the class' module is installed).
    """
    _auto = True
    _register = False # not visible in ORM registry, meant to be python-inherited only
    _transient = False # True in a TransientModel


class TransientModel(BaseModel):
    """Model super-class for transient records, meant to be temporarily
    persisted, and regularly vacuum-cleaned.

    A TransientModel has a simplified access rights management,
    all users can create new records, and may only access the
    records they created. The super-user has unrestricted access
    to all TransientModel records.
    """
    _auto = True
    _register = False # not visible in ORM registry, meant to be python-inherited only
    _transient = True


class AbstractModel(BaseModel):
    """Abstract Model super-class for creating an abstract class meant to be
    inherited by regular models (Models or TransientModels) but not meant to
    be usable on its own, or persisted.

    Technical note: we don't want to make AbstractModel the super-class of
    Model or BaseModel because it would not make sense to put the main
    definition of persistence methods such as create() in it, and still we
    should be able to override them within an AbstractModel.
    """
    _auto = False # don't create any database backend for AbstractModels
    _register = False # not visible in ORM registry, meant to be python-inherited only
    _transient = False


def itemgetter_tuple(items):
    """ Fixes itemgetter inconsistency (useful in some cases) of not returning
    a tuple if len(items) == 1: always returns an n-tuple where n = len(items)
    """
    if len(items) == 0:
        return lambda a: ()
    if len(items) == 1:
        return lambda gettable: (gettable[items[0]],)
    return operator.itemgetter(*items)
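
# Behaviour sketch (not executed):
#
#     itemgetter_tuple([])('abc')        # -> ()
#     itemgetter_tuple([0])('abc')       # -> ('a',), a 1-tuple unlike operator.itemgetter
#     itemgetter_tuple([0, 2])('abc')    # -> ('a', 'c')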

def convert_pgerror_23502(model, fields, info, e):
    m = re.match(r'^null value in column "(?P<field>\w+)" violates '
                 r'not-null constraint\n',
                 str(e))
    field_name = m and m.group('field')
    if not m or field_name not in fields:
        return {'message': unicode(e)}
    message = _(u"Missing required value for the field '%s'.") % field_name
    field = fields.get(field_name)
    if field:
        message = _(u"Missing required value for the field '%s' (%s)") % (field['string'], field_name)
    return {
        'message': message,
        'field': field_name,
    }

def convert_pgerror_23505(model, fields, info, e):
    m = re.match(r'^duplicate key (?P<field>\w+) violates unique constraint',
                 str(e))
    field_name = m and m.group('field')
    if not m or field_name not in fields:
        return {'message': unicode(e)}
    message = _(u"The value for the field '%s' already exists.") % field_name
    field = fields.get(field_name)
    if field:
        message = _(u"%s This might be '%s' in the current model, or a field "
                    u"of the same name in an o2m.") % (message, field['string'])
    return {
        'message': message,
        'field': field_name,
    }

PGERROR_TO_OE = defaultdict(
    # shape of mapped converters
    lambda: (lambda model, fvg, info, pgerror: {'message': unicode(pgerror)}), {
    # not_null_violation
    '23502': convert_pgerror_23502,
    # unique constraint error
    '23505': convert_pgerror_23505,
})

def _normalize_ids(arg, atoms={int, long, str, unicode, NewId}):
    """ Normalizes the ids argument for ``browse`` (v7 and v8) to a tuple.

    Various implementations were tested on the corpus of all browse() calls
    performed during a full crawler run (after having installed all website_*
    modules) and this one was the most efficient overall.

    A possible bit of correctness was sacrificed by not doing any test on
    Iterable and just assuming that any non-atomic type was an iterable of
    some kind.

    :rtype: tuple
    """
    # much of the corpus is falsy objects (empty list, tuple or set, None)
    if not arg:
        return ()

    # `type in set` is significantly faster (because more restrictive) than
    # isinstance(arg, set) or issubclass(type, set); and for new-style classes
    # obj.__class__ is equivalent to but faster than type(obj). Not relevant
    # (and looks much worse) in most cases, but over millions of calls it
    # does have a very minor effect.
    if arg.__class__ in atoms:
        return arg,

    return tuple(arg)
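
# Behaviour sketch (not executed):
#
#     _normalize_ids(None)      # -> ()
#     _normalize_ids(42)        # -> (42,)
#     _normalize_ids([1, 2])    # -> (1, 2)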

# keep those imports here to avoid dependency cycle errors
from .osv import expression
from .fields import Field, SpecialValue, FailedValue

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: