# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

"""
    Object Relational Mapping module:
     * Hierarchical structure
     * Constraints consistency and validation
     * Object metadata depends on its status
     * Optimised processing by complex query (multiple actions at once)
     * Default field values
     * Permissions optimisation
     * Persistent object: DB postgresql
     * Data conversion
     * Multi-level caching system
     * Two different inheritance mechanisms
     * Rich set of field types:
          - classical (varchar, integer, boolean, ...)
          - relational (one2many, many2one, many2many)
          - functional
"""

import datetime
import itertools
import logging
import pickle
import re
from collections import defaultdict, MutableMapping
from inspect import getmembers

import dateutil.relativedelta
import psycopg2
from lxml import etree

from . import SUPERUSER_ID
from . import api
from . import tools
from .api import Environment
from .exceptions import except_orm, AccessError, MissingError, ValidationError
from .fields import Field
from .osv import fields
from .osv.query import Query
from .tools import lazy_property, ormcache
from .tools.config import config
from .tools.misc import CountingStream, DEFAULT_SERVER_DATETIME_FORMAT, DEFAULT_SERVER_DATE_FORMAT
from .tools.safe_eval import safe_eval as eval
from .tools.translate import _

_logger = logging.getLogger(__name__)
_schema = logging.getLogger(__name__ + '.schema')

regex_order = re.compile('^( *([a-z0-9:_]+|"[a-z0-9:_]+")( *desc| *asc)?( *, *|))+$', re.I)
regex_object_name = re.compile(r'^[a-z0-9_.]+$')
onchange_v7 = re.compile(r"^(\w+)\((.*)\)$")
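
# Illustrative examples (not from the original source) of what these regexes
# accept: regex_order matches ORDER BY-like specifications such as
# "name desc, id", regex_object_name matches model names such as
# "res.partner", and onchange_v7 splits legacy onchange specifications, e.g.
#   onchange_v7.match("onchange_partner_id(partner_id, debit)").groups()
# yields ('onchange_partner_id', 'partner_id, debit').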

AUTOINIT_RECALCULATE_STORED_FIELDS = 1000

def check_object_name(name):
    """ Check if the given name is a valid openerp object name.

        The _name attribute in osv and osv_memory objects is subject to
        some restrictions. This function returns True or False whether
        the given name is allowed or not.

        TODO: this is an approximation. The goal in this approximation
        is to disallow uppercase characters (in some places, we quote
        table/column names and in others not, which leads to this kind
        of errors:

            psycopg2.ProgrammingError: relation "xxx" does not exist).

        The same restriction should apply to both osv and osv_memory
        objects for consistency.

    """
    if regex_object_name.match(name) is None:
        return False
    return True

def raise_on_invalid_object_name(name):
    if not check_object_name(name):
        msg = "The _name attribute %s is not valid." % name
        raise except_orm('ValueError', msg)

POSTGRES_CONFDELTYPES = {
    'RESTRICT': 'r',
    'NO ACTION': 'a',
    'CASCADE': 'c',
    'SET NULL': 'n',
    'SET DEFAULT': 'd',
}

def intersect(la, lb):
    return filter(lambda x: x in lb, la)
123 """ Test whether functions `f` and `g` are identical or have the same name """
124 return f == g or getattr(f, '__name__', 0) == getattr(g, '__name__', 1)

def fix_import_export_id_paths(fieldname):
    """
    Fixes the id fields in import and exports, and splits field paths
    on '/'.

    :param str fieldname: name of the field to import/export
    :return: split field name
    :rtype: list of str
    """
    fixed_db_id = re.sub(r'([^/])\.id', r'\1/.id', fieldname)
    fixed_external_id = re.sub(r'([^/]):id', r'\1/id', fixed_db_id)
    return fixed_external_id.split('/')
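
# For illustration (computed from the substitutions above): ':id' and '.id'
# suffixes are normalized into path components, so
#   fix_import_export_id_paths('order_line/product_id:id')
# returns ['order_line', 'product_id', 'id'], and
#   fix_import_export_id_paths('partner.id')
# returns ['partner', '.id'].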

def pg_varchar(size=0):
    """ Returns the VARCHAR declaration for the provided size:

    * If no size (or an empty or negative size) is provided, return an
      'infinite' VARCHAR
    * Otherwise return a VARCHAR(n)

    :type int size: varchar size, optional
    :rtype: str
    """
    if size:
        if not isinstance(size, int):
            raise TypeError("VARCHAR parameter should be an int, got %s"
                            % type(size))
        if size > 0:
            return 'VARCHAR(%d)' % size
    return 'VARCHAR'
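
# For illustration (follows from the definition above): pg_varchar() and
# pg_varchar(0) return 'VARCHAR', pg_varchar(16) returns 'VARCHAR(16)', and
# pg_varchar('16') raises TypeError.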

FIELDS_TO_PGTYPES = {
    fields.boolean: 'bool',
    fields.integer: 'int4',
    fields.text: 'text',
    fields.html: 'text',
    fields.date: 'date',
    fields.datetime: 'timestamp',
    fields.binary: 'bytea',
    fields.many2one: 'int4',
    fields.serialized: 'text',
}

def get_pg_type(f, type_override=None):
    """
    :param fields._column f: field to get a Postgres type for
    :param type type_override: use the provided type for dispatching instead of the field's own type
    :returns: (postgres_identification_type, postgres_type_specification)
    :rtype: (str, str)
    """
    field_type = type_override or type(f)

    if field_type in FIELDS_TO_PGTYPES:
        pg_type = (FIELDS_TO_PGTYPES[field_type], FIELDS_TO_PGTYPES[field_type])
    elif issubclass(field_type, fields.float):
        # Explicit support for "falsy" digits (0, False) to indicate a
        # NUMERIC field with no fixed precision. The values will be saved
        # in the database with all significant digits.
        # FLOAT8 type is still the default when there is no precision because
        # it is faster for most operations (sums, etc.)
        if f.digits is not None:
            pg_type = ('numeric', 'NUMERIC')
        else:
            pg_type = ('float8', 'DOUBLE PRECISION')
    elif issubclass(field_type, (fields.char, fields.reference)):
        pg_type = ('varchar', pg_varchar(f.size))
    elif issubclass(field_type, fields.selection):
        if (isinstance(f.selection, list) and isinstance(f.selection[0][0], int))\
                or getattr(f, 'size', None) == -1:
            pg_type = ('int4', 'INTEGER')
        else:
            pg_type = ('varchar', pg_varchar(getattr(f, 'size', None)))
    elif issubclass(field_type, fields.function):
        if f._type == 'selection':
            pg_type = ('varchar', pg_varchar())
        else:
            pg_type = get_pg_type(f, getattr(fields, f._type))
    else:
        _logger.warning('%s type not supported!', field_type)
        pg_type = None

    return pg_type
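
# For illustration (a hedged sketch based on the dispatch above): a
# fields.char with size=16 maps to ('varchar', 'VARCHAR(16)'), a fields.float
# with digits=(16, 2) maps to ('numeric', 'NUMERIC'), and a fields.float
# without digits maps to ('float8', 'DOUBLE PRECISION').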

class MetaModel(api.Meta):
    """ Metaclass for the models.

    This class is used as the metaclass for the class :class:`BaseModel` to
    discover the models defined in a module (without instantiating them).
    If the automatic discovery is not needed, it is possible to set the
    model's ``_register`` attribute to False.

    """

    module_to_models = {}

    def __init__(self, name, bases, attrs):
        if not self._register:
            self._register = True
            super(MetaModel, self).__init__(name, bases, attrs)
            return

        if not hasattr(self, '_module'):
            # The (OpenERP) module name can be in the `openerp.addons` namespace
            # or not. For instance, module `sale` can be imported as
            # `openerp.addons.sale` (the right way) or `sale` (for backward
            # compatibility).
            module_parts = self.__module__.split('.')
            if len(module_parts) > 2 and module_parts[:2] == ['openerp', 'addons']:
                module_name = self.__module__.split('.')[2]
            else:
                module_name = self.__module__.split('.')[0]
            self._module = module_name

        # Remember which models to instantiate for this module.
        self.module_to_models.setdefault(self._module, []).append(self)

        # transform columns into new-style fields (enables field inheritance)
        for name, column in self._columns.iteritems():
            if name in self.__dict__:
                _logger.warning("In class %s, field %r overriding an existing value", self, name)
            setattr(self, name, column.to_field())
252 """ Pseudo-ids for new records. """
253 def __nonzero__(self):
256 IdType = (int, long, basestring, NewId)

# maximum number of prefetched records
PREFETCH_MAX = 200

# special columns automatically created by the ORM
LOG_ACCESS_COLUMNS = ['create_uid', 'create_date', 'write_uid', 'write_date']
MAGIC_COLUMNS = ['id'] + LOG_ACCESS_COLUMNS

class BaseModel(object):
    """ Base class for OpenERP models.

    OpenERP models are created by inheriting from this class' subclasses:

    * :class:`Model` for regular database-persisted models

    * :class:`TransientModel` for temporary data, stored in the database but
      automatically vacuumed every so often

    * :class:`AbstractModel` for abstract super classes meant to be shared by
      multiple inheriting models

    The system automatically instantiates every model once per database. Those
    instances represent the available models on each database, and depend on
    which modules are installed on that database. The actual class of each
    instance is built from the Python classes that create and inherit from the
    corresponding model.

    Every model instance is a "recordset", i.e., an ordered collection of
    records of the model. Recordsets are returned by methods like
    :meth:`~.browse`, :meth:`~.search`, or field accesses. Records have no
    explicit representation: a record is represented as a recordset of one
    record.

    To create a class that should not be instantiated, the _register class
    attribute may be set to False.
    """
    __metaclass__ = MetaModel
    _auto = True                # create database backend
    _register = False           # set to False if the model shouldn't be automatically discovered
    _name = None
    _columns = {}
    _constraints = []
    _defaults = {}
    _rec_name = None
    _parent_name = 'parent_id'
    _parent_store = False
    _parent_order = False
    _date_name = 'date'
    _order = 'id'
    _sequence = None
    _description = None
    _translate = True           # set to False to disable translations export for this model

    # dict of {field: method}, with method returning the (name_get of records, {id: fold})
    # to include in the _read_group, if grouped on this field
    _group_by_full = {}

    # Transience
    _transient = False          # True in a TransientModel

    # structure:
    #   { 'parent_model': 'm2o_field', ... }
    _inherits = {}

    # Mapping from inherits'd field name to triple (m, r, f, n) where m is the
    # model from which it is inherits'd, r is the (local) field towards m, f
    # is the _column object itself, and n is the original (i.e. top-most)
    # parent model.
    # Example:
    #   { 'field_name': ('parent_model', 'm2o_field_to_reach_parent',
    #                    field_column_obj, original_parent_model), ... }
    _inherit_fields = {}

    # Mapping of field name to column_info object.
    # This is similar to _inherit_fields but:
    # 1. includes self fields,
    # 2. uses column_info instead of a triple.
    _all_columns = {}

    _table = None
    _sql_constraints = []

    # model dependencies, for models backed up by sql views:
    # {model_name: field_names, ...}
    _depends = {}

    CONCURRENCY_CHECK_FIELD = '__last_update'

    def log(self, cr, uid, id, message, secondary=False, context=None):
        return _logger.warning("log() is deprecated. Please use OpenChatter notification system instead of the res.log mechanism.")

    def view_init(self, cr, uid, fields_list, context=None):
        """Override this method to do specific things when a view on the object is opened."""
        pass

    def _field_create(self, cr, context=None):
        """ Create entries in ir_model_fields for all the model's fields.

        If necessary, also create an entry in ir_model, and if called from the
        modules loading scheme (by receiving 'module' in the context), also
        create entries in ir_model_data (for the model and the fields).

        - create an entry in ir_model (if there is not already one),
        - create an entry in ir_model_data (if there is not already one, and if
          'module' is in the context),
        - update ir_model_fields with the fields found in _columns
          (TODO there is some redundancy as _columns is updated from
          ir_model_fields in __init__).

        """
        if context is None:
            context = {}
        cr.execute("SELECT id FROM ir_model WHERE model=%s", (self._name,))
        if not cr.rowcount:
            cr.execute('SELECT nextval(%s)', ('ir_model_id_seq',))
            model_id = cr.fetchone()[0]
            cr.execute("INSERT INTO ir_model (id,model, name, info,state) VALUES (%s, %s, %s, %s, %s)", (model_id, self._name, self._description, self.__doc__, 'base'))
        else:
            model_id = cr.fetchone()[0]
        if 'module' in context:
            name_id = 'model_'+self._name.replace('.', '_')
            cr.execute('select * from ir_model_data where name=%s and module=%s', (name_id, context['module']))
            if not cr.rowcount:
                cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, (now() at time zone 'UTC'), (now() at time zone 'UTC'), %s, %s, %s)", \
                    (name_id, context['module'], 'ir.model', model_id)
                )

        cr.execute("SELECT * FROM ir_model_fields WHERE model=%s", (self._name,))
        cols = {}
        for rec in cr.dictfetchall():
            cols[rec['name']] = rec

        ir_model_fields_obj = self.pool.get('ir.model.fields')

        # sparse fields should be created at the end, as they depend on their serialized field already existing
        model_fields = sorted(self._columns.items(), key=lambda x: 1 if x[1]._type == 'sparse' else 0)
        for (k, f) in model_fields:
            vals = {
                'model_id': model_id,
                'model': self._name,
                'name': k,
                'field_description': f.string,
                'ttype': f._type,
                'relation': f._obj or '',
                'select_level': tools.ustr(int(f.select)),
                'readonly': (f.readonly and 1) or 0,
                'required': (f.required and 1) or 0,
                'selectable': (f.selectable and 1) or 0,
                'translate': (f.translate and 1) or 0,
                'relation_field': f._fields_id if isinstance(f, fields.one2many) else '',
                'serialization_field_id': None,
            }
            if getattr(f, 'serialization_field', None):
                # resolve link to serialization_field if specified by name
                serialization_field_id = ir_model_fields_obj.search(cr, SUPERUSER_ID, [('model','=',vals['model']), ('name', '=', f.serialization_field)])
                if not serialization_field_id:
                    raise except_orm(_('Error'), _("Serialization field `%s` not found for sparse field `%s`!") % (f.serialization_field, k))
                vals['serialization_field_id'] = serialization_field_id[0]

            # When it is a custom field, it does not contain f.select
            if context.get('field_state', 'base') == 'manual':
                if context.get('field_name', '') == k:
                    vals['select_level'] = context.get('select', '0')
                # set the value so that the problem does NOT occur next time
                elif k in cols:
                    vals['select_level'] = cols[k]['select_level']

            if k not in cols:
                cr.execute('select nextval(%s)', ('ir_model_fields_id_seq',))
                id = cr.fetchone()[0]
                vals['id'] = id
                cr.execute("""INSERT INTO ir_model_fields (
                    id, model_id, model, name, field_description, ttype,
                    relation,state,select_level,relation_field, translate, serialization_field_id
                ) VALUES (
                    %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s
                )""", (
                    id, vals['model_id'], vals['model'], vals['name'], vals['field_description'], vals['ttype'],
                    vals['relation'], 'base',
                    vals['select_level'], vals['relation_field'], bool(vals['translate']), vals['serialization_field_id']
                ))
                if 'module' in context:
                    name1 = 'field_' + self._table + '_' + k
                    cr.execute("select name from ir_model_data where name=%s", (name1,))
                    if cr.fetchone():
                        name1 = name1 + "_" + str(id)
                    cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, (now() at time zone 'UTC'), (now() at time zone 'UTC'), %s, %s, %s)", \
                        (name1, context['module'], 'ir.model.fields', id)
                    )
            else:
                for key, val in vals.items():
                    if cols[k][key] != vals[key]:
                        cr.execute('update ir_model_fields set field_description=%s where model=%s and name=%s', (vals['field_description'], vals['model'], vals['name']))
                        cr.execute("""UPDATE ir_model_fields SET
                            model_id=%s, field_description=%s, ttype=%s, relation=%s,
                            select_level=%s, readonly=%s ,required=%s, selectable=%s, relation_field=%s, translate=%s, serialization_field_id=%s
                        WHERE
                            model=%s AND name=%s""", (
                            vals['model_id'], vals['field_description'], vals['ttype'],
                            vals['relation'],
                            vals['select_level'], bool(vals['readonly']), bool(vals['required']), bool(vals['selectable']), vals['relation_field'], bool(vals['translate']), vals['serialization_field_id'], vals['model'], vals['name']
                        ))
                        break
        self.invalidate_cache(cr, SUPERUSER_ID)

    @classmethod
    def _add_field(cls, name, field):
        """ Add the given `field` under the given `name` in the class """
        # add field as an attribute and in cls._fields (for reflection)
        if not isinstance(getattr(cls, name, field), Field):
            _logger.warning("In model %r, field %r overriding existing value", cls._name, name)
        setattr(cls, name, field)
        cls._fields[name] = field

        # basic setup of field
        field.set_class_name(cls, name)

        if field.store:
            cls._columns[name] = field.to_column()
        else:
            # remove potential column that may be overridden by field
            cls._columns.pop(name, None)

    @classmethod
    def _pop_field(cls, name):
        """ Remove the field with the given `name` from the model.
            This method should only be used for manual fields.
        """
        field = cls._fields.pop(name)
        cls._columns.pop(name, None)
        cls._all_columns.pop(name, None)
        if hasattr(cls, name):
            delattr(cls, name)
        return field

    @classmethod
    def _add_magic_fields(cls):
        """ Introduce magic fields on the current class

        * id is a "normal" field (with a specific getter)
        * create_uid, create_date, write_uid and write_date have become
          "normal" fields
        * $CONCURRENCY_CHECK_FIELD is a computed field with its computing
          method defined dynamically. Uses ``str(datetime.datetime.utcnow())``
          to get the same structure as the previous
          ``(now() at time zone 'UTC')::timestamp``::

              # select (now() at time zone 'UTC')::timestamp;
                        timezone
              ----------------------------
               2013-06-18 08:30:37.292809

              >>> str(datetime.datetime.utcnow())
              '2013-06-18 08:31:32.821177'
        """
        def add(name, field):
            """ add `field` with the given `name` if it does not exist yet """
            if name not in cls._columns and name not in cls._fields:
                cls._add_field(name, field)

        # cyclic import
        from . import fields

        # this field 'id' must override any other column or field
        cls._add_field('id', fields.Id(automatic=True))

        add('display_name', fields.Char(string='Display Name', automatic=True,
            compute='_compute_display_name'))

        if cls._log_access:
            add('create_uid', fields.Many2one('res.users', string='Created by', automatic=True))
            add('create_date', fields.Datetime(string='Created on', automatic=True))
            add('write_uid', fields.Many2one('res.users', string='Last Updated by', automatic=True))
            add('write_date', fields.Datetime(string='Last Updated on', automatic=True))
            last_modified_name = 'compute_concurrency_field_with_access'
        else:
            last_modified_name = 'compute_concurrency_field'

        # this field must override any other column or field
        cls._add_field(cls.CONCURRENCY_CHECK_FIELD, fields.Datetime(
            string='Last Modified on', compute=last_modified_name, automatic=True))

    @api.one
    def compute_concurrency_field(self):
        self[self.CONCURRENCY_CHECK_FIELD] = \
            datetime.datetime.utcnow().strftime(DEFAULT_SERVER_DATETIME_FORMAT)

    @api.one
    @api.depends('create_date', 'write_date')
    def compute_concurrency_field_with_access(self):
        self[self.CONCURRENCY_CHECK_FIELD] = \
            self.write_date or self.create_date or \
            datetime.datetime.utcnow().strftime(DEFAULT_SERVER_DATETIME_FORMAT)

    #
    # Goal: try to apply inheritance at the instantiation level and
    #       put objects in the pool var
    #
    @classmethod
    def _build_model(cls, pool, cr):
        """ Instantiate a given model.

        This class method instantiates the class of some model (i.e. a class
        deriving from osv or osv_memory). The class might be the class passed
        in argument or, if it inherits from another class, a class constructed
        by combining the two classes.

        """

        # IMPORTANT: the registry contains an instance for each model. The class
        # of each model carries inferred metadata that is shared among the
        # model's instances for this registry, but not among registries. Hence
        # we cannot use that "registry class" for combining model classes by
        # inheritance, since it confuses the metadata inference process.

        # Keep links to non-inherited constraints in cls; this is useful for
        # instance when exporting translations
        cls._local_constraints = cls.__dict__.get('_constraints', [])
        cls._local_sql_constraints = cls.__dict__.get('_sql_constraints', [])

        # determine inherited models
        parents = getattr(cls, '_inherit', [])
        parents = [parents] if isinstance(parents, basestring) else (parents or [])

        # determine the model's name
        name = cls._name or (len(parents) == 1 and parents[0]) or cls.__name__

        # determine the module that introduced the model
        original_module = pool[name]._original_module if name in parents else cls._module

        # build the class hierarchy for the model
        for parent in parents:
            if parent not in pool:
                raise TypeError('The model "%s" specifies a nonexistent parent class "%s"\n'
                                'You may need to add a dependency on the parent class\' module.' % (name, parent))
            parent_model = pool[parent]

            # do not use the class of parent_model, since that class contains
            # inferred metadata; use its ancestor instead
            parent_class = type(parent_model).__base__

            # don't inherit custom fields
            columns = dict((key, val)
                           for key, val in parent_class._columns.iteritems()
                           if not val.manual)
            columns.update(cls._columns)

            inherits = dict(parent_class._inherits)
            inherits.update(cls._inherits)

            depends = dict(parent_class._depends)
            for m, fs in cls._depends.iteritems():
                depends[m] = depends.get(m, []) + fs

            old_constraints = parent_class._constraints
            new_constraints = cls._constraints
            # filter out from old_constraints the ones overridden by a
            # constraint with the same function name in new_constraints
            constraints = new_constraints + [oldc
                for oldc in old_constraints
                if not any(newc[2] == oldc[2] and same_name(newc[0], oldc[0])
                           for newc in new_constraints)
            ]

            sql_constraints = cls._sql_constraints + \
                              parent_class._sql_constraints

            attrs = {
                '_name': name,
                '_register': False,
                '_columns': columns,
                '_inherits': inherits,
                '_depends': depends,
                '_constraints': constraints,
                '_sql_constraints': sql_constraints,
            }
            cls = type(name, (cls, parent_class), attrs)

        # introduce the "registry class" of the model;
        # duplicate some attributes so that the ORM can modify them
        attrs = {
            '_register': False,
            '_columns': dict(cls._columns),
            '_defaults': {},            # filled by Field._determine_default()
            '_inherits': dict(cls._inherits),
            '_depends': dict(cls._depends),
            '_constraints': list(cls._constraints),
            '_sql_constraints': list(cls._sql_constraints),
            '_original_module': original_module,
        }
        cls = type(cls._name, (cls,), attrs)

        # instantiate the model, and initialize it
        model = object.__new__(cls)
        model.__init__(pool, cr)
        return model

    @classmethod
    def _init_function_fields(cls, pool, cr):
        # initialize the list of non-stored function fields for this model
        pool._pure_function_fields[cls._name] = []

        # process store of low-level function fields
        for fname, column in cls._columns.iteritems():
            if hasattr(column, 'digits_change'):
                column.digits_change(cr)
            # filter out existing store about this field
            pool._store_function[cls._name] = [
                stored
                for stored in pool._store_function.get(cls._name, [])
                if (stored[0], stored[1]) != (cls._name, fname)
            ]
            if not isinstance(column, fields.function):
                continue
            if not column.store:
                # register it on the pool for invalidation
                pool._pure_function_fields[cls._name].append(fname)
                continue
            # process store parameter
            store = column.store
            if store is True:
                get_ids = lambda self, cr, uid, ids, c={}: ids
                store = {cls._name: (get_ids, None, column.priority, None)}
            for model, spec in store.iteritems():
                if len(spec) == 4:
                    (fnct, fields2, order, length) = spec
                elif len(spec) == 3:
                    (fnct, fields2, order) = spec
                    length = None
                else:
                    raise except_orm('Error',
                        ('Invalid function definition %s in object %s !\nYou must use the definition: store={object:(fnct, fields, priority, time length)}.' % (fname, cls._name)))
                pool._store_function.setdefault(model, [])
                t = (cls._name, fname, fnct, tuple(fields2) if fields2 else None, order, length)
                if t not in pool._store_function[model]:
                    pool._store_function[model].append(t)
                    pool._store_function[model].sort(key=lambda x: x[4])

    @classmethod
    def _init_manual_fields(cls, pool, cr):
        # Check whether the query has already been done
        if pool.fields_by_model is not None:
            manual_fields = pool.fields_by_model.get(cls._name, [])
        else:
            cr.execute('SELECT * FROM ir_model_fields WHERE model=%s AND state=%s', (cls._name, 'manual'))
            manual_fields = cr.dictfetchall()

        for field in manual_fields:
            if field['name'] in cls._columns:
                continue
            attrs = {
                'string': field['field_description'],
                'required': bool(field['required']),
                'readonly': bool(field['readonly']),
                'domain': eval(field['domain']) if field['domain'] else None,
                'size': field['size'] or None,
                'ondelete': field['on_delete'],
                'translate': (field['translate']),
                'manual': True,
                #'select': int(field['select_level'])
            }
            if field['serialization_field_id']:
                cr.execute('SELECT name FROM ir_model_fields WHERE id=%s', (field['serialization_field_id'],))
                attrs.update({'serialization_field': cr.fetchone()[0], 'type': field['ttype']})
                if field['ttype'] in ['many2one', 'one2many', 'many2many']:
                    attrs.update({'relation': field['relation']})
                cls._columns[field['name']] = fields.sparse(**attrs)
            elif field['ttype'] == 'selection':
                cls._columns[field['name']] = fields.selection(eval(field['selection']), **attrs)
            elif field['ttype'] == 'reference':
                cls._columns[field['name']] = fields.reference(selection=eval(field['selection']), **attrs)
            elif field['ttype'] == 'many2one':
                cls._columns[field['name']] = fields.many2one(field['relation'], **attrs)
            elif field['ttype'] == 'one2many':
                cls._columns[field['name']] = fields.one2many(field['relation'], field['relation_field'], **attrs)
            elif field['ttype'] == 'many2many':
                _rel1 = field['relation'].replace('.', '_')
                _rel2 = field['model'].replace('.', '_')
                _rel_name = 'x_%s_%s_%s_rel' % (_rel1, _rel2, field['name'])
                cls._columns[field['name']] = fields.many2many(field['relation'], _rel_name, 'id1', 'id2', **attrs)
            else:
                cls._columns[field['name']] = getattr(fields, field['ttype'])(**attrs)

    @classmethod
    def _init_constraints_onchanges(cls):
        # store sql constraint error messages
        for (key, _, msg) in cls._sql_constraints:
            cls.pool._sql_error[cls._table + '_' + key] = msg

        # collect constraint and onchange methods
        cls._constraint_methods = []
        cls._onchange_methods = defaultdict(list)
        for attr, func in getmembers(cls, callable):
            if hasattr(func, '_constrains'):
                if not all(name in cls._fields for name in func._constrains):
                    _logger.warning("@constrains%r parameters must be field names", func._constrains)
                cls._constraint_methods.append(func)
            if hasattr(func, '_onchange'):
                if not all(name in cls._fields for name in func._onchange):
                    _logger.warning("@onchange%r parameters must be field names", func._onchange)
                for name in func._onchange:
                    cls._onchange_methods[name].append(func)
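
    # For illustration only (a hedged sketch, not part of this file): the
    # methods collected above are declared on concrete models with the api
    # decorators, e.g. on a hypothetical model:
    #
    #     @api.constrains('start_date', 'end_date')
    #     def _check_dates(self):
    #         for record in self:
    #             if record.start_date and record.end_date \
    #                     and record.start_date > record.end_date:
    #                 raise ValidationError("End date must follow start date")
    #
    #     @api.onchange('partner_id')
    #     def _onchange_partner_id(self):
    #         self.contact_name = self.partner_id.name
    #
    # _init_constraints_onchanges() then files _check_dates in
    # cls._constraint_methods and _onchange_partner_id in
    # cls._onchange_methods['partner_id'].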

    def __new__(cls):
        # In the past, this method was registering the model class in the server.
        # This job is now done entirely by the metaclass MetaModel.
        #
        # Do not create an instance here. Model instances are created by method
        # _build_model().
        return None

    def __init__(self, pool, cr):
        """ Initialize a model and make it part of the given registry.

        - copy the stored fields' functions in the registry,
        - retrieve custom fields and add them in the model,
        - ensure there is a many2one for each _inherits'd parent,
        - update the children's _columns,
        - give a chance to each field to initialize itself.

        """
        cls = type(self)

        # link the class to the registry, and update the registry
        cls.pool = pool
        cls._model = self              # backward compatibility
        pool.add(cls._name, self)

        # determine description, table, sequence and log_access
        if not cls._description:
            cls._description = cls._name
        if not cls._table:
            cls._table = cls._name.replace('.', '_')
        if not cls._sequence:
            cls._sequence = cls._table + '_id_seq'
        if not hasattr(cls, '_log_access'):
            # If _log_access is not specified, it is the same value as _auto.
            cls._log_access = cls._auto

        # Transience
        if cls.is_transient():
            cls._transient_check_count = 0
            cls._transient_max_count = config.get('osv_memory_count_limit')
            cls._transient_max_hours = config.get('osv_memory_age_limit')
            assert cls._log_access, \
                "TransientModels must have log_access turned on, " \
                "in order to implement their access rights policy"

        # retrieve new-style fields (from above registry class) and duplicate
        # them (to avoid clashes with inheritance between different models)
        cls._fields = {}
        above = cls.__bases__[0]
        for attr, field in getmembers(above, Field.__instancecheck__):
            if not field.inherited:
                cls._add_field(attr, field.new())

        # introduce magic fields
        cls._add_magic_fields()

        # register stuff about low-level function fields and custom fields
        cls._init_function_fields(pool, cr)
        cls._init_manual_fields(pool, cr)

        # process _inherits
        cls._inherits_check()
        cls._inherits_reload()

        # register constraints and onchange methods
        cls._init_constraints_onchanges()

        # check defaults
        for k in cls._defaults:
            assert k in cls._fields, \
                "Model %s has a default for nonexisting field %s" % (cls._name, k)

        # restart columns
        for column in cls._columns.itervalues():
            column.restart()

        # validate rec_name
        if cls._rec_name:
            assert cls._rec_name in cls._fields, \
                "Invalid rec_name %s for model %s" % (cls._rec_name, cls._name)
        elif 'name' in cls._fields:
            cls._rec_name = 'name'

        # prepare ormcache, which must be shared by all instances of the model
        cls._ormcache = {}

    @api.model
    def _is_an_ordinary_table(self):
        self.env.cr.execute("""\
            SELECT  1
            FROM    pg_class
            WHERE   relname = %s
            AND     relkind = %s""", [self._table, 'r'])
        return bool(self.env.cr.fetchone())

    def __export_xml_id(self):
        """ Return a valid xml_id for the record `self`. """
        if not self._is_an_ordinary_table():
            raise Exception(
                "You can not export the column ID of model %s, because the "
                "table %s is not an ordinary table."
                % (self._name, self._table))
        ir_model_data = self.sudo().env['ir.model.data']
        data = ir_model_data.search([('model', '=', self._name), ('res_id', '=', self.id)])
        if data:
            if data[0].module:
                return '%s.%s' % (data[0].module, data[0].name)
            else:
                return data[0].name
        else:
            postfix = 0
            name = '%s_%s' % (self._table, self.id)
            while ir_model_data.search([('module', '=', '__export__'), ('name', '=', name)]):
                postfix += 1
                name = '%s_%s_%s' % (self._table, self.id, postfix)
            ir_model_data.create({
                'model': self._name,
                'res_id': self.id,
                'module': '__export__',
                'name': name,
            })
            return '__export__.' + name

    @api.multi
    def __export_rows(self, fields):
        """ Export fields of the records in `self`.

            :param fields: list of lists of fields to traverse
            :return: list of lists of corresponding values
        """
        lines = []
        for record in self:
            # main line of record, initially empty
            current = [''] * len(fields)
            lines.append(current)

            # list of primary fields followed by secondary field(s)
            primary_done = []

            # process column by column
            for i, path in enumerate(fields):
                if not path:
                    continue

                name = path[0]
                if name in primary_done:
                    continue

                if name == '.id':
                    current[i] = str(record.id)
                elif name == 'id':
                    current[i] = record.__export_xml_id()
                else:
                    field = record._fields[name]
                    value = record[name]

                    # this part could be simpler, but it has to be done this way
                    # in order to reproduce the former behavior
                    if not isinstance(value, BaseModel):
                        current[i] = field.convert_to_export(value, self.env)
                    else:
                        primary_done.append(name)

                        # This is a special case, its strange behavior is intended!
                        if field.type == 'many2many' and len(path) > 1 and path[1] == 'id':
                            xml_ids = [r.__export_xml_id() for r in value]
                            current[i] = ','.join(xml_ids) or False
                            continue

                        # recursively export the fields that follow name
                        fields2 = [(p[1:] if p and p[0] == name else []) for p in fields]
                        lines2 = value.__export_rows(fields2)
                        if lines2:
                            # merge first line with record's main line
                            for j, val in enumerate(lines2[0]):
                                if val:
                                    current[j] = val
                            # check value of current field
                            if not current[i]:
                                # assign xml_ids, and forget about remaining lines
                                xml_ids = [item[1] for item in value.name_get()]
                                current[i] = ','.join(xml_ids)
                            # append the other lines at the end
                            lines += lines2[1:]
                        else:
                            current[i] = False

        return lines

    @api.multi
    def export_data(self, fields_to_export, raw_data=False):
        """ Export fields for selected objects

            :param fields_to_export: list of fields
            :param raw_data: True to return value in native Python type
            :rtype: dictionary with a *datas* matrix

            This method is used when exporting data via client menu
        """
        fields_to_export = map(fix_import_export_id_paths, fields_to_export)
        if raw_data:
            self = self.with_context(export_raw_data=True)
        return {'datas': self.__export_rows(fields_to_export)}
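
    # For illustration only (hedged; the model and field paths are
    # hypothetical): export_data() takes the same slash-separated paths as the
    # import side, e.g.
    #
    #     records.export_data(['id', 'name', 'order_line/product_id/id'])['datas']
    #
    # returns a row-major matrix in which the first row of a record carries
    # its scalar values and the following rows carry the extra one2many
    # lines, mirroring __export_rows() above.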

    def import_data(self, cr, uid, fields, datas, mode='init', current_module='', noupdate=False, context=None, filename=None):
        """
        .. deprecated:: 7.0
            Use :meth:`~load` instead

        Import given data in given module

        This method is used when importing data via client menu.

        Example of fields to import for a sale.order::

            .id,                         (=database_id)
            partner_id,                  (=name_search)
            order_line/.id,              (=database_id)
            order_line/name,
            order_line/product_id/id,    (=xml id)
            order_line/price_unit,
            order_line/product_uom_qty,
            order_line/product_uom/id    (=xml_id)

        This method returns a 4-tuple with the following structure::

            (return_code, errored_resource, error_message, unused)

        * The first item is a return code, it is ``-1`` in case of
          import error, or the last imported row number in case of success
        * The second item contains the record data dict that failed to import
          in case of error, otherwise it's 0
        * The third item contains an error message string in case of error,
          otherwise it's 0
        * The last item is currently unused, with no specific semantics

        :param fields: list of fields to import
        :param datas: data to import
        :param mode: 'init' or 'update' for record creation
        :param current_module: module name
        :param noupdate: flag for record creation
        :param filename: optional file to store partial import state for recovery
        :returns: 4-tuple in the form (return_code, errored_resource, error_message, unused)
        :rtype: (int, dict or 0, str or 0, str or 0)
        """
        context = dict(context) if context is not None else {}
        context['_import_current_module'] = current_module

        fields = map(fix_import_export_id_paths, fields)
        ir_model_data_obj = self.pool.get('ir.model.data')

        def log(m):
            if m['type'] == 'error':
                raise Exception(m['message'])

        position = 0
        if config.get('import_partial') and filename:
            with open(config.get('import_partial'), 'rb') as partial_import_file:
                data = pickle.load(partial_import_file)
                position = data.get(filename, 0)

        try:
            for res_id, xml_id, res, info in self._convert_records(cr, uid,
                            self._extract_records(cr, uid, fields, datas,
                                                  context=context, log=log),
                            context=context, log=log):
                ir_model_data_obj._update(cr, uid, self._name,
                     current_module, res, mode=mode, xml_id=xml_id,
                     noupdate=noupdate, res_id=res_id, context=context)
                position = info.get('rows', {}).get('to', 0) + 1
                if config.get('import_partial') and filename and (not (position % 100)):
                    with open(config.get('import_partial'), 'rb') as partial_import:
                        data = pickle.load(partial_import)
                    data[filename] = position
                    with open(config.get('import_partial'), 'wb') as partial_import:
                        pickle.dump(data, partial_import)
                    if context.get('defer_parent_store_computation'):
                        self._parent_store_compute(cr)
                    cr.commit()
        except Exception, e:
            cr.rollback()
            return -1, {}, 'Line %d : %s' % (position + 1, tools.ustr(e)), ''

        if context.get('defer_parent_store_computation'):
            self._parent_store_compute(cr)
        return position, 0, 0, 0

    def load(self, cr, uid, fields, data, context=None):
        """
        Attempts to load the data matrix, and returns a list of ids (or
        ``False`` if there was an error and no id could be generated) and a
        list of messages.

        The ids are those of the records created and saved (in database), in
        the same order they were extracted from the file. They can be passed
        directly to :meth:`~read`

        :param fields: list of fields to import, at the same index as the corresponding data
        :type fields: list(str)
        :param data: row-major matrix of data to import
        :type data: list(list(str))
        :param dict context:
        :returns: {ids: list(int)|False, messages: [Message]}
        """
        cr.execute('SAVEPOINT model_load')
        messages = []

        fields = map(fix_import_export_id_paths, fields)
        ModelData = self.pool['ir.model.data']
        ModelData.clear_caches()

        fg = self.fields_get(cr, uid, context=context)

        mode = 'init'
        current_module = ''
        noupdate = False

        ids = []
        for id, xid, record, info in self._convert_records(cr, uid,
                self._extract_records(cr, uid, fields, data,
                                      context=context, log=messages.append),
                context=context, log=messages.append):
            try:
                cr.execute('SAVEPOINT model_load_save')
            except psycopg2.InternalError, e:
                # broken transaction, exit and hope the source error was
                # already logged
                if not any(message['type'] == 'error' for message in messages):
                    messages.append(dict(info, type='error', message=
                        u"Unknown database error: '%s'" % e))
                break
            try:
                ids.append(ModelData._update(cr, uid, self._name,
                     current_module, record, mode=mode, xml_id=xid,
                     noupdate=noupdate, res_id=id, context=context))
                cr.execute('RELEASE SAVEPOINT model_load_save')
            except psycopg2.Warning, e:
                messages.append(dict(info, type='warning', message=str(e)))
                cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
            except psycopg2.Error, e:
                messages.append(dict(
                    info, type='error',
                    **PGERROR_TO_OE[e.pgcode](self, fg, info, e)))
                # Failed to write, log to messages, rollback savepoint (to
                # avoid broken transaction) and keep going
                cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
            except Exception, e:
                message = (_('Unknown error during import:') +
                           ' %s: %s' % (type(e), unicode(e)))
                moreinfo = _('Resolve other errors first')
                messages.append(dict(info, type='error',
                                     message=message, moreinfo=moreinfo))
                # Failed for some reason, perhaps due to invalid data supplied,
                # rollback savepoint and keep going
                cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
        if any(message['type'] == 'error' for message in messages):
            cr.execute('ROLLBACK TO SAVEPOINT model_load')
            ids = False
        return {'ids': ids, 'messages': messages}
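
    # For illustration only (hedged sketch; res.partner and its fields come
    # from the base addon): load() consumes a header plus row-major data, e.g.
    #
    #     result = registry['res.partner'].load(cr, uid,
    #         ['id', 'name', 'is_company'],
    #         [['partner_foo', 'Foo', 'True'],
    #          ['partner_bar', 'Bar', 'False']])
    #
    # returns {'ids': [...], 'messages': [...]}; if any message has
    # type 'error', the whole batch is rolled back and 'ids' is False.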

    def _extract_records(self, cr, uid, fields_, data,
                         context=None, log=lambda a: None):
        """ Generates record dicts from the data sequence.

        The result is a generator of dicts mapping field names to raw
        (unconverted, unvalidated) values.

        For relational fields, if sub-fields were provided the value will be
        a list of sub-records

        The following sub-fields may be set on the record (by key):
        * None is the name_get for the record (to use with name_create/name_search)
        * "id" is the External ID for the record
        * ".id" is the Database ID for the record
        """
        columns = dict((k, v.column) for k, v in self._all_columns.iteritems())
        # Fake columns to avoid special cases in extractor
        columns[None] = fields.char('rec_name')
        columns['id'] = fields.char('External ID')
        columns['.id'] = fields.integer('Database ID')

        # m2o fields can't be on multiple lines so exclude them from the
        # is_relational field rows filter, but special-case it later on to
        # be handled with relational fields (as it can have subfields)
        is_relational = lambda field: columns[field]._type in ('one2many', 'many2many', 'many2one')
        get_o2m_values = itemgetter_tuple(
            [index for index, field in enumerate(fields_)
                   if columns[field[0]]._type == 'one2many'])
        get_nono2m_values = itemgetter_tuple(
            [index for index, field in enumerate(fields_)
                   if columns[field[0]]._type != 'one2many'])
        # Checks if the provided row has any non-empty non-relational field
        def only_o2m_values(row, f=get_nono2m_values, g=get_o2m_values):
            return any(g(row)) and not any(f(row))

        index = 0
        while True:
            if index >= len(data): return

            row = data[index]
            # copy non-relational fields to record dict
            record = dict((field[0], value)
                          for field, value in itertools.izip(fields_, row)
                          if not is_relational(field[0]))

            # Get all following rows which have relational values attached to
            # the current record (no non-relational values)
            record_span = itertools.takewhile(
                only_o2m_values, itertools.islice(data, index + 1, None))
            # stitch record row back on for relational fields
            record_span = list(itertools.chain([row], record_span))
            for relfield in set(
                    field[0] for field in fields_
                             if is_relational(field[0])):
                column = columns[relfield]
                # FIXME: how to not use _obj without relying on fields_get?
                Model = self.pool[column._obj]

                # get only cells for this sub-field, should be strictly
                # non-empty, field path [None] is for name_get column
                indices, subfields = zip(*((index, field[1:] or [None])
                                           for index, field in enumerate(fields_)
                                           if field[0] == relfield))

                # return all rows which have at least one value for the
                # subfields of relfield
                relfield_data = filter(any, map(itemgetter_tuple(indices), record_span))
                record[relfield] = [subrecord
                    for subrecord, _subinfo in Model._extract_records(
                        cr, uid, subfields, relfield_data,
                        context=context, log=log)]

            yield record, {'rows': {
                'from': index,
                'to': index + len(record_span) - 1,
            }}
            index += len(record_span)
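
    # For illustration (derived from the logic above): with
    #     fields_ = [['name'], ['order_line', 'name']]
    # and data rows
    #     [['SO1', 'line 1'], ['', 'line 2']]
    # the second row carries only one2many values, so it is folded into the
    # first record, and the generator yields
    #     ({'name': 'SO1',
    #       'order_line': [{'name': 'line 1'}, {'name': 'line 2'}]},
    #      {'rows': {'from': 0, 'to': 1}})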

    def _convert_records(self, cr, uid, records,
                         context=None, log=lambda a: None):
        """ Converts records from the source iterable (recursive dicts of
        strings) into forms which can be written to the database (via
        self.create or (ir.model.data)._update)

        :returns: a list of triplets of (id, xid, record)
        :rtype: list((int|None, str|None, dict))
        """
        if context is None: context = {}
        Converter = self.pool['ir.fields.converter']
        columns = dict((k, v.column) for k, v in self._all_columns.iteritems())
        Translation = self.pool['ir.translation']
        field_names = dict(
            (f, (Translation._get_source(cr, uid, self._name + ',' + f, 'field',
                                         context.get('lang'))
                 or column.string))
            for f, column in columns.iteritems())

        convert = Converter.for_model(cr, uid, self, context=context)

        def _log(base, field, exception):
            type = 'warning' if isinstance(exception, Warning) else 'error'
            # logs the logical (not human-readable) field name for automated
            # processing of response, but injects human readable in message
            record = dict(base, type=type, field=field,
                          message=unicode(exception.args[0]) % base)
            if len(exception.args) > 1 and exception.args[1]:
                record.update(exception.args[1])
            log(record)

        stream = CountingStream(records)
        for record, extras in stream:
            dbid = False
            xid = False
            # name_get/name_create
            if None in record: pass
            # xid
            if 'id' in record:
                xid = record['id']
            # dbid
            if '.id' in record:
                try:
                    dbid = int(record['.id'])
                except ValueError:
                    # in case of overridden id column
                    dbid = record['.id']
                if not self.search(cr, uid, [('id', '=', dbid)], context=context):
                    log(dict(extras,
                        type='error',
                        record=stream.index,
                        field='.id',
                        message=_(u"Unknown database identifier '%s'") % dbid))
                    dbid = False

            converted = convert(record, lambda field, err: \
                _log(dict(extras, record=stream.index, field=field_names[field]), field, err))

            yield dbid, xid, converted, dict(extras, record=stream.index)

    @api.multi
    def _validate_fields(self, field_names):
        field_names = set(field_names)

        # old-style constraint methods
        trans = self.env['ir.translation']
        cr, uid, context = self.env.args
        ids = self.ids
        errors = []
        for fun, msg, names in self._constraints:
            try:
                # validation must be context-independent; call `fun` without context
                valid = not (set(names) & field_names) or fun(self._model, cr, uid, ids)
                extra_error = None
            except Exception, e:
                _logger.debug('Exception while validating constraint', exc_info=True)
                valid = False
                extra_error = tools.ustr(e)
            if not valid:
                if callable(msg):
                    res_msg = msg(self._model, cr, uid, ids, context=context)
                    if isinstance(res_msg, tuple):
                        template, params = res_msg
                        res_msg = template % params
                else:
                    res_msg = trans._get_source(self._name, 'constraint', self.env.lang, msg)
                if extra_error:
                    res_msg += "\n\n%s\n%s" % (_('Error details:'), extra_error)
                errors.append(
                    _("Field(s) `%s` failed against a constraint: %s") %
                    (', '.join(names), res_msg)
                )
        if errors:
            raise ValidationError('\n'.join(errors))

        # new-style constraint methods
        for check in self._constraint_methods:
            if set(check._constrains) & field_names:
                try:
                    check(self)
                except ValidationError, e:
                    raise
                except Exception, e:
                    raise ValidationError("Error while validating constraint\n\n%s" % tools.ustr(e))

    @api.model
    def default_get(self, fields_list):
        """ default_get(fields) -> default_values

        Return default values for the fields in `fields_list`. Default
        values are determined by the context, user defaults, and the model
        itself.

        :param fields_list: a list of field names
        :return: a dictionary mapping each field name to its corresponding
            default value, if it has one.
        """
        # trigger view init hook
        self.view_init(fields_list)

        defaults = {}
        parent_fields = defaultdict(list)

        for name in fields_list:
            # 1. look up context
            key = 'default_' + name
            if key in self._context:
                defaults[name] = self._context[key]
                continue

            # 2. look up ir_values
            #    Note: performance is good, because get_defaults_dict is cached!
            ir_values_dict = self.env['ir.values'].get_defaults_dict(self._name)
            if name in ir_values_dict:
                defaults[name] = ir_values_dict[name]
                continue

            field = self._fields.get(name)

            # 3. look up property fields
            #    TODO: get rid of this one
            if field and field.company_dependent:
                defaults[name] = self.env['ir.property'].get(name, self._name)
                continue

            # 4. look up field.default
            if field and field.default:
                defaults[name] = field.default(self)
                continue

            # 5. delegate to parent model
            if field and field.inherited:
                field = field.related_field
                parent_fields[field.model_name].append(field.name)

        # convert default values to the right format
        defaults = self._convert_to_cache(defaults, validate=False)
        defaults = self._convert_to_write(defaults)

        # add default values for inherited fields
        for model, names in parent_fields.iteritems():
            defaults.update(self.env[model].default_get(names))

        return defaults
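
    # For illustration (hedged; assumes a field called 'name'): given the
    # lookup order above, a context key wins over everything else, e.g.
    #
    #     model.with_context(default_name='Foo').default_get(['name'])
    #
    # returns {'name': 'Foo'} even if ir.values or the field itself define a
    # default.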

    def fields_get_keys(self, cr, user, context=None):
        res = self._columns.keys()
        # TODO I believe this loop can be replaced by
        # res.extend(self._inherit_fields.keys())
        for parent in self._inherits:
            res.extend(self.pool[parent].fields_get_keys(cr, user, context))
        return res

    def _rec_name_fallback(self, cr, uid, context=None):
        rec_name = self._rec_name
        if rec_name not in self._columns:
            rec_name = self._columns.keys()[0] if len(self._columns.keys()) > 0 else "id"
        return rec_name

    #
    # Override this method if you need a window title that depends on the context
    #
    def view_header_get(self, cr, user, view_id=None, view_type='form', context=None):
        return False

    def user_has_groups(self, cr, uid, groups, context=None):
        """Return true if the user is at least member of one of the groups
           in `groups`. Typically used to resolve `groups` attribute
           in view and model definitions.

           :param str groups: comma-separated list of fully-qualified group
                external IDs, e.g.: ``base.group_user,base.group_system``
           :return: True if the current user is a member of one of the
                given groups
        """
        return any(self.pool['res.users'].has_group(cr, uid, group_ext_id)
                   for group_ext_id in groups.split(','))
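
    # For illustration: a typical guard in an override (these group external
    # IDs exist in the base module):
    #
    #     if self.user_has_groups(cr, uid, 'base.group_user,base.group_system'):
    #         ...  # behavior reserved for employees and settings users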

    def _get_default_form_view(self, cr, user, context=None):
        """ Generates a default single-line form view using all fields
        of the current model except the m2m and o2m ones.

        :param cr: database cursor
        :param int user: user id
        :param dict context: connection context
        :returns: a form view as an lxml document
        :rtype: etree._Element
        """
        view = etree.Element('form', string=self._description)
        group = etree.SubElement(view, 'group', col="4")
        for fname, field in self._fields.iteritems():
            if field.automatic or field.type in ('one2many', 'many2many'):
                continue

            etree.SubElement(group, 'field', name=fname)
            if field.type == 'text':
                etree.SubElement(group, 'newline')
        return view

    def _get_default_search_view(self, cr, user, context=None):
        """ Generates a single-field search view, based on _rec_name.

        :param cr: database cursor
        :param int user: user id
        :param dict context: connection context
        :returns: a search view as an lxml document
        :rtype: etree._Element
        """
        view = etree.Element('search', string=self._description)
        etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))
        return view

    def _get_default_tree_view(self, cr, user, context=None):
        """ Generates a single-field tree view, based on _rec_name.

        :param cr: database cursor
        :param int user: user id
        :param dict context: connection context
        :returns: a tree view as an lxml document
        :rtype: etree._Element
        """
        view = etree.Element('tree', string=self._description)
        etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))
        return view

    def _get_default_calendar_view(self, cr, user, context=None):
        """ Generates a default calendar view by trying to infer
        calendar fields from a number of pre-set attribute names

        :param cr: database cursor
        :param int user: user id
        :param dict context: connection context
        :returns: a calendar view
        :rtype: etree._Element
        """
        def set_first_of(seq, in_, to):
            """Sets the first value of `seq` also found in `in_` to
            the `to` attribute of the view being closed over.

            Returns whether it's found a suitable value (and set it on
            the attribute) or not
            """
            for item in seq:
                if item in in_:
                    view.set(to, item)
                    return True
            return False

        view = etree.Element('calendar', string=self._description)
        etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))

        if self._date_name not in self._columns:
            date_found = False
            for dt in ['date', 'date_start', 'x_date', 'x_date_start']:
                if dt in self._columns:
                    self._date_name = dt
                    date_found = True
                    break

            if not date_found:
                raise except_orm(_('Invalid Object Architecture!'), _("Insufficient fields for Calendar View!"))
        view.set('date_start', self._date_name)

        set_first_of(["user_id", "partner_id", "x_user_id", "x_partner_id"],
                     self._columns, 'color')

        if not set_first_of(["date_stop", "date_end", "x_date_stop", "x_date_end"],
                            self._columns, 'date_stop'):
            if not set_first_of(["date_delay", "planned_hours", "x_date_delay", "x_planned_hours"],
                                self._columns, 'date_delay'):
                raise except_orm(
                    _('Invalid Object Architecture!'),
                    _("Insufficient fields to generate a Calendar View for %s, missing a date_stop or a date_delay") % self._name)

        return view

    def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
        """ fields_view_get([view_id | view_type='form'])

        Get the detailed composition of the requested view like fields, model, view architecture

        :param view_id: id of the view or None
        :param view_type: type of the view to return if view_id is None ('form', 'tree', ...)
        :param toolbar: true to include contextual actions
        :param submenu: deprecated
        :return: dictionary describing the composition of the requested view (including inherited views and extensions)
        :raise AttributeError:
            * if the inherited view has unknown position to work with other than 'before', 'after', 'inside', 'replace'
            * if some tag other than 'position' is found in parent view
        :raise Invalid ArchitectureError: if there is a view type other than form, tree, calendar, search etc. defined on the structure
        """
        if context is None:
            context = {}
        View = self.pool['ir.ui.view']

        result = {
            'model': self._name,
            'field_parent': False,
        }

        # try to find a view_id if none provided
        if not view_id:
            # <view_type>_view_ref in context can be used to override the default view
            view_ref_key = view_type + '_view_ref'
            view_ref = context.get(view_ref_key)
            if view_ref:
                if '.' in view_ref:
                    module, view_ref = view_ref.split('.', 1)
                    cr.execute("SELECT res_id FROM ir_model_data WHERE model='ir.ui.view' AND module=%s AND name=%s", (module, view_ref))
                    view_ref_res = cr.fetchone()
                    if view_ref_res:
                        view_id = view_ref_res[0]
                else:
                    _logger.warning('%r requires a fully-qualified external id (got: %r for model %s). '
                        'Please use the complete `module.view_id` form instead.', view_ref_key, view_ref,
                        self._name)

            if not view_id:
                # otherwise try to find the lowest priority matching ir.ui.view
                view_id = View.default_view(cr, uid, self._name, view_type, context=context)

        # context for post-processing might be overridden
        ctx = context
        if view_id:
            # read the view with inherited views applied
            root_view = View.read_combined(cr, uid, view_id, fields=['id', 'name', 'field_parent', 'type', 'model', 'arch'], context=context)
            result['arch'] = root_view['arch']
            result['name'] = root_view['name']
            result['type'] = root_view['type']
            result['view_id'] = root_view['id']
            result['field_parent'] = root_view['field_parent']
            # override context for postprocessing
            if root_view.get('model') != self._name:
                ctx = dict(context, base_model_name=root_view.get('model'))
        else:
            # fallback on default views methods if no ir.ui.view could be found
            try:
                get_func = getattr(self, '_get_default_%s_view' % view_type)
                arch_etree = get_func(cr, uid, context)
                result['arch'] = etree.tostring(arch_etree, encoding='utf-8')
                result['type'] = view_type
                result['name'] = 'default'
            except AttributeError:
                raise except_orm(_('Invalid Architecture!'), _("No default view of type '%s' could be found !") % view_type)

        # Apply post processing, groups and modifiers etc...
        xarch, xfields = View.postprocess_and_fields(cr, uid, self._name, etree.fromstring(result['arch']), view_id, context=ctx)
        result['arch'] = xarch
        result['fields'] = xfields

        # Add related action information if asked
        if toolbar:
            toclean = ('report_sxw_content', 'report_rml_content', 'report_sxw', 'report_rml', 'report_sxw_content_data', 'report_rml_content_data')
            def clean(x):
                x = x[2]
                for key in toclean:
                    x.pop(key, None)
                return x
            ir_values_obj = self.pool.get('ir.values')
            resprint = ir_values_obj.get(cr, uid, 'action', 'client_print_multi', [(self._name, False)], False, context)
            resaction = ir_values_obj.get(cr, uid, 'action', 'client_action_multi', [(self._name, False)], False, context)
            resrelate = ir_values_obj.get(cr, uid, 'action', 'client_action_relate', [(self._name, False)], False, context)
            resaction = [clean(action) for action in resaction if view_type == 'tree' or not action[2].get('multi')]
            resprint = [clean(print_) for print_ in resprint if view_type == 'tree' or not print_[2].get('multi')]
            # When multi="True" is set, the action will only be displayed in the More menu of list views
            resrelate = [clean(action) for action in resrelate
                         if (action[2].get('multi') and view_type == 'tree') or (not action[2].get('multi') and view_type == 'form')]

            for x in itertools.chain(resprint, resaction, resrelate):
                x['string'] = x['name']

            result['toolbar'] = {
                'print': resprint,
                'action': resaction,
                'relate': resrelate,
            }
        return result

    def get_formview_id(self, cr, uid, id, context=None):
        """ Return a view id to open the document with. This method is meant to be
        overridden in addons that want to give specific view ids, for example.

        :param int id: id of the document to open
        """
        return False

    def get_formview_action(self, cr, uid, id, context=None):
        """ Return an action to open the document. This method is meant to be
        overridden in addons that want to give specific view ids, for example.

        :param int id: id of the document to open
        """
        view_id = self.get_formview_id(cr, uid, id, context=context)
        return {
            'type': 'ir.actions.act_window',
            'res_model': self._name,
            'view_type': 'form',
            'view_mode': 'form',
            'views': [(view_id, 'form')],
            'target': 'current',
            'res_id': id,
        }

    def get_access_action(self, cr, uid, id, context=None):
        """ Return an action to open the document. This method is meant to be
        overridden in addons that want to give specific access to the document.
        By default it opens the form view of the document.

        :param int id: id of the document to open
        """
        return self.get_formview_action(cr, uid, id, context=context)

    def _view_look_dom_arch(self, cr, uid, node, view_id, context=None):
        return self.pool['ir.ui.view'].postprocess_and_fields(
            cr, uid, self._name, node, view_id, context=context)

    def search_count(self, cr, user, args, context=None):
        """ search_count(args) -> int

        Returns the number of records in the current model matching :ref:`the
        provided domain <reference/orm/domains>`.
        """
        res = self.search(cr, user, args, context=context, count=True)
        if isinstance(res, list):
            return len(res)
        return res

    @api.returns('self')
    def search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False):
        """ search(args[, offset=0][, limit=None][, order=None][, count=False])

        Searches for records based on the ``args``
        :ref:`search domain <reference/orm/domains>`.

        :param args: :ref:`A search domain <reference/orm/domains>`. Use an empty
                     list to match all records.
        :param int offset: number of results to ignore (default: none)
        :param int limit: maximum number of records to return (default: all)
        :param str order: sort string
        :param bool count: if ``True``, the call should return the number of
                           records matching ``args`` rather than the records
                           themselves.
        :returns: at most ``limit`` records matching the search criteria

        :raise AccessError: if the user tries to bypass access rules for read on the requested object.
        """
        return self._search(cr, user, args, offset=offset, limit=limit, order=order, context=context, count=count)
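
    # For illustration (hedged; res.partner and its fields come from the base
    # addon):
    #
    #     ids = self.pool['res.partner'].search(cr, uid,
    #         [('is_company', '=', True), ('name', 'ilike', 'foo')],
    #         offset=0, limit=10, order='name asc')
    #
    # returns at most 10 matching ids; with count=True the same call would
    # return the number of matches instead.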

    #
    # display_name, name_get, name_create, name_search
    #

    @api.depends(lambda self: (self._rec_name,) if self._rec_name else ())
    def _compute_display_name(self):
        names = dict(self.name_get())
        for record in self:
            record.display_name = names.get(record.id, False)

    @api.multi
    def name_get(self):
        """ name_get() -> [(id, name), ...]

        Returns a textual representation for the records in ``self``.
        By default this is the value of the ``display_name`` field.

        :return: list of pairs ``(id, text_repr)`` for each record
        :rtype: list(tuple)
        """
        result = []
        name = self._rec_name
        if name in self._fields:
            convert = self._fields[name].convert_to_display_name
            for record in self:
                result.append((record.id, convert(record[name])))
        else:
            for record in self:
                result.append((record.id, "%s,%s" % (record._name, record.id)))
        return result
1709 def name_create(self, name):
1710 """ name_create(name) -> record
1712 Create a new record by calling :meth:`~.create` with only one value
1713 provided: the display name of the new record.
1715 The new record will be initialized with any default values
1716 applicable to this model, or provided through the context. The usual
1717 behavior of :meth:`~.create` applies.
1719 :param name: display name of the record to create
1721 :return: the :meth:`~.name_get` pair value of the created record
1724 record = self.create({self._rec_name: name})
1725 return record.name_get()[0]
1727 _logger.warning("Cannot execute name_create, no _rec_name defined on %s", self._name)
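# Illustrative sketch (assumption): quick record creation from a single
# display name, as used by "create and edit" widgets; assumes a model
# whose _rec_name points to a regular 'name' field:
#
#     id, display_name = self.pool['res.partner'].name_create(
#         cr, uid, 'Acme Corp', context=context)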
1731 def name_search(self, name='', args=None, operator='ilike', limit=100):
1732 """ name_search(name='', args=None, operator='ilike', limit=100) -> records
1734 Search for records that have a display name matching the given
1735 `name` pattern when compared with the given `operator`, while also
1736 matching the optional search domain (`args`).
1738 This is used for example to provide suggestions based on a partial
1739 value for a relational field. It may sometimes be seen as the inverse
1740 function of :meth:`~.name_get`, but it is not guaranteed to be one.
1742 This method is equivalent to calling :meth:`~.search` with a search
1743 domain based on ``display_name`` and then :meth:`~.name_get` on the
1744 result of the search.
1746 :param str name: the name pattern to match
1747 :param list args: optional search domain (see :meth:`~.search` for
1748 syntax), specifying further restrictions
1749 :param str operator: domain operator for matching `name`, such as
1750 ``'like'`` or ``'='``.
1751 :param int limit: optional max number of records to return
1753 :return: list of pairs ``(id, text_repr)`` for all matching records.
1755 return self._name_search(name, args, operator, limit=limit)
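# Illustrative sketch (assumption): name_search() as called by a
# many2one autocomplete widget, with an extra restricting domain:
#
#     results = self.pool['res.partner'].name_search(cr, uid, 'agro',
#         args=[('customer', '=', True)], operator='ilike', limit=8)
#     # -> [(id1, 'Agrolait'), (id2, 'Agro Industries'), ...]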
1757 def _name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100, name_get_uid=None):
1758 # private implementation of name_search, allows passing a dedicated user
1759 # for the name_get part to solve some access rights issues
1760 args = list(args or [])
1761 # optimize out the default criterion of ``ilike ''`` that matches everything
1762 if not self._rec_name:
1763 _logger.warning("Cannot execute name_search, no _rec_name defined on %s", self._name)
1764 elif not (name == '' and operator == 'ilike'):
1765 args += [(self._rec_name, operator, name)]
1766 access_rights_uid = name_get_uid or user
1767 ids = self._search(cr, user, args, limit=limit, context=context, access_rights_uid=access_rights_uid)
1768 res = self.name_get(cr, access_rights_uid, ids, context)
1771 def read_string(self, cr, uid, id, langs, fields=None, context=None):
1774 self.pool.get('ir.translation').check_access_rights(cr, uid, 'read')
1776 fields = self._columns.keys() + self._inherit_fields.keys()
1777 #FIXME: collect all calls to _get_source into one SQL call.
1779 res[lang] = {'code': lang}
1781 if f in self._columns:
1782 res_trans = self.pool.get('ir.translation')._get_source(cr, uid, self._name+','+f, 'field', lang)
1784 res[lang][f] = res_trans
1786 res[lang][f] = self._columns[f].string
1787 for table in self._inherits:
1788 cols = intersect(self._inherit_fields.keys(), fields)
1789 res2 = self.pool[table].read_string(cr, uid, id, langs, cols, context)
1792 res[lang]['code'] = lang
1793 for f in res2[lang]:
1794 res[lang][f] = res2[lang][f]
1797 def write_string(self, cr, uid, id, langs, vals, context=None):
1798 self.pool.get('ir.translation').check_access_rights(cr, uid, 'write')
1799 #FIXME: try to do all the translation updates in one SQL query
1802 if field in self._columns:
1803 src = self._columns[field].string
1804 self.pool.get('ir.translation')._set_ids(cr, uid, self._name+','+field, 'field', lang, [0], vals[field], src)
1805 for table in self._inherits:
1806 cols = intersect(self._inherit_fields.keys(), vals)
1808 self.pool[table].write_string(cr, uid, id, langs, vals, context)
1811 def _add_missing_default_values(self, cr, uid, values, context=None):
1812 # avoid overriding inherited values when parent is set
1814 for tables, parent_field in self._inherits.items():
1815 if parent_field in values:
1816 avoid_tables.append(tables)
1818 # compute missing fields
1819 missing_defaults = set()
1820 for field in self._columns.keys():
1821 if not field in values:
1822 missing_defaults.add(field)
1823 for field in self._inherit_fields.keys():
1824 if (field not in values) and (self._inherit_fields[field][0] not in avoid_tables):
1825 missing_defaults.add(field)
1826 # discard magic fields
1827 missing_defaults -= set(MAGIC_COLUMNS)
1829 if missing_defaults:
1830 # override defaults with the provided values, never allow the other way around
1831 defaults = self.default_get(cr, uid, list(missing_defaults), context)
1833 if ((dv in self._columns and self._columns[dv]._type == 'many2many') \
1834 or (dv in self._inherit_fields and self._inherit_fields[dv][2]._type == 'many2many')) \
1835 and defaults[dv] and isinstance(defaults[dv][0], (int, long)):
1836 defaults[dv] = [(6, 0, defaults[dv])]
1837 if (dv in self._columns and self._columns[dv]._type == 'one2many' \
1838 or (dv in self._inherit_fields and self._inherit_fields[dv][2]._type == 'one2many')) \
1839 and isinstance(defaults[dv], (list, tuple)) and defaults[dv] and isinstance(defaults[dv][0], dict):
1840 defaults[dv] = [(0, 0, x) for x in defaults[dv]]
1841 defaults.update(values)
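# The conversions above rewrite "plain" default values into the command
# tuples expected by create()/write() for relational fields. A minimal
# sketch of the two cases handled here (field names are hypothetical):
#
#     defaults['tag_ids'] = [5, 7]            # m2m default given as ids
#     # becomes: [(6, 0, [5, 7])]             # "replace with these ids"
#
#     defaults['line_ids'] = [{'name': 'x'}]  # o2m default given as dicts
#     # becomes: [(0, 0, {'name': 'x'})]      # "create these records"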
1845 def clear_caches(self):
1846 """ Clear the caches
1848 This clears the caches associated with methods decorated with
1849 ``tools.ormcache`` or ``tools.ormcache_multi``.
1852 self._ormcache.clear()
1853 self.pool._any_cache_cleared = True
1854 except AttributeError:
1858 def _read_group_fill_results(self, cr, uid, domain, groupby, remaining_groupbys,
1859 aggregated_fields, count_field,
1860 read_group_result, read_group_order=None, context=None):
1861 """Helper method for filling in empty groups for all possible values of
1862 the field being grouped by"""
1864 # self._group_by_full should map groupable fields to a method that returns
1865 # a list of all aggregated values that we want to display for this field,
1866 # in the form of a m2o-like pair (key,label).
1867 # This is useful to implement kanban views for instance, where all columns
1868 # should be displayed even if they don't contain any record (see the sketch after this method).
1870 # Grab the list of all groups that should be displayed, including all present groups
1871 present_group_ids = [x[groupby][0] for x in read_group_result if x[groupby]]
1872 all_groups,folded = self._group_by_full[groupby](self, cr, uid, present_group_ids, domain,
1873 read_group_order=read_group_order,
1874 access_rights_uid=openerp.SUPERUSER_ID,
1877 result_template = dict.fromkeys(aggregated_fields, False)
1878 result_template[groupby + '_count'] = 0
1879 if remaining_groupbys:
1880 result_template['__context'] = {'group_by': remaining_groupbys}
1882 # Merge the left_side (current results as dicts) with the right_side (all
1883 # possible values as m2o pairs). Both lists are supposed to be using the
1884 # same ordering, and can be merged in one pass.
1887 def append_left(left_side):
1888 grouped_value = left_side[groupby] and left_side[groupby][0]
1889 if not grouped_value in known_values:
1890 result.append(left_side)
1891 known_values[grouped_value] = left_side
1893 known_values[grouped_value].update({count_field: left_side[count_field]})
1894 def append_right(right_side):
1895 grouped_value = right_side[0]
1896 if not grouped_value in known_values:
1897 line = dict(result_template)
1898 line[groupby] = right_side
1899 line['__domain'] = [(groupby,'=',grouped_value)] + domain
1901 known_values[grouped_value] = line
1902 while read_group_result or all_groups:
1903 left_side = read_group_result[0] if read_group_result else None
1904 right_side = all_groups[0] if all_groups else None
1905 assert left_side is None or left_side[groupby] is False \
1906 or isinstance(left_side[groupby], (tuple,list)), \
1907 'M2O-like pair expected, got %r' % left_side[groupby]
1908 assert right_side is None or isinstance(right_side, (tuple,list)), \
1909 'M2O-like pair expected, got %r' % right_side
1910 if left_side is None:
1911 append_right(all_groups.pop(0))
1912 elif right_side is None:
1913 append_left(read_group_result.pop(0))
1914 elif left_side[groupby] == right_side:
1915 append_left(read_group_result.pop(0))
1916 all_groups.pop(0) # discard right_side
1917 elif not left_side[groupby] or not left_side[groupby][0]:
1918 # left side == "Undefined" entry, not present on right_side
1919 append_left(read_group_result.pop(0))
1921 append_right(all_groups.pop(0))
1925 r['__fold'] = folded.get(r[groupby] and r[groupby][0], False)
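# Illustrative sketch (assumption): how a model can feed this helper
# through _group_by_full so that e.g. kanban columns show all stages,
# even the empty ones. The 'my.stage' model, the 'stage_id' field and
# '_read_group_stage_ids' are hypothetical:
#
#     def _read_group_stage_ids(self, cr, uid, ids, domain,
#                               read_group_order=None,
#                               access_rights_uid=None, context=None):
#         stage_obj = self.pool['my.stage']
#         stage_ids = stage_obj.search(cr, access_rights_uid or uid, [],
#                                      context=context)
#         result = stage_obj.name_get(cr, access_rights_uid or uid,
#                                     stage_ids, context=context)
#         fold = {}  # optional: stage_id -> folded flag
#         return result, fold
#
#     _group_by_full = {'stage_id': _read_group_stage_ids}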
1928 def _read_group_prepare(self, orderby, aggregated_fields, annotated_groupbys, query):
1930 Prepares the GROUP BY and ORDER BY terms for the read_group method. Adds the missing JOIN clause
1931 to the query if the order should be computed against an m2o field.
1932 :param orderby: the orderby definition in the form "%(field)s %(order)s"
1933 :param aggregated_fields: list of aggregated fields in the query
1934 :param annotated_groupbys: list of dictionaries returned by _read_group_process_groupby
1935 These dictionaries contain the qualified name of each groupby
1936 (fully qualified SQL name for the corresponding field),
1937 and the (non-raw) field name.
1938 :param osv.Query query: the query under construction
1939 :return: (groupby_terms, orderby_terms)
1942 groupby_terms = [gb['qualified_field'] for gb in annotated_groupbys]
1943 groupby_fields = [gb['groupby'] for gb in annotated_groupbys]
1945 return groupby_terms, orderby_terms
1947 self._check_qorder(orderby)
1948 for order_part in orderby.split(','):
1949 order_split = order_part.split()
1950 order_field = order_split[0]
1951 if order_field in groupby_fields:
1953 if self._all_columns[order_field.split(':')[0]].column._type == 'many2one':
1954 order_clause = self._generate_order_by(order_part, query).replace('ORDER BY ', '')
1956 orderby_terms.append(order_clause)
1957 groupby_terms += [order_term.split()[0] for order_term in order_clause.split(',')]
1959 order = '"%s" %s' % (order_field, '' if len(order_split) == 1 else order_split[1])
1960 orderby_terms.append(order)
1961 elif order_field in aggregated_fields:
1962 orderby_terms.append(order_part)
1964 # Cannot order by a field that will not appear in the results (needs to be grouped or aggregated)
1965 _logger.warn('%s: read_group order by `%s` ignored, cannot sort on empty columns (not grouped/aggregated)',
1966 self._name, order_part)
1967 return groupby_terms, orderby_terms
1969 def _read_group_process_groupby(self, gb, query, context):
1971 Helper method to collect important information about groupbys: raw
1972 field name, type, time information, qualified name, ...
1974 split = gb.split(':')
1975 field_type = self._all_columns[split[0]].column._type
1976 gb_function = split[1] if len(split) == 2 else None
1977 temporal = field_type in ('date', 'datetime')
1978 tz_convert = field_type == 'datetime' and context.get('tz') in pytz.all_timezones
1979 qualified_field = self._inherits_join_calc(split[0], query)
1982 # Careful with week/year formats:
1983 # - yyyy (lower) must always be used, *except* for week+year formats
1984 # - YYYY (upper) must always be used for week+year format
1985 # e.g. 2006-01-01 is W52 2005 in some locales (de_DE),
1986 # and W1 2006 for others
1988 # Mixing both formats, e.g. 'MMM YYYY' would yield wrong results,
1989 # such as 2006-01-01 being formatted as "January 2005" in some locales.
1990 # Cfr: http://babel.pocoo.org/docs/dates/#date-fields
1991 'day': 'dd MMM yyyy', # yyyy = normal year
1992 'week': "'W'w YYYY", # w YYYY = ISO week-year
1993 'month': 'MMMM yyyy',
1994 'quarter': 'QQQ yyyy',
1998 'day': dateutil.relativedelta.relativedelta(days=1),
1999 'week': datetime.timedelta(days=7),
2000 'month': dateutil.relativedelta.relativedelta(months=1),
2001 'quarter': dateutil.relativedelta.relativedelta(months=3),
2002 'year': dateutil.relativedelta.relativedelta(years=1)
2005 qualified_field = "timezone('%s', timezone('UTC',%s))" % (context.get('tz', 'UTC'), qualified_field)
2006 qualified_field = "date_trunc('%s', %s)" % (gb_function or 'month', qualified_field)
2007 if field_type == 'boolean':
2008 qualified_field = "coalesce(%s,false)" % qualified_field
2013 'display_format': display_formats[gb_function or 'month'] if temporal else None,
2014 'interval': time_intervals[gb_function or 'month'] if temporal else None,
2015 'tz_convert': tz_convert,
2016 'qualified_field': qualified_field
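# Sketch of the SQL produced above for a groupby such as
# 'create_date:week' on a datetime column, assuming tz='Europe/Brussels'
# in the context and a hypothetical table "my_table":
#
#     date_trunc('week', timezone('Europe/Brussels',
#                                 timezone('UTC', "my_table"."create_date")))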
2019 def _read_group_prepare_data(self, key, value, groupby_dict, context):
2021 Helper method to sanitize the data received by read_group. None
2022 values are converted to False, and date/datetime values are formatted
2023 and corrected according to the timezones.
2025 value = False if value is None else value
2026 gb = groupby_dict.get(key)
2027 if gb and gb['type'] in ('date', 'datetime') and value:
2028 if isinstance(value, basestring):
2029 dt_format = DEFAULT_SERVER_DATETIME_FORMAT if gb['type'] == 'datetime' else DEFAULT_SERVER_DATE_FORMAT
2030 value = datetime.datetime.strptime(value, dt_format)
2031 if gb['tz_convert']:
2032 value = pytz.timezone(context['tz']).localize(value)
2035 def _read_group_get_domain(self, groupby, value):
2037 Helper method to construct the domain corresponding to a groupby and
2038 a given value. This is mostly relevant for date/datetime.
2040 if groupby['type'] in ('date', 'datetime') and value:
2041 dt_format = DEFAULT_SERVER_DATETIME_FORMAT if groupby['type'] == 'datetime' else DEFAULT_SERVER_DATE_FORMAT
2042 domain_dt_begin = value
2043 domain_dt_end = value + groupby['interval']
2044 if groupby['tz_convert']:
2045 domain_dt_begin = domain_dt_begin.astimezone(pytz.utc)
2046 domain_dt_end = domain_dt_end.astimezone(pytz.utc)
2047 return [(groupby['field'], '>=', domain_dt_begin.strftime(dt_format)),
2048 (groupby['field'], '<', domain_dt_end.strftime(dt_format))]
2049 if groupby['type'] == 'many2one' and value:
2051 return [(groupby['field'], '=', value)]
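# Sketch of the resulting domain for a monthly groupby on a date field
# (hypothetically named 'date_field'), with value = 2014-07-01 and
# interval = one month:
#
#     [('date_field', '>=', '2014-07-01'), ('date_field', '<', '2014-08-01')]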
2053 def _read_group_format_result(self, data, annotated_groupbys, groupby, groupby_dict, domain, context):
2055 Helper method to format the data contained in the dictionary ``data`` by
2056 adding the domain corresponding to its values, the groupbys in the
2057 context, and by properly formatting the date/datetime values.
2059 domain_group = [dom for gb in annotated_groupbys for dom in self._read_group_get_domain(gb, data[gb['groupby']])]
2060 for k,v in data.iteritems():
2061 gb = groupby_dict.get(k)
2062 if gb and gb['type'] in ('date', 'datetime') and v:
2063 data[k] = babel.dates.format_date(v, format=gb['display_format'], locale=context.get('lang', 'en_US'))
2065 data['__domain'] = domain_group + domain
2066 if len(groupby) - len(annotated_groupbys) >= 1:
2067 data['__context'] = { 'group_by': groupby[len(annotated_groupbys):]}
2071 def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False, lazy=True):
2073 Get the list of records in list view grouped by the given ``groupby`` fields
2075 :param cr: database cursor
2076 :param uid: current user id
2077 :param domain: list specifying search criteria [['field_name', 'operator', 'value'], ...]
2078 :param list fields: list of fields present in the list view specified on the object
2079 :param list groupby: list of groupby descriptions by which the records will be grouped.
2080 A groupby description is either a field (then it will be grouped by that field)
2081 or a string 'field:groupby_function'. Right now, the only functions supported
2082 are 'day', 'week', 'month', 'quarter' or 'year', and they only make sense for
2083 date/datetime fields.
2084 :param int offset: optional number of records to skip
2085 :param int limit: optional max number of records to return
2086 :param dict context: context arguments, like lang, time zone.
2087 :param list orderby: optional ``order by`` specification, for
2088 overriding the natural sort ordering of the
2089 groups, see also :py:meth:`~osv.osv.osv.search`
2090 (supported only for many2one fields currently)
2091 :param bool lazy: if true, the results are only grouped by the first groupby and the
2092 remaining groupbys are put in the __context key. If false, all the groupbys are
2094 :return: list of dictionaries (one dictionary for each record) containing:
2096 * the values of fields grouped by the fields in ``groupby`` argument
2097 * __domain: list of tuples specifying the search criteria
2098 * __context: dictionary with arguments like ``groupby``
2099 :rtype: [{'field_name_1': value, ...}, ...]
2100 :raise AccessError: * if user has no read rights on the requested object
2101 * if user tries to bypass access rules for read on the requested object
2105 self.check_access_rights(cr, uid, 'read')
2106 query = self._where_calc(cr, uid, domain, context=context)
2107 fields = fields or self._columns.keys()
2109 groupby = [groupby] if isinstance(groupby, basestring) else groupby
2110 groupby_list = groupby[:1] if lazy else groupby
2111 annotated_groupbys = [self._read_group_process_groupby(gb, query, context)
2112 for gb in groupby_list]
2113 groupby_fields = [g['field'] for g in annotated_groupbys]
2114 order = orderby or ','.join([g for g in groupby_list])
2115 groupby_dict = {gb['groupby']: gb for gb in annotated_groupbys}
2117 self._apply_ir_rules(cr, uid, query, 'read', context=context)
2118 for gb in groupby_fields:
2119 assert gb in fields, "Fields in 'groupby' must appear in the list of fields to read (perhaps it's missing in the list view?)"
2120 groupby_def = self._columns.get(gb) or (self._inherit_fields.get(gb) and self._inherit_fields.get(gb)[2])
2121 assert groupby_def and groupby_def._classic_write, "Fields in 'groupby' must be regular database-persisted fields (no function or related fields), or function fields with store=True"
2122 if not (gb in self._all_columns):
2123 # Don't allow arbitrary values, as this would be a SQL injection vector!
2124 raise except_orm(_('Invalid group_by'),
2125 _('Invalid group_by specification: "%s".\nA group_by specification must be a list of valid fields.')%(gb,))
2127 aggregated_fields = [
2129 if f not in ('id', 'sequence')
2130 if f not in groupby_fields
2131 if f in self._all_columns
2132 if self._all_columns[f].column._type in ('integer', 'float')
2133 if getattr(self._all_columns[f].column, '_classic_write')]
2135 field_formatter = lambda f: (self._all_columns[f].column.group_operator or 'sum', self._inherits_join_calc(f, query), f)
2136 select_terms = ["%s(%s) AS %s" % field_formatter(f) for f in aggregated_fields]
2138 for gb in annotated_groupbys:
2139 select_terms.append('%s as "%s" ' % (gb['qualified_field'], gb['groupby']))
2141 groupby_terms, orderby_terms = self._read_group_prepare(order, aggregated_fields, annotated_groupbys, query)
2142 from_clause, where_clause, where_clause_params = query.get_sql()
2143 if lazy and (len(groupby_fields) >= 2 or not context.get('group_by_no_leaf')):
2144 count_field = groupby_fields[0] if len(groupby_fields) >= 1 else '_'
2147 count_field += '_count'
2149 prefix_terms = lambda prefix, terms: (prefix + " " + ",".join(terms)) if terms else ''
2150 prefix_term = lambda prefix, term: ('%s %s' % (prefix, term)) if term else ''
2153 SELECT min(%(table)s.id) AS id, count(%(table)s.id) AS %(count_field)s %(extra_fields)s
2161 'table': self._table,
2162 'count_field': count_field,
2163 'extra_fields': prefix_terms(',', select_terms),
2164 'from': from_clause,
2165 'where': prefix_term('WHERE', where_clause),
2166 'groupby': prefix_terms('GROUP BY', groupby_terms),
2167 'orderby': prefix_terms('ORDER BY', orderby_terms),
2168 'limit': prefix_term('LIMIT', int(limit) if limit else None),
2169 'offset': prefix_term('OFFSET', int(offset) if offset else None),
2171 cr.execute(query, where_clause_params)
2172 fetched_data = cr.dictfetchall()
2174 if not groupby_fields:
2177 many2onefields = [gb['field'] for gb in annotated_groupbys if gb['type'] == 'many2one']
2179 data_ids = [r['id'] for r in fetched_data]
2180 many2onefields = list(set(many2onefields))
2181 data_dict = {d['id']: d for d in self.read(cr, uid, data_ids, many2onefields, context=context)}
2182 for d in fetched_data:
2183 d.update(data_dict[d['id']])
2185 data = map(lambda r: {k: self._read_group_prepare_data(k,v, groupby_dict, context) for k,v in r.iteritems()}, fetched_data)
2186 result = [self._read_group_format_result(d, annotated_groupbys, groupby, groupby_dict, domain, context) for d in data]
2187 if lazy and groupby_fields[0] in self._group_by_full:
2188 # Right now, read_group only fills results in lazy mode (by default).
2189 # If you need to have the empty groups in 'eager' mode, then the
2190 # method _read_group_fill_results needs to be completely reimplemented
2192 result = self._read_group_fill_results(cr, uid, domain, groupby_fields[0], groupby[len(annotated_groupbys):],
2193 aggregated_fields, count_field, result, read_group_order=order,
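# Illustrative sketch (assumption): grouping hypothetical confirmed
# orders per month and summing a float field:
#
#     groups = self.pool['my.order'].read_group(cr, uid,
#         domain=[('state', '=', 'done')],
#         fields=['date_order', 'amount_total'],
#         groupby=['date_order:month'], context=context)
#     # each group dict carries the summed 'amount_total', a
#     # 'date_order_count' and a '__domain' to fetch its records.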
2197 def _inherits_join_add(self, current_model, parent_model_name, query):
2199 Add missing table SELECT and JOIN clause to ``query`` for reaching the parent table (no duplicates)
2200 :param current_model: current model object
2201 :param parent_model_name: name of the parent model for which the clauses should be added
2202 :param query: query object on which the JOIN should be added
2204 inherits_field = current_model._inherits[parent_model_name]
2205 parent_model = self.pool[parent_model_name]
2206 parent_alias, parent_alias_statement = query.add_join((current_model._table, parent_model._table, inherits_field, 'id', inherits_field), implicit=True)
2209 def _inherits_join_calc(self, field, query):
2211 Adds missing table select and join clause(s) to ``query`` for reaching
2212 the field coming from an '_inherits' parent table (no duplicates).
2214 :param field: name of inherited field to reach
2215 :param query: query object on which the JOIN should be added
2216 :return: qualified name of field, to be used in SELECT clause
2218 current_table = self
2219 parent_alias = '"%s"' % current_table._table
2220 while field in current_table._inherit_fields and not field in current_table._columns:
2221 parent_model_name = current_table._inherit_fields[field][0]
2222 parent_table = self.pool[parent_model_name]
2223 parent_alias = self._inherits_join_add(current_table, parent_model_name, query)
2224 current_table = parent_table
2225 return '%s."%s"' % (parent_alias, field)
2227 def _parent_store_compute(self, cr):
2228 if not self._parent_store:
2230 _logger.info('Computing parent left and right for table %s...', self._table)
2231 def browse_rec(root, pos=0):
2233 where = self._parent_name+'='+str(root)
2235 where = self._parent_name+' IS NULL'
2236 if self._parent_order:
2237 where += ' order by '+self._parent_order
2238 cr.execute('SELECT id FROM '+self._table+' WHERE '+where)
2240 for id in cr.fetchall():
2241 pos2 = browse_rec(id[0], pos2)
2242 cr.execute('update '+self._table+' set parent_left=%s, parent_right=%s where id=%s', (pos, pos2, root))
2244 query = 'SELECT id FROM '+self._table+' WHERE '+self._parent_name+' IS NULL'
2245 if self._parent_order:
2246 query += ' order by ' + self._parent_order
2249 for (root,) in cr.fetchall():
2250 pos = browse_rec(root, pos)
2251 self.invalidate_cache(cr, SUPERUSER_ID, ['parent_left', 'parent_right'])
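# The parent_left/parent_right columns computed above implement a
# nested-set ("modified preorder tree traversal") encoding: every
# descendant of a node N satisfies
# N.parent_left < parent_left < N.parent_right. When _parent_store is
# enabled, a 'child_of' domain is thus evaluated as a single range
# condition instead of a recursive query, e.g. (sketch):
#
#     ids = self.search(cr, uid, [('id', 'child_of', [root_id])])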
2254 def _update_store(self, cr, f, k):
2255 _logger.info("storing computed values of fields.function '%s'", k)
2256 ss = self._columns[k]._symbol_set
2257 update_query = 'UPDATE "%s" SET "%s"=%s WHERE id=%%s' % (self._table, k, ss[0])
2258 cr.execute('select id from '+self._table)
2259 ids_lst = map(lambda x: x[0], cr.fetchall())
2261 iids = ids_lst[:AUTOINIT_RECALCULATE_STORED_FIELDS]
2262 ids_lst = ids_lst[AUTOINIT_RECALCULATE_STORED_FIELDS:]
2263 res = f.get(cr, self, iids, k, SUPERUSER_ID, {})
2264 for key, val in res.items():
2267 # if val is a many2one, just write the ID
2268 if type(val) == tuple:
2270 if val is not False:
2271 cr.execute(update_query, (ss[1](val), key))
2274 def _check_selection_field_value(self, field, value):
2275 """ Check whether value is among the valid values for the given
2276 selection/reference field, and raise an exception if not.
2278 field = self._fields[field]
2279 field.convert_to_cache(value, self)
2281 def _check_removed_columns(self, cr, log=False):
2282 # iterate on the database columns to drop the NOT NULL constraints
2283 # of fields which were required but have been removed (or will be added by another module)
2284 columns = [c for c in self._columns if not (isinstance(self._columns[c], fields.function) and not self._columns[c].store)]
2285 columns += MAGIC_COLUMNS
2286 cr.execute("SELECT a.attname, a.attnotnull"
2287 " FROM pg_class c, pg_attribute a"
2288 " WHERE c.relname=%s"
2289 " AND c.oid=a.attrelid"
2290 " AND a.attisdropped=%s"
2291 " AND pg_catalog.format_type(a.atttypid, a.atttypmod) NOT IN ('cid', 'tid', 'oid', 'xid')"
2292 " AND a.attname NOT IN %s", (self._table, False, tuple(columns)))
2294 for column in cr.dictfetchall():
2296 _logger.debug("column %s is in the table %s but not in the corresponding object %s",
2297 column['attname'], self._table, self._name)
2298 if column['attnotnull']:
2299 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, column['attname']))
2300 _schema.debug("Table '%s': column '%s': dropped NOT NULL constraint",
2301 self._table, column['attname'])
2303 def _save_constraint(self, cr, constraint_name, type):
2305 Record the creation of a constraint for this model, to make it possible
2306 to delete it later when the module is uninstalled. Type can be either
2307 'f' for a foreign key constraint, or 'u' for any other constraint.
2309 if not self._module:
2310 # no need to save constraints for custom models as they're not part
2313 assert type in ('f', 'u')
2315 SELECT 1 FROM ir_model_constraint, ir_module_module
2316 WHERE ir_model_constraint.module=ir_module_module.id
2317 AND ir_model_constraint.name=%s
2318 AND ir_module_module.name=%s
2319 """, (constraint_name, self._module))
2322 INSERT INTO ir_model_constraint
2323 (name, date_init, date_update, module, model, type)
2324 VALUES (%s, now() AT TIME ZONE 'UTC', now() AT TIME ZONE 'UTC',
2325 (SELECT id FROM ir_module_module WHERE name=%s),
2326 (SELECT id FROM ir_model WHERE model=%s), %s)""",
2327 (constraint_name, self._module, self._name, type))
2329 def _save_relation_table(self, cr, relation_table):
2331 Record the creation of a many2many relation table for this model, to make it possible
2332 to delete it later when the module is uninstalled.
2335 SELECT 1 FROM ir_model_relation, ir_module_module
2336 WHERE ir_model_relation.module=ir_module_module.id
2337 AND ir_model_relation.name=%s
2338 AND ir_module_module.name=%s
2339 """, (relation_table, self._module))
2341 cr.execute("""INSERT INTO ir_model_relation (name, date_init, date_update, module, model)
2342 VALUES (%s, now() AT TIME ZONE 'UTC', now() AT TIME ZONE 'UTC',
2343 (SELECT id FROM ir_module_module WHERE name=%s),
2344 (SELECT id FROM ir_model WHERE model=%s))""",
2345 (relation_table, self._module, self._name))
2346 self.invalidate_cache(cr, SUPERUSER_ID)
2348 # checked version: for direct m2o starting from `self`
2349 def _m2o_add_foreign_key_checked(self, source_field, dest_model, ondelete):
2350 assert self.is_transient() or not dest_model.is_transient(), \
2351 'Many2One relationships from non-transient Model to TransientModel are forbidden'
2352 if self.is_transient() and not dest_model.is_transient():
2353 # TransientModel relationships to regular Models are annoying
2354 # usually because they could block deletion due to the FKs.
2355 # So unless stated otherwise we default them to ondelete=cascade.
2356 ondelete = ondelete or 'cascade'
2357 fk_def = (self._table, source_field, dest_model._table, ondelete or 'set null')
2358 self._foreign_keys.add(fk_def)
2359 _schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s", *fk_def)
2361 # unchecked version: for custom cases, such as m2m relationships
2362 def _m2o_add_foreign_key_unchecked(self, source_table, source_field, dest_model, ondelete):
2363 fk_def = (source_table, source_field, dest_model._table, ondelete or 'set null')
2364 self._foreign_keys.add(fk_def)
2365 _schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s", *fk_def)
2367 def _drop_constraint(self, cr, source_table, constraint_name):
2368 cr.execute("ALTER TABLE %s DROP CONSTRAINT %s" % (source_table,constraint_name))
2370 def _m2o_fix_foreign_key(self, cr, source_table, source_field, dest_model, ondelete):
2371 # Find FK constraint(s) currently established for the m2o field,
2372 # and see whether they are stale or not
2373 cr.execute("""SELECT confdeltype as ondelete_rule, conname as constraint_name,
2374 cl2.relname as foreign_table
2375 FROM pg_constraint as con, pg_class as cl1, pg_class as cl2,
2376 pg_attribute as att1, pg_attribute as att2
2377 WHERE con.conrelid = cl1.oid
2378 AND cl1.relname = %s
2379 AND con.confrelid = cl2.oid
2380 AND array_lower(con.conkey, 1) = 1
2381 AND con.conkey[1] = att1.attnum
2382 AND att1.attrelid = cl1.oid
2383 AND att1.attname = %s
2384 AND array_lower(con.confkey, 1) = 1
2385 AND con.confkey[1] = att2.attnum
2386 AND att2.attrelid = cl2.oid
2387 AND att2.attname = %s
2388 AND con.contype = 'f'""", (source_table, source_field, 'id'))
2389 constraints = cr.dictfetchall()
2391 if len(constraints) == 1:
2392 # Is it the right constraint?
2394 if cons['ondelete_rule'] != POSTGRES_CONFDELTYPES.get((ondelete or 'set null').upper(), 'a')\
2395 or cons['foreign_table'] != dest_model._table:
2396 # Wrong FK: drop it and recreate
2397 _schema.debug("Table '%s': dropping obsolete FK constraint: '%s'",
2398 source_table, cons['constraint_name'])
2399 self._drop_constraint(cr, source_table, cons['constraint_name'])
2401 # it's all good, nothing to do!
2404 # Multiple FKs found for the same field, drop them all, and re-create
2405 for cons in constraints:
2406 _schema.debug("Table '%s': dropping duplicate FK constraints: '%s'",
2407 source_table, cons['constraint_name'])
2408 self._drop_constraint(cr, source_table, cons['constraint_name'])
2410 # (re-)create the FK
2411 self._m2o_add_foreign_key_checked(source_field, dest_model, ondelete)
2414 def _set_default_value_on_column(self, cr, column_name, context=None):
2415 # ideally, we should use default_get(), but it fails due to ir.values
2419 default = self._defaults.get(column_name)
2420 if callable(default):
2421 default = default(self, cr, SUPERUSER_ID, context)
2423 column = self._columns[column_name]
2424 ss = column._symbol_set
2425 db_default = ss[1](default)
2426 # Write default if non-NULL, except for booleans for which False means
2427 # the same as NULL - this saves us an expensive query on large tables.
2428 write_default = (db_default is not None if column._type != 'boolean'
2431 _logger.debug("Table '%s': setting default value of new column %s to %r",
2432 self._table, column_name, default)
2433 query = 'UPDATE "%s" SET "%s"=%s WHERE "%s" is NULL' % (
2434 self._table, column_name, ss[0], column_name)
2435 cr.execute(query, (db_default,))
2436 # this is a disgrace
2439 def _auto_init(self, cr, context=None):
2442 Call _field_create and, unless _auto is False:
2444 - create the corresponding table in database for the model,
2445 - possibly add the parent columns in database,
2446 - possibly add the columns 'create_uid', 'create_date', 'write_uid',
2447 'write_date' in database if _log_access is True (the default),
2448 - report on database columns that no longer exist in _columns,
2449 - remove NOT NULL constraints that no longer apply,
2450 - alter existing database columns to match _columns,
2451 - create database tables to match _columns,
2452 - add database indices to match _columns,
2453 - save in self._foreign_keys a list of foreign keys to create (see
2457 self._foreign_keys = set()
2458 raise_on_invalid_object_name(self._name)
2461 store_compute = False
2462 stored_fields = [] # new-style stored fields with compute
2464 update_custom_fields = context.get('update_custom_fields', False)
2465 self._field_create(cr, context=context)
2466 create = not self._table_exist(cr)
2470 self._create_table(cr)
2473 cr.execute('SELECT 1 FROM "%s" LIMIT 1' % self._table)
2474 has_rows = cr.rowcount
2477 if self._parent_store:
2478 if not self._parent_columns_exist(cr):
2479 self._create_parent_columns(cr)
2480 store_compute = True
2482 self._check_removed_columns(cr, log=False)
2484 # iterate on the "object columns"
2485 column_data = self._select_column_data(cr)
2487 for k, f in self._columns.iteritems():
2488 if k == 'id': # FIXME: maybe id should be a regular column?
2490 # Don't update custom (also called manual) fields
2491 if f.manual and not update_custom_fields:
2494 if isinstance(f, fields.one2many):
2495 self._o2m_raise_on_missing_reference(cr, f)
2497 elif isinstance(f, fields.many2many):
2498 self._m2m_raise_or_create_relation(cr, f)
2501 res = column_data.get(k)
2503 # The field is not found as-is in database; check whether it
2504 # exists under an old name.
2505 if not res and hasattr(f, 'oldname'):
2506 res = column_data.get(f.oldname)
2508 cr.execute('ALTER TABLE "%s" RENAME "%s" TO "%s"' % (self._table, f.oldname, k))
2510 column_data[k] = res
2511 _schema.debug("Table '%s': renamed column '%s' to '%s'",
2512 self._table, f.oldname, k)
2514 # The field already exists in database. Possibly
2515 # change its type, rename it, drop it or change its
2518 f_pg_type = res['typname']
2519 f_pg_size = res['size']
2520 f_pg_notnull = res['attnotnull']
2521 if isinstance(f, fields.function) and not f.store and\
2522 not getattr(f, 'nodrop', False):
2523 _logger.info('column %s (%s) converted to a function, removed from table %s',
2524 k, f.string, self._table)
2525 cr.execute('ALTER TABLE "%s" DROP COLUMN "%s" CASCADE' % (self._table, k))
2527 _schema.debug("Table '%s': dropped column '%s' with cascade",
2531 f_obj_type = get_pg_type(f) and get_pg_type(f)[0]
2536 ('text', 'char', pg_varchar(f.size), '::%s' % pg_varchar(f.size)),
2537 ('varchar', 'text', 'TEXT', ''),
2538 ('int4', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
2539 ('date', 'datetime', 'TIMESTAMP', '::TIMESTAMP'),
2540 ('timestamp', 'date', 'date', '::date'),
2541 ('numeric', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
2542 ('float8', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
2544 if f_pg_type == 'varchar' and f._type == 'char' and f_pg_size and (f.size is None or f_pg_size < f.size):
2546 with cr.savepoint():
2547 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" TYPE %s' % (self._table, k, pg_varchar(f.size)))
2548 except psycopg2.NotSupportedError:
2549 # An in-place ALTER TABLE cannot be done because a view depends on this field.
2550 # Do a manual copy. This will drop the view (it will be recreated later).
2551 cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO temp_change_size' % (self._table, k))
2552 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, pg_varchar(f.size)))
2553 cr.execute('UPDATE "%s" SET "%s"=temp_change_size::%s' % (self._table, k, pg_varchar(f.size)))
2554 cr.execute('ALTER TABLE "%s" DROP COLUMN temp_change_size CASCADE' % (self._table,))
2556 _schema.debug("Table '%s': column '%s' (type varchar) changed size from %s to %s",
2557 self._table, k, f_pg_size or 'unlimited', f.size or 'unlimited')
2559 if (f_pg_type==c[0]) and (f._type==c[1]):
2560 if f_pg_type != f_obj_type:
2562 cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO __temp_type_cast' % (self._table, k))
2563 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, c[2]))
2564 cr.execute(('UPDATE "%s" SET "%s"= __temp_type_cast'+c[3]) % (self._table, k))
2565 cr.execute('ALTER TABLE "%s" DROP COLUMN __temp_type_cast CASCADE' % (self._table,))
2567 _schema.debug("Table '%s': column '%s' changed type from %s to %s",
2568 self._table, k, c[0], c[1])
2571 if f_pg_type != f_obj_type:
2575 newname = k + '_moved' + str(i)
2576 cr.execute("SELECT count(1) FROM pg_class c,pg_attribute a " \
2577 "WHERE c.relname=%s " \
2578 "AND a.attname=%s " \
2579 "AND c.oid=a.attrelid ", (self._table, newname))
2580 if not cr.fetchone()[0]:
2584 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, k))
2585 cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO "%s"' % (self._table, k, newname))
2586 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, get_pg_type(f)[1]))
2587 cr.execute("COMMENT ON COLUMN %s.\"%s\" IS %%s" % (self._table, k), (f.string,))
2588 _schema.debug("Table '%s': column '%s' has changed type (DB=%s, def=%s), data moved to column %s !",
2589 self._table, k, f_pg_type, f._type, newname)
2591 # if the field is required and hasn't got a NOT NULL constraint
2592 if f.required and f_pg_notnull == 0:
2594 self._set_default_value_on_column(cr, k, context=context)
2595 # add the NOT NULL constraint
2597 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k), log_exceptions=False)
2599 _schema.debug("Table '%s': column '%s': added NOT NULL constraint",
2602 msg = "Table '%s': unable to set a NOT NULL constraint on column '%s' !\n"\
2603 "If you want to have it, you should update the records and execute manually:\n"\
2604 "ALTER TABLE %s ALTER COLUMN %s SET NOT NULL"
2605 _schema.warning(msg, self._table, k, self._table, k)
2607 elif not f.required and f_pg_notnull == 1:
2608 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, k))
2610 _schema.debug("Table '%s': column '%s': dropped NOT NULL constraint",
2613 indexname = '%s_%s_index' % (self._table, k)
2614 cr.execute("SELECT indexname FROM pg_indexes WHERE indexname = %s and tablename = %s", (indexname, self._table))
2615 res2 = cr.dictfetchall()
2616 if not res2 and f.select:
2617 cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (self._table, k, self._table, k))
2619 if f._type == 'text':
2620 # FIXME: for fields.text columns we should try creating GIN indexes instead (seems most suitable for an ERP context)
2621 msg = "Table '%s': Adding (b-tree) index for %s column '%s'. "\
2622 "This is probably useless (does not work for fulltext search) and prevents INSERTs of long texts"\
2623 " because there is a length limit for indexable btree values!\n"\
2624 "Use a search view instead if you simply want to make the field searchable."
2625 _schema.warning(msg, self._table, f._type, k)
2626 if res2 and not f.select:
2627 cr.execute('DROP INDEX "%s_%s_index"' % (self._table, k))
2629 msg = "Table '%s': dropping index for column '%s' of type '%s' as it is not required anymore"
2630 _schema.debug(msg, self._table, k, f._type)
2632 if isinstance(f, fields.many2one) or (isinstance(f, fields.function) and f._type == 'many2one' and f.store):
2633 dest_model = self.pool[f._obj]
2634 if dest_model._auto and dest_model._table != 'ir_actions':
2635 self._m2o_fix_foreign_key(cr, self._table, k, dest_model, f.ondelete)
2637 # The field doesn't exist in database. Create it if necessary.
2639 if not isinstance(f, fields.function) or f.store:
2640 # add the missing field
2641 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, get_pg_type(f)[1]))
2642 cr.execute("COMMENT ON COLUMN %s.\"%s\" IS %%s" % (self._table, k), (f.string,))
2643 _schema.debug("Table '%s': added column '%s' with definition=%s",
2644 self._table, k, get_pg_type(f)[1])
2648 self._set_default_value_on_column(cr, k, context=context)
2650 # remember the functions to call for the stored fields
2651 if isinstance(f, fields.function):
2653 if f.store is not True: # i.e. if f.store is a dict
2654 order = f.store[f.store.keys()[0]][2]
2655 todo_end.append((order, self._update_store, (f, k)))
2657 # remember new-style stored fields with compute method
2658 if k in self._fields and self._fields[k].depends:
2659 stored_fields.append(self._fields[k])
2661 # and add constraints if needed
2662 if isinstance(f, fields.many2one) or (isinstance(f, fields.function) and f._type == 'many2one' and f.store):
2663 if f._obj not in self.pool:
2664 raise except_orm('Programming Error', 'There is no reference available for %s' % (f._obj,))
2665 dest_model = self.pool[f._obj]
2666 ref = dest_model._table
2667 # ir_actions is inherited so foreign key doesn't work on it
2668 if dest_model._auto and ref != 'ir_actions':
2669 self._m2o_add_foreign_key_checked(k, dest_model, f.ondelete)
2671 cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (self._table, k, self._table, k))
2675 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k))
2676 _schema.debug("Table '%s': column '%s': added a NOT NULL constraint",
2679 msg = "WARNING: unable to set column %s of table %s not null !\n"\
2680 "Try to re-run: openerp-server --update=module\n"\
2681 "If it doesn't work, update records and execute manually:\n"\
2682 "ALTER TABLE %s ALTER COLUMN %s SET NOT NULL"
2683 _logger.warning(msg, k, self._table, self._table, k, exc_info=True)
2687 cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (self._table,))
2688 create = not bool(cr.fetchone())
2690 cr.commit() # start a new transaction
2693 self._add_sql_constraints(cr)
2696 self._execute_sql(cr)
2699 self._parent_store_compute(cr)
2703 # trigger computation of new-style stored fields with a compute
2705 _logger.info("Storing computed values of %s fields %s",
2706 self._name, ', '.join(sorted(f.name for f in stored_fields)))
2707 recs = self.browse(cr, SUPERUSER_ID, [], {'active_test': False})
2708 recs = recs.search([])
2710 map(recs._recompute_todo, stored_fields)
2713 todo_end.append((1000, func, ()))
2717 def _auto_end(self, cr, context=None):
2718 """ Create the foreign keys recorded by _auto_init. """
2719 for t, k, r, d in self._foreign_keys:
2720 cr.execute('ALTER TABLE "%s" ADD FOREIGN KEY ("%s") REFERENCES "%s" ON DELETE %s' % (t, k, r, d))
2721 self._save_constraint(cr, "%s_%s_fkey" % (t, k), 'f')
2723 del self._foreign_keys
2726 def _table_exist(self, cr):
2727 cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (self._table,))
2731 def _create_table(self, cr):
2732 cr.execute('CREATE TABLE "%s" (id SERIAL NOT NULL, PRIMARY KEY(id))' % (self._table,))
2733 cr.execute(("COMMENT ON TABLE \"%s\" IS %%s" % self._table), (self._description,))
2734 _schema.debug("Table '%s': created", self._table)
2737 def _parent_columns_exist(self, cr):
2738 cr.execute("""SELECT c.relname
2739 FROM pg_class c, pg_attribute a
2740 WHERE c.relname=%s AND a.attname=%s AND c.oid=a.attrelid
2741 """, (self._table, 'parent_left'))
2745 def _create_parent_columns(self, cr):
2746 cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_left" INTEGER' % (self._table,))
2747 cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_right" INTEGER' % (self._table,))
2748 if 'parent_left' not in self._columns:
2749 _logger.error('create a column parent_left on object %s: fields.integer(\'Left Parent\', select=1)',
2751 _schema.debug("Table '%s': added column '%s' with definition=%s",
2752 self._table, 'parent_left', 'INTEGER')
2753 elif not self._columns['parent_left'].select:
2754 _logger.error('parent_left column on object %s must be indexed! Add select=1 to the field definition',
2756 if 'parent_right' not in self._columns:
2757 _logger.error('create a column parent_right on object %s: fields.integer(\'Right Parent\', select=1)',
2759 _schema.debug("Table '%s': added column '%s' with definition=%s",
2760 self._table, 'parent_right', 'INTEGER')
2761 elif not self._columns['parent_right'].select:
2762 _logger.error('parent_right column on object %s must be indexed! Add select=1 to the field definition',
2764 if self._columns[self._parent_name].ondelete not in ('cascade', 'restrict'):
2765 _logger.error("The column %s on object %s must be set as ondelete='cascade' or 'restrict'",
2766 self._parent_name, self._name)
2771 def _select_column_data(self, cr):
2772 # attlen is the number of bytes necessary to represent the type when
2773 # the type has a fixed size. If the type has a varying size attlen is
2774 # -1 and atttypmod is the size limit + 4, or -1 if there is no limit.
2775 cr.execute("SELECT c.relname,a.attname,a.attlen,a.atttypmod,a.attnotnull,a.atthasdef,t.typname,CASE WHEN a.attlen=-1 THEN (CASE WHEN a.atttypmod=-1 THEN 0 ELSE a.atttypmod-4 END) ELSE a.attlen END as size " \
2776 "FROM pg_class c,pg_attribute a,pg_type t " \
2777 "WHERE c.relname=%s " \
2778 "AND c.oid=a.attrelid " \
2779 "AND a.atttypid=t.oid", (self._table,))
2780 return dict(map(lambda x: (x['attname'], x),cr.dictfetchall()))
2783 def _o2m_raise_on_missing_reference(self, cr, f):
2784 # TODO this check should be a method on fields.one2many.
2785 if f._obj in self.pool:
2786 other = self.pool[f._obj]
2787 # TODO the condition could use fields_get_keys().
2788 if f._fields_id not in other._columns.keys():
2789 if f._fields_id not in other._inherit_fields.keys():
2790 raise except_orm('Programming Error', "There is no reference field '%s' found for '%s'" % (f._fields_id, f._obj,))
2792 def _m2m_raise_or_create_relation(self, cr, f):
2793 m2m_tbl, col1, col2 = f._sql_names(self)
2794 # do not create relations for custom fields as they do not belong to a module
2795 # they will be automatically removed when dropping the corresponding ir.model.field
2796 # table names for custom relations all start with x_, see __init__
2797 if not m2m_tbl.startswith('x_'):
2798 self._save_relation_table(cr, m2m_tbl)
2799 cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (m2m_tbl,))
2800 if not cr.dictfetchall():
2801 if f._obj not in self.pool:
2802 raise except_orm('Programming Error', 'Many2Many destination model does not exist: `%s`' % (f._obj,))
2803 dest_model = self.pool[f._obj]
2804 ref = dest_model._table
2805 cr.execute('CREATE TABLE "%s" ("%s" INTEGER NOT NULL, "%s" INTEGER NOT NULL, UNIQUE("%s","%s"))' % (m2m_tbl, col1, col2, col1, col2))
2806 # create foreign key references with ondelete=cascade, unless the targets are SQL views
2807 cr.execute("SELECT relkind FROM pg_class WHERE relkind IN ('v') AND relname=%s", (ref,))
2808 if not cr.fetchall():
2809 self._m2o_add_foreign_key_unchecked(m2m_tbl, col2, dest_model, 'cascade')
2810 cr.execute("SELECT relkind FROM pg_class WHERE relkind IN ('v') AND relname=%s", (self._table,))
2811 if not cr.fetchall():
2812 self._m2o_add_foreign_key_unchecked(m2m_tbl, col1, self, 'cascade')
2814 cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (m2m_tbl, col1, m2m_tbl, col1))
2815 cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (m2m_tbl, col2, m2m_tbl, col2))
2816 cr.execute("COMMENT ON TABLE \"%s\" IS 'RELATION BETWEEN %s AND %s'" % (m2m_tbl, self._table, ref))
2818 _schema.debug("Create table '%s': m2m relation between '%s' and '%s'", m2m_tbl, self._table, ref)
2821 def _add_sql_constraints(self, cr):
2824 Modify this model's database table constraints so they match the ones in
2828 def unify_cons_text(txt):
2829 return txt.lower().replace(', ',',').replace(' (','(')
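# unify_cons_text() normalizes a constraint definition before comparing
# it with what PostgreSQL reports, e.g. (sketch):
#
#     unify_cons_text('UNIQUE (name, company_id)')
#     # -> 'unique(name,company_id)'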
2831 for (key, con, _) in self._sql_constraints:
2832 conname = '%s_%s' % (self._table, key)
2834 self._save_constraint(cr, conname, 'u')
2835 cr.execute("SELECT conname, pg_catalog.pg_get_constraintdef(oid, true) as condef FROM pg_constraint where conname=%s", (conname,))
2836 existing_constraints = cr.dictfetchall()
2840 'query': 'ALTER TABLE "%s" DROP CONSTRAINT "%s"' % (self._table, conname, ),
2841 'msg_ok': "Table '%s': dropped constraint '%s'. Reason: its definition changed from '%%s' to '%s'" % (
2842 self._table, conname, con),
2843 'msg_err': "Table '%s': unable to drop \'%s\' constraint !" % (self._table, con),
2848 'query': 'ALTER TABLE "%s" ADD CONSTRAINT "%s" %s' % (self._table, conname, con,),
2849 'msg_ok': "Table '%s': added constraint '%s' with definition=%s" % (self._table, conname, con),
2850 'msg_err': "Table '%s': unable to add \'%s\' constraint !\n If you want to have it, you should update the records and execute manually:\n%%s" % (
2856 if not existing_constraints:
2857 # constraint does not exist:
2858 sql_actions['add']['execute'] = True
2859 sql_actions['add']['msg_err'] = sql_actions['add']['msg_err'] % (sql_actions['add']['query'], )
2860 elif unify_cons_text(con) not in [unify_cons_text(item['condef']) for item in existing_constraints]:
2861 # constraint exists but its definition has changed:
2862 sql_actions['drop']['execute'] = True
2863 sql_actions['drop']['msg_ok'] = sql_actions['drop']['msg_ok'] % (existing_constraints[0]['condef'].lower(), )
2864 sql_actions['add']['execute'] = True
2865 sql_actions['add']['msg_err'] = sql_actions['add']['msg_err'] % (sql_actions['add']['query'], )
2867 # execute the planned constraint actions, in order:
2868 sql_actions = [item for item in sql_actions.values()]
2869 sql_actions.sort(key=lambda x: x['order'])
2870 for sql_action in [action for action in sql_actions if action['execute']]:
2872 cr.execute(sql_action['query'])
2874 _schema.debug(sql_action['msg_ok'])
2876 _schema.warning(sql_action['msg_err'])
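# Illustrative sketch (assumption): the kind of _sql_constraints
# declarations that feed the loop above, as written on a model:
#
#     _sql_constraints = [
#         ('name_uniq', 'unique(name, company_id)',
#          'The name must be unique per company!'),
#         ('amount_positive', 'CHECK(amount >= 0)',
#          'The amount must be positive.'),
#     ]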
2880 def _execute_sql(self, cr):
2881 """ Execute the SQL code from the _sql attribute (if any)."""
2882 if hasattr(self, "_sql"):
2883 for line in self._sql.split(';'):
2884 line2 = line.replace('\n', '').strip()
2890 # Update objects that use this one, to update their _inherits fields
2894 def _inherits_reload_src(cls):
2895 """ Recompute the _inherit_fields mapping on each _inherits'd child model."""
2896 for model in cls.pool.values():
2897 if cls._name in model._inherits:
2898 model._inherits_reload()
2901 def _inherits_reload(cls):
2902 """ Recompute the _inherit_fields mapping.
2904 This will also call itself on each inherits'd child model.
2908 for table in cls._inherits:
2909 other = cls.pool[table]
2910 for col in other._columns.keys():
2911 res[col] = (table, cls._inherits[table], other._columns[col], table)
2912 for col in other._inherit_fields.keys():
2913 res[col] = (table, cls._inherits[table], other._inherit_fields[col][2], other._inherit_fields[col][3])
2914 cls._inherit_fields = res
2915 cls._all_columns = cls._get_column_infos()
2917 # interface columns with new-style fields
2918 for attr, column in cls._columns.items():
2919 if attr not in cls._fields:
2920 cls._add_field(attr, column.to_field())
2922 # interface inherited fields with new-style fields (note that the
2923 # reverse order is to stay consistent with _all_columns above)
2924 for parent_model, parent_field in reversed(cls._inherits.items()):
2925 for attr, field in cls.pool[parent_model]._fields.iteritems():
2926 if attr not in cls._fields:
2927 cls._add_field(attr, field.new(
2929 related=(parent_field, attr),
2933 cls._inherits_reload_src()
2936 def _get_column_infos(cls):
2937 """Returns a dict mapping all field names (direct fields and
2938 fields inherited via _inherits) to a ``column_info`` struct
2939 giving detailed column information """
2941 # do not invert the for loops, since local fields may hide inherited ones!
2942 for k, (parent, m2o, col, original_parent) in cls._inherit_fields.iteritems():
2943 result[k] = fields.column_info(k, col, parent, m2o, original_parent)
2944 for k, col in cls._columns.iteritems():
2945 result[k] = fields.column_info(k, col)
2949 def _inherits_check(cls):
2950 for table, field_name in cls._inherits.items():
2951 if field_name not in cls._columns:
2952 _logger.info('Missing many2one field definition for _inherits reference "%s" in "%s", using default one.', field_name, cls._name)
2953 cls._columns[field_name] = fields.many2one(table, string="Automatically created field to link to parent %s" % table,
2954 required=True, ondelete="cascade")
2955 elif not cls._columns[field_name].required or cls._columns[field_name].ondelete.lower() not in ("cascade", "restrict"):
2956 _logger.warning('Field definition for _inherits reference "%s" in "%s" must be marked as "required" with ondelete="cascade" or "restrict", forcing it to required + cascade.', field_name, cls._name)
2957 cls._columns[field_name].required = True
2958 cls._columns[field_name].ondelete = "cascade"
2960 # reflect fields with delegate=True in dictionary cls._inherits
2961 for field in cls._fields.itervalues():
2962 if field.type == 'many2one' and not field.related and field.delegate:
2963 if not field.required:
2964 _logger.warning("Field %s with delegate=True must be required.", field)
2965 field.required = True
2966 if field.ondelete.lower() not in ('cascade', 'restrict'):
2967 field.ondelete = 'cascade'
2968 cls._inherits[field.comodel_name] = field.name
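# Illustrative sketch (assumption): a new-API field declaration that
# this reflection picks up, equivalent to listing 'res.partner' in
# _inherits:
#
#     from openerp import fields as new_fields
#     partner_id = new_fields.Many2one('res.partner', required=True,
#                                      ondelete='cascade', delegate=True)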
2971 def _prepare_setup_fields(self):
2972 """ Prepare the setup of fields once the models have been loaded. """
2973 for field in self._fields.itervalues():
2977 def _setup_fields(self, partial=False):
2978 """ Setup the fields (dependency triggers, etc). """
2979 for field in self._fields.itervalues():
2981 field.setup(self.env)
2986 # group fields by compute to determine field.computed_fields
2987 fields_by_compute = defaultdict(list)
2988 for field in self._fields.itervalues():
2990 field.computed_fields = fields_by_compute[field.compute]
2991 field.computed_fields.append(field)
2993 field.computed_fields = []
2995 def fields_get(self, cr, user, allfields=None, context=None, write_access=True):
2996 """ fields_get([fields])
2998 Return the definition of each field.
3000 The returned value is a dictionary (indexed by field name) of
3001 dictionaries. The _inherits'd fields are included. The string, help,
3002 and selection (if present) attributes are translated.
3004 :param cr: database cursor
3005 :param user: current user id
3006 :param allfields: list of fields
3007 :param context: context arguments, like lang, time zone
3008 :return: dictionary of field dictionaries, each one describing a field of the business object
3009 :raise AccessError: * if user has no create/write rights on the requested object
3012 recs = self.browse(cr, user, [], context)
3015 for fname, field in self._fields.iteritems():
3016 if allfields and fname not in allfields:
3018 if not field.setup_done:
3020 if field.groups and not recs.user_has_groups(field.groups):
3022 res[fname] = field.get_description(recs.env)
3024 # if user cannot create or modify records, make all fields readonly
3025 has_access = functools.partial(recs.check_access_rights, raise_exception=False)
3026 if not (has_access('write') or has_access('create')):
3027 for description in res.itervalues():
3028 description['readonly'] = True
3029 description['states'] = {}
3033 def get_empty_list_help(self, cr, user, help, context=None):
3034 """ Generic method returning the help message displayed when there is
3035 no result to display in a list or kanban view. By default it returns
3036 the help given as parameter, which is generally the help message
3037 defined in the action.
3041 def check_field_access_rights(self, cr, user, operation, fields, context=None):
3043 Check the user access rights on the given fields. This raises Access
3044 Denied if the user does not have the rights. Otherwise it returns the
3045 fields (as-is if ``fields`` is not falsy, or the readable/writable
3046 fields otherwise).
3048 if user == SUPERUSER_ID:
3049 return fields or list(self._fields)
3052 """ determine whether user has access to field `fname` """
3053 field = self._fields.get(fname)
3054 if field and field.groups:
3055 return self.user_has_groups(cr, user, groups=field.groups, context=context)
3060 fields = filter(valid, self._fields)
3062 invalid_fields = set(filter(lambda name: not valid(name), fields))
3064 _logger.warning('Access Denied by ACLs for operation: %s, uid: %s, model: %s, fields: %s',
3065 operation, user, self._name, ', '.join(invalid_fields))
3067 _('The requested operation cannot be completed due to security restrictions. '
3068 'Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
3069 (self._description, operation))
3073 # add explicit old-style implementation to read()
3075 def read(self, cr, user, ids, fields=None, context=None, load='_classic_read'):
3076 records = self.browse(cr, user, ids, context)
3077 result = BaseModel.read(records, fields, load=load)
3078 return result if isinstance(ids, list) else (bool(result) and result[0])
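# Note on the old-style wrapper above: a single integer id returns one
# dict (or False when the record is missing), while a list of ids
# returns a list of dicts, e.g. (sketch):
#
#     vals = model.read(cr, uid, 42, ['name'])        # -> {...} or False
#     vals = model.read(cr, uid, [42, 43], ['name'])  # -> [{...}, {...}]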
3080 # new-style implementation of read()
3082 def read(self, fields=None, load='_classic_read'):
3085 Reads the requested fields for the records in `self`, low-level/RPC
3086 method. In Python code, prefer :meth:`~.browse`.
3088 :param fields: list of field names to return (default is all fields)
3089 :return: a list of dictionaries mapping field names to their values,
3090 with one dictionary per record
3091 :raise AccessError: if user has no read rights on some of the given records
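A minimal usage sketch (``records`` stands for any recordset)::

    for vals in records.read(['name']):
        # vals is a dictionary such as {'id': 42, 'name': 'Agrolait'}
        print vals['name']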
3094 # check access rights
3095 self.check_access_rights('read')
3096 fields = self.check_field_access_rights('read', fields)
3098 # split fields into stored and computed fields
3099 stored, computed = [], []
3101 if name in self._columns:
3103 elif name in self._fields:
3104 computed.append(name)
3106 _logger.warning("%s.read() with unknown field '%s'", self._name, name)
3108 # fetch stored fields from the database to the cache
3109 self._read_from_database(stored)
3111 # retrieve results from records; this takes values from the cache and
3112 # computes remaining fields
3114 name_fields = [(name, self._fields[name]) for name in (stored + computed)]
3115 use_name_get = (load == '_classic_read')
3118 values = {'id': record.id}
3119 for name, field in name_fields:
3120 values[name] = field.convert_to_read(record[name], use_name_get)
3121 result.append(values)
3122 except MissingError:
3128 def _prefetch_field(self, field):
3129 """ Read from the database in order to fetch `field` (:class:`Field`
3130 instance) for `self` in cache.
3132 # fetch the records of this model without field_name in their cache
3133 records = self._in_cache_without(field)
3135 if len(records) > PREFETCH_MAX:
3136 records = records[:PREFETCH_MAX] | self
3138 # determine which fields can be prefetched
3139 if not self.env.in_draft and \
3140 self._context.get('prefetch_fields', True) and \
3141 self._columns[field.name]._prefetch:
3142 # prefetch all classic and many2one fields that the user can access
3144 for fname, fcolumn in self._columns.iteritems()
3145 if fcolumn._prefetch
3146 if not fcolumn.groups or self.user_has_groups(fcolumn.groups)
3149 fnames = {field.name}
3151 # important: never prefetch fields to recompute!
3152 get_recs_todo = self.env.field_todo
3153 for fname in list(fnames):
3154 if get_recs_todo(self._fields[fname]):
3155 if fname == field.name:
3156 records -= get_recs_todo(field)
3158 fnames.discard(fname)
3160 # fetch records with read()
3161 assert self in records and field.name in fnames
3164 result = records.read(list(fnames), load='_classic_write')
3168 # check the cache, and update it if necessary
3169 if not self._cache.contains(field):
3170 for values in result:
3171 record = self.browse(values.pop('id'))
3172 record._cache.update(record._convert_to_cache(values, validate=False))
3173 if not self._cache.contains(field):
3174 e = AccessError("No value found for %s.%s" % (self, field.name))
3175 self._cache[field] = FailedValue(e)
3178 def _read_from_database(self, field_names):
3179 """ Read the given fields of the records in `self` from the database,
3180 and store them in cache. Access errors are also stored in cache.
3183 cr, user, context = env.args
3185 # FIXME: The query construction needs to be rewritten using the internal Query
3186 # object, as in search(), to avoid ambiguous column references when
3187 # reading/sorting on a table that is auto_joined to another table with
3188 # common columns (e.g. the magical columns)
3190 # Construct a clause for the security rules.
3191 # 'tables' holds the list of tables necessary for the SELECT, including
3192 # the ir.rule clauses, and contains at least self._table.
3193 rule_clause, rule_params, tables = env['ir.rule'].domain_get(self._name, 'read')
3195 # determine the fields that are stored as columns in self._table
3196 fields_pre = [f for f in field_names if self._columns[f]._classic_write]
3198 # we need fully-qualified column names in case len(tables) > 1
3200 if isinstance(self._columns.get(f), fields.binary) and \
3201 context.get('bin_size_%s' % f, context.get('bin_size')):
3202 # PG 9.2 introduces a conflicting pg_size_pretty(numeric) -> need an explicit ::bigint cast
3203 return 'pg_size_pretty(length(%s."%s")::bigint) as "%s"' % (self._table, f, f)
3205 return '%s."%s"' % (self._table, f)
3206 qual_names = map(qualify, set(fields_pre + ['id']))
3208 query = """ SELECT %(qual_names)s FROM %(tables)s
3209 WHERE %(table)s.id IN %%s AND (%(extra)s)
3212 'qual_names': ",".join(qual_names),
3213 'tables': ",".join(tables),
3214 'table': self._table,
3215 'extra': " OR ".join(rule_clause) if rule_clause else "TRUE",
3216 'order': self._parent_order or self._order,
3220 for sub_ids in cr.split_for_in_conditions(self.ids):
3221 cr.execute(query, [tuple(sub_ids)] + rule_params)
3222 result.extend(cr.dictfetchall())
3224 ids = [vals['id'] for vals in result]
3227 # translate the fields if necessary
3228 if context.get('lang'):
3229 ir_translation = env['ir.translation']
3230 for f in fields_pre:
3231 if self._columns[f].translate:
3232 #TODO: optimize out of this loop
3233 res_trans = ir_translation._get_ids(
3234 '%s,%s' % (self._name, f), 'model', context['lang'], ids)
3236 vals[f] = res_trans.get(vals['id'], False) or vals[f]
3238 # apply the symbol_get functions of the fields we just read
3239 for f in fields_pre:
3240 symbol_get = self._columns[f]._symbol_get
3243 vals[f] = symbol_get(vals[f])
3245 # store result in cache for POST fields
3247 record = self.browse(vals['id'])
3248 record._cache.update(record._convert_to_cache(vals, validate=False))
3250 # determine the fields that must be processed now
3251 fields_post = [f for f in field_names if not self._columns[f]._classic_write]
3253 # Compute POST fields, grouped by multi
3254 by_multi = defaultdict(list)
3255 for f in fields_post:
3256 by_multi[self._columns[f]._multi].append(f)
3258 for multi, fs in by_multi.iteritems():
3260 res2 = self._columns[fs[0]].get(cr, self._model, ids, fs, user, context=context, values=result)
3261 assert res2 is not None, \
3262 'The function field "%s" on the "%s" model returned None\n' \
3263 '(a dictionary was expected).' % (fs[0], self._name)
3265 # TOCHECK: why do we get a string instead of a dict in python2.6?
3266 # if isinstance(res2[vals['id']], str): res2[vals['id']] = eval(res2[vals['id']])
3267 multi_fields = res2.get(vals['id'], {})
3270 vals[f] = multi_fields.get(f, [])
3273 res2 = self._columns[f].get(cr, self._model, ids, f, user, context=context, values=result)
3276 vals[f] = res2[vals['id']]
3280 # Warn about deprecated fields now that fields_pre and fields_post are computed
3281 for f in field_names:
3282 column = self._columns[f]
3283 if column.deprecated:
3284 _logger.warning('Field %s.%s is deprecated: %s', self._name, f, column.deprecated)
3286 # store result in cache
3288 record = self.browse(vals.pop('id'))
3289 record._cache.update(record._convert_to_cache(vals, validate=False))
3291 # store failed values in cache for the records that could not be read
3292 fetched = self.browse(ids)
3293 missing = self - fetched
3295 extras = fetched - self
3298 _("Database fetch misses ids ({}) and has extra ids ({}), may be caused by a type incoherence in a previous request").format(
3299 ', '.join(map(repr, missing._ids)),
3300 ', '.join(map(repr, extras._ids)),
3302 # store an access error exception in existing records
3304 _('The requested operation cannot be completed due to security restrictions. Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
3305 (self._name, 'read')
3307 forbidden = missing.exists()
3308 forbidden._cache.update(FailedValue(exc))
3309 # store a missing error exception in non-existing records
3311 _('One of the documents you are trying to access has been deleted, please try again after refreshing.')
3313 (missing - forbidden)._cache.update(FailedValue(exc))
3316 def get_metadata(self):
3318 Returns some metadata about the given records.
3320 :return: list of ownership dictionaries for each requested record
3321 :rtype: list of dictionaries with the following keys:
3324 * create_uid: user who created the record
3325 * create_date: date when the record was created
3326 * write_uid: last user who changed the record
3327 * write_date: date of the last change to the record
3328 * xmlid: XML ID to use to refer to this record (if there is one), in format ``module.name``
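A sketch of one returned dictionary (all values are illustrative)::

    {'id': 42,
     'create_uid': (1, 'Administrator'),
     'create_date': '2014-06-04 10:00:00',
     'write_uid': (1, 'Administrator'),
     'write_date': '2014-06-04 10:05:00',
     'xmlid': 'base.main_partner'}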
3331 if self._log_access:
3332 fields += ['create_uid', 'create_date', 'write_uid', 'write_date']
3333 quoted_table = '"%s"' % self._table
3334 fields_str = ",".join('%s.%s' % (quoted_table, field) for field in fields)
3335 query = '''SELECT %s, __imd.module, __imd.name
3336 FROM %s LEFT JOIN ir_model_data __imd
3337 ON (__imd.model = %%s and __imd.res_id = %s.id)
3338 WHERE %s.id IN %%s''' % (fields_str, quoted_table, quoted_table, quoted_table)
3339 self._cr.execute(query, (self._name, tuple(self.ids)))
3340 res = self._cr.dictfetchall()
3342 uids = set(r[k] for r in res for k in ['write_uid', 'create_uid'] if r.get(k))
3343 names = dict(self.env['res.users'].browse(uids).name_get())
3347 value = r[key] = r[key] or False
3348 if key in ('write_uid', 'create_uid') and value in names:
3349 r[key] = (value, names[value])
3350 r['xmlid'] = ("%(module)s.%(name)s" % r) if r['name'] else False
3351 del r['name'], r['module']
3354 def _check_concurrency(self, cr, ids, context):
3357 if not (context.get(self.CONCURRENCY_CHECK_FIELD) and self._log_access):
3359 check_clause = "(id = %s AND %s < COALESCE(write_date, create_date, (now() at time zone 'UTC'))::timestamp)"
3360 for sub_ids in cr.split_for_in_conditions(ids):
3363 id_ref = "%s,%s" % (self._name, id)
3364 update_date = context[self.CONCURRENCY_CHECK_FIELD].pop(id_ref, None)
3366 ids_to_check.extend([id, update_date])
3367 if not ids_to_check:
3369 cr.execute("SELECT id FROM %s WHERE %s" % (self._table, " OR ".join([check_clause]*(len(ids_to_check)/2))), tuple(ids_to_check))
3372 # mention the first one only to keep the error message readable
3373 raise except_orm('ConcurrencyException', _('A document was modified since you last viewed it (%s:%d)') % (self._description, res[0]))
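# Illustrative sketch of the client-side contract (assuming
# CONCURRENCY_CHECK_FIELD is '__last_update'): the context carries
#   {'__last_update': {'res.partner,42': '2014-06-04 10:00:00'}}
# and the check above raises if record 42 was written after that date.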
3375 def _check_record_rules_result_count(self, cr, uid, ids, result_ids, operation, context=None):
3376 """Verify the returned rows after applying record rules matches
3377 the length of `ids`, and raise an appropriate exception if it does not.
3381 ids, result_ids = set(ids), set(result_ids)
3382 missing_ids = ids - result_ids
3384 # Attempt to distinguish record rule restriction vs deleted records,
3385 # to provide a more specific error message - check whether the missing ids still exist in the database
3386 cr.execute('SELECT id FROM ' + self._table + ' WHERE id IN %s', (tuple(missing_ids),))
3387 forbidden_ids = [x[0] for x in cr.fetchall()]
3389 # the missing ids are (at least partially) hidden by access rules
3390 if uid == SUPERUSER_ID:
3392 _logger.warning('Access Denied by record rules for operation: %s on record ids: %r, uid: %s, model: %s', operation, forbidden_ids, uid, self._name)
3393 raise except_orm(_('Access Denied'),
3394 _('The requested operation cannot be completed due to security restrictions. Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
3395 (self._description, operation))
3397 # If we get here, the missing_ids are not in the database
3398 if operation in ('read','unlink'):
3399 # No need to warn about deleting an already deleted record.
3400 # And no error when reading a record that was deleted, to prevent spurious
3401 # errors for non-transactional search/read sequences coming from clients
3403 _logger.warning('Failed operation on deleted record(s): %s, uid: %s, model: %s', operation, uid, self._name)
3404 raise except_orm(_('Missing document(s)'),
3405 _('One of the documents you are trying to access has been deleted, please try again after refreshing.'))
3408 def check_access_rights(self, cr, uid, operation, raise_exception=True): # no context on purpose.
3409 """Verifies that the operation given by ``operation`` is allowed for the user
3410 according to the access rights."""
3411 return self.pool.get('ir.model.access').check(cr, uid, self._name, operation, raise_exception)
3413 def check_access_rule(self, cr, uid, ids, operation, context=None):
3414 """Verifies that the operation given by ``operation`` is allowed for the user
3415 according to ir.rules.
3417 :param operation: one of ``write``, ``unlink``
3418 :raise except_orm: * if current ir.rules do not permit this operation.
3419 :return: None if the operation is allowed
3421 if uid == SUPERUSER_ID:
3424 if self.is_transient():
3425 # Only one implicit access rule for transient models: owner only!
3426 # This is ok to hardcode because we assert that TransientModels always
3427 # have log_access enabled so that the create_uid column is always there.
3428 # And even with _inherits, these fields are always present in the local
3429 # table too, so no need for JOINs.
3430 cr.execute("""SELECT distinct create_uid
3432 WHERE id IN %%s""" % self._table, (tuple(ids),))
3433 uids = [x[0] for x in cr.fetchall()]
3434 if len(uids) != 1 or uids[0] != uid:
3435 raise except_orm(_('Access Denied'),
3436 _('For this kind of document, you may only access records you created yourself.\n\n(Document type: %s)') % (self._description,))
3438 where_clause, where_params, tables = self.pool.get('ir.rule').domain_get(cr, uid, self._name, operation, context=context)
3440 where_clause = ' and ' + ' and '.join(where_clause)
3441 for sub_ids in cr.split_for_in_conditions(ids):
3442 cr.execute('SELECT ' + self._table + '.id FROM ' + ','.join(tables) +
3443 ' WHERE ' + self._table + '.id IN %s' + where_clause,
3444 [sub_ids] + where_params)
3445 returned_ids = [x['id'] for x in cr.dictfetchall()]
3446 self._check_record_rules_result_count(cr, uid, sub_ids, returned_ids, operation, context=context)
3448 def create_workflow(self, cr, uid, ids, context=None):
3449 """Create a workflow instance for each given record IDs."""
3450 from openerp import workflow
3452 workflow.trg_create(uid, self._name, res_id, cr)
3453 # self.invalidate_cache(cr, uid, context=context) ?
3456 def delete_workflow(self, cr, uid, ids, context=None):
3457 """Delete the workflow instances bound to the given record IDs."""
3458 from openerp import workflow
3460 workflow.trg_delete(uid, self._name, res_id, cr)
3461 self.invalidate_cache(cr, uid, context=context)
3464 def step_workflow(self, cr, uid, ids, context=None):
3465 """Reevaluate the workflow instances of the given record IDs."""
3466 from openerp import workflow
3468 workflow.trg_write(uid, self._name, res_id, cr)
3469 # self.invalidate_cache(cr, uid, context=context) ?
3472 def signal_workflow(self, cr, uid, ids, signal, context=None):
3473 """Send given workflow signal and return a dict mapping ids to workflow results"""
3474 from openerp import workflow
3477 result[res_id] = workflow.trg_validate(uid, self._name, res_id, signal, cr)
3478 # self.invalidate_cache(cr, uid, context=context) ?
3481 def redirect_workflow(self, cr, uid, old_new_ids, context=None):
3482 """ Rebind the workflow instance bound to the given 'old' record IDs to
3483 the given 'new' IDs (``old_new_ids`` is a list of pairs ``(old, new)``).
3485 from openerp import workflow
3486 for old_id, new_id in old_new_ids:
3487 workflow.trg_redirect(uid, self._name, old_id, new_id, cr)
3488 self.invalidate_cache(cr, uid, context=context)
3491 def unlink(self, cr, uid, ids, context=None):
3494 Deletes the records of the current set
3496 :raise AccessError: * if user has no unlink rights on the requested object
3497 * if user tries to bypass access rules for unlink on the requested object
3498 :raise UserError: if the record is default property for other records
3503 if isinstance(ids, (int, long)):
3506 result_store = self._store_get_values(cr, uid, ids, self._all_columns.keys(), context)
3508 # for recomputing new-style fields
3509 recs = self.browse(cr, uid, ids, context)
3510 recs.modified(self._fields)
3512 self._check_concurrency(cr, ids, context)
3514 self.check_access_rights(cr, uid, 'unlink')
3516 ir_property = self.pool.get('ir.property')
3518 # Check if the records are used as default properties.
3519 domain = [('res_id', '=', False),
3520 ('value_reference', 'in', ['%s,%s' % (self._name, i) for i in ids]),
3522 if ir_property.search(cr, uid, domain, context=context):
3523 raise except_orm(_('Error'), _('Unable to delete this document because it is used as a default property'))
3525 # Delete the records' properties.
3526 property_ids = ir_property.search(cr, uid, [('res_id', 'in', ['%s,%s' % (self._name, i) for i in ids])], context=context)
3527 ir_property.unlink(cr, uid, property_ids, context=context)
3529 self.delete_workflow(cr, uid, ids, context=context)
3531 self.check_access_rule(cr, uid, ids, 'unlink', context=context)
3532 pool_model_data = self.pool.get('ir.model.data')
3533 ir_values_obj = self.pool.get('ir.values')
3534 ir_attachment_obj = self.pool.get('ir.attachment')
3535 for sub_ids in cr.split_for_in_conditions(ids):
3536 cr.execute('delete from ' + self._table + ' ' \
3537 'where id IN %s', (sub_ids,))
3539 # Remove the ir_model_data reference if the record being deleted was created by an xml/csv file,
3540 # as these are not connected with real database foreign keys, and would be dangling references.
3541 # Note: following steps performed as admin to avoid access rights restrictions, and with no context
3542 # to avoid possible side-effects during admin calls.
3543 # Step 1. Calling unlink of ir_model_data only for the affected IDs
3544 reference_ids = pool_model_data.search(cr, SUPERUSER_ID, [('res_id','in',list(sub_ids)),('model','=',self._name)])
3545 # Step 2. Perform the actual deletion of the referenced ir.model.data records
3547 pool_model_data.unlink(cr, SUPERUSER_ID, reference_ids)
3549 # For the same reason, remove the records relevant to ir_values
3550 ir_value_ids = ir_values_obj.search(cr, uid,
3551 ['|',('value','in',['%s,%s' % (self._name, sid) for sid in sub_ids]),'&',('res_id','in',list(sub_ids)),('model','=',self._name)],
3554 ir_values_obj.unlink(cr, uid, ir_value_ids, context=context)
3556 # For the same reason, remove the records relevant to ir_attachment
3557 # The search is performed with sql as the search method of ir_attachment is overridden to hide attachments of deleted records
3558 cr.execute('select id from ir_attachment where res_model = %s and res_id in %s', (self._name, sub_ids))
3559 ir_attachment_ids = [ir_attachment[0] for ir_attachment in cr.fetchall()]
3560 if ir_attachment_ids:
3561 ir_attachment_obj.unlink(cr, uid, ir_attachment_ids, context=context)
3563 # invalidate the *whole* cache, since the orm does not handle all
3564 # changes made in the database, like cascading delete!
3565 recs.invalidate_cache()
3567 for order, obj_name, store_ids, fields in result_store:
3568 if obj_name == self._name:
3569 effective_store_ids = set(store_ids) - set(ids)
3571 effective_store_ids = store_ids
3572 if effective_store_ids:
3573 obj = self.pool[obj_name]
3574 cr.execute('select id from '+obj._table+' where id IN %s', (tuple(effective_store_ids),))
3575 rids = map(lambda x: x[0], cr.fetchall())
3577 obj._store_set_values(cr, uid, rids, fields, context)
3579 # recompute new-style fields
3588 def write(self, vals):
3591 Updates all records in the current set with the provided values.
3593 :param dict vals: fields to update and the value to set on them e.g::
3595 {'foo': 1, 'bar': "Qux"}
3597 will set the field ``foo`` to ``1`` and the field ``bar`` to
3598 ``"Qux"`` if those are valid (otherwise it will trigger an error).
3600 :raise AccessError: * if user has no write rights on the requested object
3601 * if user tries to bypass access rules for write on the requested object
3602 :raise ValidateError: if user tries to enter an invalid value for a field that is not in the selection
3603 :raise UserError: if a loop would be created in a hierarchy of objects as a result of the operation (such as setting an object as its own parent)
3605 .. _openerp/models/relationals/format:
3607 .. note:: Relational fields use a special "commands" format to manipulate their values
3609 This format is a list of command triplets executed sequentially,
3610 possible command triplets are:
3612 ``(0, _, values: dict)``
3613 links to a new record created from the provided values
3614 ``(1, id, values: dict)``
3615 updates the already-linked record of id ``id`` with the
3618 unlinks and deletes the linked record of id ``id``
3620 unlinks the linked record of id ``id`` without deleting it
3622 links to an existing record of id ``id``
3624 unlinks all records in the relation, equivalent to using
3625 the command ``3`` on every linked record
3627 replaces the existing list of linked records by the provided
3628 ones, equivalent to using ``5`` then ``4`` for each id in
3631 (in command triplets, ``_`` values are ignored and can be
3632 anything, generally ``0`` or ``False``)
3634 Any command can be used on :class:`~openerp.fields.Many2many`,
3635 only ``0``, ``1`` and ``2`` can be used on
3636 :class:`~openerp.fields.One2many`.
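An illustrative sketch of the commands format (field names are
assumptions, not part of any real model)::

    records.write({
        'category_id': [(6, 0, [1, 2])],             # m2m: replace all links
        'child_ids': [(0, 0, {'name': 'Child'}),     # o2m: create and link
                      (1, 7, {'name': 'Renamed'})],  # o2m: update record 7
    })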
3641 self._check_concurrency(self._ids)
3642 self.check_access_rights('write')
3644 # No user-driven update of these columns
3645 for field in itertools.chain(MAGIC_COLUMNS, ('parent_left', 'parent_right')):
3646 vals.pop(field, None)
3648 # split up fields into old-style and pure new-style ones
3649 old_vals, new_vals, unknown = {}, {}, []
3650 for key, val in vals.iteritems():
3651 field = self._fields.get(key)
3653 if field.store or field.inherited:
3655 if field.inverse and not field.inherited:
3661 _logger.warning("%s.write() with unknown fields: %s", self._name, ', '.join(sorted(unknown)))
3663 # write old-style fields with (low-level) method _write
3665 self._write(old_vals)
3667 # put the values of pure new-style fields into cache, and inverse them
3670 record._cache.update(record._convert_to_cache(new_vals, update=True))
3671 for key in new_vals:
3672 self._fields[key].determine_inverse(self)
3676 def _write(self, cr, user, ids, vals, context=None):
3677 # low-level implementation of write()
3682 self.check_field_access_rights(cr, user, 'write', vals.keys())
3683 deleted_related = defaultdict(list)
3684 for field in vals.keys():
3686 if field in self._columns:
3687 fobj = self._columns[field]
3688 elif field in self._inherit_fields:
3689 fobj = self._inherit_fields[field][2]
3692 if fobj._type in ['one2many', 'many2many'] and vals[field]:
3693 for wtuple in vals[field]:
3694 if isinstance(wtuple, (tuple, list)) and wtuple[0] == 2:
3695 deleted_related[fobj._obj].append(wtuple[1])
3700 for group in groups:
3701 module = group.split(".")[0]
3702 grp = group.split(".")[1]
3703 cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name=%s and module=%s and model=%s) and uid=%s", \
3704 (grp, module, 'res.groups', user))
3705 readonly = cr.fetchall()
3706 if readonly[0][0] >= 1:
3713 result = self._store_get_values(cr, user, ids, vals.keys(), context) or []
3715 # for recomputing new-style fields
3716 recs = self.browse(cr, user, ids, context)
3717 modified_fields = list(vals)
3718 if self._log_access:
3719 modified_fields += ['write_date', 'write_uid']
3720 recs.modified(modified_fields)
3722 parents_changed = []
3723 parent_order = self._parent_order or self._order
3724 if self._parent_store and (self._parent_name in vals) and not context.get('defer_parent_store_computation'):
3725 # The parent_left/right computation may take up to
3726 # 5 seconds. No need to recompute the values if the
3727 # parent is the same.
3728 # Note: to respect parent_order, nodes must be processed in
3729 # order, so ``parents_changed`` must be ordered properly.
3730 parent_val = vals[self._parent_name]
3732 query = "SELECT id FROM %s WHERE id IN %%s AND (%s != %%s OR %s IS NULL) ORDER BY %s" % \
3733 (self._table, self._parent_name, self._parent_name, parent_order)
3734 cr.execute(query, (tuple(ids), parent_val))
3736 query = "SELECT id FROM %s WHERE id IN %%s AND (%s IS NOT NULL) ORDER BY %s" % \
3737 (self._table, self._parent_name, parent_order)
3738 cr.execute(query, (tuple(ids),))
3739 parents_changed = map(operator.itemgetter(0), cr.fetchall())
3746 totranslate = context.get('lang', False) and (context['lang'] != 'en_US')
3748 field_column = self._all_columns.get(field) and self._all_columns.get(field).column
3749 if field_column and field_column.deprecated:
3750 _logger.warning('Field %s.%s is deprecated: %s', self._name, field, field_column.deprecated)
3751 if field in self._columns:
3752 if self._columns[field]._classic_write and not (hasattr(self._columns[field], '_fnct_inv')):
3753 if (not totranslate) or not self._columns[field].translate:
3754 upd0.append('"'+field+'"='+self._columns[field]._symbol_set[0])
3755 upd1.append(self._columns[field]._symbol_set[1](vals[field]))
3756 direct.append(field)
3758 upd_todo.append(field)
3760 updend.append(field)
3761 if field in self._columns \
3762 and hasattr(self._columns[field], 'selection') \
3764 self._check_selection_field_value(cr, user, field, vals[field], context=context)
3766 if self._log_access:
3767 upd0.append('write_uid=%s')
3768 upd0.append("write_date=(now() at time zone 'UTC')")
3770 direct.append('write_uid')
3771 direct.append('write_date')
3774 self.check_access_rule(cr, user, ids, 'write', context=context)
3775 for sub_ids in cr.split_for_in_conditions(ids):
3776 cr.execute('update ' + self._table + ' set ' + ','.join(upd0) + ' ' \
3777 'where id IN %s', upd1 + [sub_ids])
3778 if cr.rowcount != len(sub_ids):
3779 raise MissingError(_('One of the records you are trying to modify has already been deleted (Document type: %s).') % self._description)
3784 if self._columns[f].translate:
3785 src_trans = self.pool[self._name].read(cr, user, ids, [f])[0][f]
3788 # Inserting value to DB
3789 context_wo_lang = dict(context, lang=None)
3790 self.write(cr, user, ids, {f: vals[f]}, context=context_wo_lang)
3791 self.pool.get('ir.translation')._set_ids(cr, user, self._name+','+f, 'model', context['lang'], ids, vals[f], src_trans)
3793 # invalidate and mark new-style fields to recompute; do this before
3794 # setting other fields, because it can require the value of computed
3795 # fields, e.g., a one2many checking constraints on records
3796 recs.modified(direct)
3798 # call the 'set' method of fields which are not classic_write
3799 upd_todo.sort(lambda x, y: self._columns[x].priority-self._columns[y].priority)
3801 # default elements in context must be removed when calling a one2many or many2many
3802 rel_context = context.copy()
3803 for c in context.items():
3804 if c[0].startswith('default_'):
3805 del rel_context[c[0]]
3807 for field in upd_todo:
3809 result += self._columns[field].set(cr, self, id, field, vals[field], user, context=rel_context) or []
3811 # for recomputing new-style fields
3812 recs.modified(upd_todo)
3814 unknown_fields = updend[:]
3815 for table in self._inherits:
3816 col = self._inherits[table]
3818 for sub_ids in cr.split_for_in_conditions(ids):
3819 cr.execute('select distinct "'+col+'" from "'+self._table+'" ' \
3820 'where id IN %s', (sub_ids,))
3821 nids.extend([x[0] for x in cr.fetchall()])
3825 if self._inherit_fields[val][0] == table:
3827 unknown_fields.remove(val)
3829 self.pool[table].write(cr, user, nids, v, context)
3833 'No such field(s) in model %s: %s.',
3834 self._name, ', '.join(unknown_fields))
3836 # check Python constraints
3837 recs._validate_fields(vals)
3839 # TODO: use _order to set dest at the right position and not the first node of the parent
3840 # We can't defer parent_store computation because the stored function
3841 # fields that are computed may refer (directly or indirectly) to
3842 # parent_left/right (via a child_of domain)
3845 self.pool._init_parent[self._name] = True
3847 order = self._parent_order or self._order
3848 parent_val = vals[self._parent_name]
3850 clause, params = '%s=%%s' % (self._parent_name,), (parent_val,)
3852 clause, params = '%s IS NULL' % (self._parent_name,), ()
3854 for id in parents_changed:
3855 cr.execute('SELECT parent_left, parent_right FROM %s WHERE id=%%s' % (self._table,), (id,))
3856 pleft, pright = cr.fetchone()
3857 distance = pright - pleft + 1
3859 # Positions of current siblings, to locate proper insertion point;
3860 # this can _not_ be fetched outside the loop, as it needs to be refreshed
3861 # after each update, in case several nodes are sequentially inserted one
3862 # next to the other (i.e. computed incrementally)
3863 cr.execute('SELECT parent_right, id FROM %s WHERE %s ORDER BY %s' % (self._table, clause, parent_order), params)
3864 parents = cr.fetchall()
3866 # Find Position of the element
3868 for (parent_pright, parent_id) in parents:
3871 position = parent_pright and parent_pright + 1 or 1
3873 # It's the first node of the parent
3878 cr.execute('select parent_left from '+self._table+' where id=%s', (parent_val,))
3879 position = cr.fetchone()[0] + 1
3881 if pleft < position <= pright:
3882 raise except_orm(_('UserError'), _('Recursion detected.'))
3884 if pleft < position:
3885 cr.execute('update '+self._table+' set parent_left=parent_left+%s where parent_left>=%s', (distance, position))
3886 cr.execute('update '+self._table+' set parent_right=parent_right+%s where parent_right>=%s', (distance, position))
3887 cr.execute('update '+self._table+' set parent_left=parent_left+%s, parent_right=parent_right+%s where parent_left>=%s and parent_left<%s', (position-pleft, position-pleft, pleft, pright))
3889 cr.execute('update '+self._table+' set parent_left=parent_left+%s where parent_left>=%s', (distance, position))
3890 cr.execute('update '+self._table+' set parent_right=parent_right+%s where parent_right>=%s', (distance, position))
3891 cr.execute('update '+self._table+' set parent_left=parent_left-%s, parent_right=parent_right-%s where parent_left>=%s and parent_left<%s', (pleft-position+distance, pleft-position+distance, pleft+distance, pright+distance))
3892 recs.invalidate_cache(['parent_left', 'parent_right'])
3894 result += self._store_get_values(cr, user, ids, vals.keys(), context)
3898 for order, model_name, ids_to_update, fields_to_recompute in result:
3899 key = (model_name, tuple(fields_to_recompute))
3900 done.setdefault(key, {})
3901 # avoid doing the same computation several times
3903 for id in ids_to_update:
3904 if id not in done[key]:
3905 done[key][id] = True
3906 if id not in deleted_related[model_name]:
3908 self.pool[model_name]._store_set_values(cr, user, todo, fields_to_recompute, context)
3910 # recompute new-style fields
3911 if context.get('recompute', True):
3914 self.step_workflow(cr, user, ids, context=context)
3918 # TODO: Should set perm to user.xxx
3921 @api.returns('self', lambda value: value.id)
3922 def create(self, vals):
3923 """ create(vals) -> record
3925 Creates a new record for the model.
3927 The new record is initialized using the values from ``vals`` and
3928 if necessary those from :meth:`~.default_get`.
3931 values for the model's fields, as a dictionary::
3933 {'field_name': field_value, ...}
3935 see :meth:`~.write` for details
3936 :return: new record created
3937 :raise AccessError: * if user has no create rights on the requested object
3938 * if user tries to bypass access rules for create on the requested object
3939 :raise ValidateError: if user tries to enter an invalid value for a field that is not in the selection
3940 :raise UserError: if a loop would be created in a hierarchy of objects as a result of the operation (such as setting an object as its own parent)
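A minimal sketch using the new-style API (model and values are
illustrative)::

    partner = self.env['res.partner'].create({'name': 'Foo'})
    assert partner.name == 'Foo'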
3942 self.check_access_rights('create')
3944 # add missing defaults, and drop fields that may not be set by user
3945 vals = self._add_missing_default_values(vals)
3946 for field in itertools.chain(MAGIC_COLUMNS, ('parent_left', 'parent_right')):
3947 vals.pop(field, None)
3949 # split up fields into old-style and pure new-style ones
3950 old_vals, new_vals, unknown = {}, {}, []
3951 for key, val in vals.iteritems():
3952 field = self._fields.get(key)
3954 if field.store or field.inherited:
3956 if field.inverse and not field.inherited:
3962 _logger.warning("%s.create() with unknown fields: %s", self._name, ', '.join(sorted(unknown)))
3964 # create record with old-style fields
3965 record = self.browse(self._create(old_vals))
3967 # put the values of pure new-style fields into cache, and inverse them
3968 record._cache.update(record._convert_to_cache(new_vals))
3969 for key in new_vals:
3970 self._fields[key].determine_inverse(record)
3974 def _create(self, cr, user, vals, context=None):
3975 # low-level implementation of create()
3979 if self.is_transient():
3980 self._transient_vacuum(cr, user)
3983 for v in self._inherits:
3984 if self._inherits[v] not in vals:
3987 tocreate[v] = {'id': vals[self._inherits[v]]}
3990 # list of column assignments defined as tuples like:
3991 # (column_name, format_string, column_value)
3992 # (column_name, sql_formula)
3993 # Those tuples will be used by the string formatting for the INSERT
3995 ('id', "nextval('%s')" % self._sequence),
4000 for v in vals.keys():
4001 if v in self._inherit_fields and v not in self._columns:
4002 (table, col, col_detail, original_parent) = self._inherit_fields[v]
4003 tocreate[table][v] = vals[v]
4006 if (v not in self._inherit_fields) and (v not in self._columns):
4008 unknown_fields.append(v)
4011 'No such field(s) in model %s: %s.',
4012 self._name, ', '.join(unknown_fields))
4014 for table in tocreate:
4015 if self._inherits[table] in vals:
4016 del vals[self._inherits[table]]
4018 record_id = tocreate[table].pop('id', None)
4020 if record_id is None or not record_id:
4021 record_id = self.pool[table].create(cr, user, tocreate[table], context=context)
4023 self.pool[table].write(cr, user, [record_id], tocreate[table], context=context)
4025 updates.append((self._inherits[table], '%s', record_id))
4027 # Set boolean fields to False if they are not touched (to make search more powerful)
4028 bool_fields = [x for x in self._columns.keys() if self._columns[x]._type=='boolean']
4030 for bool_field in bool_fields:
4031 if bool_field not in vals:
4032 vals[bool_field] = False
4034 for field in vals.keys():
4036 if field in self._columns:
4037 fobj = self._columns[field]
4039 fobj = self._inherit_fields[field][2]
4045 for group in groups:
4046 module = group.split(".")[0]
4047 grp = group.split(".")[1]
4048 cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name='%s' and module='%s' and model='%s') and uid=%s" % \
4049 (grp, module, 'res.groups', user))
4050 readonly = cr.fetchall()
4051 if readonly[0][0] >= 1:
4054 elif readonly[0][0] == 0:
4062 current_field = self._columns[field]
4063 if current_field._classic_write:
4064 updates.append((field, '%s', current_field._symbol_set[1](vals[field])))
4066 #for the function fields that receive a value, we set them directly in the database
4067 #(they may be required), but we also need to trigger the _fct_inv()
4068 if (hasattr(current_field, '_fnct_inv')) and not isinstance(current_field, fields.related):
4069 #TODO: this way of special-casing the related fields is really creepy, but it shouldn't be changed
4070 #one week before the release candidate. It seems the only good way to handle this correctly is to add an
4071 #attribute to make a field 'really readonly' and thus totally ignored by create()... otherwise
4072 #if, for example, the related field has a default value (for usability) then the fct_inv is called and it
4073 #may raise an access rights error. Changing this is too big a change for now, and is thus postponed
4074 #until after the release but, definitely, the behavior shouldn't differ between related and function
4076 upd_todo.append(field)
4078 #TODO: this 'if' statement should be removed because there is no good reason to special-case the related
4079 #fields. See the above TODO comment for further explanations.
4080 if not isinstance(current_field, fields.related):
4081 upd_todo.append(field)
4082 if field in self._columns \
4083 and hasattr(current_field, 'selection') \
4085 self._check_selection_field_value(cr, user, field, vals[field], context=context)
4086 if self._log_access:
4087 updates.append(('create_uid', '%s', user))
4088 updates.append(('write_uid', '%s', user))
4089 updates.append(('create_date', "(now() at time zone 'UTC')"))
4090 updates.append(('write_date', "(now() at time zone 'UTC')"))
4092 # the list of tuples used in this formatting corresponds to
4093 # tuple(field_name, format, value)
4094 # In some cases, for example (id, create_date, write_date), we do not
4095 # need to read the third value of the tuple, because the real value is
4096 # encoded in the second value (the format).
4098 """INSERT INTO "%s" (%s) VALUES(%s) RETURNING id""" % (
4100 ', '.join('"%s"' % u[0] for u in updates),
4101 ', '.join(u[1] for u in updates)
4103 tuple([u[2] for u in updates if len(u) > 2])
4106 id_new, = cr.fetchone()
4107 recs = self.browse(cr, user, id_new, context)
4109 if self._parent_store and not context.get('defer_parent_store_computation'):
4111 self.pool._init_parent[self._name] = True
4113 parent = vals.get(self._parent_name, False)
4115 cr.execute('select parent_right from '+self._table+' where '+self._parent_name+'=%s order by '+(self._parent_order or self._order), (parent,))
4117 result_p = cr.fetchall()
4118 for (pleft,) in result_p:
4123 cr.execute('select parent_left from '+self._table+' where id=%s', (parent,))
4124 pleft_old = cr.fetchone()[0]
4127 cr.execute('select max(parent_right) from '+self._table)
4128 pleft = cr.fetchone()[0] or 0
4129 cr.execute('update '+self._table+' set parent_left=parent_left+2 where parent_left>%s', (pleft,))
4130 cr.execute('update '+self._table+' set parent_right=parent_right+2 where parent_right>%s', (pleft,))
4131 cr.execute('update '+self._table+' set parent_left=%s,parent_right=%s where id=%s', (pleft+1, pleft+2, id_new))
4132 recs.invalidate_cache(['parent_left', 'parent_right'])
4134 # invalidate and mark new-style fields to recompute; do this before
4135 # setting other fields, because it can require the value of computed
4136 # fields, e.g., a one2many checking constraints on records
4137 recs.modified([u[0] for u in updates])
4139 # call the 'set' method of fields which are not classic_write
4140 upd_todo.sort(lambda x, y: self._columns[x].priority-self._columns[y].priority)
4143 # default elements in context must be removed when calling a one2many or many2many
4143 rel_context = context.copy()
4144 for c in context.items():
4145 if c[0].startswith('default_'):
4146 del rel_context[c[0]]
4149 for field in upd_todo:
4150 result += self._columns[field].set(cr, self, id_new, field, vals[field], user, rel_context) or []
4152 # for recomputing new-style fields
4153 recs.modified(upd_todo)
4155 # check Python constraints
4156 recs._validate_fields(vals)
4158 if context.get('recompute', True):
4159 result += self._store_get_values(cr, user, [id_new],
4160 list(set(vals.keys() + self._inherits.values())),
4164 for order, model_name, ids, fields2 in result:
4165 if not (model_name, ids, fields2) in done:
4166 self.pool[model_name]._store_set_values(cr, user, ids, fields2, context)
4167 done.append((model_name, ids, fields2))
4168 # recompute new-style fields
4171 if self._log_create and context.get('recompute', True):
4172 message = self._description + \
4174 self.name_get(cr, user, [id_new], context=context)[0][1] + \
4175 "' " + _("created.")
4176 self.log(cr, user, id_new, message, True, context=context)
4178 self.check_access_rule(cr, user, [id_new], 'create', context=context)
4179 self.create_workflow(cr, user, [id_new], context=context)
4182 def _store_get_values(self, cr, uid, ids, fields, context):
4183 """Returns an ordered list of fields.function to call due to
4184 an update operation on ``fields`` of records with ``ids``,
4185 obtained by calling the 'store' triggers of these fields,
4186 as set up by their 'store' attribute.
4188 :return: [(priority, model_name, [record_ids,], [function_fields,])]
4190 if fields is None: fields = []
4191 stored_functions = self.pool._store_function.get(self._name, [])
4193 # use indexed names for the details of the stored_functions:
4194 model_name_, func_field_to_compute_, target_ids_func_, trigger_fields_, priority_ = range(5)
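# For instance (a sketch showing only the five indexed slots; the model and
# field names are illustrative): a stored function field 'partner_name' on
# 'account.invoice', recomputed when 'name' changes, with priority 10, would
# appear here as
#   ('account.invoice', 'partner_name', <target_ids_function>, ['name'], 10)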
4196 # only keep store triggers that should be triggered for the ``fields``
4198 triggers_to_compute = (
4199 f for f in stored_functions
4200 if not f[trigger_fields_] or set(fields).intersection(f[trigger_fields_])
4204 target_id_results = {}
4205 for store_trigger in triggers_to_compute:
4206 target_func_id_ = id(store_trigger[target_ids_func_])
4207 if target_func_id_ not in target_id_results:
4208 # use admin user for accessing objects having rules defined on store fields
4209 target_id_results[target_func_id_] = [i for i in store_trigger[target_ids_func_](self, cr, SUPERUSER_ID, ids, context) if i]
4210 target_ids = target_id_results[target_func_id_]
4212 # the compound key must consider the priority and model name
4213 key = (store_trigger[priority_], store_trigger[model_name_])
4214 for target_id in target_ids:
4215 to_compute_map.setdefault(key, {}).setdefault(target_id,set()).add(tuple(store_trigger))
4217 # Here to_compute_map looks like:
4218 # { (10, 'model_a') : { target_id1: [ (trigger_1_tuple, trigger_2_tuple) ], ... }
4219 # (20, 'model_a') : { target_id2: [ (trigger_3_tuple, trigger_4_tuple) ], ... }
4220 # (99, 'model_a') : { target_id1: [ (trigger_5_tuple, trigger_6_tuple) ], ... }
4223 # Now we need to generate the batch function calls list
4225 # { (10, 'model_a') : [(10, 'model_a', [record_ids,], [function_fields,])] }
4227 for ((priority,model), id_map) in to_compute_map.iteritems():
4228 trigger_ids_maps = {}
4229 # trigger_ids_maps =
4230 # { (function_1_tuple, function_2_tuple) : [target_id1, target_id2, ..] }
4231 for target_id, triggers in id_map.iteritems():
4232 trigger_ids_maps.setdefault(tuple(triggers), []).append(target_id)
4233 for triggers, target_ids in trigger_ids_maps.iteritems():
4234 call_map.setdefault((priority,model),[]).append((priority, model, target_ids,
4235 [t[func_field_to_compute_] for t in triggers]))
4238 result = reduce(operator.add, (call_map[k] for k in sorted(call_map)))
4241 def _store_set_values(self, cr, uid, ids, fields, context):
4242 """Calls the fields.function's "implementation function" for all ``fields``, on records with ``ids`` (taking care of
4243 respecting ``multi`` attributes), and stores the resulting values in the database directly."""
4248 if self._log_access:
4249 cr.execute('select id,write_date from '+self._table+' where id IN %s', (tuple(ids),))
4253 field_dict.setdefault(r[0], [])
4254 res_date = time.strptime((r[1])[:19], '%Y-%m-%d %H:%M:%S')
4255 write_date = datetime.datetime.fromtimestamp(time.mktime(res_date))
4256 for i in self.pool._store_function.get(self._name, []):
4258 up_write_date = write_date + datetime.timedelta(hours=i[5])
4259 if datetime.datetime.now() < up_write_date:
4261 field_dict[r[0]].append(i[1])
4267 if self._columns[f]._multi not in keys:
4268 keys.append(self._columns[f]._multi)
4269 todo.setdefault(self._columns[f]._multi, [])
4270 todo[self._columns[f]._multi].append(f)
4274 # use admin user for accessing objects having rules defined on store fields
4275 result = self._columns[val[0]].get(cr, self, ids, val, SUPERUSER_ID, context=context)
4276 for id, value in result.items():
4278 for f in value.keys():
4279 if f in field_dict[id]:
4286 if self._columns[v]._type == 'many2one':
4288 value[v] = value[v][0]
4291 upd0.append('"'+v+'"='+self._columns[v]._symbol_set[0])
4292 upd1.append(self._columns[v]._symbol_set[1](value[v]))
4295 cr.execute('update "' + self._table + '" set ' + \
4296 ','.join(upd0) + ' where id = %s', upd1)
4300 # use admin user for accessing objects having rules defined on store fields
4301 result = self._columns[f].get(cr, self, ids, f, SUPERUSER_ID, context=context)
4302 for r in result.keys():
4304 if r in field_dict.keys():
4305 if f in field_dict[r]:
4307 for id, value in result.items():
4308 if self._columns[f]._type == 'many2one':
4313 cr.execute('update "' + self._table + '" set ' + \
4314 '"'+f+'"='+self._columns[f]._symbol_set[0] + ' where id = %s', (self._columns[f]._symbol_set[1](value), id))
4316 # invalidate and mark new-style fields to recompute
4317 self.browse(cr, uid, ids, context).modified(fields)
4321 # TODO: improve the handling of NULL values
4322 def _where_calc(self, cr, user, domain, active_test=True, context=None):
4323 """Computes the WHERE clause needed to implement an OpenERP domain.
4324 :param domain: the domain to compute
4326 :param active_test: whether the default filtering of records with ``active``
4327 field set to ``False`` should be applied.
4328 :return: the Query object expressing the given domain
4329 :rtype: osv.query.Query
4334 # if the object has a field named 'active', filter out all inactive
4335 # records unless they were explicitly asked for
4336 if 'active' in self._all_columns and (active_test and context.get('active_test', True)):
4338 # the item[0] trick below works for domain items and '&'/'|'/'!'
4340 if not any(item[0] == 'active' for item in domain):
4341 domain.insert(0, ('active', '=', 1))
4343 domain = [('active', '=', 1)]
4346 e = expression.expression(cr, user, domain, self, context)
4347 tables = e.get_tables()
4348 where_clause, where_params = e.to_sql()
4349 where_clause = where_clause and [where_clause] or []
4351 where_clause, where_params, tables = [], [], ['"%s"' % self._table]
4353 return Query(tables, where_clause, where_params)
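# Illustrative sketch (not executed here): on a model backed by table
# 'res_partner', a domain such as
#   [('active', '=', True), ('name', 'like', 'foo')]
# yields a Query roughly equivalent to
#   tables:       ['"res_partner"']
#   where_clause: ['("res_partner"."active" = %s) AND ("res_partner"."name" like %s)']
#   where_params: [True, '%foo%']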
4355 def _check_qorder(self, word):
4356 if not regex_order.match(word):
4357 raise except_orm(_('AccessError'), _('Invalid "order" specified. A valid "order" specification is a comma-separated list of valid field names (optionally followed by asc/desc for the direction)'))
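# Illustrative examples: 'name', 'name desc' and 'sequence, id desc' all
# satisfy regex_order, while something like 'name; drop table foo' does not.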
4360 def _apply_ir_rules(self, cr, uid, query, mode='read', context=None):
4361 """Add what's missing in ``query`` to implement all appropriate ir.rules
4362 (using the ``model_name``'s rules or the current model's rules if ``model_name`` is None)
4364 :param query: the current query object
4366 if uid == SUPERUSER_ID:
4369 def apply_rule(added_clause, added_params, added_tables, parent_model=None):
4370 """ :param parent_model: name of the parent model, if the added
4371 clause comes from a parent model
4375 # as inherited rules are being applied, we need to add the missing JOIN
4376 # to reach the parent table (if it was not JOINed yet in the query)
4377 parent_alias = self._inherits_join_add(self, parent_model, query)
4378 # inherited rules are applied on the external table -> need to get the alias and replace
4379 parent_table = self.pool[parent_model]._table
4380 added_clause = [clause.replace('"%s"' % parent_table, '"%s"' % parent_alias) for clause in added_clause]
4381 # change references to parent_table to parent_alias, because we now use the alias to refer to the table
4383 for table in added_tables:
4384 # table is just a table name -> switch to the full alias
4385 if table == '"%s"' % parent_table:
4386 new_tables.append('"%s" as "%s"' % (parent_table, parent_alias))
4387 # table is already a full statement -> replace references to the table with its alias; this matches the way aliases are generated
4389 new_tables.append(table.replace('"%s"' % parent_table, '"%s"' % parent_alias))
4390 added_tables = new_tables
4391 query.where_clause += added_clause
4392 query.where_clause_params += added_params
4393 for table in added_tables:
4394 if table not in query.tables:
4395 query.tables.append(table)
4399 # apply main rules on the object
4400 rule_obj = self.pool.get('ir.rule')
4401 rule_where_clause, rule_where_clause_params, rule_tables = rule_obj.domain_get(cr, uid, self._name, mode, context=context)
4402 apply_rule(rule_where_clause, rule_where_clause_params, rule_tables)
4404 # apply ir.rules from the parents (through _inherits)
4405 for inherited_model in self._inherits:
4406 rule_where_clause, rule_where_clause_params, rule_tables = rule_obj.domain_get(cr, uid, inherited_model, mode, context=context)
4407 apply_rule(rule_where_clause, rule_where_clause_params, rule_tables,
4408 parent_model=inherited_model)
4410 def _generate_m2o_order_by(self, order_field, query):
4412 Add possibly missing JOIN to ``query`` and generate the ORDER BY clause for m2o fields,
4413 either native m2o fields or function/related fields that are stored, including
4414 intermediate JOINs for inheritance if required.
4416 :return: the qualified field name to use in an ORDER BY clause to sort by ``order_field``
4418 if order_field not in self._columns and order_field in self._inherit_fields:
4419 # also add missing joins for reaching the table containing the m2o field
4420 qualified_field = self._inherits_join_calc(order_field, query)
4421 order_field_column = self._inherit_fields[order_field][2]
4423 qualified_field = '"%s"."%s"' % (self._table, order_field)
4424 order_field_column = self._columns[order_field]
4426 assert order_field_column._type == 'many2one', 'Invalid field passed to _generate_m2o_order_by()'
4427 if not order_field_column._classic_write and not getattr(order_field_column, 'store', False):
4428 _logger.debug("Many2one function/related fields must be stored " \
4429 "to be used as ordering fields! Ignoring sorting for %s.%s",
4430 self._name, order_field)
4433 # figure out the applicable order_by for the m2o
4434 dest_model = self.pool[order_field_column._obj]
4435 m2o_order = dest_model._order
4436 if not regex_order.match(m2o_order):
4437 # _order is complex, can't use it here, so we default to _rec_name
4438 m2o_order = dest_model._rec_name
4440 # extract the field names, to be able to qualify them and add desc/asc
4442 for order_part in m2o_order.split(","):
4443 m2o_order_list.append(order_part.strip().split(" ", 1)[0].strip())
4444 m2o_order = m2o_order_list
4446 # Join the dest m2o table if it's not joined yet. We use [LEFT] OUTER join here
4447 # as we don't want to exclude results that have NULL values for the m2o
4448 src_table, src_field = qualified_field.replace('"', '').split('.', 1)
4449 dst_alias, dst_alias_statement = query.add_join((src_table, dest_model._table, src_field, 'id', src_field), implicit=False, outer=True)
4450 qualify = lambda field: '"%s"."%s"' % (dst_alias, field)
4451 return map(qualify, m2o_order) if isinstance(m2o_order, list) else qualify(m2o_order)
4453 def _generate_order_by(self, order_spec, query):
4455 Attempt to construct an appropriate ORDER BY clause based on order_spec, which must be
4456 a comma-separated list of valid field names, optionally followed by an ASC or DESC direction.
4458 :raise" except_orm in case order_spec is malformed
4460 order_by_clause = ''
4461 order_spec = order_spec or self._order
4463 order_by_elements = []
4464 self._check_qorder(order_spec)
4465 for order_part in order_spec.split(','):
4466 order_split = order_part.strip().split(' ')
4467 order_field = order_split[0].strip()
4468 order_direction = order_split[1].strip() if len(order_split) == 2 else ''
4471 if order_field == 'id':
4472 order_by_elements.append('"%s"."%s" %s' % (self._table, order_field, order_direction))
4473 elif order_field in self._columns:
4474 order_column = self._columns[order_field]
4475 if order_column._classic_read:
4476 inner_clause = '"%s"."%s"' % (self._table, order_field)
4477 elif order_column._type == 'many2one':
4478 inner_clause = self._generate_m2o_order_by(order_field, query)
4480 continue # ignore non-readable or "non-joinable" fields
4481 elif order_field in self._inherit_fields:
4482 parent_obj = self.pool[self._inherit_fields[order_field][3]]
4483 order_column = parent_obj._columns[order_field]
4484 if order_column._classic_read:
4485 inner_clause = self._inherits_join_calc(order_field, query)
4486 elif order_column._type == 'many2one':
4487 inner_clause = self._generate_m2o_order_by(order_field, query)
4489 continue # ignore non-readable or "non-joinable" fields
4491 raise ValueError(_("Sorting field %s not found on model %s") % (order_field, self._name))
4492 if order_column and order_column._type == 'boolean':
4493 inner_clause = "COALESCE(%s, false)" % inner_clause
4495 if isinstance(inner_clause, list):
4496 for clause in inner_clause:
4497 order_by_elements.append("%s %s" % (clause, order_direction))
4499 order_by_elements.append("%s %s" % (inner_clause, order_direction))
4500 if order_by_elements:
4501 order_by_clause = ",".join(order_by_elements)
4503 return order_by_clause and (' ORDER BY %s ' % order_by_clause) or ''
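# Illustrative sketch: on a model with classic columns, an order_spec of
# "sequence desc, name" yields ' ORDER BY "my_table"."sequence" desc,"my_table"."name" '
# (a many2one field in the spec is instead expanded via _generate_m2o_order_by(),
# which also adds the required JOIN to the query).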
4505 def _search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False, access_rights_uid=None):
4507 Private implementation of the search() method, which allows specifying the uid to use for the access right check.
4508 This is useful for example when filling in the selection list for a drop-down and avoiding access rights errors,
4509 by specifying ``access_rights_uid=1`` to bypass access rights check, but not ir.rules!
4510 This is ok at the security level because this method is private and not callable through XML-RPC.
4512 :param access_rights_uid: optional user ID to use when checking access rights
4513 (not for ir.rules, this is only for ir.model.access)
4517 self.check_access_rights(cr, access_rights_uid or user, 'read')
4519 # For transient models, restrict access to the current user, except for the super-user
4520 if self.is_transient() and self._log_access and user != SUPERUSER_ID:
4521 args = expression.AND(([('create_uid', '=', user)], args or []))
4523 query = self._where_calc(cr, user, args, context=context)
4524 self._apply_ir_rules(cr, user, query, 'read', context=context)
4525 order_by = self._generate_order_by(order, query)
4526 from_clause, where_clause, where_clause_params = query.get_sql()
4528 where_str = where_clause and (" WHERE %s" % where_clause) or ''
4531 # Ignore order, limit and offset when just counting; they don't make sense and could have a performance impact
4533 query_str = 'SELECT count(1) FROM ' + from_clause + where_str
4534 cr.execute(query_str, where_clause_params)
4538 limit_str = limit and ' limit %d' % limit or ''
4539 offset_str = offset and ' offset %d' % offset or ''
4540 query_str = 'SELECT "%s".id FROM ' % self._table + from_clause + where_str + order_by + limit_str + offset_str
4541 cr.execute(query_str, where_clause_params)
4544 # TDE note: with auto_join, we could have several lines about the same result
4545 # i.e. a lead with several unread messages; we uniquify the result using
4546 # a fast way to do it while preserving order (http://www.peterbe.com/plog/uniqifiers-benchmark)
4547 def _uniquify_list(seq):
4549 return [x for x in seq if x not in seen and not seen.add(x)]
4551 return _uniquify_list([x[0] for x in res])
4553 # returns the different values ever entered for one field
4554 # this is used, for example, in the client when the user hits enter on a char field
4556 def distinct_field_get(self, cr, uid, field, value, args=None, offset=0, limit=None):
4559 if field in self._inherit_fields:
4560 return self.pool[self._inherit_fields[field][0]].distinct_field_get(cr, uid, field, value, args, offset, limit)
4562 return self._columns[field].search(cr, self, args, field, value, offset, limit, uid)
4564 def copy_data(self, cr, uid, id, default=None, context=None):
4566 Copy the given record's data with all its field values
4568 :param cr: database cursor
4569 :param uid: current user id
4570 :param id: id of the record to copy
4571 :param default: field values to override in the original values of the copied record
4572 :type default: dictionary
4573 :param context: context arguments, like lang, time zone
4574 :type context: dictionary
4575 :return: dictionary containing all the field values
4581 # avoid recursion through already copied records in case of circular relationship
4582 seen_map = context.setdefault('__copy_data_seen', {})
4583 if id in seen_map.setdefault(self._name, []):
4585 seen_map[self._name].append(id)
4589 if 'state' not in default:
4590 if 'state' in self._defaults:
4591 if callable(self._defaults['state']):
4592 default['state'] = self._defaults['state'](self, cr, uid, context)
4594 default['state'] = self._defaults['state']
4596 # build a black list of fields that should not be copied
4597 blacklist = set(MAGIC_COLUMNS + ['parent_left', 'parent_right'])
4598 def blacklist_given_fields(obj):
4599 # blacklist the fields that are given by inheritance
4600 for other, field_to_other in obj._inherits.items():
4601 blacklist.add(field_to_other)
4602 if field_to_other in default:
4603 # all the fields of 'other' are given by the record: default[field_to_other],
4604 # except the ones redefined in self
4605 blacklist.update(set(self.pool[other]._all_columns) - set(self._columns))
4607 blacklist_given_fields(self.pool[other])
4608 # blacklist deprecated fields
4609 for name, field in obj._columns.items():
4610 if field.deprecated:
4613 blacklist_given_fields(self)
4616 fields_to_copy = dict((f,fi) for f, fi in self._all_columns.iteritems()
4619 if f not in blacklist)
4621 data = self.read(cr, uid, [id], fields_to_copy.keys(), context=context)
4625 raise IndexError( _("Record #%d of %s not found, cannot copy!") %( id, self._name))
4628 for f, colinfo in fields_to_copy.iteritems():
4629 field = colinfo.column
4630 if field._type == 'many2one':
4631 res[f] = data[f] and data[f][0]
4632 elif field._type == 'one2many':
4633 other = self.pool[field._obj]
4634 # duplicate following the order of the ids because we'll rely on
4635 # it later for copying translations in copy_translation()!
4636 lines = [other.copy_data(cr, uid, line_id, context=context) for line_id in sorted(data[f])]
4637 # the lines are duplicated using the wrong (old) parent, but then
4638 # are reassigned to the correct one thanks to the (0, 0, ...)
4639 res[f] = [(0, 0, line) for line in lines if line]
4640 elif field._type == 'many2many':
4641 res[f] = [(6, 0, data[f])]
4647 def copy_translations(self, cr, uid, old_id, new_id, context=None):
4651 # avoid recursion through already copied records in case of circular relationship
4652 seen_map = context.setdefault('__copy_translations_seen',{})
4653 if old_id in seen_map.setdefault(self._name,[]):
4655 seen_map[self._name].append(old_id)
4657 trans_obj = self.pool.get('ir.translation')
4658 # TODO it seems fields_get can be replaced by _all_columns (no need for translation)
4659 fields = self.fields_get(cr, uid, context=context)
4661 for field_name, field_def in fields.items():
4662 # removing the lang to compare untranslated values
4663 context_wo_lang = dict(context, lang=None)
4664 old_record, new_record = self.browse(cr, uid, [old_id, new_id], context=context_wo_lang)
4665 # we must recursively copy the translations for o2o and o2m
4666 if field_def['type'] == 'one2many':
4667 target_obj = self.pool[field_def['relation']]
4668 # here we rely on the order of the ids to match the translations
4669 # as foreseen in copy_data()
4670 old_children = sorted(r.id for r in old_record[field_name])
4671 new_children = sorted(r.id for r in new_record[field_name])
4672 for (old_child, new_child) in zip(old_children, new_children):
4673 target_obj.copy_translations(cr, uid, old_child, new_child, context=context)
4674 # and for translatable fields we keep them for copy
4675 elif field_def.get('translate'):
4676 if field_name in self._columns:
4677 trans_name = self._name + "," + field_name
4680 elif field_name in self._inherit_fields:
4681 trans_name = self._inherit_fields[field_name][0] + "," + field_name
4682 # get the id of the parent record to set the translation
4683 inherit_field_name = self._inherit_fields[field_name][1]
4684 target_id = new_record[inherit_field_name].id
4685 source_id = old_record[inherit_field_name].id
4689 trans_ids = trans_obj.search(cr, uid, [
4690 ('name', '=', trans_name),
4691 ('res_id', '=', source_id)
4693 user_lang = context.get('lang')
4694 for record in trans_obj.read(cr, uid, trans_ids, context=context):
4696 # remove source to avoid triggering _set_src
4697 del record['source']
4698 record.update({'res_id': target_id})
4699 if user_lang and user_lang == record['lang']:
4700 # 'source' to force the call to _set_src
4701 # 'value' needed if value is changed in copy(), want to see the new_value
4702 record['source'] = old_record[field_name]
4703 record['value'] = new_record[field_name]
4704 trans_obj.create(cr, uid, record, context=context)
4706 @api.returns('self', lambda value: value.id)
4707 def copy(self, cr, uid, id, default=None, context=None):
4708 """ copy(default=None)
4710 Duplicate record with given id updating it with default values
4712 :param dict default: dictionary of field values to override in the
4713 original values of the copied record, e.g: ``{'field_name': overriden_value, ...}``
4714 :returns: new record
4719 context = context.copy()
4720 data = self.copy_data(cr, uid, id, default, context)
4721 new_id = self.create(cr, uid, data, context)
4722 self.copy_translations(cr, uid, id, new_id, context)
4726 @api.returns('self')
4728 """ exists() -> records
4730 Returns the subset of records in `self` that exist, and marks deleted
4731 records as such in cache. It can be used as a test on records::
4736 By convention, new records are returned as existing.
4738 ids = filter(None, self._ids) # ids to check in database
4741 query = """SELECT id FROM "%s" WHERE id IN %%s""" % self._table
4742 self._cr.execute(query, (ids,))
4743 ids = ([r[0] for r in self._cr.fetchall()] + # ids in database
4744 [id for id in self._ids if not id]) # new ids
4745 existing = self.browse(ids)
4746 if len(existing) < len(self):
4747 # mark missing records in cache with a failed value
4748 exc = MissingError(_("Record does not exist or has been deleted."))
4749 (self - existing)._cache.update(FailedValue(exc))
4752 def check_recursion(self, cr, uid, ids, context=None, parent=None):
4753 _logger.warning("You are using deprecated %s.check_recursion(). Please use the '_check_recursion()' instead!" % \
4755 assert parent is None or parent in self._columns or parent in self._inherit_fields,\
4756 "The 'parent' parameter passed to check_recursion() must be None or a valid field name"
4757 return self._check_recursion(cr, uid, ids, context, parent)
4759 def _check_recursion(self, cr, uid, ids, context=None, parent=None):
4761 Verifies that there is no loop in a hierarchical structure of records,
4762 by following the parent relationship using the **parent** field until a loop
4763 is detected or until a top-level record is found.
4765 :param cr: database cursor
4766 :param uid: current user id
4767 :param ids: list of ids of records to check
4768 :param parent: optional parent field name (default: ``self._parent_name = parent_id``)
4769 :return: **True** if the operation can proceed safely, or **False** if an infinite loop is detected.
4772 parent = self._parent_name
4774 # must ignore 'active' flag, ir.rules, etc. => direct SQL query
4775 query = 'SELECT "%s" FROM "%s" WHERE id = %%s' % (parent, self._table)
4778 while current_id is not None:
4779 cr.execute(query, (current_id,))
4780 result = cr.fetchone()
4781 current_id = result[0] if result else None
4782 if current_id == id:
4786 def _check_m2m_recursion(self, cr, uid, ids, field_name):
4788 Verifies that there is no loop in a hierarchical structure of records,
4789 by following the parent relationship using the **parent** field until a loop
4790 is detected or until a top-level record is found.
4792 :param cr: database cursor
4793 :param uid: current user id
4794 :param ids: list of ids of records to check
4795 :param field_name: field to check
4796 :return: **True** if the operation can proceed safely, or **False** if an infinite loop is detected.
4799 field = self._all_columns.get(field_name)
4800 field = field.column if field else None
4801 if not field or field._type != 'many2many' or field._obj != self._name:
4802 # field must be a many2many on itself
4803 raise ValueError('invalid field_name: %r' % (field_name,))
4805 query = 'SELECT distinct "%s" FROM "%s" WHERE "%s" IN %%s' % (field._id2, field._rel, field._id1)
4809 for i in range(0, len(ids_parent), cr.IN_MAX):
4811 sub_ids_parent = ids_parent[i:j]
4812 cr.execute(query, (tuple(sub_ids_parent),))
4813 ids_parent2.extend(filter(None, map(lambda x: x[0], cr.fetchall())))
4814 ids_parent = ids_parent2
4815 for i in ids_parent:
4820 def _get_external_ids(self, cr, uid, ids, *args, **kwargs):
4821 """Retrieve the External ID(s) of any database record.
4823 **Synopsis**: ``_get_xml_ids(cr, uid, ids) -> { 'id': ['module.xml_id'] }``
4825 :return: map of ids to the list of their fully qualified External IDs
4826 in the form ``module.key``, or an empty list when there's no External
4827 ID for a record, e.g.::
4829 { 'id': ['module.ext_id', 'module.ext_id_bis'],
4832 ir_model_data = self.pool.get('ir.model.data')
4833 data_ids = ir_model_data.search(cr, uid, [('model', '=', self._name), ('res_id', 'in', ids)])
4834 data_results = ir_model_data.read(cr, uid, data_ids, ['module', 'name', 'res_id'])
4837 # can't use dict.fromkeys() as the list would be shared!
4839 for record in data_results:
4840 result[record['res_id']].append('%(module)s.%(name)s' % record)
4843 def get_external_id(self, cr, uid, ids, *args, **kwargs):
4844 """Retrieve the External ID of any database record, if there
4845 is one. This method works as a possible implementation
4846 for a function field, to be able to add it to any
4847 model object easily, referencing it as ``Model.get_external_id``.
4849 When multiple External IDs exist for a record, only one
4850 of them is returned (randomly).
4852 :return: map of ids to their fully qualified XML ID,
4853 defaulting to an empty string when there's none
4854 (to be usable as a function field),
4857 { 'id': 'module.ext_id',
4860 results = self._get_xml_ids(cr, uid, ids)
4861 for k, v in results.iteritems():
4868 # backwards compatibility
4869 get_xml_id = get_external_id
4870 _get_xml_ids = _get_external_ids
4872 def print_report(self, cr, uid, ids, name, data, context=None):
4874 Render the report `name` for the given IDs. The report must be defined
4875 for this model, not another.
4877 report = self.pool['ir.actions.report.xml']._lookup_report(cr, name)
4878 assert self._name == report.table
4879 return report.create(cr, uid, ids, data, context)
4883 def is_transient(cls):
4884 """ Return whether the model is transient.
4886 See :class:`TransientModel`.
4889 return cls._transient
4891 def _transient_clean_rows_older_than(self, cr, seconds):
4892 assert self._transient, "Model %s is not transient, it cannot be vacuumed!" % self._name
4893 # Never delete rows used in last 5 minutes
4894 seconds = max(seconds, 300)
4895 query = ("SELECT id FROM " + self._table + " WHERE"
4896 " COALESCE(write_date, create_date, (now() at time zone 'UTC'))::timestamp"
4897 " < ((now() at time zone 'UTC') - interval %s)")
4898 cr.execute(query, ("%s seconds" % seconds,))
4899 ids = [x[0] for x in cr.fetchall()]
4900 self.unlink(cr, SUPERUSER_ID, ids)
4902 def _transient_clean_old_rows(self, cr, max_count):
4903 # Check how many rows we have in the table
4904 cr.execute("SELECT count(*) AS row_count FROM " + self._table)
4906 if res[0][0] <= max_count:
4907 return # max not reached, nothing to do
4908 self._transient_clean_rows_older_than(cr, 300)
4910 def _transient_vacuum(self, cr, uid, force=False):
4911 """Clean the transient records.
4913 This unlinks old records from the transient model tables whenever the
4914 "_transient_max_count" or "_max_age" conditions (if any) are reached.
4915 Actual cleaning will happen only once every "_transient_check_time" calls.
4916 This means this method can be called frequently called (e.g. whenever
4917 a new record is created).
4918 Example with both max_hours and max_count active:
4919 Suppose max_hours = 0.2 (e.g. 12 minutes), max_count = 20, there are 55 rows in the
4920 table, 10 created/changed in the last 5 minutes, an additional 12 created/changed between
4921 5 and 10 minutes ago, the rest created/changed more then 12 minutes ago.
4922 - age based vacuum will leave the 22 rows created/changed in the last 12 minutes
4923 - count based vacuum will wipe out another 12 rows. Not just 2, otherwise each addition
4924 would immediately cause the maximum to be reached again.
4925 - the 10 rows that have been created/changed the last 5 minutes will NOT be deleted
4927 assert self._transient, "Model %s is not transient, it cannot be vacuumed!" % self._name
4928 _transient_check_time = 20 # arbitrary limit on vacuum executions
4929 self._transient_check_count += 1
4930 if not force and (self._transient_check_count < _transient_check_time):
4931 return True # no vacuum cleaning this time
4932 self._transient_check_count = 0
4934 # Age-based expiration
4935 if self._transient_max_hours:
4936 self._transient_clean_rows_older_than(cr, self._transient_max_hours * 60 * 60)
4938 # Count-based expiration
4939 if self._transient_max_count:
4940 self._transient_clean_old_rows(cr, self._transient_max_count)
4944 def resolve_2many_commands(self, cr, uid, field_name, commands, fields=None, context=None):
4945 """ Serializes one2many and many2many commands into record dictionaries
4946 (as if all the records came from the database via a read()). This
4947 method is aimed at onchange methods on one2many and many2many fields.
4949 Because commands might be creation commands, not all record dicts
4950 will contain an ``id`` field. Commands matching an existing record
4951 will have an ``id``.
4953 :param field_name: name of the one2many or many2many field matching the commands
4954 :type field_name: str
4955 :param commands: one2many or many2many commands to execute on ``field_name``
4956 :type commands: list((int|False, int|False, dict|False))
4957 :param fields: list of fields to read from the database, when applicable
4958 :type fields: list(str)
4959 :returns: records in a shape similar to that returned by ``read()``
4960 (except records may be missing the ``id`` field if they don't exist in db)
4963 result = [] # result (list of dict)
4964 record_ids = [] # ids of records to read
4965 updates = {} # {id: dict} of updates on particular records
4967 for command in commands or []:
4968 if not isinstance(command, (list, tuple)):
4969 record_ids.append(command)
4970 elif command[0] == 0:
4971 result.append(command[2])
4972 elif command[0] == 1:
4973 record_ids.append(command[1])
4974 updates.setdefault(command[1], {}).update(command[2])
4975 elif command[0] in (2, 3):
4976 record_ids = [id for id in record_ids if id != command[1]]
4977 elif command[0] == 4:
4978 record_ids.append(command[1])
4979 elif command[0] == 5:
4980 result, record_ids = [], []
4981 elif command[0] == 6:
4982 result, record_ids = [], list(command[2])
4984 # read the records and apply the updates
4985 other_model = self.pool[self._all_columns[field_name].column._obj]
4986 for record in other_model.read(cr, uid, record_ids, fields=fields, context=context):
4987 record.update(updates.get(record['id'], {}))
4988 result.append(record)
4992 # for backward compatibility
4993 resolve_o2m_commands_to_record_dicts = resolve_2many_commands
4995 def search_read(self, cr, uid, domain=None, fields=None, offset=0, limit=None, order=None, context=None):
4997 Performs a ``search()`` followed by a ``read()``.
4999 :param cr: database cursor
5000 :param user: current user id
5001 :param domain: Search domain, see ``args`` parameter in ``search()``. Defaults to an empty domain that will match all records.
5002 :param fields: List of fields to read, see ``fields`` parameter in ``read()``. Defaults to all fields.
5003 :param offset: Number of records to skip, see ``offset`` parameter in ``search()``. Defaults to 0.
5004 :param limit: Maximum number of records to return, see ``limit`` parameter in ``search()``. Defaults to no limit.
5005 :param order: Columns to sort result, see ``order`` parameter in ``search()``. Defaults to no sort.
5006 :param context: context arguments.
5007 :return: List of dictionaries containing the asked fields.
5008 :rtype: List of dictionaries.
5011 record_ids = self.search(cr, uid, domain or [], offset=offset, limit=limit, order=order, context=context)
5015 if fields and fields == ['id']:
5016 # shortcut read if we only want the ids
5017 return [{'id': id} for id in record_ids]
5019 # read() ignores active_test, but it would forward it to any downstream search call
5020 # (e.g. for x2m or function fields), and this is not the desired behavior, the flag
5021 # was presumably only meant for the main search().
5022 # TODO: Move this to read() directly?
5023 read_ctx = dict(context or {})
5024 read_ctx.pop('active_test', None)
5026 result = self.read(cr, uid, record_ids, fields, context=read_ctx)
5027 if len(result) <= 1:
5031 index = dict((r['id'], r) for r in result)
5032 return [index[x] for x in record_ids if x in index]
5034 def _register_hook(self, cr):
5035 """ stuff to do right after the registry is built """
5039 def _patch_method(cls, name, method):
5040 """ Monkey-patch a method for all instances of this model. This replaces
5041 the method called `name` by `method` in the given class.
5042 The original method is then accessible via ``method.origin``, and it
5043 can be restored with :meth:`~._revert_method`.
5048 def do_write(self, values):
5049 # do stuff, and call the original method
5050 return do_write.origin(self, values)
5052 # patch method write of model
5053 model._patch_method('write', do_write)
5055 # this will call do_write
5056 records = model.search([...])
5059 # restore the original method
5060 model._revert_method('write')
5062 origin = getattr(cls, name)
5063 method.origin = origin
5064 # propagate decorators from origin to method, and apply api decorator
5065 wrapped = api.guess(api.propagate(origin, method))
5066 wrapped.origin = origin
5067 setattr(cls, name, wrapped)
5070 def _revert_method(cls, name):
5071 """ Revert the original method called `name` in the given class.
5072 See :meth:`~._patch_method`.
5074 method = getattr(cls, name)
5075 setattr(cls, name, method.origin)
5080 # An instance represents an ordered collection of records in a given
5081 # execution environment. The instance object refers to the environment, and
5082 # the records themselves are represented by their cache dictionary. The 'id'
5083 # of each record is found in its corresponding cache dictionary.
5085 # This design has the following advantages:
5086 # - cache access is direct and thus fast;
5087 # - one can consider records without an 'id' (see new records);
5088 # - the global cache is only an index to "resolve" a record 'id'.
5092 def _browse(cls, env, ids):
5093 """ Create an instance attached to `env`; `ids` is a tuple of record
5096 records = object.__new__(cls)
5099 env.prefetch[cls._name].update(ids)
5103 def browse(self, cr, uid, arg=None, context=None):
5104 ids = _normalize_ids(arg)
5105 #assert all(isinstance(id, IdType) for id in ids), "Browsing invalid ids: %s" % ids
5106 return self._browse(Environment(cr, uid, context or {}), ids)
5109 def browse(self, arg=None):
5110 """ browse([ids]) -> records
5112 Returns a recordset for the ids provided as parameter in the current
5115 Can take no ids, a single id or a sequence of ids.
5117 ids = _normalize_ids(arg)
5118 #assert all(isinstance(id, IdType) for id in ids), "Browsing invalid ids: %s" % ids
5119 return self._browse(self.env, ids)
5122 # Internal properties, for manipulating the instance's implementation
5127 """ List of actual record ids in this recordset (ignores placeholder
5128 ids for records to create)
5130 return filter(None, list(self._ids))
5132 # backward-compatibility with former browse records
5133 _cr = property(lambda self: self.env.cr)
5134 _uid = property(lambda self: self.env.uid)
5135 _context = property(lambda self: self.env.context)
5138 # Conversion methods
5141 def ensure_one(self):
5142 """ Verifies that the current recorset holds a single record. Raises
5143 an exception otherwise.
5147 raise except_orm("ValueError", "Expected singleton: %s" % self)
5149 def with_env(self, env):
5150 """ Returns a new version of this recordset attached to the provided
5153 :type env: :class:`~openerp.api.Environment`
5155 return self._browse(env, self._ids)
5157 def sudo(self, user=SUPERUSER_ID):
5158 """ sudo([user=SUPERUSER])
5160 Returns a new version of this recordset attached to the provided
5163 return self.with_env(self.env(user=user))
5165 def with_context(self, *args, **kwargs):
5166 """ with_context([context][, **overrides]) -> records
5168 Returns a new version of this recordset attached to an extended
5171 The extended context is either the provided ``context`` in which
5172 ``overrides`` are merged or the *current* context in which
5173 ``overrides`` are merged e.g.::
5175 # current context is {'key1': True}
5176 r2 = records.with_context({}, key2=True)
5177 # -> r2._context is {'key2': True}
5178 r2 = records.with_context(key2=True)
5179 # -> r2._context is {'key1': True, 'key2': True}
5181 context = dict(args[0] if args else self._context, **kwargs)
5182 return self.with_env(self.env(context=context))
5184 def _convert_to_cache(self, values, update=False, validate=True):
5185 """ Convert the `values` dictionary into cached values.
5187 :param update: whether the conversion is made for updating `self`;
5188 this is necessary for interpreting the commands of *2many fields
5189 :param validate: whether values must be checked
5191 fields = self._fields
5192 target = self if update else self.browse()
5194 name: fields[name].convert_to_cache(value, target, validate=validate)
5195 for name, value in values.iteritems()
5199 def _convert_to_write(self, values):
5200 """ Convert the `values` dictionary into the format of :meth:`write`. """
5201 fields = self._fields
5203 for name, value in values.iteritems():
5205 value = fields[name].convert_to_write(value)
5206 if not isinstance(value, NewId):
5207 result[name] = value
5211 # Record traversal and update
5214 def _mapped_func(self, func):
5215 """ Apply function `func` on all records in `self`, and return the
5216 result as a list or a recordset (if `func` return recordsets).
5218 vals = [func(rec) for rec in self]
5219 val0 = vals[0] if vals else func(self)
5220 if isinstance(val0, BaseModel):
5221 return reduce(operator.or_, vals, val0)
5224 def mapped(self, func):
5225 """ Apply `func` on all records in `self`, and return the result as a
5226 list or a recordset (if `func` return recordsets). In the latter
5227 case, the order of the returned recordset is arbritrary.
5229 :param func: a function or a dot-separated sequence of field names
5231 if isinstance(func, basestring):
5233 for name in func.split('.'):
5234 recs = recs._mapped_func(operator.itemgetter(name))
5237 return self._mapped_func(func)
5239 def _mapped_cache(self, name_seq):
5240 """ Same as `~.mapped`, but `name_seq` is a dot-separated sequence of
5241 field names, and only cached values are used.
5244 for name in name_seq.split('.'):
5245 field = recs._fields[name]
5246 null = field.null(self.env)
5247 recs = recs.mapped(lambda rec: rec._cache.get(field, null))
5250 def filtered(self, func):
5251 """ Select the records in `self` such that `func(rec)` is true, and
5252 return them as a recordset.
5254 :param func: a function or a dot-separated sequence of field names
5256 if isinstance(func, basestring):
5258 func = lambda rec: filter(None, rec.mapped(name))
5259 return self.browse([rec.id for rec in self if func(rec)])
5261 def sorted(self, key=None):
5262 """ Return the recordset `self` ordered by `key` """
5264 return self.search([('id', 'in', self.ids)])
5266 return self.browse(map(int, sorted(self, key=key)))
5268 def update(self, values):
5269 """ Update record `self[0]` with `values`. """
5270 for name, value in values.iteritems():
5274 # New records - represent records that do not exist in the database yet;
5275 # they are used to perform onchanges.
5279 def new(self, values={}):
5280 """ new([values]) -> record
5282 Return a new record instance attached to the current environment and
5283 initialized with the provided ``value``. The record is *not* created
5284 in database, it only exists in memory.
5286 record = self.browse([NewId()])
5287 record._cache.update(record._convert_to_cache(values, update=True))
5289 if record.env.in_onchange:
5290 # The cache update does not set inverse fields, so do it manually.
5291 # This is useful for computing a function field on secondary
5292 # records, if that field depends on the main record.
5294 field = self._fields.get(name)
5296 for invf in field.inverse_fields:
5297 invf._update(record[name], record)
5302 # Dirty flag, to mark records modified (in draft mode)
5307 """ Return whether any record in `self` is dirty. """
5308 dirty = self.env.dirty
5309 return any(record in dirty for record in self)
5312 def _dirty(self, value):
5313 """ Mark the records in `self` as dirty. """
5315 map(self.env.dirty.add, self)
5317 map(self.env.dirty.discard, self)
5323 def __nonzero__(self):
5324 """ Test whether `self` is nonempty. """
5325 return bool(getattr(self, '_ids', True))
5328 """ Return the size of `self`. """
5329 return len(self._ids)
5332 """ Return an iterator over `self`. """
5333 for id in self._ids:
5334 yield self._browse(self.env, (id,))
5336 def __contains__(self, item):
5337 """ Test whether `item` (record or field name) is an element of `self`.
5338 In the first case, the test is fully equivalent to::
5340 any(item == record for record in self)
5342 if isinstance(item, BaseModel) and self._name == item._name:
5343 return len(item) == 1 and item.id in self._ids
5344 elif isinstance(item, basestring):
5345 return item in self._fields
5347 raise except_orm("ValueError", "Mixing apples and oranges: %s in %s" % (item, self))
5349 def __add__(self, other):
5350 """ Return the concatenation of two recordsets. """
5351 if not isinstance(other, BaseModel) or self._name != other._name:
5352 raise except_orm("ValueError", "Mixing apples and oranges: %s + %s" % (self, other))
5353 return self.browse(self._ids + other._ids)
5355 def __sub__(self, other):
5356 """ Return the recordset of all the records in `self` that are not in `other`. """
5357 if not isinstance(other, BaseModel) or self._name != other._name:
5358 raise except_orm("ValueError", "Mixing apples and oranges: %s - %s" % (self, other))
5359 other_ids = set(other._ids)
5360 return self.browse([id for id in self._ids if id not in other_ids])
5362 def __and__(self, other):
5363 """ Return the intersection of two recordsets.
5364 Note that recordset order is not preserved.
5366 if not isinstance(other, BaseModel) or self._name != other._name:
5367 raise except_orm("ValueError", "Mixing apples and oranges: %s & %s" % (self, other))
5368 return self.browse(set(self._ids) & set(other._ids))
5370 def __or__(self, other):
5371 """ Return the union of two recordsets.
5372 Note that recordset order is not preserved.
5374 if not isinstance(other, BaseModel) or self._name != other._name:
5375 raise except_orm("ValueError", "Mixing apples and oranges: %s | %s" % (self, other))
5376 return self.browse(set(self._ids) | set(other._ids))
5378 def __eq__(self, other):
5379 """ Test whether two recordsets are equivalent (up to reordering). """
5380 if not isinstance(other, BaseModel):
5382 _logger.warning("Comparing apples and oranges: %s == %s", self, other)
5384 return self._name == other._name and set(self._ids) == set(other._ids)
5386 def __ne__(self, other):
5387 return not self == other
5389 def __lt__(self, other):
5390 if not isinstance(other, BaseModel) or self._name != other._name:
5391 raise except_orm("ValueError", "Mixing apples and oranges: %s < %s" % (self, other))
5392 return set(self._ids) < set(other._ids)
5394 def __le__(self, other):
5395 if not isinstance(other, BaseModel) or self._name != other._name:
5396 raise except_orm("ValueError", "Mixing apples and oranges: %s <= %s" % (self, other))
5397 return set(self._ids) <= set(other._ids)
5399 def __gt__(self, other):
5400 if not isinstance(other, BaseModel) or self._name != other._name:
5401 raise except_orm("ValueError", "Mixing apples and oranges: %s > %s" % (self, other))
5402 return set(self._ids) > set(other._ids)
5404 def __ge__(self, other):
5405 if not isinstance(other, BaseModel) or self._name != other._name:
5406 raise except_orm("ValueError", "Mixing apples and oranges: %s >= %s" % (self, other))
5407 return set(self._ids) >= set(other._ids)
5413 return "%s%s" % (self._name, getattr(self, '_ids', ""))
5415 def __unicode__(self):
5416 return unicode(str(self))
5421 if hasattr(self, '_ids'):
5422 return hash((self._name, frozenset(self._ids)))
5424 return hash(self._name)
5426 def __getitem__(self, key):
5427 """ If `key` is an integer or a slice, return the corresponding record
5428 selection as an instance (attached to `self.env`).
5429 Otherwise read the field `key` of the first record in `self`.
5433 inst = model.search(dom) # inst is a recordset
5434 r4 = inst[3] # fourth record in inst
5435 rs = inst[10:20] # subset of inst
5436 nm = rs['name'] # name of first record in inst
5438 if isinstance(key, basestring):
5439 # important: one must call the field's getter
5440 return self._fields[key].__get__(self, type(self))
5441 elif isinstance(key, slice):
5442 return self._browse(self.env, self._ids[key])
5444 return self._browse(self.env, (self._ids[key],))
5446 def __setitem__(self, key, value):
5447 """ Assign the field `key` to `value` in record `self`. """
5448 # important: one must call the field's setter
5449 return self._fields[key].__set__(self, value)
5452 # Cache and recomputation management
5457 """ Return the cache of `self`, mapping field names to values. """
5458 return RecordCache(self)
5461 def _in_cache_without(self, field):
5462 """ Make sure `self` is present in cache (for prefetching), and return
5463 the records of model `self` in cache that have no value for `field`
5464 (:class:`Field` instance).
5467 prefetch_ids = env.prefetch[self._name]
5468 prefetch_ids.update(self._ids)
5469 ids = filter(None, prefetch_ids - set(env.cache[field]))
5470 return self.browse(ids)
5474 """ Clear the records cache.
5477 The record cache is automatically invalidated.
5479 self.invalidate_cache()
5482 def invalidate_cache(self, fnames=None, ids=None):
5483 """ Invalidate the record caches after some records have been modified.
5484 If both `fnames` and `ids` are ``None``, the whole cache is cleared.
5486 :param fnames: the list of modified fields, or ``None`` for all fields
5487 :param ids: the list of modified record ids, or ``None`` for all
5491 return self.env.invalidate_all()
5492 fields = self._fields.values()
5494 fields = map(self._fields.__getitem__, fnames)
5496 # invalidate fields and inverse fields, too
5497 spec = [(f, ids) for f in fields] + \
5498 [(invf, None) for f in fields for invf in f.inverse_fields]
5499 self.env.invalidate(spec)
5502 def modified(self, fnames):
5503 """ Notify that fields have been modified on `self`. This invalidates
5504 the cache, and prepares the recomputation of stored function fields
5505 (new-style fields only).
5507 :param fnames: iterable of field names that have been modified on
5510 # each field knows what to invalidate and recompute
5512 for fname in fnames:
5513 spec += self._fields[fname].modified(self)
5517 for env in self.env.all
5518 for field in env.cache
5520 # invalidate non-stored fields.function which are currently cached
5521 spec += [(f, None) for f in self.pool.pure_function_fields
5522 if f in cached_fields]
5524 self.env.invalidate(spec)
5526 def _recompute_check(self, field):
5527 """ If `field` must be recomputed on some record in `self`, return the
5528 corresponding records that must be recomputed.
5530 return self.env.check_todo(field, self)
5532 def _recompute_todo(self, field):
5533 """ Mark `field` to be recomputed. """
5534 self.env.add_todo(field, self)
5536 def _recompute_done(self, field):
5537 """ Mark `field` as recomputed. """
5538 self.env.remove_todo(field, self)
5541 def recompute(self):
5542 """ Recompute stored function fields. The fields and records to
5543 recompute have been determined by method :meth:`modified`.
5545 while self.env.has_todo():
5546 field, recs = self.env.get_todo()
5547 # evaluate the fields to recompute, and save them to database
5548 for rec, rec1 in zip(recs, recs.with_context(recompute=False)):
5550 values = rec._convert_to_write({
5551 f.name: rec[f.name] for f in field.computed_fields
5554 except MissingError:
5556 # mark the computed fields as done
5557 map(recs._recompute_done, field.computed_fields)
5560 # Generic onchange method
5563 def _has_onchange(self, field, other_fields):
5564 """ Return whether `field` should trigger an onchange event in the
5565 presence of `other_fields`.
5567 # test whether self has an onchange method for field, or field is a
5568 # dependency of any field in other_fields
5569 return field.name in self._onchange_methods or \
5570 any(dep in other_fields for dep in field.dependents)
5573 def _onchange_spec(self, view_info=None):
5574 """ Return the onchange spec from a view description; if not given, the
5575 result of ``self.fields_view_get()`` is used.
5579 # for traversing the XML arch and populating result
5580 def process(node, info, prefix):
5581 if node.tag == 'field':
5582 name = node.attrib['name']
5583 names = "%s.%s" % (prefix, name) if prefix else name
5584 if not result.get(names):
5585 result[names] = node.attrib.get('on_change')
5586 # traverse the subviews included in relational fields
5587 for subinfo in info['fields'][name].get('views', {}).itervalues():
5588 process(etree.fromstring(subinfo['arch']), subinfo, names)
5591 process(child, info, prefix)
5593 if view_info is None:
5594 view_info = self.fields_view_get()
5595 process(etree.fromstring(view_info['arch']), view_info, '')
5598 def _onchange_eval(self, field_name, onchange, result):
5599 """ Apply onchange method(s) for field `field_name` with spec `onchange`
5600 on record `self`. Value assignments are applied on `self`, while
5601 domain and warning messages are put in dictionary `result`.
5603 onchange = onchange.strip()
5606 if onchange in ("1", "true"):
5607 for method in self._onchange_methods.get(field_name, ()):
5608 method_res = method(self)
5611 if 'domain' in method_res:
5612 result.setdefault('domain', {}).update(method_res['domain'])
5613 if 'warning' in method_res:
5614 result['warning'] = method_res['warning']
5618 match = onchange_v7.match(onchange)
5620 method, params = match.groups()
5622 # evaluate params -> tuple
5623 global_vars = {'context': self._context, 'uid': self._uid}
5624 if self._context.get('field_parent'):
5625 class RawRecord(object):
5626 def __init__(self, record):
5627 self._record = record
5628 def __getattr__(self, name):
5629 field = self._record._fields[name]
5630 value = self._record[name]
5631 return field.convert_to_onchange(value)
5632 record = self[self._context['field_parent']]
5633 global_vars['parent'] = RawRecord(record)
5635 key: self._fields[key].convert_to_onchange(val)
5636 for key, val in self._cache.iteritems()
5638 params = eval("[%s]" % params, global_vars, field_vars)
5640 # call onchange method
5641 args = (self._cr, self._uid, self._origin.ids) + tuple(params)
5642 method_res = getattr(self._model, method)(*args)
5643 if not isinstance(method_res, dict):
5645 if 'value' in method_res:
5646 method_res['value'].pop('id', None)
5647 self.update(self._convert_to_cache(method_res['value'], validate=False))
5648 if 'domain' in method_res:
5649 result.setdefault('domain', {}).update(method_res['domain'])
5650 if 'warning' in method_res:
5651 result['warning'] = method_res['warning']
5654 def onchange(self, values, field_name, field_onchange):
5655 """ Perform an onchange on the given field.
5657 :param values: dictionary mapping field names to values, giving the
5658 current state of modification
5659 :param field_name: name of the modified field_name
5660 :param field_onchange: dictionary mapping field names to their
5665 if field_name and field_name not in self._fields:
5668 # determine subfields for field.convert_to_write() below
5670 subfields = defaultdict(set)
5671 for dotname in field_onchange:
5673 secondary.append(dotname)
5674 name, subname = dotname.split('.')
5675 subfields[name].add(subname)
5677 # create a new record with values, and attach `self` to it
5678 with env.do_in_onchange():
5679 record = self.new(values)
5680 values = dict(record._cache)
5681 # attach `self` with a different context (for cache consistency)
5682 record._origin = self.with_context(__onchange=True)
5684 # determine which field should be triggered an onchange
5685 todo = set([field_name]) if field_name else set(values)
5688 # dummy assignment: trigger invalidations on the record
5690 value = record[name]
5691 field = self._fields[name]
5692 if not field_name and field.type == 'many2one' and field.delegate and not value:
5693 # do not nullify all fields of parent record for new records
5695 record[name] = value
5697 result = {'value': {}}
5705 with env.do_in_onchange():
5706 # apply field-specific onchange methods
5707 if field_onchange.get(name):
5708 record._onchange_eval(name, field_onchange[name], result)
5710 # force re-evaluation of function fields on secondary records
5711 for field_seq in secondary:
5712 record.mapped(field_seq)
5714 # determine which fields have been modified
5715 for name, oldval in values.iteritems():
5716 field = self._fields[name]
5717 newval = record[name]
5718 if field.type in ('one2many', 'many2many'):
5719 if newval != oldval or newval._dirty:
5720 # put new value in result
5721 result['value'][name] = field.convert_to_write(
5722 newval, record._origin, subfields.get(name),
5726 # keep result: newval may have been dirty before
5729 if newval != oldval:
5730 # put new value in result
5731 result['value'][name] = field.convert_to_write(
5732 newval, record._origin, subfields.get(name),
5736 # clean up result to not return another value
5737 result['value'].pop(name, None)
5739 # At the moment, the client does not support updates on a *2many field
5740 # while this one is modified by the user.
5741 if field_name and self._fields[field_name].type in ('one2many', 'many2many'):
5742 result['value'].pop(field_name, None)
5747 class RecordCache(MutableMapping):
5748 """ Implements a proxy dictionary to read/update the cache of a record.
5749 Upon iteration, it looks like a dictionary mapping field names to
5750 values. However, fields may be used as keys as well.
5752 def __init__(self, records):
5753 self._recs = records
5755 def contains(self, field):
5756 """ Return whether `records[0]` has a value for `field` in cache. """
5757 if isinstance(field, basestring):
5758 field = self._recs._fields[field]
5759 return self._recs.id in self._recs.env.cache[field]
5761 def __contains__(self, field):
5762 """ Return whether `records[0]` has a regular value for `field` in cache. """
5763 if isinstance(field, basestring):
5764 field = self._recs._fields[field]
5765 dummy = SpecialValue(None)
5766 value = self._recs.env.cache[field].get(self._recs.id, dummy)
5767 return not isinstance(value, SpecialValue)
5769 def __getitem__(self, field):
5770 """ Return the cached value of `field` for `records[0]`. """
5771 if isinstance(field, basestring):
5772 field = self._recs._fields[field]
5773 value = self._recs.env.cache[field][self._recs.id]
5774 return value.get() if isinstance(value, SpecialValue) else value
5776 def __setitem__(self, field, value):
5777 """ Assign the cached value of `field` for all records in `records`. """
5778 if isinstance(field, basestring):
5779 field = self._recs._fields[field]
5780 values = dict.fromkeys(self._recs._ids, value)
5781 self._recs.env.cache[field].update(values)
5783 def update(self, *args, **kwargs):
5784 """ Update the cache of all records in `records`. If the argument is a
5785 `SpecialValue`, update all fields (except "magic" columns).
5787 if args and isinstance(args[0], SpecialValue):
5788 values = dict.fromkeys(self._recs._ids, args[0])
5789 for name, field in self._recs._fields.iteritems():
5791 self._recs.env.cache[field].update(values)
5793 return super(RecordCache, self).update(*args, **kwargs)
5795 def __delitem__(self, field):
5796 """ Remove the cached value of `field` for all `records`. """
5797 if isinstance(field, basestring):
5798 field = self._recs._fields[field]
5799 field_cache = self._recs.env.cache[field]
5800 for id in self._recs._ids:
5801 field_cache.pop(id, None)
5804 """ Iterate over the field names with a regular value in cache. """
5805 cache, id = self._recs.env.cache, self._recs.id
5806 dummy = SpecialValue(None)
5807 for name, field in self._recs._fields.iteritems():
5808 if name != 'id' and not isinstance(cache[field].get(id, dummy), SpecialValue):
5812 """ Return the number of fields with a regular value in cache. """
5813 return sum(1 for name in self)
5815 class Model(BaseModel):
5816 """Main super-class for regular database-persisted OpenERP models.
5818 OpenERP models are created by inheriting from this class::
5823 The system will later instantiate the class once per database (on
5824 which the class' module is installed).
5827 _register = False # not visible in ORM registry, meant to be python-inherited only
5828 _transient = False # True in a TransientModel
5830 class TransientModel(BaseModel):
5831 """Model super-class for transient records, meant to be temporarily
5832 persisted, and regularly vaccuum-cleaned.
5834 A TransientModel has a simplified access rights management,
5835 all users can create new records, and may only access the
5836 records they created. The super-user has unrestricted access
5837 to all TransientModel records.
5840 _register = False # not visible in ORM registry, meant to be python-inherited only
5843 class AbstractModel(BaseModel):
5844 """Abstract Model super-class for creating an abstract class meant to be
5845 inherited by regular models (Models or TransientModels) but not meant to
5846 be usable on its own, or persisted.
5848 Technical note: we don't want to make AbstractModel the super-class of
5849 Model or BaseModel because it would not make sense to put the main
5850 definition of persistence methods such as create() in it, and still we
5851 should be able to override them within an AbstractModel.
5853 _auto = False # don't create any database backend for AbstractModels
5854 _register = False # not visible in ORM registry, meant to be python-inherited only
5857 def itemgetter_tuple(items):
5858 """ Fixes itemgetter inconsistency (useful in some cases) of not returning
5859 a tuple if len(items) == 1: always returns an n-tuple where n = len(items)
5864 return lambda gettable: (gettable[items[0]],)
5865 return operator.itemgetter(*items)
5867 def convert_pgerror_23502(model, fields, info, e):
5868 m = re.match(r'^null value in column "(?P<field>\w+)" violates '
5869 r'not-null constraint\n',
5871 field_name = m and m.group('field')
5872 if not m or field_name not in fields:
5873 return {'message': unicode(e)}
5874 message = _(u"Missing required value for the field '%s'.") % field_name
5875 field = fields.get(field_name)
5877 message = _(u"Missing required value for the field '%s' (%s)") % (field['string'], field_name)
5880 'field': field_name,
5883 def convert_pgerror_23505(model, fields, info, e):
5884 m = re.match(r'^duplicate key (?P<field>\w+) violates unique constraint',
5886 field_name = m and m.group('field')
5887 if not m or field_name not in fields:
5888 return {'message': unicode(e)}
5889 message = _(u"The value for the field '%s' already exists.") % field_name
5890 field = fields.get(field_name)
5892 message = _(u"%s This might be '%s' in the current model, or a field "
5893 u"of the same name in an o2m.") % (message, field['string'])
5896 'field': field_name,
5899 PGERROR_TO_OE = defaultdict(
5900 # shape of mapped converters
5901 lambda: (lambda model, fvg, info, pgerror: {'message': unicode(pgerror)}), {
5902 # not_null_violation
5903 '23502': convert_pgerror_23502,
5904 # unique constraint error
5905 '23505': convert_pgerror_23505,
5908 def _normalize_ids(arg, atoms={int, long, str, unicode, NewId}):
5909 """ Normalizes the ids argument for ``browse`` (v7 and v8) to a tuple.
5911 Various implementations were tested on the corpus of all browse() calls
5912 performed during a full crawler run (after having installed all website_*
5913 modules) and this one was the most efficient overall.
5915 A possible bit of correctness was sacrificed by not doing any test on
5916 Iterable and just assuming that any non-atomic type was an iterable of
5921 # much of the corpus is falsy objects (empty list, tuple or set, None)
5925 # `type in set` is significantly faster (because more restrictive) than
5926 # isinstance(arg, set) or issubclass(type, set); and for new-style classes
5927 # obj.__class__ is equivalent to but faster than type(obj). Not relevant
5928 # (and looks much worse) in most cases, but over millions of calls it
5929 # does have a very minor effect.
5930 if arg.__class__ in atoms:
5935 # keep those imports here to avoid dependency cycle errors
5936 from .osv import expression
5937 from .fields import Field, SpecialValue, FailedValue
5939 # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: