1 # -*- coding: utf-8 -*-
2 ##############################################################################
4 # OpenERP, Open Source Management Solution
5 # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
7 # This program is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU Affero General Public License as
9 # published by the Free Software Foundation, either version 3 of the
10 # License, or (at your option) any later version.
12 # This program is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU Affero General Public License for more details.
17 # You should have received a copy of the GNU Affero General Public License
18 # along with this program. If not, see <http://www.gnu.org/licenses/>.
20 ##############################################################################
24 Object Relational Mapping module:
25 * Hierarchical structure
26 * Constraints consistency and validation
27 * Object metadata depends on its status
28 * Optimised processing by complex query (multiple actions at once)
29 * Default field values
30 * Permissions optimisation
    * Persistent object: PostgreSQL DB
33 * Multi-level caching system
34 * Two different inheritance mechanisms
35 * Rich set of field types:
36 - classical (varchar, integer, boolean, ...)
37 - relational (one2many, many2one, many2many)
import datetime
import itertools
import logging
import pickle
import re

import psycopg2

from collections import defaultdict, MutableMapping
52 from inspect import getmembers
55 import dateutil.relativedelta
57 from lxml import etree
60 from . import SUPERUSER_ID
from . import api
from . import tools
from .api import Environment
64 from .exceptions import except_orm, AccessError, MissingError, ValidationError
65 from .osv import fields
66 from .osv.query import Query
67 from .tools import lazy_property, ormcache
68 from .tools.config import config
69 from .tools.misc import CountingStream, DEFAULT_SERVER_DATETIME_FORMAT, DEFAULT_SERVER_DATE_FORMAT
70 from .tools.safe_eval import safe_eval as eval
71 from .tools.translate import _
73 _logger = logging.getLogger(__name__)
74 _schema = logging.getLogger(__name__ + '.schema')
76 regex_order = re.compile('^( *([a-z0-9:_]+|"[a-z0-9:_]+")( *desc| *asc)?( *, *|))+$', re.I)
77 regex_object_name = re.compile(r'^[a-z0-9_.]+$')
78 onchange_v7 = re.compile(r"^(\w+)\((.*)\)$")
80 AUTOINIT_RECALCULATE_STORED_FIELDS = 1000
83 def check_object_name(name):
84 """ Check if the given name is a valid openerp object name.
    The _name attribute in osv and osv_memory objects is subject to
    some restrictions. This function returns True if the given name
    is allowed, and False otherwise.
    TODO: this is an approximation. The goal of this approximation
    is to disallow uppercase characters (in some places we quote
    table/column names and in others not, which otherwise leads to
    errors such as psycopg2.ProgrammingError: relation "xxx" does not exist).
97 The same restriction should apply to both osv and osv_memory
98 objects for consistency.
    if regex_object_name.match(name) is None:
        return False
    return True
105 def raise_on_invalid_object_name(name):
106 if not check_object_name(name):
107 msg = "The _name attribute %s is not valid." % name
109 raise except_orm('ValueError', msg)
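# Usage sketch (illustrative, not in the original source):
#   >>> check_object_name('res.partner')
#   True
#   >>> check_object_name('Res.Partner')        # uppercase is rejected
#   False
#   >>> raise_on_invalid_object_name('Res.Partner')   # raises except_orm('ValueError', ...)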
POSTGRES_CONFDELTYPES = {
    'RESTRICT': 'r',
    'NO ACTION': 'a',
    'CASCADE': 'c',
    'SET NULL': 'n',
    'SET DEFAULT': 'd',
}
119 def intersect(la, lb):
120 return filter(lambda x: x in lb, la)
123 """ Test whether functions `f` and `g` are identical or have the same name """
124 return f == g or getattr(f, '__name__', 0) == getattr(g, '__name__', 1)
126 def fix_import_export_id_paths(fieldname):
128 Fixes the id fields in import and exports, and splits field paths
131 :param str fieldname: name of the field to import/export
132 :return: split field name
135 fixed_db_id = re.sub(r'([^/])\.id', r'\1/.id', fieldname)
136 fixed_external_id = re.sub(r'([^/]):id', r'\1/id', fixed_db_id)
137 return fixed_external_id.split('/')
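# Illustrative behaviour (not in the original source):
#   fix_import_export_id_paths('order_line/product_id/id') -> ['order_line', 'product_id', 'id']
#   fix_import_export_id_paths('partner_id:id')            -> ['partner_id', 'id']   (external id shorthand)
#   fix_import_export_id_paths('order_line.id')            -> ['order_line', '.id']  (database id shorthand)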
139 def pg_varchar(size=0):
140 """ Returns the VARCHAR declaration for the provided size:
    * If no size (or an empty or negative size) is provided, return
      an unbounded VARCHAR
    * Otherwise return a VARCHAR(n)
    :param int size: varchar size, optional
    if size:
        if not isinstance(size, int):
            raise TypeError("VARCHAR parameter should be an int, got %s"
                            % type(size))
        if size > 0:
            return 'VARCHAR(%d)' % size
    return 'VARCHAR'
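# Examples (illustrative): pg_varchar(16) -> 'VARCHAR(16)', pg_varchar() -> 'VARCHAR',
# pg_varchar('16') -> TypeError (the size must be an int).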
157 FIELDS_TO_PGTYPES = {
158 fields.boolean: 'bool',
159 fields.integer: 'int4',
163 fields.datetime: 'timestamp',
164 fields.binary: 'bytea',
165 fields.many2one: 'int4',
    fields.serialized: 'text',
}
169 def get_pg_type(f, type_override=None):
171 :param fields._column f: field to get a Postgres type for
172 :param type type_override: use the provided type for dispatching instead of the field's own type
173 :returns: (postgres_identification_type, postgres_type_specification)
176 field_type = type_override or type(f)
178 if field_type in FIELDS_TO_PGTYPES:
179 pg_type = (FIELDS_TO_PGTYPES[field_type], FIELDS_TO_PGTYPES[field_type])
    elif issubclass(field_type, fields.float):
        if f.digits:
            pg_type = ('numeric', 'NUMERIC')
        else:
            pg_type = ('float8', 'DOUBLE PRECISION')
185 elif issubclass(field_type, (fields.char, fields.reference)):
186 pg_type = ('varchar', pg_varchar(f.size))
187 elif issubclass(field_type, fields.selection):
188 if (isinstance(f.selection, list) and isinstance(f.selection[0][0], int))\
189 or getattr(f, 'size', None) == -1:
            pg_type = ('int4', 'INTEGER')
        else:
            pg_type = ('varchar', pg_varchar(getattr(f, 'size', None)))
193 elif issubclass(field_type, fields.function):
194 if f._type == 'selection':
            pg_type = ('varchar', pg_varchar())
        else:
            pg_type = get_pg_type(f, getattr(fields, f._type))
    else:
        _logger.warning('%s type not supported!', field_type)
        pg_type = None

    return pg_type
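# Sketch of typical results (illustrative, assuming standard column definitions):
#   get_pg_type(fields.boolean())              -> ('bool', 'bool')
#   get_pg_type(fields.char(size=16))          -> ('varchar', 'VARCHAR(16)')
#   get_pg_type(fields.float(digits=(16, 2)))  -> ('numeric', 'NUMERIC')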
205 class MetaModel(api.Meta):
206 """ Metaclass for the models.
208 This class is used as the metaclass for the class :class:`BaseModel` to
    discover the models defined in a module (without instantiating them).
210 If the automatic discovery is not needed, it is possible to set the model's
211 ``_register`` attribute to False.
215 module_to_models = {}
217 def __init__(self, name, bases, attrs):
218 if not self._register:
219 self._register = True
            super(MetaModel, self).__init__(name, bases, attrs)
            return
223 if not hasattr(self, '_module'):
224 # The (OpenERP) module name can be in the `openerp.addons` namespace
225 # or not. For instance, module `sale` can be imported as
226 # `openerp.addons.sale` (the right way) or `sale` (for backward
228 module_parts = self.__module__.split('.')
229 if len(module_parts) > 2 and module_parts[:2] == ['openerp', 'addons']:
                module_name = self.__module__.split('.')[2]
            else:
                module_name = self.__module__.split('.')[0]
233 self._module = module_name
        # Remember which models to instantiate for this module.
237 self.module_to_models.setdefault(self._module, []).append(self)
239 # transform columns into new-style fields (enables field inheritance)
240 for name, column in self._columns.iteritems():
241 if name in self.__dict__:
242 _logger.warning("In class %s, field %r overriding an existing value", self, name)
243 setattr(self, name, column.to_field())
247 """ Pseudo-ids for new records. """
248 def __nonzero__(self):
251 IdType = (int, long, basestring, NewId)
254 # maximum number of prefetched records
257 # special columns automatically created by the ORM
258 LOG_ACCESS_COLUMNS = ['create_uid', 'create_date', 'write_uid', 'write_date']
259 MAGIC_COLUMNS = ['id'] + LOG_ACCESS_COLUMNS
261 class BaseModel(object):
262 """ Base class for OpenERP models.
264 OpenERP models are created by inheriting from this class' subclasses:
266 * :class:`Model` for regular database-persisted models
268 * :class:`TransientModel` for temporary data, stored in the database but
      automatically vacuumed every so often
271 * :class:`AbstractModel` for abstract super classes meant to be shared by
      multiple inheriting models
274 The system automatically instantiates every model once per database. Those
275 instances represent the available models on each database, and depend on
276 which modules are installed on that database. The actual class of each
277 instance is built from the Python classes that create and inherit from the
280 Every model instance is a "recordset", i.e., an ordered collection of
281 records of the model. Recordsets are returned by methods like
282 :meth:`~.browse`, :meth:`~.search`, or field accesses. Records have no
283 explicit representation: a record is represented as a recordset of one
286 To create a class that should not be instantiated, the _register class
287 attribute may be set to False.
289 __metaclass__ = MetaModel
290 _auto = True # create database backend
291 _register = False # Set to false if the model shouldn't be automatically discovered.
298 _parent_name = 'parent_id'
299 _parent_store = False
300 _parent_order = False
306 _translate = True # set to False to disable translations export for this model
308 # dict of {field:method}, with method returning the (name_get of records, {id: fold})
309 # to include in the _read_group, if grouped on this field
313 _transient = False # True in a TransientModel
316 # { 'parent_model': 'm2o_field', ... }
    # Mapping from inherits'd field name to 4-tuple (m, r, f, n) where m is the
    # model from which it is inherits'd, r is the (local) field towards m, f
    # is the _column object itself, and n is the original (i.e. top-most)
    # parent model, e.g.:
    # { 'field_name': ('parent_model', 'm2o_field_to_reach_parent',
    #                  field_column_obj, original_parent_model), ... }
328 # Mapping field name/column_info object
329 # This is similar to _inherit_fields but:
330 # 1. includes self fields,
331 # 2. uses column_info instead of a triple.
336 _sql_constraints = []
338 # model dependencies, for models backed up by sql views:
339 # {model_name: field_names, ...}
342 CONCURRENCY_CHECK_FIELD = '__last_update'
344 def log(self, cr, uid, id, message, secondary=False, context=None):
345 return _logger.warning("log() is deprecated. Please use OpenChatter notification system instead of the res.log mechanism.")
347 def view_init(self, cr, uid, fields_list, context=None):
348 """Override this method to do specific things when a view on the object is opened."""
351 def _field_create(self, cr, context=None):
352 """ Create entries in ir_model_fields for all the model's fields.
354 If necessary, also create an entry in ir_model, and if called from the
355 modules loading scheme (by receiving 'module' in the context), also
356 create entries in ir_model_data (for the model and the fields).
358 - create an entry in ir_model (if there is not already one),
359 - create an entry in ir_model_data (if there is not already one, and if
360 'module' is in the context),
361 - update ir_model_fields with the fields found in _columns
362 (TODO there is some redundancy as _columns is updated from
363 ir_model_fields in __init__).
368 cr.execute("SELECT id FROM ir_model WHERE model=%s", (self._name,))
370 cr.execute('SELECT nextval(%s)', ('ir_model_id_seq',))
371 model_id = cr.fetchone()[0]
372 cr.execute("INSERT INTO ir_model (id,model, name, info,state) VALUES (%s, %s, %s, %s, %s)", (model_id, self._name, self._description, self.__doc__, 'base'))
374 model_id = cr.fetchone()[0]
375 if 'module' in context:
376 name_id = 'model_'+self._name.replace('.', '_')
377 cr.execute('select * from ir_model_data where name=%s and module=%s', (name_id, context['module']))
379 cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, (now() at time zone 'UTC'), (now() at time zone 'UTC'), %s, %s, %s)", \
380 (name_id, context['module'], 'ir.model', model_id)
383 cr.execute("SELECT * FROM ir_model_fields WHERE model=%s", (self._name,))
        cols = {}
        for rec in cr.dictfetchall():
386 cols[rec['name']] = rec
388 ir_model_fields_obj = self.pool.get('ir.model.fields')
390 # sparse field should be created at the end, as it depends on its serialized field already existing
391 model_fields = sorted(self._columns.items(), key=lambda x: 1 if x[1]._type == 'sparse' else 0)
392 for (k, f) in model_fields:
394 'model_id': model_id,
397 'field_description': f.string,
399 'relation': f._obj or '',
400 'select_level': tools.ustr(int(f.select)),
401 'readonly': (f.readonly and 1) or 0,
402 'required': (f.required and 1) or 0,
403 'selectable': (f.selectable and 1) or 0,
404 'translate': (f.translate and 1) or 0,
405 'relation_field': f._fields_id if isinstance(f, fields.one2many) else '',
406 'serialization_field_id': None,
408 if getattr(f, 'serialization_field', None):
409 # resolve link to serialization_field if specified by name
410 serialization_field_id = ir_model_fields_obj.search(cr, SUPERUSER_ID, [('model','=',vals['model']), ('name', '=', f.serialization_field)])
411 if not serialization_field_id:
412 raise except_orm(_('Error'), _("Serialization field `%s` not found for sparse field `%s`!") % (f.serialization_field, k))
413 vals['serialization_field_id'] = serialization_field_id[0]
            # When it's a custom field, it does not contain f.select
416 if context.get('field_state', 'base') == 'manual':
417 if context.get('field_name', '') == k:
418 vals['select_level'] = context.get('select', '0')
                # set the value so the problem does NOT occur next time
                elif k in cols:
                    vals['select_level'] = cols[k]['select_level']
424 cr.execute('select nextval(%s)', ('ir_model_fields_id_seq',))
425 id = cr.fetchone()[0]
427 cr.execute("""INSERT INTO ir_model_fields (
428 id, model_id, model, name, field_description, ttype,
429 relation,state,select_level,relation_field, translate, serialization_field_id
431 %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s
433 id, vals['model_id'], vals['model'], vals['name'], vals['field_description'], vals['ttype'],
434 vals['relation'], 'base',
435 vals['select_level'], vals['relation_field'], bool(vals['translate']), vals['serialization_field_id']
437 if 'module' in context:
438 name1 = 'field_' + self._table + '_' + k
439 cr.execute("select name from ir_model_data where name=%s", (name1,))
441 name1 = name1 + "_" + str(id)
442 cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, (now() at time zone 'UTC'), (now() at time zone 'UTC'), %s, %s, %s)", \
443 (name1, context['module'], 'ir.model.fields', id)
446 for key, val in vals.items():
447 if cols[k][key] != vals[key]:
448 cr.execute('update ir_model_fields set field_description=%s where model=%s and name=%s', (vals['field_description'], vals['model'], vals['name']))
449 cr.execute("""UPDATE ir_model_fields SET
450 model_id=%s, field_description=%s, ttype=%s, relation=%s,
451 select_level=%s, readonly=%s ,required=%s, selectable=%s, relation_field=%s, translate=%s, serialization_field_id=%s
453 model=%s AND name=%s""", (
454 vals['model_id'], vals['field_description'], vals['ttype'],
456 vals['select_level'], bool(vals['readonly']), bool(vals['required']), bool(vals['selectable']), vals['relation_field'], bool(vals['translate']), vals['serialization_field_id'], vals['model'], vals['name']
459 self.invalidate_cache(cr, SUPERUSER_ID)
462 def _add_field(cls, name, field):
463 """ Add the given `field` under the given `name` in the class """
464 # add field as an attribute and in cls._fields (for reflection)
465 if not isinstance(getattr(cls, name, field), Field):
466 _logger.warning("In model %r, field %r overriding existing value", cls._name, name)
467 setattr(cls, name, field)
468 cls._fields[name] = field
470 # basic setup of field
471 field.set_class_name(cls, name)
        if field.store:
            cls._columns[name] = field.to_column()
        else:
            # remove potential column that may be overridden by field
            cls._columns.pop(name, None)
480 def _pop_field(cls, name):
481 """ Remove the field with the given `name` from the model.
482 This method should only be used for manual fields.
484 field = cls._fields.pop(name)
485 cls._columns.pop(name, None)
486 cls._all_columns.pop(name, None)
        if hasattr(cls, name):
            delattr(cls, name)
        return field
492 def _add_magic_fields(cls):
493 """ Introduce magic fields on the current class
495 * id is a "normal" field (with a specific getter)
496 * create_uid, create_date, write_uid and write_date have become
498 * $CONCURRENCY_CHECK_FIELD is a computed field with its computing
499 method defined dynamically. Uses ``str(datetime.datetime.utcnow())``
500 to get the same structure as the previous
501 ``(now() at time zone 'UTC')::timestamp``::
503 # select (now() at time zone 'UTC')::timestamp;
505 ----------------------------
506 2013-06-18 08:30:37.292809
508 >>> str(datetime.datetime.utcnow())
509 '2013-06-18 08:31:32.821177'
511 def add(name, field):
512 """ add `field` with the given `name` if it does not exist yet """
513 if name not in cls._columns and name not in cls._fields:
514 cls._add_field(name, field)
519 # this field 'id' must override any other column or field
520 cls._add_field('id', fields.Id(automatic=True))
522 add('display_name', fields.Char(string='Display Name', automatic=True,
523 compute='_compute_display_name'))
526 add('create_uid', fields.Many2one('res.users', string='Created by', automatic=True))
527 add('create_date', fields.Datetime(string='Created on', automatic=True))
528 add('write_uid', fields.Many2one('res.users', string='Last Updated by', automatic=True))
529 add('write_date', fields.Datetime(string='Last Updated on', automatic=True))
530 last_modified_name = 'compute_concurrency_field_with_access'
        else:
            last_modified_name = 'compute_concurrency_field'
534 # this field must override any other column or field
535 cls._add_field(cls.CONCURRENCY_CHECK_FIELD, fields.Datetime(
536 string='Last Modified on', compute=last_modified_name, automatic=True))
539 def compute_concurrency_field(self):
540 self[self.CONCURRENCY_CHECK_FIELD] = \
541 datetime.datetime.utcnow().strftime(DEFAULT_SERVER_DATETIME_FORMAT)
544 @api.depends('create_date', 'write_date')
545 def compute_concurrency_field_with_access(self):
546 self[self.CONCURRENCY_CHECK_FIELD] = \
547 self.write_date or self.create_date or \
548 datetime.datetime.utcnow().strftime(DEFAULT_SERVER_DATETIME_FORMAT)
    # Goal: try to apply inheritance at the instantiation level and
552 # put objects in the pool var
555 def _build_model(cls, pool, cr):
556 """ Instanciate a given model.
558 This class method instanciates the class of some model (i.e. a class
559 deriving from osv or osv_memory). The class might be the class passed
560 in argument or, if it inherits from another class, a class constructed
561 by combining the two classes.
565 # IMPORTANT: the registry contains an instance for each model. The class
566 # of each model carries inferred metadata that is shared among the
567 # model's instances for this registry, but not among registries. Hence
568 # we cannot use that "registry class" for combining model classes by
569 # inheritance, since it confuses the metadata inference process.
571 # Keep links to non-inherited constraints in cls; this is useful for
572 # instance when exporting translations
573 cls._local_constraints = cls.__dict__.get('_constraints', [])
574 cls._local_sql_constraints = cls.__dict__.get('_sql_constraints', [])
576 # determine inherited models
577 parents = getattr(cls, '_inherit', [])
578 parents = [parents] if isinstance(parents, basestring) else (parents or [])
580 # determine the model's name
581 name = cls._name or (len(parents) == 1 and parents[0]) or cls.__name__
583 # determine the module that introduced the model
584 original_module = pool[name]._original_module if name in parents else cls._module
586 # build the class hierarchy for the model
587 for parent in parents:
588 if parent not in pool:
                raise TypeError('The model "%s" specifies a nonexistent parent class "%s"\n'
590 'You may need to add a dependency on the parent class\' module.' % (name, parent))
591 parent_model = pool[parent]
            # do not use the class of parent_model, since that class contains
594 # inferred metadata; use its ancestor instead
595 parent_class = type(parent_model).__base__
597 # don't inherit custom fields
598 columns = dict((key, val)
599 for key, val in parent_class._columns.iteritems()
                if not val.manual
            )
            columns.update(cls._columns)
604 inherits = dict(parent_class._inherits)
605 inherits.update(cls._inherits)
607 depends = dict(parent_class._depends)
608 for m, fs in cls._depends.iteritems():
609 depends[m] = depends.get(m, []) + fs
611 old_constraints = parent_class._constraints
612 new_constraints = cls._constraints
613 # filter out from old_constraints the ones overridden by a
614 # constraint with the same function name in new_constraints
615 constraints = new_constraints + [oldc
616 for oldc in old_constraints
617 if not any(newc[2] == oldc[2] and same_name(newc[0], oldc[0])
618 for newc in new_constraints)
621 sql_constraints = cls._sql_constraints + \
622 parent_class._sql_constraints
628 '_inherits': inherits,
630 '_constraints': constraints,
631 '_sql_constraints': sql_constraints,
633 cls = type(name, (cls, parent_class), attrs)
635 # introduce the "registry class" of the model;
636 # duplicate some attributes so that the ORM can modify them
640 '_columns': dict(cls._columns),
641 '_defaults': {}, # filled by Field._determine_default()
642 '_inherits': dict(cls._inherits),
643 '_depends': dict(cls._depends),
644 '_constraints': list(cls._constraints),
645 '_sql_constraints': list(cls._sql_constraints),
646 '_original_module': original_module,
648 cls = type(cls._name, (cls,), attrs)
650 # instantiate the model, and initialize it
651 model = object.__new__(cls)
        model.__init__(pool, cr)
        return model
656 def _init_function_fields(cls, pool, cr):
657 # initialize the list of non-stored function fields for this model
658 pool._pure_function_fields[cls._name] = []
660 # process store of low-level function fields
661 for fname, column in cls._columns.iteritems():
662 if hasattr(column, 'digits_change'):
663 column.digits_change(cr)
664 # filter out existing store about this field
665 pool._store_function[cls._name] = [
667 for stored in pool._store_function.get(cls._name, [])
668 if (stored[0], stored[1]) != (cls._name, fname)
            if not isinstance(column, fields.function):
                continue
673 # register it on the pool for invalidation
674 pool._pure_function_fields[cls._name].append(fname)
676 # process store parameter
            store = column.store
            if store is True:
                get_ids = lambda self, cr, uid, ids, c={}: ids
680 store = {cls._name: (get_ids, None, column.priority, None)}
681 for model, spec in store.iteritems():
                    if len(spec) == 4:
                        (fnct, fields2, order, length) = spec
                    elif len(spec) == 3:
                        (fnct, fields2, order) = spec
                        length = None
                    else:
                        raise except_orm('Error',
                            ('Invalid function definition %s in object %s !\nYou must use the definition: store={object:(fnct, fields, priority, time length)}.' % (fname, cls._name)))
690 pool._store_function.setdefault(model, [])
691 t = (cls._name, fname, fnct, tuple(fields2) if fields2 else None, order, length)
692 if t not in pool._store_function[model]:
693 pool._store_function[model].append(t)
694 pool._store_function[model].sort(key=lambda x: x[4])
697 def _init_manual_fields(cls, pool, cr):
698 # Check whether the query is already done
699 if pool.fields_by_model is not None:
700 manual_fields = pool.fields_by_model.get(cls._name, [])
        else:
            cr.execute('SELECT * FROM ir_model_fields WHERE model=%s AND state=%s', (cls._name, 'manual'))
703 manual_fields = cr.dictfetchall()
705 for field in manual_fields:
            if field['name'] in cls._columns:
                continue
            attrs = {
709 'string': field['field_description'],
710 'required': bool(field['required']),
711 'readonly': bool(field['readonly']),
712 'domain': eval(field['domain']) if field['domain'] else None,
713 'size': field['size'] or None,
714 'ondelete': field['on_delete'],
715 'translate': (field['translate']),
                #'select': int(field['select_level'])
            }
720 if field['serialization_field_id']:
721 cr.execute('SELECT name FROM ir_model_fields WHERE id=%s', (field['serialization_field_id'],))
722 attrs.update({'serialization_field': cr.fetchone()[0], 'type': field['ttype']})
723 if field['ttype'] in ['many2one', 'one2many', 'many2many']:
724 attrs.update({'relation': field['relation']})
725 cls._columns[field['name']] = fields.sparse(**attrs)
726 elif field['ttype'] == 'selection':
727 cls._columns[field['name']] = fields.selection(eval(field['selection']), **attrs)
728 elif field['ttype'] == 'reference':
729 cls._columns[field['name']] = fields.reference(selection=eval(field['selection']), **attrs)
730 elif field['ttype'] == 'many2one':
731 cls._columns[field['name']] = fields.many2one(field['relation'], **attrs)
732 elif field['ttype'] == 'one2many':
733 cls._columns[field['name']] = fields.one2many(field['relation'], field['relation_field'], **attrs)
734 elif field['ttype'] == 'many2many':
735 _rel1 = field['relation'].replace('.', '_')
736 _rel2 = field['model'].replace('.', '_')
737 _rel_name = 'x_%s_%s_%s_rel' % (_rel1, _rel2, field['name'])
738 cls._columns[field['name']] = fields.many2many(field['relation'], _rel_name, 'id1', 'id2', **attrs)
740 cls._columns[field['name']] = getattr(fields, field['ttype'])(**attrs)
743 def _init_constraints_onchanges(cls):
744 # store sql constraint error messages
745 for (key, _, msg) in cls._sql_constraints:
746 cls.pool._sql_error[cls._table + '_' + key] = msg
748 # collect constraint and onchange methods
749 cls._constraint_methods = []
750 cls._onchange_methods = defaultdict(list)
751 for attr, func in getmembers(cls, callable):
752 if hasattr(func, '_constrains'):
753 if not all(name in cls._fields for name in func._constrains):
754 _logger.warning("@constrains%r parameters must be field names", func._constrains)
755 cls._constraint_methods.append(func)
756 if hasattr(func, '_onchange'):
757 if not all(name in cls._fields for name in func._onchange):
758 _logger.warning("@onchange%r parameters must be field names", func._onchange)
759 for name in func._onchange:
760 cls._onchange_methods[name].append(func)
763 # In the past, this method was registering the model class in the server.
764 # This job is now done entirely by the metaclass MetaModel.
    # Do not create an instance here. Model instances are created by method
    # _build_model().
770 def __init__(self, pool, cr):
771 """ Initialize a model and make it part of the given registry.
773 - copy the stored fields' functions in the registry,
774 - retrieve custom fields and add them in the model,
775 - ensure there is a many2one for each _inherits'd parent,
776 - update the children's _columns,
777 - give a chance to each field to initialize itself.
782 # link the class to the registry, and update the registry
        cls = type(self)
        cls._model = self              # backward compatibility
785 pool.add(cls._name, self)
787 # determine description, table, sequence and log_access
788 if not cls._description:
789 cls._description = cls._name
        if not cls._table:
            cls._table = cls._name.replace('.', '_')
792 if not cls._sequence:
793 cls._sequence = cls._table + '_id_seq'
794 if not hasattr(cls, '_log_access'):
795 # If _log_access is not specified, it is the same value as _auto.
796 cls._log_access = cls._auto
799 if cls.is_transient():
800 cls._transient_check_count = 0
801 cls._transient_max_count = config.get('osv_memory_count_limit')
802 cls._transient_max_hours = config.get('osv_memory_age_limit')
803 assert cls._log_access, \
804 "TransientModels must have log_access turned on, " \
805 "in order to implement their access rights policy"
807 # retrieve new-style fields and duplicate them (to avoid clashes with
808 # inheritance between different models)
810 for attr, field in getmembers(cls, Field.__instancecheck__):
811 if not field.inherited:
812 cls._add_field(attr, field.new())
814 # introduce magic fields
815 cls._add_magic_fields()
817 # register stuff about low-level function fields and custom fields
818 cls._init_function_fields(pool, cr)
819 cls._init_manual_fields(pool, cr)
822 cls._inherits_check()
823 cls._inherits_reload()
825 # register constraints and onchange methods
826 cls._init_constraints_onchanges()
829 for k in cls._defaults:
830 assert k in cls._fields, \
831 "Model %s has a default for nonexiting field %s" % (cls._name, k)
834 for column in cls._columns.itervalues():
        if cls._rec_name:
            assert cls._rec_name in cls._fields, \
840 "Invalid rec_name %s for model %s" % (cls._rec_name, cls._name)
841 elif 'name' in cls._fields:
842 cls._rec_name = 'name'
844 # prepare ormcache, which must be shared by all instances of the model
849 def _is_an_ordinary_table(self):
850 self.env.cr.execute("""\
            SELECT 1
            FROM pg_class
            WHERE relname = %s
            AND relkind = %s""", [self._table, 'r'])
855 return bool(self.env.cr.fetchone())
857 def __export_xml_id(self):
858 """ Return a valid xml_id for the record `self`. """
859 if not self._is_an_ordinary_table():
861 "You can not export the column ID of model %s, because the "
862 "table %s is not an ordinary table."
863 % (self._name, self._table))
864 ir_model_data = self.sudo().env['ir.model.data']
865 data = ir_model_data.search([('model', '=', self._name), ('res_id', '=', self.id)])
868 return '%s.%s' % (data[0].module, data[0].name)
        postfix = 0
        name = '%s_%s' % (self._table, self.id)
        while ir_model_data.search([('module', '=', '__export__'), ('name', '=', name)]):
            postfix += 1
            name = '%s_%s_%s' % (self._table, self.id, postfix)
        ir_model_data.create({
            'model': self._name,
            'res_id': self.id,
            'module': '__export__',
            'name': name,
        })
        return '__export__.' + name
886 def __export_rows(self, fields):
887 """ Export fields of the records in `self`.
889 :param fields: list of lists of fields to traverse
890 :return: list of lists of corresponding values
894 # main line of record, initially empty
895 current = [''] * len(fields)
896 lines.append(current)
        # list of primary fields followed by secondary field(s)
        primary_done = []
901 # process column by column
902 for i, path in enumerate(fields):
907 if name in primary_done:
911 current[i] = str(record.id)
913 current[i] = record.__export_xml_id()
915 field = record._fields[name]
918 # this part could be simpler, but it has to be done this way
919 # in order to reproduce the former behavior
920 if not isinstance(value, BaseModel):
921 current[i] = field.convert_to_export(value, self.env)
923 primary_done.append(name)
925 # This is a special case, its strange behavior is intended!
926 if field.type == 'many2many' and len(path) > 1 and path[1] == 'id':
927 xml_ids = [r.__export_xml_id() for r in value]
928 current[i] = ','.join(xml_ids) or False
931 # recursively export the fields that follow name
932 fields2 = [(p[1:] if p and p[0] == name else []) for p in fields]
933 lines2 = value.__export_rows(fields2)
935 # merge first line with record's main line
936 for j, val in enumerate(lines2[0]):
939 # check value of current field
941 # assign xml_ids, and forget about remaining lines
942 xml_ids = [item[1] for item in value.name_get()]
943 current[i] = ','.join(xml_ids)
945 # append the other lines at the end
953 def export_data(self, fields_to_export, raw_data=False):
954 """ Export fields for selected objects
956 :param fields_to_export: list of fields
957 :param raw_data: True to return value in native Python type
958 :rtype: dictionary with a *datas* matrix
960 This method is used when exporting data via client menu
962 fields_to_export = map(fix_import_export_id_paths, fields_to_export)
        if raw_data:
            self = self.with_context(export_raw_data=True)
965 return {'datas': self.__export_rows(fields_to_export)}
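    # Hypothetical usage (illustrative only; the field names assume a model with
    # 'name' and 'order_line' columns, e.g. sale.order):
    #   orders.export_data(['id', 'name', 'order_line/product_id/id'])
    #   -> {'datas': [[u'__export__.sale_order_7', u'SO007', u'product.product_4'],
    #                 [u'',                         u'',      u'product.product_9']]}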
967 def import_data(self, cr, uid, fields, datas, mode='init', current_module='', noupdate=False, context=None, filename=None):
970 Use :meth:`~load` instead
972 Import given data in given module
974 This method is used when importing data via client menu.
976 Example of fields to import for a sale.order::
979 partner_id, (=name_search)
980 order_line/.id, (=database_id)
982 order_line/product_id/id, (=xml id)
983 order_line/price_unit,
984 order_line/product_uom_qty,
985 order_line/product_uom/id (=xml_id)
987 This method returns a 4-tuple with the following structure::
989 (return_code, errored_resource, error_message, unused)
991 * The first item is a return code, it is ``-1`` in case of
992 import error, or the last imported row number in case of success
993 * The second item contains the record data dict that failed to import
994 in case of error, otherwise it's 0
995 * The third item contains an error message string in case of error,
997 * The last item is currently unused, with no specific semantics
999 :param fields: list of fields to import
1000 :param datas: data to import
1001 :param mode: 'init' or 'update' for record creation
1002 :param current_module: module name
1003 :param noupdate: flag for record creation
1004 :param filename: optional file to store partial import state for recovery
1005 :returns: 4-tuple in the form (return_code, errored_resource, error_message, unused)
1006 :rtype: (int, dict or 0, str or 0, str or 0)
1008 context = dict(context) if context is not None else {}
1009 context['_import_current_module'] = current_module
1011 fields = map(fix_import_export_id_paths, fields)
1012 ir_model_data_obj = self.pool.get('ir.model.data')
        def log(m):
            if m['type'] == 'error':
                raise Exception(m['message'])
        position = 0
        if config.get('import_partial') and filename:
1019 with open(config.get('import_partial'), 'rb') as partial_import_file:
1020 data = pickle.load(partial_import_file)
1021 position = data.get(filename, 0)
        try:
            for res_id, xml_id, res, info in self._convert_records(cr, uid,
1026 self._extract_records(cr, uid, fields, datas,
1027 context=context, log=log),
1028 context=context, log=log):
1029 ir_model_data_obj._update(cr, uid, self._name,
1030 current_module, res, mode=mode, xml_id=xml_id,
1031 noupdate=noupdate, res_id=res_id, context=context)
1032 position = info.get('rows', {}).get('to', 0) + 1
1033 if config.get('import_partial') and filename and (not (position%100)):
1034 with open(config.get('import_partial'), 'rb') as partial_import:
1035 data = pickle.load(partial_import)
1036 data[filename] = position
1037 with open(config.get('import_partial'), 'wb') as partial_import:
1038 pickle.dump(data, partial_import)
1039 if context.get('defer_parent_store_computation'):
1040 self._parent_store_compute(cr)
1042 except Exception, e:
1044 return -1, {}, 'Line %d : %s' % (position + 1, tools.ustr(e)), ''
1046 if context.get('defer_parent_store_computation'):
1047 self._parent_store_compute(cr)
1048 return position, 0, 0, 0
1050 def load(self, cr, uid, fields, data, context=None):
1052 Attempts to load the data matrix, and returns a list of ids (or
1053 ``False`` if there was an error and no id could be generated) and a
1056 The ids are those of the records created and saved (in database), in
1057 the same order they were extracted from the file. They can be passed
1058 directly to :meth:`~read`
1060 :param fields: list of fields to import, at the same index as the corresponding data
1061 :type fields: list(str)
1062 :param data: row-major matrix of data to import
1063 :type data: list(list(str))
1064 :param dict context:
1065 :returns: {ids: list(int)|False, messages: [Message]}
1067 cr.execute('SAVEPOINT model_load')
        messages = []
        fields = map(fix_import_export_id_paths, fields)
        ModelData = self.pool['ir.model.data']
        ModelData.clear_caches()
1073 fg = self.fields_get(cr, uid, context=context)
        mode = 'init'
        current_module = ''
        noupdate = False

        ids = []
        for id, xid, record, info in self._convert_records(cr, uid,
1081 self._extract_records(cr, uid, fields, data,
1082 context=context, log=messages.append),
1083 context=context, log=messages.append):
            try:
                cr.execute('SAVEPOINT model_load_save')
1086 except psycopg2.InternalError, e:
1087 # broken transaction, exit and hope the source error was
1089 if not any(message['type'] == 'error' for message in messages):
1090 messages.append(dict(info, type='error',message=
1091 u"Unknown database error: '%s'" % e))
1094 ids.append(ModelData._update(cr, uid, self._name,
1095 current_module, record, mode=mode, xml_id=xid,
1096 noupdate=noupdate, res_id=id, context=context))
1097 cr.execute('RELEASE SAVEPOINT model_load_save')
1098 except psycopg2.Warning, e:
1099 messages.append(dict(info, type='warning', message=str(e)))
1100 cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
1101 except psycopg2.Error, e:
1102 messages.append(dict(
                    info, **PGERROR_TO_OE[e.pgcode](self, fg, info, e)))
1105 # Failed to write, log to messages, rollback savepoint (to
1106 # avoid broken transaction) and keep going
1107 cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
1108 except Exception, e:
1109 message = (_('Unknown error during import:') +
1110 ' %s: %s' % (type(e), unicode(e)))
1111 moreinfo = _('Resolve other errors first')
                messages.append(dict(info, type='error',
                                     message=message, moreinfo=moreinfo))
1115 # Failed for some reason, perhaps due to invalid data supplied,
1116 # rollback savepoint and keep going
1117 cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
1118 if any(message['type'] == 'error' for message in messages):
            cr.execute('ROLLBACK TO SAVEPOINT model_load')
            ids = False
1121 return {'ids': ids, 'messages': messages}
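    # Hypothetical call (illustrative; res.partner fields and ids are invented):
    #   self.pool['res.partner'].load(cr, uid,
    #       ['id', 'name', 'parent_id/id'],
    #       [['__import__.partner_ab', 'Arthur Dent', 'base.main_partner']],
    #       context=context)
    #   -> {'ids': [43], 'messages': []}        # or {'ids': False, 'messages': [...]} on error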
1123 def _extract_records(self, cr, uid, fields_, data,
1124 context=None, log=lambda a: None):
1125 """ Generates record dicts from the data sequence.
1127 The result is a generator of dicts mapping field names to raw
1128 (unconverted, unvalidated) values.
1130 For relational fields, if sub-fields were provided the value will be
1131 a list of sub-records
1133 The following sub-fields may be set on the record (by key):
1134 * None is the name_get for the record (to use with name_create/name_search)
1135 * "id" is the External ID for the record
1136 * ".id" is the Database ID for the record
1138 columns = dict((k, v.column) for k, v in self._all_columns.iteritems())
1139 # Fake columns to avoid special cases in extractor
1140 columns[None] = fields.char('rec_name')
1141 columns['id'] = fields.char('External ID')
1142 columns['.id'] = fields.integer('Database ID')
1144 # m2o fields can't be on multiple lines so exclude them from the
1145 # is_relational field rows filter, but special-case it later on to
1146 # be handled with relational fields (as it can have subfields)
1147 is_relational = lambda field: columns[field]._type in ('one2many', 'many2many', 'many2one')
1148 get_o2m_values = itemgetter_tuple(
1149 [index for index, field in enumerate(fields_)
1150 if columns[field[0]]._type == 'one2many'])
1151 get_nono2m_values = itemgetter_tuple(
1152 [index for index, field in enumerate(fields_)
1153 if columns[field[0]]._type != 'one2many'])
1154 # Checks if the provided row has any non-empty non-relational field
1155 def only_o2m_values(row, f=get_nono2m_values, g=get_o2m_values):
1156 return any(g(row)) and not any(f(row))
        index = 0
        while True:
            if index >= len(data):
                return
            row = data[index]
1163 # copy non-relational fields to record dict
1164 record = dict((field[0], value)
1165 for field, value in itertools.izip(fields_, row)
1166 if not is_relational(field[0]))
1168 # Get all following rows which have relational values attached to
1169 # the current record (no non-relational values)
1170 record_span = itertools.takewhile(
1171 only_o2m_values, itertools.islice(data, index + 1, None))
1172 # stitch record row back on for relational fields
1173 record_span = list(itertools.chain([row], record_span))
1174 for relfield in set(
1175 field[0] for field in fields_
1176 if is_relational(field[0])):
1177 column = columns[relfield]
1178 # FIXME: how to not use _obj without relying on fields_get?
1179 Model = self.pool[column._obj]
1181 # get only cells for this sub-field, should be strictly
1182 # non-empty, field path [None] is for name_get column
1183 indices, subfields = zip(*((index, field[1:] or [None])
1184 for index, field in enumerate(fields_)
1185 if field[0] == relfield))
1187 # return all rows which have at least one value for the
1188 # subfields of relfield
1189 relfield_data = filter(any, map(itemgetter_tuple(indices), record_span))
1190 record[relfield] = [subrecord
1191 for subrecord, _subinfo in Model._extract_records(
1192 cr, uid, subfields, relfield_data,
1193 context=context, log=log)]
            yield record, {'rows': {
                'from': index,
                'to': index + len(record_span) - 1,
            }}
            index += len(record_span)
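    # Illustrative sketch (assuming a model with a 'name' column and a one2many
    # 'child_ids' column, e.g. res.partner): with
    #   fields_ = [['name'], ['child_ids', 'name']]
    #   data    = [['Agrolait', 'Michel'],
    #              ['',         'Sylvie']]
    # the two rows collapse into a single generated record spanning both lines:
    #   ({'name': 'Agrolait', 'child_ids': [{'name': 'Michel'}, {'name': 'Sylvie'}]},
    #    {'rows': {'from': 0, 'to': 1}})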
1201 def _convert_records(self, cr, uid, records,
1202 context=None, log=lambda a: None):
1203 """ Converts records from the source iterable (recursive dicts of
1204 strings) into forms which can be written to the database (via
1205 self.create or (ir.model.data)._update)
1207 :returns: a list of triplets of (id, xid, record)
1208 :rtype: list((int|None, str|None, dict))
1210 if context is None: context = {}
1211 Converter = self.pool['ir.fields.converter']
1212 columns = dict((k, v.column) for k, v in self._all_columns.iteritems())
1213 Translation = self.pool['ir.translation']
        field_names = dict(
            (f, (Translation._get_source(cr, uid, self._name + ',' + f, 'field',
                                         context.get('lang'))
                 or column.string))
            for f, column in columns.iteritems())
1220 convert = Converter.for_model(cr, uid, self, context=context)
1222 def _log(base, field, exception):
1223 type = 'warning' if isinstance(exception, Warning) else 'error'
1224 # logs the logical (not human-readable) field name for automated
1225 # processing of response, but injects human readable in message
1226 record = dict(base, type=type, field=field,
1227 message=unicode(exception.args[0]) % base)
            if len(exception.args) > 1 and exception.args[1]:
                record.update(exception.args[1])
            log(record)
1232 stream = CountingStream(records)
1233 for record, extras in stream:
            dbid = False
            xid = False
            # name_get/name_create
1237 if None in record: pass
                try:
                    dbid = int(record['.id'])
                except ValueError:
                    # in case of overridden id column
                    dbid = record['.id']
1248 if not self.search(cr, uid, [('id', '=', dbid)], context=context):
1251 record=stream.index,
1253 message=_(u"Unknown database identifier '%s'") % dbid))
1256 converted = convert(record, lambda field, err:\
1257 _log(dict(extras, record=stream.index, field=field_names[field]), field, err))
1259 yield dbid, xid, converted, dict(extras, record=stream.index)
1262 def _validate_fields(self, field_names):
1263 field_names = set(field_names)
1265 # old-style constraint methods
1266 trans = self.env['ir.translation']
        cr, uid, context = self.env.args
        ids = self.ids
        errors = []

        for fun, msg, names in self._constraints:
            try:
                # validation must be context-independent; call `fun` without context
                valid = not (set(names) & field_names) or fun(self._model, cr, uid, ids)
                extra_error = None
            except Exception, e:
                _logger.debug('Exception while validating constraint', exc_info=True)
                valid = False
                extra_error = tools.ustr(e)
            if not valid:
                if callable(msg):
                    res_msg = msg(self._model, cr, uid, ids, context=context)
1282 if isinstance(res_msg, tuple):
1283 template, params = res_msg
1284 res_msg = template % params
                else:
                    res_msg = trans._get_source(self._name, 'constraint', self.env.lang, msg)
                if extra_error:
                    res_msg += "\n\n%s\n%s" % (_('Error details:'), extra_error)
                errors.append(
                    _("Field(s) `%s` failed against a constraint: %s") %
                    (', '.join(names), res_msg)
                )
        if errors:
            raise ValidationError('\n'.join(errors))
1296 # new-style constraint methods
1297 for check in self._constraint_methods:
            if set(check._constrains) & field_names:
                try:
                    check(self)
                except ValidationError, e:
                    raise
                except Exception, e:
1304 raise ValidationError("Error while validating constraint\n\n%s" % tools.ustr(e))
1307 def default_get(self, fields_list):
1308 """ default_get(fields) -> default_values
1310 Return default values for the fields in `fields_list`. Default
1311 values are determined by the context, user defaults, and the model
1314 :param fields_list: a list of field names
1315 :return: a dictionary mapping each field name to its corresponding
1316 default value, if it has one.
1319 # trigger view init hook
1320 self.view_init(fields_list)
        defaults = {}
        parent_fields = defaultdict(list)
1325 for name in fields_list:
1326 # 1. look up context
1327 key = 'default_' + name
1328 if key in self._context:
                defaults[name] = self._context[key]
                continue
1332 # 2. look up ir_values
1333 # Note: performance is good, because get_defaults_dict is cached!
1334 ir_values_dict = self.env['ir.values'].get_defaults_dict(self._name)
1335 if name in ir_values_dict:
                defaults[name] = ir_values_dict[name]
                continue
1339 field = self._fields.get(name)
1341 # 3. look up property fields
1342 # TODO: get rid of this one
1343 if field and field.company_dependent:
                defaults[name] = self.env['ir.property'].get(name, self._name)
                continue
1347 # 4. look up field.default
1348 if field and field.default:
                defaults[name] = field.default(self)
                continue
1352 # 5. delegate to parent model
1353 if field and field.inherited:
1354 field = field.related_field
1355 parent_fields[field.model_name].append(field.name)
1357 # convert default values to the right format
1358 defaults = self._convert_to_cache(defaults, validate=False)
1359 defaults = self._convert_to_write(defaults)
1361 # add default values for inherited fields
1362 for model, names in parent_fields.iteritems():
            defaults.update(self.env[model].default_get(names))

        return defaults
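    # Hypothetical result (illustrative): env['res.partner'].default_get(['lang', 'active'])
    # might return {'lang': 'en_US', 'active': True}, combining context defaults,
    # ir.values user defaults, property fields and field defaults, in that priority order.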
1367 def fields_get_keys(self, cr, user, context=None):
1368 res = self._columns.keys()
        # TODO I believe this loop can be replaced by
        # res.extend(self._inherit_fields.keys())
1371 for parent in self._inherits:
            res.extend(self.pool[parent].fields_get_keys(cr, user, context))
        return res
1375 def _rec_name_fallback(self, cr, uid, context=None):
1376 rec_name = self._rec_name
1377 if rec_name not in self._columns:
            rec_name = self._columns.keys()[0] if len(self._columns.keys()) > 0 else "id"
        return rec_name
1382 # Overload this method if you need a window title which depends on the context
1384 def view_header_get(self, cr, user, view_id=None, view_type='form', context=None):
1387 def user_has_groups(self, cr, uid, groups, context=None):
1388 """Return true if the user is at least member of one of the groups
1389 in groups_str. Typically used to resolve `groups` attribute
1390 in view and model definitions.
1392 :param str groups: comma-separated list of fully-qualified group
1393 external IDs, e.g.: ``base.group_user,base.group_system``
1394 :return: True if the current user is a member of one of the
1397 return any(self.pool['res.users'].has_group(cr, uid, group_ext_id)
1398 for group_ext_id in groups.split(','))
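    # Hypothetical check (illustrative): restrict a piece of business logic to
    # members of either of two standard groups:
    #   if self.user_has_groups(cr, uid, 'base.group_user,base.group_system'):
    #       ...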
1400 def _get_default_form_view(self, cr, user, context=None):
1401 """ Generates a default single-line form view using all fields
1402 of the current model except the m2m and o2m ones.
1404 :param cr: database cursor
1405 :param int user: user id
1406 :param dict context: connection context
1407 :returns: a form view as an lxml document
1408 :rtype: etree._Element
1410 view = etree.Element('form', string=self._description)
1411 group = etree.SubElement(view, 'group', col="4")
1412 for fname, field in self._fields.iteritems():
            if field.automatic or field.type in ('one2many', 'many2many'):
                continue
1416 etree.SubElement(group, 'field', name=fname)
            if field.type == 'text':
                etree.SubElement(group, 'newline')
        return view
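    # The generated default form view roughly looks like this (illustrative):
    #   <form string="Model Description">
    #     <group col="4">
    #       <field name="name"/>
    #       <field name="date"/>
    #       ...
    #     </group>
    #   </form>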
1421 def _get_default_search_view(self, cr, user, context=None):
1422 """ Generates a single-field search view, based on _rec_name.
1424 :param cr: database cursor
1425 :param int user: user id
1426 :param dict context: connection context
1427 :returns: a tree view as an lxml document
1428 :rtype: etree._Element
1430 view = etree.Element('search', string=self._description)
        etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))
        return view
1434 def _get_default_tree_view(self, cr, user, context=None):
1435 """ Generates a single-field tree view, based on _rec_name.
1437 :param cr: database cursor
1438 :param int user: user id
1439 :param dict context: connection context
1440 :returns: a tree view as an lxml document
1441 :rtype: etree._Element
1443 view = etree.Element('tree', string=self._description)
        etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))
        return view
1447 def _get_default_calendar_view(self, cr, user, context=None):
1448 """ Generates a default calendar view by trying to infer
1449 calendar fields from a number of pre-set attribute names
1451 :param cr: database cursor
1452 :param int user: user id
1453 :param dict context: connection context
1454 :returns: a calendar view
1455 :rtype: etree._Element
1457 def set_first_of(seq, in_, to):
1458 """Sets the first value of `seq` also found in `in_` to
1459 the `to` attribute of the view being closed over.
            Returns whether it found a suitable value (and set it on
            the attribute) or not.
            """
            for item in seq:
                if item in in_:
                    view.set(to, item)
                    return True
            return False
1470 view = etree.Element('calendar', string=self._description)
1471 etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))
        if self._date_name not in self._columns:
            date_found = False
            for dt in ['date', 'date_start', 'x_date', 'x_date_start']:
                if dt in self._columns:
                    self._date_name = dt
                    date_found = True
                    break

            if not date_found:
                raise except_orm(_('Invalid Object Architecture!'), _("Insufficient fields for Calendar View!"))

        view.set('date_start', self._date_name)
1485 set_first_of(["user_id", "partner_id", "x_user_id", "x_partner_id"],
1486 self._columns, 'color')
1488 if not set_first_of(["date_stop", "date_end", "x_date_stop", "x_date_end"],
1489 self._columns, 'date_stop'):
1490 if not set_first_of(["date_delay", "planned_hours", "x_date_delay", "x_planned_hours"],
1491 self._columns, 'date_delay'):
                raise except_orm(
                    _('Invalid Object Architecture!'),
                    _("Insufficient fields to generate a Calendar View for %s, missing a date_stop or a date_delay") % self._name)

        return view
1498 def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
1499 """ fields_view_get([view_id | view_type='form'])
1501 Get the detailed composition of the requested view like fields, model, view architecture
1503 :param view_id: id of the view or None
        :param view_type: type of the view to return if view_id is None ('form', 'tree', ...)
1505 :param toolbar: true to include contextual actions
1506 :param submenu: deprecated
1507 :return: dictionary describing the composition of the requested view (including inherited views and extensions)
1508 :raise AttributeError:
1509 * if the inherited view has unknown position to work with other than 'before', 'after', 'inside', 'replace'
1510 * if some tag other than 'position' is found in parent view
1511 :raise Invalid ArchitectureError: if there is view type other than form, tree, calendar, search etc defined on the structure
        View = self.pool['ir.ui.view']
        result = {
            'model': self._name,
            'field_parent': False,
        }
1522 # try to find a view_id if none provided
        if view_id is None:
            # <view_type>_view_ref in context can be used to override the default view
1525 view_ref_key = view_type + '_view_ref'
1526 view_ref = context.get(view_ref_key)
1529 module, view_ref = view_ref.split('.', 1)
1530 cr.execute("SELECT res_id FROM ir_model_data WHERE model='ir.ui.view' AND module=%s AND name=%s", (module, view_ref))
1531 view_ref_res = cr.fetchone()
                    if view_ref_res:
                        view_id = view_ref_res[0]
1535 _logger.warning('%r requires a fully-qualified external id (got: %r for model %s). '
1536 'Please use the complete `module.view_id` form instead.', view_ref_key, view_ref,
        if not view_id:
            # otherwise try to find the lowest priority matching ir.ui.view
1541 view_id = View.default_view(cr, uid, self._name, view_type, context=context)
        # context for post-processing might be overridden
        ctx = context
        if view_id:
1546 # read the view with inherited views applied
1547 root_view = View.read_combined(cr, uid, view_id, fields=['id', 'name', 'field_parent', 'type', 'model', 'arch'], context=context)
1548 result['arch'] = root_view['arch']
1549 result['name'] = root_view['name']
1550 result['type'] = root_view['type']
1551 result['view_id'] = root_view['id']
1552 result['field_parent'] = root_view['field_parent']
            # override context for postprocessing
1554 if root_view.get('model') != self._name:
1555 ctx = dict(context, base_model_name=root_view.get('model'))
1557 # fallback on default views methods if no ir.ui.view could be found
1559 get_func = getattr(self, '_get_default_%s_view' % view_type)
1560 arch_etree = get_func(cr, uid, context)
1561 result['arch'] = etree.tostring(arch_etree, encoding='utf-8')
1562 result['type'] = view_type
1563 result['name'] = 'default'
1564 except AttributeError:
1565 raise except_orm(_('Invalid Architecture!'), _("No default view of type '%s' could be found !") % view_type)
1567 # Apply post processing, groups and modifiers etc...
1568 xarch, xfields = View.postprocess_and_fields(cr, uid, self._name, etree.fromstring(result['arch']), view_id, context=ctx)
1569 result['arch'] = xarch
1570 result['fields'] = xfields
        # Add related action information if asked
1574 toclean = ('report_sxw_content', 'report_rml_content', 'report_sxw', 'report_rml', 'report_sxw_content_data', 'report_rml_content_data')
1580 ir_values_obj = self.pool.get('ir.values')
1581 resprint = ir_values_obj.get(cr, uid, 'action', 'client_print_multi', [(self._name, False)], False, context)
1582 resaction = ir_values_obj.get(cr, uid, 'action', 'client_action_multi', [(self._name, False)], False, context)
1583 resrelate = ir_values_obj.get(cr, uid, 'action', 'client_action_relate', [(self._name, False)], False, context)
1584 resaction = [clean(action) for action in resaction if view_type == 'tree' or not action[2].get('multi')]
1585 resprint = [clean(print_) for print_ in resprint if view_type == 'tree' or not print_[2].get('multi')]
            # When multi="True" is set, the action is only displayed in the 'More' menu of list views
1587 resrelate = [clean(action) for action in resrelate
1588 if (action[2].get('multi') and view_type == 'tree') or (not action[2].get('multi') and view_type == 'form')]
1590 for x in itertools.chain(resprint, resaction, resrelate):
1591 x['string'] = x['name']
            result['toolbar'] = {
                'print': resprint,
                'action': resaction,
                'relate': resrelate,
            }
        return result
1600 def get_formview_id(self, cr, uid, id, context=None):
1601 """ Return an view id to open the document with. This method is meant to be
1602 overridden in addons that want to give specific view ids for example.
        :param int id: id of the document to open
        """
        return False
1608 def get_formview_action(self, cr, uid, id, context=None):
1609 """ Return an action to open the document. This method is meant to be
1610 overridden in addons that want to give specific view ids for example.
1612 :param int id: id of the document to open
1614 view_id = self.get_formview_id(cr, uid, id, context=context)
        return {
            'type': 'ir.actions.act_window',
1617 'res_model': self._name,
1618 'view_type': 'form',
1619 'view_mode': 'form',
1620 'views': [(view_id, 'form')],
            'target': 'current',
            'res_id': id,
        }
1625 def get_access_action(self, cr, uid, id, context=None):
1626 """ Return an action to open the document. This method is meant to be
1627 overridden in addons that want to give specific access to the document.
1628 By default it opens the formview of the document.
        :param int id: id of the document to open
1632 return self.get_formview_action(cr, uid, id, context=context)
1634 def _view_look_dom_arch(self, cr, uid, node, view_id, context=None):
1635 return self.pool['ir.ui.view'].postprocess_and_fields(
1636 cr, uid, self._name, node, view_id, context=context)
1638 def search_count(self, cr, user, args, context=None):
1639 """ search_count(args) -> int
1641 Returns the number of records in the current model matching :ref:`the
1642 provided domain <reference/orm/domains>`.
1644 res = self.search(cr, user, args, context=context, count=True)
        if isinstance(res, list):
            return len(res)
        return res
1649 @api.returns('self')
1650 def search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False):
1651 """ search(args[, offset=0][, limit=None][, order=None][, count=False])
1653 Searches for records based on the ``args``
1654 :ref:`search domain <reference/orm/domains>`.
1656 :param args: :ref:`A search domain <reference/orm/domains>`. Use an empty
1657 list to match all records.
1658 :param int offset: number of results to ignore (default: none)
1659 :param int limit: maximum number of records to return (default: all)
1660 :param str order: sort string
1661 :param bool count: if ``True``, the call should return the number of
1662 records matching ``args`` rather than the records
1664 :returns: at most ``limit`` records matching the search criteria
1666 :raise AccessError: * if user tries to bypass access rules for read on the requested object.
1668 return self._search(cr, user, args, offset=offset, limit=limit, order=order, context=context, count=count)
1671 # display_name, name_get, name_create, name_search
1674 @api.depends(lambda self: (self._rec_name,) if self._rec_name else ())
1675 def _compute_display_name(self):
1676 names = dict(self.name_get())
        for record in self:
            record.display_name = names.get(record.id, False)
1682 """ name_get() -> [(id, name), ...]
1684 Returns a textual representation for the records in ``self``.
1685 By default this is the value of the ``display_name`` field.
        :return: list of pairs ``(id, text_repr)`` for each record
        result = []
        name = self._rec_name
1692 if name in self._fields:
1693 convert = self._fields[name].convert_to_display_name
            for record in self:
                result.append((record.id, convert(record[name])))
1698 result.append((record.id, "%s,%s" % (record._name, record.id)))
1703 def name_create(self, name):
1704 """ name_create(name) -> record
1706 Create a new record by calling :meth:`~.create` with only one value
1707 provided: the display name of the new record.
1709 The new record will be initialized with any default values
1710 applicable to this model, or provided through the context. The usual
1711 behavior of :meth:`~.create` applies.
1713 :param name: display name of the record to create
1715 :return: the :meth:`~.name_get` pair value of the created record
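A minimal sketch, assuming ``records`` is any recordset of this model
and ``_rec_name`` points to a simple ``char`` field::

    new_id, display_name = records.name_create('Some new name')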
1718 record = self.create({self._rec_name: name})
1719 return record.name_get()[0]
1721 _logger.warning("Cannot execute name_create, no _rec_name defined on %s", self._name)
1725 def name_search(self, name='', args=None, operator='ilike', limit=100):
1726 """ name_search(name='', args=None, operator='ilike', limit=100) -> records
1728 Search for records that have a display name matching the given
1729 `name` pattern when compared with the given `operator`, while also
1730 matching the optional search domain (`args`).
1732 This is used for example to provide suggestions based on a partial
1733 value for a relational field. It may sometimes be seen as the inverse
1734 function of :meth:`~.name_get`, but it is not guaranteed to be.
1736 This method is equivalent to calling :meth:`~.search` with a search
1737 domain based on ``display_name`` and then :meth:`~.name_get` on the
1738 result of the search.
1740 :param str name: the name pattern to match
1741 :param list args: optional search domain (see :meth:`~.search` for
1742 syntax), specifying further restrictions
1743 :param str operator: domain operator for matching `name`, such as
1744 ``'like'`` or ``'='``.
1745 :param int limit: optional max number of records to return
1747 :return: list of pairs ``(id, text_repr)`` for all matching records.
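For example, fetching completion suggestions for a partial value
(``model`` and the values are illustrative only)::

    # e.g. [(7, 'Agrolait'), (18, 'Agrolait US')]
    matches = model.name_search('Agro', limit=10)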
1749 return self._name_search(name, args, operator, limit=limit)
1751 def _name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100, name_get_uid=None):
1752 # private implementation of name_search, allows passing a dedicated user
1753 # for the name_get part to solve some access rights issues
1754 args = list(args or [])
1755 # optimize out the default criterion of ``ilike ''`` that matches everything
1756 if not self._rec_name:
1757 _logger.warning("Cannot execute name_search, no _rec_name defined on %s", self._name)
1758 elif not (name == '' and operator == 'ilike'):
1759 args += [(self._rec_name, operator, name)]
1760 access_rights_uid = name_get_uid or user
1761 ids = self._search(cr, user, args, limit=limit, context=context, access_rights_uid=access_rights_uid)
1762 res = self.name_get(cr, access_rights_uid, ids, context)
1765 def read_string(self, cr, uid, id, langs, fields=None, context=None):
1768 self.pool.get('ir.translation').check_access_rights(cr, uid, 'read')
1770 fields = self._columns.keys() + self._inherit_fields.keys()
1771 #FIXME: collect all calls to _get_source into one SQL call.
1773 res[lang] = {'code': lang}
1775 if f in self._columns:
1776 res_trans = self.pool.get('ir.translation')._get_source(cr, uid, self._name+','+f, 'field', lang)
1778 res[lang][f] = res_trans
1780 res[lang][f] = self._columns[f].string
1781 for table in self._inherits:
1782 cols = intersect(self._inherit_fields.keys(), fields)
1783 res2 = self.pool[table].read_string(cr, uid, id, langs, cols, context)
1786 res[lang]['code'] = lang
1787 for f in res2[lang]:
1788 res[lang][f] = res2[lang][f]
1791 def write_string(self, cr, uid, id, langs, vals, context=None):
1792 self.pool.get('ir.translation').check_access_rights(cr, uid, 'write')
1793 #FIXME: try to only call the translation in one SQL
1796 if field in self._columns:
1797 src = self._columns[field].string
1798 self.pool.get('ir.translation')._set_ids(cr, uid, self._name+','+field, 'field', lang, [0], vals[field], src)
1799 for table in self._inherits:
1800 cols = intersect(self._inherit_fields.keys(), vals)
1802 self.pool[table].write_string(cr, uid, id, langs, vals, context)
1805 def _add_missing_default_values(self, cr, uid, values, context=None):
1806 # avoid overriding inherited values when parent is set
1808 for tables, parent_field in self._inherits.items():
1809 if parent_field in values:
1810 avoid_tables.append(tables)
1812 # compute missing fields
1813 missing_defaults = set()
1814 for field in self._columns.keys():
1815 if field not in values:
1816 missing_defaults.add(field)
1817 for field in self._inherit_fields.keys():
1818 if (field not in values) and (self._inherit_fields[field][0] not in avoid_tables):
1819 missing_defaults.add(field)
1820 # discard magic fields
1821 missing_defaults -= set(MAGIC_COLUMNS)
1823 if missing_defaults:
1824 # override defaults with the provided values, never allow the other way around
1825 defaults = self.default_get(cr, uid, list(missing_defaults), context)
1827 if ((dv in self._columns and self._columns[dv]._type == 'many2many') \
1828 or (dv in self._inherit_fields and self._inherit_fields[dv][2]._type == 'many2many')) \
1829 and defaults[dv] and isinstance(defaults[dv][0], (int, long)):
1830 defaults[dv] = [(6, 0, defaults[dv])]
1831 if (dv in self._columns and self._columns[dv]._type == 'one2many' \
1832 or (dv in self._inherit_fields and self._inherit_fields[dv][2]._type == 'one2many')) \
1833 and isinstance(defaults[dv], (list, tuple)) and defaults[dv] and isinstance(defaults[dv][0], dict):
1834 defaults[dv] = [(0, 0, x) for x in defaults[dv]]
1835 defaults.update(values)
1839 def clear_caches(self):
1840 """ Clear the caches
1842 This clears the caches associated with methods decorated with
1843 ``tools.ormcache`` or ``tools.ormcache_multi``.
1846 self._ormcache.clear()
1847 self.pool._any_cache_cleared = True
1848 except AttributeError:
1852 def _read_group_fill_results(self, cr, uid, domain, groupby, remaining_groupbys,
1853 aggregated_fields, count_field,
1854 read_group_result, read_group_order=None, context=None):
1855 """Helper method for filling in empty groups for all possible values of
1856 the field being grouped by"""
1858 # self._group_by_full should map groupable fields to a method that returns
1859 # a list of all aggregated values that we want to display for this field,
1860 # in the form of an m2o-like pair (key, label).
1861 # This is useful to implement kanban views for instance, where all columns
1862 # should be displayed even if they don't contain any record.
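# A hypothetical declaration of such a mapping (field and method names
# are illustrative only):
#
#     _group_by_full = {
#         'stage_id': _read_group_stage_ids,
#     }
#
# where the method returns the (key, label) pairs of all groups to
# display, together with a {key: folded} dict.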
1864 # Grab the list of all groups that should be displayed, including all present groups
1865 present_group_ids = [x[groupby][0] for x in read_group_result if x[groupby]]
1866 all_groups,folded = self._group_by_full[groupby](self, cr, uid, present_group_ids, domain,
1867 read_group_order=read_group_order,
1868 access_rights_uid=openerp.SUPERUSER_ID,
1871 result_template = dict.fromkeys(aggregated_fields, False)
1872 result_template[groupby + '_count'] = 0
1873 if remaining_groupbys:
1874 result_template['__context'] = {'group_by': remaining_groupbys}
1876 # Merge the left_side (current results as dicts) with the right_side (all
1877 # possible values as m2o pairs). Both lists are supposed to be using the
1878 # same ordering, and can be merged in one pass.
1881 def append_left(left_side):
1882 grouped_value = left_side[groupby] and left_side[groupby][0]
1883 if grouped_value not in known_values:
1884 result.append(left_side)
1885 known_values[grouped_value] = left_side
1887 known_values[grouped_value].update({count_field: left_side[count_field]})
1888 def append_right(right_side):
1889 grouped_value = right_side[0]
1890 if grouped_value not in known_values:
1891 line = dict(result_template)
1892 line[groupby] = right_side
1893 line['__domain'] = [(groupby,'=',grouped_value)] + domain
1895 known_values[grouped_value] = line
1896 while read_group_result or all_groups:
1897 left_side = read_group_result[0] if read_group_result else None
1898 right_side = all_groups[0] if all_groups else None
1899 assert left_side is None or left_side[groupby] is False \
1900 or isinstance(left_side[groupby], (tuple,list)), \
1901 'M2O-like pair expected, got %r' % left_side[groupby]
1902 assert right_side is None or isinstance(right_side, (tuple,list)), \
1903 'M2O-like pair expected, got %r' % right_side
1904 if left_side is None:
1905 append_right(all_groups.pop(0))
1906 elif right_side is None:
1907 append_left(read_group_result.pop(0))
1908 elif left_side[groupby] == right_side:
1909 append_left(read_group_result.pop(0))
1910 all_groups.pop(0) # discard right_side
1911 elif not left_side[groupby] or not left_side[groupby][0]:
1912 # left side == "Undefined" entry, not present on right_side
1913 append_left(read_group_result.pop(0))
1915 append_right(all_groups.pop(0))
1919 r['__fold'] = folded.get(r[groupby] and r[groupby][0], False)
1922 def _read_group_prepare(self, orderby, aggregated_fields, annotated_groupbys, query):
1924 Prepares the GROUP BY and ORDER BY terms for the read_group method. Adds the missing JOIN clause
1925 to the query if the order should be computed against an m2o field.
1926 :param orderby: the orderby definition in the form "%(field)s %(order)s"
1927 :param aggregated_fields: list of aggregated fields in the query
1928 :param annotated_groupbys: list of dictionaries returned by _read_group_process_groupby
1929 These dictionaries contain the qualified name of each groupby
1930 (fully qualified SQL name for the corresponding field),
1931 and the (non-raw) field name.
1932 :param osv.Query query: the query under construction
1933 :return: (groupby_terms, orderby_terms)
1936 groupby_terms = [gb['qualified_field'] for gb in annotated_groupbys]
1937 groupby_fields = [gb['groupby'] for gb in annotated_groupbys]
1939 return groupby_terms, orderby_terms
1941 self._check_qorder(orderby)
1942 for order_part in orderby.split(','):
1943 order_split = order_part.split()
1944 order_field = order_split[0]
1945 if order_field in groupby_fields:
1947 if self._all_columns[order_field.split(':')[0]].column._type == 'many2one':
1948 order_clause = self._generate_order_by(order_part, query).replace('ORDER BY ', '')
1950 orderby_terms.append(order_clause)
1951 groupby_terms += [order_term.split()[0] for order_term in order_clause.split(',')]
1953 order = '"%s" %s' % (order_field, '' if len(order_split) == 1 else order_split[1])
1954 orderby_terms.append(order)
1955 elif order_field in aggregated_fields:
1956 orderby_terms.append(order_part)
1958 # Cannot order by a field that will not appear in the results (needs to be grouped or aggregated)
1959 _logger.warn('%s: read_group order by `%s` ignored, cannot sort on empty columns (not grouped/aggregated)',
1960 self._name, order_part)
1961 return groupby_terms, orderby_terms
1963 def _read_group_process_groupby(self, gb, query, context):
1965 Helper method to collect important information about groupbys: raw
1966 field name, type, time information, qualified name, ...
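For instance, a groupby spec such as ``'create_date:month'`` would
typically be annotated as (simplified, illustrative values)::

    {'field': 'create_date', 'groupby': 'create_date:month',
     'type': 'datetime', 'display_format': 'MMMM yyyy',
     'interval': relativedelta(months=1), 'tz_convert': True,
     'qualified_field': "date_trunc('month', ...)"}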
1968 split = gb.split(':')
1969 field_type = self._all_columns[split[0]].column._type
1970 gb_function = split[1] if len(split) == 2 else None
1971 temporal = field_type in ('date', 'datetime')
1972 tz_convert = field_type == 'datetime' and context.get('tz') in pytz.all_timezones
1973 qualified_field = self._inherits_join_calc(split[0], query)
1976 # Careful with week/year formats:
1977 # - yyyy (lower) must always be used, *except* for week+year formats
1978 # - YYYY (upper) must always be used for week+year format
1979 # e.g. 2006-01-01 is W52 2005 in some locales (de_DE),
1980 # and W1 2006 for others
1982 # Mixing both formats, e.g. 'MMM YYYY' would yield wrong results,
1983 # such as 2006-01-01 being formatted as "January 2005" in some locales.
1984 # Cfr: http://babel.pocoo.org/docs/dates/#date-fields
1985 'day': 'dd MMM yyyy', # yyyy = normal year
1986 'week': "'W'w YYYY", # w YYYY = ISO week-year
1987 'month': 'MMMM yyyy',
1988 'quarter': 'QQQ yyyy',
1992 'day': dateutil.relativedelta.relativedelta(days=1),
1993 'week': datetime.timedelta(days=7),
1994 'month': dateutil.relativedelta.relativedelta(months=1),
1995 'quarter': dateutil.relativedelta.relativedelta(months=3),
1996 'year': dateutil.relativedelta.relativedelta(years=1)
1999 qualified_field = "timezone('%s', timezone('UTC',%s))" % (context.get('tz', 'UTC'), qualified_field)
2000 qualified_field = "date_trunc('%s', %s)" % (gb_function or 'month', qualified_field)
2001 if field_type == 'boolean':
2002 qualified_field = "coalesce(%s,false)" % qualified_field
2007 'display_format': display_formats[gb_function or 'month'] if temporal else None,
2008 'interval': time_intervals[gb_function or 'month'] if temporal else None,
2009 'tz_convert': tz_convert,
2010 'qualified_field': qualified_field
2013 def _read_group_prepare_data(self, key, value, groupby_dict, context):
2015 Helper method to sanitize the data received by read_group. The None
2016 values are converted to False, and the date/datetime values are formatted
2017 and corrected according to the timezones.
2019 value = False if value is None else value
2020 gb = groupby_dict.get(key)
2021 if gb and gb['type'] in ('date', 'datetime') and value:
2022 if isinstance(value, basestring):
2023 dt_format = DEFAULT_SERVER_DATETIME_FORMAT if gb['type'] == 'datetime' else DEFAULT_SERVER_DATE_FORMAT
2024 value = datetime.datetime.strptime(value, dt_format)
2025 if gb['tz_convert']:
2026 value = pytz.timezone(context['tz']).localize(value)
2029 def _read_group_get_domain(self, groupby, value):
2031 Helper method to construct the domain corresponding to a groupby and
2032 a given value. This is mostly relevant for date/datetime.
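For a monthly ``datetime`` groupby, for instance, a group value of
2014-03-01 would typically be turned into a half-open range such as
(field name and dates are illustrative)::

    [('create_date', '>=', '2014-03-01 00:00:00'),
     ('create_date', '<', '2014-04-01 00:00:00')]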
2034 if groupby['type'] in ('date', 'datetime') and value:
2035 dt_format = DEFAULT_SERVER_DATETIME_FORMAT if groupby['type'] == 'datetime' else DEFAULT_SERVER_DATE_FORMAT
2036 domain_dt_begin = value
2037 domain_dt_end = value + groupby['interval']
2038 if groupby['tz_convert']:
2039 domain_dt_begin = domain_dt_begin.astimezone(pytz.utc)
2040 domain_dt_end = domain_dt_end.astimezone(pytz.utc)
2041 return [(groupby['field'], '>=', domain_dt_begin.strftime(dt_format)),
2042 (groupby['field'], '<', domain_dt_end.strftime(dt_format))]
2043 if groupby['type'] == 'many2one' and value:
2045 return [(groupby['field'], '=', value)]
2047 def _read_group_format_result(self, data, annotated_groupbys, groupby, groupby_dict, domain, context):
2049 Helper method to format the data contained in the dictionary ``data`` by
2050 adding the domain corresponding to its values, the groupbys in the
2051 context and by properly formatting the date/datetime values.
2053 domain_group = [dom for gb in annotated_groupbys for dom in self._read_group_get_domain(gb, data[gb['groupby']])]
2054 for k,v in data.iteritems():
2055 gb = groupby_dict.get(k)
2056 if gb and gb['type'] in ('date', 'datetime') and v:
2057 data[k] = babel.dates.format_date(v, format=gb['display_format'], locale=context.get('lang', 'en_US'))
2059 data['__domain'] = domain_group + domain
2060 if len(groupby) - len(annotated_groupbys) >= 1:
2061 data['__context'] = { 'group_by': groupby[len(annotated_groupbys):]}
2065 def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False, lazy=True):
2067 Get the list of records in list view grouped by the given ``groupby`` fields
2069 :param cr: database cursor
2070 :param uid: current user id
2071 :param domain: list specifying search criteria [['field_name', 'operator', 'value'], ...]
2072 :param list fields: list of fields present in the list view specified on the object
2073 :param list groupby: list of groupby descriptions by which the records will be grouped.
2074 A groupby description is either a field (then it will be grouped by that field)
2075 or a string 'field:groupby_function'. Right now, the only functions supported
2076 are 'day', 'week', 'month', 'quarter' or 'year', and they only make sense for
2077 date/datetime fields.
2078 :param int offset: optional number of records to skip
2079 :param int limit: optional max number of records to return
2080 :param dict context: context arguments, like lang, time zone.
2081 :param list orderby: optional ``order by`` specification, for
2082 overriding the natural sort ordering of the
2083 groups, see also :py:meth:`~osv.osv.osv.search`
2084 (supported only for many2one fields currently)
2085 :param bool lazy: if true, the results are only grouped by the first groupby and the
2086 remaining groupbys are put in the __context key. If false, all the groupbys are
2088 :return: list of dictionaries (one dictionary for each record) containing:
2090 * the values of fields grouped by the fields in ``groupby`` argument
2091 * __domain: list of tuples specifying the search criteria
2092 * __context: dictionary with argument like ``groupby``
2093 :rtype: [{'field_name_1': value, ...}, ...]
2094 :raise AccessError: * if user has no read rights on the requested object
2095 * if user tries to bypass access rules for read on the requested object
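A minimal usage sketch (``model``, the fields and the domain are
illustrative only)::

    groups = model.read_group(cr, uid, [('state', '!=', 'cancel')],
                              ['partner_id', 'amount'], ['partner_id'],
                              context=context)
    # each group dict contains 'partner_id', 'partner_id_count', the
    # aggregated 'amount', plus '__domain' (and '__context' when further
    # groupbys remain)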
2099 self.check_access_rights(cr, uid, 'read')
2100 query = self._where_calc(cr, uid, domain, context=context)
2101 fields = fields or self._columns.keys()
2103 groupby = [groupby] if isinstance(groupby, basestring) else groupby
2104 groupby_list = groupby[:1] if lazy else groupby
2105 annotated_groupbys = [self._read_group_process_groupby(gb, query, context)
2106 for gb in groupby_list]
2107 groupby_fields = [g['field'] for g in annotated_groupbys]
2108 order = orderby or ','.join([g for g in groupby_list])
2109 groupby_dict = {gb['groupby']: gb for gb in annotated_groupbys}
2111 self._apply_ir_rules(cr, uid, query, 'read', context=context)
2112 for gb in groupby_fields:
2113 assert gb in fields, "Fields in 'groupby' must appear in the list of fields to read (perhaps it's missing in the list view?)"
2114 groupby_def = self._columns.get(gb) or (self._inherit_fields.get(gb) and self._inherit_fields.get(gb)[2])
2115 assert groupby_def and groupby_def._classic_write, "Fields in 'groupby' must be regular database-persisted fields (no function or related fields), or function fields with store=True"
2116 if not (gb in self._all_columns):
2117 # Don't allow arbitrary values, as this would be a SQL injection vector!
2118 raise except_orm(_('Invalid group_by'),
2119 _('Invalid group_by specification: "%s".\nA group_by specification must be a list of valid fields.')%(gb,))
2121 aggregated_fields = [
2123 if f not in ('id', 'sequence')
2124 if f not in groupby_fields
2125 if f in self._all_columns
2126 if self._all_columns[f].column._type in ('integer', 'float')
2127 if getattr(self._all_columns[f].column, '_classic_write')]
2129 field_formatter = lambda f: (self._all_columns[f].column.group_operator or 'sum', self._inherits_join_calc(f, query), f)
2130 select_terms = ["%s(%s) AS %s" % field_formatter(f) for f in aggregated_fields]
2132 for gb in annotated_groupbys:
2133 select_terms.append('%s as "%s" ' % (gb['qualified_field'], gb['groupby']))
2135 groupby_terms, orderby_terms = self._read_group_prepare(order, aggregated_fields, annotated_groupbys, query)
2136 from_clause, where_clause, where_clause_params = query.get_sql()
2137 if lazy and (len(groupby_fields) >= 2 or not context.get('group_by_no_leaf')):
2138 count_field = groupby_fields[0] if len(groupby_fields) >= 1 else '_'
2141 count_field += '_count'
2143 prefix_terms = lambda prefix, terms: (prefix + " " + ",".join(terms)) if terms else ''
2144 prefix_term = lambda prefix, term: ('%s %s' % (prefix, term)) if term else ''
2147 SELECT min(%(table)s.id) AS id, count(%(table)s.id) AS %(count_field)s %(extra_fields)s
2155 'table': self._table,
2156 'count_field': count_field,
2157 'extra_fields': prefix_terms(',', select_terms),
2158 'from': from_clause,
2159 'where': prefix_term('WHERE', where_clause),
2160 'groupby': prefix_terms('GROUP BY', groupby_terms),
2161 'orderby': prefix_terms('ORDER BY', orderby_terms),
2162 'limit': prefix_term('LIMIT', int(limit) if limit else None),
2163 'offset': prefix_term('OFFSET', int(offset) if limit else None),
2165 cr.execute(query, where_clause_params)
2166 fetched_data = cr.dictfetchall()
2168 if not groupby_fields:
2171 many2onefields = [gb['field'] for gb in annotated_groupbys if gb['type'] == 'many2one']
2173 data_ids = [r['id'] for r in fetched_data]
2174 many2onefields = list(set(many2onefields))
2175 data_dict = {d['id']: d for d in self.read(cr, uid, data_ids, many2onefields, context=context)}
2176 for d in fetched_data:
2177 d.update(data_dict[d['id']])
2179 data = map(lambda r: {k: self._read_group_prepare_data(k,v, groupby_dict, context) for k,v in r.iteritems()}, fetched_data)
2180 result = [self._read_group_format_result(d, annotated_groupbys, groupby, groupby_dict, domain, context) for d in data]
2181 if lazy and groupby_fields[0] in self._group_by_full:
2182 # Right now, read_group only fills results in lazy mode (by default).
2183 # If you need to have the empty groups in 'eager' mode, then the
2184 # method _read_group_fill_results needs to be completely reimplemented
2186 result = self._read_group_fill_results(cr, uid, domain, groupby_fields[0], groupby[len(annotated_groupbys):],
2187 aggregated_fields, count_field, result, read_group_order=order,
2191 def _inherits_join_add(self, current_model, parent_model_name, query):
2193 Add missing table SELECT and JOIN clause to ``query`` for reaching the parent table (no duplicates)
2194 :param current_model: current model object
2195 :param parent_model_name: name of the parent model for which the clauses should be added
2196 :param query: query object on which the JOIN should be added
2198 inherits_field = current_model._inherits[parent_model_name]
2199 parent_model = self.pool[parent_model_name]
2200 parent_alias, parent_alias_statement = query.add_join((current_model._table, parent_model._table, inherits_field, 'id', inherits_field), implicit=True)
2203 def _inherits_join_calc(self, field, query):
2205 Adds missing table select and join clause(s) to ``query`` for reaching
2206 the field coming from an '_inherits' parent table (no duplicates).
2208 :param field: name of inherited field to reach
2209 :param query: query object on which the JOIN should be added
2210 :return: qualified name of field, to be used in SELECT clause
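For example (illustrative names), if ``name`` is inherited from a parent
model stored in the ``res_partner`` table::

    qualified = self._inherits_join_calc('name', query)
    # 'qualified' is an SQL-qualified column reference such as
    # res_partner."name", and the JOIN to res_partner was added to query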
2212 current_table = self
2213 parent_alias = '"%s"' % current_table._table
2214 while field in current_table._inherit_fields and field not in current_table._columns:
2215 parent_model_name = current_table._inherit_fields[field][0]
2216 parent_table = self.pool[parent_model_name]
2217 parent_alias = self._inherits_join_add(current_table, parent_model_name, query)
2218 current_table = parent_table
2219 return '%s."%s"' % (parent_alias, field)
2221 def _parent_store_compute(self, cr):
2222 if not self._parent_store:
2224 _logger.info('Computing parent left and right for table %s...', self._table)
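# Illustrative sketch of the nested-set encoding computed below: every
# node's [parent_left, parent_right] interval encloses the intervals of
# all of its descendants, e.g. for a root A with two children B and C:
#
#     A (0, 5)
#       +-- B (1, 2)
#       +-- C (3, 4)
#
# so the descendants of A are exactly the records with
# parent_left > A.parent_left AND parent_left < A.parent_right.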
2225 def browse_rec(root, pos=0):
2227 where = self._parent_name+'='+str(root)
2229 where = self._parent_name+' IS NULL'
2230 if self._parent_order:
2231 where += ' order by '+self._parent_order
2232 cr.execute('SELECT id FROM '+self._table+' WHERE '+where)
2234 for id in cr.fetchall():
2235 pos2 = browse_rec(id[0], pos2)
2236 cr.execute('update '+self._table+' set parent_left=%s, parent_right=%s where id=%s', (pos, pos2, root))
2238 query = 'SELECT id FROM '+self._table+' WHERE '+self._parent_name+' IS NULL'
2239 if self._parent_order:
2240 query += ' order by ' + self._parent_order
2243 for (root,) in cr.fetchall():
2244 pos = browse_rec(root, pos)
2245 self.invalidate_cache(cr, SUPERUSER_ID, ['parent_left', 'parent_right'])
2248 def _update_store(self, cr, f, k):
2249 _logger.info("storing computed values of fields.function '%s'", k)
2250 ss = self._columns[k]._symbol_set
2251 update_query = 'UPDATE "%s" SET "%s"=%s WHERE id=%%s' % (self._table, k, ss[0])
2252 cr.execute('select id from '+self._table)
2253 ids_lst = map(lambda x: x[0], cr.fetchall())
2255 iids = ids_lst[:AUTOINIT_RECALCULATE_STORED_FIELDS]
2256 ids_lst = ids_lst[AUTOINIT_RECALCULATE_STORED_FIELDS:]
2257 res = f.get(cr, self, iids, k, SUPERUSER_ID, {})
2258 for key, val in res.items():
2261 # if val is a many2one, just write the ID
2262 if type(val) == tuple:
2264 if val is not False:
2265 cr.execute(update_query, (ss[1](val), key))
2268 def _check_selection_field_value(self, field, value):
2269 """ Check whether value is among the valid values for the given
2270 selection/reference field, and raise an exception if not.
2272 field = self._fields[field]
2273 field.convert_to_cache(value, self)
2275 def _check_removed_columns(self, cr, log=False):
2276 # iterate on the database columns to drop the NOT NULL constraints
2277 # of fields which were required but have been removed (or will be added by another module)
2278 columns = [c for c in self._columns if not (isinstance(self._columns[c], fields.function) and not self._columns[c].store)]
2279 columns += MAGIC_COLUMNS
2280 cr.execute("SELECT a.attname, a.attnotnull"
2281 " FROM pg_class c, pg_attribute a"
2282 " WHERE c.relname=%s"
2283 " AND c.oid=a.attrelid"
2284 " AND a.attisdropped=%s"
2285 " AND pg_catalog.format_type(a.atttypid, a.atttypmod) NOT IN ('cid', 'tid', 'oid', 'xid')"
2286 " AND a.attname NOT IN %s", (self._table, False, tuple(columns))),
2288 for column in cr.dictfetchall():
2290 _logger.debug("column %s is in the table %s but not in the corresponding object %s",
2291 column['attname'], self._table, self._name)
2292 if column['attnotnull']:
2293 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, column['attname']))
2294 _schema.debug("Table '%s': column '%s': dropped NOT NULL constraint",
2295 self._table, column['attname'])
2297 def _save_constraint(self, cr, constraint_name, type):
2299 Record the creation of a constraint for this model, to make it possible
2300 to delete it later when the module is uninstalled. Type can be either
2301 'f' or 'u' depending on whether the constraint is a foreign key or not.
2303 if not self._module:
2304 # no need to save constraints for custom models as they're not part
2307 assert type in ('f', 'u')
2309 SELECT 1 FROM ir_model_constraint, ir_module_module
2310 WHERE ir_model_constraint.module=ir_module_module.id
2311 AND ir_model_constraint.name=%s
2312 AND ir_module_module.name=%s
2313 """, (constraint_name, self._module))
2316 INSERT INTO ir_model_constraint
2317 (name, date_init, date_update, module, model, type)
2318 VALUES (%s, now() AT TIME ZONE 'UTC', now() AT TIME ZONE 'UTC',
2319 (SELECT id FROM ir_module_module WHERE name=%s),
2320 (SELECT id FROM ir_model WHERE model=%s), %s)""",
2321 (constraint_name, self._module, self._name, type))
2323 def _save_relation_table(self, cr, relation_table):
2325 Record the creation of a many2many relation table for this model, to make it possible
2326 to delete it later when the module is uninstalled.
2329 SELECT 1 FROM ir_model_relation, ir_module_module
2330 WHERE ir_model_relation.module=ir_module_module.id
2331 AND ir_model_relation.name=%s
2332 AND ir_module_module.name=%s
2333 """, (relation_table, self._module))
2335 cr.execute("""INSERT INTO ir_model_relation (name, date_init, date_update, module, model)
2336 VALUES (%s, now() AT TIME ZONE 'UTC', now() AT TIME ZONE 'UTC',
2337 (SELECT id FROM ir_module_module WHERE name=%s),
2338 (SELECT id FROM ir_model WHERE model=%s))""",
2339 (relation_table, self._module, self._name))
2340 self.invalidate_cache(cr, SUPERUSER_ID)
2342 # checked version: for direct m2o starting from `self`
2343 def _m2o_add_foreign_key_checked(self, source_field, dest_model, ondelete):
2344 assert self.is_transient() or not dest_model.is_transient(), \
2345 'Many2One relationships from non-transient Model to TransientModel are forbidden'
2346 if self.is_transient() and not dest_model.is_transient():
2347 # TransientModel relationships to regular Models are annoying
2348 # usually because they could block deletion due to the FKs.
2349 # So unless stated otherwise we default them to ondelete=cascade.
2350 ondelete = ondelete or 'cascade'
2351 fk_def = (self._table, source_field, dest_model._table, ondelete or 'set null')
2352 self._foreign_keys.add(fk_def)
2353 _schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s", *fk_def)
2355 # unchecked version: for custom cases, such as m2m relationships
2356 def _m2o_add_foreign_key_unchecked(self, source_table, source_field, dest_model, ondelete):
2357 fk_def = (source_table, source_field, dest_model._table, ondelete or 'set null')
2358 self._foreign_keys.add(fk_def)
2359 _schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s", *fk_def)
2361 def _drop_constraint(self, cr, source_table, constraint_name):
2362 cr.execute("ALTER TABLE %s DROP CONSTRAINT %s" % (source_table,constraint_name))
2364 def _m2o_fix_foreign_key(self, cr, source_table, source_field, dest_model, ondelete):
2365 # Find FK constraint(s) currently established for the m2o field,
2366 # and see whether they are stale or not
2367 cr.execute("""SELECT confdeltype as ondelete_rule, conname as constraint_name,
2368 cl2.relname as foreign_table
2369 FROM pg_constraint as con, pg_class as cl1, pg_class as cl2,
2370 pg_attribute as att1, pg_attribute as att2
2371 WHERE con.conrelid = cl1.oid
2372 AND cl1.relname = %s
2373 AND con.confrelid = cl2.oid
2374 AND array_lower(con.conkey, 1) = 1
2375 AND con.conkey[1] = att1.attnum
2376 AND att1.attrelid = cl1.oid
2377 AND att1.attname = %s
2378 AND array_lower(con.confkey, 1) = 1
2379 AND con.confkey[1] = att2.attnum
2380 AND att2.attrelid = cl2.oid
2381 AND att2.attname = %s
2382 AND con.contype = 'f'""", (source_table, source_field, 'id'))
2383 constraints = cr.dictfetchall()
2385 if len(constraints) == 1:
2386 # Is it the right constraint?
2388 if cons['ondelete_rule'] != POSTGRES_CONFDELTYPES.get((ondelete or 'set null').upper(), 'a')\
2389 or cons['foreign_table'] != dest_model._table:
2390 # Wrong FK: drop it and recreate
2391 _schema.debug("Table '%s': dropping obsolete FK constraint: '%s'",
2392 source_table, cons['constraint_name'])
2393 self._drop_constraint(cr, source_table, cons['constraint_name'])
2395 # it's all good, nothing to do!
2398 # Multiple FKs found for the same field, drop them all, and re-create
2399 for cons in constraints:
2400 _schema.debug("Table '%s': dropping duplicate FK constraints: '%s'",
2401 source_table, cons['constraint_name'])
2402 self._drop_constraint(cr, source_table, cons['constraint_name'])
2404 # (re-)create the FK
2405 self._m2o_add_foreign_key_checked(source_field, dest_model, ondelete)
2408 def _set_default_value_on_column(self, cr, column_name, context=None):
2409 # ideally, we should use default_get(), but it fails due to ir.values
2413 default = self._defaults.get(column_name)
2414 if callable(default):
2415 default = default(self, cr, SUPERUSER_ID, context)
2417 column = self._columns[column_name]
2418 ss = column._symbol_set
2419 db_default = ss[1](default)
2420 # Write default if non-NULL, except for booleans for which False means
2421 # the same as NULL - this saves us an expensive query on large tables.
2422 write_default = (db_default is not None if column._type != 'boolean'
2425 _logger.debug("Table '%s': setting default value of new column %s to %r",
2426 self._table, column_name, default)
2427 query = 'UPDATE "%s" SET "%s"=%s WHERE "%s" is NULL' % (
2428 self._table, column_name, ss[0], column_name)
2429 cr.execute(query, (db_default,))
2430 # this is a disgrace
2433 def _auto_init(self, cr, context=None):
2436 Call _field_create and, unless _auto is False:
2438 - create the corresponding table in database for the model,
2439 - possibly add the parent columns in database,
2440 - possibly add the columns 'create_uid', 'create_date', 'write_uid',
2441 'write_date' in database if _log_access is True (the default),
2442 - report on database columns that no longer exist in _columns,
2443 - remove NOT NULL constraints that no longer apply,
2444 - alter existing database columns to match _columns,
2445 - create database tables to match _columns,
2446 - add database indices to match _columns,
2447 - save in self._foreign_keys a list of foreign keys to create (see
2451 self._foreign_keys = set()
2452 raise_on_invalid_object_name(self._name)
2455 store_compute = False
2456 stored_fields = [] # new-style stored fields with compute
2458 update_custom_fields = context.get('update_custom_fields', False)
2459 self._field_create(cr, context=context)
2460 create = not self._table_exist(cr)
2464 self._create_table(cr)
2467 cr.execute('SELECT 1 FROM "%s" LIMIT 1' % self._table)
2468 has_rows = cr.rowcount
2471 if self._parent_store:
2472 if not self._parent_columns_exist(cr):
2473 self._create_parent_columns(cr)
2474 store_compute = True
2476 self._check_removed_columns(cr, log=False)
2478 # iterate on the "object columns"
2479 column_data = self._select_column_data(cr)
2481 for k, f in self._columns.iteritems():
2482 if k == 'id': # FIXME: maybe id should be a regular column?
2484 # Don't update custom (also called manual) fields
2485 if f.manual and not update_custom_fields:
2488 if isinstance(f, fields.one2many):
2489 self._o2m_raise_on_missing_reference(cr, f)
2491 elif isinstance(f, fields.many2many):
2492 self._m2m_raise_or_create_relation(cr, f)
2495 res = column_data.get(k)
2497 # The field is not found as-is in the database; check whether it
2498 # exists under an old name.
2499 if not res and hasattr(f, 'oldname'):
2500 res = column_data.get(f.oldname)
2502 cr.execute('ALTER TABLE "%s" RENAME "%s" TO "%s"' % (self._table, f.oldname, k))
2504 column_data[k] = res
2505 _schema.debug("Table '%s': renamed column '%s' to '%s'",
2506 self._table, f.oldname, k)
2508 # The field already exists in database. Possibly
2509 # change its type, rename it, drop it or change its
2512 f_pg_type = res['typname']
2513 f_pg_size = res['size']
2514 f_pg_notnull = res['attnotnull']
2515 if isinstance(f, fields.function) and not f.store and\
2516 not getattr(f, 'nodrop', False):
2517 _logger.info('column %s (%s) converted to a function, removed from table %s',
2518 k, f.string, self._table)
2519 cr.execute('ALTER TABLE "%s" DROP COLUMN "%s" CASCADE' % (self._table, k))
2521 _schema.debug("Table '%s': dropped column '%s' with cascade",
2525 f_obj_type = get_pg_type(f) and get_pg_type(f)[0]
2530 ('text', 'char', pg_varchar(f.size), '::%s' % pg_varchar(f.size)),
2531 ('varchar', 'text', 'TEXT', ''),
2532 ('int4', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
2533 ('date', 'datetime', 'TIMESTAMP', '::TIMESTAMP'),
2534 ('timestamp', 'date', 'date', '::date'),
2535 ('numeric', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
2536 ('float8', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
2538 if f_pg_type == 'varchar' and f._type == 'char' and f_pg_size and (f.size is None or f_pg_size < f.size):
2540 with cr.savepoint():
2541 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" TYPE %s' % (self._table, k, pg_varchar(f.size)))
2542 except psycopg2.NotSupportedError:
2543 # An in-place ALTER TABLE cannot be done because a view depends on this field.
2544 # Do a manual copy. This will drop the view (it will be recreated later).
2545 cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO temp_change_size' % (self._table, k))
2546 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, pg_varchar(f.size)))
2547 cr.execute('UPDATE "%s" SET "%s"=temp_change_size::%s' % (self._table, k, pg_varchar(f.size)))
2548 cr.execute('ALTER TABLE "%s" DROP COLUMN temp_change_size CASCADE' % (self._table,))
2550 _schema.debug("Table '%s': column '%s' (type varchar) changed size from %s to %s",
2551 self._table, k, f_pg_size or 'unlimited', f.size or 'unlimited')
2553 if (f_pg_type==c[0]) and (f._type==c[1]):
2554 if f_pg_type != f_obj_type:
2556 cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO __temp_type_cast' % (self._table, k))
2557 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, c[2]))
2558 cr.execute(('UPDATE "%s" SET "%s"= __temp_type_cast'+c[3]) % (self._table, k))
2559 cr.execute('ALTER TABLE "%s" DROP COLUMN __temp_type_cast CASCADE' % (self._table,))
2561 _schema.debug("Table '%s': column '%s' changed type from %s to %s",
2562 self._table, k, c[0], c[1])
2565 if f_pg_type != f_obj_type:
2569 newname = k + '_moved' + str(i)
2570 cr.execute("SELECT count(1) FROM pg_class c,pg_attribute a " \
2571 "WHERE c.relname=%s " \
2572 "AND a.attname=%s " \
2573 "AND c.oid=a.attrelid ", (self._table, newname))
2574 if not cr.fetchone()[0]:
2578 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, k))
2579 cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO "%s"' % (self._table, k, newname))
2580 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, get_pg_type(f)[1]))
2581 cr.execute("COMMENT ON COLUMN %s.\"%s\" IS %%s" % (self._table, k), (f.string,))
2582 _schema.debug("Table '%s': column '%s' has changed type (DB=%s, def=%s), data moved to column %s !",
2583 self._table, k, f_pg_type, f._type, newname)
2585 # if the field is required and hasn't got a NOT NULL constraint
2586 if f.required and f_pg_notnull == 0:
2588 self._set_default_value_on_column(cr, k, context=context)
2589 # add the NOT NULL constraint
2591 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k), log_exceptions=False)
2593 _schema.debug("Table '%s': column '%s': added NOT NULL constraint",
2596 msg = "Table '%s': unable to set a NOT NULL constraint on column '%s' !\n"\
2597 "If you want to have it, you should update the records and execute manually:\n"\
2598 "ALTER TABLE %s ALTER COLUMN %s SET NOT NULL"
2599 _schema.warning(msg, self._table, k, self._table, k)
2601 elif not f.required and f_pg_notnull == 1:
2602 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, k))
2604 _schema.debug("Table '%s': column '%s': dropped NOT NULL constraint",
2607 indexname = '%s_%s_index' % (self._table, k)
2608 cr.execute("SELECT indexname FROM pg_indexes WHERE indexname = %s and tablename = %s", (indexname, self._table))
2609 res2 = cr.dictfetchall()
2610 if not res2 and f.select:
2611 cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (self._table, k, self._table, k))
2613 if f._type == 'text':
2614 # FIXME: for fields.text columns we should try creating GIN indexes instead (seems most suitable for an ERP context)
2615 msg = "Table '%s': Adding (b-tree) index for %s column '%s'."\
2616 "This is probably useless (does not work for fulltext search) and prevents INSERTs of long texts"\
2617 " because there is a length limit for indexable btree values!\n"\
2618 "Use a search view instead if you simply want to make the field searchable."
2619 _schema.warning(msg, self._table, f._type, k)
2620 if res2 and not f.select:
2621 cr.execute('DROP INDEX "%s_%s_index"' % (self._table, k))
2623 msg = "Table '%s': dropping index for column '%s' of type '%s' as it is not required anymore"
2624 _schema.debug(msg, self._table, k, f._type)
2626 if isinstance(f, fields.many2one) or (isinstance(f, fields.function) and f._type == 'many2one' and f.store):
2627 dest_model = self.pool[f._obj]
2628 if dest_model._auto and dest_model._table != 'ir_actions':
2629 self._m2o_fix_foreign_key(cr, self._table, k, dest_model, f.ondelete)
2631 # The field doesn't exist in database. Create it if necessary.
2633 if not isinstance(f, fields.function) or f.store:
2634 # add the missing field
2635 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, get_pg_type(f)[1]))
2636 cr.execute("COMMENT ON COLUMN %s.\"%s\" IS %%s" % (self._table, k), (f.string,))
2637 _schema.debug("Table '%s': added column '%s' with definition=%s",
2638 self._table, k, get_pg_type(f)[1])
2642 self._set_default_value_on_column(cr, k, context=context)
2644 # remember the functions to call for the stored fields
2645 if isinstance(f, fields.function):
2647 if f.store is not True: # i.e. if f.store is a dict
2648 order = f.store[f.store.keys()[0]][2]
2649 todo_end.append((order, self._update_store, (f, k)))
2651 # remember new-style stored fields with compute method
2652 if k in self._fields and self._fields[k].depends:
2653 stored_fields.append(self._fields[k])
2655 # and add constraints if needed
2656 if isinstance(f, fields.many2one) or (isinstance(f, fields.function) and f._type == 'many2one' and f.store):
2657 if f._obj not in self.pool:
2658 raise except_orm('Programming Error', 'There is no reference available for %s' % (f._obj,))
2659 dest_model = self.pool[f._obj]
2660 ref = dest_model._table
2661 # ir_actions is inherited so foreign key doesn't work on it
2662 if dest_model._auto and ref != 'ir_actions':
2663 self._m2o_add_foreign_key_checked(k, dest_model, f.ondelete)
2665 cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (self._table, k, self._table, k))
2669 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k))
2670 _schema.debug("Table '%s': column '%s': added a NOT NULL constraint",
2673 msg = "WARNING: unable to set column %s of table %s not null !\n"\
2674 "Try to re-run: openerp-server --update=module\n"\
2675 "If it doesn't work, update records and execute manually:\n"\
2676 "ALTER TABLE %s ALTER COLUMN %s SET NOT NULL"
2677 _logger.warning(msg, k, self._table, self._table, k, exc_info=True)
2681 cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (self._table,))
2682 create = not bool(cr.fetchone())
2684 cr.commit() # start a new transaction
2687 self._add_sql_constraints(cr)
2690 self._execute_sql(cr)
2693 self._parent_store_compute(cr)
2697 # trigger computation of new-style stored fields with a compute
2699 _logger.info("Storing computed values of %s fields %s",
2700 self._name, ', '.join(sorted(f.name for f in stored_fields)))
2701 recs = self.browse(cr, SUPERUSER_ID, [], {'active_test': False})
2702 recs = recs.search([])
2704 map(recs._recompute_todo, stored_fields)
2707 todo_end.append((1000, func, ()))
2711 def _auto_end(self, cr, context=None):
2712 """ Create the foreign keys recorded by _auto_init. """
2713 for t, k, r, d in self._foreign_keys:
2714 cr.execute('ALTER TABLE "%s" ADD FOREIGN KEY ("%s") REFERENCES "%s" ON DELETE %s' % (t, k, r, d))
2715 self._save_constraint(cr, "%s_%s_fkey" % (t, k), 'f')
2717 del self._foreign_keys
2720 def _table_exist(self, cr):
2721 cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (self._table,))
2725 def _create_table(self, cr):
2726 cr.execute('CREATE TABLE "%s" (id SERIAL NOT NULL, PRIMARY KEY(id))' % (self._table,))
2727 cr.execute(("COMMENT ON TABLE \"%s\" IS %%s" % self._table), (self._description,))
2728 _schema.debug("Table '%s': created", self._table)
2731 def _parent_columns_exist(self, cr):
2732 cr.execute("""SELECT c.relname
2733 FROM pg_class c, pg_attribute a
2734 WHERE c.relname=%s AND a.attname=%s AND c.oid=a.attrelid
2735 """, (self._table, 'parent_left'))
2739 def _create_parent_columns(self, cr):
2740 cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_left" INTEGER' % (self._table,))
2741 cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_right" INTEGER' % (self._table,))
2742 if 'parent_left' not in self._columns:
2743 _logger.error('create a column parent_left on object %s: fields.integer(\'Left Parent\', select=1)',
2745 _schema.debug("Table '%s': added column '%s' with definition=%s",
2746 self._table, 'parent_left', 'INTEGER')
2747 elif not self._columns['parent_left'].select:
2748 _logger.error('parent_left column on object %s must be indexed! Add select=1 to the field definition',
2750 if 'parent_right' not in self._columns:
2751 _logger.error('create a column parent_right on object %s: fields.integer(\'Right Parent\', select=1)',
2753 _schema.debug("Table '%s': added column '%s' with definition=%s",
2754 self._table, 'parent_right', 'INTEGER')
2755 elif not self._columns['parent_right'].select:
2756 _logger.error('parent_right column on object %s must be indexed! Add select=1 to the field definition',
2758 if self._columns[self._parent_name].ondelete not in ('cascade', 'restrict'):
2759 _logger.error("The column %s on object %s must be set as ondelete='cascade' or 'restrict'",
2760 self._parent_name, self._name)
2765 def _select_column_data(self, cr):
2766 # attlen is the number of bytes necessary to represent the type when
2767 # the type has a fixed size. If the type has a varying size attlen is
2768 # -1 and atttypmod is the size limit + 4, or -1 if there is no limit.
2769 cr.execute("SELECT c.relname,a.attname,a.attlen,a.atttypmod,a.attnotnull,a.atthasdef,t.typname,CASE WHEN a.attlen=-1 THEN (CASE WHEN a.atttypmod=-1 THEN 0 ELSE a.atttypmod-4 END) ELSE a.attlen END as size " \
2770 "FROM pg_class c,pg_attribute a,pg_type t " \
2771 "WHERE c.relname=%s " \
2772 "AND c.oid=a.attrelid " \
2773 "AND a.atttypid=t.oid", (self._table,))
2774 return dict(map(lambda x: (x['attname'], x), cr.dictfetchall()))
2777 def _o2m_raise_on_missing_reference(self, cr, f):
2778 # TODO this check should be a method on fields.one2many.
2779 if f._obj in self.pool:
2780 other = self.pool[f._obj]
2781 # TODO the condition could use fields_get_keys().
2782 if f._fields_id not in other._columns.keys():
2783 if f._fields_id not in other._inherit_fields.keys():
2784 raise except_orm('Programming Error', "There is no reference field '%s' found for '%s'" % (f._fields_id, f._obj,))
2786 def _m2m_raise_or_create_relation(self, cr, f):
2787 m2m_tbl, col1, col2 = f._sql_names(self)
2788 # do not create relations for custom fields as they do not belong to a module
2789 # they will be automatically removed when dropping the corresponding ir.model.field
2790 # table names for custom relations all start with x_, see __init__
2791 if not m2m_tbl.startswith('x_'):
2792 self._save_relation_table(cr, m2m_tbl)
2793 cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (m2m_tbl,))
2794 if not cr.dictfetchall():
2795 if f._obj not in self.pool:
2796 raise except_orm('Programming Error', 'Many2Many destination model does not exist: `%s`' % (f._obj,))
2797 dest_model = self.pool[f._obj]
2798 ref = dest_model._table
2799 cr.execute('CREATE TABLE "%s" ("%s" INTEGER NOT NULL, "%s" INTEGER NOT NULL, UNIQUE("%s","%s"))' % (m2m_tbl, col1, col2, col1, col2))
2800 # create foreign key references with ondelete=cascade, unless the targets are SQL views
2801 cr.execute("SELECT relkind FROM pg_class WHERE relkind IN ('v') AND relname=%s", (ref,))
2802 if not cr.fetchall():
2803 self._m2o_add_foreign_key_unchecked(m2m_tbl, col2, dest_model, 'cascade')
2804 cr.execute("SELECT relkind FROM pg_class WHERE relkind IN ('v') AND relname=%s", (self._table,))
2805 if not cr.fetchall():
2806 self._m2o_add_foreign_key_unchecked(m2m_tbl, col1, self, 'cascade')
2808 cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (m2m_tbl, col1, m2m_tbl, col1))
2809 cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (m2m_tbl, col2, m2m_tbl, col2))
2810 cr.execute("COMMENT ON TABLE \"%s\" IS 'RELATION BETWEEN %s AND %s'" % (m2m_tbl, self._table, ref))
2812 _schema.debug("Create table '%s': m2m relation between '%s' and '%s'", m2m_tbl, self._table, ref)
2815 def _add_sql_constraints(self, cr):
2818 Modify this model's database table constraints so they match the ones declared in ``_sql_constraints``.
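A typical declaration kept in sync by this method looks like this (the
key, definition and message are illustrative)::

    _sql_constraints = [
        ('name_uniq', 'unique(name)', 'The name must be unique!'),
    ]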
2822 def unify_cons_text(txt):
2823 return txt.lower().replace(', ',',').replace(' (','(')
2825 for (key, con, _) in self._sql_constraints:
2826 conname = '%s_%s' % (self._table, key)
2828 self._save_constraint(cr, conname, 'u')
2829 cr.execute("SELECT conname, pg_catalog.pg_get_constraintdef(oid, true) as condef FROM pg_constraint where conname=%s", (conname,))
2830 existing_constraints = cr.dictfetchall()
2834 'query': 'ALTER TABLE "%s" DROP CONSTRAINT "%s"' % (self._table, conname, ),
2835 'msg_ok': "Table '%s': dropped constraint '%s'. Reason: its definition changed from '%%s' to '%s'" % (
2836 self._table, conname, con),
2837 'msg_err': "Table '%s': unable to drop \'%s\' constraint !" % (self._table, con),
2842 'query': 'ALTER TABLE "%s" ADD CONSTRAINT "%s" %s' % (self._table, conname, con,),
2843 'msg_ok': "Table '%s': added constraint '%s' with definition=%s" % (self._table, conname, con),
2844 'msg_err': "Table '%s': unable to add \'%s\' constraint !\n If you want to have it, you should update the records and execute manually:\n%%s" % (
2850 if not existing_constraints:
2851 # constraint does not exist:
2852 sql_actions['add']['execute'] = True
2853 sql_actions['add']['msg_err'] = sql_actions['add']['msg_err'] % (sql_actions['add']['query'], )
2854 elif unify_cons_text(con) not in [unify_cons_text(item['condef']) for item in existing_constraints]:
2855 # constraint exists but its definition has changed:
2856 sql_actions['drop']['execute'] = True
2857 sql_actions['drop']['msg_ok'] = sql_actions['drop']['msg_ok'] % (existing_constraints[0]['condef'].lower(), )
2858 sql_actions['add']['execute'] = True
2859 sql_actions['add']['msg_err'] = sql_actions['add']['msg_err'] % (sql_actions['add']['query'], )
2861 # we need to add the constraint:
2862 sql_actions = [item for item in sql_actions.values()]
2863 sql_actions.sort(key=lambda x: x['order'])
2864 for sql_action in [action for action in sql_actions if action['execute']]:
2866 cr.execute(sql_action['query'])
2868 _schema.debug(sql_action['msg_ok'])
2870 _schema.warning(sql_action['msg_err'])
2874 def _execute_sql(self, cr):
2875 """ Execute the SQL code from the _sql attribute (if any)."""
2876 if hasattr(self, "_sql"):
2877 for line in self._sql.split(';'):
2878 line2 = line.replace('\n', '').strip()
2884 # Update objects that use this one to update their _inherits fields
2888 def _inherits_reload_src(cls):
2889 """ Recompute the _inherit_fields mapping on each _inherits'd child model."""
2890 for model in cls.pool.values():
2891 if cls._name in model._inherits:
2892 model._inherits_reload()
2895 def _inherits_reload(cls):
2896 """ Recompute the _inherit_fields mapping.
2898 This will also call itself on each inherits'd child model.
2902 for table in cls._inherits:
2903 other = cls.pool[table]
2904 for col in other._columns.keys():
2905 res[col] = (table, cls._inherits[table], other._columns[col], table)
2906 for col in other._inherit_fields.keys():
2907 res[col] = (table, cls._inherits[table], other._inherit_fields[col][2], other._inherit_fields[col][3])
2908 cls._inherit_fields = res
2909 cls._all_columns = cls._get_column_infos()
2911 # interface columns with new-style fields
2912 for attr, column in cls._columns.items():
2913 if attr not in cls._fields:
2914 cls._add_field(attr, column.to_field())
2916 # interface inherited fields with new-style fields (note that the
2917 # reverse order is for being consistent with _all_columns above)
2918 for parent_model, parent_field in reversed(cls._inherits.items()):
2919 for attr, field in cls.pool[parent_model]._fields.iteritems():
2920 if attr not in cls._fields:
2921 cls._add_field(attr, field.new(
2923 related=(parent_field, attr),
2927 cls._inherits_reload_src()
2930 def _get_column_infos(cls):
2931 """Returns a dict mapping all fields names (direct fields and
2932 inherited field via _inherits) to a ``column_info`` struct
2933 giving detailed columns """
2935 # do not invert the for loops, since local fields may hide inherited ones!
2936 for k, (parent, m2o, col, original_parent) in cls._inherit_fields.iteritems():
2937 result[k] = fields.column_info(k, col, parent, m2o, original_parent)
2938 for k, col in cls._columns.iteritems():
2939 result[k] = fields.column_info(k, col)
2943 def _inherits_check(cls):
2944 for table, field_name in cls._inherits.items():
2945 if field_name not in cls._columns:
2946 _logger.info('Missing many2one field definition for _inherits reference "%s" in "%s", using default one.', field_name, cls._name)
2947 cls._columns[field_name] = fields.many2one(table, string="Automatically created field to link to parent %s" % table,
2948 required=True, ondelete="cascade")
2949 elif not cls._columns[field_name].required or cls._columns[field_name].ondelete.lower() not in ("cascade", "restrict"):
2950 _logger.warning('Field definition for _inherits reference "%s" in "%s" must be marked as "required" with ondelete="cascade" or "restrict", forcing it to required + cascade.', field_name, cls._name)
2951 cls._columns[field_name].required = True
2952 cls._columns[field_name].ondelete = "cascade"
2954 # reflect fields with delegate=True in dictionary cls._inherits
2955 for field in cls._fields.itervalues():
2956 if field.type == 'many2one' and not field.related and field.delegate:
2957 if not field.required:
2958 _logger.warning("Field %s with delegate=True must be required.", field)
2959 field.required = True
2960 if field.ondelete.lower() not in ('cascade', 'restrict'):
2961 field.ondelete = 'cascade'
2962 cls._inherits[field.comodel_name] = field.name
2965 def _prepare_setup_fields(self):
2966 """ Prepare the setup of fields once the models have been loaded. """
2967 for field in self._fields.itervalues():
2971 def _setup_fields(self, partial=False):
2972 """ Setup the fields (dependency triggers, etc). """
2973 for field in self._fields.itervalues():
2974 if partial and field.manual and \
2975 field.relational and \
2976 (field.comodel_name not in self.pool or \
2977 (field.type == 'one2many' and field.inverse_name not in self.pool[field.comodel_name]._fields)):
2978 # do not set up manual fields that refer to unknown models
2980 field.setup(self.env)
2982 # group fields by compute to determine field.computed_fields
2983 fields_by_compute = defaultdict(list)
2984 for field in self._fields.itervalues():
2986 field.computed_fields = fields_by_compute[field.compute]
2987 field.computed_fields.append(field)
2989 field.computed_fields = []
2991 def fields_get(self, cr, user, allfields=None, context=None, write_access=True):
2992 """ fields_get([fields])
2994 Return the definition of each field.
2996 The returned value is a dictionary (indexed by field name) of
2997 dictionaries. The _inherits'd fields are included. The string, help,
2998 and selection (if present) attributes are translated.
3000 :param cr: database cursor
3001 :param user: current user id
3002 :param allfields: list of fields
3003 :param context: context arguments, like lang, time zone
3004 :return: dictionary of field dictionaries, each one describing a field of the business object
3005 :raise AccessError: * if user has no create/write rights on the requested object
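For example, the entry describing a simple ``char`` field might look
like this (only an illustrative subset of keys is shown)::

    'name': {'type': 'char', 'string': 'Name', 'required': True,
             'readonly': False, 'help': '...'}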
3008 recs = self.browse(cr, user, [], context)
3011 for fname, field in self._fields.iteritems():
3012 if allfields and fname not in allfields:
3014 if not field.setup_done:
3016 if field.groups and not recs.user_has_groups(field.groups):
3018 res[fname] = field.get_description(recs.env)
3020 # if user cannot create or modify records, make all fields readonly
3021 has_access = functools.partial(recs.check_access_rights, raise_exception=False)
3022 if not (has_access('write') or has_access('create')):
3023 for description in res.itervalues():
3024 description['readonly'] = True
3025 description['states'] = {}
3029 def get_empty_list_help(self, cr, user, help, context=None):
3030 """ Generic method giving the help message displayed when having
3031 no result to display in a list or kanban view. By default it returns
3032 the help given in parameter that is generally the help message
3033 defined in the action.
3037 def check_field_access_rights(self, cr, user, operation, fields, context=None):
3039 Check the user access rights on the given fields. This raises Access
3040 Denied if the user does not have the rights. Otherwise it returns the
3041 fields (as-is if ``fields`` is not falsy, or the readable/writable
3042 fields if ``fields`` is falsy).
3044 if user == SUPERUSER_ID:
3045 return fields or list(self._fields)
3048 """ determine whether user has access to field `fname` """
3049 field = self._fields.get(fname)
3050 if field and field.groups:
3051 return self.user_has_groups(cr, user, groups=field.groups, context=context)
3056 fields = filter(valid, self._fields)
3058 invalid_fields = set(filter(lambda name: not valid(name), fields))
3060 _logger.warning('Access Denied by ACLs for operation: %s, uid: %s, model: %s, fields: %s',
3061 operation, user, self._name, ', '.join(invalid_fields))
3063 _('The requested operation cannot be completed due to security restrictions. '
3064 'Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
3065 (self._description, operation))
3069 # add explicit old-style implementation to read()
3071 def read(self, cr, user, ids, fields=None, context=None, load='_classic_read'):
3072 records = self.browse(cr, user, ids, context)
3073 result = BaseModel.read(records, fields, load=load)
3074 return result if isinstance(ids, list) else (bool(result) and result[0])
3076 # new-style implementation of read()
3078 def read(self, fields=None, load='_classic_read'):
3081 Reads the requested fields for the records in `self`, low-level/RPC
3082 method. In Python code, prefer :meth:`~.browse`.
3084 :param fields: list of field names to return (default is all fields)
3085 :return: a list of dictionaries mapping field names to their values,
3086 with one dictionary per record
3087 :raise AccessError: if user has no read rights on some of the given
3090 # check access rights
3091 self.check_access_rights('read')
3092 fields = self.check_field_access_rights('read', fields)
3094 # split fields into stored and computed fields
3095 stored, computed = [], []
3097 if name in self._columns:
3099 elif name in self._fields:
3100 computed.append(name)
3102 _logger.warning("%s.read() with unknown field '%s'", self._name, name)
3104 # fetch stored fields from the database to the cache
3105 self._read_from_database(stored)
3107 # retrieve results from records; this takes values from the cache and
3108 # computes remaining fields
3110 name_fields = [(name, self._fields[name]) for name in (stored + computed)]
3111 use_name_get = (load == '_classic_read')
3114 values = {'id': record.id}
3115 for name, field in name_fields:
3116 values[name] = field.convert_to_read(record[name], use_name_get)
3117 result.append(values)
3118 except MissingError:
3124 def _prefetch_field(self, field):
3125 """ Read from the database in order to fetch `field` (:class:`Field`
3126 instance) for `self` in cache.
3128 # fetch the records of this model without field_name in their cache
3129 records = self._in_cache_without(field)
3131 if len(records) > PREFETCH_MAX:
3132 records = records[:PREFETCH_MAX] | self
3134 # determine which fields can be prefetched
3135 if not self.env.in_draft and \
3136 self._context.get('prefetch_fields', True) and \
3137 self._columns[field.name]._prefetch:
3138 # prefetch all classic and many2one fields that the user can access
3140 for fname, fcolumn in self._columns.iteritems()
3141 if fcolumn._prefetch
3142 if not fcolumn.groups or self.user_has_groups(fcolumn.groups)
3145 fnames = {field.name}
3147 # important: never prefetch fields to recompute!
3148 get_recs_todo = self.env.field_todo
3149 for fname in list(fnames):
3150 if get_recs_todo(self._fields[fname]):
3151 if fname == field.name:
3152 records -= get_recs_todo(field)
3154 fnames.discard(fname)
3156 # fetch records with read()
3157 assert self in records and field.name in fnames
3160 result = records.read(list(fnames), load='_classic_write')
3164 # check the cache, and update it if necessary
3165 if not self._cache.contains(field):
3166 for values in result:
3167 record = self.browse(values.pop('id'))
3168 record._cache.update(record._convert_to_cache(values, validate=False))
3169 if not self._cache.contains(field):
3170 e = AccessError("No value found for %s.%s" % (self, field.name))
3171 self._cache[field] = FailedValue(e)
3174 def _read_from_database(self, field_names):
3175 """ Read the given fields of the records in `self` from the database,
3176 and store them in cache. Access errors are also stored in cache.
3179 cr, user, context = env.args
3181 # FIXME: The query construction needs to be rewritten using the internal Query
3182 # object, as in search(), to avoid ambiguous column references when
3183 # reading/sorting on a table that is auto_joined to another table with
3184 # common columns (e.g. the magical columns)
3186 # Construct a clause for the security rules.
3187 # 'tables' holds the list of tables necessary for the SELECT, including
3188 # the ir.rule clauses, and contains at least self._table.
3189 rule_clause, rule_params, tables = env['ir.rule'].domain_get(self._name, 'read')
3191 # determine the fields that are stored as columns in self._table
3192 fields_pre = [f for f in field_names if self._columns[f]._classic_write]
3194 # we need fully-qualified column names in case len(tables) > 1
3196 if isinstance(self._columns.get(f), fields.binary) and \
3197 context.get('bin_size_%s' % f, context.get('bin_size')):
3198 # PG 9.2 introduces conflicting pg_size_pretty(numeric) -> need ::cast
3199 return 'pg_size_pretty(length(%s."%s")::bigint) as "%s"' % (self._table, f, f)
3201 return '%s."%s"' % (self._table, f)
3202 qual_names = map(qualify, set(fields_pre + ['id']))
3204 query = """ SELECT %(qual_names)s FROM %(tables)s
3205 WHERE %(table)s.id IN %%s AND (%(extra)s)
3208 'qual_names': ",".join(qual_names),
3209 'tables': ",".join(tables),
3210 'table': self._table,
3211 'extra': " OR ".join(rule_clause) if rule_clause else "TRUE",
3212 'order': self._parent_order or self._order,
3216 for sub_ids in cr.split_for_in_conditions(self.ids):
3217 cr.execute(query, [tuple(sub_ids)] + rule_params)
3218 result.extend(cr.dictfetchall())
3220 ids = [vals['id'] for vals in result]
3223 # translate the fields if necessary
3224 if context.get('lang'):
3225 ir_translation = env['ir.translation']
3226 for f in fields_pre:
3227 if self._columns[f].translate:
3228 #TODO: optimize out of this loop
3229 res_trans = ir_translation._get_ids(
3230 '%s,%s' % (self._name, f), 'model', context['lang'], ids)
3232 vals[f] = res_trans.get(vals['id'], False) or vals[f]
3234 # apply the symbol_get functions of the fields we just read
3235 for f in fields_pre:
3236 symbol_get = self._columns[f]._symbol_get
3239 vals[f] = symbol_get(vals[f])
3241 # store result in cache for POST fields
3243 record = self.browse(vals['id'])
3244 record._cache.update(record._convert_to_cache(vals, validate=False))
3246 # determine the fields that must be processed now
3247 fields_post = [f for f in field_names if not self._columns[f]._classic_write]
3249 # Compute POST fields, grouped by multi
3250 by_multi = defaultdict(list)
3251 for f in fields_post:
3252 by_multi[self._columns[f]._multi].append(f)
3254 for multi, fs in by_multi.iteritems():
3256 res2 = self._columns[fs[0]].get(cr, self._model, ids, fs, user, context=context, values=result)
3257 assert res2 is not None, \
3258 'The function field "%s" on the "%s" model returned None\n' \
3259 '(a dictionary was expected).' % (fs[0], self._name)
3261 # TOCHECK: why do we get a string instead of a dict in python2.6?
3262 # if isinstance(res2[vals['id']], str): res2[vals['id']] = eval(res2[vals['id']])
3263 multi_fields = res2.get(vals['id'], {})
3266 vals[f] = multi_fields.get(f, [])
3269 res2 = self._columns[f].get(cr, self._model, ids, f, user, context=context, values=result)
3272 vals[f] = res2[vals['id']]
3276 # Warn about deprecated fields now that fields_pre and fields_post are computed
3277 for f in field_names:
3278 column = self._columns[f]
3279 if column.deprecated:
3280 _logger.warning('Field %s.%s is deprecated: %s', self._name, f, column.deprecated)
3282 # store result in cache
3284 record = self.browse(vals.pop('id'))
3285 record._cache.update(record._convert_to_cache(vals, validate=False))
3287 # store failed values in cache for the records that could not be read
3288 fetched = self.browse(ids)
3289 missing = self - fetched
3291 extras = fetched - self
3294 _("Database fetch misses ids ({}) and has extra ids ({}), may be caused by a type incoherence in a previous request").format(
3295 ', '.join(map(repr, missing._ids)),
3296 ', '.join(map(repr, extras._ids)),
3298 # store an access error exception in existing records
3300 _('The requested operation cannot be completed due to security restrictions. Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
3301 (self._name, 'read')
3303 forbidden = missing.exists()
3304 forbidden._cache.update(FailedValue(exc))
3305 # store a missing error exception in non-existing records
3307 _('One of the documents you are trying to access has been deleted, please try again after refreshing.')
3309 (missing - forbidden)._cache.update(FailedValue(exc))
3312 def get_metadata(self):
3314 Returns some metadata about the given records.
3316 :return: list of ownership dictionaries for each requested record
3317 :rtype: list of dictionaries with the following keys:
3320 * create_uid: user who created the record
3321 * create_date: date when the record was created
3322 * write_uid: last user who changed the record
3323 * write_date: date of the last change to the record
3324 * xmlid: XML ID to use to refer to this record (if there is one), in format ``module.name``
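A typical entry looks like this (illustrative values; the ``create_*`` and
``write_*`` keys are only present when ``_log_access`` is enabled)::

    {'id': 7,
     'create_uid': (1, 'Administrator'),
     'create_date': '2014-06-04 10:23:34',
     'write_uid': (1, 'Administrator'),
     'write_date': '2014-06-04 10:25:02',
     'xmlid': 'base.partner_demo'}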
3327 if self._log_access:
3328 fields += ['create_uid', 'create_date', 'write_uid', 'write_date']
3329 quoted_table = '"%s"' % self._table
3330 fields_str = ",".join('%s.%s' % (quoted_table, field) for field in fields)
3331 query = '''SELECT %s, __imd.module, __imd.name
3332 FROM %s LEFT JOIN ir_model_data __imd
3333 ON (__imd.model = %%s and __imd.res_id = %s.id)
3334 WHERE %s.id IN %%s''' % (fields_str, quoted_table, quoted_table, quoted_table)
3335 self._cr.execute(query, (self._name, tuple(self.ids)))
3336 res = self._cr.dictfetchall()
3338 uids = set(r[k] for r in res for k in ['write_uid', 'create_uid'] if r.get(k))
3339 names = dict(self.env['res.users'].browse(uids).name_get())
3343 value = r[key] = r[key] or False
3344 if key in ('write_uid', 'create_uid') and value in names:
3345 r[key] = (value, names[value])
3346 r['xmlid'] = ("%(module)s.%(name)s" % r) if r['name'] else False
3347 del r['name'], r['module']
3350 def _check_concurrency(self, cr, ids, context):
3353 if not (context.get(self.CONCURRENCY_CHECK_FIELD) and self._log_access):
3355 check_clause = "(id = %s AND %s < COALESCE(write_date, create_date, (now() at time zone 'UTC'))::timestamp)"
3356 for sub_ids in cr.split_for_in_conditions(ids):
3359 id_ref = "%s,%s" % (self._name, id)
3360 update_date = context[self.CONCURRENCY_CHECK_FIELD].pop(id_ref, None)
3362 ids_to_check.extend([id, update_date])
3363 if not ids_to_check:
3365 cr.execute("SELECT id FROM %s WHERE %s" % (self._table, " OR ".join([check_clause]*(len(ids_to_check)/2))), tuple(ids_to_check))
3368 # mention the first one only to keep the error message readable
3369 raise except_orm('ConcurrencyException', _('A document was modified since you last viewed it (%s:%d)') % (self._description, res[0]))
3371 def _check_record_rules_result_count(self, cr, uid, ids, result_ids, operation, context=None):
3372 """Verify the returned rows after applying record rules matches
3373 the length of `ids`, and raise an appropriate exception if it does not.
3377 ids, result_ids = set(ids), set(result_ids)
3378 missing_ids = ids - result_ids
3380 # Attempt to distinguish record rule restriction vs deleted records,
3381 # to provide a more specific error message - check whether the missing ids still exist
3382 cr.execute('SELECT id FROM ' + self._table + ' WHERE id IN %s', (tuple(missing_ids),))
3383 forbidden_ids = [x[0] for x in cr.fetchall()]
3385 # the missing ids are (at least partially) hidden by access rules
3386 if uid == SUPERUSER_ID:
3388 _logger.warning('Access Denied by record rules for operation: %s on record ids: %r, uid: %s, model: %s', operation, forbidden_ids, uid, self._name)
3389 raise except_orm(_('Access Denied'),
3390 _('The requested operation cannot be completed due to security restrictions. Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
3391 (self._description, operation))
3393 # If we get here, the missing_ids are not in the database
3394 if operation in ('read','unlink'):
3395 # No need to warn about deleting an already deleted record.
3396 # And no error when reading a record that was deleted, to prevent spurious
3397 # errors for non-transactional search/read sequences coming from clients
3399 _logger.warning('Failed operation on deleted record(s): %s, uid: %s, model: %s', operation, uid, self._name)
3400 raise except_orm(_('Missing document(s)'),
3401 _('One of the documents you are trying to access has been deleted, please try again after refreshing.'))
3404 def check_access_rights(self, cr, uid, operation, raise_exception=True): # no context on purpose.
3405 """Verifies that the operation given by ``operation`` is allowed for the user
3406 according to the access rights."""
3407 return self.pool.get('ir.model.access').check(cr, uid, self._name, operation, raise_exception)
3409 def check_access_rule(self, cr, uid, ids, operation, context=None):
3410 """Verifies that the operation given by ``operation`` is allowed for the user
3411 according to ir.rules.
3413 :param operation: one of ``write``, ``unlink``
3414 :raise except_orm: * if current ir.rules do not permit this operation.
3415 :return: None if the operation is allowed
3417 if uid == SUPERUSER_ID:
3420 if self.is_transient():
3421 # Only one single implicit access rule for transient models: owner only!
3422 # This is ok to hardcode because we assert that TransientModels always
3423 # have log_access enabled so that the create_uid column is always there.
3424 # And even with _inherits, these fields are always present in the local
3425 # table too, so no need for JOINs.
3426 cr.execute("""SELECT distinct create_uid
3428 WHERE id IN %%s""" % self._table, (tuple(ids),))
3429 uids = [x[0] for x in cr.fetchall()]
3430 if len(uids) != 1 or uids[0] != uid:
3431 raise except_orm(_('Access Denied'),
3432 _('For this kind of document, you may only access records you created yourself.\n\n(Document type: %s)') % (self._description,))
3434 where_clause, where_params, tables = self.pool.get('ir.rule').domain_get(cr, uid, self._name, operation, context=context)
3436 where_clause = ' and ' + ' and '.join(where_clause)
3437 for sub_ids in cr.split_for_in_conditions(ids):
3438 cr.execute('SELECT ' + self._table + '.id FROM ' + ','.join(tables) +
3439 ' WHERE ' + self._table + '.id IN %s' + where_clause,
3440 [sub_ids] + where_params)
3441 returned_ids = [x['id'] for x in cr.dictfetchall()]
3442 self._check_record_rules_result_count(cr, uid, sub_ids, returned_ids, operation, context=context)
3444 def create_workflow(self, cr, uid, ids, context=None):
3445 """Create a workflow instance for each given record IDs."""
3446 from openerp import workflow
3448 workflow.trg_create(uid, self._name, res_id, cr)
3449 # self.invalidate_cache(cr, uid, context=context) ?
3452 def delete_workflow(self, cr, uid, ids, context=None):
3453 """Delete the workflow instances bound to the given record IDs."""
3454 from openerp import workflow
3456 workflow.trg_delete(uid, self._name, res_id, cr)
3457 self.invalidate_cache(cr, uid, context=context)
3460 def step_workflow(self, cr, uid, ids, context=None):
3461 """Reevaluate the workflow instances of the given record IDs."""
3462 from openerp import workflow
3464 workflow.trg_write(uid, self._name, res_id, cr)
3465 # self.invalidate_cache(cr, uid, context=context) ?
3468 def signal_workflow(self, cr, uid, ids, signal, context=None):
3469 """Send given workflow signal and return a dict mapping ids to workflow results"""
3470 from openerp import workflow
3473 result[res_id] = workflow.trg_validate(uid, self._name, res_id, signal, cr)
3474 # self.invalidate_cache(cr, uid, context=context) ?
3477 def redirect_workflow(self, cr, uid, old_new_ids, context=None):
3478 """ Rebind the workflow instance bound to the given 'old' record IDs to
3479 the given 'new' IDs. (``old_new_ids`` is a list of pairs ``(old, new)``.
3481 from openerp import workflow
3482 for old_id, new_id in old_new_ids:
3483 workflow.trg_redirect(uid, self._name, old_id, new_id, cr)
3484 self.invalidate_cache(cr, uid, context=context)
3487 def unlink(self, cr, uid, ids, context=None):
3490 Deletes the records of the current set
3492 :raise AccessError: * if user has no unlink rights on the requested object
3493 * if user tries to bypass access rules for unlink on the requested object
3494 :raise UserError: if the record is default property for other records
3499 if isinstance(ids, (int, long)):
3502 result_store = self._store_get_values(cr, uid, ids, self._all_columns.keys(), context)
3504 # for recomputing new-style fields
3505 recs = self.browse(cr, uid, ids, context)
3506 recs.modified(self._fields)
3508 self._check_concurrency(cr, ids, context)
3510 self.check_access_rights(cr, uid, 'unlink')
3512 ir_property = self.pool.get('ir.property')
3514 # Check if the records are used as default properties.
3515 domain = [('res_id', '=', False),
3516 ('value_reference', 'in', ['%s,%s' % (self._name, i) for i in ids]),
3518 if ir_property.search(cr, uid, domain, context=context):
3519 raise except_orm(_('Error'), _('Unable to delete this document because it is used as a default property'))
3521 # Delete the records' properties.
3522 property_ids = ir_property.search(cr, uid, [('res_id', 'in', ['%s,%s' % (self._name, i) for i in ids])], context=context)
3523 ir_property.unlink(cr, uid, property_ids, context=context)
3525 self.delete_workflow(cr, uid, ids, context=context)
3527 self.check_access_rule(cr, uid, ids, 'unlink', context=context)
3528 pool_model_data = self.pool.get('ir.model.data')
3529 ir_values_obj = self.pool.get('ir.values')
3530 ir_attachment_obj = self.pool.get('ir.attachment')
3531 for sub_ids in cr.split_for_in_conditions(ids):
3532 cr.execute('delete from ' + self._table + ' ' \
3533 'where id IN %s', (sub_ids,))
3535 # Remove the ir_model_data reference if the record being deleted was created from an xml/csv file,
3536 # as these are not connected by real database foreign keys and would become dangling references.
3537 # Note: following steps performed as admin to avoid access rights restrictions, and with no context
3538 # to avoid possible side-effects during admin calls.
3539 # Step 1. Calling unlink of ir_model_data only for the affected IDS
3540 reference_ids = pool_model_data.search(cr, SUPERUSER_ID, [('res_id','in',list(sub_ids)),('model','=',self._name)])
3541 # Step 2. Marching towards the real deletion of referenced records
3543 pool_model_data.unlink(cr, SUPERUSER_ID, reference_ids)
3545 # For the same reason, removing the record relevant to ir_values
3546 ir_value_ids = ir_values_obj.search(cr, uid,
3547 ['|',('value','in',['%s,%s' % (self._name, sid) for sid in sub_ids]),'&',('res_id','in',list(sub_ids)),('model','=',self._name)],
3550 ir_values_obj.unlink(cr, uid, ir_value_ids, context=context)
3552 # For the same reason, removing the record relevant to ir_attachment
3553 # The search is performed with sql as the search method of ir_attachment is overridden to hide attachments of deleted records
3554 cr.execute('select id from ir_attachment where res_model = %s and res_id in %s', (self._name, sub_ids))
3555 ir_attachment_ids = [ir_attachment[0] for ir_attachment in cr.fetchall()]
3556 if ir_attachment_ids:
3557 ir_attachment_obj.unlink(cr, uid, ir_attachment_ids, context=context)
3559 # invalidate the *whole* cache, since the orm does not handle all
3560 # changes made in the database, like cascading delete!
3561 recs.invalidate_cache()
3563 for order, obj_name, store_ids, fields in result_store:
3564 if obj_name == self._name:
3565 effective_store_ids = set(store_ids) - set(ids)
3567 effective_store_ids = store_ids
3568 if effective_store_ids:
3569 obj = self.pool[obj_name]
3570 cr.execute('select id from '+obj._table+' where id IN %s', (tuple(effective_store_ids),))
3571 rids = map(lambda x: x[0], cr.fetchall())
3573 obj._store_set_values(cr, uid, rids, fields, context)
3575 # recompute new-style fields
3584 def write(self, vals):
3587 Updates all records in the current set with the provided values.
3589 :param dict vals: fields to update and the value to set on them e.g::
3591 {'foo': 1, 'bar': "Qux"}
3593 will set the field ``foo`` to ``1`` and the field ``bar`` to
3594 ``"Qux"`` if those are valid (otherwise it will trigger an error).
3596 :raise AccessError: * if user has no write rights on the requested object
3597 * if user tries to bypass access rules for write on the requested object
3598 :raise ValidateError: if user tries to enter an invalid value for a field that is not in the selection
3599 :raise UserError: if a loop would be created in a hierarchy of objects as a result of the operation (such as setting an object as its own parent)
3601 .. _openerp/models/relationals/format:
3603 .. note:: Relational fields use a special "commands" format to manipulate their values
3605 This format is a list of command triplets executed sequentially,
3606 possible command triplets are:
3608 ``(0, _, values: dict)``
3609 links to a new record created from the provided values
3610 ``(1, id, values: dict)``
3611 updates the already-linked record of id ``id`` with the
3614 unlinks and deletes the linked record of id ``id``
3616 unlinks the linked record of id ``id`` without deleting it
3618 links to an existing record of id ``id``
3620 unlinks all records in the relation, equivalent to using
3621 the command ``3`` on every linked record
3623 replaces the existing list of linked records by the provided
3624 ones, equivalent to using ``5`` then ``4`` for each id in
3627 (in command triplets, ``_`` values are ignored and can be
3628 anything, generally ``0`` or ``False``)
3630 Any command can be used on :class:`~openerp.fields.Many2many`;
3631 only ``0``, ``1`` and ``2`` can be used on
3632 :class:`~openerp.fields.One2many`.
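For instance, a single write() call can combine several commands; the field
names below (``line_ids``, ``tag_ids``) are purely illustrative::

    record.write({
        'line_ids': [
            (0, 0, {'name': 'new line'}),  # create a new line and link it
            (1, 42, {'name': 'renamed'}),  # update the linked line with id 42
            (2, 43, False),                # delete and unlink the line with id 43
        ],
        'tag_ids': [(6, 0, [1, 2, 3])],    # replace all tags with ids 1, 2 and 3
    })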
3637 self._check_concurrency(self._ids)
3638 self.check_access_rights('write')
3640 # No user-driven update of these columns
3641 for field in itertools.chain(MAGIC_COLUMNS, ('parent_left', 'parent_right')):
3642 vals.pop(field, None)
3644 # split up fields into old-style and pure new-style ones
3645 old_vals, new_vals, unknown = {}, {}, []
3646 for key, val in vals.iteritems():
3647 field = self._fields.get(key)
3649 if field.store or field.inherited:
3651 if field.inverse and not field.inherited:
3657 _logger.warning("%s.write() with unknown fields: %s", self._name, ', '.join(sorted(unknown)))
3659 # write old-style fields with (low-level) method _write
3661 self._write(old_vals)
3663 # put the values of pure new-style fields into cache, and inverse them
3666 record._cache.update(record._convert_to_cache(new_vals, update=True))
3667 for key in new_vals:
3668 self._fields[key].determine_inverse(self)
3672 def _write(self, cr, user, ids, vals, context=None):
3673 # low-level implementation of write()
3678 self.check_field_access_rights(cr, user, 'write', vals.keys())
3679 deleted_related = defaultdict(list)
3680 for field in vals.keys():
3682 if field in self._columns:
3683 fobj = self._columns[field]
3684 elif field in self._inherit_fields:
3685 fobj = self._inherit_fields[field][2]
3688 if fobj._type in ['one2many', 'many2many'] and vals[field]:
3689 for wtuple in vals[field]:
3690 if isinstance(wtuple, (tuple, list)) and wtuple[0] == 2:
3691 deleted_related[fobj._obj].append(wtuple[1])
3696 for group in groups:
3697 module = group.split(".")[0]
3698 grp = group.split(".")[1]
3699 cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name=%s and module=%s and model=%s) and uid=%s", \
3700 (grp, module, 'res.groups', user))
3701 readonly = cr.fetchall()
3702 if readonly[0][0] >= 1:
3709 result = self._store_get_values(cr, user, ids, vals.keys(), context) or []
3711 # for recomputing new-style fields
3712 recs = self.browse(cr, user, ids, context)
3713 modified_fields = list(vals)
3714 if self._log_access:
3715 modified_fields += ['write_date', 'write_uid']
3716 recs.modified(modified_fields)
3718 parents_changed = []
3719 parent_order = self._parent_order or self._order
3720 if self._parent_store and (self._parent_name in vals) and not context.get('defer_parent_store_computation'):
3721 # The parent_left/right computation may take up to
3722 # 5 seconds. No need to recompute the values if the
3723 # parent is the same.
3724 # Note: to respect parent_order, nodes must be processed in
3725 # order, so ``parents_changed`` must be ordered properly.
3726 parent_val = vals[self._parent_name]
3728 query = "SELECT id FROM %s WHERE id IN %%s AND (%s != %%s OR %s IS NULL) ORDER BY %s" % \
3729 (self._table, self._parent_name, self._parent_name, parent_order)
3730 cr.execute(query, (tuple(ids), parent_val))
3732 query = "SELECT id FROM %s WHERE id IN %%s AND (%s IS NOT NULL) ORDER BY %s" % \
3733 (self._table, self._parent_name, parent_order)
3734 cr.execute(query, (tuple(ids),))
3735 parents_changed = map(operator.itemgetter(0), cr.fetchall())
3742 totranslate = context.get('lang', False) and (context['lang'] != 'en_US')
3744 field_column = self._all_columns.get(field) and self._all_columns.get(field).column
3745 if field_column and field_column.deprecated:
3746 _logger.warning('Field %s.%s is deprecated: %s', self._name, field, field_column.deprecated)
3747 if field in self._columns:
3748 if self._columns[field]._classic_write and not (hasattr(self._columns[field], '_fnct_inv')):
3749 if (not totranslate) or not self._columns[field].translate:
3750 upd0.append('"'+field+'"='+self._columns[field]._symbol_set[0])
3751 upd1.append(self._columns[field]._symbol_set[1](vals[field]))
3752 direct.append(field)
3754 upd_todo.append(field)
3756 updend.append(field)
3757 if field in self._columns \
3758 and hasattr(self._columns[field], 'selection') \
3760 self._check_selection_field_value(cr, user, field, vals[field], context=context)
3762 if self._log_access:
3763 upd0.append('write_uid=%s')
3764 upd0.append("write_date=(now() at time zone 'UTC')")
3766 direct.append('write_uid')
3767 direct.append('write_date')
3770 self.check_access_rule(cr, user, ids, 'write', context=context)
3771 for sub_ids in cr.split_for_in_conditions(ids):
3772 cr.execute('update ' + self._table + ' set ' + ','.join(upd0) + ' ' \
3773 'where id IN %s', upd1 + [sub_ids])
3774 if cr.rowcount != len(sub_ids):
3775 raise MissingError(_('One of the records you are trying to modify has already been deleted (Document type: %s).') % self._description)
3780 if self._columns[f].translate:
3781 src_trans = self.pool[self._name].read(cr, user, ids, [f])[0][f]
3784 # Inserting value to DB
3785 context_wo_lang = dict(context, lang=None)
3786 self.write(cr, user, ids, {f: vals[f]}, context=context_wo_lang)
3787 self.pool.get('ir.translation')._set_ids(cr, user, self._name+','+f, 'model', context['lang'], ids, vals[f], src_trans)
3789 # invalidate and mark new-style fields to recompute; do this before
3790 # setting other fields, because it can require the value of computed
3791 # fields, e.g., a one2many checking constraints on records
3792 recs.modified(direct)
3794 # call the 'set' method of fields which are not classic_write
3795 upd_todo.sort(lambda x, y: self._columns[x].priority-self._columns[y].priority)
3797 # default elements in context must be removed when calling a one2many or many2many
3798 rel_context = context.copy()
3799 for c in context.items():
3800 if c[0].startswith('default_'):
3801 del rel_context[c[0]]
3803 for field in upd_todo:
3805 result += self._columns[field].set(cr, self, id, field, vals[field], user, context=rel_context) or []
3807 # for recomputing new-style fields
3808 recs.modified(upd_todo)
3810 unknown_fields = updend[:]
3811 for table in self._inherits:
3812 col = self._inherits[table]
3814 for sub_ids in cr.split_for_in_conditions(ids):
3815 cr.execute('select distinct "'+col+'" from "'+self._table+'" ' \
3816 'where id IN %s', (sub_ids,))
3817 nids.extend([x[0] for x in cr.fetchall()])
3821 if self._inherit_fields[val][0] == table:
3823 unknown_fields.remove(val)
3825 self.pool[table].write(cr, user, nids, v, context)
3829 'No such field(s) in model %s: %s.',
3830 self._name, ', '.join(unknown_fields))
3832 # check Python constraints
3833 recs._validate_fields(vals)
3835 # TODO: use _order to set dest at the right position and not first node of parent
3836 # We can't defer parent_store computation because the stored function
3837 # fields that are computed may refer (directly or indirectly) to
3838 # parent_left/right (via a child_of domain)
3841 self.pool._init_parent[self._name] = True
3843 order = self._parent_order or self._order
3844 parent_val = vals[self._parent_name]
3846 clause, params = '%s=%%s' % (self._parent_name,), (parent_val,)
3848 clause, params = '%s IS NULL' % (self._parent_name,), ()
3850 for id in parents_changed:
3851 cr.execute('SELECT parent_left, parent_right FROM %s WHERE id=%%s' % (self._table,), (id,))
3852 pleft, pright = cr.fetchone()
3853 distance = pright - pleft + 1
3855 # Positions of current siblings, to locate proper insertion point;
3856 # this can _not_ be fetched outside the loop, as it needs to be refreshed
3857 # after each update, in case several nodes are sequentially inserted one
3858 # next to the other (i.e. computed incrementally)
3859 cr.execute('SELECT parent_right, id FROM %s WHERE %s ORDER BY %s' % (self._table, clause, parent_order), params)
3860 parents = cr.fetchall()
3862 # Find Position of the element
3864 for (parent_pright, parent_id) in parents:
3867 position = parent_pright and parent_pright + 1 or 1
3869 # It's the first node of the parent
3874 cr.execute('select parent_left from '+self._table+' where id=%s', (parent_val,))
3875 position = cr.fetchone()[0] + 1
3877 if pleft < position <= pright:
3878 raise except_orm(_('UserError'), _('Recursivity Detected.'))
3880 if pleft < position:
3881 cr.execute('update '+self._table+' set parent_left=parent_left+%s where parent_left>=%s', (distance, position))
3882 cr.execute('update '+self._table+' set parent_right=parent_right+%s where parent_right>=%s', (distance, position))
3883 cr.execute('update '+self._table+' set parent_left=parent_left+%s, parent_right=parent_right+%s where parent_left>=%s and parent_left<%s', (position-pleft, position-pleft, pleft, pright))
3885 cr.execute('update '+self._table+' set parent_left=parent_left+%s where parent_left>=%s', (distance, position))
3886 cr.execute('update '+self._table+' set parent_right=parent_right+%s where parent_right>=%s', (distance, position))
3887 cr.execute('update '+self._table+' set parent_left=parent_left-%s, parent_right=parent_right-%s where parent_left>=%s and parent_left<%s', (pleft-position+distance, pleft-position+distance, pleft+distance, pright+distance))
3888 recs.invalidate_cache(['parent_left', 'parent_right'])
3890 result += self._store_get_values(cr, user, ids, vals.keys(), context)
3894 for order, model_name, ids_to_update, fields_to_recompute in result:
3895 key = (model_name, tuple(fields_to_recompute))
3896 done.setdefault(key, {})
3897 # avoid doing the same computation several times
3899 for id in ids_to_update:
3900 if id not in done[key]:
3901 done[key][id] = True
3902 if id not in deleted_related[model_name]:
3904 self.pool[model_name]._store_set_values(cr, user, todo, fields_to_recompute, context)
3906 # recompute new-style fields
3907 if context.get('recompute', True):
3910 self.step_workflow(cr, user, ids, context=context)
3914 # TODO: Should set perm to user.xxx
3917 @api.returns('self', lambda value: value.id)
3918 def create(self, vals):
3919 """ create(vals) -> record
3921 Creates a new record for the model.
3923 The new record is initialized using the values from ``vals`` and
3924 if necessary those from :meth:`~.default_get`.
3927 values for the model's fields, as a dictionary::
3929 {'field_name': field_value, ...}
3931 see :meth:`~.write` for details
3932 :return: new record created
3933 :raise AccessError: * if user has no create rights on the requested object
3934 * if user tries to bypass access rules for create on the requested object
3935 :raise ValidateError: if user tries to enter an invalid value for a field that is not in the selection
3936 :raise UserError: if a loop would be created in a hierarchy of objects as a result of the operation (such as setting an object as its own parent)
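A minimal usage sketch (the model name and values are illustrative)::

    partner = self.env['res.partner'].create({'name': 'ACME'})
    assert partner.name == 'ACME'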
3938 self.check_access_rights('create')
3940 # add missing defaults, and drop fields that may not be set by user
3941 vals = self._add_missing_default_values(vals)
3942 for field in itertools.chain(MAGIC_COLUMNS, ('parent_left', 'parent_right')):
3943 vals.pop(field, None)
3945 # split up fields into old-style and pure new-style ones
3946 old_vals, new_vals, unknown = {}, {}, []
3947 for key, val in vals.iteritems():
3948 field = self._fields.get(key)
3950 if field.store or field.inherited:
3952 if field.inverse and not field.inherited:
3958 _logger.warning("%s.create() with unknown fields: %s", self._name, ', '.join(sorted(unknown)))
3960 # create record with old-style fields
3961 record = self.browse(self._create(old_vals))
3963 # put the values of pure new-style fields into cache, and inverse them
3964 record._cache.update(record._convert_to_cache(new_vals))
3965 for key in new_vals:
3966 self._fields[key].determine_inverse(record)
3970 def _create(self, cr, user, vals, context=None):
3971 # low-level implementation of create()
3975 if self.is_transient():
3976 self._transient_vacuum(cr, user)
3979 for v in self._inherits:
3980 if self._inherits[v] not in vals:
3983 tocreate[v] = {'id': vals[self._inherits[v]]}
3986 # list of column assignments defined as tuples like:
3987 # (column_name, format_string, column_value)
3988 # (column_name, sql_formula)
3989 # Those tuples will be used by the string formatting for the INSERT
3991 ('id', "nextval('%s')" % self._sequence),
3996 for v in vals.keys():
3997 if v in self._inherit_fields and v not in self._columns:
3998 (table, col, col_detail, original_parent) = self._inherit_fields[v]
3999 tocreate[table][v] = vals[v]
4002 if (v not in self._inherit_fields) and (v not in self._columns):
4004 unknown_fields.append(v)
4007 'No such field(s) in model %s: %s.',
4008 self._name, ', '.join(unknown_fields))
4010 for table in tocreate:
4011 if self._inherits[table] in vals:
4012 del vals[self._inherits[table]]
4014 record_id = tocreate[table].pop('id', None)
4016 if record_id is None or not record_id:
4017 record_id = self.pool[table].create(cr, user, tocreate[table], context=context)
4019 self.pool[table].write(cr, user, [record_id], tocreate[table], context=context)
4021 updates.append((self._inherits[table], '%s', record_id))
4023 # Set boolean fields to False if they are not touched (to make search more powerful)
4024 bool_fields = [x for x in self._columns.keys() if self._columns[x]._type=='boolean']
4026 for bool_field in bool_fields:
4027 if bool_field not in vals:
4028 vals[bool_field] = False
4030 for field in vals.keys():
4032 if field in self._columns:
4033 fobj = self._columns[field]
4035 fobj = self._inherit_fields[field][2]
4041 for group in groups:
4042 module = group.split(".")[0]
4043 grp = group.split(".")[1]
4044 cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name=%s and module=%s and model=%s) and uid=%s", \
4045 (grp, module, 'res.groups', user))
4046 readonly = cr.fetchall()
4047 if readonly[0][0] >= 1:
4050 elif readonly[0][0] == 0:
4058 current_field = self._columns[field]
4059 if current_field._classic_write:
4060 updates.append((field, '%s', current_field._symbol_set[1](vals[field])))
4062 #for the function fields that receive a value, we set them directly in the database
4063 #(they may be required), but we also need to trigger the _fct_inv()
4064 if (hasattr(current_field, '_fnct_inv')) and not isinstance(current_field, fields.related):
4065 #TODO: this way to special-case the related fields is really creepy but it shouldn't be changed
4066 #one week before the release candidate. It seems the only good way to handle this correctly is to add an
4067 #attribute to make a field `really readonly` and thus totally ignored by create()... otherwise
4068 #if, for example, the related field has a default value (for usability) then the fct_inv is called and it
4069 #may raise an access rights error. Changing this is too big a change for now, and is thus postponed
4070 #until after the release but, definitely, the behavior shouldn't be different for related and function
4072 upd_todo.append(field)
4074 #TODO: this `if` statement should be removed because there is no good reason to special-case the related
4075 #fields. See the above TODO comment for further explanations.
4076 if not isinstance(current_field, fields.related):
4077 upd_todo.append(field)
4078 if field in self._columns \
4079 and hasattr(current_field, 'selection') \
4081 self._check_selection_field_value(cr, user, field, vals[field], context=context)
4082 if self._log_access:
4083 updates.append(('create_uid', '%s', user))
4084 updates.append(('write_uid', '%s', user))
4085 updates.append(('create_date', "(now() at time zone 'UTC')"))
4086 updates.append(('write_date', "(now() at time zone 'UTC')"))
4088 # the list of tuples used in this formatting corresponds to
4089 # tuple(field_name, format, value)
4090 # In some cases, for example (id, create_date, write_date), we do not
4091 # need to read the third value of the tuple, because the real value is
4092 # encoded in the second value (the format).
4094 """INSERT INTO "%s" (%s) VALUES(%s) RETURNING id""" % (
4096 ', '.join('"%s"' % u[0] for u in updates),
4097 ', '.join(u[1] for u in updates)
4099 tuple([u[2] for u in updates if len(u) > 2])
4102 id_new, = cr.fetchone()
4103 recs = self.browse(cr, user, id_new, context)
4105 if self._parent_store and not context.get('defer_parent_store_computation'):
4107 self.pool._init_parent[self._name] = True
4109 parent = vals.get(self._parent_name, False)
4111 cr.execute('select parent_right from '+self._table+' where '+self._parent_name+'=%s order by '+(self._parent_order or self._order), (parent,))
4113 result_p = cr.fetchall()
4114 for (pleft,) in result_p:
4119 cr.execute('select parent_left from '+self._table+' where id=%s', (parent,))
4120 pleft_old = cr.fetchone()[0]
4123 cr.execute('select max(parent_right) from '+self._table)
4124 pleft = cr.fetchone()[0] or 0
4125 cr.execute('update '+self._table+' set parent_left=parent_left+2 where parent_left>%s', (pleft,))
4126 cr.execute('update '+self._table+' set parent_right=parent_right+2 where parent_right>%s', (pleft,))
4127 cr.execute('update '+self._table+' set parent_left=%s,parent_right=%s where id=%s', (pleft+1, pleft+2, id_new))
4128 recs.invalidate_cache(['parent_left', 'parent_right'])
4130 # invalidate and mark new-style fields to recompute; do this before
4131 # setting other fields, because it can require the value of computed
4132 # fields, e.g., a one2many checking constraints on records
4133 recs.modified([u[0] for u in updates])
4135 # call the 'set' method of fields which are not classic_write
4136 upd_todo.sort(lambda x, y: self._columns[x].priority-self._columns[y].priority)
4138 # default elements in context must be removed when calling a one2many or many2many
4139 rel_context = context.copy()
4140 for c in context.items():
4141 if c[0].startswith('default_'):
4142 del rel_context[c[0]]
4145 for field in upd_todo:
4146 result += self._columns[field].set(cr, self, id_new, field, vals[field], user, rel_context) or []
4148 # for recomputing new-style fields
4149 recs.modified(upd_todo)
4151 # check Python constraints
4152 recs._validate_fields(vals)
4154 if context.get('recompute', True):
4155 result += self._store_get_values(cr, user, [id_new],
4156 list(set(vals.keys() + self._inherits.values())),
4160 for order, model_name, ids, fields2 in result:
4161 if not (model_name, ids, fields2) in done:
4162 self.pool[model_name]._store_set_values(cr, user, ids, fields2, context)
4163 done.append((model_name, ids, fields2))
4164 # recompute new-style fields
4167 if self._log_create and context.get('recompute', True):
4168 message = self._description + \
4170 self.name_get(cr, user, [id_new], context=context)[0][1] + \
4171 "' " + _("created.")
4172 self.log(cr, user, id_new, message, True, context=context)
4174 self.check_access_rule(cr, user, [id_new], 'create', context=context)
4175 self.create_workflow(cr, user, [id_new], context=context)
4178 def _store_get_values(self, cr, uid, ids, fields, context):
4179 """Returns an ordered list of fields.function to call due to
4180 an update operation on ``fields`` of records with ``ids``,
4181 obtained by calling the 'store' triggers of these fields,
4182 as setup by their 'store' attribute.
4184 :return: [(priority, model_name, [record_ids,], [function_fields,])]
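For instance, the result may look like this (model and field names are purely
illustrative)::

    [(10, 'model.a', [1, 2, 3], ['total']),
     (20, 'model.b', [7], ['subtotal', 'tax'])]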
4186 if fields is None: fields = []
4187 stored_functions = self.pool._store_function.get(self._name, [])
4189 # use indexed names for the details of the stored_functions:
4190 model_name_, func_field_to_compute_, target_ids_func_, trigger_fields_, priority_ = range(5)
4192 # only keep store triggers that should be triggered for the ``fields``
4194 triggers_to_compute = (
4195 f for f in stored_functions
4196 if not f[trigger_fields_] or set(fields).intersection(f[trigger_fields_])
4200 target_id_results = {}
4201 for store_trigger in triggers_to_compute:
4202 target_func_id_ = id(store_trigger[target_ids_func_])
4203 if target_func_id_ not in target_id_results:
4204 # use admin user for accessing objects having rules defined on store fields
4205 target_id_results[target_func_id_] = [i for i in store_trigger[target_ids_func_](self, cr, SUPERUSER_ID, ids, context) if i]
4206 target_ids = target_id_results[target_func_id_]
4208 # the compound key must consider the priority and model name
4209 key = (store_trigger[priority_], store_trigger[model_name_])
4210 for target_id in target_ids:
4211 to_compute_map.setdefault(key, {}).setdefault(target_id,set()).add(tuple(store_trigger))
4213 # Here to_compute_map looks like:
4214 # { (10, 'model_a') : { target_id1: [ (trigger_1_tuple, trigger_2_tuple) ], ... }
4215 # (20, 'model_a') : { target_id2: [ (trigger_3_tuple, trigger_4_tuple) ], ... }
4216 # (99, 'model_a') : { target_id1: [ (trigger_5_tuple, trigger_6_tuple) ], ... }
4219 # Now we need to generate the batch function calls list
4221 # { (10, 'model_a') : [(10, 'model_a', [record_ids,], [function_fields,])] }
4223 for ((priority,model), id_map) in to_compute_map.iteritems():
4224 trigger_ids_maps = {}
4225 # trigger_ids_maps =
4226 # { (trigger_1_tuple, trigger_2_tuple) : [target_id1, target_id2, ..] }
4227 for target_id, triggers in id_map.iteritems():
4228 trigger_ids_maps.setdefault(tuple(triggers), []).append(target_id)
4229 for triggers, target_ids in trigger_ids_maps.iteritems():
4230 call_map.setdefault((priority,model),[]).append((priority, model, target_ids,
4231 [t[func_field_to_compute_] for t in triggers]))
4234 result = reduce(operator.add, (call_map[k] for k in sorted(call_map)))
4237 def _store_set_values(self, cr, uid, ids, fields, context):
4238 """Calls the fields.function's "implementation function" for all ``fields``, on records with ``ids`` (taking care of
4239 respecting ``multi`` attributes), and stores the resulting values in the database directly."""
4244 if self._log_access:
4245 cr.execute('select id,write_date from '+self._table+' where id IN %s', (tuple(ids),))
4249 field_dict.setdefault(r[0], [])
4250 res_date = time.strptime((r[1])[:19], '%Y-%m-%d %H:%M:%S')
4251 write_date = datetime.datetime.fromtimestamp(time.mktime(res_date))
4252 for i in self.pool._store_function.get(self._name, []):
4254 up_write_date = write_date + datetime.timedelta(hours=i[5])
4255 if datetime.datetime.now() < up_write_date:
4257 field_dict[r[0]].append(i[1])
4263 if self._columns[f]._multi not in keys:
4264 keys.append(self._columns[f]._multi)
4265 todo.setdefault(self._columns[f]._multi, [])
4266 todo[self._columns[f]._multi].append(f)
4270 # use admin user for accessing objects having rules defined on store fields
4271 result = self._columns[val[0]].get(cr, self, ids, val, SUPERUSER_ID, context=context)
4272 for id, value in result.items():
4274 for f in value.keys():
4275 if f in field_dict[id]:
4282 if self._columns[v]._type == 'many2one':
4284 value[v] = value[v][0]
4287 upd0.append('"'+v+'"='+self._columns[v]._symbol_set[0])
4288 upd1.append(self._columns[v]._symbol_set[1](value[v]))
4291 cr.execute('update "' + self._table + '" set ' + \
4292 ','.join(upd0) + ' where id = %s', upd1)
4296 # use admin user for accessing objects having rules defined on store fields
4297 result = self._columns[f].get(cr, self, ids, f, SUPERUSER_ID, context=context)
4298 for r in result.keys():
4300 if r in field_dict.keys():
4301 if f in field_dict[r]:
4303 for id, value in result.items():
4304 if self._columns[f]._type == 'many2one':
4309 cr.execute('update "' + self._table + '" set ' + \
4310 '"'+f+'"='+self._columns[f]._symbol_set[0] + ' where id = %s', (self._columns[f]._symbol_set[1](value), id))
4312 # invalidate and mark new-style fields to recompute
4313 self.browse(cr, uid, ids, context).modified(fields)
4317 # TODO: improve with NULL
4318 def _where_calc(self, cr, user, domain, active_test=True, context=None):
4319 """Computes the WHERE clause needed to implement an OpenERP domain.
4320 :param domain: the domain to compute
4322 :param active_test: whether the default filtering of records with ``active``
4323 field set to ``False`` should be applied.
4324 :return: the query expressing the given domain as provided in domain
4325 :rtype: osv.query.Query
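For instance (an illustrative sketch; the table name and domain are hypothetical,
and the generated SQL is only approximate)::

    query = self._where_calc(cr, uid, [('name', 'ilike', 'foo')], context=context)
    from_clause, where_clause, params = query.get_sql()
    # from_clause  ~ '"res_partner"'
    # where_clause ~ '("res_partner"."name" ilike %s)'
    # params       ~ ['%foo%']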
4330 # if the object has a field named 'active', filter out all inactive
4331 # records unless they were explicitly asked for
4332 if 'active' in self._all_columns and (active_test and context.get('active_test', True)):
4334 # the item[0] trick below works for domain items and '&'/'|'/'!'
4336 if not any(item[0] == 'active' for item in domain):
4337 domain.insert(0, ('active', '=', 1))
4339 domain = [('active', '=', 1)]
4342 e = expression.expression(cr, user, domain, self, context)
4343 tables = e.get_tables()
4344 where_clause, where_params = e.to_sql()
4345 where_clause = where_clause and [where_clause] or []
4347 where_clause, where_params, tables = [], [], ['"%s"' % self._table]
4349 return Query(tables, where_clause, where_params)
4351 def _check_qorder(self, word):
4352 if not regex_order.match(word):
4353 raise except_orm(_('AccessError'), _('Invalid "order" specified. A valid "order" specification is a comma-separated list of valid field names (optionally followed by asc/desc for the direction)'))
4356 def _apply_ir_rules(self, cr, uid, query, mode='read', context=None):
4357 """Add what's missing in ``query`` to implement all appropriate ir.rules
4358 (using the ``model_name``'s rules or the current model's rules if ``model_name`` is None)
4360 :param query: the current query object
4362 if uid == SUPERUSER_ID:
4365 def apply_rule(added_clause, added_params, added_tables, parent_model=None):
4366 """ :param parent_model: name of the parent model, if the added
4367 clause comes from a parent model
4371 # as inherited rules are being applied, we need to add the missing JOIN
4372 # to reach the parent table (if it was not JOINed yet in the query)
4373 parent_alias = self._inherits_join_add(self, parent_model, query)
4374 # inherited rules are applied on the external table -> need to get the alias and replace
4375 parent_table = self.pool[parent_model]._table
4376 added_clause = [clause.replace('"%s"' % parent_table, '"%s"' % parent_alias) for clause in added_clause]
4377 # change references to parent_table to parent_alias, because we now use the alias to refer to the table
4379 for table in added_tables:
4380 # table is just a table name -> switch to the full alias
4381 if table == '"%s"' % parent_table:
4382 new_tables.append('"%s" as "%s"' % (parent_table, parent_alias))
4383 # table is already a full statement -> replace references to the table with its alias; this is correct given the way aliases are generated
4385 new_tables.append(table.replace('"%s"' % parent_table, '"%s"' % parent_alias))
4386 added_tables = new_tables
4387 query.where_clause += added_clause
4388 query.where_clause_params += added_params
4389 for table in added_tables:
4390 if table not in query.tables:
4391 query.tables.append(table)
4395 # apply main rules on the object
4396 rule_obj = self.pool.get('ir.rule')
4397 rule_where_clause, rule_where_clause_params, rule_tables = rule_obj.domain_get(cr, uid, self._name, mode, context=context)
4398 apply_rule(rule_where_clause, rule_where_clause_params, rule_tables)
4400 # apply ir.rules from the parents (through _inherits)
4401 for inherited_model in self._inherits:
4402 rule_where_clause, rule_where_clause_params, rule_tables = rule_obj.domain_get(cr, uid, inherited_model, mode, context=context)
4403 apply_rule(rule_where_clause, rule_where_clause_params, rule_tables,
4404 parent_model=inherited_model)
4406 def _generate_m2o_order_by(self, order_field, query):
4408 Add possibly missing JOIN to ``query`` and generate the ORDER BY clause for m2o fields,
4409 either native m2o fields or function/related fields that are stored, including
4410 intermediate JOINs for inheritance if required.
4412 :return: the qualified field name to use in an ORDER BY clause to sort by ``order_field``
4414 if order_field not in self._columns and order_field in self._inherit_fields:
4415 # also add missing joins for reaching the table containing the m2o field
4416 qualified_field = self._inherits_join_calc(order_field, query)
4417 order_field_column = self._inherit_fields[order_field][2]
4419 qualified_field = '"%s"."%s"' % (self._table, order_field)
4420 order_field_column = self._columns[order_field]
4422 assert order_field_column._type == 'many2one', 'Invalid field passed to _generate_m2o_order_by()'
4423 if not order_field_column._classic_write and not getattr(order_field_column, 'store', False):
4424 _logger.debug("Many2one function/related fields must be stored " \
4425 "to be used as ordering fields! Ignoring sorting for %s.%s",
4426 self._name, order_field)
4429 # figure out the applicable order_by for the m2o
4430 dest_model = self.pool[order_field_column._obj]
4431 m2o_order = dest_model._order
4432 if not regex_order.match(m2o_order):
4433 # _order is complex, can't use it here, so we default to _rec_name
4434 m2o_order = dest_model._rec_name
4436 # extract the field names, to be able to qualify them and add desc/asc
4438 for order_part in m2o_order.split(","):
4439 m2o_order_list.append(order_part.strip().split(" ", 1)[0].strip())
4440 m2o_order = m2o_order_list
4442 # Join the dest m2o table if it's not joined yet. We use [LEFT] OUTER join here
4443 # as we don't want to exclude results that have NULL values for the m2o
4444 src_table, src_field = qualified_field.replace('"', '').split('.', 1)
4445 dst_alias, dst_alias_statement = query.add_join((src_table, dest_model._table, src_field, 'id', src_field), implicit=False, outer=True)
4446 qualify = lambda field: '"%s"."%s"' % (dst_alias, field)
4447 return map(qualify, m2o_order) if isinstance(m2o_order, list) else qualify(m2o_order)
4449 def _generate_order_by(self, order_spec, query):
4451 Attempt to construct an appropriate ORDER BY clause based on order_spec, which must be
4452 a comma-separated list of valid field names, optionally followed by an ASC or DESC direction.
4454 :raise: except_orm in case order_spec is malformed
4456 order_by_clause = ''
4457 order_spec = order_spec or self._order
4459 order_by_elements = []
4460 self._check_qorder(order_spec)
4461 for order_part in order_spec.split(','):
4462 order_split = order_part.strip().split(' ')
4463 order_field = order_split[0].strip()
4464 order_direction = order_split[1].strip() if len(order_split) == 2 else ''
4467 if order_field == 'id':
4468 order_by_elements.append('"%s"."%s" %s' % (self._table, order_field, order_direction))
4469 elif order_field in self._columns:
4470 order_column = self._columns[order_field]
4471 if order_column._classic_read:
4472 inner_clause = '"%s"."%s"' % (self._table, order_field)
4473 elif order_column._type == 'many2one':
4474 inner_clause = self._generate_m2o_order_by(order_field, query)
4476 continue # ignore non-readable or "non-joinable" fields
4477 elif order_field in self._inherit_fields:
4478 parent_obj = self.pool[self._inherit_fields[order_field][3]]
4479 order_column = parent_obj._columns[order_field]
4480 if order_column._classic_read:
4481 inner_clause = self._inherits_join_calc(order_field, query)
4482 elif order_column._type == 'many2one':
4483 inner_clause = self._generate_m2o_order_by(order_field, query)
4485 continue # ignore non-readable or "non-joinable" fields
4487 raise ValueError( _("Sorting field %s not found on model %s") %( order_field, self._name))
4488 if order_column and order_column._type == 'boolean':
4489 inner_clause = "COALESCE(%s, false)" % inner_clause
4491 if isinstance(inner_clause, list):
4492 for clause in inner_clause:
4493 order_by_elements.append("%s %s" % (clause, order_direction))
4495 order_by_elements.append("%s %s" % (inner_clause, order_direction))
4496 if order_by_elements:
4497 order_by_clause = ",".join(order_by_elements)
4499 return order_by_clause and (' ORDER BY %s ' % order_by_clause) or ''
4501 def _search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False, access_rights_uid=None):
4503 Private implementation of search() method, allowing specifying the uid to use for the access right check.
4504 This is useful for example when filling in the selection list for a drop-down and avoiding access rights errors,
4505 by specifying ``access_rights_uid=1`` to bypass access rights check, but not ir.rules!
4506 This is ok at the security level because this method is private and not callable through XML-RPC.
4508 :param access_rights_uid: optional user ID to use when checking access rights
4509 (not for ir.rules, this is only for ir.model.access)
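For instance (an illustrative sketch), to fill a selection list without tripping the
ACLs while still applying record rules::

    ids = self._search(cr, uid, [('active', '=', True)], limit=80,
                       access_rights_uid=SUPERUSER_ID, context=context)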
4513 self.check_access_rights(cr, access_rights_uid or user, 'read')
4515 # For transient models, restrict access to the current user, except for the super-user
4516 if self.is_transient() and self._log_access and user != SUPERUSER_ID:
4517 args = expression.AND(([('create_uid', '=', user)], args or []))
4519 query = self._where_calc(cr, user, args, context=context)
4520 self._apply_ir_rules(cr, user, query, 'read', context=context)
4521 order_by = self._generate_order_by(order, query)
4522 from_clause, where_clause, where_clause_params = query.get_sql()
4524 where_str = where_clause and (" WHERE %s" % where_clause) or ''
4527 # Ignore order, limit and offset when just counting, they don't make sense and could
4529 query_str = 'SELECT count(1) FROM ' + from_clause + where_str
4530 cr.execute(query_str, where_clause_params)
4534 limit_str = limit and ' limit %d' % limit or ''
4535 offset_str = offset and ' offset %d' % offset or ''
4536 query_str = 'SELECT "%s".id FROM ' % self._table + from_clause + where_str + order_by + limit_str + offset_str
4537 cr.execute(query_str, where_clause_params)
4540 # TDE note: with auto_join, we could have several lines about the same result
4541 # i.e. a lead with several unread messages; we uniquify the result using
4542 # a fast way to do it while preserving order (http://www.peterbe.com/plog/uniqifiers-benchmark)
4543 def _uniquify_list(seq):
4545 return [x for x in seq if x not in seen and not seen.add(x)]
4547 return _uniquify_list([x[0] for x in res])
4549 # returns the different values ever entered for one field
4550 # this is used, for example, in the client when the user hits enter on
4552 def distinct_field_get(self, cr, uid, field, value, args=None, offset=0, limit=None):
4555 if field in self._inherit_fields:
4556 return self.pool[self._inherit_fields[field][0]].distinct_field_get(cr, uid, field, value, args, offset, limit)
4558 return self._columns[field].search(cr, self, args, field, value, offset, limit, uid)
4560 def copy_data(self, cr, uid, id, default=None, context=None):
4562 Copy the given record's data with all its field values
4564 :param cr: database cursor
4565 :param uid: current user id
4566 :param id: id of the record to copy
4567 :param default: field values to override in the original values of the copied record
4568 :type default: dictionary
4569 :param context: context arguments, like lang, time zone
4570 :type context: dictionary
4571 :return: dictionary containing all the field values
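For instance (illustrative field names and values), a record with a many2one
``partner_id``, a one2many ``line_ids`` and a many2many ``tag_ids`` could be
returned as::

    {'name': 'Order 7',
     'partner_id': 12,
     'line_ids': [(0, 0, {'name': 'line 1'}), (0, 0, {'name': 'line 2'})],
     'tag_ids': [(6, 0, [1, 4])]}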
4577 # avoid recursion through already copied records in case of circular relationship
4578 seen_map = context.setdefault('__copy_data_seen', {})
4579 if id in seen_map.setdefault(self._name, []):
4581 seen_map[self._name].append(id)
4585 if 'state' not in default:
4586 if 'state' in self._defaults:
4587 if callable(self._defaults['state']):
4588 default['state'] = self._defaults['state'](self, cr, uid, context)
4590 default['state'] = self._defaults['state']
4592 # build a black list of fields that should not be copied
4593 blacklist = set(MAGIC_COLUMNS + ['parent_left', 'parent_right'])
4594 def blacklist_given_fields(obj):
4595 # blacklist the fields that are given by inheritance
4596 for other, field_to_other in obj._inherits.items():
4597 blacklist.add(field_to_other)
4598 if field_to_other in default:
4599 # all the fields of 'other' are given by the record: default[field_to_other],
4600 # except the ones redefined in self
4601 blacklist.update(set(self.pool[other]._all_columns) - set(self._columns))
4603 blacklist_given_fields(self.pool[other])
4604 # blacklist deprecated fields
4605 for name, field in obj._columns.items():
4606 if field.deprecated:
4609 blacklist_given_fields(self)
4612 fields_to_copy = dict((f,fi) for f, fi in self._all_columns.iteritems()
4615 if f not in blacklist)
4617 data = self.read(cr, uid, [id], fields_to_copy.keys(), context=context)
4618 if data:
4619 data = data[0]
4620 else:
4621 raise IndexError( _("Record #%d of %s not found, cannot copy!") %( id, self._name))
4623 res = dict(default)
4624 for f, colinfo in fields_to_copy.iteritems():
4625 field = colinfo.column
4626 if field._type == 'many2one':
4627 res[f] = data[f] and data[f][0]
4628 elif field._type == 'one2many':
4629 other = self.pool[field._obj]
4630 # duplicate following the order of the ids because we'll rely on
4631 # it later for copying translations in copy_translation()!
4632 lines = [other.copy_data(cr, uid, line_id, context=context) for line_id in sorted(data[f])]
4633 # the lines are duplicated using the wrong (old) parent, but then
4634 # are reassigned to the correct one thanks to the (0, 0, ...)
4635 res[f] = [(0, 0, line) for line in lines if line]
4636 elif field._type == 'many2many':
4637 res[f] = [(6, 0, data[f])]
4639 return res
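# Illustrative usage sketch (not part of the original source; the model name
# 'res.partner' and the override value are assumptions): copy_data() returns a
# values dictionary where x2many fields are encoded as commands, e.g. (0, 0,
# {...}) for duplicated one2many lines and (6, 0, ids) for many2many links.
#
#     partner_obj = self.pool['res.partner']
#     values = partner_obj.copy_data(cr, uid, 42, default={'name': 'Copy of Agrolait'})
#     new_id = partner_obj.create(cr, uid, values, context=context)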
4643 def copy_translations(self, cr, uid, old_id, new_id, context=None):
4647 # avoid recursion through already copied records in case of circular relationship
4648 seen_map = context.setdefault('__copy_translations_seen',{})
4649 if old_id in seen_map.setdefault(self._name,[]):
4650 return
4651 seen_map[self._name].append(old_id)
4653 trans_obj = self.pool.get('ir.translation')
4654 # TODO it seems fields_get can be replaced by _all_columns (no need for translation)
4655 fields = self.fields_get(cr, uid, context=context)
4657 for field_name, field_def in fields.items():
4658 # removing the lang to compare untranslated values
4659 context_wo_lang = dict(context, lang=None)
4660 old_record, new_record = self.browse(cr, uid, [old_id, new_id], context=context_wo_lang)
4661 # we must recursively copy the translations for o2o and o2m
4662 if field_def['type'] == 'one2many':
4663 target_obj = self.pool[field_def['relation']]
4664 # here we rely on the order of the ids to match the translations
4665 # as foreseen in copy_data()
4666 old_children = sorted(r.id for r in old_record[field_name])
4667 new_children = sorted(r.id for r in new_record[field_name])
4668 for (old_child, new_child) in zip(old_children, new_children):
4669 target_obj.copy_translations(cr, uid, old_child, new_child, context=context)
4670 # and for translatable fields we keep them for copy
4671 elif field_def.get('translate'):
4672 if field_name in self._columns:
4673 trans_name = self._name + "," + field_name
4674 target_id = new_id
4675 source_id = old_id
4676 elif field_name in self._inherit_fields:
4677 trans_name = self._inherit_fields[field_name][0] + "," + field_name
4678 # get the id of the parent record to set the translation
4679 inherit_field_name = self._inherit_fields[field_name][1]
4680 target_id = new_record[inherit_field_name].id
4681 source_id = old_record[inherit_field_name].id
4685 trans_ids = trans_obj.search(cr, uid, [
4686 ('name', '=', trans_name),
4687 ('res_id', '=', source_id)
4688 ])
4689 user_lang = context.get('lang')
4690 for record in trans_obj.read(cr, uid, trans_ids, context=context):
4692 # remove source to avoid triggering _set_src
4693 del record['source']
4694 record.update({'res_id': target_id})
4695 if user_lang and user_lang == record['lang']:
4696 # 'source' to force the call to _set_src
4697 # 'value' needed if value is changed in copy(), want to see the new_value
4698 record['source'] = old_record[field_name]
4699 record['value'] = new_record[field_name]
4700 trans_obj.create(cr, uid, record, context=context)
4702 @api.returns('self', lambda value: value.id)
4703 def copy(self, cr, uid, id, default=None, context=None):
4704 """ copy(default=None)
4706 Duplicate record with given id updating it with default values
4708 :param dict default: dictionary of field values to override in the
4709 original values of the copied record, e.g.: ``{'field_name': overridden_value, ...}``
4710 :returns: new record
4715 context = context.copy()
4716 data = self.copy_data(cr, uid, id, default, context)
4717 new_id = self.create(cr, uid, data, context)
4718 self.copy_translations(cr, uid, id, new_id, context)
4719 return new_id
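# Hedged usage sketch (record id and field values are illustrative): copy()
# chains copy_data(), create() and copy_translations(), so overriding a field
# on the duplicate only requires passing it in `default`.
#
#     new_id = model.copy(cr, uid, 42, default={'name': 'Duplicate'}, context=context)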
4722 @api.returns('self')
4724 """ exists() -> records
4726 Returns the subset of records in `self` that exist, and marks deleted
4727 records as such in cache. It can be used as a test on records::
4732 By convention, new records are returned as existing.
4734 ids = filter(None, self._ids) # ids to check in database
4737 query = """SELECT id FROM "%s" WHERE id IN %%s""" % self._table
4738 self._cr.execute(query, (ids,))
4739 ids = ([r[0] for r in self._cr.fetchall()] + # ids in database
4740 [id for id in self._ids if not id]) # new ids
4741 existing = self.browse(ids)
4742 if len(existing) < len(self):
4743 # mark missing records in cache with a failed value
4744 exc = MissingError(_("Record does not exist or has been deleted."))
4745 (self - existing)._cache.update(FailedValue(exc))
4746 return existing
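# Illustrative sketch of the "test on records" usage mentioned in the
# docstring above (new-style API; the `record`/`records` variables are
# assumptions):
#
#     if not record.exists():
#         raise MissingError(_("The record has been deleted."))
#     # or, to silently drop deleted records from a recordset:
#     records = records.exists()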
4748 def check_recursion(self, cr, uid, ids, context=None, parent=None):
4749 _logger.warning("You are using deprecated %s.check_recursion(). Please use the '_check_recursion()' instead!" % \
4751 assert parent is None or parent in self._columns or parent in self._inherit_fields,\
4752 "The 'parent' parameter passed to check_recursion() must be None or a valid field name"
4753 return self._check_recursion(cr, uid, ids, context, parent)
4755 def _check_recursion(self, cr, uid, ids, context=None, parent=None):
4757 Verifies that there is no loop in a hierarchical structure of records,
4758 by following the parent relationship using the **parent** field until a loop
4759 is detected or until a top-level record is found.
4761 :param cr: database cursor
4762 :param uid: current user id
4763 :param ids: list of ids of records to check
4764 :param parent: optional parent field name (default: ``self._parent_name = parent_id``)
4765 :return: **True** if the operation can proceed safely, or **False** if an infinite loop is detected.
4768 parent = self._parent_name
4770 # must ignore 'active' flag, ir.rules, etc. => direct SQL query
4771 query = 'SELECT "%s" FROM "%s" WHERE id = %%s' % (parent, self._table)
4774 while current_id is not None:
4775 cr.execute(query, (current_id,))
4776 result = cr.fetchone()
4777 current_id = result[0] if result else None
4778 if current_id == id:
4782 def _check_m2m_recursion(self, cr, uid, ids, field_name):
4784 Verifies that there is no loop in the many2many relation given by **field_name**,
4785 by following that relation on the records until a loop is detected or until
4786 all reachable records have been visited.
4788 :param cr: database cursor
4789 :param uid: current user id
4790 :param ids: list of ids of records to check
4791 :param field_name: field to check
4792 :return: **True** if the operation can proceed safely, or **False** if an infinite loop is detected.
4795 field = self._all_columns.get(field_name)
4796 field = field.column if field else None
4797 if not field or field._type != 'many2many' or field._obj != self._name:
4798 # field must be a many2many on itself
4799 raise ValueError('invalid field_name: %r' % (field_name,))
4801 query = 'SELECT distinct "%s" FROM "%s" WHERE "%s" IN %%s' % (field._id2, field._rel, field._id1)
4805 for i in range(0, len(ids_parent), cr.IN_MAX):
4806 j = i + cr.IN_MAX
4807 sub_ids_parent = ids_parent[i:j]
4808 cr.execute(query, (tuple(sub_ids_parent),))
4809 ids_parent2.extend(filter(None, map(lambda x: x[0], cr.fetchall())))
4810 ids_parent = ids_parent2
4811 for i in ids_parent:
4816 def _get_external_ids(self, cr, uid, ids, *args, **kwargs):
4817 """Retrieve the External ID(s) of any database record.
4819 **Synopsis**: ``_get_external_ids(cr, uid, ids) -> { 'id': ['module.xml_id'] }``
4821 :return: map of ids to the list of their fully qualified External IDs
4822 in the form ``module.key``, or an empty list when there's no External
4823 ID for a record, e.g.::
4825 { 'id': ['module.ext_id', 'module.ext_id_bis'],
4828 ir_model_data = self.pool.get('ir.model.data')
4829 data_ids = ir_model_data.search(cr, uid, [('model', '=', self._name), ('res_id', 'in', ids)])
4830 data_results = ir_model_data.read(cr, uid, data_ids, ['module', 'name', 'res_id'])
4833 # can't use dict.fromkeys() as the list would be shared!
4835 for record in data_results:
4836 result[record['res_id']].append('%(module)s.%(name)s' % record)
4839 def get_external_id(self, cr, uid, ids, *args, **kwargs):
4840 """Retrieve the External ID of any database record, if there
4841 is one. This method works as a possible implementation
4842 for a function field, to be able to add it to any
4843 model object easily, referencing it as ``Model.get_external_id``.
4845 When multiple External IDs exist for a record, only one
4846 of them is returned (randomly).
4848 :return: map of ids to their fully qualified XML ID,
4849 defaulting to an empty string when there's none
4850 (to be usable as a function field),
4853 { 'id': 'module.ext_id',
4856 results = self._get_xml_ids(cr, uid, ids)
4857 for k, v in results.iteritems():
4858 results[k] = v[0] if v else ''
4862 return results
4864 # backwards compatibility
4865 get_xml_id = get_external_id
4866 _get_xml_ids = _get_external_ids
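# Hedged sketch of the function-field usage suggested in get_external_id()'s
# docstring (the column name 'x_external_id' is an assumption, not part of
# this module):
#
#     _columns = {
#         'x_external_id': fields.function(Model.get_external_id,
#                                          type='char', string='External ID'),
#     }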
4868 def print_report(self, cr, uid, ids, name, data, context=None):
4870 Render the report `name` for the given IDs. The report must be defined
4871 for this model, not another.
4873 report = self.pool['ir.actions.report.xml']._lookup_report(cr, name)
4874 assert self._name == report.table
4875 return report.create(cr, uid, ids, data, context)
4879 def is_transient(cls):
4880 """ Return whether the model is transient.
4882 See :class:`TransientModel`.
4885 return cls._transient
4887 def _transient_clean_rows_older_than(self, cr, seconds):
4888 assert self._transient, "Model %s is not transient, it cannot be vacuumed!" % self._name
4889 # Never delete rows used in last 5 minutes
4890 seconds = max(seconds, 300)
4891 query = ("SELECT id FROM " + self._table + " WHERE"
4892 " COALESCE(write_date, create_date, (now() at time zone 'UTC'))::timestamp"
4893 " < ((now() at time zone 'UTC') - interval %s)")
4894 cr.execute(query, ("%s seconds" % seconds,))
4895 ids = [x[0] for x in cr.fetchall()]
4896 self.unlink(cr, SUPERUSER_ID, ids)
4898 def _transient_clean_old_rows(self, cr, max_count):
4899 # Check how many rows we have in the table
4900 cr.execute("SELECT count(*) AS row_count FROM " + self._table)
4901 res = cr.fetchall()
4902 if res[0][0] <= max_count:
4903 return # max not reached, nothing to do
4904 self._transient_clean_rows_older_than(cr, 300)
4906 def _transient_vacuum(self, cr, uid, force=False):
4907 """Clean the transient records.
4909 This unlinks old records from the transient model tables whenever the
4910 "_transient_max_count" or "_transient_max_hours" conditions (if any) are reached.
4911 Actual cleaning will happen only once every "_transient_check_time" calls.
4912 This means this method can be called frequently (e.g. whenever
4913 a new record is created).
4914 Example with both max_hours and max_count active:
4915 Suppose max_hours = 0.2 (i.e. 12 minutes), max_count = 20, there are 55 rows in the
4916 table, 10 created/changed in the last 5 minutes, an additional 12 created/changed between
4917 5 and 10 minutes ago, the rest created/changed more than 12 minutes ago.
4918 - age based vacuum will leave the 22 rows created/changed in the last 12 minutes
4919 - count based vacuum will wipe out another 12 rows. Not just 2, otherwise each addition
4920 would immediately cause the maximum to be reached again.
4921 - the 10 rows that have been created/changed the last 5 minutes will NOT be deleted
4923 assert self._transient, "Model %s is not transient, it cannot be vacuumed!" % self._name
4924 _transient_check_time = 20 # arbitrary limit on vacuum executions
4925 self._transient_check_count += 1
4926 if not force and (self._transient_check_count < _transient_check_time):
4927 return True # no vacuum cleaning this time
4928 self._transient_check_count = 0
4930 # Age-based expiration
4931 if self._transient_max_hours:
4932 self._transient_clean_rows_older_than(cr, self._transient_max_hours * 60 * 60)
4934 # Count-based expiration
4935 if self._transient_max_count:
4936 self._transient_clean_old_rows(cr, self._transient_max_count)
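# Illustrative TransientModel configuration sketch matching the vacuuming
# example in the docstring above (model name and limits are assumptions):
#
#     class my_wizard(TransientModel):
#         _name = 'my.wizard'
#         _transient_max_hours = 0.2   # rows older than 12 minutes become vacuum candidates
#         _transient_max_count = 20    # count-based vacuum kicks in above 20 rows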
4940 def resolve_2many_commands(self, cr, uid, field_name, commands, fields=None, context=None):
4941 """ Serializes one2many and many2many commands into record dictionaries
4942 (as if all the records came from the database via a read()). This
4943 method is aimed at onchange methods on one2many and many2many fields.
4945 Because commands might be creation commands, not all record dicts
4946 will contain an ``id`` field. Commands matching an existing record
4947 will have an ``id``.
4949 :param field_name: name of the one2many or many2many field matching the commands
4950 :type field_name: str
4951 :param commands: one2many or many2many commands to execute on ``field_name``
4952 :type commands: list((int|False, int|False, dict|False))
4953 :param fields: list of fields to read from the database, when applicable
4954 :type fields: list(str)
4955 :returns: records in a shape similar to that returned by ``read()``
4956 (except records may be missing the ``id`` field if they don't exist in db)
4959 result = [] # result (list of dict)
4960 record_ids = [] # ids of records to read
4961 updates = {} # {id: dict} of updates on particular records
4963 for command in commands or []:
4964 if not isinstance(command, (list, tuple)):
4965 record_ids.append(command)
4966 elif command[0] == 0:
4967 result.append(command[2])
4968 elif command[0] == 1:
4969 record_ids.append(command[1])
4970 updates.setdefault(command[1], {}).update(command[2])
4971 elif command[0] in (2, 3):
4972 record_ids = [id for id in record_ids if id != command[1]]
4973 elif command[0] == 4:
4974 record_ids.append(command[1])
4975 elif command[0] == 5:
4976 result, record_ids = [], []
4977 elif command[0] == 6:
4978 result, record_ids = [], list(command[2])
4980 # read the records and apply the updates
4981 other_model = self.pool[self._all_columns[field_name].column._obj]
4982 for record in other_model.read(cr, uid, record_ids, fields=fields, context=context):
4983 record.update(updates.get(record['id'], {}))
4984 result.append(record)
4986 return result
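# Hedged example of the command resolution above (the field 'line_ids' and the
# values are assumptions): given commands mixing create, update and link
# operations, resolve_2many_commands() returns read()-like dictionaries.
#
#     commands = [(0, 0, {'name': 'new line'}),   # create -> dict without 'id'
#                 (1, 7, {'name': 'renamed'}),    # update -> record 7 with override applied
#                 (4, 8, False)]                  # link   -> record 8 read as-is
#     rows = self.resolve_2many_commands(cr, uid, 'line_ids', commands, ['name'])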
4988 # for backward compatibility
4989 resolve_o2m_commands_to_record_dicts = resolve_2many_commands
4991 def search_read(self, cr, uid, domain=None, fields=None, offset=0, limit=None, order=None, context=None):
4993 Performs a ``search()`` followed by a ``read()``.
4995 :param cr: database cursor
4996 :param uid: current user id
4997 :param domain: Search domain, see ``args`` parameter in ``search()``. Defaults to an empty domain that will match all records.
4998 :param fields: List of fields to read, see ``fields`` parameter in ``read()``. Defaults to all fields.
4999 :param offset: Number of records to skip, see ``offset`` parameter in ``search()``. Defaults to 0.
5000 :param limit: Maximum number of records to return, see ``limit`` parameter in ``search()``. Defaults to no limit.
5001 :param order: Columns to sort result, see ``order`` parameter in ``search()``. Defaults to no sort.
5002 :param context: context arguments.
5003 :return: List of dictionaries containing the asked fields.
5004 :rtype: List of dictionaries.
5007 record_ids = self.search(cr, uid, domain or [], offset=offset, limit=limit, order=order, context=context)
5011 if fields and fields == ['id']:
5012 # shortcut read if we only want the ids
5013 return [{'id': id} for id in record_ids]
5015 # read() ignores active_test, but it would forward it to any downstream search call
5016 # (e.g. for x2m or function fields), and this is not the desired behavior, the flag
5017 # was presumably only meant for the main search().
5018 # TODO: Move this to read() directly?
5019 read_ctx = dict(context or {})
5020 read_ctx.pop('active_test', None)
5022 result = self.read(cr, uid, record_ids, fields, context=read_ctx)
5023 if len(result) <= 1:
5027 index = dict((r['id'], r) for r in result)
5028 return [index[x] for x in record_ids if x in index]
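# Illustrative usage sketch (model, domain and fields are assumptions):
#
#     rows = model.search_read(cr, uid, domain=[('active', '=', True)],
#                              fields=['name'], limit=10, order='name')
#     # -> [{'id': 3, 'name': 'A'}, {'id': 1, 'name': 'B'}, ...]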
5030 def _register_hook(self, cr):
5031 """ stuff to do right after the registry is built """
5035 def _patch_method(cls, name, method):
5036 """ Monkey-patch a method for all instances of this model. This replaces
5037 the method called `name` by `method` in the given class.
5038 The original method is then accessible via ``method.origin``, and it
5039 can be restored with :meth:`~._revert_method`.
5044 def do_write(self, values):
5045 # do stuff, and call the original method
5046 return do_write.origin(self, values)
5048 # patch method write of model
5049 model._patch_method('write', do_write)
5051 # this will call do_write
5052 records = model.search([...])
5055 # restore the original method
5056 model._revert_method('write')
5058 origin = getattr(cls, name)
5059 method.origin = origin
5060 # propagate decorators from origin to method, and apply api decorator
5061 wrapped = api.guess(api.propagate(origin, method))
5062 wrapped.origin = origin
5063 setattr(cls, name, wrapped)
5066 def _revert_method(cls, name):
5067 """ Revert the original method called `name` in the given class.
5068 See :meth:`~._patch_method`.
5070 method = getattr(cls, name)
5071 setattr(cls, name, method.origin)
5076 # An instance represents an ordered collection of records in a given
5077 # execution environment. The instance object refers to the environment, and
5078 # the records themselves are represented by their cache dictionary. The 'id'
5079 # of each record is found in its corresponding cache dictionary.
5081 # This design has the following advantages:
5082 # - cache access is direct and thus fast;
5083 # - one can consider records without an 'id' (see new records);
5084 # - the global cache is only an index to "resolve" a record 'id'.
5088 def _browse(cls, env, ids):
5089 """ Create an instance attached to `env`; `ids` is a tuple of record ids. """
5092 records = object.__new__(cls)
5093 records.env = env
5094 records._ids = ids
5095 env.prefetch[cls._name].update(ids)
5096 return records
5099 def browse(self, cr, uid, arg=None, context=None):
5100 ids = _normalize_ids(arg)
5101 #assert all(isinstance(id, IdType) for id in ids), "Browsing invalid ids: %s" % ids
5102 return self._browse(Environment(cr, uid, context or {}), ids)
5105 def browse(self, arg=None):
5106 """ browse([ids]) -> records
5108 Returns a recordset for the ids provided as parameter in the current environment.
5111 Can take no ids, a single id or a sequence of ids.
5113 ids = _normalize_ids(arg)
5114 #assert all(isinstance(id, IdType) for id in ids), "Browsing invalid ids: %s" % ids
5115 return self._browse(self.env, ids)
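# Hedged usage sketch of both browse() signatures above (model name and ids
# are assumptions):
#
#     # traditional-style API
#     partners = self.pool['res.partner'].browse(cr, uid, [1, 2], context=context)
#     # record-style API
#     partners = env['res.partner'].browse([1, 2])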
5118 # Internal properties, for manipulating the instance's implementation
5123 """ List of actual record ids in this recordset (ignores placeholder
5124 ids for records to create)
5126 return filter(None, list(self._ids))
5128 # backward-compatibility with former browse records
5129 _cr = property(lambda self: self.env.cr)
5130 _uid = property(lambda self: self.env.uid)
5131 _context = property(lambda self: self.env.context)
5134 # Conversion methods
5137 def ensure_one(self):
5138 """ Verifies that the current recordset holds a single record. Raises
5139 an exception otherwise.
5143 raise except_orm("ValueError", "Expected singleton: %s" % self)
5145 def with_env(self, env):
5146 """ Returns a new version of this recordset attached to the provided environment.
5149 :type env: :class:`~openerp.api.Environment`
5151 return self._browse(env, self._ids)
5153 def sudo(self, user=SUPERUSER_ID):
5154 """ sudo([user=SUPERUSER])
5156 Returns a new version of this recordset attached to the provided user.
5159 return self.with_env(self.env(user=user))
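# Illustrative sketch (the `user` record is an assumption):
#
#     records.sudo()             # re-browse as the superuser
#     records.sudo(user.id)      # re-browse as the given user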
5161 def with_context(self, *args, **kwargs):
5162 """ with_context([context][, **overrides]) -> records
5164 Returns a new version of this recordset attached to an extended context.
5167 The extended context is either the provided ``context`` in which
5168 ``overrides`` are merged or the *current* context in which
5169 ``overrides`` are merged e.g.::
5171 # current context is {'key1': True}
5172 r2 = records.with_context({}, key2=True)
5173 # -> r2._context is {'key2': True}
5174 r2 = records.with_context(key2=True)
5175 # -> r2._context is {'key1': True, 'key2': True}
5177 context = dict(args[0] if args else self._context, **kwargs)
5178 return self.with_env(self.env(context=context))
5180 def _convert_to_cache(self, values, update=False, validate=True):
5181 """ Convert the `values` dictionary into cached values.
5183 :param update: whether the conversion is made for updating `self`;
5184 this is necessary for interpreting the commands of *2many fields
5185 :param validate: whether values must be checked
5187 fields = self._fields
5188 target = self if update else self.browse()
5190 name: fields[name].convert_to_cache(value, target, validate=validate)
5191 for name, value in values.iteritems()
5195 def _convert_to_write(self, values):
5196 """ Convert the `values` dictionary into the format of :meth:`write`. """
5197 fields = self._fields
5199 for name, value in values.iteritems():
5201 value = fields[name].convert_to_write(value)
5202 if not isinstance(value, NewId):
5203 result[name] = value
5207 # Record traversal and update
5210 def _mapped_func(self, func):
5211 """ Apply function `func` on all records in `self`, and return the
5212 result as a list or a recordset (if `func` returns recordsets).
5214 vals = [func(rec) for rec in self]
5215 val0 = vals[0] if vals else func(self)
5216 if isinstance(val0, BaseModel):
5217 return reduce(operator.or_, vals, val0)
5220 def mapped(self, func):
5221 """ Apply `func` on all records in `self`, and return the result as a
5222 list or a recordset (if `func` returns recordsets). In the latter
5223 case, the order of the returned recordset is arbitrary.
5225 :param func: a function or a dot-separated sequence of field names
5227 if isinstance(func, basestring):
5229 for name in func.split('.'):
5230 recs = recs._mapped_func(operator.itemgetter(name))
5233 return self._mapped_func(func)
5235 def _mapped_cache(self, name_seq):
5236 """ Same as `~.mapped`, but `name_seq` is a dot-separated sequence of
5237 field names, and only cached values are used.
5240 for name in name_seq.split('.'):
5241 field = recs._fields[name]
5242 null = field.null(self.env)
5243 recs = recs.mapped(lambda rec: rec._cache.get(field, null))
5246 def filtered(self, func):
5247 """ Select the records in `self` such that `func(rec)` is true, and
5248 return them as a recordset.
5250 :param func: a function or a dot-separated sequence of field names
5252 if isinstance(func, basestring):
5254 func = lambda rec: filter(None, rec.mapped(name))
5255 return self.browse([rec.id for rec in self if func(rec)])
5257 def sorted(self, key=None):
5258 """ Return the recordset `self` ordered by `key` """
5260 return self.search([('id', 'in', self.ids)])
5262 return self.browse(map(int, sorted(self, key=key)))
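# Hedged examples for mapped()/filtered()/sorted() (field names and values are
# assumptions, not part of this module):
#
#     records.mapped('partner_id.name')              # list of partner names
#     records.mapped(lambda r: r.amount * 2)          # list of computed values
#     records.filtered(lambda r: r.state == 'done')   # subset as a recordset
#     records.filtered('partner_id')                  # records with a set partner_id
#     records.sorted(key=lambda r: r.name)            # recordset reordered by name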
5264 def update(self, values):
5265 """ Update record `self[0]` with `values`. """
5266 for name, value in values.iteritems():
5270 # New records - represent records that do not exist in the database yet;
5271 # they are used to perform onchanges.
5275 def new(self, values={}):
5276 """ new([values]) -> record
5278 Return a new record instance attached to the current environment and
5279 initialized with the provided ``values``. The record is *not* created
5280 in database, it only exists in memory.
5282 record = self.browse([NewId()])
5283 record._cache.update(record._convert_to_cache(values, update=True))
5285 if record.env.in_onchange:
5286 # The cache update does not set inverse fields, so do it manually.
5287 # This is useful for computing a function field on secondary
5288 # records, if that field depends on the main record.
5290 field = self._fields.get(name)
5292 for invf in field.inverse_fields:
5293 invf._update(record[name], record)
5298 # Dirty flag, to mark records modified (in draft mode)
5303 """ Return whether any record in `self` is dirty. """
5304 dirty = self.env.dirty
5305 return any(record in dirty for record in self)
5308 def _dirty(self, value):
5309 """ Mark the records in `self` as dirty. """
5311 map(self.env.dirty.add, self)
5313 map(self.env.dirty.discard, self)
5319 def __nonzero__(self):
5320 """ Test whether `self` is nonempty. """
5321 return bool(getattr(self, '_ids', True))
5324 """ Return the size of `self`. """
5325 return len(self._ids)
5328 """ Return an iterator over `self`. """
5329 for id in self._ids:
5330 yield self._browse(self.env, (id,))
5332 def __contains__(self, item):
5333 """ Test whether `item` (record or field name) is an element of `self`.
5334 In the first case, the test is fully equivalent to::
5336 any(item == record for record in self)
5338 if isinstance(item, BaseModel) and self._name == item._name:
5339 return len(item) == 1 and item.id in self._ids
5340 elif isinstance(item, basestring):
5341 return item in self._fields
5343 raise except_orm("ValueError", "Mixing apples and oranges: %s in %s" % (item, self))
5345 def __add__(self, other):
5346 """ Return the concatenation of two recordsets. """
5347 if not isinstance(other, BaseModel) or self._name != other._name:
5348 raise except_orm("ValueError", "Mixing apples and oranges: %s + %s" % (self, other))
5349 return self.browse(self._ids + other._ids)
5351 def __sub__(self, other):
5352 """ Return the recordset of all the records in `self` that are not in `other`. """
5353 if not isinstance(other, BaseModel) or self._name != other._name:
5354 raise except_orm("ValueError", "Mixing apples and oranges: %s - %s" % (self, other))
5355 other_ids = set(other._ids)
5356 return self.browse([id for id in self._ids if id not in other_ids])
5358 def __and__(self, other):
5359 """ Return the intersection of two recordsets.
5360 Note that recordset order is not preserved.
5362 if not isinstance(other, BaseModel) or self._name != other._name:
5363 raise except_orm("ValueError", "Mixing apples and oranges: %s & %s" % (self, other))
5364 return self.browse(set(self._ids) & set(other._ids))
5366 def __or__(self, other):
5367 """ Return the union of two recordsets.
5368 Note that recordset order is not preserved.
5370 if not isinstance(other, BaseModel) or self._name != other._name:
5371 raise except_orm("ValueError", "Mixing apples and oranges: %s | %s" % (self, other))
5372 return self.browse(set(self._ids) | set(other._ids))
5374 def __eq__(self, other):
5375 """ Test whether two recordsets are equivalent (up to reordering). """
5376 if not isinstance(other, BaseModel):
5378 _logger.warning("Comparing apples and oranges: %s == %s", self, other)
5380 return self._name == other._name and set(self._ids) == set(other._ids)
5382 def __ne__(self, other):
5383 return not self == other
5385 def __lt__(self, other):
5386 if not isinstance(other, BaseModel) or self._name != other._name:
5387 raise except_orm("ValueError", "Mixing apples and oranges: %s < %s" % (self, other))
5388 return set(self._ids) < set(other._ids)
5390 def __le__(self, other):
5391 if not isinstance(other, BaseModel) or self._name != other._name:
5392 raise except_orm("ValueError", "Mixing apples and oranges: %s <= %s" % (self, other))
5393 return set(self._ids) <= set(other._ids)
5395 def __gt__(self, other):
5396 if not isinstance(other, BaseModel) or self._name != other._name:
5397 raise except_orm("ValueError", "Mixing apples and oranges: %s > %s" % (self, other))
5398 return set(self._ids) > set(other._ids)
5400 def __ge__(self, other):
5401 if not isinstance(other, BaseModel) or self._name != other._name:
5402 raise except_orm("ValueError", "Mixing apples and oranges: %s >= %s" % (self, other))
5403 return set(self._ids) >= set(other._ids)
5409 return "%s%s" % (self._name, getattr(self, '_ids', ""))
5411 def __unicode__(self):
5412 return unicode(str(self))
5417 if hasattr(self, '_ids'):
5418 return hash((self._name, frozenset(self._ids)))
5420 return hash(self._name)
5422 def __getitem__(self, key):
5423 """ If `key` is an integer or a slice, return the corresponding record
5424 selection as an instance (attached to `self.env`).
5425 Otherwise read the field `key` of the first record in `self`.
5429 inst = model.search(dom) # inst is a recordset
5430 r4 = inst[3] # fourth record in inst
5431 rs = inst[10:20] # subset of inst
5432 nm = rs['name'] # name of first record in inst
5434 if isinstance(key, basestring):
5435 # important: one must call the field's getter
5436 return self._fields[key].__get__(self, type(self))
5437 elif isinstance(key, slice):
5438 return self._browse(self.env, self._ids[key])
5440 return self._browse(self.env, (self._ids[key],))
5442 def __setitem__(self, key, value):
5443 """ Assign the field `key` to `value` in record `self`. """
5444 # important: one must call the field's setter
5445 return self._fields[key].__set__(self, value)
5448 # Cache and recomputation management
5453 """ Return the cache of `self`, mapping field names to values. """
5454 return RecordCache(self)
5457 def _in_cache_without(self, field):
5458 """ Make sure `self` is present in cache (for prefetching), and return
5459 the records of model `self` in cache that have no value for `field`
5460 (:class:`Field` instance).
5463 prefetch_ids = env.prefetch[self._name]
5464 prefetch_ids.update(self._ids)
5465 ids = filter(None, prefetch_ids - set(env.cache[field]))
5466 return self.browse(ids)
5470 """ Clear the records cache.
5473 The record cache is automatically invalidated.
5475 self.invalidate_cache()
5478 def invalidate_cache(self, fnames=None, ids=None):
5479 """ Invalidate the record caches after some records have been modified.
5480 If both `fnames` and `ids` are ``None``, the whole cache is cleared.
5482 :param fnames: the list of modified fields, or ``None`` for all fields
5483 :param ids: the list of modified record ids, or ``None`` for all
5487 return self.env.invalidate_all()
5488 fields = self._fields.values()
5490 fields = map(self._fields.__getitem__, fnames)
5492 # invalidate fields and inverse fields, too
5493 spec = [(f, ids) for f in fields] + \
5494 [(invf, None) for f in fields for invf in f.inverse_fields]
5495 self.env.invalidate(spec)
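# Illustrative sketch (field names and ids are assumptions): invalidate the
# cached value of one field on specific records, or flush everything.
#
#     self.invalidate_cache(fnames=['partner_id'], ids=[1, 2])
#     self.invalidate_cache()    # clear the whole cache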
5498 def modified(self, fnames):
5499 """ Notify that fields have been modified on `self`. This invalidates
5500 the cache, and prepares the recomputation of stored function fields
5501 (new-style fields only).
5503 :param fnames: iterable of field names that have been modified on
5506 # each field knows what to invalidate and recompute
5508 for fname in fnames:
5509 spec += self._fields[fname].modified(self)
5513 for env in self.env.all
5514 for field in env.cache
5516 # invalidate non-stored fields.function which are currently cached
5517 spec += [(f, None) for f in self.pool.pure_function_fields
5518 if f in cached_fields]
5520 self.env.invalidate(spec)
5522 def _recompute_check(self, field):
5523 """ If `field` must be recomputed on some record in `self`, return the
5524 corresponding records that must be recomputed.
5526 return self.env.check_todo(field, self)
5528 def _recompute_todo(self, field):
5529 """ Mark `field` to be recomputed. """
5530 self.env.add_todo(field, self)
5532 def _recompute_done(self, field):
5533 """ Mark `field` as recomputed. """
5534 self.env.remove_todo(field, self)
5537 def recompute(self):
5538 """ Recompute stored function fields. The fields and records to
5539 recompute have been determined by method :meth:`modified`.
5541 while self.env.has_todo():
5542 field, recs = self.env.get_todo()
5543 # evaluate the fields to recompute, and save them to database
5544 for rec, rec1 in zip(recs, recs.with_context(recompute=False)):
5546 values = rec._convert_to_write({
5547 f.name: rec[f.name] for f in field.computed_fields
5550 except MissingError:
5552 # mark the computed fields as done
5553 map(recs._recompute_done, field.computed_fields)
5556 # Generic onchange method
5559 def _has_onchange(self, field, other_fields):
5560 """ Return whether `field` should trigger an onchange event in the
5561 presence of `other_fields`.
5563 # test whether self has an onchange method for field, or field is a
5564 # dependency of any field in other_fields
5565 return field.name in self._onchange_methods or \
5566 any(dep in other_fields for dep in field.dependents)
5569 def _onchange_spec(self, view_info=None):
5570 """ Return the onchange spec from a view description; if not given, the
5571 result of ``self.fields_view_get()`` is used.
5575 # for traversing the XML arch and populating result
5576 def process(node, info, prefix):
5577 if node.tag == 'field':
5578 name = node.attrib['name']
5579 names = "%s.%s" % (prefix, name) if prefix else name
5580 if not result.get(names):
5581 result[names] = node.attrib.get('on_change')
5582 # traverse the subviews included in relational fields
5583 for subinfo in info['fields'][name].get('views', {}).itervalues():
5584 process(etree.fromstring(subinfo['arch']), subinfo, names)
5587 process(child, info, prefix)
5589 if view_info is None:
5590 view_info = self.fields_view_get()
5591 process(etree.fromstring(view_info['arch']), view_info, '')
5594 def _onchange_eval(self, field_name, onchange, result):
5595 """ Apply onchange method(s) for field `field_name` with spec `onchange`
5596 on record `self`. Value assignments are applied on `self`, while
5597 domain and warning messages are put in dictionary `result`.
5599 onchange = onchange.strip()
5602 if onchange in ("1", "true"):
5603 for method in self._onchange_methods.get(field_name, ()):
5604 method_res = method(self)
5607 if 'domain' in method_res:
5608 result.setdefault('domain', {}).update(method_res['domain'])
5609 if 'warning' in method_res:
5610 result['warning'] = method_res['warning']
5614 match = onchange_v7.match(onchange)
5615 if match:
5616 method, params = match.groups()
5618 # evaluate params -> tuple
5619 global_vars = {'context': self._context, 'uid': self._uid}
5620 if self._context.get('field_parent'):
5621 class RawRecord(object):
5622 def __init__(self, record):
5623 self._record = record
5624 def __getattr__(self, name):
5625 field = self._record._fields[name]
5626 value = self._record[name]
5627 return field.convert_to_onchange(value)
5628 record = self[self._context['field_parent']]
5629 global_vars['parent'] = RawRecord(record)
5631 key: self._fields[key].convert_to_onchange(val)
5632 for key, val in self._cache.iteritems()
5634 params = eval("[%s]" % params, global_vars, field_vars)
5636 # call onchange method
5637 args = (self._cr, self._uid, self._origin.ids) + tuple(params)
5638 method_res = getattr(self._model, method)(*args)
5639 if not isinstance(method_res, dict):
5641 if 'value' in method_res:
5642 method_res['value'].pop('id', None)
5643 self.update(self._convert_to_cache(method_res['value'], validate=False))
5644 if 'domain' in method_res:
5645 result.setdefault('domain', {}).update(method_res['domain'])
5646 if 'warning' in method_res:
5647 result['warning'] = method_res['warning']
5650 def onchange(self, values, field_name, field_onchange):
5651 """ Perform an onchange on the given field.
5653 :param values: dictionary mapping field names to values, giving the
5654 current state of modification
5655 :param field_name: name of the modified field
5656 :param field_onchange: dictionary mapping field names to their
5657 on_change attribute
5661 if field_name and field_name not in self._fields:
5664 # determine subfields for field.convert_to_write() below
5666 subfields = defaultdict(set)
5667 for dotname in field_onchange:
5669 secondary.append(dotname)
5670 name, subname = dotname.split('.')
5671 subfields[name].add(subname)
5673 # create a new record with values, and attach `self` to it
5674 with env.do_in_onchange():
5675 record = self.new(values)
5676 values = dict(record._cache)
5677 # attach `self` with a different context (for cache consistency)
5678 record._origin = self.with_context(__onchange=True)
5680 # determine the fields for which an onchange should be triggered
5681 todo = set([field_name]) if field_name else set(values)
5684 # dummy assignment: trigger invalidations on the record
5686 value = record[name]
5687 field = self._fields[name]
5688 if not field_name and field.type == 'many2one' and field.delegate and not value:
5689 # do not nullify all fields of parent record for new records
5691 record[name] = value
5693 result = {'value': {}}
5701 with env.do_in_onchange():
5702 # apply field-specific onchange methods
5703 if field_onchange.get(name):
5704 record._onchange_eval(name, field_onchange[name], result)
5706 # force re-evaluation of function fields on secondary records
5707 for field_seq in secondary:
5708 record.mapped(field_seq)
5710 # determine which fields have been modified
5711 for name, oldval in values.iteritems():
5712 field = self._fields[name]
5713 newval = record[name]
5714 if field.type in ('one2many', 'many2many'):
5715 if newval != oldval or newval._dirty:
5716 # put new value in result
5717 result['value'][name] = field.convert_to_write(
5718 newval, record._origin, subfields.get(name),
5722 # keep result: newval may have been dirty before
5725 if newval != oldval:
5726 # put new value in result
5727 result['value'][name] = field.convert_to_write(
5728 newval, record._origin, subfields.get(name),
5732 # clean up result to not return another value
5733 result['value'].pop(name, None)
5735 # At the moment, the client does not support updates on a *2many field
5736 # while this one is modified by the user.
5737 if field_name and self._fields[field_name].type in ('one2many', 'many2many'):
5738 result['value'].pop(field_name, None)
5743 class RecordCache(MutableMapping):
5744 """ Implements a proxy dictionary to read/update the cache of a record.
5745 Upon iteration, it looks like a dictionary mapping field names to
5746 values. However, fields may be used as keys as well.
5748 def __init__(self, records):
5749 self._recs = records
5751 def contains(self, field):
5752 """ Return whether `records[0]` has a value for `field` in cache. """
5753 if isinstance(field, basestring):
5754 field = self._recs._fields[field]
5755 return self._recs.id in self._recs.env.cache[field]
5757 def __contains__(self, field):
5758 """ Return whether `records[0]` has a regular value for `field` in cache. """
5759 if isinstance(field, basestring):
5760 field = self._recs._fields[field]
5761 dummy = SpecialValue(None)
5762 value = self._recs.env.cache[field].get(self._recs.id, dummy)
5763 return not isinstance(value, SpecialValue)
5765 def __getitem__(self, field):
5766 """ Return the cached value of `field` for `records[0]`. """
5767 if isinstance(field, basestring):
5768 field = self._recs._fields[field]
5769 value = self._recs.env.cache[field][self._recs.id]
5770 return value.get() if isinstance(value, SpecialValue) else value
5772 def __setitem__(self, field, value):
5773 """ Assign the cached value of `field` for all records in `records`. """
5774 if isinstance(field, basestring):
5775 field = self._recs._fields[field]
5776 values = dict.fromkeys(self._recs._ids, value)
5777 self._recs.env.cache[field].update(values)
5779 def update(self, *args, **kwargs):
5780 """ Update the cache of all records in `records`. If the argument is a
5781 `SpecialValue`, update all fields (except "magic" columns).
5783 if args and isinstance(args[0], SpecialValue):
5784 values = dict.fromkeys(self._recs._ids, args[0])
5785 for name, field in self._recs._fields.iteritems():
5787 self._recs.env.cache[field].update(values)
5789 return super(RecordCache, self).update(*args, **kwargs)
5791 def __delitem__(self, field):
5792 """ Remove the cached value of `field` for all `records`. """
5793 if isinstance(field, basestring):
5794 field = self._recs._fields[field]
5795 field_cache = self._recs.env.cache[field]
5796 for id in self._recs._ids:
5797 field_cache.pop(id, None)
5800 """ Iterate over the field names with a regular value in cache. """
5801 cache, id = self._recs.env.cache, self._recs.id
5802 dummy = SpecialValue(None)
5803 for name, field in self._recs._fields.iteritems():
5804 if name != 'id' and not isinstance(cache[field].get(id, dummy), SpecialValue):
5808 """ Return the number of fields with a regular value in cache. """
5809 return sum(1 for name in self)
5811 class Model(BaseModel):
5812 """Main super-class for regular database-persisted OpenERP models.
5814 OpenERP models are created by inheriting from this class::
5819 The system will later instantiate the class once per database (on
5820 which the class' module is installed).
5823 _register = False # not visible in ORM registry, meant to be python-inherited only
5824 _transient = False # True in a TransientModel
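# Illustrative sketch of the subclassing mentioned in the Model docstring
# above (model and field names are assumptions, not part of this module):
#
#     class res_partner_grade(Model):
#         _name = 'res.partner.grade'
#         _columns = {
#             'name': fields.char('Grade', required=True),
#         }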
5826 class TransientModel(BaseModel):
5827 """Model super-class for transient records, meant to be temporarily
5828 persisted, and regularly vacuum-cleaned.
5830 A TransientModel has a simplified access rights management,
5831 all users can create new records, and may only access the
5832 records they created. The super-user has unrestricted access
5833 to all TransientModel records.
5836 _register = False # not visible in ORM registry, meant to be python-inherited only
5839 class AbstractModel(BaseModel):
5840 """Abstract Model super-class for creating an abstract class meant to be
5841 inherited by regular models (Models or TransientModels) but not meant to
5842 be usable on its own, or persisted.
5844 Technical note: we don't want to make AbstractModel the super-class of
5845 Model or BaseModel because it would not make sense to put the main
5846 definition of persistence methods such as create() in it, and still we
5847 should be able to override them within an AbstractModel.
5849 _auto = False # don't create any database backend for AbstractModels
5850 _register = False # not visible in ORM registry, meant to be python-inherited only
5853 def itemgetter_tuple(items):
5854 """ Fixes itemgetter inconsistency (useful in some cases) of not returning
5855 a tuple if len(items) == 1: always returns an n-tuple where n = len(items)
5859 if len(items) == 1:
5860 return lambda gettable: (gettable[items[0]],)
5861 return operator.itemgetter(*items)
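# Hedged usage sketch: itemgetter_tuple always yields tuples, even for a
# single key (the dictionary below is illustrative).
#
#     getter = itemgetter_tuple(['name'])
#     getter({'name': 'foo', 'id': 1})     # -> ('foo',), not 'foo'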
5863 def convert_pgerror_23502(model, fields, info, e):
5864 m = re.match(r'^null value in column "(?P<field>\w+)" violates '
5865 r'not-null constraint\n',
5867 field_name = m and m.group('field')
5868 if not m or field_name not in fields:
5869 return {'message': unicode(e)}
5870 message = _(u"Missing required value for the field '%s'.") % field_name
5871 field = fields.get(field_name)
5873 message = _(u"Missing required value for the field '%s' (%s)") % (field['string'], field_name)
5876 'field': field_name,
5879 def convert_pgerror_23505(model, fields, info, e):
5880 m = re.match(r'^duplicate key (?P<field>\w+) violates unique constraint',
5882 field_name = m and m.group('field')
5883 if not m or field_name not in fields:
5884 return {'message': unicode(e)}
5885 message = _(u"The value for the field '%s' already exists.") % field_name
5886 field = fields.get(field_name)
5888 message = _(u"%s This might be '%s' in the current model, or a field "
5889 u"of the same name in an o2m.") % (message, field['string'])
5892 'field': field_name,
5895 PGERROR_TO_OE = defaultdict(
5896 # shape of mapped converters
5897 lambda: (lambda model, fvg, info, pgerror: {'message': unicode(pgerror)}), {
5898 # not_null_violation
5899 '23502': convert_pgerror_23502,
5900 # unique constraint error
5901 '23505': convert_pgerror_23505,
5904 def _normalize_ids(arg, atoms={int, long, str, unicode, NewId}):
5905 """ Normalizes the ids argument for ``browse`` (v7 and v8) to a tuple.
5907 Various implementations were tested on the corpus of all browse() calls
5908 performed during a full crawler run (after having installed all website_*
5909 modules) and this one was the most efficient overall.
5911 A possible bit of correctness was sacrificed by not doing any test on
5912 Iterable and just assuming that any non-atomic type was an iterable of some kind.
5917 # much of the corpus is falsy objects (empty list, tuple or set, None)
5918 if not arg:
5919 return ()
5921 # `type in set` is significantly faster (because more restrictive) than
5922 # isinstance(arg, set) or issubclass(type, set); and for new-style classes
5923 # obj.__class__ is equivalent to but faster than type(obj). Not relevant
5924 # (and looks much worse) in most cases, but over millions of calls it
5925 # does have a very minor effect.
5926 if arg.__class__ in atoms:
5927 return arg,
5929 return tuple(arg)
5931 # keep those imports here to avoid dependency cycle errors
5932 from .osv import expression
5933 from .fields import Field, SpecialValue, FailedValue
5935 # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: