# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

"""
    Object Relational Mapping module:
     * Hierarchical structure
     * Constraints consistency and validation
     * Object metadata depends on its status
     * Optimised processing by complex query (multiple actions at once)
     * Default field values
     * Permissions optimisation
     * Persistent object: DB postgresql
     * Multi-level caching system
     * Two different inheritance mechanisms
     * Rich set of field types:
          - classical (varchar, integer, boolean, ...)
          - relational (one2many, many2one, many2many)
"""
import datetime
import itertools
import logging
import pickle
import re
from collections import defaultdict, MutableMapping
from inspect import getmembers

import dateutil.relativedelta
import psycopg2
from lxml import etree

from . import SUPERUSER_ID
from . import api
from . import tools
from .api import Environment
from .exceptions import except_orm, AccessError, MissingError, ValidationError
from .osv import fields
from .osv.query import Query
from .tools import lazy_property, ormcache
from .tools.config import config
from .tools.misc import CountingStream, DEFAULT_SERVER_DATETIME_FORMAT, DEFAULT_SERVER_DATE_FORMAT
from .tools.safe_eval import safe_eval as eval
from .tools.translate import _
_logger = logging.getLogger(__name__)
_schema = logging.getLogger(__name__ + '.schema')

regex_order = re.compile('^( *([a-z0-9:_]+|"[a-z0-9:_]+")( *desc| *asc)?( *, *|))+$', re.I)
regex_object_name = re.compile(r'^[a-z0-9_.]+$')
onchange_v7 = re.compile(r"^(\w+)\((.*)\)$")
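# Illustration (not part of the original module): regex_order accepts
# comma-separated column names, optionally double-quoted, each with an
# optional sort direction:
#   regex_order.match('name asc, id desc')    # matches
#   regex_order.match('"stage_id" DESC')      # matches (case-insensitive)
#   regex_order.match('name; DROP TABLE x')   # None -- rejected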
AUTOINIT_RECALCULATE_STORED_FIELDS = 1000
def check_object_name(name):
    """ Check if the given name is a valid openerp object name.

        The _name attribute in osv and osv_memory objects is subject to
        some restrictions. This function returns True or False depending
        on whether the given name is allowed or not.

        TODO: this is an approximation. The goal in this approximation
        is to disallow uppercase characters (in some places, we quote
        table/column names and in others not, which leads to this kind
        of errors:

            psycopg2.ProgrammingError: relation "xxx" does not exist).

        The same restriction should apply to both osv and osv_memory
        objects for consistency.

    """
    if regex_object_name.match(name) is None:
        return False
    return True

def raise_on_invalid_object_name(name):
    if not check_object_name(name):
        msg = "The _name attribute %s is not valid." % name
        _logger.error(msg)
        raise except_orm('ValueError', msg)
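# A quick illustration (not from the original source): check_object_name
# accepts lowercase dotted/underscored names and rejects anything else.
#   check_object_name('res.partner')      # True
#   check_object_name('sale_order_line')  # True
#   check_object_name('Res.Partner')      # False (uppercase is disallowed)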
POSTGRES_CONFDELTYPES = {
    'RESTRICT': 'r',
    'NO ACTION': 'a',
    'CASCADE': 'c',
    'SET NULL': 'n',
    'SET DEFAULT': 'd',
}

def intersect(la, lb):
    return filter(lambda x: x in lb, la)

def same_name(f, g):
    """ Test whether functions `f` and `g` are identical or have the same name """
    return f == g or getattr(f, '__name__', 0) == getattr(g, '__name__', 1)
def fix_import_export_id_paths(fieldname):
    """
    Fixes the id fields in import and exports, and splits field paths
    on '/'.

    :param str fieldname: name of the field to import/export
    :return: split field name
    :rtype: list of str
    """
    fixed_db_id = re.sub(r'([^/])\.id', r'\1/.id', fieldname)
    fixed_external_id = re.sub(r'([^/]):id', r'\1/id', fixed_db_id)
    return fixed_external_id.split('/')
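# Illustrative behaviour (a sketch, not part of the original module):
#   fix_import_export_id_paths('partner_id:id')   -> ['partner_id', 'id']
#   fix_import_export_id_paths('order_line/.id')  -> ['order_line', '.id']
#   fix_import_export_id_paths('line_ids.id')     -> ['line_ids', '.id']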
def pg_varchar(size=0):
    """ Returns the VARCHAR declaration for the provided size:

    * If no size (or an empty or negative size) is provided, return an
      'infinite' VARCHAR
    * Otherwise return a VARCHAR(n)

    :param int size: varchar size, optional
    :rtype: str
    """
    if size:
        if not isinstance(size, int):
            raise TypeError("VARCHAR parameter should be an int, got %s"
                            % type(size))
        if size > 0:
            return 'VARCHAR(%d)' % size
    return 'varchar'
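# For example (illustrative only, assuming the fallback branch above):
#   pg_varchar(16)  -> 'VARCHAR(16)'
#   pg_varchar()    -> 'varchar'      (unlimited)
#   pg_varchar('x') -> TypeError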
FIELDS_TO_PGTYPES = {
    fields.boolean: 'bool',
    fields.integer: 'int4',
    fields.text: 'text',
    fields.html: 'text',
    fields.date: 'date',
    fields.datetime: 'timestamp',
    fields.binary: 'bytea',
    fields.many2one: 'int4',
    fields.serialized: 'text',
}
def get_pg_type(f, type_override=None):
    """
    :param fields._column f: field to get a Postgres type for
    :param type type_override: use the provided type for dispatching instead of the field's own type
    :returns: (postgres_identification_type, postgres_type_specification)
    :rtype: (str, str)
    """
    field_type = type_override or type(f)

    if field_type in FIELDS_TO_PGTYPES:
        pg_type = (FIELDS_TO_PGTYPES[field_type], FIELDS_TO_PGTYPES[field_type])
    elif issubclass(field_type, fields.float):
        # a float with explicit digits is stored as fixed-precision NUMERIC,
        # otherwise as a double-precision float
        if f.digits:
            pg_type = ('numeric', 'NUMERIC')
        else:
            pg_type = ('float8', 'DOUBLE PRECISION')
    elif issubclass(field_type, (fields.char, fields.reference)):
        pg_type = ('varchar', pg_varchar(f.size))
    elif issubclass(field_type, fields.selection):
        if (isinstance(f.selection, list) and isinstance(f.selection[0][0], int))\
                or getattr(f, 'size', None) == -1:
            pg_type = ('int4', 'INTEGER')
        else:
            pg_type = ('varchar', pg_varchar(getattr(f, 'size', None)))
    elif issubclass(field_type, fields.function):
        if f._type == 'selection':
            pg_type = ('varchar', pg_varchar())
        else:
            # dispatch on the type computed by the function field
            pg_type = get_pg_type(f, getattr(fields, f._type))
    else:
        _logger.warning('%s type not supported!', field_type)
        pg_type = None

    return pg_type
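# Illustrative dispatch (assuming old-style osv field constructors; not part
# of the original module):
#   get_pg_type(fields.boolean('Active'))      # -> ('bool', 'bool')
#   get_pg_type(fields.char('Name', size=16))  # -> ('varchar', 'VARCHAR(16)')
#   get_pg_type(fields.float('Amount'))        # -> ('float8', 'DOUBLE PRECISION')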
class MetaModel(api.Meta):
    """ Metaclass for the models.

    This class is used as the metaclass for the class :class:`BaseModel` to
    discover the models defined in a module (without instantiating them).
    If the automatic discovery is not needed, it is possible to set the model's
    ``_register`` attribute to False.

    """

    module_to_models = {}

    def __init__(self, name, bases, attrs):
        if not self._register:
            self._register = True
            super(MetaModel, self).__init__(name, bases, attrs)
            return

        if not hasattr(self, '_module'):
            # The (OpenERP) module name can be in the `openerp.addons` namespace
            # or not. For instance, module `sale` can be imported as
            # `openerp.addons.sale` (the right way) or `sale` (for backward
            # compatibility).
            module_parts = self.__module__.split('.')
            if len(module_parts) > 2 and module_parts[:2] == ['openerp', 'addons']:
                module_name = self.__module__.split('.')[2]
            else:
                module_name = self.__module__.split('.')[0]
            self._module = module_name

        # Remember which models to instantiate for this module.
        self.module_to_models.setdefault(self._module, []).append(self)

        # transform columns into new-style fields (enables field inheritance)
        for name, column in self._columns.iteritems():
            if name in self.__dict__:
                _logger.warning("In class %s, field %r overriding an existing value", self, name)
            setattr(self, name, column.to_field())
247 """ Pseudo-ids for new records. """
248 def __nonzero__(self):
251 IdType = (int, long, basestring, NewId)
254 # maximum number of prefetched records
257 # special columns automatically created by the ORM
258 LOG_ACCESS_COLUMNS = ['create_uid', 'create_date', 'write_uid', 'write_date']
259 MAGIC_COLUMNS = ['id'] + LOG_ACCESS_COLUMNS
class BaseModel(object):
    """ Base class for OpenERP models.

    OpenERP models are created by inheriting from this class' subclasses:

    *   :class:`Model` for regular database-persisted models

    *   :class:`TransientModel` for temporary data, stored in the database but
        automatically vacuumed every so often

    *   :class:`AbstractModel` for abstract super classes meant to be shared by
        multiple inheriting models

    The system automatically instantiates every model once per database. Those
    instances represent the available models on each database, and depend on
    which modules are installed on that database. The actual class of each
    instance is built from the Python classes that create and inherit from the
    corresponding model.

    Every model instance is a "recordset", i.e., an ordered collection of
    records of the model. Recordsets are returned by methods like
    :meth:`~.browse`, :meth:`~.search`, or field accesses. Records have no
    explicit representation: a record is represented as a recordset of one
    record.

    To create a class that should not be instantiated, the _register class
    attribute may be set to False.
    """
    __metaclass__ = MetaModel
    _auto = True # create database backend
    _register = False # Set to false if the model shouldn't be automatically discovered.

    _parent_name = 'parent_id'
    _parent_store = False
    _parent_order = False

    _translate = True # set to False to disable translations export for this model

    # dict of {field: method}, with the method returning the (name_get of records,
    # {id: fold}) to include in the _read_group, if grouped on this field
    _group_by_full = {}

    _transient = False # True in a TransientModel

    # structure:
    #  { 'parent_model': 'm2o_field', ... }
    _inherits = {}

    # Mapping from inherits'd field name to triple (m, r, f, n) where m is the
    # model from which it is inherits'd, r is the (local) field towards m, f
    # is the _column object itself, and n is the original (i.e. top-most)
    # parent model.
    # Example:
    #  { 'field_name': ('parent_model', 'm2o_field_to_reach_parent',
    #                   field_column_obj, original_parent_model), ... }
    _inherit_fields = {}

    # Mapping field name/column_info object
    # This is similar to _inherit_fields but:
    # 1. includes self fields,
    # 2. uses column_info instead of a triple.
    _all_columns = {}

    _sql_constraints = []

    # model dependencies, for models backed up by sql views:
    # {model_name: field_names, ...}
    _depends = {}

    CONCURRENCY_CHECK_FIELD = '__last_update'
    def log(self, cr, uid, id, message, secondary=False, context=None):
        return _logger.warning("log() is deprecated. Please use the OpenChatter notification system instead of the res.log mechanism.")

    def view_init(self, cr, uid, fields_list, context=None):
        """Override this method to do specific things when a view on the object is opened."""
        pass

    def _field_create(self, cr, context=None):
        """ Create entries in ir_model_fields for all the model's fields.

        If necessary, also create an entry in ir_model, and if called from the
        modules loading scheme (by receiving 'module' in the context), also
        create entries in ir_model_data (for the model and the fields).

        - create an entry in ir_model (if there is not already one),
        - create an entry in ir_model_data (if there is not already one, and if
          'module' is in the context),
        - update ir_model_fields with the fields found in _columns
          (TODO: there is some redundancy, as _columns is updated from
          ir_model_fields in __init__).

        """
        if context is None:
            context = {}
        cr.execute("SELECT id FROM ir_model WHERE model=%s", (self._name,))
        if not cr.rowcount:
            cr.execute('SELECT nextval(%s)', ('ir_model_id_seq',))
            model_id = cr.fetchone()[0]
            cr.execute("INSERT INTO ir_model (id,model, name, info,state) VALUES (%s, %s, %s, %s, %s)", (model_id, self._name, self._description, self.__doc__, 'base'))
        else:
            model_id = cr.fetchone()[0]
        if 'module' in context:
            name_id = 'model_'+self._name.replace('.', '_')
            cr.execute('select * from ir_model_data where name=%s and module=%s', (name_id, context['module']))
            if not cr.rowcount:
                cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, (now() at time zone 'UTC'), (now() at time zone 'UTC'), %s, %s, %s)", \
                    (name_id, context['module'], 'ir.model', model_id)
                )

        cr.execute("SELECT * FROM ir_model_fields WHERE model=%s", (self._name,))
        cols = {}
        for rec in cr.dictfetchall():
            cols[rec['name']] = rec

        ir_model_fields_obj = self.pool.get('ir.model.fields')

        # sparse fields should be created at the end, as they depend on their
        # serialized field already existing
        model_fields = sorted(self._columns.items(), key=lambda x: 1 if x[1]._type == 'sparse' else 0)
        for (k, f) in model_fields:
            vals = {
                'model_id': model_id,
                'model': self._name,
                'name': k,
                'field_description': f.string,
                'ttype': f._type,
                'relation': f._obj or '',
                'select_level': tools.ustr(int(f.select)),
                'readonly': (f.readonly and 1) or 0,
                'required': (f.required and 1) or 0,
                'selectable': (f.selectable and 1) or 0,
                'translate': (f.translate and 1) or 0,
                'relation_field': f._fields_id if isinstance(f, fields.one2many) else '',
                'serialization_field_id': None,
            }
            if getattr(f, 'serialization_field', None):
                # resolve link to serialization_field if specified by name
                serialization_field_id = ir_model_fields_obj.search(cr, SUPERUSER_ID, [('model','=',vals['model']), ('name', '=', f.serialization_field)])
                if not serialization_field_id:
                    raise except_orm(_('Error'), _("Serialization field `%s` not found for sparse field `%s`!") % (f.serialization_field, k))
                vals['serialization_field_id'] = serialization_field_id[0]

            # When it is a custom field, it does not contain f.select
            if context.get('field_state', 'base') == 'manual':
                if context.get('field_name', '') == k:
                    vals['select_level'] = context.get('select', '0')
                elif k in cols:
                    # set the value so that the problem does NOT occur next time
                    vals['select_level'] = cols[k]['select_level']

            if k not in cols:
                cr.execute('select nextval(%s)', ('ir_model_fields_id_seq',))
                id = cr.fetchone()[0]
                vals['id'] = id
                cr.execute("""INSERT INTO ir_model_fields (
                    id, model_id, model, name, field_description, ttype,
                    relation,state,select_level,relation_field, translate, serialization_field_id
                ) VALUES (
                    %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s
                )""", (
                    id, vals['model_id'], vals['model'], vals['name'], vals['field_description'], vals['ttype'],
                    vals['relation'], 'base',
                    vals['select_level'], vals['relation_field'], bool(vals['translate']), vals['serialization_field_id']
                ))
                if 'module' in context:
                    name1 = 'field_' + self._table + '_' + k
                    cr.execute("select name from ir_model_data where name=%s", (name1,))
                    if cr.rowcount:
                        name1 = name1 + "_" + str(id)
                    cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, (now() at time zone 'UTC'), (now() at time zone 'UTC'), %s, %s, %s)", \
                        (name1, context['module'], 'ir.model.fields', id)
                    )
            else:
                for key, val in vals.items():
                    if cols[k][key] != vals[key]:
                        cr.execute('update ir_model_fields set field_description=%s where model=%s and name=%s', (vals['field_description'], vals['model'], vals['name']))
                        cr.execute("""UPDATE ir_model_fields SET
                            model_id=%s, field_description=%s, ttype=%s, relation=%s,
                            select_level=%s, readonly=%s, required=%s, selectable=%s, relation_field=%s, translate=%s, serialization_field_id=%s
                        WHERE
                            model=%s AND name=%s""", (
                                vals['model_id'], vals['field_description'], vals['ttype'],
                                vals['relation'],
                                vals['select_level'], bool(vals['readonly']), bool(vals['required']), bool(vals['selectable']), vals['relation_field'], bool(vals['translate']), vals['serialization_field_id'], vals['model'], vals['name']
                            ))
                        break
        self.invalidate_cache(cr, SUPERUSER_ID)
    @classmethod
    def _add_field(cls, name, field):
        """ Add the given `field` under the given `name` in the class """
        # add field as an attribute and in cls._fields (for reflection)
        if not isinstance(getattr(cls, name, field), Field):
            _logger.warning("In model %r, field %r overriding existing value", cls._name, name)
        setattr(cls, name, field)
        cls._fields[name] = field

        # basic setup of field
        field.set_class_name(cls, name)

        if field.store or field.column:
            cls._columns[name] = field.to_column()
        else:
            # remove potential column that may be overridden by field
            cls._columns.pop(name, None)

    @classmethod
    def _pop_field(cls, name):
        """ Remove the field with the given `name` from the model.
            This method should only be used for manual fields.
        """
        field = cls._fields.pop(name)
        cls._columns.pop(name, None)
        cls._all_columns.pop(name, None)
        if hasattr(cls, name):
            delattr(cls, name)
        return field
    @classmethod
    def _add_magic_fields(cls):
        """ Introduce magic fields on the current class

        * id is a "normal" field (with a specific getter)
        * create_uid, create_date, write_uid and write_date have become
          "normal" fields
        * $CONCURRENCY_CHECK_FIELD is a computed field with its computing
          method defined dynamically. Uses ``str(datetime.datetime.utcnow())``
          to get the same structure as the previous
          ``(now() at time zone 'UTC')::timestamp``::

              # select (now() at time zone 'UTC')::timestamp;
                        timezone
              ----------------------------
               2013-06-18 08:30:37.292809

              >>> str(datetime.datetime.utcnow())
              '2013-06-18 08:31:32.821177'
        """
        def add(name, field):
            """ add `field` with the given `name` if it does not exist yet """
            if name not in cls._columns and name not in cls._fields:
                cls._add_field(name, field)

        # cyclic import
        from . import fields

        # this field 'id' must override any other column or field
        cls._add_field('id', fields.Id(automatic=True))

        add('display_name', fields.Char(string='Display Name', automatic=True,
            compute='_compute_display_name'))

        if cls._log_access:
            add('create_uid', fields.Many2one('res.users', string='Created by', automatic=True))
            add('create_date', fields.Datetime(string='Created on', automatic=True))
            add('write_uid', fields.Many2one('res.users', string='Last Updated by', automatic=True))
            add('write_date', fields.Datetime(string='Last Updated on', automatic=True))
            last_modified_name = 'compute_concurrency_field_with_access'
        else:
            last_modified_name = 'compute_concurrency_field'

        # this field must override any other column or field
        cls._add_field(cls.CONCURRENCY_CHECK_FIELD, fields.Datetime(
            string='Last Modified on', compute=last_modified_name, automatic=True))

    @api.one
    def compute_concurrency_field(self):
        self[self.CONCURRENCY_CHECK_FIELD] = \
            datetime.datetime.utcnow().strftime(DEFAULT_SERVER_DATETIME_FORMAT)

    @api.one
    @api.depends('create_date', 'write_date')
    def compute_concurrency_field_with_access(self):
        self[self.CONCURRENCY_CHECK_FIELD] = \
            self.write_date or self.create_date or \
            datetime.datetime.utcnow().strftime(DEFAULT_SERVER_DATETIME_FORMAT)
    #
    # Goal: try to apply inheritance at the instantiation level and
    #       put objects in the pool var
    #
    @classmethod
    def _build_model(cls, pool, cr):
        """ Instantiate a given model.

        This class method instantiates the class of some model (i.e. a class
        deriving from osv or osv_memory). The class might be the class passed
        in argument or, if it inherits from another class, a class constructed
        by combining the two classes.

        """

        # IMPORTANT: the registry contains an instance for each model. The class
        # of each model carries inferred metadata that is shared among the
        # model's instances for this registry, but not among registries. Hence
        # we cannot use that "registry class" for combining model classes by
        # inheritance, since it confuses the metadata inference process.

        # Keep links to non-inherited constraints in cls; this is useful for
        # instance when exporting translations
        cls._local_constraints = cls.__dict__.get('_constraints', [])
        cls._local_sql_constraints = cls.__dict__.get('_sql_constraints', [])

        # determine inherited models
        parents = getattr(cls, '_inherit', [])
        parents = [parents] if isinstance(parents, basestring) else (parents or [])

        # determine the model's name
        name = cls._name or (len(parents) == 1 and parents[0]) or cls.__name__

        # determine the module that introduced the model
        original_module = pool[name]._original_module if name in parents else cls._module

        # build the class hierarchy for the model
        for parent in parents:
            if parent not in pool:
                raise TypeError('The model "%s" specifies a non-existing parent class "%s"\n'
                                'You may need to add a dependency on the parent class\' module.' % (name, parent))
            parent_model = pool[parent]

            # do not use the class of parent_model, since that class contains
            # inferred metadata; use its ancestor instead
            parent_class = type(parent_model).__base__

            # don't inherit custom fields
            columns = dict((key, val)
                           for key, val in parent_class._columns.iteritems()
                           if not val.manual
                           )
            columns.update(cls._columns)

            inherits = dict(parent_class._inherits)
            inherits.update(cls._inherits)

            depends = dict(parent_class._depends)
            for m, fs in cls._depends.iteritems():
                depends[m] = depends.get(m, []) + fs

            old_constraints = parent_class._constraints
            new_constraints = cls._constraints
            # filter out from old_constraints the ones overridden by a
            # constraint with the same function name in new_constraints
            constraints = new_constraints + [oldc
                                             for oldc in old_constraints
                                             if not any(newc[2] == oldc[2] and same_name(newc[0], oldc[0])
                                                        for newc in new_constraints)
                                             ]

            sql_constraints = cls._sql_constraints + \
                              parent_class._sql_constraints

            attrs = {
                '_name': name,
                '_register': False,
                '_columns': columns,
                '_inherits': inherits,
                '_depends': depends,
                '_constraints': constraints,
                '_sql_constraints': sql_constraints,
            }
            cls = type(name, (cls, parent_class), attrs)

        # introduce the "registry class" of the model;
        # duplicate some attributes so that the ORM can modify them
        attrs = {
            '_register': False,
            '_columns': dict(cls._columns),
            '_defaults': {}, # filled by Field._determine_default()
            '_inherits': dict(cls._inherits),
            '_depends': dict(cls._depends),
            '_constraints': list(cls._constraints),
            '_sql_constraints': list(cls._sql_constraints),
            '_original_module': original_module,
        }
        cls = type(cls._name, (cls,), attrs)

        # instantiate the model, and initialize it
        model = object.__new__(cls)
        model.__init__(pool, cr)
        return model
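    # Illustration (hypothetical modules): if addon A defines a class with
    # _name = 'res.partner' and addon B defines one with _inherit =
    # 'res.partner', the registry class built above has an MRO roughly like
    #   B_class -> A_class -> ... -> BaseModel
    # so B's methods override A's on the same model.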
    @classmethod
    def _init_function_fields(cls, pool, cr):
        # initialize the list of non-stored function fields for this model
        pool._pure_function_fields[cls._name] = []

        # process store of low-level function fields
        for fname, column in cls._columns.iteritems():
            if hasattr(column, 'digits_change'):
                column.digits_change(cr)
            # filter out existing store about this field
            pool._store_function[cls._name] = [
                stored
                for stored in pool._store_function.get(cls._name, [])
                if (stored[0], stored[1]) != (cls._name, fname)
            ]
            if not isinstance(column, fields.function):
                continue
            if not column.store:
                # register it on the pool for invalidation
                pool._pure_function_fields[cls._name].append(fname)
                continue
            # process store parameter
            store = column.store
            if store is True:
                get_ids = lambda self, cr, uid, ids, c={}: ids
                store = {cls._name: (get_ids, None, column.priority, None)}
            for model, spec in store.iteritems():
                if len(spec) == 4:
                    (fnct, fields2, order, length) = spec
                elif len(spec) == 3:
                    (fnct, fields2, order) = spec
                    length = None
                else:
                    raise except_orm('Error',
                        ('Invalid function definition %s in object %s !\nYou must use the definition: store={object:(fnct, fields, priority, time length)}.' % (fname, cls._name)))
                pool._store_function.setdefault(model, [])
                t = (cls._name, fname, fnct, tuple(fields2) if fields2 else None, order, length)
                if t not in pool._store_function[model]:
                    pool._store_function[model].append(t)
                    pool._store_function[model].sort(key=lambda x: x[4])
    @classmethod
    def _init_manual_fields(cls, pool, cr):
        # Check whether the query is already done
        if pool.fields_by_model is not None:
            manual_fields = pool.fields_by_model.get(cls._name, [])
        else:
            cr.execute('SELECT * FROM ir_model_fields WHERE model=%s AND state=%s', (cls._name, 'manual'))
            manual_fields = cr.dictfetchall()

        for field in manual_fields:
            if field['name'] in cls._columns:
                continue
            attrs = {
                'string': field['field_description'],
                'required': bool(field['required']),
                'readonly': bool(field['readonly']),
                'domain': eval(field['domain']) if field['domain'] else None,
                'size': field['size'] or None,
                'ondelete': field['on_delete'],
                'translate': (field['translate']),
                'manual': True,
                #'select': int(field['select_level'])
            }
            if field['serialization_field_id']:
                cr.execute('SELECT name FROM ir_model_fields WHERE id=%s', (field['serialization_field_id'],))
                attrs.update({'serialization_field': cr.fetchone()[0], 'type': field['ttype']})
                if field['ttype'] in ['many2one', 'one2many', 'many2many']:
                    attrs.update({'relation': field['relation']})
                cls._columns[field['name']] = fields.sparse(**attrs)
            elif field['ttype'] == 'selection':
                cls._columns[field['name']] = fields.selection(eval(field['selection']), **attrs)
            elif field['ttype'] == 'reference':
                cls._columns[field['name']] = fields.reference(selection=eval(field['selection']), **attrs)
            elif field['ttype'] == 'many2one':
                cls._columns[field['name']] = fields.many2one(field['relation'], **attrs)
            elif field['ttype'] == 'one2many':
                cls._columns[field['name']] = fields.one2many(field['relation'], field['relation_field'], **attrs)
            elif field['ttype'] == 'many2many':
                _rel1 = field['relation'].replace('.', '_')
                _rel2 = field['model'].replace('.', '_')
                _rel_name = 'x_%s_%s_%s_rel' % (_rel1, _rel2, field['name'])
                cls._columns[field['name']] = fields.many2many(field['relation'], _rel_name, 'id1', 'id2', **attrs)
            else:
                cls._columns[field['name']] = getattr(fields, field['ttype'])(**attrs)
    @classmethod
    def _init_constraints_onchanges(cls):
        # store sql constraint error messages
        for (key, _, msg) in cls._sql_constraints:
            cls.pool._sql_error[cls._table + '_' + key] = msg

        # collect constraint and onchange methods
        cls._constraint_methods = []
        cls._onchange_methods = defaultdict(list)
        for attr, func in getmembers(cls, callable):
            if hasattr(func, '_constrains'):
                if not all(name in cls._fields for name in func._constrains):
                    _logger.warning("@constrains%r parameters must be field names", func._constrains)
                cls._constraint_methods.append(func)
            if hasattr(func, '_onchange'):
                if not all(name in cls._fields for name in func._onchange):
                    _logger.warning("@onchange%r parameters must be field names", func._onchange)
                for name in func._onchange:
                    cls._onchange_methods[name].append(func)

    def __new__(cls):
        # In the past, this method was registering the model class in the server.
        # This job is now done entirely by the metaclass MetaModel.
        #
        # Do not create an instance here. Model instances are created by method
        # _build_model().
        return None
    def __init__(self, pool, cr):
        """ Initialize a model and make it part of the given registry.

        - copy the stored fields' functions in the registry,
        - retrieve custom fields and add them in the model,
        - ensure there is a many2one for each _inherits'd parent,
        - update the children's _columns,
        - give a chance to each field to initialize itself.

        """
        cls = type(self)

        # link the class to the registry, and update the registry
        cls.pool = pool
        cls._model = self # backward compatibility
        pool.add(cls._name, self)

        # determine description, table, sequence and log_access
        if not cls._description:
            cls._description = cls._name
        if not cls._table:
            cls._table = cls._name.replace('.', '_')
        if not cls._sequence:
            cls._sequence = cls._table + '_id_seq'
        if not hasattr(cls, '_log_access'):
            # If _log_access is not specified, it is the same value as _auto.
            cls._log_access = cls._auto

        if cls.is_transient():
            cls._transient_check_count = 0
            cls._transient_max_count = config.get('osv_memory_count_limit')
            cls._transient_max_hours = config.get('osv_memory_age_limit')
            assert cls._log_access, \
                "TransientModels must have log_access turned on, " \
                "in order to implement their access rights policy"

        # retrieve new-style fields (from above registry class) and duplicate
        # them (to avoid clashes with inheritance between different models)
        cls._fields = {}
        above = cls.__bases__[0]
        for attr, field in getmembers(above, Field.__instancecheck__):
            if not field.inherited:
                cls._add_field(attr, field.new())

        # introduce magic fields
        cls._add_magic_fields()

        # register stuff about low-level function fields and custom fields
        cls._init_function_fields(pool, cr)
        cls._init_manual_fields(pool, cr)

        cls._inherits_check()
        cls._inherits_reload()

        # register constraints and onchange methods
        cls._init_constraints_onchanges()

        # check defaults
        for k in cls._defaults:
            assert k in cls._fields, \
                "Model %s has a default for non-existing field %s" % (cls._name, k)

        # restart columns
        for column in cls._columns.itervalues():
            column.restart()

        # validate rec_name
        if cls._rec_name:
            assert cls._rec_name in cls._fields, \
                "Invalid rec_name %s for model %s" % (cls._rec_name, cls._name)
        elif 'name' in cls._fields:
            cls._rec_name = 'name'

        # prepare ormcache, which must be shared by all instances of the model
        cls._ormcache = {}

    @api.model
    def _is_an_ordinary_table(self):
        self.env.cr.execute("""\
            SELECT  1
            FROM    pg_class
            WHERE   relname = %s
            AND     relkind = %s""", [self._table, 'r'])
        return bool(self.env.cr.fetchone())
    def __export_xml_id(self):
        """ Return a valid xml_id for the record `self`. """
        if not self._is_an_ordinary_table():
            raise Exception(
                "You can not export the column ID of model %s, because the "
                "table %s is not an ordinary table."
                % (self._name, self._table))
        ir_model_data = self.sudo().env['ir.model.data']
        data = ir_model_data.search([('model', '=', self._name), ('res_id', '=', self.id)])
        if data:
            if data[0].module:
                return '%s.%s' % (data[0].module, data[0].name)
            else:
                return data[0].name
        else:
            postfix = 0
            name = '%s_%s' % (self._table, self.id)
            while ir_model_data.search([('module', '=', '__export__'), ('name', '=', name)]):
                postfix += 1
                name = '%s_%s_%s' % (self._table, self.id, postfix)
            ir_model_data.create({
                'model': self._name,
                'res_id': self.id,
                'module': '__export__',
                'name': name,
            })
            return '__export__.' + name
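    # For illustration (hypothetical record): exporting a `res.partner`
    # record with database id 42 and no pre-existing external id would
    # create and return an xml_id such as '__export__.res_partner_42'
    # (with a numeric postfix appended on name collision).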
    def __export_rows(self, fields):
        """ Export fields of the records in `self`.

            :param fields: list of lists of fields to traverse
            :return: list of lists of corresponding values
        """
        lines = []
        for record in self:
            # main line of record, initially empty
            current = [''] * len(fields)
            lines.append(current)

            # list of primary fields followed by secondary field(s)
            primary_done = []

            # process column by column
            for i, path in enumerate(fields):
                if not path:
                    continue

                name = path[0]
                if name in primary_done:
                    continue

                if name == '.id':
                    current[i] = str(record.id)
                elif name == 'id':
                    current[i] = record.__export_xml_id()
                else:
                    field = record._fields[name]
                    value = record[name]

                    # this part could be simpler, but it has to be done this way
                    # in order to reproduce the former behavior
                    if not isinstance(value, BaseModel):
                        current[i] = field.convert_to_export(value, self.env)
                    else:
                        primary_done.append(name)

                        # This is a special case, its strange behavior is intended!
                        if field.type == 'many2many' and len(path) > 1 and path[1] == 'id':
                            xml_ids = [r.__export_xml_id() for r in value]
                            current[i] = ','.join(xml_ids) or False
                            continue

                        # recursively export the fields that follow name
                        fields2 = [(p[1:] if p and p[0] == name else []) for p in fields]
                        lines2 = value.__export_rows(fields2)
                        if lines2:
                            # merge first line with record's main line
                            for j, val in enumerate(lines2[0]):
                                if val:
                                    current[j] = val
                            # check value of current field
                            if not current[i]:
                                # assign xml_ids, and forget about remaining lines
                                xml_ids = [item[1] for item in value.name_get()]
                                current[i] = ','.join(xml_ids)
                            # append the other lines at the end
                            lines += lines2[1:]
                        else:
                            current[i] = False

        return lines
    def export_data(self, fields_to_export, raw_data=False):
        """ Export fields for selected objects

            :param fields_to_export: list of fields
            :param raw_data: True to return values in native Python types
            :rtype: dictionary with a *datas* matrix

            This method is used when exporting data via the client menu
        """
        fields_to_export = map(fix_import_export_id_paths, fields_to_export)
        if raw_data:
            self = self.with_context(export_raw_data=True)
        return {'datas': self.__export_rows(fields_to_export)}
    def import_data(self, cr, uid, fields, datas, mode='init', current_module='', noupdate=False, context=None, filename=None):
        """
        .. deprecated:: 7.0
            Use :meth:`~load` instead

        Import given data in the given module

        This method is used when importing data via the client menu.

        Example of fields to import for a sale.order::

            partner_id,                  (=name_search)
            order_line/.id,              (=database_id)
            order_line/product_id/id,    (=xml id)
            order_line/price_unit,
            order_line/product_uom_qty,
            order_line/product_uom/id    (=xml_id)

        This method returns a 4-tuple with the following structure::

            (return_code, errored_resource, error_message, unused)

        * The first item is a return code, it is ``-1`` in case of
          import error, or the last imported row number in case of success
        * The second item contains the record data dict that failed to import
          in case of error, otherwise it's 0
        * The third item contains an error message string in case of error,
          otherwise it's 0
        * The last item is currently unused, with no specific semantics

        :param fields: list of fields to import
        :param datas: data to import
        :param mode: 'init' or 'update' for record creation
        :param current_module: module name
        :param noupdate: flag for record creation
        :param filename: optional file to store partial import state for recovery
        :returns: 4-tuple in the form (return_code, errored_resource, error_message, unused)
        :rtype: (int, dict or 0, str or 0, str or 0)
        """
        context = dict(context) if context is not None else {}
        context['_import_current_module'] = current_module

        fields = map(fix_import_export_id_paths, fields)
        ir_model_data_obj = self.pool.get('ir.model.data')

        def log(m):
            if m['type'] == 'error':
                raise Exception(m['message'])

        position = 0
        if config.get('import_partial') and filename:
            with open(config.get('import_partial'), 'rb') as partial_import_file:
                data = pickle.load(partial_import_file)
                position = data.get(filename, 0)

        try:
            for res_id, xml_id, res, info in self._convert_records(cr, uid,
                            self._extract_records(cr, uid, fields, datas,
                                                  context=context, log=log),
                            context=context, log=log):
                ir_model_data_obj._update(cr, uid, self._name,
                     current_module, res, mode=mode, xml_id=xml_id,
                     noupdate=noupdate, res_id=res_id, context=context)
                position = info.get('rows', {}).get('to', 0) + 1
                if config.get('import_partial') and filename and (not (position % 100)):
                    with open(config.get('import_partial'), 'rb') as partial_import:
                        data = pickle.load(partial_import)
                    data[filename] = position
                    with open(config.get('import_partial'), 'wb') as partial_import:
                        pickle.dump(data, partial_import)
                    if context.get('defer_parent_store_computation'):
                        self._parent_store_compute(cr)
                    cr.commit()
        except Exception, e:
            cr.rollback()
            return -1, {}, 'Line %d : %s' % (position + 1, tools.ustr(e)), ''

        if context.get('defer_parent_store_computation'):
            self._parent_store_compute(cr)
        return position, 0, 0, 0
    def load(self, cr, uid, fields, data, context=None):
        """
        Attempts to load the data matrix, and returns a list of ids (or
        ``False`` if there was an error and no id could be generated) and a
        list of messages.

        The ids are those of the records created and saved (in database), in
        the same order they were extracted from the file. They can be passed
        directly to :meth:`~read`

        :param fields: list of fields to import, at the same index as the corresponding data
        :type fields: list(str)
        :param data: row-major matrix of data to import
        :type data: list(list(str))
        :param dict context:
        :returns: {ids: list(int)|False, messages: [Message]}
        """
        cr.execute('SAVEPOINT model_load')
        messages = []

        fields = map(fix_import_export_id_paths, fields)
        # fetch the model and invalidate its caches before importing
        ModelData = self.pool['ir.model.data']
        ModelData.clear_caches()
        fg = self.fields_get(cr, uid, context=context)

        mode = 'init'
        current_module = ''
        noupdate = False

        ids = []
        for id, xid, record, info in self._convert_records(cr, uid,
                self._extract_records(cr, uid, fields, data,
                                      context=context, log=messages.append),
                context=context, log=messages.append):
            try:
                cr.execute('SAVEPOINT model_load_save')
            except psycopg2.InternalError, e:
                # broken transaction, exit and hope the source error was
                # already logged
                if not any(message['type'] == 'error' for message in messages):
                    messages.append(dict(info, type='error', message=
                        u"Unknown database error: '%s'" % e))
                break
            try:
                ids.append(ModelData._update(cr, uid, self._name,
                     current_module, record, mode=mode, xml_id=xid,
                     noupdate=noupdate, res_id=id, context=context))
                cr.execute('RELEASE SAVEPOINT model_load_save')
            except psycopg2.Warning, e:
                messages.append(dict(info, type='warning', message=str(e)))
                cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
            except psycopg2.Error, e:
                messages.append(dict(
                    info, type='error',
                    **PGERROR_TO_OE[e.pgcode](self, fg, info, e)))
                # Failed to write, log to messages, rollback savepoint (to
                # avoid broken transaction) and keep going
                cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
            except Exception, e:
                message = (_('Unknown error during import:') +
                           ' %s: %s' % (type(e), unicode(e)))
                moreinfo = _('Resolve other errors first')
                messages.append(dict(info, type='error',
                                     message=message, moreinfo=moreinfo))
                # Failed for some reason, perhaps due to invalid data supplied,
                # rollback savepoint and keep going
                cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
        if any(message['type'] == 'error' for message in messages):
            cr.execute('ROLLBACK TO SAVEPOINT model_load')
            ids = False
        return {'ids': ids, 'messages': messages}
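    # A minimal usage sketch (hypothetical model and data, not from the
    # original source): importing two partners by external id.
    #
    #   fields = ['id', 'name']
    #   data = [['my_mod.partner_a', 'Partner A'],
    #           ['my_mod.partner_b', 'Partner B']]
    #   result = registry['res.partner'].load(cr, uid, fields, data)
    #   # on success: result == {'ids': [<id1>, <id2>], 'messages': []}
    #   # on failure: result == {'ids': False, 'messages': [{'type': 'error', ...}]}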
    def _extract_records(self, cr, uid, fields_, data,
                         context=None, log=lambda a: None):
        """ Generates record dicts from the data sequence.

        The result is a generator of dicts mapping field names to raw
        (unconverted, unvalidated) values.

        For relational fields, if sub-fields were provided the value will be
        a list of sub-records

        The following sub-fields may be set on the record (by key):
        * None is the name_get for the record (to use with name_create/name_search)
        * "id" is the External ID for the record
        * ".id" is the Database ID for the record
        """
        columns = dict((k, v.column) for k, v in self._all_columns.iteritems())
        # Fake columns to avoid special cases in extractor
        columns[None] = fields.char('rec_name')
        columns['id'] = fields.char('External ID')
        columns['.id'] = fields.integer('Database ID')

        # m2o fields can't be on multiple lines so exclude them from the
        # is_relational field rows filter, but special-case them later on to
        # be handled with relational fields (as they can have subfields)
        is_relational = lambda field: columns[field]._type in ('one2many', 'many2many', 'many2one')
        get_o2m_values = itemgetter_tuple(
            [index for index, field in enumerate(fields_)
             if columns[field[0]]._type == 'one2many'])
        get_nono2m_values = itemgetter_tuple(
            [index for index, field in enumerate(fields_)
             if columns[field[0]]._type != 'one2many'])
        # Checks if the provided row has any non-empty non-relational field
        def only_o2m_values(row, f=get_nono2m_values, g=get_o2m_values):
            return any(g(row)) and not any(f(row))

        index = 0
        while True:
            if index >= len(data): return

            row = data[index]
            # copy non-relational fields to record dict
            record = dict((field[0], value)
                          for field, value in itertools.izip(fields_, row)
                          if not is_relational(field[0]))

            # Get all following rows which have relational values attached to
            # the current record (no non-relational values)
            record_span = itertools.takewhile(
                only_o2m_values, itertools.islice(data, index + 1, None))
            # stitch record row back on for relational fields
            record_span = list(itertools.chain([row], record_span))
            for relfield in set(
                    field[0] for field in fields_
                    if is_relational(field[0])):
                column = columns[relfield]
                # FIXME: how to not use _obj without relying on fields_get?
                Model = self.pool[column._obj]

                # get only cells for this sub-field, should be strictly
                # non-empty, field path [None] is for name_get column
                indices, subfields = zip(*((index, field[1:] or [None])
                                           for index, field in enumerate(fields_)
                                           if field[0] == relfield))

                # return all rows which have at least one value for the
                # subfields of relfield
                relfield_data = filter(any, map(itemgetter_tuple(indices), record_span))
                record[relfield] = [subrecord
                    for subrecord, _subinfo in Model._extract_records(
                        cr, uid, subfields, relfield_data,
                        context=context, log=log)]

            yield record, {'rows': {
                'from': index,
                'to': index + len(record_span) - 1
            }}
            index += len(record_span)
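    # Illustration (hypothetical input): with fields_ = [['name'],
    # ['order_line', 'name']], the rows
    #   ['SO1', 'line 1']
    #   ['',    'line 2']
    # yield a single record, because the second row carries only
    # one2many values:
    #   {'name': 'SO1', 'order_line': [{'name': 'line 1'}, {'name': 'line 2'}]}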
    def _convert_records(self, cr, uid, records,
                         context=None, log=lambda a: None):
        """ Converts records from the source iterable (recursive dicts of
        strings) into forms which can be written to the database (via
        self.create or (ir.model.data)._update)

        :returns: a list of triplets of (id, xid, record)
        :rtype: list((int|None, str|None, dict))
        """
        if context is None: context = {}
        Converter = self.pool['ir.fields.converter']
        columns = dict((k, v.column) for k, v in self._all_columns.iteritems())
        Translation = self.pool['ir.translation']
        field_names = dict(
            (f, (Translation._get_source(cr, uid, self._name + ',' + f, 'field',
                                         context.get('lang'))
                 or column.string))
            for f, column in columns.iteritems())

        convert = Converter.for_model(cr, uid, self, context=context)

        def _log(base, field, exception):
            type = 'warning' if isinstance(exception, Warning) else 'error'
            # logs the logical (not human-readable) field name for automated
            # processing of response, but injects human readable in message
            record = dict(base, type=type, field=field,
                          message=unicode(exception.args[0]) % base)
            if len(exception.args) > 1 and exception.args[1]:
                record.update(exception.args[1])
            log(record)

        stream = CountingStream(records)
        for record, extras in stream:
            dbid = False
            xid = False
            # name_get/name_create
            if None in record: pass
            # xid
            if 'id' in record:
                xid = record['id']
            # dbid
            if '.id' in record:
                try:
                    dbid = int(record['.id'])
                except ValueError:
                    # in case of overridden id column
                    dbid = record['.id']
                if not self.search(cr, uid, [('id', '=', dbid)], context=context):
                    log(dict(extras,
                        type='error',
                        record=stream.index,
                        field='.id',
                        message=_(u"Unknown database identifier '%s'") % dbid))
                    dbid = False

            converted = convert(record, lambda field, err:\
                _log(dict(extras, record=stream.index, field=field_names[field]), field, err))

            yield dbid, xid, converted, dict(extras, record=stream.index)
    @api.multi
    def _validate_fields(self, field_names):
        field_names = set(field_names)

        # old-style constraint methods
        trans = self.env['ir.translation']
        cr, uid, context = self.env.args
        ids = self.ids
        errors = []
        for fun, msg, names in self._constraints:
            try:
                # validation must be context-independent; call `fun` without context
                valid = not (set(names) & field_names) or fun(self._model, cr, uid, ids)
                extra_error = None
            except Exception, e:
                _logger.debug('Exception while validating constraint', exc_info=True)
                valid = False
                extra_error = tools.ustr(e)
            if not valid:
                if callable(msg):
                    res_msg = msg(self._model, cr, uid, ids, context=context)
                    if isinstance(res_msg, tuple):
                        template, params = res_msg
                        res_msg = template % params
                else:
                    res_msg = trans._get_source(self._name, 'constraint', self.env.lang, msg)
                if extra_error:
                    res_msg += "\n\n%s\n%s" % (_('Error details:'), extra_error)
                errors.append(
                    _("Field(s) `%s` failed against a constraint: %s") %
                    (', '.join(names), res_msg)
                )
        if errors:
            raise ValidationError('\n'.join(errors))

        # new-style constraint methods
        for check in self._constraint_methods:
            if set(check._constrains) & field_names:
                try:
                    check(self)
                except ValidationError, e:
                    raise
                except Exception, e:
                    raise ValidationError("Error while validating constraint\n\n%s" % tools.ustr(e))
    @api.model
    def default_get(self, fields_list):
        """ default_get(fields) -> default_values

        Return default values for the fields in `fields_list`. Default
        values are determined by the context, user defaults, and the model
        itself.

        :param fields_list: a list of field names
        :return: a dictionary mapping each field name to its corresponding
            default value, if it has one.
        """
        # trigger view init hook
        self.view_init(fields_list)

        defaults = {}
        parent_fields = defaultdict(list)

        for name in fields_list:
            # 1. look up context
            key = 'default_' + name
            if key in self._context:
                defaults[name] = self._context[key]
                continue

            # 2. look up ir_values
            #    Note: performance is good, because get_defaults_dict is cached!
            ir_values_dict = self.env['ir.values'].get_defaults_dict(self._name)
            if name in ir_values_dict:
                defaults[name] = ir_values_dict[name]
                continue

            field = self._fields.get(name)

            # 3. look up property fields
            #    TODO: get rid of this one
            if field and field.company_dependent:
                defaults[name] = self.env['ir.property'].get(name, self._name)
                continue

            # 4. look up field.default
            if field and field.default:
                defaults[name] = field.default(self)
                continue

            # 5. delegate to parent model
            if field and field.inherited:
                field = field.related_field
                parent_fields[field.model_name].append(field.name)

        # convert default values to the right format
        defaults = self._convert_to_cache(defaults, validate=False)
        defaults = self._convert_to_write(defaults)

        # add default values for inherited fields
        for model, names in parent_fields.iteritems():
            defaults.update(self.env[model].default_get(names))

        return defaults
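    # Lookup-order illustration (hypothetical): calling
    #   self.with_context(default_name='Foo').default_get(['name'])
    # returns {'name': 'Foo'} from step 1 (the context), without consulting
    # ir.values, ir.property, or the field's own default.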
    def fields_get_keys(self, cr, user, context=None):
        res = self._columns.keys()
        # TODO I believe this loop can be replaced by
        # res.extend(self._inherit_fields.keys())
        for parent in self._inherits:
            res.extend(self.pool[parent].fields_get_keys(cr, user, context))
        return res

    def _rec_name_fallback(self, cr, uid, context=None):
        rec_name = self._rec_name
        if rec_name not in self._columns:
            rec_name = self._columns.keys()[0] if len(self._columns.keys()) > 0 else "id"
        return rec_name

    #
    # Override this method if you need a window title that depends on the context
    #
    def view_header_get(self, cr, user, view_id=None, view_type='form', context=None):
        return False

    def user_has_groups(self, cr, uid, groups, context=None):
        """Return True if the user is at least a member of one of the groups
        in `groups`. Typically used to resolve the `groups` attribute
        in view and model definitions.

        :param str groups: comma-separated list of fully-qualified group
            external IDs, e.g.: ``base.group_user,base.group_system``
        :return: True if the current user is a member of one of the
            given groups
        """
        return any(self.pool['res.users'].has_group(cr, uid, group_ext_id)
                   for group_ext_id in groups.split(','))
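    # Typical usage (illustrative):
    #   self.user_has_groups(cr, uid, 'base.group_user,base.group_system')
    # returns True as soon as the user belongs to any one of the listed groups.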
    def _get_default_form_view(self, cr, user, context=None):
        """ Generates a default single-line form view using all fields
        of the current model except the m2m and o2m ones.

        :param cr: database cursor
        :param int user: user id
        :param dict context: connection context
        :returns: a form view as an lxml document
        :rtype: etree._Element
        """
        view = etree.Element('form', string=self._description)
        group = etree.SubElement(view, 'group', col="4")
        for fname, field in self._fields.iteritems():
            if field.automatic or field.type in ('one2many', 'many2many'):
                continue

            etree.SubElement(group, 'field', name=fname)
            if field.type == 'text':
                etree.SubElement(group, 'newline')
        return view
    def _get_default_search_view(self, cr, user, context=None):
        """ Generates a single-field search view, based on _rec_name.

        :param cr: database cursor
        :param int user: user id
        :param dict context: connection context
        :returns: a search view as an lxml document
        :rtype: etree._Element
        """
        view = etree.Element('search', string=self._description)
        etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))
        return view

    def _get_default_tree_view(self, cr, user, context=None):
        """ Generates a single-field tree view, based on _rec_name.

        :param cr: database cursor
        :param int user: user id
        :param dict context: connection context
        :returns: a tree view as an lxml document
        :rtype: etree._Element
        """
        view = etree.Element('tree', string=self._description)
        etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))
        return view
    def _get_default_calendar_view(self, cr, user, context=None):
        """ Generates a default calendar view by trying to infer
        calendar fields from a number of pre-set attribute names

        :param cr: database cursor
        :param int user: user id
        :param dict context: connection context
        :returns: a calendar view
        :rtype: etree._Element
        """
        def set_first_of(seq, in_, to):
            """Sets the first value of `seq` also found in `in_` to
            the `to` attribute of the view being closed over.

            Returns whether it found a suitable value (and set it on
            the attribute) or not
            """
            for item in seq:
                if item in in_:
                    view.set(to, item)
                    return True
            return False

        view = etree.Element('calendar', string=self._description)
        etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))

        if self._date_name not in self._columns:
            date_found = False
            for dt in ['date', 'date_start', 'x_date', 'x_date_start']:
                if dt in self._columns:
                    self._date_name = dt
                    date_found = True
                    break

            if not date_found:
                raise except_orm(_('Invalid Object Architecture!'), _("Insufficient fields for Calendar View!"))
        view.set('date_start', self._date_name)

        set_first_of(["user_id", "partner_id", "x_user_id", "x_partner_id"],
                     self._columns, 'color')

        if not set_first_of(["date_stop", "date_end", "x_date_stop", "x_date_end"],
                            self._columns, 'date_stop'):
            if not set_first_of(["date_delay", "planned_hours", "x_date_delay", "x_planned_hours"],
                                self._columns, 'date_delay'):
                raise except_orm(
                    _('Invalid Object Architecture!'),
                    _("Insufficient fields to generate a Calendar View for %s, missing a date_stop or a date_delay" % self._name))

        return view
    def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
        """ fields_view_get([view_id | view_type='form'])

        Get the detailed composition of the requested view, like fields, model, and view architecture.

        :param view_id: id of the view or None
        :param view_type: type of the view to return if view_id is None ('form', 'tree', ...)
        :param toolbar: true to include contextual actions
        :param submenu: deprecated
        :return: dictionary describing the composition of the requested view (including inherited views and extensions)
        :raise AttributeError:
            * if the inherited view has an unknown position to work with, other than 'before', 'after', 'inside', 'replace'
            * if some tag other than 'position' is found in the parent view
        :raise Invalid ArchitectureError: if there is a view type other than form, tree, calendar, search etc. defined on the structure
        """
        if context is None:
            context = {}
        View = self.pool['ir.ui.view']

        result = {
            'model': self._name,
            'field_parent': False,
        }

        # try to find a view_id if none provided
        if not view_id:
            # <view_type>_view_ref in context can be used to override the default view
            view_ref_key = view_type + '_view_ref'
            view_ref = context.get(view_ref_key)
            if view_ref:
                if '.' in view_ref:
                    module, view_ref = view_ref.split('.', 1)
                    cr.execute("SELECT res_id FROM ir_model_data WHERE model='ir.ui.view' AND module=%s AND name=%s", (module, view_ref))
                    view_ref_res = cr.fetchone()
                    if view_ref_res:
                        view_id = view_ref_res[0]
                else:
                    _logger.warning('%r requires a fully-qualified external id (got: %r for model %s). '
                        'Please use the complete `module.view_id` form instead.', view_ref_key, view_ref,
                        self._name)

            if not view_id:
                # otherwise try to find the lowest priority matching ir.ui.view
                view_id = View.default_view(cr, uid, self._name, view_type, context=context)

        # context for post-processing might be overridden
        ctx = context
        if view_id:
            # read the view with inherited views applied
            root_view = View.read_combined(cr, uid, view_id, fields=['id', 'name', 'field_parent', 'type', 'model', 'arch'], context=context)
            result['arch'] = root_view['arch']
            result['name'] = root_view['name']
            result['type'] = root_view['type']
            result['view_id'] = root_view['id']
            result['field_parent'] = root_view['field_parent']
            # override context for postprocessing
            if root_view.get('model') != self._name:
                ctx = dict(context, base_model_name=root_view.get('model'))
        else:
            # fallback on default views methods if no ir.ui.view could be found
            try:
                get_func = getattr(self, '_get_default_%s_view' % view_type)
                arch_etree = get_func(cr, uid, context)
                result['arch'] = etree.tostring(arch_etree, encoding='utf-8')
                result['type'] = view_type
                result['name'] = 'default'
            except AttributeError:
                raise except_orm(_('Invalid Architecture!'), _("No default view of type '%s' could be found!") % view_type)

        # Apply post processing, groups and modifiers etc...
        xarch, xfields = View.postprocess_and_fields(cr, uid, self._name, etree.fromstring(result['arch']), view_id, context=ctx)
        result['arch'] = xarch
        result['fields'] = xfields

        # Add related action information if asked
        if toolbar:
            toclean = ('report_sxw_content', 'report_rml_content', 'report_sxw', 'report_rml', 'report_sxw_content_data', 'report_rml_content_data')
            def clean(x):
                x = x[2]
                for key in toclean:
                    x.pop(key, None)
                return x
            ir_values_obj = self.pool.get('ir.values')
            resprint = ir_values_obj.get(cr, uid, 'action', 'client_print_multi', [(self._name, False)], False, context)
            resaction = ir_values_obj.get(cr, uid, 'action', 'client_action_multi', [(self._name, False)], False, context)
            resrelate = ir_values_obj.get(cr, uid, 'action', 'client_action_relate', [(self._name, False)], False, context)
            resaction = [clean(action) for action in resaction if view_type == 'tree' or not action[2].get('multi')]
            resprint = [clean(print_) for print_ in resprint if view_type == 'tree' or not print_[2].get('multi')]
            # When multi="True" is set, the action is only displayed in the More menu of the list view
            resrelate = [clean(action) for action in resrelate
                         if (action[2].get('multi') and view_type == 'tree') or (not action[2].get('multi') and view_type == 'form')]

            for x in itertools.chain(resprint, resaction, resrelate):
                x['string'] = x['name']

            result['toolbar'] = {
                'print': resprint,
                'action': resaction,
                'relate': resrelate,
            }
        return result
    def get_formview_id(self, cr, uid, id, context=None):
        """ Return a view id to open the document with. This method is meant to be
        overridden in addons that want to give specific view ids, for example.

        :param int id: id of the document to open
        """
        return False

    def get_formview_action(self, cr, uid, id, context=None):
        """ Return an action to open the document. This method is meant to be
        overridden in addons that want to give specific view ids, for example.

        :param int id: id of the document to open
        """
        view_id = self.get_formview_id(cr, uid, id, context=context)
        return {
            'type': 'ir.actions.act_window',
            'res_model': self._name,
            'view_type': 'form',
            'view_mode': 'form',
            'views': [(view_id, 'form')],
            'target': 'current',
            'res_id': id,
        }

    def get_access_action(self, cr, uid, id, context=None):
        """ Return an action to open the document. This method is meant to be
        overridden in addons that want to give specific access to the document.
        By default it opens the form view of the document.

        :param int id: id of the document to open
        """
        return self.get_formview_action(cr, uid, id, context=context)

    def _view_look_dom_arch(self, cr, uid, node, view_id, context=None):
        return self.pool['ir.ui.view'].postprocess_and_fields(
            cr, uid, self._name, node, view_id, context=context)
    def search_count(self, cr, user, args, context=None):
        """ search_count(args) -> int

        Returns the number of records in the current model matching :ref:`the
        provided domain <reference/orm/domains>`.
        """
        res = self.search(cr, user, args, context=context, count=True)
        if isinstance(res, list):
            return len(res)
        return res

    @api.returns('self')
    def search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False):
        """ search(args[, offset=0][, limit=None][, order=None][, count=False])

        Searches for records based on the ``args``
        :ref:`search domain <reference/orm/domains>`.

        :param args: :ref:`A search domain <reference/orm/domains>`. Use an empty
                     list to match all records.
        :param int offset: number of results to ignore (default: none)
        :param int limit: maximum number of records to return (default: all)
        :param str order: sort string
        :param bool count: if ``True``, the call should return the number of
                           records matching ``args`` rather than the records
                           themselves.
        :returns: at most ``limit`` records matching the search criteria
        :raise AccessError: if the user tries to bypass access rules for read on the requested object.
        """
        return self._search(cr, user, args, offset=offset, limit=limit, order=order, context=context, count=count)
1672 # display_name, name_get, name_create, name_search
1675 @api.depends(lambda self: (self._rec_name,) if self._rec_name else ())
1676 def _compute_display_name(self):
1677 names = dict(self.name_get())
1679 record.display_name = names.get(record.id, False)
1683 """ name_get() -> [(id, name), ...]
1685 Returns a textual representation for the records in ``self``.
1686 By default this is the value of the ``display_name`` field.
1688 :return: list of pairs ``(id, text_repr)`` for each records
1692 name = self._rec_name
1693 if name in self._fields:
1694 convert = self._fields[name].convert_to_display_name
1696 result.append((record.id, convert(record[name])))
1699 result.append((record.id, "%s,%s" % (record._name, record.id)))
1704 def name_create(self, name):
1705 """ name_create(name) -> record
1707 Create a new record by calling :meth:`~.create` with only one value
1708 provided: the display name of the new record.
1710 The new record will be initialized with any default values
1711 applicable to this model, or provided through the context. The usual
1712 behavior of :meth:`~.create` applies.
1714 :param name: display name of the record to create
1716 :return: the :meth:`~.name_get` pair value of the created record
1719 record = self.create({self._rec_name: name})
1720 return record.name_get()[0]
1722 _logger.warning("Cannot execute name_create, no _rec_name defined on %s", self._name)
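# Usage sketch (new-style API, hypothetical category model and value):
#
#     cat_id, cat_name = self.env['res.partner.category'].name_create('Gold')
#     # shorthand for create({'name': 'Gold'}).name_get()[0] when _rec_name
#     # is 'name'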
1726 def name_search(self, name='', args=None, operator='ilike', limit=100):
1727 """ name_search(name='', args=None, operator='ilike', limit=100) -> records
1729 Search for records that have a display name matching the given
1730 `name` pattern when compared with the given `operator`, while also
1731 matching the optional search domain (`args`).
1733 This is used for example to provide suggestions based on a partial
1734 value for a relational field. Sometimes be seen as the inverse
1735 function of :meth:`~.name_get`, but it is not guaranteed to be.
1737 This method is equivalent to calling :meth:`~.search` with a search
1738 domain based on ``display_name`` and then :meth:`~.name_get` on the
1739 result of the search.
1741 :param str name: the name pattern to match
1742 :param list args: optional search domain (see :meth:`~.search` for
1743 syntax), specifying further restrictions
1744 :param str operator: domain operator for matching `name`, such as
1745 ``'like'`` or ``'='``.
1746 :param int limit: optional max number of records to return
1748 :return: list of pairs ``(id, text_repr)`` for all matching records.
1750 return self._name_search(name, args, operator, limit=limit)
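# Usage sketch: the typical autocomplete call issued by relational widgets
# (hypothetical ids and names):
#
#     self.env['res.country'].name_search('bel', limit=8)
#     # -> [(20, 'Belgium'), ...]   # (id, text_repr) pairs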
1752 def _name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100, name_get_uid=None):
1753 # private implementation of name_search, allows passing a dedicated user
1754 # for the name_get part to solve some access rights issues
1755 args = list(args or [])
1756 # optimize out the default criterion of ``ilike ''`` that matches everything
1757 if not self._rec_name:
1758 _logger.warning("Cannot execute name_search, no _rec_name defined on %s", self._name)
1759 elif not (name == '' and operator == 'ilike'):
1760 args += [(self._rec_name, operator, name)]
1761 access_rights_uid = name_get_uid or user
1762 ids = self._search(cr, user, args, limit=limit, context=context, access_rights_uid=access_rights_uid)
1763 res = self.name_get(cr, access_rights_uid, ids, context)
1766 def read_string(self, cr, uid, id, langs, fields=None, context=None):
1769 self.pool.get('ir.translation').check_access_rights(cr, uid, 'read')
1771 fields = self._columns.keys() + self._inherit_fields.keys()
1772 #FIXME: collect all calls to _get_source into one SQL call.
1774 res[lang] = {'code': lang}
1776 if f in self._columns:
1777 res_trans = self.pool.get('ir.translation')._get_source(cr, uid, self._name+','+f, 'field', lang)
1779 res[lang][f] = res_trans
1781 res[lang][f] = self._columns[f].string
1782 for table in self._inherits:
1783 cols = intersect(self._inherit_fields.keys(), fields)
1784 res2 = self.pool[table].read_string(cr, uid, id, langs, cols, context)
1787 res[lang]['code'] = lang
1788 for f in res2[lang]:
1789 res[lang][f] = res2[lang][f]
1792 def write_string(self, cr, uid, id, langs, vals, context=None):
1793 self.pool.get('ir.translation').check_access_rights(cr, uid, 'write')
1794 #FIXME: try to only call the translation in one SQL
1797 if field in self._columns:
1798 src = self._columns[field].string
1799 self.pool.get('ir.translation')._set_ids(cr, uid, self._name+','+field, 'field', lang, [0], vals[field], src)
1800 for table in self._inherits:
1801 cols = intersect(self._inherit_fields.keys(), vals)
1803 self.pool[table].write_string(cr, uid, id, langs, vals, context)
1806 def _add_missing_default_values(self, cr, uid, values, context=None):
1807 # avoid overriding inherited values when parent is set
1809 for tables, parent_field in self._inherits.items():
1810 if parent_field in values:
1811 avoid_tables.append(tables)
1813 # compute missing fields
1814 missing_defaults = set()
1815 for field in self._columns.keys():
1816 if field not in values:
1817 missing_defaults.add(field)
1818 for field in self._inherit_fields.keys():
1819 if (field not in values) and (self._inherit_fields[field][0] not in avoid_tables):
1820 missing_defaults.add(field)
1821 # discard magic fields
1822 missing_defaults -= set(MAGIC_COLUMNS)
1824 if missing_defaults:
1825 # override defaults with the provided values, never allow the other way around
1826 defaults = self.default_get(cr, uid, list(missing_defaults), context)
1828 if ((dv in self._columns and self._columns[dv]._type == 'many2many') \
1829 or (dv in self._inherit_fields and self._inherit_fields[dv][2]._type == 'many2many')) \
1830 and defaults[dv] and isinstance(defaults[dv][0], (int, long)):
1831 defaults[dv] = [(6, 0, defaults[dv])]
1832 if (dv in self._columns and self._columns[dv]._type == 'one2many' \
1833 or (dv in self._inherit_fields and self._inherit_fields[dv][2]._type == 'one2many')) \
1834 and isinstance(defaults[dv], (list, tuple)) and defaults[dv] and isinstance(defaults[dv][0], dict):
1835 defaults[dv] = [(0, 0, x) for x in defaults[dv]]
1836 defaults.update(values)
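# e.g. with hypothetical defaults: a many2many default of [1, 2] is turned
# into the command list [(6, 0, [1, 2])], and a one2many default of
# [{'name': 'x'}] into [(0, 0, {'name': 'x'})], before the explicitly
# provided values override the computed defaults.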
1840 def clear_caches(self):
1841 """ Clear the caches
1843 This clears the caches associated to methods decorated with
1844 ``tools.ormcache`` or ``tools.ormcache_multi``.
1847 self._ormcache.clear()
1848 self.pool._any_cache_cleared = True
1849 except AttributeError:
1853 def _read_group_fill_results(self, cr, uid, domain, groupby, remaining_groupbys,
1854 aggregated_fields, count_field,
1855 read_group_result, read_group_order=None, context=None):
1856 """Helper method for filling in empty groups for all possible values of
1857 the field being grouped by"""
1859 # self._group_by_full should map groupable fields to a method that returns
1860 # a list of all aggregated values that we want to display for this field,
1861 # in the form of a m2o-like pair (key,label).
1862 # This is useful to implement kanban views for instance, where all columns
1863 # should be displayed even if they don't contain any record.
1865 # Grab the list of all groups that should be displayed, including all present groups
1866 present_group_ids = [x[groupby][0] for x in read_group_result if x[groupby]]
1867 all_groups,folded = self._group_by_full[groupby](self, cr, uid, present_group_ids, domain,
1868 read_group_order=read_group_order,
1869 access_rights_uid=openerp.SUPERUSER_ID,
1872 result_template = dict.fromkeys(aggregated_fields, False)
1873 result_template[groupby + '_count'] = 0
1874 if remaining_groupbys:
1875 result_template['__context'] = {'group_by': remaining_groupbys}
1877 # Merge the left_side (current results as dicts) with the right_side (all
1878 # possible values as m2o pairs). Both lists are supposed to be using the
1879 # same ordering, and can be merged in one pass.
1882 def append_left(left_side):
1883 grouped_value = left_side[groupby] and left_side[groupby][0]
1884 if grouped_value not in known_values:
1885 result.append(left_side)
1886 known_values[grouped_value] = left_side
1888 known_values[grouped_value].update({count_field: left_side[count_field]})
1889 def append_right(right_side):
1890 grouped_value = right_side[0]
1891 if grouped_value not in known_values:
1892 line = dict(result_template)
1893 line[groupby] = right_side
1894 line['__domain'] = [(groupby,'=',grouped_value)] + domain
1896 known_values[grouped_value] = line
1897 while read_group_result or all_groups:
1898 left_side = read_group_result[0] if read_group_result else None
1899 right_side = all_groups[0] if all_groups else None
1900 assert left_side is None or left_side[groupby] is False \
1901 or isinstance(left_side[groupby], (tuple,list)), \
1902 'M2O-like pair expected, got %r' % left_side[groupby]
1903 assert right_side is None or isinstance(right_side, (tuple,list)), \
1904 'M2O-like pair expected, got %r' % right_side
1905 if left_side is None:
1906 append_right(all_groups.pop(0))
1907 elif right_side is None:
1908 append_left(read_group_result.pop(0))
1909 elif left_side[groupby] == right_side:
1910 append_left(read_group_result.pop(0))
1911 all_groups.pop(0) # discard right_side
1912 elif not left_side[groupby] or not left_side[groupby][0]:
1913 # left side == "Undefined" entry, not present on right_side
1914 append_left(read_group_result.pop(0))
1916 append_right(all_groups.pop(0))
1920 r['__fold'] = folded.get(r[groupby] and r[groupby][0], False)
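# Merge illustration (hypothetical stage groups): with
#   read_group_result = [{'stage_id': (1, 'New'), 'stage_id_count': 3, ...}]
#   all_groups        = [(1, 'New'), (2, 'Won')]
# the line for (1, 'New') is kept as-is, and a line for (2, 'Won') is built
# from result_template with a zero count and a matching __domain, so that
# empty kanban columns still show up.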
1923 def _read_group_prepare(self, orderby, aggregated_fields, annotated_groupbys, query):
1925 Prepares the GROUP BY and ORDER BY terms for the read_group method. Adds the missing JOIN clause
1926 to the query if the order should be computed against an m2o field.
1927 :param orderby: the orderby definition in the form "%(field)s %(order)s"
1928 :param aggregated_fields: list of aggregated fields in the query
1929 :param annotated_groupbys: list of dictionaries returned by _read_group_process_groupby
1930 These dictionaries contain the qualified name of each groupby
1931 (fully qualified SQL name for the corresponding field),
1932 and the (non-raw) field name.
1933 :param osv.Query query: the query under construction
1934 :return: (groupby_terms, orderby_terms)
1937 groupby_terms = [gb['qualified_field'] for gb in annotated_groupbys]
1938 groupby_fields = [gb['groupby'] for gb in annotated_groupbys]
1940 return groupby_terms, orderby_terms
1942 self._check_qorder(orderby)
1943 for order_part in orderby.split(','):
1944 order_split = order_part.split()
1945 order_field = order_split[0]
1946 if order_field in groupby_fields:
1948 if self._all_columns[order_field.split(':')[0]].column._type == 'many2one':
1949 order_clause = self._generate_order_by(order_part, query).replace('ORDER BY ', '')
1951 orderby_terms.append(order_clause)
1952 groupby_terms += [order_term.split()[0] for order_term in order_clause.split(',')]
1954 order = '"%s" %s' % (order_field, '' if len(order_split) == 1 else order_split[1])
1955 orderby_terms.append(order)
1956 elif order_field in aggregated_fields:
1957 orderby_terms.append(order_part)
1959 # Cannot order by a field that will not appear in the results (needs to be grouped or aggregated)
1960 _logger.warn('%s: read_group order by `%s` ignored, cannot sort on empty columns (not grouped/aggregated)',
1961 self._name, order_part)
1962 return groupby_terms, orderby_terms
1964 def _read_group_process_groupby(self, gb, query, context):
1966 Helper method to collect important information about groupbys: raw
1967 field name, type, time information, qualified name, ...
1969 split = gb.split(':')
1970 field_type = self._all_columns[split[0]].column._type
1971 gb_function = split[1] if len(split) == 2 else None
1972 temporal = field_type in ('date', 'datetime')
1973 tz_convert = field_type == 'datetime' and context.get('tz') in pytz.all_timezones
1974 qualified_field = self._inherits_join_calc(split[0], query)
1977 # Careful with week/year formats:
1978 # - yyyy (lower) must always be used, *except* for week+year formats
1979 # - YYYY (upper) must always be used for week+year format
1980 # e.g. 2006-01-01 is W52 2005 in some locales (de_DE),
1981 # and W1 2006 for others
1983 # Mixing both formats, e.g. 'MMM YYYY' would yield wrong results,
1984 # such as 2006-01-01 being formatted as "January 2005" in some locales.
1985 # Cfr: http://babel.pocoo.org/docs/dates/#date-fields
1986 'day': 'dd MMM yyyy', # yyyy = normal year
1987 'week': "'W'w YYYY", # w YYYY = ISO week-year
1988 'month': 'MMMM yyyy',
1989 'quarter': 'QQQ yyyy',
1993 'day': dateutil.relativedelta.relativedelta(days=1),
1994 'week': datetime.timedelta(days=7),
1995 'month': dateutil.relativedelta.relativedelta(months=1),
1996 'quarter': dateutil.relativedelta.relativedelta(months=3),
1997 'year': dateutil.relativedelta.relativedelta(years=1)
2000 qualified_field = "timezone('%s', timezone('UTC',%s))" % (context.get('tz', 'UTC'), qualified_field)
2001 qualified_field = "date_trunc('%s', %s)" % (gb_function or 'month', qualified_field)
2002 if field_type == 'boolean':
2003 qualified_field = "coalesce(%s,false)" % qualified_field
2008 'display_format': display_formats[gb_function or 'month'] if temporal else None,
2009 'interval': time_intervals[gb_function or 'month'] if temporal else None,
2010 'tz_convert': tz_convert,
2011 'qualified_field': qualified_field
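# e.g. for gb = 'create_date:week' on a datetime column without timezone
# conversion, this returns roughly (table name hypothetical):
#   {'field': 'create_date', 'groupby': 'create_date:week',
#    'type': 'datetime', 'tz_convert': False,
#    'display_format': "'W'w YYYY", 'interval': timedelta(days=7),
#    'qualified_field': 'date_trunc(\'week\', "my_table"."create_date")'}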
2014 def _read_group_prepare_data(self, key, value, groupby_dict, context):
2016 Helper method to sanitize the data received by read_group. None
2017 values are converted to False, and date/datetime values are formatted
2018 and corrected according to the timezones.
2020 value = False if value is None else value
2021 gb = groupby_dict.get(key)
2022 if gb and gb['type'] in ('date', 'datetime') and value:
2023 if isinstance(value, basestring):
2024 dt_format = DEFAULT_SERVER_DATETIME_FORMAT if gb['type'] == 'datetime' else DEFAULT_SERVER_DATE_FORMAT
2025 value = datetime.datetime.strptime(value, dt_format)
2026 if gb['tz_convert']:
2027 value = pytz.timezone(context['tz']).localize(value)
2030 def _read_group_get_domain(self, groupby, value):
2032 Helper method to construct the domain corresponding to a groupby and
2033 a given value. This is mostly relevant for date/datetime.
2035 if groupby['type'] in ('date', 'datetime') and value:
2036 dt_format = DEFAULT_SERVER_DATETIME_FORMAT if groupby['type'] == 'datetime' else DEFAULT_SERVER_DATE_FORMAT
2037 domain_dt_begin = value
2038 domain_dt_end = value + groupby['interval']
2039 if groupby['tz_convert']:
2040 domain_dt_begin = domain_dt_begin.astimezone(pytz.utc)
2041 domain_dt_end = domain_dt_end.astimezone(pytz.utc)
2042 return [(groupby['field'], '>=', domain_dt_begin.strftime(dt_format)),
2043 (groupby['field'], '<', domain_dt_end.strftime(dt_format))]
2044 if groupby['type'] == 'many2one' and value:
2046 return [(groupby['field'], '=', value)]
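# e.g. for a hypothetical date column grouped by month with value
# datetime(2014, 1, 1), the returned domain is
#   [('date_field', '>=', '2014-01-01'), ('date_field', '<', '2014-02-01')]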
2048 def _read_group_format_result(self, data, annotated_groupbys, groupby, groupby_dict, domain, context):
2050 Helper method to format the data contained in the dictionary data by
2051 adding the domain corresponding to its values, the groupbys in the
2052 context and by properly formatting the date/datetime values.
2054 domain_group = [dom for gb in annotated_groupbys for dom in self._read_group_get_domain(gb, data[gb['groupby']])]
2055 for k,v in data.iteritems():
2056 gb = groupby_dict.get(k)
2057 if gb and gb['type'] in ('date', 'datetime') and v:
2058 data[k] = babel.dates.format_date(v, format=gb['display_format'], locale=context.get('lang', 'en_US'))
2060 data['__domain'] = domain_group + domain
2061 if len(groupby) - len(annotated_groupbys) >= 1:
2062 data['__context'] = { 'group_by': groupby[len(annotated_groupbys):]}
2066 def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False, lazy=True):
2068 Get the list of records in list view grouped by the given ``groupby`` fields
2070 :param cr: database cursor
2071 :param uid: current user id
2072 :param domain: list specifying search criteria [['field_name', 'operator', 'value'], ...]
2073 :param list fields: list of fields present in the list view specified on the object
2074 :param list groupby: list of groupby descriptions by which the records will be grouped.
2075 A groupby description is either a field (then it will be grouped by that field)
2076 or a string 'field:groupby_function'. Right now, the only functions supported
2077 are 'day', 'week', 'month', 'quarter' or 'year', and they only make sense for
2078 date/datetime fields.
2079 :param int offset: optional number of records to skip
2080 :param int limit: optional max number of records to return
2081 :param dict context: context arguments, like lang, time zone.
2082 :param list orderby: optional ``order by`` specification, for
2083 overriding the natural sort ordering of the
2084 groups, see also :py:meth:`~osv.osv.osv.search`
2085 (supported only for many2one fields currently)
2086 :param bool lazy: if true, the results are only grouped by the first groupby and the
2087 remaining groupbys are put in the __context key. If false, all the groupbys are
2089 :return: list of dictionaries (one dictionary for each record) containing:
2091 * the values of fields grouped by the fields in ``groupby`` argument
2092 * __domain: list of tuples specifying the search criteria
2093 * __context: dictionary with argument like ``groupby``
2094 :rtype: [{'field_name_1': value, ...}, ...]
2095 :raise AccessError: * if user has no read rights on the requested object
2096 * if user tries to bypass access rules for read on the requested object
2100 self.check_access_rights(cr, uid, 'read')
2101 query = self._where_calc(cr, uid, domain, context=context)
2102 fields = fields or self._columns.keys()
2104 groupby = [groupby] if isinstance(groupby, basestring) else groupby
2105 groupby_list = groupby[:1] if lazy else groupby
2106 annotated_groupbys = [self._read_group_process_groupby(gb, query, context)
2107 for gb in groupby_list]
2108 groupby_fields = [g['field'] for g in annotated_groupbys]
2109 order = orderby or ','.join([g for g in groupby_list])
2110 groupby_dict = {gb['groupby']: gb for gb in annotated_groupbys}
2112 self._apply_ir_rules(cr, uid, query, 'read', context=context)
2113 for gb in groupby_fields:
2114 assert gb in fields, "Fields in 'groupby' must appear in the list of fields to read (perhaps it's missing in the list view?)"
2115 groupby_def = self._columns.get(gb) or (self._inherit_fields.get(gb) and self._inherit_fields.get(gb)[2])
2116 assert groupby_def and groupby_def._classic_write, "Fields in 'groupby' must be regular database-persisted fields (no function or related fields), or function fields with store=True"
2117 if not (gb in self._all_columns):
2118 # Don't allow arbitrary values, as this would be a SQL injection vector!
2119 raise except_orm(_('Invalid group_by'),
2120 _('Invalid group_by specification: "%s".\nA group_by specification must be a list of valid fields.')%(gb,))
2122 aggregated_fields = [
2124 if f not in ('id', 'sequence')
2125 if f not in groupby_fields
2126 if f in self._all_columns
2127 if self._all_columns[f].column._type in ('integer', 'float')
2128 if getattr(self._all_columns[f].column, '_classic_write')]
2130 field_formatter = lambda f: (self._all_columns[f].column.group_operator or 'sum', self._inherits_join_calc(f, query), f)
2131 select_terms = ["%s(%s) AS %s" % field_formatter(f) for f in aggregated_fields]
2133 for gb in annotated_groupbys:
2134 select_terms.append('%s as "%s" ' % (gb['qualified_field'], gb['groupby']))
2136 groupby_terms, orderby_terms = self._read_group_prepare(order, aggregated_fields, annotated_groupbys, query)
2137 from_clause, where_clause, where_clause_params = query.get_sql()
2138 if lazy and (len(groupby_fields) >= 2 or not context.get('group_by_no_leaf')):
2139 count_field = groupby_fields[0] if len(groupby_fields) >= 1 else '_'
2142 count_field += '_count'
2144 prefix_terms = lambda prefix, terms: (prefix + " " + ",".join(terms)) if terms else ''
2145 prefix_term = lambda prefix, term: ('%s %s' % (prefix, term)) if term else ''
2148 SELECT min(%(table)s.id) AS id, count(%(table)s.id) AS %(count_field)s %(extra_fields)s
2156 'table': self._table,
2157 'count_field': count_field,
2158 'extra_fields': prefix_terms(',', select_terms),
2159 'from': from_clause,
2160 'where': prefix_term('WHERE', where_clause),
2161 'groupby': prefix_terms('GROUP BY', groupby_terms),
2162 'orderby': prefix_terms('ORDER BY', orderby_terms),
2163 'limit': prefix_term('LIMIT', int(limit) if limit else None),
2164 'offset': prefix_term('OFFSET', int(offset) if offset else None),
2166 cr.execute(query, where_clause_params)
2167 fetched_data = cr.dictfetchall()
2169 if not groupby_fields:
2172 many2onefields = [gb['field'] for gb in annotated_groupbys if gb['type'] == 'many2one']
2174 data_ids = [r['id'] for r in fetched_data]
2175 many2onefields = list(set(many2onefields))
2176 data_dict = {d['id']: d for d in self.read(cr, uid, data_ids, many2onefields, context=context)}
2177 for d in fetched_data:
2178 d.update(data_dict[d['id']])
2180 data = map(lambda r: {k: self._read_group_prepare_data(k,v, groupby_dict, context) for k,v in r.iteritems()}, fetched_data)
2181 result = [self._read_group_format_result(d, annotated_groupbys, groupby, groupby_dict, domain, context) for d in data]
2182 if lazy and groupby_fields[0] in self._group_by_full:
2183 # Right now, read_group only fills results in lazy mode (by default).
2184 # If you need to have the empty groups in 'eager' mode, then the
2185 # method _read_group_fill_results needs to be completely reimplemented
2187 result = self._read_group_fill_results(cr, uid, domain, groupby_fields[0], groupby[len(annotated_groupbys):],
2188 aggregated_fields, count_field, result, read_group_order=order,
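# Usage sketch (old-style API, hypothetical sale orders grouped by partner):
#
#     groups = self.pool['sale.order'].read_group(cr, uid, [],
#         fields=['partner_id', 'amount_total'], groupby=['partner_id'])
#     # each group dict holds partner_id, partner_id_count, the summed
#     # amount_total, plus __domain (and __context when lazy grouping
#     # leaves remaining groupbys)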
2192 def _inherits_join_add(self, current_model, parent_model_name, query):
2194 Add missing table SELECT and JOIN clause to ``query`` for reaching the parent table (no duplicates)
2195 :param current_model: current model object
2196 :param parent_model_name: name of the parent model for which the clauses should be added
2197 :param query: query object on which the JOIN should be added
2199 inherits_field = current_model._inherits[parent_model_name]
2200 parent_model = self.pool[parent_model_name]
2201 parent_alias, parent_alias_statement = query.add_join((current_model._table, parent_model._table, inherits_field, 'id', inherits_field), implicit=True)
2204 def _inherits_join_calc(self, field, query):
2206 Adds missing table select and join clause(s) to ``query`` for reaching
2207 the field coming from an '_inherits' parent table (no duplicates).
2209 :param field: name of inherited field to reach
2210 :param query: query object on which the JOIN should be added
2211 :return: qualified name of field, to be used in SELECT clause
2213 current_table = self
2214 parent_alias = '"%s"' % current_table._table
2215 while field in current_table._inherit_fields and field not in current_table._columns:
2216 parent_model_name = current_table._inherit_fields[field][0]
2217 parent_table = self.pool[parent_model_name]
2218 parent_alias = self._inherits_join_add(current_table, parent_model_name, query)
2219 current_table = parent_table
2220 return '%s."%s"' % (parent_alias, field)
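# e.g. with a hypothetical _inherits = {'res.partner': 'partner_id'},
# asking for the inherited field 'name' adds the JOIN to res_partner and
# returns a qualified name such as '"res_partner"."name"' (the actual
# alias may differ).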
2222 def _parent_store_compute(self, cr):
2223 if not self._parent_store:
2225 _logger.info('Computing parent left and right for table %s...', self._table)
2226 def browse_rec(root, pos=0):
2228 where = self._parent_name+'='+str(root)
2230 where = self._parent_name+' IS NULL'
2231 if self._parent_order:
2232 where += ' order by '+self._parent_order
2233 cr.execute('SELECT id FROM '+self._table+' WHERE '+where)
2235 for id in cr.fetchall():
2236 pos2 = browse_rec(id[0], pos2)
2237 cr.execute('update '+self._table+' set parent_left=%s, parent_right=%s where id=%s', (pos, pos2, root))
2239 query = 'SELECT id FROM '+self._table+' WHERE '+self._parent_name+' IS NULL'
2240 if self._parent_order:
2241 query += ' order by ' + self._parent_order
2244 for (root,) in cr.fetchall():
2245 pos = browse_rec(root, pos)
2246 self.invalidate_cache(cr, SUPERUSER_ID, ['parent_left', 'parent_right'])
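# Nested-set illustration on a hypothetical tree (A with children B, C):
#   A: parent_left=0, parent_right=5
#   B: parent_left=1, parent_right=2
#   C: parent_left=3, parent_right=4
# The subtree of a record Y is then the set of records X satisfying
# Y.parent_left <= X.parent_left < Y.parent_right, which turns child_of
# domains into a simple range test.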
2249 def _update_store(self, cr, f, k):
2250 _logger.info("storing computed values of fields.function '%s'", k)
2251 ss = self._columns[k]._symbol_set
2252 update_query = 'UPDATE "%s" SET "%s"=%s WHERE id=%%s' % (self._table, k, ss[0])
2253 cr.execute('select id from '+self._table)
2254 ids_lst = map(lambda x: x[0], cr.fetchall())
2256 iids = ids_lst[:AUTOINIT_RECALCULATE_STORED_FIELDS]
2257 ids_lst = ids_lst[AUTOINIT_RECALCULATE_STORED_FIELDS:]
2258 res = f.get(cr, self, iids, k, SUPERUSER_ID, {})
2259 for key, val in res.items():
2262 # if val is a many2one, just write the ID
2263 if type(val) == tuple:
2265 if val is not False:
2266 cr.execute(update_query, (ss[1](val), key))
2269 def _check_selection_field_value(self, field, value):
2270 """ Check whether value is among the valid values for the given
2271 selection/reference field, and raise an exception if not.
2273 field = self._fields[field]
2274 field.convert_to_cache(value, self)
2276 def _check_removed_columns(self, cr, log=False):
2277 # iterate on the database columns to drop the NOT NULL constraints
2278 # of fields which were required but have been removed (or will be added by another module)
2279 columns = [c for c in self._columns if not (isinstance(self._columns[c], fields.function) and not self._columns[c].store)]
2280 columns += MAGIC_COLUMNS
2281 cr.execute("SELECT a.attname, a.attnotnull"
2282 " FROM pg_class c, pg_attribute a"
2283 " WHERE c.relname=%s"
2284 " AND c.oid=a.attrelid"
2285 " AND a.attisdropped=%s"
2286 " AND pg_catalog.format_type(a.atttypid, a.atttypmod) NOT IN ('cid', 'tid', 'oid', 'xid')"
2287 " AND a.attname NOT IN %s", (self._table, False, tuple(columns))),
2289 for column in cr.dictfetchall():
2291 _logger.debug("column %s is in the table %s but not in the corresponding object %s",
2292 column['attname'], self._table, self._name)
2293 if column['attnotnull']:
2294 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, column['attname']))
2295 _schema.debug("Table '%s': column '%s': dropped NOT NULL constraint",
2296 self._table, column['attname'])
2298 def _save_constraint(self, cr, constraint_name, type, definition):
2300 Record the creation of a constraint for this model, to make it possible
2301 to delete it later when the module is uninstalled. Type can be either
2302 'f' or 'u', depending on whether the constraint is a foreign key or not.
2304 if not self._module:
2305 # no need to save constraints for custom models as they're not part
2308 assert type in ('f', 'u')
2310 SELECT type, definition FROM ir_model_constraint, ir_module_module
2311 WHERE ir_model_constraint.module=ir_module_module.id
2312 AND ir_model_constraint.name=%s
2313 AND ir_module_module.name=%s
2314 """, (constraint_name, self._module))
2315 constraints = cr.dictfetchone()
2318 INSERT INTO ir_model_constraint
2319 (name, date_init, date_update, module, model, type, definition)
2320 VALUES (%s, now() AT TIME ZONE 'UTC', now() AT TIME ZONE 'UTC',
2321 (SELECT id FROM ir_module_module WHERE name=%s),
2322 (SELECT id FROM ir_model WHERE model=%s), %s, %s)""",
2323 (constraint_name, self._module, self._name, type, definition))
2324 elif constraints['type'] != type or (definition and constraints['definition'] != definition):
2326 UPDATE ir_model_constraint
2327 SET date_update=now() AT TIME ZONE 'UTC', type=%s, definition=%s
2328 WHERE name=%s AND module = (SELECT id FROM ir_module_module WHERE name=%s)""",
2329 (type, definition, constraint_name, self._module))
2331 def _save_relation_table(self, cr, relation_table):
2333 Record the creation of a many2many relation table for this model, to make it possible
2334 to delete it later when the module is uninstalled.
2337 SELECT 1 FROM ir_model_relation, ir_module_module
2338 WHERE ir_model_relation.module=ir_module_module.id
2339 AND ir_model_relation.name=%s
2340 AND ir_module_module.name=%s
2341 """, (relation_table, self._module))
2343 cr.execute("""INSERT INTO ir_model_relation (name, date_init, date_update, module, model)
2344 VALUES (%s, now() AT TIME ZONE 'UTC', now() AT TIME ZONE 'UTC',
2345 (SELECT id FROM ir_module_module WHERE name=%s),
2346 (SELECT id FROM ir_model WHERE model=%s))""",
2347 (relation_table, self._module, self._name))
2348 self.invalidate_cache(cr, SUPERUSER_ID)
2350 # checked version: for direct m2o starting from `self`
2351 def _m2o_add_foreign_key_checked(self, source_field, dest_model, ondelete):
2352 assert self.is_transient() or not dest_model.is_transient(), \
2353 'Many2One relationships from non-transient Model to TransientModel are forbidden'
2354 if self.is_transient() and not dest_model.is_transient():
2355 # TransientModel relationships to regular Models are usually
2356 # annoying because they could block deletion due to the FKs.
2357 # So unless stated otherwise we default them to ondelete=cascade.
2358 ondelete = ondelete or 'cascade'
2359 fk_def = (self._table, source_field, dest_model._table, ondelete or 'set null')
2360 self._foreign_keys.add(fk_def)
2361 _schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s", *fk_def)
2363 # unchecked version: for custom cases, such as m2m relationships
2364 def _m2o_add_foreign_key_unchecked(self, source_table, source_field, dest_model, ondelete):
2365 fk_def = (source_table, source_field, dest_model._table, ondelete or 'set null')
2366 self._foreign_keys.add(fk_def)
2367 _schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s", *fk_def)
2369 def _drop_constraint(self, cr, source_table, constraint_name):
2370 cr.execute("ALTER TABLE %s DROP CONSTRAINT %s" % (source_table,constraint_name))
2372 def _m2o_fix_foreign_key(self, cr, source_table, source_field, dest_model, ondelete):
2373 # Find FK constraint(s) currently established for the m2o field,
2374 # and see whether they are stale or not
2375 cr.execute("""SELECT confdeltype as ondelete_rule, conname as constraint_name,
2376 cl2.relname as foreign_table
2377 FROM pg_constraint as con, pg_class as cl1, pg_class as cl2,
2378 pg_attribute as att1, pg_attribute as att2
2379 WHERE con.conrelid = cl1.oid
2380 AND cl1.relname = %s
2381 AND con.confrelid = cl2.oid
2382 AND array_lower(con.conkey, 1) = 1
2383 AND con.conkey[1] = att1.attnum
2384 AND att1.attrelid = cl1.oid
2385 AND att1.attname = %s
2386 AND array_lower(con.confkey, 1) = 1
2387 AND con.confkey[1] = att2.attnum
2388 AND att2.attrelid = cl2.oid
2389 AND att2.attname = %s
2390 AND con.contype = 'f'""", (source_table, source_field, 'id'))
2391 constraints = cr.dictfetchall()
2393 if len(constraints) == 1:
2394 # Is it the right constraint?
2396 if cons['ondelete_rule'] != POSTGRES_CONFDELTYPES.get((ondelete or 'set null').upper(), 'a')\
2397 or cons['foreign_table'] != dest_model._table:
2398 # Wrong FK: drop it and recreate
2399 _schema.debug("Table '%s': dropping obsolete FK constraint: '%s'",
2400 source_table, cons['constraint_name'])
2401 self._drop_constraint(cr, source_table, cons['constraint_name'])
2403 # it's all good, nothing to do!
2406 # Multiple FKs found for the same field, drop them all, and re-create
2407 for cons in constraints:
2408 _schema.debug("Table '%s': dropping duplicate FK constraints: '%s'",
2409 source_table, cons['constraint_name'])
2410 self._drop_constraint(cr, source_table, cons['constraint_name'])
2412 # (re-)create the FK
2413 self._m2o_add_foreign_key_checked(source_field, dest_model, ondelete)
2416 def _set_default_value_on_column(self, cr, column_name, context=None):
2417 # ideally, we should use default_get(), but it fails due to ir.values
2421 default = self._defaults.get(column_name)
2422 if callable(default):
2423 default = default(self, cr, SUPERUSER_ID, context)
2425 column = self._columns[column_name]
2426 ss = column._symbol_set
2427 db_default = ss[1](default)
2428 # Write default if non-NULL, except for booleans for which False means
2429 # the same as NULL - this saves us an expensive query on large tables.
2430 write_default = (db_default is not None if column._type != 'boolean'
2433 _logger.debug("Table '%s': setting default value of new column %s to %r",
2434 self._table, column_name, default)
2435 query = 'UPDATE "%s" SET "%s"=%s WHERE "%s" is NULL' % (
2436 self._table, column_name, ss[0], column_name)
2437 cr.execute(query, (db_default,))
2438 # this is a disgrace
2441 def _auto_init(self, cr, context=None):
2444 Call _field_create and, unless _auto is False:
2446 - create the corresponding table in database for the model,
2447 - possibly add the parent columns in database,
2448 - possibly add the columns 'create_uid', 'create_date', 'write_uid',
2449 'write_date' in database if _log_access is True (the default),
2450 - report on database columns no more existing in _columns,
2451 - remove no more existing not null constraints,
2452 - alter existing database columns to match _columns,
2453 - create database tables to match _columns,
2454 - add database indices to match _columns,
2455 - save in self._foreign_keys a list of foreign keys to create (see
2459 self._foreign_keys = set()
2460 raise_on_invalid_object_name(self._name)
2463 store_compute = False
2464 stored_fields = [] # new-style stored fields with compute
2466 update_custom_fields = context.get('update_custom_fields', False)
2467 self._field_create(cr, context=context)
2468 create = not self._table_exist(cr)
2472 self._create_table(cr)
2475 cr.execute('SELECT 1 FROM "%s" LIMIT 1' % self._table)
2476 has_rows = cr.rowcount
2479 if self._parent_store:
2480 if not self._parent_columns_exist(cr):
2481 self._create_parent_columns(cr)
2482 store_compute = True
2484 self._check_removed_columns(cr, log=False)
2486 # iterate on the "object columns"
2487 column_data = self._select_column_data(cr)
2489 for k, f in self._columns.iteritems():
2490 if k == 'id': # FIXME: maybe id should be a regular column?
2492 # Don't update custom (also called manual) fields
2493 if f.manual and not update_custom_fields:
2496 if isinstance(f, fields.one2many):
2497 self._o2m_raise_on_missing_reference(cr, f)
2499 elif isinstance(f, fields.many2many):
2500 self._m2m_raise_or_create_relation(cr, f)
2503 res = column_data.get(k)
2505 # The field is not found as-is in database, try if it
2506 # exists with an old name.
2507 if not res and hasattr(f, 'oldname'):
2508 res = column_data.get(f.oldname)
2510 cr.execute('ALTER TABLE "%s" RENAME "%s" TO "%s"' % (self._table, f.oldname, k))
2512 column_data[k] = res
2513 _schema.debug("Table '%s': renamed column '%s' to '%s'",
2514 self._table, f.oldname, k)
2516 # The field already exists in database. Possibly
2517 # change its type, rename it, drop it or change its
2520 f_pg_type = res['typname']
2521 f_pg_size = res['size']
2522 f_pg_notnull = res['attnotnull']
2523 if isinstance(f, fields.function) and not f.store and\
2524 not getattr(f, 'nodrop', False):
2525 _logger.info('column %s (%s) converted to a function, removed from table %s',
2526 k, f.string, self._table)
2527 cr.execute('ALTER TABLE "%s" DROP COLUMN "%s" CASCADE' % (self._table, k))
2529 _schema.debug("Table '%s': dropped column '%s' with cascade",
2533 f_obj_type = get_pg_type(f) and get_pg_type(f)[0]
2538 ('text', 'char', pg_varchar(f.size), '::%s' % pg_varchar(f.size)),
2539 ('varchar', 'text', 'TEXT', ''),
2540 ('int4', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
2541 ('date', 'datetime', 'TIMESTAMP', '::TIMESTAMP'),
2542 ('timestamp', 'date', 'date', '::date'),
2543 ('numeric', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
2544 ('float8', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
2546 if f_pg_type == 'varchar' and f._type == 'char' and f_pg_size and (f.size is None or f_pg_size < f.size):
2548 with cr.savepoint():
2549 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" TYPE %s' % (self._table, k, pg_varchar(f.size)))
2550 except psycopg2.NotSupportedError:
2551 # An in-place ALTER TABLE cannot be done because a view depends on this field.
2552 # Do a manual copy. This will drop the view (it will be recreated later).
2553 cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO temp_change_size' % (self._table, k))
2554 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, pg_varchar(f.size)))
2555 cr.execute('UPDATE "%s" SET "%s"=temp_change_size::%s' % (self._table, k, pg_varchar(f.size)))
2556 cr.execute('ALTER TABLE "%s" DROP COLUMN temp_change_size CASCADE' % (self._table,))
2558 _schema.debug("Table '%s': column '%s' (type varchar) changed size from %s to %s",
2559 self._table, k, f_pg_size or 'unlimited', f.size or 'unlimited')
2561 if (f_pg_type==c[0]) and (f._type==c[1]):
2562 if f_pg_type != f_obj_type:
2564 cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO __temp_type_cast' % (self._table, k))
2565 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, c[2]))
2566 cr.execute(('UPDATE "%s" SET "%s"= __temp_type_cast'+c[3]) % (self._table, k))
2567 cr.execute('ALTER TABLE "%s" DROP COLUMN __temp_type_cast CASCADE' % (self._table,))
2569 _schema.debug("Table '%s': column '%s' changed type from %s to %s",
2570 self._table, k, c[0], c[1])
2573 if f_pg_type != f_obj_type:
2577 newname = k + '_moved' + str(i)
2578 cr.execute("SELECT count(1) FROM pg_class c,pg_attribute a " \
2579 "WHERE c.relname=%s " \
2580 "AND a.attname=%s " \
2581 "AND c.oid=a.attrelid ", (self._table, newname))
2582 if not cr.fetchone()[0]:
2586 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, k))
2587 cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO "%s"' % (self._table, k, newname))
2588 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, get_pg_type(f)[1]))
2589 cr.execute("COMMENT ON COLUMN %s.\"%s\" IS %%s" % (self._table, k), (f.string,))
2590 _schema.debug("Table '%s': column '%s' has changed type (DB=%s, def=%s), data moved to column %s !",
2591 self._table, k, f_pg_type, f._type, newname)
2593 # if the field is required and hasn't got a NOT NULL constraint
2594 if f.required and f_pg_notnull == 0:
2596 self._set_default_value_on_column(cr, k, context=context)
2597 # add the NOT NULL constraint
2599 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k), log_exceptions=False)
2601 _schema.debug("Table '%s': column '%s': added NOT NULL constraint",
2604 msg = "Table '%s': unable to set a NOT NULL constraint on column '%s' !\n"\
2605 "If you want to have it, you should update the records and execute manually:\n"\
2606 "ALTER TABLE %s ALTER COLUMN %s SET NOT NULL"
2607 _schema.warning(msg, self._table, k, self._table, k)
2609 elif not f.required and f_pg_notnull == 1:
2610 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, k))
2612 _schema.debug("Table '%s': column '%s': dropped NOT NULL constraint",
2615 indexname = '%s_%s_index' % (self._table, k)
2616 cr.execute("SELECT indexname FROM pg_indexes WHERE indexname = %s and tablename = %s", (indexname, self._table))
2617 res2 = cr.dictfetchall()
2618 if not res2 and f.select:
2619 cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (self._table, k, self._table, k))
2621 if f._type == 'text':
2622 # FIXME: for fields.text columns we should try creating GIN indexes instead (seems most suitable for an ERP context)
2623 msg = "Table '%s': Adding (b-tree) index for %s column '%s'."\
2624 "This is probably useless (does not work for fulltext search) and prevents INSERTs of long texts"\
2625 " because there is a length limit for indexable btree values!\n"\
2626 "Use a search view instead if you simply want to make the field searchable."
2627 _schema.warning(msg, self._table, f._type, k)
2628 if res2 and not f.select:
2629 cr.execute('DROP INDEX "%s_%s_index"' % (self._table, k))
2631 msg = "Table '%s': dropping index for column '%s' of type '%s' as it is not required anymore"
2632 _schema.debug(msg, self._table, k, f._type)
2634 if isinstance(f, fields.many2one) or (isinstance(f, fields.function) and f._type == 'many2one' and f.store):
2635 dest_model = self.pool[f._obj]
2636 if dest_model._auto and dest_model._table != 'ir_actions':
2637 self._m2o_fix_foreign_key(cr, self._table, k, dest_model, f.ondelete)
2639 # The field doesn't exist in database. Create it if necessary.
2641 if not isinstance(f, fields.function) or f.store:
2642 # add the missing field
2643 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, get_pg_type(f)[1]))
2644 cr.execute("COMMENT ON COLUMN %s.\"%s\" IS %%s" % (self._table, k), (f.string,))
2645 _schema.debug("Table '%s': added column '%s' with definition=%s",
2646 self._table, k, get_pg_type(f)[1])
2650 self._set_default_value_on_column(cr, k, context=context)
2652 # remember the functions to call for the stored fields
2653 if isinstance(f, fields.function):
2655 if f.store is not True: # i.e. if f.store is a dict
2656 order = f.store[f.store.keys()[0]][2]
2657 todo_end.append((order, self._update_store, (f, k)))
2659 # remember new-style stored fields with compute method
2660 if k in self._fields and self._fields[k].depends:
2661 stored_fields.append(self._fields[k])
2663 # and add constraints if needed
2664 if isinstance(f, fields.many2one) or (isinstance(f, fields.function) and f._type == 'many2one' and f.store):
2665 if f._obj not in self.pool:
2666 raise except_orm('Programming Error', 'There is no reference available for %s' % (f._obj,))
2667 dest_model = self.pool[f._obj]
2668 ref = dest_model._table
2669 # ir_actions is inherited so foreign key doesn't work on it
2670 if dest_model._auto and ref != 'ir_actions':
2671 self._m2o_add_foreign_key_checked(k, dest_model, f.ondelete)
2673 cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (self._table, k, self._table, k))
2677 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k))
2678 _schema.debug("Table '%s': column '%s': added a NOT NULL constraint",
2681 msg = "WARNING: unable to set column %s of table %s not null !\n"\
2682 "Try to re-run: openerp-server --update=module\n"\
2683 "If it doesn't work, update records and execute manually:\n"\
2684 "ALTER TABLE %s ALTER COLUMN %s SET NOT NULL"
2685 _logger.warning(msg, k, self._table, self._table, k, exc_info=True)
2689 cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (self._table,))
2690 create = not bool(cr.fetchone())
2692 cr.commit() # start a new transaction
2695 self._add_sql_constraints(cr)
2698 self._execute_sql(cr)
2701 self._parent_store_compute(cr)
2705 # trigger computation of new-style stored fields with a compute
2707 _logger.info("Storing computed values of %s fields %s",
2708 self._name, ', '.join(sorted(f.name for f in stored_fields)))
2709 recs = self.browse(cr, SUPERUSER_ID, [], {'active_test': False})
2710 recs = recs.search([])
2712 map(recs._recompute_todo, stored_fields)
2715 todo_end.append((1000, func, ()))
2719 def _auto_end(self, cr, context=None):
2720 """ Create the foreign keys recorded by _auto_init. """
2721 for t, k, r, d in self._foreign_keys:
2722 cr.execute('ALTER TABLE "%s" ADD FOREIGN KEY ("%s") REFERENCES "%s" ON DELETE %s' % (t, k, r, d))
2723 self._save_constraint(cr, "%s_%s_fkey" % (t, k), 'f', False)
2725 del self._foreign_keys
2728 def _table_exist(self, cr):
2729 cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (self._table,))
2733 def _create_table(self, cr):
2734 cr.execute('CREATE TABLE "%s" (id SERIAL NOT NULL, PRIMARY KEY(id))' % (self._table,))
2735 cr.execute(("COMMENT ON TABLE \"%s\" IS %%s" % self._table), (self._description,))
2736 _schema.debug("Table '%s': created", self._table)
2739 def _parent_columns_exist(self, cr):
2740 cr.execute("""SELECT c.relname
2741 FROM pg_class c, pg_attribute a
2742 WHERE c.relname=%s AND a.attname=%s AND c.oid=a.attrelid
2743 """, (self._table, 'parent_left'))
2747 def _create_parent_columns(self, cr):
2748 cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_left" INTEGER' % (self._table,))
2749 cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_right" INTEGER' % (self._table,))
2750 if 'parent_left' not in self._columns:
2751 _logger.error('create a column parent_left on object %s: fields.integer(\'Left Parent\', select=1)',
2753 _schema.debug("Table '%s': added column '%s' with definition=%s",
2754 self._table, 'parent_left', 'INTEGER')
2755 elif not self._columns['parent_left'].select:
2756 _logger.error('parent_left column on object %s must be indexed! Add select=1 to the field definition)',
2758 if 'parent_right' not in self._columns:
2759 _logger.error('create a column parent_right on object %s: fields.integer(\'Right Parent\', select=1)',
2761 _schema.debug("Table '%s': added column '%s' with definition=%s",
2762 self._table, 'parent_right', 'INTEGER')
2763 elif not self._columns['parent_right'].select:
2764 _logger.error('parent_right column on object %s must be indexed! Add select=1 to the field definition)',
2766 if self._columns[self._parent_name].ondelete not in ('cascade', 'restrict'):
2767 _logger.error("The column %s on object %s must be set as ondelete='cascade' or 'restrict'",
2768 self._parent_name, self._name)
2773 def _select_column_data(self, cr):
2774 # attlen is the number of bytes necessary to represent the type when
2775 # the type has a fixed size. If the type has a varying size attlen is
2776 # -1 and atttypmod is the size limit + 4, or -1 if there is no limit.
2777 cr.execute("SELECT c.relname,a.attname,a.attlen,a.atttypmod,a.attnotnull,a.atthasdef,t.typname,CASE WHEN a.attlen=-1 THEN (CASE WHEN a.atttypmod=-1 THEN 0 ELSE a.atttypmod-4 END) ELSE a.attlen END as size " \
2778 "FROM pg_class c,pg_attribute a,pg_type t " \
2779 "WHERE c.relname=%s " \
2780 "AND c.oid=a.attrelid " \
2781 "AND a.atttypid=t.oid", (self._table,))
2782 return dict(map(lambda x: (x['attname'], x),cr.dictfetchall()))
2785 def _o2m_raise_on_missing_reference(self, cr, f):
2786 # TODO this check should be a method on fields.one2many.
2787 if f._obj in self.pool:
2788 other = self.pool[f._obj]
2789 # TODO the condition could use fields_get_keys().
2790 if f._fields_id not in other._columns.keys():
2791 if f._fields_id not in other._inherit_fields.keys():
2792 raise except_orm('Programming Error', "There is no reference field '%s' found for '%s'" % (f._fields_id, f._obj,))
2794 def _m2m_raise_or_create_relation(self, cr, f):
2795 m2m_tbl, col1, col2 = f._sql_names(self)
2796 # do not create relations for custom fields as they do not belong to a module
2797 # they will be automatically removed when dropping the corresponding ir.model.field
2798 # table names for custom relations all start with x_, see __init__
2799 if not m2m_tbl.startswith('x_'):
2800 self._save_relation_table(cr, m2m_tbl)
2801 cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (m2m_tbl,))
2802 if not cr.dictfetchall():
2803 if f._obj not in self.pool:
2804 raise except_orm('Programming Error', 'Many2Many destination model does not exist: `%s`' % (f._obj,))
2805 dest_model = self.pool[f._obj]
2806 ref = dest_model._table
2807 cr.execute('CREATE TABLE "%s" ("%s" INTEGER NOT NULL, "%s" INTEGER NOT NULL, UNIQUE("%s","%s"))' % (m2m_tbl, col1, col2, col1, col2))
2808 # create foreign key references with ondelete=cascade, unless the targets are SQL views
2809 cr.execute("SELECT relkind FROM pg_class WHERE relkind IN ('v') AND relname=%s", (ref,))
2810 if not cr.fetchall():
2811 self._m2o_add_foreign_key_unchecked(m2m_tbl, col2, dest_model, 'cascade')
2812 cr.execute("SELECT relkind FROM pg_class WHERE relkind IN ('v') AND relname=%s", (self._table,))
2813 if not cr.fetchall():
2814 self._m2o_add_foreign_key_unchecked(m2m_tbl, col1, self, 'cascade')
2816 cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (m2m_tbl, col1, m2m_tbl, col1))
2817 cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (m2m_tbl, col2, m2m_tbl, col2))
2818 cr.execute("COMMENT ON TABLE \"%s\" IS 'RELATION BETWEEN %s AND %s'" % (m2m_tbl, self._table, ref))
2820 _schema.debug("Create table '%s': m2m relation between '%s' and '%s'", m2m_tbl, self._table, ref)
2823 def _add_sql_constraints(self, cr):
2826 Modify this model's database table constraints so they match the ones in
2830 def unify_cons_text(txt):
2831 return txt.lower().replace(', ',',').replace(' (','(')
2833 for (key, con, _) in self._sql_constraints:
2834 conname = '%s_%s' % (self._table, key)
2836 # select 1 to still get a result when there is a pg constraint (pgc) but no ir_model_constraint (imc) entry
2837 cr.execute("""SELECT definition, 1
2838 FROM ir_model_constraint imc
2839 RIGHT JOIN pg_constraint pgc
2840 ON (pgc.conname = imc.name)
2841 WHERE pgc.conname=%s
2843 existing_constraints = cr.dictfetchone()
2847 'query': 'ALTER TABLE "%s" DROP CONSTRAINT "%s"' % (self._table, conname, ),
2848 'msg_ok': "Table '%s': dropped constraint '%s'. Reason: its definition changed from '%%s' to '%s'" % (
2849 self._table, conname, con),
2850 'msg_err': "Table '%s': unable to drop \'%s\' constraint !" % (self._table, con),
2855 'query': 'ALTER TABLE "%s" ADD CONSTRAINT "%s" %s' % (self._table, conname, con,),
2856 'msg_ok': "Table '%s': added constraint '%s' with definition=%s" % (self._table, conname, con),
2857 'msg_err': "Table '%s': unable to add \'%s\' constraint !\n If you want to have it, you should update the records and execute manually:\n%%s" % (
2863 if not existing_constraints:
2864 # constraint does not exist:
2865 sql_actions['add']['execute'] = True
2866 sql_actions['add']['msg_err'] = sql_actions['add']['msg_err'] % (sql_actions['add']['query'], )
2867 elif unify_cons_text(con) != existing_constraints['definition']:
2868 # constraint exists but its definition has changed:
2869 sql_actions['drop']['execute'] = True
2870 sql_actions['drop']['msg_ok'] = sql_actions['drop']['msg_ok'] % (existing_constraints['definition'] or '', )
2871 sql_actions['add']['execute'] = True
2872 sql_actions['add']['msg_err'] = sql_actions['add']['msg_err'] % (sql_actions['add']['query'], )
2874 # we need to add the constraint:
2875 self._save_constraint(cr, conname, 'u', unify_cons_text(con))
2876 sql_actions = [item for item in sql_actions.values()]
2877 sql_actions.sort(key=lambda x: x['order'])
2878 for sql_action in [action for action in sql_actions if action['execute']]:
2880 cr.execute(sql_action['query'])
2882 _schema.debug(sql_action['msg_ok'])
2884 _schema.warning(sql_action['msg_err'])
2888 def _execute_sql(self, cr):
2889 """ Execute the SQL code from the _sql attribute (if any)."""
2890 if hasattr(self, "_sql"):
2891 for line in self._sql.split(';'):
2892 line2 = line.replace('\n', '').strip()
2898 # Update objects that use this one to update their _inherits fields
2902 def _inherits_reload_src(cls):
2903 """ Recompute the _inherit_fields mapping on each _inherits'd child model."""
2904 for model in cls.pool.values():
2905 if cls._name in model._inherits:
2906 model._inherits_reload()
2909 def _inherits_reload(cls):
2910 """ Recompute the _inherit_fields mapping.
2912 This will also call itself on each inherits'd child model.
2916 for table in cls._inherits:
2917 other = cls.pool[table]
2918 for col in other._columns.keys():
2919 res[col] = (table, cls._inherits[table], other._columns[col], table)
2920 for col in other._inherit_fields.keys():
2921 res[col] = (table, cls._inherits[table], other._inherit_fields[col][2], other._inherit_fields[col][3])
2922 cls._inherit_fields = res
2923 cls._all_columns = cls._get_column_infos()
2925 # interface columns with new-style fields
2926 for attr, column in cls._columns.items():
2927 if attr not in cls._fields:
2928 cls._add_field(attr, column.to_field())
2930 # interface inherited fields with new-style fields (note that the
2931 # reversed order keeps this consistent with _all_columns above)
2932 for parent_model, parent_field in reversed(cls._inherits.items()):
2933 for attr, field in cls.pool[parent_model]._fields.iteritems():
2934 if attr not in cls._fields:
2935 cls._add_field(attr, field.new(
2937 related=(parent_field, attr),
2941 cls._inherits_reload_src()
2944 def _get_column_infos(cls):
2945 """Returns a dict mapping all fields names (direct fields and
2946 inherited field via _inherits) to a ``column_info`` struct
2947 giving detailed columns """
2949 # do not invert the for loops, since local fields may hide inherited ones!
2950 for k, (parent, m2o, col, original_parent) in cls._inherit_fields.iteritems():
2951 result[k] = fields.column_info(k, col, parent, m2o, original_parent)
2952 for k, col in cls._columns.iteritems():
2953 result[k] = fields.column_info(k, col)
2957 def _inherits_check(cls):
2958 for table, field_name in cls._inherits.items():
2959 if field_name not in cls._columns:
2960 _logger.info('Missing many2one field definition for _inherits reference "%s" in "%s", using default one.', field_name, cls._name)
2961 cls._columns[field_name] = fields.many2one(table, string="Automatically created field to link to parent %s" % table,
2962 required=True, ondelete="cascade")
2963 elif not cls._columns[field_name].required or cls._columns[field_name].ondelete.lower() not in ("cascade", "restrict"):
2964 _logger.warning('Field definition for _inherits reference "%s" in "%s" must be marked as "required" with ondelete="cascade" or "restrict", forcing it to required + cascade.', field_name, cls._name)
2965 cls._columns[field_name].required = True
2966 cls._columns[field_name].ondelete = "cascade"
2968 # reflect fields with delegate=True in dictionary cls._inherits
2969 for field in cls._fields.itervalues():
2970 if field.type == 'many2one' and not field.related and field.delegate:
2971 if not field.required:
2972 _logger.warning("Field %s with delegate=True must be required.", field)
2973 field.required = True
2974 if field.ondelete.lower() not in ('cascade', 'restrict'):
2975 field.ondelete = 'cascade'
2976 cls._inherits[field.comodel_name] = field.name
2979 def _prepare_setup_fields(self):
2980 """ Prepare the setup of fields once the models have been loaded. """
2981 for field in self._fields.itervalues():
2985 def _setup_fields(self, partial=False):
2986 """ Setup the fields (dependency triggers, etc). """
2987 for field in self._fields.itervalues():
2989 field.setup(self.env)
2994 # group fields by compute to determine field.computed_fields
2995 fields_by_compute = defaultdict(list)
2996 for field in self._fields.itervalues():
2998 field.computed_fields = fields_by_compute[field.compute]
2999 field.computed_fields.append(field)
3001 field.computed_fields = []
3003 def fields_get(self, cr, user, allfields=None, context=None, write_access=True):
3004 """ fields_get([fields])
3006 Return the definition of each field.
3008 The returned value is a dictionary (indexed by field name) of
3009 dictionaries. The _inherits'd fields are included. The string, help,
3010 and selection (if present) attributes are translated.
3012 :param cr: database cursor
3013 :param user: current user id
3014 :param allfields: list of fields
3015 :param context: context arguments, like lang, time zone
3016 :return: dictionary of field dictionaries, each one describing a field of the business object
3017 :raise AccessError: * if user has no create/write rights on the requested object
3020 recs = self.browse(cr, user, [], context)
3023 for fname, field in self._fields.iteritems():
3024 if allfields and fname not in allfields:
3026 if not field.setup_done:
3028 if field.groups and not recs.user_has_groups(field.groups):
3030 res[fname] = field.get_description(recs.env)
3032 # if user cannot create or modify records, make all fields readonly
3033 has_access = functools.partial(recs.check_access_rights, raise_exception=False)
3034 if not (has_access('write') or has_access('create')):
3035 for description in res.itervalues():
3036 description['readonly'] = True
3037 description['states'] = {}
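# Usage sketch (hypothetical field, abridged output):
#
#     defs = self.pool['res.partner'].fields_get(cr, uid, ['name'], context=context)
#     # -> {'name': {'type': 'char', 'string': 'Name', 'required': True, ...}}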
3041 def get_empty_list_help(self, cr, user, help, context=None):
3042 """ Generic method giving the help message displayed when having
3043 no result to display in a list or kanban view. By default it returns
3044 the help given in parameter that is generally the help message
3045 defined in the action.
3049 def check_field_access_rights(self, cr, user, operation, fields, context=None):
3051 Check the user access rights on the given fields. This raises Access
3052 Denied if the user does not have the rights. Otherwise it returns the
3053 fields (as-is if `fields` is not falsy, or the readable/writable
3054 fields otherwise).
3056 if user == SUPERUSER_ID:
3057 return fields or list(self._fields)
3060 """ determine whether user has access to field `fname` """
3061 field = self._fields.get(fname)
3062 if field and field.groups:
3063 return self.user_has_groups(cr, user, groups=field.groups, context=context)
3068 fields = filter(valid, self._fields)
3070 invalid_fields = set(filter(lambda name: not valid(name), fields))
3072 _logger.warning('Access Denied by ACLs for operation: %s, uid: %s, model: %s, fields: %s',
3073 operation, user, self._name, ', '.join(invalid_fields))
3075 _('The requested operation cannot be completed due to security restrictions. '
3076 'Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
3077 (self._description, operation))
3081 # add explicit old-style implementation to read()
3083 def read(self, cr, user, ids, fields=None, context=None, load='_classic_read'):
3084 records = self.browse(cr, user, ids, context)
3085 result = BaseModel.read(records, fields, load=load)
3086 return result if isinstance(ids, list) else (bool(result) and result[0])
3088 # new-style implementation of read()
3090 def read(self, fields=None, load='_classic_read'):
3093 Reads the requested fields for the records in `self`, low-level/RPC
3094 method. In Python code, prefer :meth:`~.browse`.
3096 :param fields: list of field names to return (default is all fields)
3097 :return: a list of dictionaries mapping field names to their values,
3098 with one dictionary per record
3099 :raise AccessError: if user has no read rights on some of the given
3100 records
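Example (illustrative)::

    records.read(['name', 'partner_id'])
    # -> [{'id': 1, 'name': 'Order 1', 'partner_id': (3, 'Partner A')},
    #     {'id': 2, 'name': 'Order 2', 'partner_id': False}]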
3102 # check access rights
3103 self.check_access_rights('read')
3104 fields = self.check_field_access_rights('read', fields)
3106 # split fields into stored and computed fields
3107 stored, computed = [], []
3109 if name in self._columns:
3111 elif name in self._fields:
3112 computed.append(name)
3114 _logger.warning("%s.read() with unknown field '%s'", self._name, name)
3116 # fetch stored fields from the database to the cache
3117 self._read_from_database(stored)
3119 # retrieve results from records; this takes values from the cache and
3120 # computes remaining fields
3122 name_fields = [(name, self._fields[name]) for name in (stored + computed)]
3123 use_name_get = (load == '_classic_read')
3126 values = {'id': record.id}
3127 for name, field in name_fields:
3128 values[name] = field.convert_to_read(record[name], use_name_get)
3129 result.append(values)
3130 except MissingError:
3136 def _prefetch_field(self, field):
3137 """ Read from the database in order to fetch `field` (:class:`Field`
3138 instance) for `self` in cache.
3140 # fetch the records of this model without field_name in their cache
3141 records = self._in_cache_without(field)
3143 if len(records) > PREFETCH_MAX:
3144 records = records[:PREFETCH_MAX] | self
3146 # determine which fields can be prefetched
3147 if not self.env.in_draft and \
3148 self._context.get('prefetch_fields', True) and \
3149 self._columns[field.name]._prefetch:
3150 # prefetch all classic and many2one fields that the user can access
3152 for fname, fcolumn in self._columns.iteritems()
3153 if fcolumn._prefetch
3154 if not fcolumn.groups or self.user_has_groups(fcolumn.groups)
3157 fnames = {field.name}
3159 # important: never prefetch fields to recompute!
3160 get_recs_todo = self.env.field_todo
3161 for fname in list(fnames):
3162 if get_recs_todo(self._fields[fname]):
3163 if fname == field.name:
3164 records -= get_recs_todo(field)
3166 fnames.discard(fname)
3168 # fetch records with read()
3169 assert self in records and field.name in fnames
3172 result = records.read(list(fnames), load='_classic_write')
3176 # check the cache, and update it if necessary
3177 if not self._cache.contains(field):
3178 for values in result:
3179 record = self.browse(values.pop('id'))
3180 record._cache.update(record._convert_to_cache(values, validate=False))
3181 if not self._cache.contains(field):
3182 e = AccessError("No value found for %s.%s" % (self, field.name))
3183 self._cache[field] = FailedValue(e)
3186 def _read_from_database(self, field_names):
3187 """ Read the given fields of the records in `self` from the database,
3188 and store them in cache. Access errors are also stored in cache.
3191 cr, user, context = env.args
3193 # FIXME: The query construction needs to be rewritten using the internal Query
3194 # object, as in search(), to avoid ambiguous column references when
3195 # reading/sorting on a table that is auto_joined to another table with
3196 # common columns (e.g. the magical columns)
3198 # Construct a clause for the security rules.
3199 # 'tables' holds the list of tables necessary for the SELECT, including
3200 # the ir.rule clauses, and contains at least self._table.
3201 rule_clause, rule_params, tables = env['ir.rule'].domain_get(self._name, 'read')
3203 # determine the fields that are stored as columns in self._table
3204 fields_pre = [f for f in field_names if self._columns[f]._classic_write]
3206 # we need fully-qualified column names in case len(tables) > 1
3208 if isinstance(self._columns.get(f), fields.binary) and \
3209 context.get('bin_size_%s' % f, context.get('bin_size')):
3210 # PG 9.2 introduced a conflicting pg_size_pretty(numeric) -> need an explicit ::bigint cast
3211 return 'pg_size_pretty(length(%s."%s")::bigint) as "%s"' % (self._table, f, f)
3213 return '%s."%s"' % (self._table, f)
3214 qual_names = map(qualify, set(fields_pre + ['id']))
3216 query = """ SELECT %(qual_names)s FROM %(tables)s
3217 WHERE %(table)s.id IN %%s AND (%(extra)s)
3220 'qual_names': ",".join(qual_names),
3221 'tables': ",".join(tables),
3222 'table': self._table,
3223 'extra': " OR ".join(rule_clause) if rule_clause else "TRUE",
3224 'order': self._parent_order or self._order,
3228 for sub_ids in cr.split_for_in_conditions(self.ids):
3229 cr.execute(query, [tuple(sub_ids)] + rule_params)
3230 result.extend(cr.dictfetchall())
3232 ids = [vals['id'] for vals in result]
3235 # translate the fields if necessary
3236 if context.get('lang'):
3237 ir_translation = env['ir.translation']
3238 for f in fields_pre:
3239 if self._columns[f].translate:
3240 #TODO: optimize out of this loop
3241 res_trans = ir_translation._get_ids(
3242 '%s,%s' % (self._name, f), 'model', context['lang'], ids)
3244 vals[f] = res_trans.get(vals['id'], False) or vals[f]
3246 # apply the symbol_get functions of the fields we just read
3247 for f in fields_pre:
3248 symbol_get = self._columns[f]._symbol_get
3251 vals[f] = symbol_get(vals[f])
3253 # store result in cache for POST fields
3255 record = self.browse(vals['id'])
3256 record._cache.update(record._convert_to_cache(vals, validate=False))
3258 # determine the fields that must be processed now
3259 fields_post = [f for f in field_names if not self._columns[f]._classic_write]
3261 # Compute POST fields, grouped by multi
3262 by_multi = defaultdict(list)
3263 for f in fields_post:
3264 by_multi[self._columns[f]._multi].append(f)
3266 for multi, fs in by_multi.iteritems():
3268 res2 = self._columns[fs[0]].get(cr, self._model, ids, fs, user, context=context, values=result)
3269 assert res2 is not None, \
3270 'The function field "%s" on the "%s" model returned None\n' \
3271 '(a dictionary was expected).' % (fs[0], self._name)
3273 # TOCHECK: why we got a string instead of a dict in Python 2.6
3274 # if isinstance(res2[vals['id']], str): res2[vals['id']] = eval(res2[vals['id']])
3275 multi_fields = res2.get(vals['id'], {})
3278 vals[f] = multi_fields.get(f, [])
3281 res2 = self._columns[f].get(cr, self._model, ids, f, user, context=context, values=result)
3284 vals[f] = res2[vals['id']]
3288 # Warn about deprecated fields now that fields_pre and fields_post are computed
3289 for f in field_names:
3290 column = self._columns[f]
3291 if column.deprecated:
3292 _logger.warning('Field %s.%s is deprecated: %s', self._name, f, column.deprecated)
3294 # store result in cache
3296 record = self.browse(vals.pop('id'))
3297 record._cache.update(record._convert_to_cache(vals, validate=False))
3299 # store failed values in cache for the records that could not be read
3300 fetched = self.browse(ids)
3301 missing = self - fetched
3303 extras = fetched - self
3306 _("Database fetch misses ids ({}) and has extra ids ({}), may be caused by a type incoherence in a previous request").format(
3307 ', '.join(map(repr, missing._ids)),
3308 ', '.join(map(repr, extras._ids)),
3310 # store an access error exception in existing records
3312 _('The requested operation cannot be completed due to security restrictions. Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
3313 (self._name, 'read')
3315 forbidden = missing.exists()
3316 forbidden._cache.update(FailedValue(exc))
3317 # store a missing error exception in non-existing records
3319 _('One of the documents you are trying to access has been deleted, please try again after refreshing.')
3321 (missing - forbidden)._cache.update(FailedValue(exc))
3324 def get_metadata(self):
3326 Returns some metadata about the given records.
3328 :return: list of ownership dictionaries for each requested record
3329 :rtype: list of dictionaries with the following keys:
3332 * create_uid: user who created the record
3333 * create_date: date when the record was created
3334 * write_uid: last user who changed the record
3335 * write_date: date of the last change to the record
3336 * xmlid: XML ID to use to refer to this record (if there is one), in format ``module.name``
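Example of one returned dictionary (illustrative)::

    {'id': 42,
     'create_uid': (1, 'Administrator'),
     'create_date': '2014-01-01 10:00:00',
     'write_uid': (1, 'Administrator'),
     'write_date': '2014-01-02 09:30:00',
     'xmlid': 'base.user_root'}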
3339 if self._log_access:
3340 fields += ['create_uid', 'create_date', 'write_uid', 'write_date']
3341 quoted_table = '"%s"' % self._table
3342 fields_str = ",".join('%s.%s' % (quoted_table, field) for field in fields)
3343 query = '''SELECT %s, __imd.module, __imd.name
3344 FROM %s LEFT JOIN ir_model_data __imd
3345 ON (__imd.model = %%s and __imd.res_id = %s.id)
3346 WHERE %s.id IN %%s''' % (fields_str, quoted_table, quoted_table, quoted_table)
3347 self._cr.execute(query, (self._name, tuple(self.ids)))
3348 res = self._cr.dictfetchall()
3350 uids = set(r[k] for r in res for k in ['write_uid', 'create_uid'] if r.get(k))
3351 names = dict(self.env['res.users'].browse(uids).name_get())
3355 value = r[key] = r[key] or False
3356 if key in ('write_uid', 'create_uid') and value in names:
3357 r[key] = (value, names[value])
3358 r['xmlid'] = ("%(module)s.%(name)s" % r) if r['name'] else False
3359 del r['name'], r['module']
3362 def _check_concurrency(self, cr, ids, context):
3365 if not (context.get(self.CONCURRENCY_CHECK_FIELD) and self._log_access):
3367 check_clause = "(id = %s AND %s < COALESCE(write_date, create_date, (now() at time zone 'UTC'))::timestamp)"
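# Illustrative sketch of the expected context format: the client passes the
# write dates it knows, keyed by "model,id", e.g.
#   context[self.CONCURRENCY_CHECK_FIELD] = {'res.partner,7': '2014-06-01 12:00:00'}
# Any record whose actual write_date is newer triggers the concurrency error below.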
3368 for sub_ids in cr.split_for_in_conditions(ids):
3371 id_ref = "%s,%s" % (self._name, id)
3372 update_date = context[self.CONCURRENCY_CHECK_FIELD].pop(id_ref, None)
3374 ids_to_check.extend([id, update_date])
3375 if not ids_to_check:
3377 cr.execute("SELECT id FROM %s WHERE %s" % (self._table, " OR ".join([check_clause]*(len(ids_to_check)/2))), tuple(ids_to_check))
3380 # mention the first one only to keep the error message readable
3381 raise except_orm('ConcurrencyException', _('A document was modified since you last viewed it (%s:%d)') % (self._description, res[0]))
3383 def _check_record_rules_result_count(self, cr, uid, ids, result_ids, operation, context=None):
3384 """Verify the returned rows after applying record rules matches
3385 the length of `ids`, and raise an appropriate exception if it does not.
3389 ids, result_ids = set(ids), set(result_ids)
3390 missing_ids = ids - result_ids
3392 # Attempt to distinguish record rule restriction vs deleted records,
3393 # to provide a more specific error message - check whether the missing ids still exist in the database
3394 cr.execute('SELECT id FROM ' + self._table + ' WHERE id IN %s', (tuple(missing_ids),))
3395 forbidden_ids = [x[0] for x in cr.fetchall()]
3397 # the missing ids are (at least partially) hidden by access rules
3398 if uid == SUPERUSER_ID:
3400 _logger.warning('Access Denied by record rules for operation: %s on record ids: %r, uid: %s, model: %s', operation, forbidden_ids, uid, self._name)
3401 raise except_orm(_('Access Denied'),
3402 _('The requested operation cannot be completed due to security restrictions. Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
3403 (self._description, operation))
3405 # If we get here, the missing_ids are not in the database
3406 if operation in ('read','unlink'):
3407 # No need to warn about deleting an already deleted record.
3408 # And no error when reading a record that was deleted, to prevent spurious
3409 # errors for non-transactional search/read sequences coming from clients
3411 _logger.warning('Failed operation on deleted record(s): %s, uid: %s, model: %s', operation, uid, self._name)
3412 raise except_orm(_('Missing document(s)'),
3413 _('One of the documents you are trying to access has been deleted, please try again after refreshing.'))
3416 def check_access_rights(self, cr, uid, operation, raise_exception=True): # no context on purpose.
3417 """Verifies that the operation given by ``operation`` is allowed for the user
3418 according to the access rights."""
3419 return self.pool.get('ir.model.access').check(cr, uid, self._name, operation, raise_exception)
3421 def check_access_rule(self, cr, uid, ids, operation, context=None):
3422 """Verifies that the operation given by ``operation`` is allowed for the user
3423 according to ir.rules.
3425 :param operation: one of ``write``, ``unlink``
3426 :raise except_orm: * if current ir.rules do not permit this operation.
3427 :return: None if the operation is allowed
3429 if uid == SUPERUSER_ID:
3432 if self.is_transient():
3433 # Only one implicit access rule for transient models: owner only!
3434 # This is ok to hardcode because we assert that TransientModels always
3435 # have log_access enabled so that the create_uid column is always there.
3436 # And even with _inherits, these fields are always present in the local
3437 # table too, so no need for JOINs.
3438 cr.execute("""SELECT distinct create_uid
3440 WHERE id IN %%s""" % self._table, (tuple(ids),))
3441 uids = [x[0] for x in cr.fetchall()]
3442 if len(uids) != 1 or uids[0] != uid:
3443 raise except_orm(_('Access Denied'),
3444 _('For this kind of document, you may only access records you created yourself.\n\n(Document type: %s)') % (self._description,))
3446 where_clause, where_params, tables = self.pool.get('ir.rule').domain_get(cr, uid, self._name, operation, context=context)
3448 where_clause = ' and ' + ' and '.join(where_clause)
3449 for sub_ids in cr.split_for_in_conditions(ids):
3450 cr.execute('SELECT ' + self._table + '.id FROM ' + ','.join(tables) +
3451 ' WHERE ' + self._table + '.id IN %s' + where_clause,
3452 [sub_ids] + where_params)
3453 returned_ids = [x['id'] for x in cr.dictfetchall()]
3454 self._check_record_rules_result_count(cr, uid, sub_ids, returned_ids, operation, context=context)
3456 def create_workflow(self, cr, uid, ids, context=None):
3457 """Create a workflow instance for each given record IDs."""
3458 from openerp import workflow
3460 workflow.trg_create(uid, self._name, res_id, cr)
3461 # self.invalidate_cache(cr, uid, context=context) ?
3464 def delete_workflow(self, cr, uid, ids, context=None):
3465 """Delete the workflow instances bound to the given record IDs."""
3466 from openerp import workflow
3468 workflow.trg_delete(uid, self._name, res_id, cr)
3469 self.invalidate_cache(cr, uid, context=context)
3472 def step_workflow(self, cr, uid, ids, context=None):
3473 """Reevaluate the workflow instances of the given record IDs."""
3474 from openerp import workflow
3476 workflow.trg_write(uid, self._name, res_id, cr)
3477 # self.invalidate_cache(cr, uid, context=context) ?
3480 def signal_workflow(self, cr, uid, ids, signal, context=None):
3481 """Send given workflow signal and return a dict mapping ids to workflow results"""
3482 from openerp import workflow
3485 result[res_id] = workflow.trg_validate(uid, self._name, res_id, signal, cr)
3486 # self.invalidate_cache(cr, uid, context=context) ?
3489 def redirect_workflow(self, cr, uid, old_new_ids, context=None):
3490 """ Rebind the workflow instance bound to the given 'old' record IDs to
3491 the given 'new' IDs. (``old_new_ids`` is a list of pairs ``(old, new)``.
3493 from openerp import workflow
3494 for old_id, new_id in old_new_ids:
3495 workflow.trg_redirect(uid, self._name, old_id, new_id, cr)
3496 self.invalidate_cache(cr, uid, context=context)
3499 def unlink(self, cr, uid, ids, context=None):
3502 Deletes the records of the current set
3504 :raise AccessError: * if user has no unlink rights on the requested object
3505 * if user tries to bypass access rules for unlink on the requested object
3506 :raise UserError: if the record is a default property for other records
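Example (illustrative)::

    env['res.partner'].browse([7, 8]).unlink()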
3511 if isinstance(ids, (int, long)):
3514 result_store = self._store_get_values(cr, uid, ids, self._all_columns.keys(), context)
3516 # for recomputing new-style fields
3517 recs = self.browse(cr, uid, ids, context)
3518 recs.modified(self._fields)
3520 self._check_concurrency(cr, ids, context)
3522 self.check_access_rights(cr, uid, 'unlink')
3524 ir_property = self.pool.get('ir.property')
3526 # Check if the records are used as default properties.
3527 domain = [('res_id', '=', False),
3528 ('value_reference', 'in', ['%s,%s' % (self._name, i) for i in ids]),
3530 if ir_property.search(cr, uid, domain, context=context):
3531 raise except_orm(_('Error'), _('Unable to delete this document because it is used as a default property'))
3533 # Delete the records' properties.
3534 property_ids = ir_property.search(cr, uid, [('res_id', 'in', ['%s,%s' % (self._name, i) for i in ids])], context=context)
3535 ir_property.unlink(cr, uid, property_ids, context=context)
3537 self.delete_workflow(cr, uid, ids, context=context)
3539 self.check_access_rule(cr, uid, ids, 'unlink', context=context)
3540 pool_model_data = self.pool.get('ir.model.data')
3541 ir_values_obj = self.pool.get('ir.values')
3542 ir_attachment_obj = self.pool.get('ir.attachment')
3543 for sub_ids in cr.split_for_in_conditions(ids):
3544 cr.execute('delete from ' + self._table + ' ' \
3545 'where id IN %s', (sub_ids,))
3547 # Removing the ir_model_data reference if the record being deleted is a record created by xml/csv file,
3548 # as these are not connected with real database foreign keys, and would be dangling references.
3549 # Note: following steps performed as admin to avoid access rights restrictions, and with no context
3550 # to avoid possible side-effects during admin calls.
3551 # Step 1. Calling unlink of ir_model_data only for the affected IDS
3552 reference_ids = pool_model_data.search(cr, SUPERUSER_ID, [('res_id','in',list(sub_ids)),('model','=',self._name)])
3553 # Step 2. Performing the actual deletion of the referenced records
3555 pool_model_data.unlink(cr, SUPERUSER_ID, reference_ids)
3557 # For the same reason, removing the record relevant to ir_values
3558 ir_value_ids = ir_values_obj.search(cr, uid,
3559 ['|',('value','in',['%s,%s' % (self._name, sid) for sid in sub_ids]),'&',('res_id','in',list(sub_ids)),('model','=',self._name)],
3562 ir_values_obj.unlink(cr, uid, ir_value_ids, context=context)
3564 # For the same reason, removing the record relevant to ir_attachment
3565 # The search is performed with sql as the search method of ir_attachment is overridden to hide attachments of deleted records
3566 cr.execute('select id from ir_attachment where res_model = %s and res_id in %s', (self._name, sub_ids))
3567 ir_attachment_ids = [ir_attachment[0] for ir_attachment in cr.fetchall()]
3568 if ir_attachment_ids:
3569 ir_attachment_obj.unlink(cr, uid, ir_attachment_ids, context=context)
3571 # invalidate the *whole* cache, since the orm does not handle all
3572 # changes made in the database, like cascading delete!
3573 recs.invalidate_cache()
3575 for order, obj_name, store_ids, fields in result_store:
3576 if obj_name == self._name:
3577 effective_store_ids = set(store_ids) - set(ids)
3579 effective_store_ids = store_ids
3580 if effective_store_ids:
3581 obj = self.pool[obj_name]
3582 cr.execute('select id from '+obj._table+' where id IN %s', (tuple(effective_store_ids),))
3583 rids = map(lambda x: x[0], cr.fetchall())
3585 obj._store_set_values(cr, uid, rids, fields, context)
3587 # recompute new-style fields
3596 def write(self, vals):
3599 Updates all records in the current set with the provided values.
3601 :param dict vals: fields to update and the value to set on them e.g::
3603 {'foo': 1, 'bar': "Qux"}
3605 will set the field ``foo`` to ``1`` and the field ``bar`` to
3606 ``"Qux"`` if those are valid (otherwise it will trigger an error).
3608 :raise AccessError: * if user has no write rights on the requested object
3609 * if user tries to bypass access rules for write on the requested object
3610 :raise ValidateError: if user tries to enter invalid value for a field that is not in selection
3611 :raise UserError: if a loop would be created in a hierarchy of objects as a result of the operation (such as setting an object as its own parent)
3613 .. _openerp/models/relationals/format:
3615 .. note:: Relational fields use a special "commands" format to manipulate their values
3617 This format is a list of command triplets executed sequentially,
3618 possible command triplets are:
3620 ``(0, _, values: dict)``
3621 links to a new record created from the provided values
3622 ``(1, id, values: dict)``
3623 updates the already-linked record of id ``id`` with the
3624 provided ``values``
3625 ``(2, id, _)``
3626 unlinks and deletes the linked record of id ``id``
3627 ``(3, id, _)``
3628 unlinks the linked record of id ``id`` without deleting it
3629 ``(4, id, _)``
3630 links to an existing record of id ``id``
3631 ``(5, _, _)``
3632 unlinks all records in the relation, equivalent to using
3633 the command ``3`` on every linked record
3634 ``(6, _, ids)``
3635 replaces the existing list of linked records by the provided
3636 ones, equivalent to using ``5`` then ``4`` for each id in
3637 ``ids``
3639 (in command triplets, ``_`` values are ignored and can be
3640 anything, generally ``0`` or ``False``)
3642 Any command can be used on :class:`~openerp.fields.Many2many`,
3643 only ``0``, ``1`` and ``2`` can be used on
3644 :class:`~openerp.fields.One2many`.
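For example (an illustrative sketch; field and record names are hypothetical),
updating a one2many with one new line, one modified line and one deleted line::

    order.write({
        'line_ids': [
            (0, 0, {'name': 'new line'}),  # create and link a new record
            (1, 7, {'name': 'renamed'}),   # update linked record with id 7
            (2, 8, 0),                     # delete linked record with id 8
        ],
    })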
3649 self._check_concurrency(self._ids)
3650 self.check_access_rights('write')
3652 # No user-driven update of these columns
3653 for field in itertools.chain(MAGIC_COLUMNS, ('parent_left', 'parent_right')):
3654 vals.pop(field, None)
3656 # split up fields into old-style and pure new-style ones
3657 old_vals, new_vals, unknown = {}, {}, []
3658 for key, val in vals.iteritems():
3659 field = self._fields.get(key)
3661 if field.store or field.inherited:
3663 if field.inverse and not field.inherited:
3669 _logger.warning("%s.write() with unknown fields: %s", self._name, ', '.join(sorted(unknown)))
3671 # write old-style fields with (low-level) method _write
3673 self._write(old_vals)
3675 # put the values of pure new-style fields into cache, and inverse them
3678 record._cache.update(record._convert_to_cache(new_vals, update=True))
3679 for key in new_vals:
3680 self._fields[key].determine_inverse(self)
3684 def _write(self, cr, user, ids, vals, context=None):
3685 # low-level implementation of write()
3690 self.check_field_access_rights(cr, user, 'write', vals.keys())
3691 deleted_related = defaultdict(list)
3692 for field in vals.keys():
3694 if field in self._columns:
3695 fobj = self._columns[field]
3696 elif field in self._inherit_fields:
3697 fobj = self._inherit_fields[field][2]
3700 if fobj._type in ['one2many', 'many2many'] and vals[field]:
3701 for wtuple in vals[field]:
3702 if isinstance(wtuple, (tuple, list)) and wtuple[0] == 2:
3703 deleted_related[fobj._obj].append(wtuple[1])
3708 for group in groups:
3709 module = group.split(".")[0]
3710 grp = group.split(".")[1]
3711 cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name=%s and module=%s and model=%s) and uid=%s", \
3712 (grp, module, 'res.groups', user))
3713 readonly = cr.fetchall()
3714 if readonly[0][0] >= 1:
3721 result = self._store_get_values(cr, user, ids, vals.keys(), context) or []
3723 # for recomputing new-style fields
3724 recs = self.browse(cr, user, ids, context)
3725 modified_fields = list(vals)
3726 if self._log_access:
3727 modified_fields += ['write_date', 'write_uid']
3728 recs.modified(modified_fields)
3730 parents_changed = []
3731 parent_order = self._parent_order or self._order
3732 if self._parent_store and (self._parent_name in vals) and not context.get('defer_parent_store_computation'):
3733 # The parent_left/right computation may take up to
3734 # 5 seconds. No need to recompute the values if the
3735 # parent is the same.
3736 # Note: to respect parent_order, nodes must be processed in
3737 # order, so ``parents_changed`` must be ordered properly.
3738 parent_val = vals[self._parent_name]
3740 query = "SELECT id FROM %s WHERE id IN %%s AND (%s != %%s OR %s IS NULL) ORDER BY %s" % \
3741 (self._table, self._parent_name, self._parent_name, parent_order)
3742 cr.execute(query, (tuple(ids), parent_val))
3744 query = "SELECT id FROM %s WHERE id IN %%s AND (%s IS NOT NULL) ORDER BY %s" % \
3745 (self._table, self._parent_name, parent_order)
3746 cr.execute(query, (tuple(ids),))
3747 parents_changed = map(operator.itemgetter(0), cr.fetchall())
3754 totranslate = context.get('lang', False) and (context['lang'] != 'en_US')
3756 field_column = self._all_columns.get(field) and self._all_columns.get(field).column
3757 if field_column and field_column.deprecated:
3758 _logger.warning('Field %s.%s is deprecated: %s', self._name, field, field_column.deprecated)
3759 if field in self._columns:
3760 if self._columns[field]._classic_write and not (hasattr(self._columns[field], '_fnct_inv')):
3761 if (not totranslate) or not self._columns[field].translate:
3762 upd0.append('"'+field+'"='+self._columns[field]._symbol_set[0])
3763 upd1.append(self._columns[field]._symbol_set[1](vals[field]))
3764 direct.append(field)
3766 upd_todo.append(field)
3768 updend.append(field)
3769 if field in self._columns \
3770 and hasattr(self._columns[field], 'selection') \
3772 self._check_selection_field_value(cr, user, field, vals[field], context=context)
3774 if self._log_access:
3775 upd0.append('write_uid=%s')
3776 upd0.append("write_date=(now() at time zone 'UTC')")
3778 direct.append('write_uid')
3779 direct.append('write_date')
3782 self.check_access_rule(cr, user, ids, 'write', context=context)
3783 for sub_ids in cr.split_for_in_conditions(ids):
3784 cr.execute('update ' + self._table + ' set ' + ','.join(upd0) + ' ' \
3785 'where id IN %s', upd1 + [sub_ids])
3786 if cr.rowcount != len(sub_ids):
3787 raise MissingError(_('One of the records you are trying to modify has already been deleted (Document type: %s).') % self._description)
3792 if self._columns[f].translate:
3793 src_trans = self.pool[self._name].read(cr, user, ids, [f])[0][f]
3796 # Inserting value to DB
3797 context_wo_lang = dict(context, lang=None)
3798 self.write(cr, user, ids, {f: vals[f]}, context=context_wo_lang)
3799 self.pool.get('ir.translation')._set_ids(cr, user, self._name+','+f, 'model', context['lang'], ids, vals[f], src_trans)
3801 # invalidate and mark new-style fields to recompute; do this before
3802 # setting other fields, because it can require the value of computed
3803 # fields, e.g., a one2many checking constraints on records
3804 recs.modified(direct)
3806 # call the 'set' method of fields which are not classic_write
3807 upd_todo.sort(lambda x, y: self._columns[x].priority-self._columns[y].priority)
3809 # default elements in context must be removed when calling a one2many or many2many
3810 rel_context = context.copy()
3811 for c in context.items():
3812 if c[0].startswith('default_'):
3813 del rel_context[c[0]]
3815 for field in upd_todo:
3817 result += self._columns[field].set(cr, self, id, field, vals[field], user, context=rel_context) or []
3819 # for recomputing new-style fields
3820 recs.modified(upd_todo)
3822 unknown_fields = updend[:]
3823 for table in self._inherits:
3824 col = self._inherits[table]
3826 for sub_ids in cr.split_for_in_conditions(ids):
3827 cr.execute('select distinct "'+col+'" from "'+self._table+'" ' \
3828 'where id IN %s', (sub_ids,))
3829 nids.extend([x[0] for x in cr.fetchall()])
3833 if self._inherit_fields[val][0] == table:
3835 unknown_fields.remove(val)
3837 self.pool[table].write(cr, user, nids, v, context)
3841 'No such field(s) in model %s: %s.',
3842 self._name, ', '.join(unknown_fields))
3844 # check Python constraints
3845 recs._validate_fields(vals)
3847 # TODO: use _order to set dest at the right position and not the first node of the parent
3848 # We can't defer parent_store computation because the stored function
3849 # fields that are computed may refer (directly or indirectly) to
3850 # parent_left/right (via a child_of domain)
3853 self.pool._init_parent[self._name] = True
3855 order = self._parent_order or self._order
3856 parent_val = vals[self._parent_name]
3858 clause, params = '%s=%%s' % (self._parent_name,), (parent_val,)
3860 clause, params = '%s IS NULL' % (self._parent_name,), ()
3862 for id in parents_changed:
3863 cr.execute('SELECT parent_left, parent_right FROM %s WHERE id=%%s' % (self._table,), (id,))
3864 pleft, pright = cr.fetchone()
3865 distance = pright - pleft + 1
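# Illustrative example: a subtree with parent_left=5 and parent_right=8
# spans distance = 8 - 5 + 1 = 4 positions, which must all be shifted
# together when the subtree moves.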
3867 # Positions of current siblings, to locate proper insertion point;
3868 # this can _not_ be fetched outside the loop, as it needs to be refreshed
3869 # after each update, in case several nodes are sequentially inserted one
3870 # next to the other (i.e. computed incrementally)
3871 cr.execute('SELECT parent_right, id FROM %s WHERE %s ORDER BY %s' % (self._table, clause, parent_order), params)
3872 parents = cr.fetchall()
3874 # Find Position of the element
3876 for (parent_pright, parent_id) in parents:
3879 position = parent_pright and parent_pright + 1 or 1
3881 # It's the first node of the parent
3886 cr.execute('select parent_left from '+self._table+' where id=%s', (parent_val,))
3887 position = cr.fetchone()[0] + 1
3889 if pleft < position <= pright:
3890 raise except_orm(_('UserError'), _('Recursivity Detected.'))
3892 if pleft < position:
3893 cr.execute('update '+self._table+' set parent_left=parent_left+%s where parent_left>=%s', (distance, position))
3894 cr.execute('update '+self._table+' set parent_right=parent_right+%s where parent_right>=%s', (distance, position))
3895 cr.execute('update '+self._table+' set parent_left=parent_left+%s, parent_right=parent_right+%s where parent_left>=%s and parent_left<%s', (position-pleft, position-pleft, pleft, pright))
3897 cr.execute('update '+self._table+' set parent_left=parent_left+%s where parent_left>=%s', (distance, position))
3898 cr.execute('update '+self._table+' set parent_right=parent_right+%s where parent_right>=%s', (distance, position))
3899 cr.execute('update '+self._table+' set parent_left=parent_left-%s, parent_right=parent_right-%s where parent_left>=%s and parent_left<%s', (pleft-position+distance, pleft-position+distance, pleft+distance, pright+distance))
3900 recs.invalidate_cache(['parent_left', 'parent_right'])
3902 result += self._store_get_values(cr, user, ids, vals.keys(), context)
3906 for order, model_name, ids_to_update, fields_to_recompute in result:
3907 key = (model_name, tuple(fields_to_recompute))
3908 done.setdefault(key, {})
3909 # avoid doing the same computation several times
3911 for id in ids_to_update:
3912 if id not in done[key]:
3913 done[key][id] = True
3914 if id not in deleted_related[model_name]:
3916 self.pool[model_name]._store_set_values(cr, user, todo, fields_to_recompute, context)
3918 # recompute new-style fields
3919 if context.get('recompute', True):
3922 self.step_workflow(cr, user, ids, context=context)
3926 # TODO: Should set perm to user.xxx
3929 @api.returns('self', lambda value: value.id)
3930 def create(self, vals):
3931 """ create(vals) -> record
3933 Creates a new record for the model.
3935 The new record is initialized using the values from ``vals`` and
3936 if necessary those from :meth:`~.default_get`.
3938 :param dict vals:
3939 values for the model's fields, as a dictionary::
3941 {'field_name': field_value, ...}
3943 see :meth:`~.write` for details
3944 :return: new record created
3945 :raise AccessError: * if user has no create rights on the requested object
3946 * if user tries to bypass access rules for create on the requested object
3947 :raise ValidateError: if user tries to enter invalid value for a field that is not in selection
3948 :raise UserError: if a loop would be created in a hierarchy of objects as a result of the operation (such as setting an object as its own parent)
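Example (illustrative; assumes an ``env`` Environment and model-specific
field names)::

    partner = env['res.partner'].create({'name': 'Foo'})
    assert partner.name == 'Foo'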
3950 self.check_access_rights('create')
3952 # add missing defaults, and drop fields that may not be set by user
3953 vals = self._add_missing_default_values(vals)
3954 for field in itertools.chain(MAGIC_COLUMNS, ('parent_left', 'parent_right')):
3955 vals.pop(field, None)
3957 # split up fields into old-style and pure new-style ones
3958 old_vals, new_vals, unknown = {}, {}, []
3959 for key, val in vals.iteritems():
3960 field = self._fields.get(key)
3962 if field.store or field.inherited:
3964 if field.inverse and not field.inherited:
3970 _logger.warning("%s.create() with unknown fields: %s", self._name, ', '.join(sorted(unknown)))
3972 # create record with old-style fields
3973 record = self.browse(self._create(old_vals))
3975 # put the values of pure new-style fields into cache, and inverse them
3976 record._cache.update(record._convert_to_cache(new_vals))
3977 for key in new_vals:
3978 self._fields[key].determine_inverse(record)
3982 def _create(self, cr, user, vals, context=None):
3983 # low-level implementation of create()
3987 if self.is_transient():
3988 self._transient_vacuum(cr, user)
3991 for v in self._inherits:
3992 if self._inherits[v] not in vals:
3995 tocreate[v] = {'id': vals[self._inherits[v]]}
3998 # list of column assignments defined as tuples like:
3999 # (column_name, format_string, column_value)
4000 # (column_name, sql_formula)
4001 # Those tuples will be used by the string formatting for the INSERT
4003 ('id', "nextval('%s')" % self._sequence),
4008 for v in vals.keys():
4009 if v in self._inherit_fields and v not in self._columns:
4010 (table, col, col_detail, original_parent) = self._inherit_fields[v]
4011 tocreate[table][v] = vals[v]
4014 if (v not in self._inherit_fields) and (v not in self._columns):
4016 unknown_fields.append(v)
4019 'No such field(s) in model %s: %s.',
4020 self._name, ', '.join(unknown_fields))
4022 for table in tocreate:
4023 if self._inherits[table] in vals:
4024 del vals[self._inherits[table]]
4026 record_id = tocreate[table].pop('id', None)
4028 if not record_id:
4029 record_id = self.pool[table].create(cr, user, tocreate[table], context=context)
4031 self.pool[table].write(cr, user, [record_id], tocreate[table], context=context)
4033 updates.append((self._inherits[table], '%s', record_id))
4035 # Set boolean fields to False if they are not touched (to make search more reliable)
4036 bool_fields = [x for x in self._columns.keys() if self._columns[x]._type=='boolean']
4038 for bool_field in bool_fields:
4039 if bool_field not in vals:
4040 vals[bool_field] = False
4042 for field in vals.keys():
4044 if field in self._columns:
4045 fobj = self._columns[field]
4047 fobj = self._inherit_fields[field][2]
4053 for group in groups:
4054 module = group.split(".")[0]
4055 grp = group.split(".")[1]
4056 cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name='%s' and module='%s' and model='%s') and uid=%s" % \
4057 (grp, module, 'res.groups', user))
4058 readonly = cr.fetchall()
4059 if readonly[0][0] >= 1:
4062 elif readonly[0][0] == 0:
4070 current_field = self._columns[field]
4071 if current_field._classic_write:
4072 updates.append((field, '%s', current_field._symbol_set[1](vals[field])))
4074 #for the function fields that receive a value, we set them directly in the database
4075 #(they may be required), but we also need to trigger the _fnct_inv()
4076 if (hasattr(current_field, '_fnct_inv')) and not isinstance(current_field, fields.related):
4077 #TODO: this way of special-casing the related fields is really creepy, but it shouldn't be changed
4078 #one week before the release candidate. It seems the only good way to handle this correctly is to add an
4079 #attribute to make a field 'really readonly' and thus totally ignored by the create()... otherwise
4080 #if, for example, the related has a default value (for usability) then the fct_inv is called and it
4081 #may raise some access rights error. Changing this is a too big change for now, and is thus postponed
4082 #after the release but, definitively, the behavior shouldn't be different for related and function
4084 upd_todo.append(field)
4086 #TODO: this 'if' statement should be removed because there is no good reason to special case the
4087 #related fields. See the above TODO comment for further explanations.
4088 if not isinstance(current_field, fields.related):
4089 upd_todo.append(field)
4090 if field in self._columns \
4091 and hasattr(current_field, 'selection') \
4093 self._check_selection_field_value(cr, user, field, vals[field], context=context)
4094 if self._log_access:
4095 updates.append(('create_uid', '%s', user))
4096 updates.append(('write_uid', '%s', user))
4097 updates.append(('create_date', "(now() at time zone 'UTC')"))
4098 updates.append(('write_date', "(now() at time zone 'UTC')"))
4100 # the list of tuples used in this formatting corresponds to
4101 # tuple(field_name, format, value)
4102 # In some cases, for example (id, create_date, write_date), we do not
4103 # need to read the third value of the tuple, because the real value is
4104 # encoded in the second value (the format).
4106 """INSERT INTO "%s" (%s) VALUES(%s) RETURNING id""" % (
4108 ', '.join('"%s"' % u[0] for u in updates),
4109 ', '.join(u[1] for u in updates)
4111 tuple([u[2] for u in updates if len(u) > 2])
4114 id_new, = cr.fetchone()
4115 recs = self.browse(cr, user, id_new, context)
4117 if self._parent_store and not context.get('defer_parent_store_computation'):
4119 self.pool._init_parent[self._name] = True
4121 parent = vals.get(self._parent_name, False)
4123 cr.execute('select parent_right from '+self._table+' where '+self._parent_name+'=%s order by '+(self._parent_order or self._order), (parent,))
4125 result_p = cr.fetchall()
4126 for (pleft,) in result_p:
4131 cr.execute('select parent_left from '+self._table+' where id=%s', (parent,))
4132 pleft_old = cr.fetchone()[0]
4135 cr.execute('select max(parent_right) from '+self._table)
4136 pleft = cr.fetchone()[0] or 0
4137 cr.execute('update '+self._table+' set parent_left=parent_left+2 where parent_left>%s', (pleft,))
4138 cr.execute('update '+self._table+' set parent_right=parent_right+2 where parent_right>%s', (pleft,))
4139 cr.execute('update '+self._table+' set parent_left=%s,parent_right=%s where id=%s', (pleft+1, pleft+2, id_new))
4140 recs.invalidate_cache(['parent_left', 'parent_right'])
4142 # invalidate and mark new-style fields to recompute; do this before
4143 # setting other fields, because it can require the value of computed
4144 # fields, e.g., a one2many checking constraints on records
4145 recs.modified([u[0] for u in updates])
4147 # call the 'set' method of fields which are not classic_write
4148 upd_todo.sort(lambda x, y: self._columns[x].priority-self._columns[y].priority)
4150 # default elements in context must be removed when calling a one2many or many2many
4151 rel_context = context.copy()
4152 for c in context.items():
4153 if c[0].startswith('default_'):
4154 del rel_context[c[0]]
4157 for field in upd_todo:
4158 result += self._columns[field].set(cr, self, id_new, field, vals[field], user, rel_context) or []
4160 # for recomputing new-style fields
4161 recs.modified(upd_todo)
4163 # check Python constraints
4164 recs._validate_fields(vals)
4166 if context.get('recompute', True):
4167 result += self._store_get_values(cr, user, [id_new],
4168 list(set(vals.keys() + self._inherits.values())),
4172 for order, model_name, ids, fields2 in result:
4173 if not (model_name, ids, fields2) in done:
4174 self.pool[model_name]._store_set_values(cr, user, ids, fields2, context)
4175 done.append((model_name, ids, fields2))
4176 # recompute new-style fields
4179 if self._log_create and context.get('recompute', True):
4180 message = self._description + \
4182 self.name_get(cr, user, [id_new], context=context)[0][1] + \
4183 "' " + _("created.")
4184 self.log(cr, user, id_new, message, True, context=context)
4186 self.check_access_rule(cr, user, [id_new], 'create', context=context)
4187 self.create_workflow(cr, user, [id_new], context=context)
4190 def _store_get_values(self, cr, uid, ids, fields, context):
4191 """Returns an ordered list of fields.function to call due to
4192 an update operation on ``fields`` of records with ``ids``,
4193 obtained by calling the 'store' triggers of these fields,
4194 as setup by their 'store' attribute.
4196 :return: [(priority, model_name, [record_ids,], [function_fields,])]
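For example (illustrative; model and field names are hypothetical), a write
on a line model could yield::

    [(10, 'sale.order', [1, 2], ['amount_total'])]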
4198 if fields is None: fields = []
4199 stored_functions = self.pool._store_function.get(self._name, [])
4201 # use indexed names for the details of the stored_functions:
4202 model_name_, func_field_to_compute_, target_ids_func_, trigger_fields_, priority_ = range(5)
4204 # only keep store triggers that should be triggered for the ``fields``
4206 triggers_to_compute = (
4207 f for f in stored_functions
4208 if not f[trigger_fields_] or set(fields).intersection(f[trigger_fields_])
4212 target_id_results = {}
4213 for store_trigger in triggers_to_compute:
4214 target_func_id_ = id(store_trigger[target_ids_func_])
4215 if target_func_id_ not in target_id_results:
4216 # use admin user for accessing objects having rules defined on store fields
4217 target_id_results[target_func_id_] = [i for i in store_trigger[target_ids_func_](self, cr, SUPERUSER_ID, ids, context) if i]
4218 target_ids = target_id_results[target_func_id_]
4220 # the compound key must consider the priority and model name
4221 key = (store_trigger[priority_], store_trigger[model_name_])
4222 for target_id in target_ids:
4223 to_compute_map.setdefault(key, {}).setdefault(target_id,set()).add(tuple(store_trigger))
4225 # Here to_compute_map looks like:
4226 # { (10, 'model_a') : { target_id1: [ (trigger_1_tuple, trigger_2_tuple) ], ... }
4227 # (20, 'model_a') : { target_id2: [ (trigger_3_tuple, trigger_4_tuple) ], ... }
4228 # (99, 'model_a') : { target_id1: [ (trigger_5_tuple, trigger_6_tuple) ], ... }
4231 # Now we need to generate the batch function calls list
4233 # { (10, 'model_a') : [(10, 'model_a', [record_ids,], [function_fields,])] }
4235 for ((priority,model), id_map) in to_compute_map.iteritems():
4236 trigger_ids_maps = {}
4237 # trigger_ids_maps =
4238 # { (trigger_1_tuple, trigger_2_tuple) : [target_id1, target_id2, ..] }
4239 for target_id, triggers in id_map.iteritems():
4240 trigger_ids_maps.setdefault(tuple(triggers), []).append(target_id)
4241 for triggers, target_ids in trigger_ids_maps.iteritems():
4242 call_map.setdefault((priority,model),[]).append((priority, model, target_ids,
4243 [t[func_field_to_compute_] for t in triggers]))
4246 result = reduce(operator.add, (call_map[k] for k in sorted(call_map)))
4249 def _store_set_values(self, cr, uid, ids, fields, context):
4250 """Calls the fields.function's "implementation function" for all ``fields``, on records with ``ids`` (taking care of
4251 respecting ``multi`` attributes), and stores the resulting values in the database directly."""
4256 if self._log_access:
4257 cr.execute('select id,write_date from '+self._table+' where id IN %s', (tuple(ids),))
4261 field_dict.setdefault(r[0], [])
4262 res_date = time.strptime((r[1])[:19], '%Y-%m-%d %H:%M:%S')
4263 write_date = datetime.datetime.fromtimestamp(time.mktime(res_date))
4264 for i in self.pool._store_function.get(self._name, []):
4266 up_write_date = write_date + datetime.timedelta(hours=i[5])
4267 if datetime.datetime.now() < up_write_date:
4269 field_dict[r[0]].append(i[1])
4275 if self._columns[f]._multi not in keys:
4276 keys.append(self._columns[f]._multi)
4277 todo.setdefault(self._columns[f]._multi, [])
4278 todo[self._columns[f]._multi].append(f)
4282 # use admin user for accessing objects having rules defined on store fields
4283 result = self._columns[val[0]].get(cr, self, ids, val, SUPERUSER_ID, context=context)
4284 for id, value in result.items():
4286 for f in value.keys():
4287 if f in field_dict[id]:
4294 if self._columns[v]._type == 'many2one':
4296 value[v] = value[v][0]
4299 upd0.append('"'+v+'"='+self._columns[v]._symbol_set[0])
4300 upd1.append(self._columns[v]._symbol_set[1](value[v]))
4303 cr.execute('update "' + self._table + '" set ' + \
4304 ','.join(upd0) + ' where id = %s', upd1)
4308 # use admin user for accessing objects having rules defined on store fields
4309 result = self._columns[f].get(cr, self, ids, f, SUPERUSER_ID, context=context)
4310 for r in result.keys():
4312 if r in field_dict.keys():
4313 if f in field_dict[r]:
4315 for id, value in result.items():
4316 if self._columns[f]._type == 'many2one':
4321 cr.execute('update "' + self._table + '" set ' + \
4322 '"'+f+'"='+self._columns[f]._symbol_set[0] + ' where id = %s', (self._columns[f]._symbol_set[1](value), id))
4324 # invalidate and mark new-style fields to recompute
4325 self.browse(cr, uid, ids, context).modified(fields)
4329 # TODO: improve handling of NULL values
4330 def _where_calc(self, cr, user, domain, active_test=True, context=None):
4331 """Computes the WHERE clause needed to implement an OpenERP domain.
4332 :param domain: the domain to compute
4334 :param active_test: whether the default filtering of records with ``active``
4335 field set to ``False`` should be applied.
4336 :return: the query expressing the given domain
4337 :rtype: osv.query.Query
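For example (illustrative), the domain ``[('name', '=', 'Foo')]`` on a model
stored in table ``my_table`` yields a Query whose WHERE clause is roughly
``my_table."name" = %s`` with parameters ``['Foo']``.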
4342 # if the object has a field named 'active', filter out all inactive
4343 # records unless they were explicitly asked for
4344 if 'active' in self._all_columns and (active_test and context.get('active_test', True)):
4346 # the item[0] trick below works for domain items and '&'/'|'/'!'
4348 if not any(item[0] == 'active' for item in domain):
4349 domain.insert(0, ('active', '=', 1))
4351 domain = [('active', '=', 1)]
4354 e = expression.expression(cr, user, domain, self, context)
4355 tables = e.get_tables()
4356 where_clause, where_params = e.to_sql()
4357 where_clause = where_clause and [where_clause] or []
4359 where_clause, where_params, tables = [], [], ['"%s"' % self._table]
4361 return Query(tables, where_clause, where_params)
4363 def _check_qorder(self, word):
4364 if not regex_order.match(word):
4365 raise except_orm(_('AccessError'), _('Invalid "order" specified. A valid "order" specification is a comma-separated list of valid field names (optionally followed by asc/desc for the direction)'))
4368 def _apply_ir_rules(self, cr, uid, query, mode='read', context=None):
4369 """Add what's missing in ``query`` to implement all appropriate ir.rules
4370 (using the ``model_name``'s rules or the current model's rules if ``model_name`` is None)
4372 :param query: the current query object
4374 if uid == SUPERUSER_ID:
4377 def apply_rule(added_clause, added_params, added_tables, parent_model=None):
4378 """ :param parent_model: name of the parent model, if the added
4379 clause comes from a parent model
4383 # as inherited rules are being applied, we need to add the missing JOIN
4384 # to reach the parent table (if it was not JOINed yet in the query)
4385 parent_alias = self._inherits_join_add(self, parent_model, query)
4386 # inherited rules are applied on the external table -> need to get the alias and replace
4387 parent_table = self.pool[parent_model]._table
4388 added_clause = [clause.replace('"%s"' % parent_table, '"%s"' % parent_alias) for clause in added_clause]
4389 # change references to parent_table to parent_alias, because we now use the alias to refer to the table
4391 for table in added_tables:
4392 # table is just a table name -> switch to the full alias
4393 if table == '"%s"' % parent_table:
4394 new_tables.append('"%s" as "%s"' % (parent_table, parent_alias))
4395 # table is already a full statement -> replace references to the table with its alias (consistent with the way aliases are generated)
4397 new_tables.append(table.replace('"%s"' % parent_table, '"%s"' % parent_alias))
4398 added_tables = new_tables
4399 query.where_clause += added_clause
4400 query.where_clause_params += added_params
4401 for table in added_tables:
4402 if table not in query.tables:
4403 query.tables.append(table)
4407 # apply main rules on the object
4408 rule_obj = self.pool.get('ir.rule')
4409 rule_where_clause, rule_where_clause_params, rule_tables = rule_obj.domain_get(cr, uid, self._name, mode, context=context)
4410 apply_rule(rule_where_clause, rule_where_clause_params, rule_tables)
4412 # apply ir.rules from the parents (through _inherits)
4413 for inherited_model in self._inherits:
4414 rule_where_clause, rule_where_clause_params, rule_tables = rule_obj.domain_get(cr, uid, inherited_model, mode, context=context)
4415 apply_rule(rule_where_clause, rule_where_clause_params, rule_tables,
4416 parent_model=inherited_model)
4418 def _generate_m2o_order_by(self, order_field, query):
4420 Add possibly missing JOIN to ``query`` and generate the ORDER BY clause for m2o fields,
4421 either native m2o fields or function/related fields that are stored, including
4422 intermediate JOINs for inheritance if required.
4424 :return: the qualified field name to use in an ORDER BY clause to sort by ``order_field``
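For example (illustrative only), ordering by a many2one field such as
``country_id`` returns a name qualified by the alias of the joined
destination table, roughly of the form ``"<alias>"."name"``.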
4426 if order_field not in self._columns and order_field in self._inherit_fields:
4427 # also add missing joins for reaching the table containing the m2o field
4428 qualified_field = self._inherits_join_calc(order_field, query)
4429 order_field_column = self._inherit_fields[order_field][2]
4431 qualified_field = '"%s"."%s"' % (self._table, order_field)
4432 order_field_column = self._columns[order_field]
4434 assert order_field_column._type == 'many2one', 'Invalid field passed to _generate_m2o_order_by()'
4435 if not order_field_column._classic_write and not getattr(order_field_column, 'store', False):
4436 _logger.debug("Many2one function/related fields must be stored " \
4437 "to be used as ordering fields! Ignoring sorting for %s.%s",
4438 self._name, order_field)
4441 # figure out the applicable order_by for the m2o
4442 dest_model = self.pool[order_field_column._obj]
4443 m2o_order = dest_model._order
4444 if not regex_order.match(m2o_order):
4445 # _order is complex, can't use it here, so we default to _rec_name
4446 m2o_order = dest_model._rec_name
4448 # extract the field names, to be able to qualify them and add desc/asc
4450 for order_part in m2o_order.split(","):
4451 m2o_order_list.append(order_part.strip().split(" ", 1)[0].strip())
4452 m2o_order = m2o_order_list
4454 # Join the dest m2o table if it's not joined yet. We use [LEFT] OUTER join here
4455 # as we don't want to exclude results that have NULL values for the m2o
4456 src_table, src_field = qualified_field.replace('"', '').split('.', 1)
4457 dst_alias, dst_alias_statement = query.add_join((src_table, dest_model._table, src_field, 'id', src_field), implicit=False, outer=True)
4458 qualify = lambda field: '"%s"."%s"' % (dst_alias, field)
4459 return map(qualify, m2o_order) if isinstance(m2o_order, list) else qualify(m2o_order)
4461 def _generate_order_by(self, order_spec, query):
4463 Attempt to construct an appropriate ORDER BY clause based on order_spec, which must be
4464 a comma-separated list of valid field names, optionally followed by an ASC or DESC direction.
4466 :raise: except_orm in case order_spec is malformed
4468 order_by_clause = ''
4469 order_spec = order_spec or self._order
4471 order_by_elements = []
4472 self._check_qorder(order_spec)
4473 for order_part in order_spec.split(','):
4474 order_split = order_part.strip().split(' ')
4475 order_field = order_split[0].strip()
4476 order_direction = order_split[1].strip() if len(order_split) == 2 else ''
4479 if order_field == 'id':
4480 order_by_elements.append('"%s"."%s" %s' % (self._table, order_field, order_direction))
4481 elif order_field in self._columns:
4482 order_column = self._columns[order_field]
4483 if order_column._classic_read:
4484 inner_clause = '"%s"."%s"' % (self._table, order_field)
4485 elif order_column._type == 'many2one':
4486 inner_clause = self._generate_m2o_order_by(order_field, query)
4488 continue # ignore non-readable or "non-joinable" fields
4489 elif order_field in self._inherit_fields:
4490 parent_obj = self.pool[self._inherit_fields[order_field][3]]
4491 order_column = parent_obj._columns[order_field]
4492 if order_column._classic_read:
4493 inner_clause = self._inherits_join_calc(order_field, query)
4494 elif order_column._type == 'many2one':
4495 inner_clause = self._generate_m2o_order_by(order_field, query)
4497 continue # ignore non-readable or "non-joinable" fields
4499 raise ValueError(_("Sorting field %s not found on model %s") % (order_field, self._name))
4500 if order_column and order_column._type == 'boolean':
4501 inner_clause = "COALESCE(%s, false)" % inner_clause
4503 if isinstance(inner_clause, list):
4504 for clause in inner_clause:
4505 order_by_elements.append("%s %s" % (clause, order_direction))
4507 order_by_elements.append("%s %s" % (inner_clause, order_direction))
4508 if order_by_elements:
4509 order_by_clause = ",".join(order_by_elements)
4511 return order_by_clause and (' ORDER BY %s ' % order_by_clause) or ''
4513 def _search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False, access_rights_uid=None):
4515 Private implementation of search() method, allowing the caller to specify the uid to use for the access right check.
4516 This is useful for example when filling in the selection list for a drop-down and avoiding access rights errors,
4517 by specifying ``access_rights_uid=1`` to bypass access rights check, but not ir.rules!
4518 This is ok at the security level because this method is private and not callable through XML-RPC.
4520 :param access_rights_uid: optional user ID to use when checking access rights
4521 (not for ir.rules, this is only for ir.model.access)
4525 self.check_access_rights(cr, access_rights_uid or user, 'read')
4527 # For transient models, restrict access to the current user, except for the super-user
4528 if self.is_transient() and self._log_access and user != SUPERUSER_ID:
4529 args = expression.AND(([('create_uid', '=', user)], args or []))
4531 query = self._where_calc(cr, user, args, context=context)
4532 self._apply_ir_rules(cr, user, query, 'read', context=context)
4533 order_by = self._generate_order_by(order, query)
4534 from_clause, where_clause, where_clause_params = query.get_sql()
4536 where_str = where_clause and (" WHERE %s" % where_clause) or ''
4539 # Ignore order, limit and offset when just counting, they don't make sense and could
4540 # hurt performance
4541 query_str = 'SELECT count(1) FROM ' + from_clause + where_str
4542 cr.execute(query_str, where_clause_params)
4546 limit_str = limit and ' limit %d' % limit or ''
4547 offset_str = offset and ' offset %d' % offset or ''
4548 query_str = 'SELECT "%s".id FROM ' % self._table + from_clause + where_str + order_by + limit_str + offset_str
4549 cr.execute(query_str, where_clause_params)
4552 # TDE note: with auto_join, we could have several lines about the same result
4553 # i.e. a lead with several unread messages; we uniquify the result using
4554 # a fast way to do it while preserving order (http://www.peterbe.com/plog/uniqifiers-benchmark)
4555 def _uniquify_list(seq):
4557 return [x for x in seq if x not in seen and not seen.add(x)]
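# e.g. _uniquify_list([3, 1, 3, 2, 1]) -> [3, 1, 2]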
4559 return _uniquify_list([x[0] for x in res])
4561 # returns the different values ever entered for one field
4562 # this is used, for example, in the client when the user hits enter on
4563 # a char field
4564 def distinct_field_get(self, cr, uid, field, value, args=None, offset=0, limit=None):
4567 if field in self._inherit_fields:
4568 return self.pool[self._inherit_fields[field][0]].distinct_field_get(cr, uid, field, value, args, offset, limit)
4570 return self._columns[field].search(cr, self, args, field, value, offset, limit, uid)
4572 def copy_data(self, cr, uid, id, default=None, context=None):
4574 Copy given record's data with all its field values
4576 :param cr: database cursor
4577 :param uid: current user id
4578 :param id: id of the record to copy
4579 :param default: field values to override in the original values of the copied record
4580 :type default: dictionary
4581 :param context: context arguments, like lang, time zone
4582 :type context: dictionary
4583 :return: dictionary containing all the field values
4589 # avoid recursion through already copied records in case of circular relationship
4590 seen_map = context.setdefault('__copy_data_seen', {})
4591 if id in seen_map.setdefault(self._name, []):
4593 seen_map[self._name].append(id)
4597 if 'state' not in default:
4598 if 'state' in self._defaults:
4599 if callable(self._defaults['state']):
4600 default['state'] = self._defaults['state'](self, cr, uid, context)
4602 default['state'] = self._defaults['state']
4604 # build a black list of fields that should not be copied
4605 blacklist = set(MAGIC_COLUMNS + ['parent_left', 'parent_right'])
4606 def blacklist_given_fields(obj):
4607 # blacklist the fields that are given by inheritance
4608 for other, field_to_other in obj._inherits.items():
4609 blacklist.add(field_to_other)
4610 if field_to_other in default:
4611 # all the fields of 'other' are given by the record: default[field_to_other],
4612 # except the ones redefined in self
4613 blacklist.update(set(self.pool[other]._all_columns) - set(self._columns))
4615 blacklist_given_fields(self.pool[other])
4616 # blacklist deprecated fields
4617 for name, field in obj._columns.items():
if field.deprecated:
    blacklist.add(name)
4621 blacklist_given_fields(self)
fields_to_copy = dict((f, fi) for f, fi in self._all_columns.iteritems()
                      if fi.column._type != 'function'
                      if f not in default
                      if f not in blacklist)
data = self.read(cr, uid, [id], fields_to_copy.keys(), context=context)
if data:
    data = data[0]
else:
    raise IndexError(_("Record #%d of %s not found, cannot copy!") % (id, self._name))

res = dict(default)
4636 for f, colinfo in fields_to_copy.iteritems():
4637 field = colinfo.column
4638 if field._type == 'many2one':
4639 res[f] = data[f] and data[f][0]
4640 elif field._type == 'one2many':
4641 other = self.pool[field._obj]
4642 # duplicate following the order of the ids because we'll rely on
4643 # it later for copying translations in copy_translation()!
4644 lines = [other.copy_data(cr, uid, line_id, context=context) for line_id in sorted(data[f])]
4645 # the lines are duplicated using the wrong (old) parent, but then
4646 # are reassigned to the correct one thanks to the (0, 0, ...)
4647 res[f] = [(0, 0, line) for line in lines if line]
elif field._type == 'many2many':
    res[f] = [(6, 0, data[f])]
else:
    res[f] = data[f]

return res
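# Illustrative result shape (hypothetical field names, not from the original
# source): for a record with a one2many 'order_line' and a many2many
# 'tag_ids', copy_data() returns values shaped like:
#
#     {'name': 'SO001',
#      'order_line': [(0, 0, {...}), (0, 0, {...})],   # re-create the lines
#      'tag_ids': [(6, 0, [1, 2, 3])]}                 # re-link the same tags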
def copy_translations(self, cr, uid, old_id, new_id, context=None):
    if context is None:
        context = {}
4659 # avoid recursion through already copied records in case of circular relationship
4660 seen_map = context.setdefault('__copy_translations_seen',{})
if old_id in seen_map.setdefault(self._name, []):
    return
seen_map[self._name].append(old_id)
4665 trans_obj = self.pool.get('ir.translation')
4666 # TODO it seems fields_get can be replaced by _all_columns (no need for translation)
4667 fields = self.fields_get(cr, uid, context=context)
4669 for field_name, field_def in fields.items():
4670 # removing the lang to compare untranslated values
4671 context_wo_lang = dict(context, lang=None)
4672 old_record, new_record = self.browse(cr, uid, [old_id, new_id], context=context_wo_lang)
4673 # we must recursively copy the translations for o2o and o2m
4674 if field_def['type'] == 'one2many':
4675 target_obj = self.pool[field_def['relation']]
4676 # here we rely on the order of the ids to match the translations
4677 # as foreseen in copy_data()
4678 old_children = sorted(r.id for r in old_record[field_name])
4679 new_children = sorted(r.id for r in new_record[field_name])
4680 for (old_child, new_child) in zip(old_children, new_children):
4681 target_obj.copy_translations(cr, uid, old_child, new_child, context=context)
4682 # and for translatable fields we keep them for copy
4683 elif field_def.get('translate'):
4684 if field_name in self._columns:
trans_name = self._name + "," + field_name
target_id = new_id
source_id = old_id
4688 elif field_name in self._inherit_fields:
4689 trans_name = self._inherit_fields[field_name][0] + "," + field_name
4690 # get the id of the parent record to set the translation
4691 inherit_field_name = self._inherit_fields[field_name][1]
4692 target_id = new_record[inherit_field_name].id
4693 source_id = old_record[inherit_field_name].id
else:
    continue

trans_ids = trans_obj.search(cr, uid, [
4698 ('name', '=', trans_name),
    ('res_id', '=', source_id)
])
4701 user_lang = context.get('lang')
4702 for record in trans_obj.read(cr, uid, trans_ids, context=context):
del record['id']
# remove source to avoid triggering _set_src
del record['source']
4706 record.update({'res_id': target_id})
4707 if user_lang and user_lang == record['lang']:
4708 # 'source' to force the call to _set_src
4709 # 'value' needed if value is changed in copy(), want to see the new_value
4710 record['source'] = old_record[field_name]
4711 record['value'] = new_record[field_name]
4712 trans_obj.create(cr, uid, record, context=context)
4714 @api.returns('self', lambda value: value.id)
4715 def copy(self, cr, uid, id, default=None, context=None):
4716 """ copy(default=None)
4718 Duplicate record with given id updating it with default values
4720 :param dict default: dictionary of field values to override in the
original values of the copied record, e.g.: ``{'field_name': overridden_value, ...}``
4722 :returns: new record
"""
if context is None:
    context = {}
context = context.copy()
4728 data = self.copy_data(cr, uid, id, default, context)
4729 new_id = self.create(cr, uid, data, context)
self.copy_translations(cr, uid, id, new_id, context)
return new_id
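# Minimal usage sketch (the model and id are hypothetical, not from the
# original source):
#
#     partner_obj = self.pool['res.partner']
#     new_id = partner_obj.copy(cr, uid, 42, default={'name': 'Copy of X'},
#                               context=context)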
4734 @api.returns('self')
4736 """ exists() -> records
4738 Returns the subset of records in `self` that exist, and marks deleted
records as such in cache. It can be used as a test on records::

    if record.exists():
        ...

4744 By convention, new records are returned as existing.
"""
ids = filter(None, self._ids)           # ids to check in database
if not ids:
    return self
4749 query = """SELECT id FROM "%s" WHERE id IN %%s""" % self._table
4750 self._cr.execute(query, (ids,))
4751 ids = ([r[0] for r in self._cr.fetchall()] + # ids in database
4752 [id for id in self._ids if not id]) # new ids
4753 existing = self.browse(ids)
4754 if len(existing) < len(self):
4755 # mark missing records in cache with a failed value
4756 exc = MissingError(_("Record does not exist or has been deleted."))
(self - existing)._cache.update(FailedValue(exc))
return existing
4760 def check_recursion(self, cr, uid, ids, context=None, parent=None):
_logger.warning("You are using deprecated %s.check_recursion(). Please use '_check_recursion()' instead!" % self._name)
4763 assert parent is None or parent in self._columns or parent in self._inherit_fields,\
4764 "The 'parent' parameter passed to check_recursion() must be None or a valid field name"
4765 return self._check_recursion(cr, uid, ids, context, parent)
4767 def _check_recursion(self, cr, uid, ids, context=None, parent=None):
4769 Verifies that there is no loop in a hierarchical structure of records,
4770 by following the parent relationship using the **parent** field until a loop
4771 is detected or until a top-level record is found.
4773 :param cr: database cursor
4774 :param uid: current user id
4775 :param ids: list of ids of records to check
4776 :param parent: optional parent field name (default: ``self._parent_name = parent_id``)
4777 :return: **True** if the operation can proceed safely, or **False** if an infinite loop is detected.
"""
if not parent:
    parent = self._parent_name

# must ignore 'active' flag, ir.rules, etc. => direct SQL query
query = 'SELECT "%s" FROM "%s" WHERE id = %%s' % (parent, self._table)
for id in ids:
    current_id = id
    while current_id is not None:
        cr.execute(query, (current_id,))
        result = cr.fetchone()
        current_id = result[0] if result else None
        if current_id == id:
            return False
return True
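# Illustrative wiring (hedged sketch; the model, message and field name are
# hypothetical): hierarchical models typically expose this check through
# _constraints, e.g.
#
#     _constraints = [
#         (osv.osv._check_recursion,
#          'Error! You cannot create recursive categories.', ['parent_id']),
#     ]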
4794 def _check_m2m_recursion(self, cr, uid, ids, field_name):
Verifies that there is no loop in a graph of records, by following
the many2many relationship given by **field_name** until a loop is
detected or until no further neighbours are found.
4800 :param cr: database cursor
4801 :param uid: current user id
4802 :param ids: list of ids of records to check
4803 :param field_name: field to check
4804 :return: **True** if the operation can proceed safely, or **False** if an infinite loop is detected.
"""
field = self._all_columns.get(field_name)
4808 field = field.column if field else None
4809 if not field or field._type != 'many2many' or field._obj != self._name:
4810 # field must be a many2many on itself
4811 raise ValueError('invalid field_name: %r' % (field_name,))
query = 'SELECT distinct "%s" FROM "%s" WHERE "%s" IN %%s' % (field._id2, field._rel, field._id1)
ids_parent = ids[:]
while ids_parent:
    ids_parent2 = []
    for i in range(0, len(ids_parent), cr.IN_MAX):
        j = i + cr.IN_MAX
        sub_ids_parent = ids_parent[i:j]
        cr.execute(query, (tuple(sub_ids_parent),))
        ids_parent2.extend(filter(None, map(lambda x: x[0], cr.fetchall())))
    ids_parent = ids_parent2
    for i in ids_parent:
        if i in ids:
            return False
return True
4828 def _get_external_ids(self, cr, uid, ids, *args, **kwargs):
4829 """Retrieve the External ID(s) of any database record.
**Synopsis**: ``_get_external_ids(cr, uid, ids) -> { 'id': ['module.xml_id'] }``
4833 :return: map of ids to the list of their fully qualified External IDs
4834 in the form ``module.key``, or an empty list when there's no External
4835 ID for a record, e.g.::
{ 'id': ['module.ext_id', 'module.ext_id_bis'],
  'id2': [] }
"""
ir_model_data = self.pool.get('ir.model.data')
4841 data_ids = ir_model_data.search(cr, uid, [('model', '=', self._name), ('res_id', 'in', ids)])
4842 data_results = ir_model_data.read(cr, uid, data_ids, ['module', 'name', 'res_id'])
# can't use dict.fromkeys() as the list would be shared!
result = dict((id, []) for id in ids)
for record in data_results:
    result[record['res_id']].append('%(module)s.%(name)s' % record)
return result
4851 def get_external_id(self, cr, uid, ids, *args, **kwargs):
4852 """Retrieve the External ID of any database record, if there
4853 is one. This method works as a possible implementation
4854 for a function field, to be able to add it to any
4855 model object easily, referencing it as ``Model.get_external_id``.
4857 When multiple External IDs exist for a record, only one
4858 of them is returned (randomly).
4860 :return: map of ids to their fully qualified XML ID,
4861 defaulting to an empty string when there's none
4862 (to be usable as a function field),
{ 'id': 'module.ext_id',
  'id2': '' }
"""
results = self._get_xml_ids(cr, uid, ids)
for k, v in results.iteritems():
    if results[k]:
        results[k] = v[0]
    else:
        results[k] = ''
return results
4876 # backwards compatibility
4877 get_xml_id = get_external_id
4878 _get_xml_ids = _get_external_ids
4880 def print_report(self, cr, uid, ids, name, data, context=None):
4882 Render the report `name` for the given IDs. The report must be defined
4883 for this model, not another.
"""
report = self.pool['ir.actions.report.xml']._lookup_report(cr, name)
4886 assert self._name == report.table
4887 return report.create(cr, uid, ids, data, context)
@classmethod
def is_transient(cls):
4892 """ Return whether the model is transient.
4894 See :class:`TransientModel`.
"""
return cls._transient
4899 def _transient_clean_rows_older_than(self, cr, seconds):
4900 assert self._transient, "Model %s is not transient, it cannot be vacuumed!" % self._name
4901 # Never delete rows used in last 5 minutes
4902 seconds = max(seconds, 300)
4903 query = ("SELECT id FROM " + self._table + " WHERE"
4904 " COALESCE(write_date, create_date, (now() at time zone 'UTC'))::timestamp"
4905 " < ((now() at time zone 'UTC') - interval %s)")
4906 cr.execute(query, ("%s seconds" % seconds,))
4907 ids = [x[0] for x in cr.fetchall()]
4908 self.unlink(cr, SUPERUSER_ID, ids)
4910 def _transient_clean_old_rows(self, cr, max_count):
4911 # Check how many rows we have in the table
cr.execute("SELECT count(*) AS row_count FROM " + self._table)
res = cr.fetchall()
4914 if res[0][0] <= max_count:
4915 return # max not reached, nothing to do
4916 self._transient_clean_rows_older_than(cr, 300)
4918 def _transient_vacuum(self, cr, uid, force=False):
4919 """Clean the transient records.
4921 This unlinks old records from the transient model tables whenever the
4922 "_transient_max_count" or "_max_age" conditions (if any) are reached.
4923 Actual cleaning will happen only once every "_transient_check_time" calls.
This means this method can be called frequently (e.g. whenever
a new record is created).
4926 Example with both max_hours and max_count active:
Suppose max_hours = 0.2 (i.e. 12 minutes), max_count = 20, and there are 55 rows in the
table: 10 created/changed in the last 5 minutes, an additional 12 created/changed between
5 and 10 minutes ago, and the rest created/changed more than 12 minutes ago.
4930 - age based vacuum will leave the 22 rows created/changed in the last 12 minutes
4931 - count based vacuum will wipe out another 12 rows. Not just 2, otherwise each addition
4932 would immediately cause the maximum to be reached again.
4933 - the 10 rows that have been created/changed the last 5 minutes will NOT be deleted
"""
assert self._transient, "Model %s is not transient, it cannot be vacuumed!" % self._name
4936 _transient_check_time = 20 # arbitrary limit on vacuum executions
4937 self._transient_check_count += 1
4938 if not force and (self._transient_check_count < _transient_check_time):
4939 return True # no vacuum cleaning this time
4940 self._transient_check_count = 0
4942 # Age-based expiration
4943 if self._transient_max_hours:
4944 self._transient_clean_rows_older_than(cr, self._transient_max_hours * 60 * 60)
4946 # Count-based expiration
4947 if self._transient_max_count:
4948 self._transient_clean_old_rows(cr, self._transient_max_count)
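# Illustrative configuration (hedged sketch; the wizard model is
# hypothetical): a TransientModel opts into both expiration policies via
# class attributes, e.g.
#
#     class my_wizard(TransientModel):
#         _name = 'my.wizard'
#         _transient_max_count = 200    # count-based expiration
#         _transient_max_hours = 1.0    # age-based expiration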
4952 def resolve_2many_commands(self, cr, uid, field_name, commands, fields=None, context=None):
4953 """ Serializes one2many and many2many commands into record dictionaries
4954 (as if all the records came from the database via a read()). This
4955 method is aimed at onchange methods on one2many and many2many fields.
4957 Because commands might be creation commands, not all record dicts
4958 will contain an ``id`` field. Commands matching an existing record
4959 will have an ``id``.
4961 :param field_name: name of the one2many or many2many field matching the commands
4962 :type field_name: str
4963 :param commands: one2many or many2many commands to execute on ``field_name``
4964 :type commands: list((int|False, int|False, dict|False))
4965 :param fields: list of fields to read from the database, when applicable
4966 :type fields: list(str)
4967 :returns: records in a shape similar to that returned by ``read()``
4968 (except records may be missing the ``id`` field if they don't exist in db)
"""
result = []         # result (list of dict)
4972 record_ids = [] # ids of records to read
4973 updates = {} # {id: dict} of updates on particular records
4975 for command in commands or []:
4976 if not isinstance(command, (list, tuple)):
4977 record_ids.append(command)
4978 elif command[0] == 0:
4979 result.append(command[2])
4980 elif command[0] == 1:
4981 record_ids.append(command[1])
4982 updates.setdefault(command[1], {}).update(command[2])
4983 elif command[0] in (2, 3):
4984 record_ids = [id for id in record_ids if id != command[1]]
4985 elif command[0] == 4:
4986 record_ids.append(command[1])
4987 elif command[0] == 5:
4988 result, record_ids = [], []
4989 elif command[0] == 6:
4990 result, record_ids = [], list(command[2])
4992 # read the records and apply the updates
4993 other_model = self.pool[self._all_columns[field_name].column._obj]
4994 for record in other_model.read(cr, uid, record_ids, fields=fields, context=context):
4995 record.update(updates.get(record['id'], {}))
result.append(record)

return result
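# Illustration (hedged sketch; field name and ids are hypothetical): creation
# commands pass through as-is, while links are read back from the database,
#
#     model.resolve_2many_commands(cr, uid, 'line_ids',
#                                  [(0, 0, {'name': 'new'}), (4, 7, False)],
#                                  fields=['name'])
#     # -> [{'name': 'new'}, {'id': 7, 'name': u'existing line'}]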
5000 # for backward compatibility
5001 resolve_o2m_commands_to_record_dicts = resolve_2many_commands
5003 def search_read(self, cr, uid, domain=None, fields=None, offset=0, limit=None, order=None, context=None):
5005 Performs a ``search()`` followed by a ``read()``.
5007 :param cr: database cursor
:param uid: current user id
5009 :param domain: Search domain, see ``args`` parameter in ``search()``. Defaults to an empty domain that will match all records.
5010 :param fields: List of fields to read, see ``fields`` parameter in ``read()``. Defaults to all fields.
5011 :param offset: Number of records to skip, see ``offset`` parameter in ``search()``. Defaults to 0.
5012 :param limit: Maximum number of records to return, see ``limit`` parameter in ``search()``. Defaults to no limit.
5013 :param order: Columns to sort result, see ``order`` parameter in ``search()``. Defaults to no sort.
5014 :param context: context arguments.
5015 :return: List of dictionaries containing the asked fields.
5016 :rtype: List of dictionaries.
"""
record_ids = self.search(cr, uid, domain or [], offset=offset, limit=limit, order=order, context=context)
if not record_ids:
    return []
5023 if fields and fields == ['id']:
5024 # shortcut read if we only want the ids
5025 return [{'id': id} for id in record_ids]
5027 # read() ignores active_test, but it would forward it to any downstream search call
5028 # (e.g. for x2m or function fields), and this is not the desired behavior, the flag
5029 # was presumably only meant for the main search().
5030 # TODO: Move this to read() directly?
5031 read_ctx = dict(context or {})
5032 read_ctx.pop('active_test', None)
5034 result = self.read(cr, uid, record_ids, fields, context=read_ctx)
if len(result) <= 1:
    return result

# reorder read
5039 index = dict((r['id'], r) for r in result)
5040 return [index[x] for x in record_ids if x in index]
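# Minimal usage sketch (model, domain and field names are hypothetical, not
# from the original source):
#
#     partner_obj.search_read(cr, uid,
#                             domain=[('is_company', '=', True)],
#                             fields=['name'], limit=10, order='name')
#     # -> [{'id': 1, 'name': 'Agrolait'}, ...]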
5042 def _register_hook(self, cr):
5043 """ stuff to do right after the registry is built """
@classmethod
def _patch_method(cls, name, method):
5048 """ Monkey-patch a method for all instances of this model. This replaces
5049 the method called `name` by `method` in the given class.
5050 The original method is then accessible via ``method.origin``, and it
5051 can be restored with :meth:`~._revert_method`.
Example::

    def do_write(self, values):
        # do stuff, and call the original method
        return do_write.origin(self, values)

    # patch method write of model
    model._patch_method('write', do_write)

    # this will call do_write
    records = model.search([...])
    records.write(...)

    # restore the original method
    model._revert_method('write')
"""
origin = getattr(cls, name)
5071 method.origin = origin
5072 # propagate decorators from origin to method, and apply api decorator
5073 wrapped = api.guess(api.propagate(origin, method))
5074 wrapped.origin = origin
5075 setattr(cls, name, wrapped)
@classmethod
def _revert_method(cls, name):
5079 """ Revert the original method called `name` in the given class.
5080 See :meth:`~._patch_method`.
"""
method = getattr(cls, name)
5083 setattr(cls, name, method.origin)
5088 # An instance represents an ordered collection of records in a given
5089 # execution environment. The instance object refers to the environment, and
5090 # the records themselves are represented by their cache dictionary. The 'id'
5091 # of each record is found in its corresponding cache dictionary.
5093 # This design has the following advantages:
5094 # - cache access is direct and thus fast;
5095 # - one can consider records without an 'id' (see new records);
5096 # - the global cache is only an index to "resolve" a record 'id'.
@classmethod
def _browse(cls, env, ids):
5101 """ Create an instance attached to `env`; `ids` is a tuple of record
5104 records = object.__new__(cls)
5107 env.prefetch[cls._name].update(ids)
@api.v7
def browse(self, cr, uid, arg=None, context=None):
5112 ids = _normalize_ids(arg)
5113 #assert all(isinstance(id, IdType) for id in ids), "Browsing invalid ids: %s" % ids
5114 return self._browse(Environment(cr, uid, context or {}), ids)
@api.v8
def browse(self, arg=None):
5118 """ browse([ids]) -> records
5120 Returns a recordset for the ids provided as parameter in the current
5123 Can take no ids, a single id or a sequence of ids.
"""
ids = _normalize_ids(arg)
5126 #assert all(isinstance(id, IdType) for id in ids), "Browsing invalid ids: %s" % ids
5127 return self._browse(self.env, ids)
5130 # Internal properties, for manipulating the instance's implementation
5135 """ List of actual record ids in this recordset (ignores placeholder
5136 ids for records to create)
5138 return filter(None, list(self._ids))
5140 # backward-compatibility with former browse records
5141 _cr = property(lambda self: self.env.cr)
5142 _uid = property(lambda self: self.env.uid)
5143 _context = property(lambda self: self.env.context)
5146 # Conversion methods
5149 def ensure_one(self):
5150 """ Verifies that the current recorset holds a single record. Raises
5151 an exception otherwise.
5155 raise except_orm("ValueError", "Expected singleton: %s" % self)
5157 def with_env(self, env):
5158 """ Returns a new version of this recordset attached to the provided
5161 :type env: :class:`~openerp.api.Environment`
5163 return self._browse(env, self._ids)
5165 def sudo(self, user=SUPERUSER_ID):
5166 """ sudo([user=SUPERUSER])
Returns a new version of this recordset attached to the provided
user.
"""
5171 return self.with_env(self.env(user=user))
5173 def with_context(self, *args, **kwargs):
5174 """ with_context([context][, **overrides]) -> records
Returns a new version of this recordset attached to an extended
context.
5179 The extended context is either the provided ``context`` in which
5180 ``overrides`` are merged or the *current* context in which
5181 ``overrides`` are merged e.g.::
5183 # current context is {'key1': True}
5184 r2 = records.with_context({}, key2=True)
5185 # -> r2._context is {'key2': True}
5186 r2 = records.with_context(key2=True)
5187 # -> r2._context is {'key1': True, 'key2': True}
"""
context = dict(args[0] if args else self._context, **kwargs)
5190 return self.with_env(self.env(context=context))
5192 def _convert_to_cache(self, values, update=False, validate=True):
5193 """ Convert the `values` dictionary into cached values.
5195 :param update: whether the conversion is made for updating `self`;
5196 this is necessary for interpreting the commands of *2many fields
5197 :param validate: whether values must be checked
"""
fields = self._fields
5200 target = self if update else self.browse()
return {
    name: fields[name].convert_to_cache(value, target, validate=validate)
    for name, value in values.iteritems()
    if name in fields
}
5207 def _convert_to_write(self, values):
5208 """ Convert the `values` dictionary into the format of :meth:`write`. """
fields = self._fields
result = {}
for name, value in values.iteritems():
    if name in fields:
        value = fields[name].convert_to_write(value)
        if not isinstance(value, NewId):
            result[name] = value
return result
5219 # Record traversal and update
5222 def _mapped_func(self, func):
5223 """ Apply function `func` on all records in `self`, and return the
result as a list or a recordset (if `func` returns recordsets).
"""
5226 vals = [func(rec) for rec in self]
5227 val0 = vals[0] if vals else func(self)
if isinstance(val0, BaseModel):
    return reduce(operator.or_, vals, val0)
return vals
5232 def mapped(self, func):
5233 """ Apply `func` on all records in `self`, and return the result as a
list or a recordset (if `func` returns recordsets). In the latter
case, the order of the returned recordset is arbitrary.
5237 :param func: a function or a dot-separated sequence of field names
"""
if isinstance(func, basestring):
    recs = self
    for name in func.split('.'):
        recs = recs._mapped_func(operator.itemgetter(name))
    return recs
else:
    return self._mapped_func(func)
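# Illustrative calls (hypothetical field names, not from the original
# source):
#
#     records.mapped('name')              # -> list of names
#     records.mapped('partner_id')        # -> recordset of partners
#     records.mapped('partner_id.name')   # -> list of partner names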
5247 def _mapped_cache(self, name_seq):
5248 """ Same as `~.mapped`, but `name_seq` is a dot-separated sequence of
5249 field names, and only cached values are used.
"""
recs = self
for name in name_seq.split('.'):
5253 field = recs._fields[name]
5254 null = field.null(self.env)
recs = recs.mapped(lambda rec: rec._cache.get(field, null))
return recs
5258 def filtered(self, func):
5259 """ Select the records in `self` such that `func(rec)` is true, and
5260 return them as a recordset.
5262 :param func: a function or a dot-separated sequence of field names
"""
if isinstance(func, basestring):
    name = func
    func = lambda rec: filter(None, rec.mapped(name))
5267 return self.browse([rec.id for rec in self if func(rec)])
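# Illustration (hypothetical field names, not from the original source):
# both forms select a sub-recordset,
#
#     records.filtered(lambda r: r.company_id == user.company_id)
#     records.filtered('partner_id.is_company')   # keep truthy dotted path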
5269 def sorted(self, key=None):
5270 """ Return the recordset `self` ordered by `key` """
5272 return self.search([('id', 'in', self.ids)])
5274 return self.browse(map(int, sorted(self, key=key)))
5276 def update(self, values):
5277 """ Update record `self[0]` with `values`. """
for name, value in values.iteritems():
    self[name] = value
5282 # New records - represent records that do not exist in the database yet;
5283 # they are used to perform onchanges.
5287 def new(self, values={}):
5288 """ new([values]) -> record
5290 Return a new record instance attached to the current environment and
initialized with the provided ``values``. The record is *not* created
5292 in database, it only exists in memory.
"""
record = self.browse([NewId()])
5295 record._cache.update(record._convert_to_cache(values, update=True))
5297 if record.env.in_onchange:
5298 # The cache update does not set inverse fields, so do it manually.
5299 # This is useful for computing a function field on secondary
5300 # records, if that field depends on the main record.
for name in values:
    field = self._fields.get(name)
    if field:
        for invf in field.inverse_fields:
            invf._update(record[name], record)

return record
5310 # Dirty flag, to mark records modified (in draft mode)
5315 """ Return whether any record in `self` is dirty. """
5316 dirty = self.env.dirty
5317 return any(record in dirty for record in self)
@_dirty.setter
def _dirty(self, value):
    """ Mark the records in `self` as dirty. """
    if value:
        map(self.env.dirty.add, self)
    else:
        map(self.env.dirty.discard, self)
5331 def __nonzero__(self):
5332 """ Test whether `self` is nonempty. """
5333 return bool(getattr(self, '_ids', True))
5336 """ Return the size of `self`. """
5337 return len(self._ids)
5340 """ Return an iterator over `self`. """
5341 for id in self._ids:
5342 yield self._browse(self.env, (id,))
5344 def __contains__(self, item):
5345 """ Test whether `item` (record or field name) is an element of `self`.
5346 In the first case, the test is fully equivalent to::
5348 any(item == record for record in self)
"""
if isinstance(item, BaseModel) and self._name == item._name:
5351 return len(item) == 1 and item.id in self._ids
5352 elif isinstance(item, basestring):
5353 return item in self._fields
else:
    raise except_orm("ValueError", "Mixing apples and oranges: %s in %s" % (item, self))
5357 def __add__(self, other):
5358 """ Return the concatenation of two recordsets. """
5359 if not isinstance(other, BaseModel) or self._name != other._name:
5360 raise except_orm("ValueError", "Mixing apples and oranges: %s + %s" % (self, other))
5361 return self.browse(self._ids + other._ids)
5363 def __sub__(self, other):
5364 """ Return the recordset of all the records in `self` that are not in `other`. """
5365 if not isinstance(other, BaseModel) or self._name != other._name:
5366 raise except_orm("ValueError", "Mixing apples and oranges: %s - %s" % (self, other))
5367 other_ids = set(other._ids)
5368 return self.browse([id for id in self._ids if id not in other_ids])
5370 def __and__(self, other):
5371 """ Return the intersection of two recordsets.
5372 Note that recordset order is not preserved.
5374 if not isinstance(other, BaseModel) or self._name != other._name:
5375 raise except_orm("ValueError", "Mixing apples and oranges: %s & %s" % (self, other))
5376 return self.browse(set(self._ids) & set(other._ids))
5378 def __or__(self, other):
5379 """ Return the union of two recordsets.
5380 Note that recordset order is not preserved.
5382 if not isinstance(other, BaseModel) or self._name != other._name:
5383 raise except_orm("ValueError", "Mixing apples and oranges: %s | %s" % (self, other))
5384 return self.browse(set(self._ids) | set(other._ids))
5386 def __eq__(self, other):
5387 """ Test whether two recordsets are equivalent (up to reordering). """
5388 if not isinstance(other, BaseModel):
    if other:
        _logger.warning("Comparing apples and oranges: %s == %s", self, other)
    return False
5392 return self._name == other._name and set(self._ids) == set(other._ids)
5394 def __ne__(self, other):
5395 return not self == other
5397 def __lt__(self, other):
5398 if not isinstance(other, BaseModel) or self._name != other._name:
5399 raise except_orm("ValueError", "Mixing apples and oranges: %s < %s" % (self, other))
5400 return set(self._ids) < set(other._ids)
5402 def __le__(self, other):
5403 if not isinstance(other, BaseModel) or self._name != other._name:
5404 raise except_orm("ValueError", "Mixing apples and oranges: %s <= %s" % (self, other))
5405 return set(self._ids) <= set(other._ids)
5407 def __gt__(self, other):
5408 if not isinstance(other, BaseModel) or self._name != other._name:
5409 raise except_orm("ValueError", "Mixing apples and oranges: %s > %s" % (self, other))
5410 return set(self._ids) > set(other._ids)
5412 def __ge__(self, other):
5413 if not isinstance(other, BaseModel) or self._name != other._name:
5414 raise except_orm("ValueError", "Mixing apples and oranges: %s >= %s" % (self, other))
5415 return set(self._ids) >= set(other._ids)
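# Summary sketch of the set-like protocol above (r1, r2 are recordsets of
# the same model; added for illustration, not from the original source):
#
#     r1 + r2    # concatenation, order kept, duplicates preserved
#     r1 - r2    # records of r1 that are not in r2
#     r1 & r2    # intersection (order not preserved)
#     r1 | r2    # union (order not preserved)
#     r1 <= r2   # subset test, like sets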
5421 return "%s%s" % (self._name, getattr(self, '_ids', ""))
5423 def __unicode__(self):
5424 return unicode(str(self))
def __hash__(self):
    if hasattr(self, '_ids'):
5430 return hash((self._name, frozenset(self._ids)))
    else:
        return hash(self._name)
5434 def __getitem__(self, key):
5435 """ If `key` is an integer or a slice, return the corresponding record
5436 selection as an instance (attached to `self.env`).
5437 Otherwise read the field `key` of the first record in `self`.
Examples::

    inst = model.search(dom)    # inst is a recordset
    r4 = inst[3]                # fourth record in inst
    rs = inst[10:20]            # subset of inst
    nm = rs['name']             # name of first record in inst
"""
if isinstance(key, basestring):
5447 # important: one must call the field's getter
5448 return self._fields[key].__get__(self, type(self))
5449 elif isinstance(key, slice):
5450 return self._browse(self.env, self._ids[key])
else:
    return self._browse(self.env, (self._ids[key],))
5454 def __setitem__(self, key, value):
5455 """ Assign the field `key` to `value` in record `self`. """
5456 # important: one must call the field's setter
5457 return self._fields[key].__set__(self, value)
5460 # Cache and recomputation management
5465 """ Return the cache of `self`, mapping field names to values. """
5466 return RecordCache(self)
5469 def _in_cache_without(self, field):
5470 """ Make sure `self` is present in cache (for prefetching), and return
5471 the records of model `self` in cache that have no value for `field`
5472 (:class:`Field` instance).
"""
env = self.env
prefetch_ids = env.prefetch[self._name]
5476 prefetch_ids.update(self._ids)
5477 ids = filter(None, prefetch_ids - set(env.cache[field]))
5478 return self.browse(ids)
5482 """ Clear the records cache.
5485 The record cache is automatically invalidated.
5487 self.invalidate_cache()
5490 def invalidate_cache(self, fnames=None, ids=None):
5491 """ Invalidate the record caches after some records have been modified.
5492 If both `fnames` and `ids` are ``None``, the whole cache is cleared.
5494 :param fnames: the list of modified fields, or ``None`` for all fields
5495 :param ids: the list of modified record ids, or ``None`` for all
"""
if fnames is None:
    if ids is None:
        return self.env.invalidate_all()
    fields = self._fields.values()
else:
    fields = map(self._fields.__getitem__, fnames)
5504 # invalidate fields and inverse fields, too
5505 spec = [(f, ids) for f in fields] + \
5506 [(invf, None) for f in fields for invf in f.inverse_fields]
5507 self.env.invalidate(spec)
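# Illustrative call (field names and ids are hypothetical, not from the
# original source):
#
#     # drop the cached values of 'name' and 'email' for records 1, 2, 3
#     model.invalidate_cache(fnames=['name', 'email'], ids=[1, 2, 3])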
5510 def modified(self, fnames):
5511 """ Notify that fields have been modified on `self`. This invalidates
5512 the cache, and prepares the recomputation of stored function fields
5513 (new-style fields only).
:param fnames: iterable of field names that have been modified on
    records `self`
"""
5518 # each field knows what to invalidate and recompute
spec = []
for fname in fnames:
    spec += self._fields[fname].modified(self)

cached_fields = {
    field
    for env in self.env.all
    for field in env.cache
}
# invalidate non-stored fields.function which are currently cached
spec += [(f, None) for f in self.pool.pure_function_fields
         if f in cached_fields]
5532 self.env.invalidate(spec)
5534 def _recompute_check(self, field):
5535 """ If `field` must be recomputed on some record in `self`, return the
5536 corresponding records that must be recomputed.
5538 return self.env.check_todo(field, self)
5540 def _recompute_todo(self, field):
5541 """ Mark `field` to be recomputed. """
5542 self.env.add_todo(field, self)
5544 def _recompute_done(self, field):
5545 """ Mark `field` as recomputed. """
5546 self.env.remove_todo(field, self)
5549 def recompute(self):
5550 """ Recompute stored function fields. The fields and records to
5551 recompute have been determined by method :meth:`modified`.
"""
while self.env.has_todo():
5554 field, recs = self.env.get_todo()
5555 # evaluate the fields to recompute, and save them to database
5556 for rec, rec1 in zip(recs, recs.with_context(recompute=False)):
try:
    values = rec._convert_to_write({
        f.name: rec[f.name] for f in field.computed_fields
    })
    rec1._write(values)
except MissingError:
    pass
5564 # mark the computed fields as done
5565 map(recs._recompute_done, field.computed_fields)
5568 # Generic onchange method
5571 def _has_onchange(self, field, other_fields):
5572 """ Return whether `field` should trigger an onchange event in the
5573 presence of `other_fields`.
"""
# test whether self has an onchange method for field, or field is a
5576 # dependency of any field in other_fields
5577 return field.name in self._onchange_methods or \
5578 any(dep in other_fields for dep in field.dependents)
5581 def _onchange_spec(self, view_info=None):
5582 """ Return the onchange spec from a view description; if not given, the
5583 result of ``self.fields_view_get()`` is used.
"""
result = {}

# for traversing the XML arch and populating result
5588 def process(node, info, prefix):
5589 if node.tag == 'field':
5590 name = node.attrib['name']
5591 names = "%s.%s" % (prefix, name) if prefix else name
5592 if not result.get(names):
5593 result[names] = node.attrib.get('on_change')
5594 # traverse the subviews included in relational fields
5595 for subinfo in info['fields'][name].get('views', {}).itervalues():
5596 process(etree.fromstring(subinfo['arch']), subinfo, names)
else:
    for child in node:
        process(child, info, prefix)
5601 if view_info is None:
5602 view_info = self.fields_view_get()
process(etree.fromstring(view_info['arch']), view_info, '')
return result
5606 def _onchange_eval(self, field_name, onchange, result):
5607 """ Apply onchange method(s) for field `field_name` with spec `onchange`
5608 on record `self`. Value assignments are applied on `self`, while
5609 domain and warning messages are put in dictionary `result`.
"""
onchange = onchange.strip()
5614 if onchange in ("1", "true"):
5615 for method in self._onchange_methods.get(field_name, ()):
5616 method_res = method(self)
5619 if 'domain' in method_res:
5620 result.setdefault('domain', {}).update(method_res['domain'])
5621 if 'warning' in method_res:
5622 result['warning'] = method_res['warning']
    return

match = onchange_v7.match(onchange)
if match:
    method, params = match.groups()
5630 # evaluate params -> tuple
5631 global_vars = {'context': self._context, 'uid': self._uid}
5632 if self._context.get('field_parent'):
5633 class RawRecord(object):
5634 def __init__(self, record):
5635 self._record = record
5636 def __getattr__(self, name):
5637 field = self._record._fields[name]
5638 value = self._record[name]
5639 return field.convert_to_onchange(value)
5640 record = self[self._context['field_parent']]
5641 global_vars['parent'] = RawRecord(record)
field_vars = {
    key: self._fields[key].convert_to_onchange(val)
    for key, val in self._cache.iteritems()
}
params = eval("[%s]" % params, global_vars, field_vars)
5648 # call onchange method
5649 args = (self._cr, self._uid, self._origin.ids) + tuple(params)
5650 method_res = getattr(self._model, method)(*args)
if not isinstance(method_res, dict):
    return
5653 if 'value' in method_res:
5654 method_res['value'].pop('id', None)
5655 self.update(self._convert_to_cache(method_res['value'], validate=False))
5656 if 'domain' in method_res:
5657 result.setdefault('domain', {}).update(method_res['domain'])
5658 if 'warning' in method_res:
5659 result['warning'] = method_res['warning']
5662 def onchange(self, values, field_name, field_onchange):
5663 """ Perform an onchange on the given field.
5665 :param values: dictionary mapping field names to values, giving the
5666 current state of modification
:param field_name: name of the modified field
:param field_onchange: dictionary mapping field names to their
    on_change attribute
"""
env = self.env
if field_name and field_name not in self._fields:
    return {}
5676 # determine subfields for field.convert_to_write() below
secondary = []
subfields = defaultdict(set)
for dotname in field_onchange:
    if '.' in dotname:
        secondary.append(dotname)
        name, subname = dotname.split('.')
        subfields[name].add(subname)
5685 # create a new record with values, and attach `self` to it
5686 with env.do_in_onchange():
5687 record = self.new(values)
5688 values = dict(record._cache)
5689 # attach `self` with a different context (for cache consistency)
5690 record._origin = self.with_context(__onchange=True)
# determine which fields should trigger an onchange
todo = set([field_name]) if field_name else set(values)
done = set()

# dummy assignment: trigger invalidations on the record
for name in todo:
    value = record[name]
    field = self._fields[name]
    if not field_name and field.type == 'many2one' and field.delegate and not value:
        # do not nullify all fields of parent record for new records
        continue
    record[name] = value
5705 result = {'value': {}}
while todo:
    name = todo.pop()
    if name in done:
        continue
    done.add(name)

    with env.do_in_onchange():
5714 # apply field-specific onchange methods
5715 if field_onchange.get(name):
5716 record._onchange_eval(name, field_onchange[name], result)
5718 # force re-evaluation of function fields on secondary records
5719 for field_seq in secondary:
5720 record.mapped(field_seq)
5722 # determine which fields have been modified
for name, oldval in values.iteritems():
    field = self._fields[name]
    newval = record[name]
    if field.type in ('one2many', 'many2many'):
        if newval != oldval or newval._dirty:
            # put new value in result
            result['value'][name] = field.convert_to_write(
                newval, record._origin, subfields.get(name),
            )
            todo.add(name)
        else:
            # keep result: newval may have been dirty before
            pass
    else:
        if newval != oldval:
            # put new value in result
            result['value'][name] = field.convert_to_write(
                newval, record._origin, subfields.get(name),
            )
            todo.add(name)
        else:
            # clean up result to not return another value
            result['value'].pop(name, None)
5747 # At the moment, the client does not support updates on a *2many field
5748 # while this one is modified by the user.
5749 if field_name and self._fields[field_name].type in ('one2many', 'many2many'):
result['value'].pop(field_name, None)

return result
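# Illustrative call (field names and spec are hypothetical, not from the
# original source): the client sends the current form state plus the spec
# computed by _onchange_spec(), e.g.
#
#     spec = record._onchange_spec()              # {'partner_id': '1', ...}
#     res = record.onchange({'partner_id': 7, 'name': False},
#                           'partner_id', spec)
#     # res['value'] maps the modified field names to their new values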
5755 class RecordCache(MutableMapping):
5756 """ Implements a proxy dictionary to read/update the cache of a record.
5757 Upon iteration, it looks like a dictionary mapping field names to
5758 values. However, fields may be used as keys as well.
"""
def __init__(self, records):
5761 self._recs = records
5763 def contains(self, field):
5764 """ Return whether `records[0]` has a value for `field` in cache. """
5765 if isinstance(field, basestring):
5766 field = self._recs._fields[field]
5767 return self._recs.id in self._recs.env.cache[field]
5769 def __contains__(self, field):
5770 """ Return whether `records[0]` has a regular value for `field` in cache. """
5771 if isinstance(field, basestring):
5772 field = self._recs._fields[field]
5773 dummy = SpecialValue(None)
5774 value = self._recs.env.cache[field].get(self._recs.id, dummy)
5775 return not isinstance(value, SpecialValue)
5777 def __getitem__(self, field):
5778 """ Return the cached value of `field` for `records[0]`. """
5779 if isinstance(field, basestring):
5780 field = self._recs._fields[field]
5781 value = self._recs.env.cache[field][self._recs.id]
5782 return value.get() if isinstance(value, SpecialValue) else value
5784 def __setitem__(self, field, value):
5785 """ Assign the cached value of `field` for all records in `records`. """
5786 if isinstance(field, basestring):
5787 field = self._recs._fields[field]
5788 values = dict.fromkeys(self._recs._ids, value)
5789 self._recs.env.cache[field].update(values)
5791 def update(self, *args, **kwargs):
5792 """ Update the cache of all records in `records`. If the argument is a
5793 `SpecialValue`, update all fields (except "magic" columns).
5795 if args and isinstance(args[0], SpecialValue):
5796 values = dict.fromkeys(self._recs._ids, args[0])
    for name, field in self._recs._fields.iteritems():
        if name != 'id':
            self._recs.env.cache[field].update(values)
else:
    return super(RecordCache, self).update(*args, **kwargs)
5803 def __delitem__(self, field):
5804 """ Remove the cached value of `field` for all `records`. """
5805 if isinstance(field, basestring):
5806 field = self._recs._fields[field]
5807 field_cache = self._recs.env.cache[field]
5808 for id in self._recs._ids:
5809 field_cache.pop(id, None)
5812 """ Iterate over the field names with a regular value in cache. """
5813 cache, id = self._recs.env.cache, self._recs.id
5814 dummy = SpecialValue(None)
5815 for name, field in self._recs._fields.iteritems():
if name != 'id' and not isinstance(cache[field].get(id, dummy), SpecialValue):
    yield name
5820 """ Return the number of fields with a regular value in cache. """
5821 return sum(1 for name in self)
5823 class Model(BaseModel):
5824 """Main super-class for regular database-persisted OpenERP models.
OpenERP models are created by inheriting from this class::

    class user(Model):
        ...

5831 The system will later instantiate the class once per database (on
5832 which the class' module is installed).
"""
_auto = True
_register = False # not visible in ORM registry, meant to be python-inherited only
5836 _transient = False # True in a TransientModel
5838 class TransientModel(BaseModel):
5839 """Model super-class for transient records, meant to be temporarily
persisted, and regularly vacuum-cleaned.

A TransientModel has simplified access rights management:
all users can create new records, and may only access the
5843 all users can create new records, and may only access the
5844 records they created. The super-user has unrestricted access
5845 to all TransientModel records.
"""
_auto = True
_register = False # not visible in ORM registry, meant to be python-inherited only
_transient = True
5851 class AbstractModel(BaseModel):
5852 """Abstract Model super-class for creating an abstract class meant to be
5853 inherited by regular models (Models or TransientModels) but not meant to
5854 be usable on its own, or persisted.
5856 Technical note: we don't want to make AbstractModel the super-class of
5857 Model or BaseModel because it would not make sense to put the main
5858 definition of persistence methods such as create() in it, and still we
5859 should be able to override them within an AbstractModel.
"""
_auto = False # don't create any database backend for AbstractModels
5862 _register = False # not visible in ORM registry, meant to be python-inherited only
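# Illustration (hedged sketch; module code and model names are hypothetical):
# the base class alone decides how a model is persisted,
#
#     class res_partner(Model):            # regular, persisted table
#         _inherit = 'res.partner'
#
#     class my_wizard(TransientModel):     # vacuumed table
#         _name = 'my.wizard'
#
#     class my_mixin(AbstractModel):       # no table at all
#         _name = 'my.mixin'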
5865 def itemgetter_tuple(items):
5866 """ Fixes itemgetter inconsistency (useful in some cases) of not returning
a tuple if len(items) == 1: always returns an n-tuple where n = len(items)
"""
if len(items) == 0:
    return lambda a: ()
if len(items) == 1:
    return lambda gettable: (gettable[items[0]],)
5873 return operator.itemgetter(*items)
5875 def convert_pgerror_23502(model, fields, info, e):
5876 m = re.match(r'^null value in column "(?P<field>\w+)" violates '
r'not-null constraint\n',
str(e))
5879 field_name = m and m.group('field')
5880 if not m or field_name not in fields:
5881 return {'message': unicode(e)}
5882 message = _(u"Missing required value for the field '%s'.") % field_name
field = fields.get(field_name)
if field:
    message = _(u"Missing required value for the field '%s' (%s)") % (field['string'], field_name)
return {
    'message': message,
    'field': field_name,
}
5891 def convert_pgerror_23505(model, fields, info, e):
m = re.match(r'^duplicate key (?P<field>\w+) violates unique constraint',
             str(e))
field_name = m and m.group('field')
5895 if not m or field_name not in fields:
5896 return {'message': unicode(e)}
5897 message = _(u"The value for the field '%s' already exists.") % field_name
field = fields.get(field_name)
if field:
    message = _(u"%s This might be '%s' in the current model, or a field "
                u"of the same name in an o2m.") % (message, field['string'])
return {
    'message': message,
    'field': field_name,
}
5907 PGERROR_TO_OE = defaultdict(
5908 # shape of mapped converters
5909 lambda: (lambda model, fvg, info, pgerror: {'message': unicode(pgerror)}), {
5910 # not_null_violation
5911 '23502': convert_pgerror_23502,
5912 # unique constraint error
'23505': convert_pgerror_23505,
})
5916 def _normalize_ids(arg, atoms={int, long, str, unicode, NewId}):
5917 """ Normalizes the ids argument for ``browse`` (v7 and v8) to a tuple.
5919 Various implementations were tested on the corpus of all browse() calls
5920 performed during a full crawler run (after having installed all website_*
5921 modules) and this one was the most efficient overall.
5923 A possible bit of correctness was sacrificed by not doing any test on
Iterable and just assuming that any non-atomic type was an iterable of
ids.
"""
# much of the corpus is falsy objects (empty list, tuple or set, None)
if not arg:
    return ()
5933 # `type in set` is significantly faster (because more restrictive) than
5934 # isinstance(arg, set) or issubclass(type, set); and for new-style classes
5935 # obj.__class__ is equivalent to but faster than type(obj). Not relevant
5936 # (and looks much worse) in most cases, but over millions of calls it
5937 # does have a very minor effect.
if arg.__class__ in atoms:
    return arg,

return tuple(arg)
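# Illustration of the normalization contract (added for clarity, not from
# the original source):
#
#     _normalize_ids(None)       # -> ()
#     _normalize_ids(7)          # -> (7,)
#     _normalize_ids([7, 8])     # -> (7, 8)
#     _normalize_ids((7, 8))     # -> (7, 8)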
5943 # keep those imports here to avoid dependency cycle errors
5944 from .osv import expression
5945 from .fields import Field, SpecialValue, FailedValue
5947 # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: