# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

"""
    Object Relational Mapping module:
     * Hierarchical structure
     * Constraints consistency and validation
     * Object metadata depends on its status
     * Optimised processing by complex query (multiple actions at once)
     * Default field values
     * Permissions optimisation
     * Persistent object: DB postgresql
     * Data conversion
     * Multi-level caching system
     * Two different inheritance mechanisms
     * Rich set of field types:
          - classical (varchar, integer, boolean, ...)
          - relational (one2many, many2one, many2many)
          - functional

"""

import copy
import datetime
import itertools
import logging
import pickle
import re

from collections import defaultdict, MutableMapping
from inspect import getmembers

import dateutil.relativedelta
import psycopg2
from lxml import etree

from . import SUPERUSER_ID
from . import api
from . import tools
from .api import Environment
from .exceptions import except_orm, AccessError, MissingError, ValidationError
from .fields import Field
from .osv import fields
from .osv.query import Query
from .tools import lazy_property, ormcache
from .tools.config import config
from .tools.misc import CountingStream, DEFAULT_SERVER_DATETIME_FORMAT, DEFAULT_SERVER_DATE_FORMAT
from .tools.safe_eval import safe_eval as eval
from .tools.translate import _

_logger = logging.getLogger(__name__)
_schema = logging.getLogger(__name__ + '.schema')

regex_order = re.compile('^( *([a-z0-9:_]+|"[a-z0-9:_]+")( *desc| *asc)?( *, *|))+$', re.I)
regex_object_name = re.compile(r'^[a-z0-9_.]+$')
onchange_v7 = re.compile(r"^(\w+)\((.*)\)$")

AUTOINIT_RECALCULATE_STORED_FIELDS = 1000

def check_object_name(name):
    """ Check if the given name is a valid openerp object name.

        The _name attribute in osv and osv_memory objects is subject to
        some restrictions. This function returns True if the given name
        is allowed, and False otherwise.

        TODO: this is an approximation. The goal in this approximation
        is to disallow uppercase characters (in some places, we quote
        table/column names and in others not, which leads to this kind
        of errors:

            psycopg2.ProgrammingError: relation "xxx" does not exist).

        The same restriction should apply to both osv and osv_memory
        objects for consistency.

    """
    if regex_object_name.match(name) is None:
        return False
    return True
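
# For example, valid names are lowercase dotted identifiers:
#
#     check_object_name('res.partner')    # True
#     check_object_name('Res.Partner')    # False (uppercase is rejected)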

def raise_on_invalid_object_name(name):
    if not check_object_name(name):
        msg = "The _name attribute %s is not valid." % name
        _logger.error(msg)
        raise except_orm('ValueError', msg)

POSTGRES_CONFDELTYPES = {
    'RESTRICT': 'r',
    'NO ACTION': 'a',
    'CASCADE': 'c',
    'SET NULL': 'n',
    'SET DEFAULT': 'd',
}

def intersect(la, lb):
    return filter(lambda x: x in lb, la)

def same_name(f, g):
    """ Test whether functions `f` and `g` are identical or have the same name """
    return f == g or getattr(f, '__name__', 0) == getattr(g, '__name__', 1)

def fix_import_export_id_paths(fieldname):
    """
    Fixes the id fields in import and exports, and splits field paths
    on '/'.

    :param str fieldname: name of the field to import/export
    :return: split field name
    :rtype: list of str
    """
    fixed_db_id = re.sub(r'([^/])\.id', r'\1/.id', fieldname)
    fixed_external_id = re.sub(r'([^/]):id', r'\1/id', fixed_db_id)
    return fixed_external_id.split('/')
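
# For example:
#
#     fix_import_export_id_paths('order_line/product_id:id')
#     # -> ['order_line', 'product_id', 'id']
#     fix_import_export_id_paths('partner.id')
#     # -> ['partner', '.id']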

def pg_varchar(size=0):
    """ Returns the VARCHAR declaration for the provided size:

    * If no size (or an empty or negative size is provided) return an
      'infinite' VARCHAR
    * Otherwise return a VARCHAR(n)

    :param int size: varchar size, optional
    :rtype: str
    """
    if size:
        if not isinstance(size, int):
            raise TypeError("VARCHAR parameter should be an int, got %s"
                            % type(size))
        if size > 0:
            return 'VARCHAR(%d)' % size
    return 'VARCHAR'
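
# For example:
#
#     pg_varchar()      # -> 'VARCHAR'  (no size limit)
#     pg_varchar(16)    # -> 'VARCHAR(16)'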

FIELDS_TO_PGTYPES = {
    fields.boolean: 'bool',
    fields.integer: 'int4',
    fields.text: 'text',
    fields.html: 'text',
    fields.date: 'date',
    fields.datetime: 'timestamp',
    fields.binary: 'bytea',
    fields.many2one: 'int4',
    fields.serialized: 'text',
}

def get_pg_type(f, type_override=None):
    """
    :param fields._column f: field to get a Postgres type for
    :param type type_override: use the provided type for dispatching instead of the field's own type
    :returns: (postgres_identification_type, postgres_type_specification)
    :rtype: (str, str)
    """
    field_type = type_override or type(f)

    if field_type in FIELDS_TO_PGTYPES:
        pg_type = (FIELDS_TO_PGTYPES[field_type], FIELDS_TO_PGTYPES[field_type])
    elif issubclass(field_type, fields.float):
        # a float with explicit digits maps to NUMERIC, otherwise to float8
        if f.digits:
            pg_type = ('numeric', 'NUMERIC')
        else:
            pg_type = ('float8', 'DOUBLE PRECISION')
    elif issubclass(field_type, (fields.char, fields.reference)):
        pg_type = ('varchar', pg_varchar(f.size))
    elif issubclass(field_type, fields.selection):
        if (isinstance(f.selection, list) and isinstance(f.selection[0][0], int))\
                or getattr(f, 'size', None) == -1:
            pg_type = ('int4', 'INTEGER')
        else:
            pg_type = ('varchar', pg_varchar(getattr(f, 'size', None)))
    elif issubclass(field_type, fields.function):
        if f._type == 'selection':
            pg_type = ('varchar', pg_varchar())
        else:
            # dispatch on the type the function field emulates
            pg_type = get_pg_type(f, getattr(fields, f._type))
    else:
        _logger.warning('%s type not supported!', field_type)
        pg_type = None

    return pg_type
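
# For example, a fields.char(size=16) column maps to ('varchar', 'VARCHAR(16)'),
# and a fields.function(..., type='integer') dispatches to fields.integer and
# maps to ('int4', 'int4').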

class MetaModel(api.Meta):
    """ Metaclass for the models.

    This class is used as the metaclass for the class :class:`BaseModel` to
    discover the models defined in a module (without instantiating them).
    If the automatic discovery is not needed, it is possible to set the
    model's ``_register`` attribute to False.

    """

    module_to_models = {}

    def __init__(self, name, bases, attrs):
        if not self._register:
            self._register = True
            super(MetaModel, self).__init__(name, bases, attrs)
            return

        if not hasattr(self, '_module'):
            # The (OpenERP) module name can be in the `openerp.addons` namespace
            # or not. For instance, module `sale` can be imported as
            # `openerp.addons.sale` (the right way) or `sale` (for backward
            # compatibility).
            module_parts = self.__module__.split('.')
            if len(module_parts) > 2 and module_parts[:2] == ['openerp', 'addons']:
                module_name = self.__module__.split('.')[2]
            else:
                module_name = self.__module__.split('.')[0]
            self._module = module_name

        # Remember which models to instantiate for this module.
        if not self._custom:
            self.module_to_models.setdefault(self._module, []).append(self)
242 """ Pseudo-ids for new records. """
243 def __nonzero__(self):
246 IdType = (int, long, basestring, NewId)

# maximum number of prefetched records
PREFETCH_MAX = 200

# special columns automatically created by the ORM
LOG_ACCESS_COLUMNS = ['create_uid', 'create_date', 'write_uid', 'write_date']
MAGIC_COLUMNS = ['id'] + LOG_ACCESS_COLUMNS

class BaseModel(object):
    """ Base class for OpenERP models.

    OpenERP models are created by inheriting from this class' subclasses:

    * :class:`Model` for regular database-persisted models

    * :class:`TransientModel` for temporary data, stored in the database but
      automatically vacuumed every so often

    * :class:`AbstractModel` for abstract super classes meant to be shared by
      multiple inheriting models

    The system automatically instantiates every model once per database. Those
    instances represent the available models on each database, and depend on
    which modules are installed on that database. The actual class of each
    instance is built from the Python classes that create and inherit from the
    corresponding model.

    Every model instance is a "recordset", i.e., an ordered collection of
    records of the model. Recordsets are returned by methods like
    :meth:`~.browse`, :meth:`~.search`, or field accesses. Records have no
    explicit representation: a record is represented as a recordset of one
    record.

    To create a class that should not be instantiated, the _register class
    attribute may be set to False.
    """
    __metaclass__ = MetaModel
    _auto = True # create database backend
    _register = False # Set to false if the model shouldn't be automatically discovered.
    _name = None
    _columns = {}
    _constraints = []
    _custom = False
    _defaults = {}
    _rec_name = None
    _parent_name = 'parent_id'
    _parent_store = False
    _parent_order = False
    _date_name = 'date'
    _order = 'id'
    _sequence = None
    _description = None
    _needaction = False
    _translate = True # set to False to disable translations export for this model

    # dict of {field:method}, with method returning the (name_get of records, {id: fold})
    # to include in the _read_group, if grouped on this field
    _group_by_full = {}

    # Transience
    _transient = False # True in a TransientModel

    # structure:
    #  { 'parent_model': 'm2o_field', ... }
    _inherits = {}

    # Mapping from inherits'd field name to triple (m, r, f, n) where m is the
    # model from which it is inherits'd, r is the (local) field towards m, f
    # is the _column object itself, and n is the original (i.e. top-most)
    # parent model.
    # Example:
    #  { 'field_name': ('parent_model', 'm2o_field_to_reach_parent',
    #                   field_column_obj, original_parent_model), ... }
    _inherit_fields = {}

    # Mapping field name/column_info object
    # This is similar to _inherit_fields but:
    # 1. includes self fields,
    # 2. uses column_info instead of a triple.
    _all_columns = {}

    _table = None
    _log_create = False
    _sql_constraints = []

    # model dependencies, for models backed up by sql views:
    # {model_name: field_names, ...}
    _depends = {}

    CONCURRENCY_CHECK_FIELD = '__last_update'

    def log(self, cr, uid, id, message, secondary=False, context=None):
        return _logger.warning("log() is deprecated. Please use OpenChatter notification system instead of the res.log mechanism.")

    def view_init(self, cr, uid, fields_list, context=None):
        """Override this method to do specific things when a view on the object is opened."""
        pass

    def _field_create(self, cr, context=None):
        """ Create entries in ir_model_fields for all the model's fields.

        If necessary, also create an entry in ir_model, and if called from the
        modules loading scheme (by receiving 'module' in the context), also
        create entries in ir_model_data (for the model and the fields).

        - create an entry in ir_model (if there is not already one),
        - create an entry in ir_model_data (if there is not already one, and if
          'module' is in the context),
        - update ir_model_fields with the fields found in _columns
          (TODO there is some redundancy as _columns is updated from
          ir_model_fields in __init__).

        """
        if context is None:
            context = {}
        cr.execute("SELECT id FROM ir_model WHERE model=%s", (self._name,))
        if not cr.rowcount:
            cr.execute('SELECT nextval(%s)', ('ir_model_id_seq',))
            model_id = cr.fetchone()[0]
            cr.execute("INSERT INTO ir_model (id,model, name, info,state) VALUES (%s, %s, %s, %s, %s)", (model_id, self._name, self._description, self.__doc__, 'base'))
        else:
            model_id = cr.fetchone()[0]
        if 'module' in context:
            name_id = 'model_'+self._name.replace('.', '_')
            cr.execute('select * from ir_model_data where name=%s and module=%s', (name_id, context['module']))
            if not cr.rowcount:
                cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, (now() at time zone 'UTC'), (now() at time zone 'UTC'), %s, %s, %s)", \
                    (name_id, context['module'], 'ir.model', model_id)
                )

        cr.execute("SELECT * FROM ir_model_fields WHERE model=%s", (self._name,))
        cols = {}
        for rec in cr.dictfetchall():
            cols[rec['name']] = rec

        ir_model_fields_obj = self.pool.get('ir.model.fields')

        # sparse fields should be created at the end, as they depend on their serialized field already existing
        model_fields = sorted(self._columns.items(), key=lambda x: 1 if x[1]._type == 'sparse' else 0)
        for (k, f) in model_fields:
            vals = {
                'model_id': model_id,
                'model': self._name,
                'name': k,
                'field_description': f.string,
                'ttype': f._type,
                'relation': f._obj or '',
                'select_level': tools.ustr(int(f.select)),
                'readonly': (f.readonly and 1) or 0,
                'required': (f.required and 1) or 0,
                'selectable': (f.selectable and 1) or 0,
                'translate': (f.translate and 1) or 0,
                'relation_field': f._fields_id if isinstance(f, fields.one2many) else '',
                'serialization_field_id': None,
            }
            if getattr(f, 'serialization_field', None):
                # resolve the link to serialization_field if specified by name
                serialization_field_id = ir_model_fields_obj.search(cr, SUPERUSER_ID, [('model','=',vals['model']), ('name', '=', f.serialization_field)])
                if not serialization_field_id:
                    raise except_orm(_('Error'), _("Serialization field `%s` not found for sparse field `%s`!") % (f.serialization_field, k))
                vals['serialization_field_id'] = serialization_field_id[0]

            # When it is a custom field, it does not contain f.select
            if context.get('field_state', 'base') == 'manual':
                if context.get('field_name', '') == k:
                    vals['select_level'] = context.get('select', '0')
                # set the value to prevent the problem from occurring next time
                elif k in cols:
                    vals['select_level'] = cols[k]['select_level']

            if k not in cols:
                cr.execute('select nextval(%s)', ('ir_model_fields_id_seq',))
                id = cr.fetchone()[0]
                vals['id'] = id
                cr.execute("""INSERT INTO ir_model_fields (
                    id, model_id, model, name, field_description, ttype,
                    relation,state,select_level,relation_field, translate, serialization_field_id
                ) VALUES (
                    %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s
                )""", (
                    id, vals['model_id'], vals['model'], vals['name'], vals['field_description'], vals['ttype'],
                    vals['relation'], 'base',
                    vals['select_level'], vals['relation_field'], bool(vals['translate']), vals['serialization_field_id']
                ))
                if 'module' in context:
                    name1 = 'field_' + self._table + '_' + k
                    cr.execute("select name from ir_model_data where name=%s", (name1,))
                    if cr.fetchone():
                        name1 = name1 + "_" + str(id)
                    cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, (now() at time zone 'UTC'), (now() at time zone 'UTC'), %s, %s, %s)", \
                        (name1, context['module'], 'ir.model.fields', id)
                    )
            else:
                for key, val in vals.items():
                    if cols[k][key] != vals[key]:
                        cr.execute('update ir_model_fields set field_description=%s where model=%s and name=%s', (vals['field_description'], vals['model'], vals['name']))
                        cr.execute("""UPDATE ir_model_fields SET
                            model_id=%s, field_description=%s, ttype=%s, relation=%s,
                            select_level=%s, readonly=%s, required=%s, selectable=%s, relation_field=%s, translate=%s, serialization_field_id=%s
                        WHERE
                            model=%s AND name=%s""", (
                                vals['model_id'], vals['field_description'], vals['ttype'],
                                vals['relation'],
                                vals['select_level'], bool(vals['readonly']), bool(vals['required']), bool(vals['selectable']), vals['relation_field'], bool(vals['translate']), vals['serialization_field_id'], vals['model'], vals['name']
                            ))
                        break
        self.invalidate_cache(cr, SUPERUSER_ID)

    @classmethod
    def _add_field(cls, name, field):
        """ Add the given `field` under the given `name` in the class """
        field.set_class_name(cls, name)

        # add field in _fields (for reflection)
        cls._fields[name] = field

        # add field as an attribute, unless another kind of value already exists
        if isinstance(getattr(cls, name, field), Field):
            setattr(cls, name, field)
        else:
            _logger.warning("In model %r, member %r is not a field", cls._name, name)

        if field.store:
            cls._columns[name] = field.to_column()
        else:
            # remove potential column that may be overridden by field
            cls._columns.pop(name, None)

    @classmethod
    def _add_magic_fields(cls):
        """ Introduce magic fields on the current class

        * id is a "normal" field (with a specific getter)
        * create_uid, create_date, write_uid and write_date have become
          "normal" fields
        * $CONCURRENCY_CHECK_FIELD is a computed field with its computing
          method defined dynamically. Uses ``str(datetime.datetime.utcnow())``
          to get the same structure as the previous
          ``(now() at time zone 'UTC')::timestamp``::

              # select (now() at time zone 'UTC')::timestamp;
                        timezone
              ----------------------------
               2013-06-18 08:30:37.292809

              >>> str(datetime.datetime.utcnow())
              '2013-06-18 08:31:32.821177'
        """
        def add(name, field):
            """ add `field` with the given `name` if it does not exist yet """
            if name not in cls._columns and name not in cls._fields:
                cls._add_field(name, field)

        # cyclic import
        from . import fields

        # this field 'id' must override any other column or field
        cls._add_field('id', fields.Id(automatic=True))

        add('display_name', fields.Char(string='Display Name', automatic=True,
            compute='_compute_display_name'))

        if cls._log_access:
            add('create_uid', fields.Many2one('res.users', string='Created by', automatic=True))
            add('create_date', fields.Datetime(string='Created on', automatic=True))
            add('write_uid', fields.Many2one('res.users', string='Last Updated by', automatic=True))
            add('write_date', fields.Datetime(string='Last Updated on', automatic=True))
            last_modified_name = 'compute_concurrency_field_with_access'
        else:
            last_modified_name = 'compute_concurrency_field'

        # this field must override any other column or field
        cls._add_field(cls.CONCURRENCY_CHECK_FIELD, fields.Datetime(
            string='Last Modified on', compute=last_modified_name, automatic=True))

    @api.one
    def compute_concurrency_field(self):
        self[self.CONCURRENCY_CHECK_FIELD] = \
            datetime.datetime.utcnow().strftime(DEFAULT_SERVER_DATETIME_FORMAT)

    @api.one
    @api.depends('create_date', 'write_date')
    def compute_concurrency_field_with_access(self):
        self[self.CONCURRENCY_CHECK_FIELD] = \
            self.write_date or self.create_date or \
            datetime.datetime.utcnow().strftime(DEFAULT_SERVER_DATETIME_FORMAT)

    #
    # Goal: try to apply inheritance at the instantiation level and
    #       put objects in the pool var
    #
    @classmethod
    def _build_model(cls, pool, cr):
        """ Instantiate a given model.

        This class method instantiates the class of some model (i.e. a class
        deriving from osv or osv_memory). The class might be the class passed
        in argument or, if it inherits from another class, a class constructed
        by combining the two classes.

        """

        # IMPORTANT: the registry contains an instance for each model. The class
        # of each model carries inferred metadata that is shared among the
        # model's instances for this registry, but not among registries. Hence
        # we cannot use that "registry class" for combining model classes by
        # inheritance, since it confuses the metadata inference process.

        # Keep links to non-inherited constraints in cls; this is useful for
        # instance when exporting translations
        cls._local_constraints = cls.__dict__.get('_constraints', [])
        cls._local_sql_constraints = cls.__dict__.get('_sql_constraints', [])

        # determine inherited models
        parents = getattr(cls, '_inherit', [])
        parents = [parents] if isinstance(parents, basestring) else (parents or [])

        # determine the model's name
        name = cls._name or (len(parents) == 1 and parents[0]) or cls.__name__

        # determine the module that introduced the model
        original_module = pool[name]._original_module if name in parents else cls._module

        # build the class hierarchy for the model
        for parent in parents:
            if parent not in pool:
                raise TypeError('The model "%s" specifies a nonexistent parent class "%s"\n'
                    'You may need to add a dependency on the parent class\' module.' % (name, parent))
            parent_model = pool[parent]

            # do not use the class of parent_model, since that class contains
            # inferred metadata; use its ancestor instead
            parent_class = type(parent_model).__base__

            # don't inherit custom fields
            columns = dict((key, val)
                for key, val in parent_class._columns.iteritems()
                if not val.manual
            )
            columns.update(cls._columns)

            defaults = dict(parent_class._defaults)
            defaults.update(cls._defaults)

            inherits = dict(parent_class._inherits)
            inherits.update(cls._inherits)

            depends = dict(parent_class._depends)
            for m, fs in cls._depends.iteritems():
                depends[m] = depends.get(m, []) + fs

            old_constraints = parent_class._constraints
            new_constraints = cls._constraints
            # filter out from old_constraints the ones overridden by a
            # constraint with the same function name in new_constraints
            constraints = new_constraints + [oldc
                for oldc in old_constraints
                if not any(newc[2] == oldc[2] and same_name(newc[0], oldc[0])
                           for newc in new_constraints)
            ]

            sql_constraints = cls._sql_constraints + \
                              parent_class._sql_constraints

            attrs = {
                '_name': name,
                '_register': False,
                '_columns': columns,
                '_defaults': defaults,
                '_inherits': inherits,
                '_depends': depends,
                '_constraints': constraints,
                '_sql_constraints': sql_constraints,
            }
            cls = type(name, (cls, parent_class), attrs)

        # introduce the "registry class" of the model;
        # duplicate some attributes so that the ORM can modify them
        attrs = {
            '_register': False,
            '_columns': dict(cls._columns),
            '_defaults': dict(cls._defaults),
            '_inherits': dict(cls._inherits),
            '_depends': dict(cls._depends),
            '_constraints': list(cls._constraints),
            '_sql_constraints': list(cls._sql_constraints),
            '_original_module': original_module,
        }
        cls = type(cls._name, (cls,), attrs)

        # float fields are registry-dependent (digits attribute); duplicate
        # them so they are not shared between registries
        for key, col in cls._columns.items():
            if col._type == 'float':
                cls._columns[key] = copy.copy(col)

        # instantiate the model, and initialize it
        model = object.__new__(cls)
        model.__init__(pool, cr)
        return model
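
    # For example (hypothetical module code), an existing model can be extended
    # in place with:
    #
    #     class Partner(Model):
    #         _inherit = 'res.partner'
    #         _columns = {'nickname': fields.char('Nickname')}
    #
    # _build_model() then combines this class with the registry class of
    # 'res.partner', so the resulting model class carries both the original
    # and the added columns.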

    @classmethod
    def _init_function_fields(cls, pool, cr):
        # initialize the list of non-stored function fields for this model
        pool._pure_function_fields[cls._name] = []

        # process store of low-level function fields
        for fname, column in cls._columns.iteritems():
            if hasattr(column, 'digits_change'):
                column.digits_change(cr)
            # filter out existing store about this field
            pool._store_function[cls._name] = [
                stored
                for stored in pool._store_function.get(cls._name, [])
                if (stored[0], stored[1]) != (cls._name, fname)
            ]
            if not isinstance(column, fields.function):
                continue
            if not column.store:
                # register it on the pool for invalidation
                pool._pure_function_fields[cls._name].append(fname)
                continue
            # process store parameter
            store = column.store
            if store is True:
                get_ids = lambda self, cr, uid, ids, c={}: ids
                store = {cls._name: (get_ids, None, column.priority, None)}
            for model, spec in store.iteritems():
                if len(spec) == 4:
                    (fnct, fields2, order, length) = spec
                elif len(spec) == 3:
                    (fnct, fields2, order) = spec
                    length = None
                else:
                    raise except_orm('Error',
                        ('Invalid function definition %s in object %s !\nYou must use the definition: store={object:(fnct, fields, priority, time length)}.' % (fname, cls._name)))
                pool._store_function.setdefault(model, [])
                t = (cls._name, fname, fnct, tuple(fields2) if fields2 else None, order, length)
                if t not in pool._store_function[model]:
                    pool._store_function[model].append(t)
                    pool._store_function[model].sort(key=lambda x: x[4])

    @classmethod
    def _init_manual_fields(cls, pool, cr):
        # Check whether the query has already been done
        if pool.fields_by_model is not None:
            manual_fields = pool.fields_by_model.get(cls._name, [])
        else:
            cr.execute('SELECT * FROM ir_model_fields WHERE model=%s AND state=%s', (cls._name, 'manual'))
            manual_fields = cr.dictfetchall()

        for field in manual_fields:
            if field['name'] in cls._columns:
                continue
            attrs = {
                'string': field['field_description'],
                'required': bool(field['required']),
                'readonly': bool(field['readonly']),
                'domain': eval(field['domain']) if field['domain'] else None,
                'size': field['size'] or None,
                'ondelete': field['on_delete'],
                'translate': (field['translate']),
                'manual': True,
                '_prefetch': False,
                #'select': int(field['select_level'])
            }
            if field['serialization_field_id']:
                cr.execute('SELECT name FROM ir_model_fields WHERE id=%s', (field['serialization_field_id'],))
                attrs.update({'serialization_field': cr.fetchone()[0], 'type': field['ttype']})
                if field['ttype'] in ['many2one', 'one2many', 'many2many']:
                    attrs.update({'relation': field['relation']})
                cls._columns[field['name']] = fields.sparse(**attrs)
            elif field['ttype'] == 'selection':
                cls._columns[field['name']] = fields.selection(eval(field['selection']), **attrs)
            elif field['ttype'] == 'reference':
                cls._columns[field['name']] = fields.reference(selection=eval(field['selection']), **attrs)
            elif field['ttype'] == 'many2one':
                cls._columns[field['name']] = fields.many2one(field['relation'], **attrs)
            elif field['ttype'] == 'one2many':
                cls._columns[field['name']] = fields.one2many(field['relation'], field['relation_field'], **attrs)
            elif field['ttype'] == 'many2many':
                _rel1 = field['relation'].replace('.', '_')
                _rel2 = field['model'].replace('.', '_')
                _rel_name = 'x_%s_%s_%s_rel' % (_rel1, _rel2, field['name'])
                cls._columns[field['name']] = fields.many2many(field['relation'], _rel_name, 'id1', 'id2', **attrs)
            else:
                cls._columns[field['name']] = getattr(fields, field['ttype'])(**attrs)

    @classmethod
    def _init_constraints_onchanges(cls):
        # store sql constraint error messages
        for (key, _, msg) in cls._sql_constraints:
            cls.pool._sql_error[cls._table + '_' + key] = msg

        # collect constraint and onchange methods
        cls._constraint_methods = []
        cls._onchange_methods = defaultdict(list)
        for attr, func in getmembers(cls, callable):
            if hasattr(func, '_constrains'):
                if not all(name in cls._fields for name in func._constrains):
                    _logger.warning("@constrains%r parameters must be field names", func._constrains)
                cls._constraint_methods.append(func)
            if hasattr(func, '_onchange'):
                if not all(name in cls._fields for name in func._onchange):
                    _logger.warning("@onchange%r parameters must be field names", func._onchange)
                for name in func._onchange:
                    cls._onchange_methods[name].append(func)
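
    # For example (hypothetical model methods), the methods collected here are
    # declared with the api decorators:
    #
    #     @api.constrains('lot_id', 'quantity')
    #     def _check_lot_quantity(self):
    #         ...  # raise ValidationError if the records are inconsistent
    #
    #     @api.onchange('partner_id')
    #     def _onchange_partner(self):
    #         ...  # update other fields in the form view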

    def __new__(cls):
        # In the past, this method was registering the model class in the server.
        # This job is now done entirely by the metaclass MetaModel.
        #
        # Do not create an instance here.  Model instances are created by method
        # _build_model().
        return None

    def __init__(self, pool, cr):
        """ Initialize a model and make it part of the given registry.

        - copy the stored fields' functions in the registry,
        - retrieve custom fields and add them in the model,
        - ensure there is a many2one for each _inherits'd parent,
        - update the children's _columns,
        - give a chance to each field to initialize itself.

        """
        cls = type(self)

        # link the class to the registry, and update the registry
        cls.pool = pool
        cls._model = self              # backward compatibility
        pool.add(cls._name, self)

        # determine description, table, sequence and log_access
        if not cls._description:
            cls._description = cls._name
        if not cls._table:
            cls._table = cls._name.replace('.', '_')
        if not cls._sequence:
            cls._sequence = cls._table + '_id_seq'
        if not hasattr(cls, '_log_access'):
            # If _log_access is not specified, it is the same value as _auto.
            cls._log_access = cls._auto

        # Transience
        if cls.is_transient():
            cls._transient_check_count = 0
            cls._transient_max_count = config.get('osv_memory_count_limit')
            cls._transient_max_hours = config.get('osv_memory_age_limit')
            assert cls._log_access, \
                "TransientModels must have log_access turned on, " \
                "in order to implement their access rights policy"

        # retrieve new-style fields and duplicate them (to avoid clashes with
        # inheritance between different models)
        cls._fields = {}
        for attr, field in getmembers(cls, Field.__instancecheck__):
            if not field._origin:
                cls._add_field(attr, field.copy())

        # introduce magic fields
        cls._add_magic_fields()

        # register stuff about low-level function fields and custom fields
        cls._init_function_fields(pool, cr)
        cls._init_manual_fields(pool, cr)

        # process _inherits
        cls._inherits_check()
        cls._inherits_reload()

        # register constraints and onchange methods
        cls._init_constraints_onchanges()

        # check defaults
        for k in cls._defaults:
            assert k in cls._fields, \
                "Model %s has a default for non-existing field %s" % (cls._name, k)

        # restart columns
        for column in cls._columns.itervalues():
            column.restart()

        # validate rec_name
        if cls._rec_name:
            assert cls._rec_name in cls._fields, \
                "Invalid rec_name %s for model %s" % (cls._rec_name, cls._name)
        elif 'name' in cls._fields:
            cls._rec_name = 'name'

        # prepare ormcache, which must be shared by all instances of the model
        cls._ormcache = {}

    @api.model
    def _is_an_ordinary_table(self):
        self.env.cr.execute("""\
            SELECT  1
            FROM    pg_class
            WHERE   relname = %s
            AND     relkind = %s""", [self._table, 'r'])
        return bool(self.env.cr.fetchone())

    def __export_xml_id(self):
        """ Return a valid xml_id for the record `self`. """
        if not self._is_an_ordinary_table():
            raise Exception(
                "You can not export the column ID of model %s, because the "
                "table %s is not an ordinary table."
                % (self._name, self._table))
        ir_model_data = self.sudo().env['ir.model.data']
        data = ir_model_data.search([('model', '=', self._name), ('res_id', '=', self.id)])
        if data:
            if data[0].module:
                return '%s.%s' % (data[0].module, data[0].name)
            else:
                return data[0].name
        else:
            postfix = 0
            name = '%s_%s' % (self._table, self.id)
            while ir_model_data.search([('module', '=', '__export__'), ('name', '=', name)]):
                postfix += 1
                name = '%s_%s_%s' % (self._table, self.id, postfix)
            ir_model_data.create({
                'model': self._name,
                'res_id': self.id,
                'module': '__export__',
                'name': name,
            })
            return '__export__.' + name

    @api.multi
    def __export_rows(self, fields):
        """ Export fields of the records in `self`.

            :param fields: list of lists of fields to traverse
            :return: list of lists of corresponding values
        """
        lines = []
        for record in self:
            # main line of record, initially empty
            current = [''] * len(fields)
            lines.append(current)

            # list of primary fields followed by secondary field(s)
            primary_done = []

            # process column by column
            for i, path in enumerate(fields):
                if not path:
                    continue

                name = path[0]
                if name in primary_done:
                    continue

                if name == '.id':
                    current[i] = str(record.id)
                elif name == 'id':
                    current[i] = record.__export_xml_id()
                else:
                    field = record._fields[name]
                    value = record[name]

                    # this part could be simpler, but it has to be done this way
                    # in order to reproduce the former behavior
                    if not isinstance(value, BaseModel):
                        current[i] = field.convert_to_export(value, self.env)
                    else:
                        primary_done.append(name)

                        # This is a special case, its strange behavior is intended!
                        if field.type == 'many2many' and len(path) > 1 and path[1] == 'id':
                            xml_ids = [r.__export_xml_id() for r in value]
                            current[i] = ','.join(xml_ids) or False
                        else:
                            # recursively export the fields that follow name
                            fields2 = [(p[1:] if p and p[0] == name else []) for p in fields]
                            lines2 = value.__export_rows(fields2)
                            if lines2:
                                # merge first line with record's main line
                                for j, val in enumerate(lines2[0]):
                                    if val:
                                        current[j] = val
                                # check value of current field
                                if not current[i]:
                                    # assign xml_ids, and forget about remaining lines
                                    xml_ids = [item[1] for item in value.name_get()]
                                    current[i] = ','.join(xml_ids)
                                else:
                                    # append the other lines at the end
                                    lines += lines2[1:]
                            else:
                                current[i] = False

        return lines

    @api.multi
    def export_data(self, fields_to_export, raw_data=False):
        """ Export fields for selected objects

            :param fields_to_export: list of fields
            :param raw_data: True to return value in native Python type
            :rtype: dictionary with a *datas* matrix

            This method is used when exporting data via client menu
        """
        fields_to_export = map(fix_import_export_id_paths, fields_to_export)
        if raw_data:
            self = self.with_context(export_raw_data=True)
        return {'datas': self.__export_rows(fields_to_export)}
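
    # For example (hypothetical records), exporting two fields of a recordset:
    #
    #     records.export_data(['id', 'name'])
    #     # -> {'datas': [['__export__.res_partner_7', 'Agrolait'], ...]}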

    def import_data(self, cr, uid, fields, datas, mode='init', current_module='', noupdate=False, context=None, filename=None):
        """
        .. deprecated:: 7.0
            Use :meth:`~load` instead

        Import given data in given module

        This method is used when importing data via client menu.

        Example of fields to import for a sale.order::

            .id,                         (=database_id)
            partner_id,                  (=name_search)
            order_line/.id,              (=database_id)
            order_line/name,
            order_line/product_id/id,    (=xml id)
            order_line/price_unit,
            order_line/product_uom_qty,
            order_line/product_uom/id    (=xml_id)

        This method returns a 4-tuple with the following structure::

            (return_code, errored_resource, error_message, unused)

        * The first item is a return code, it is ``-1`` in case of
          import error, or the last imported row number in case of success
        * The second item contains the record data dict that failed to import
          in case of error, otherwise it's 0
        * The third item contains an error message string in case of error,
          otherwise it's 0
        * The last item is currently unused, with no specific semantics

        :param fields: list of fields to import
        :param datas: data to import
        :param mode: 'init' or 'update' for record creation
        :param current_module: module name
        :param noupdate: flag for record creation
        :param filename: optional file to store partial import state for recovery
        :returns: 4-tuple in the form (return_code, errored_resource, error_message, unused)
        :rtype: (int, dict or 0, str or 0, str or 0)
        """
        context = dict(context) if context is not None else {}
        context['_import_current_module'] = current_module

        fields = map(fix_import_export_id_paths, fields)
        ir_model_data_obj = self.pool.get('ir.model.data')

        def log(m):
            if m['type'] == 'error':
                raise Exception(m['message'])

        if config.get('import_partial') and filename:
            with open(config.get('import_partial'), 'rb') as partial_import_file:
                data = pickle.load(partial_import_file)
                position = data.get(filename, 0)

        position = 0
        try:
            for res_id, xml_id, res, info in self._convert_records(cr, uid,
                            self._extract_records(cr, uid, fields, datas,
                                                  context=context, log=log),
                            context=context, log=log):
                ir_model_data_obj._update(cr, uid, self._name,
                     current_module, res, mode=mode, xml_id=xml_id,
                     noupdate=noupdate, res_id=res_id, context=context)
                position = info.get('rows', {}).get('to', 0) + 1
                if config.get('import_partial') and filename and (not (position % 100)):
                    with open(config.get('import_partial'), 'rb') as partial_import:
                        data = pickle.load(partial_import)
                    data[filename] = position
                    with open(config.get('import_partial'), 'wb') as partial_import:
                        pickle.dump(data, partial_import)
                    if context.get('defer_parent_store_computation'):
                        self._parent_store_compute(cr)
                    cr.commit()
        except Exception, e:
            cr.rollback()
            return -1, {}, 'Line %d : %s' % (position + 1, tools.ustr(e)), ''

        if context.get('defer_parent_store_computation'):
            self._parent_store_compute(cr)
        return position, 0, 0, 0

    def load(self, cr, uid, fields, data, context=None):
        """
        Attempts to load the data matrix, and returns a list of ids (or
        ``False`` if there was an error and no id could be generated) and a
        list of messages.

        The ids are those of the records created and saved (in database), in
        the same order they were extracted from the file. They can be passed
        directly to :meth:`~read`

        :param fields: list of fields to import, at the same index as the corresponding data
        :type fields: list(str)
        :param data: row-major matrix of data to import
        :type data: list(list(str))
        :param dict context:
        :returns: {ids: list(int)|False, messages: [Message]}
        """
        cr.execute('SAVEPOINT model_load')
        messages = []

        fields = map(fix_import_export_id_paths, fields)
        ModelData = self.pool['ir.model.data']
        ModelData.clear_caches()

        fg = self.fields_get(cr, uid, context=context)

        mode = 'init'
        current_module = ''
        noupdate = False

        ids = []
        for id, xid, record, info in self._convert_records(cr, uid,
                self._extract_records(cr, uid, fields, data,
                                      context=context, log=messages.append),
                context=context, log=messages.append):
            try:
                cr.execute('SAVEPOINT model_load_save')
            except psycopg2.InternalError, e:
                # broken transaction, exit and hope the source error was
                # already logged
                if not any(message['type'] == 'error' for message in messages):
                    messages.append(dict(info, type='error', message=
                        u"Unknown database error: '%s'" % e))
                break
            try:
                ids.append(ModelData._update(cr, uid, self._name,
                     current_module, record, mode=mode, xml_id=xid,
                     noupdate=noupdate, res_id=id, context=context))
                cr.execute('RELEASE SAVEPOINT model_load_save')
            except psycopg2.Warning, e:
                messages.append(dict(info, type='warning', message=str(e)))
                cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
            except psycopg2.Error, e:
                messages.append(dict(
                    info, type='error',
                    **PGERROR_TO_OE[e.pgcode](self, fg, info, e)))
                # Failed to write, log to messages, rollback savepoint (to
                # avoid broken transaction) and keep going
                cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
            except Exception, e:
                message = (_('Unknown error during import:') +
                           ' %s: %s' % (type(e), unicode(e)))
                moreinfo = _('Resolve other errors first')
                messages.append(dict(info, type='error',
                                     message=message, moreinfo=moreinfo))
                # Failed for some reason, perhaps due to invalid data supplied,
                # rollback savepoint and keep going
                cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
        if any(message['type'] == 'error' for message in messages):
            cr.execute('ROLLBACK TO SAVEPOINT model_load')
            ids = False
        return {'ids': ids, 'messages': messages}
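
    # For example (hypothetical values), loading a CSV-like matrix:
    #
    #     result = model.load(cr, uid,
    #         ['id', 'name', 'child_ids/name'],
    #         [['my_module.partner_a', 'Agrolait', 'Contact A'],
    #          ['', '', 'Contact B']])
    #     # result['ids'] -> created/updated ids, or False on error
    #     # result['messages'] -> import warnings and errors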

    def _extract_records(self, cr, uid, fields_, data,
                         context=None, log=lambda a: None):
        """ Generates record dicts from the data sequence.

        The result is a generator of dicts mapping field names to raw
        (unconverted, unvalidated) values.

        For relational fields, if sub-fields were provided the value will be
        a list of sub-records

        The following sub-fields may be set on the record (by key):
        * None is the name_get for the record (to use with name_create/name_search)
        * "id" is the External ID for the record
        * ".id" is the Database ID for the record
        """
        columns = dict((k, v.column) for k, v in self._all_columns.iteritems())
        # Fake columns to avoid special cases in extractor
        columns[None] = fields.char('rec_name')
        columns['id'] = fields.char('External ID')
        columns['.id'] = fields.integer('Database ID')

        # m2o fields can't be on multiple lines so exclude them from the
        # is_relational field rows filter, but special-case it later on to
        # be handled with relational fields (as it can have subfields)
        is_relational = lambda field: columns[field]._type in ('one2many', 'many2many', 'many2one')
        get_o2m_values = itemgetter_tuple(
            [index for index, field in enumerate(fields_)
                   if columns[field[0]]._type == 'one2many'])
        get_nono2m_values = itemgetter_tuple(
            [index for index, field in enumerate(fields_)
                   if columns[field[0]]._type != 'one2many'])
        # Checks if the provided row has any non-empty non-relational field
        def only_o2m_values(row, f=get_nono2m_values, g=get_o2m_values):
            return any(g(row)) and not any(f(row))

        index = 0
        while True:
            if index >= len(data): return

            row = data[index]
            # copy non-relational fields to record dict
            record = dict((field[0], value)
                          for field, value in itertools.izip(fields_, row)
                          if not is_relational(field[0]))

            # Get all following rows which have relational values attached to
            # the current record (no non-relational values)
            record_span = itertools.takewhile(
                only_o2m_values, itertools.islice(data, index + 1, None))
            # stitch record row back on for relational fields
            record_span = list(itertools.chain([row], record_span))
            for relfield in set(
                    field[0] for field in fields_
                             if is_relational(field[0])):
                column = columns[relfield]
                # FIXME: how to not use _obj without relying on fields_get?
                Model = self.pool[column._obj]

                # get only cells for this sub-field, should be strictly
                # non-empty, field path [None] is for name_get column
                indices, subfields = zip(*((index, field[1:] or [None])
                                           for index, field in enumerate(fields_)
                                           if field[0] == relfield))

                # return all rows which have at least one value for the
                # subfields of relfield
                relfield_data = filter(any, map(itemgetter_tuple(indices), record_span))
                record[relfield] = [subrecord
                    for subrecord, _subinfo in Model._extract_records(
                        cr, uid, subfields, relfield_data,
                        context=context, log=log)]

            yield record, {'rows': {
                'from': index,
                'to': index + len(record_span) - 1,
            }}
            index += len(record_span)
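
    # For example (hypothetical input), with fields_
    #     [['name'], ['child_ids', 'name']]
    # and data
    #     [['Agrolait', 'Contact A'],
    #      ['',         'Contact B']]
    # the generator yields a single record spanning both rows:
    #     {'name': 'Agrolait',
    #      'child_ids': [{'name': 'Contact A'}, {'name': 'Contact B'}]}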

    def _convert_records(self, cr, uid, records,
                         context=None, log=lambda a: None):
        """ Converts records from the source iterable (recursive dicts of
        strings) into forms which can be written to the database (via
        self.create or (ir.model.data)._update)

        :returns: a list of triplets of (id, xid, record)
        :rtype: list((int|None, str|None, dict))
        """
        if context is None: context = {}
        Converter = self.pool['ir.fields.converter']
        columns = dict((k, v.column) for k, v in self._all_columns.iteritems())
        Translation = self.pool['ir.translation']
        field_names = dict(
            (f, (Translation._get_source(cr, uid, self._name + ',' + f, 'field',
                                         context.get('lang'))
                 or column.string))
            for f, column in columns.iteritems())

        convert = Converter.for_model(cr, uid, self, context=context)

        def _log(base, field, exception):
            type = 'warning' if isinstance(exception, Warning) else 'error'
            # logs the logical (not human-readable) field name for automated
            # processing of response, but injects human readable in message
            record = dict(base, type=type, field=field,
                          message=unicode(exception.args[0]) % base)
            if len(exception.args) > 1 and exception.args[1]:
                record.update(exception.args[1])
            log(record)

        stream = CountingStream(records)
        for record, extras in stream:
            dbid = False
            xid = False
            # name_get/name_create
            if None in record: pass
            # xid
            if 'id' in record:
                xid = record['id']
            # dbid
            if '.id' in record:
                try:
                    dbid = int(record['.id'])
                except ValueError:
                    # in case of overridden id column
                    dbid = record['.id']
                if not self.search(cr, uid, [('id', '=', dbid)], context=context):
                    log(dict(extras,
                        type='error',
                        record=stream.index,
                        field='.id',
                        message=_(u"Unknown database identifier '%s'") % dbid))
                    dbid = False

            converted = convert(record, lambda field, err: \
                _log(dict(extras, record=stream.index, field=field_names[field]), field, err))

            yield dbid, xid, converted, dict(extras, record=stream.index)

    @api.multi
    def _validate_fields(self, field_names):
        field_names = set(field_names)

        # old-style constraint methods
        trans = self.env['ir.translation']
        cr, uid, context = self.env.args
        ids = self.ids
        errors = []
        for fun, msg, names in self._constraints:
            try:
                # validation must be context-independent; call `fun` without context
                valid = not (set(names) & field_names) or fun(self._model, cr, uid, ids)
                extra_error = None
            except Exception, e:
                _logger.debug('Exception while validating constraint', exc_info=True)
                valid = False
                extra_error = tools.ustr(e)
            if not valid:
                if callable(msg):
                    res_msg = msg(self._model, cr, uid, ids, context=context)
                    if isinstance(res_msg, tuple):
                        template, params = res_msg
                        res_msg = template % params
                else:
                    res_msg = trans._get_source(self._name, 'constraint', self.env.lang, msg)
                if extra_error:
                    res_msg += "\n\n%s\n%s" % (_('Error details:'), extra_error)
                errors.append(
                    _("Field(s) `%s` failed against a constraint: %s") %
                    (', '.join(names), res_msg)
                )
        if errors:
            raise ValidationError('\n'.join(errors))

        # new-style constraint methods
        for check in self._constraint_methods:
            if set(check._constrains) & field_names:
                try:
                    check(self)
                except ValidationError, e:
                    raise
                except Exception, e:
                    raise ValidationError("Error while validating constraint\n\n%s" % tools.ustr(e))

    def default_get(self, cr, uid, fields_list, context=None):
        """ default_get(fields) -> default_values

        Return default values for the fields in `fields_list`. Default
        values are determined by the context, user defaults, and the model
        itself.

        :param fields_list: a list of field names
        :return: a dictionary mapping each field name to its corresponding
            default value; the keys of the dictionary are the fields in
            `fields_list` that have a default value different from ``False``.

        This method should not be overridden. In order to change the
        mechanism for determining default values, you should override method
        :meth:`add_default_value` instead.
        """
        # trigger view init hook
        self.view_init(cr, uid, fields_list, context)

        # use a new record to determine default values; evaluate fields on the
        # new record and put default values in result
        record = self.new(cr, uid, {}, context=context)
        result = {}
        for name in fields_list:
            if name in self._fields:
                value = record[name]
                if name in record._cache:
                    result[name] = value        # it really is a default value

        # convert default values to the expected format
        result = self._convert_to_write(result)
        return result
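
    # For example (hypothetical field), a context default takes precedence:
    #
    #     model.default_get(cr, uid, ['name'], context={'default_name': 'Foo'})
    #     # -> {'name': 'Foo'}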

    def add_default_value(self, field):
        """ Set the default value of `field` to the new record `self`.
            The value must be assigned to `self`.
        """
        assert not self.id, "Expected new record: %s" % self
        cr, uid, context = self.env.args
        name = field.name

        # 1. look up context
        key = 'default_' + name
        if key in context:
            self[name] = context[key]
            return

        # 2. look up ir_values
        #    Note: performance is good, because get_defaults_dict is cached!
        ir_values_dict = self.env['ir.values'].get_defaults_dict(self._name)
        if name in ir_values_dict:
            self[name] = ir_values_dict[name]
            return

        # 3. look up property fields
        #    TODO: get rid of this one
        column = self._columns.get(name)
        if isinstance(column, fields.property):
            self[name] = self.env['ir.property'].get(name, self._name)
            return

        # 4. look up _defaults
        if name in self._defaults:
            value = self._defaults[name]
            if callable(value):
                value = value(self._model, cr, uid, context)
            self[name] = value
            return

        # 5. delegate to field
        field.determine_default(self)

    def fields_get_keys(self, cr, user, context=None):
        res = self._columns.keys()
        # TODO I believe this loop can be replaced by
        # res.extend(self._inherit_fields.keys())
        for parent in self._inherits:
            res.extend(self.pool[parent].fields_get_keys(cr, user, context))
        return res

    def _rec_name_fallback(self, cr, uid, context=None):
        rec_name = self._rec_name
        if rec_name not in self._columns:
            rec_name = self._columns.keys()[0] if len(self._columns.keys()) > 0 else "id"
        return rec_name

    #
    # Overload this method if you need a window title that depends on the context
    #
    def view_header_get(self, cr, user, view_id=None, view_type='form', context=None):
        return False

    def user_has_groups(self, cr, uid, groups, context=None):
        """Return True if the user is at least a member of one of the groups
           in `groups`. Typically used to resolve the `groups` attribute
           in view and model definitions.

           :param str groups: comma-separated list of fully-qualified group
                              external IDs, e.g.: ``base.group_user,base.group_system``
           :return: True if the current user is a member of one of the
                    given groups
        """
        return any(self.pool['res.users'].has_group(cr, uid, group_ext_id)
                   for group_ext_id in groups.split(','))
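
    # For example:
    #
    #     self.user_has_groups(cr, uid, 'base.group_user,base.group_system')
    #     # -> True if uid belongs to at least one of the two groups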

    def _get_default_form_view(self, cr, user, context=None):
        """ Generates a default single-line form view using all fields
        of the current model except the m2m and o2m ones.

        :param cr: database cursor
        :param int user: user id
        :param dict context: connection context
        :returns: a form view as an lxml document
        :rtype: etree._Element
        """
        view = etree.Element('form', string=self._description)
        group = etree.SubElement(view, 'group', col="4")
        for fname, field in self._fields.iteritems():
            if field.automatic or field.type in ('one2many', 'many2many'):
                continue

            etree.SubElement(group, 'field', name=fname)
            if field.type == 'text':
                etree.SubElement(group, 'newline')
        return view

    def _get_default_search_view(self, cr, user, context=None):
        """ Generates a single-field search view, based on _rec_name.

        :param cr: database cursor
        :param int user: user id
        :param dict context: connection context
        :returns: a search view as an lxml document
        :rtype: etree._Element
        """
        view = etree.Element('search', string=self._description)
        etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))
        return view

    def _get_default_tree_view(self, cr, user, context=None):
        """ Generates a single-field tree view, based on _rec_name.

        :param cr: database cursor
        :param int user: user id
        :param dict context: connection context
        :returns: a tree view as an lxml document
        :rtype: etree._Element
        """
        view = etree.Element('tree', string=self._description)
        etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))
        return view

    def _get_default_calendar_view(self, cr, user, context=None):
        """ Generates a default calendar view by trying to infer
        calendar fields from a number of pre-set attribute names

        :param cr: database cursor
        :param int user: user id
        :param dict context: connection context
        :returns: a calendar view
        :rtype: etree._Element
        """
        def set_first_of(seq, in_, to):
            """Sets the first value of `seq` also found in `in_` to
            the `to` attribute of the view being closed over.

            Returns whether it's found a suitable value (and set it on
            the attribute) or not
            """
            for item in seq:
                if item in in_:
                    view.set(to, item)
                    return True
            return False

        view = etree.Element('calendar', string=self._description)
        etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))

        if self._date_name not in self._columns:
            date_found = False
            for dt in ['date', 'date_start', 'x_date', 'x_date_start']:
                if dt in self._columns:
                    self._date_name = dt
                    date_found = True
                    break

            if not date_found:
                raise except_orm(_('Invalid Object Architecture!'), _("Insufficient fields for Calendar View!"))
        view.set('date_start', self._date_name)

        set_first_of(["user_id", "partner_id", "x_user_id", "x_partner_id"],
                     self._columns, 'color')

        if not set_first_of(["date_stop", "date_end", "x_date_stop", "x_date_end"],
                            self._columns, 'date_stop'):
            if not set_first_of(["date_delay", "planned_hours", "x_date_delay", "x_planned_hours"],
                                self._columns, 'date_delay'):
                raise except_orm(
                    _('Invalid Object Architecture!'),
                    _("Insufficient fields to generate a Calendar View for %s, missing a date_stop or a date_delay") % self._name)

        return view

    def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
        """ fields_view_get([view_id | view_type='form'])

        Get the detailed composition of the requested view like fields, model, view architecture

        :param view_id: id of the view or None
        :param view_type: type of the view to return if view_id is None ('form', 'tree', ...)
        :param toolbar: true to include contextual actions
        :param submenu: deprecated
        :return: dictionary describing the composition of the requested view (including inherited views and extensions)
        :raise AttributeError:
            * if the inherited view has an unknown position to work with, other than 'before', 'after', 'inside', 'replace'
            * if some tag other than 'position' is found in the parent view
        :raise Invalid ArchitectureError: if a view type other than form, tree, calendar, search, etc. is defined on the structure
        """
        if context is None:
            context = {}
        View = self.pool['ir.ui.view']

        result = {
            'model': self._name,
            'field_parent': False,
        }

        # try to find a view_id if none provided
        if not view_id:
            # <view_type>_view_ref in context can be used to override the default view
            view_ref_key = view_type + '_view_ref'
            view_ref = context.get(view_ref_key)
            if view_ref:
                if '.' in view_ref:
                    module, view_ref = view_ref.split('.', 1)
                    cr.execute("SELECT res_id FROM ir_model_data WHERE model='ir.ui.view' AND module=%s AND name=%s", (module, view_ref))
                    view_ref_res = cr.fetchone()
                    if view_ref_res:
                        view_id = view_ref_res[0]
                else:
                    _logger.warning('%r requires a fully-qualified external id (got: %r for model %s). '
                        'Please use the complete `module.view_id` form instead.', view_ref_key, view_ref,
                        self._name)

            if not view_id:
                # otherwise try to find the lowest priority matching ir.ui.view
                view_id = View.default_view(cr, uid, self._name, view_type, context=context)

        # context for post-processing might be overridden
        ctx = context
        if view_id:
            # read the view with inherited views applied
            root_view = View.read_combined(cr, uid, view_id, fields=['id', 'name', 'field_parent', 'type', 'model', 'arch'], context=context)
            result['arch'] = root_view['arch']
            result['name'] = root_view['name']
            result['type'] = root_view['type']
            result['view_id'] = root_view['id']
            result['field_parent'] = root_view['field_parent']
            # override context for postprocessing
            if root_view.get('model') != self._name:
                ctx = dict(context, base_model_name=root_view.get('model'))
        else:
            # fallback on default views methods if no ir.ui.view could be found
            try:
                get_func = getattr(self, '_get_default_%s_view' % view_type)
                arch_etree = get_func(cr, uid, context)
                result['arch'] = etree.tostring(arch_etree, encoding='utf-8')
                result['type'] = view_type
                result['name'] = 'default'
            except AttributeError:
                raise except_orm(_('Invalid Architecture!'), _("No default view of type '%s' could be found !") % view_type)

        # Apply post processing, groups and modifiers etc...
        xarch, xfields = View.postprocess_and_fields(cr, uid, self._name, etree.fromstring(result['arch']), view_id, context=ctx)
        result['arch'] = xarch
        result['fields'] = xfields

        # Add related action information if asked
        if toolbar:
            toclean = ('report_sxw_content', 'report_rml_content', 'report_sxw', 'report_rml', 'report_sxw_content_data', 'report_rml_content_data')
            def clean(x):
                x = x[2]
                for key in toclean:
                    if key in x:
                        del x[key]
                return x
            ir_values_obj = self.pool.get('ir.values')
            resprint = ir_values_obj.get(cr, uid, 'action', 'client_print_multi', [(self._name, False)], False, context)
            resaction = ir_values_obj.get(cr, uid, 'action', 'client_action_multi', [(self._name, False)], False, context)
            resrelate = ir_values_obj.get(cr, uid, 'action', 'client_action_relate', [(self._name, False)], False, context)
            resaction = [clean(action) for action in resaction if view_type == 'tree' or not action[2].get('multi')]
            resprint = [clean(print_) for print_ in resprint if view_type == 'tree' or not print_[2].get('multi')]
            # When multi="True" is set, the action is displayed only in the
            # More menu of the list view
            resrelate = [clean(action) for action in resrelate
                         if (action[2].get('multi') and view_type == 'tree') or (not action[2].get('multi') and view_type == 'form')]

            for x in itertools.chain(resprint, resaction, resrelate):
                x['string'] = x['name']

            result['toolbar'] = {
                'print': resprint,
                'action': resaction,
                'relate': resrelate,
            }
        return result

    def get_formview_id(self, cr, uid, id, context=None):
        """ Return a view id to open the document with. This method is meant to be
            overridden in addons that want to give specific view ids, for example.

            :param int id: id of the document to open
        """
        return False

    def get_formview_action(self, cr, uid, id, context=None):
        """ Return an action to open the document. This method is meant to be
            overridden in addons that want to give specific view ids, for example.

            :param int id: id of the document to open
        """
        view_id = self.get_formview_id(cr, uid, id, context=context)
        return {
            'type': 'ir.actions.act_window',
            'res_model': self._name,
            'view_type': 'form',
            'view_mode': 'form',
            'views': [(view_id, 'form')],
            'target': 'current',
            'res_id': id,
        }

    def get_access_action(self, cr, uid, id, context=None):
        """ Return an action to open the document. This method is meant to be
            overridden in addons that want to give specific access to the document.
            By default it opens the formview of the document.

            :param int id: id of the document to open
        """
        return self.get_formview_action(cr, uid, id, context=context)

    def _view_look_dom_arch(self, cr, uid, node, view_id, context=None):
        return self.pool['ir.ui.view'].postprocess_and_fields(
            cr, uid, self._name, node, view_id, context=context)

    def search_count(self, cr, user, args, context=None):
        """ search_count(args) -> int

        Returns the number of records in the current model matching :ref:`the
        provided domain <reference/orm/domains>`.
        """
        res = self.search(cr, user, args, context=context, count=True)
        if isinstance(res, list):
            return len(res)
        return res

    @api.returns('self')
    def search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False):
        """ search(args[, offset=0][, limit=None][, order=None][, count=False])

        Searches for records based on the ``args``
        :ref:`search domain <reference/orm/domains>`.

        :param args: :ref:`A search domain <reference/orm/domains>`. Use an empty
                     list to match all records.
        :param int offset: number of results to ignore (default: none)
        :param int limit: maximum number of records to return (default: all)
        :param str order: sort string
        :param bool count: if ``True``, the call should return the number of
                           records matching ``args`` rather than the records
                           themselves.
        :returns: at most ``limit`` records matching the search criteria

        :raise AccessError: * if user tries to bypass access rules for read on the requested object.
        """
        return self._search(cr, user, args, offset=offset, limit=limit, order=order, context=context, count=count)
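
    # For example (hypothetical domain), fetching at most ten matching ids:
    #
    #     ids = model.search(cr, uid,
    #         [('name', 'ilike', 'agro'), ('customer', '=', True)],
    #         limit=10, order='name asc')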

    #
    # display_name, name_get, name_create, name_search
    #

    @api.depends(lambda self: (self._rec_name,) if self._rec_name else ())
    def _compute_display_name(self):
        for i, got_name in enumerate(self.name_get()):
            self[i].display_name = got_name[1]
1687 """ name_get() -> [(id, name), ...]
1689 Returns a textual representation for the records in ``self``.
1690 By default this is the value of the ``display_name`` field.
1692 :return: list of pairs ``(id, text_repr)`` for each records
1696 name = self._rec_name
1697 if name in self._fields:
1698 convert = self._fields[name].convert_to_display_name
1700 result.append((record.id, convert(record[name])))
1703 result.append((record.id, "%s,%s" % (record._name, record.id)))
1707 @api.model
1708 def name_create(self, name):
1709 """ name_create(name) -> record
1711 Create a new record by calling :meth:`~.create` with only one value
1712 provided: the display name of the new record.
1714 The new record will be initialized with any default values
1715 applicable to this model, or provided through the context. The usual
1716 behavior of :meth:`~.create` applies.
1718 :param name: display name of the record to create
1720 :return: the :meth:`~.name_get` pair value of the created record
1722 if self._rec_name:
1723 record = self.create({self._rec_name: name})
1724 return record.name_get()[0]
1725 else:
1726 _logger.warning("Cannot execute name_create, no _rec_name defined on %s", self._name)
1727 return False
1729 @api.model
1730 def name_search(self, name='', args=None, operator='ilike', limit=100):
1731 """ name_search(name='', args=None, operator='ilike', limit=100) -> records
1733 Search for records that have a display name matching the given
1734 `name` pattern when compared with the given `operator`, while also
1735 matching the optional search domain (`args`).
1737 This is used for example to provide suggestions based on a partial
1738 value for a relational field. It may sometimes be seen as the inverse
1739 function of :meth:`~.name_get`, but it is not guaranteed to be one.
1741 This method is equivalent to calling :meth:`~.search` with a search
1742 domain based on ``display_name`` and then :meth:`~.name_get` on the
1743 result of the search.
1745 :param str name: the name pattern to match
1746 :param list args: optional search domain (see :meth:`~.search` for
1747 syntax), specifying further restrictions
1748 :param str operator: domain operator for matching `name`, such as
1749 ``'like'`` or ``'='``.
1750 :param int limit: optional max number of records to return
1752 :return: list of pairs ``(id, text_repr)`` for all matching records.
1753 """
1754 return self._name_search(name, args, operator, limit=limit)
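# Usage sketch (new-style API, hypothetical model/field/data): suggest
# records whose display name contains a typed fragment, within an extra
# domain:
#
#     env['my.model'].name_search('agro', args=[('customer', '=', True)],
#                                 operator='ilike', limit=8)
#     # -> [(7, 'Agrolait'), ...] as (id, text_repr) pairs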
1756 def _name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100, name_get_uid=None):
1757 # private implementation of name_search, allows passing a dedicated user
1758 # for the name_get part to solve some access rights issues
1759 args = list(args or [])
1760 # optimize out the default criterion of ``ilike ''`` that matches everything
1761 if not self._rec_name:
1762 _logger.warning("Cannot execute name_search, no _rec_name defined on %s", self._name)
1763 elif not (name == '' and operator == 'ilike'):
1764 args += [(self._rec_name, operator, name)]
1765 access_rights_uid = name_get_uid or user
1766 ids = self._search(cr, user, args, limit=limit, context=context, access_rights_uid=access_rights_uid)
1767 res = self.name_get(cr, access_rights_uid, ids, context)
1768 return res
1770 def read_string(self, cr, uid, id, langs, fields=None, context=None):
1771 res = {}
1772 res2 = {}
1773 self.pool.get('ir.translation').check_access_rights(cr, uid, 'read')
1774 if not fields:
1775 fields = self._columns.keys() + self._inherit_fields.keys()
1776 #FIXME: collect all calls to _get_source into one SQL call.
1777 for lang in langs:
1778 res[lang] = {'code': lang}
1779 for f in fields:
1780 if f in self._columns:
1781 res_trans = self.pool.get('ir.translation')._get_source(cr, uid, self._name+','+f, 'field', lang)
1782 if res_trans:
1783 res[lang][f] = res_trans
1784 else:
1785 res[lang][f] = self._columns[f].string
1786 for table in self._inherits:
1787 cols = intersect(self._inherit_fields.keys(), fields)
1788 res2 = self.pool[table].read_string(cr, uid, id, langs, cols, context)
1789 for lang in res2:
1790 if lang in res:
1791 res[lang]['code'] = lang
1792 for f in res2[lang]:
1793 res[lang][f] = res2[lang][f]
1794 return res
1796 def write_string(self, cr, uid, id, langs, vals, context=None):
1797 self.pool.get('ir.translation').check_access_rights(cr, uid, 'write')
1798 #FIXME: try to only call the translation in one SQL
1799 for lang in langs:
1800 for field in vals:
1801 if field in self._columns:
1802 src = self._columns[field].string
1803 self.pool.get('ir.translation')._set_ids(cr, uid, self._name+','+field, 'field', lang, [0], vals[field], src)
1804 for table in self._inherits:
1805 cols = intersect(self._inherit_fields.keys(), vals)
1806 if cols:
1807 self.pool[table].write_string(cr, uid, id, langs, vals, context)
1808 return True
1810 def _add_missing_default_values(self, cr, uid, values, context=None):
1811 # avoid overriding inherited values when parent is set
1812 avoid_tables = []
1813 for tables, parent_field in self._inherits.items():
1814 if parent_field in values:
1815 avoid_tables.append(tables)
1817 # compute missing fields
1818 missing_defaults = set()
1819 for field in self._columns.keys():
1820 if field not in values:
1821 missing_defaults.add(field)
1822 for field in self._inherit_fields.keys():
1823 if (field not in values) and (self._inherit_fields[field][0] not in avoid_tables):
1824 missing_defaults.add(field)
1825 # discard magic fields
1826 missing_defaults -= set(MAGIC_COLUMNS)
1828 if missing_defaults:
1829 # override defaults with the provided values, never allow the other way around
1830 defaults = self.default_get(cr, uid, list(missing_defaults), context)
1831 for dv in defaults:
1832 if ((dv in self._columns and self._columns[dv]._type == 'many2many') \
1833 or (dv in self._inherit_fields and self._inherit_fields[dv][2]._type == 'many2many')) \
1834 and defaults[dv] and isinstance(defaults[dv][0], (int, long)):
1835 defaults[dv] = [(6, 0, defaults[dv])]
1836 if (dv in self._columns and self._columns[dv]._type == 'one2many' \
1837 or (dv in self._inherit_fields and self._inherit_fields[dv][2]._type == 'one2many')) \
1838 and isinstance(defaults[dv], (list, tuple)) and defaults[dv] and isinstance(defaults[dv][0], dict):
1839 defaults[dv] = [(0, 0, x) for x in defaults[dv]]
1840 defaults.update(values)
1841 values = defaults
1842 return values
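# Illustrative sketch (hypothetical fields): a many2many default of
# [1, 2] coming from default_get is rewritten above into the command
# [(6, 0, [1, 2])], and a one2many default of [{'name': 'x'}] becomes
# [(0, 0, {'name': 'x'})], before the provided values override the defaults.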
1844 def clear_caches(self):
1845 """ Clear the caches
1847 This clears the caches associated to methods decorated with
1848 ``tools.ormcache`` or ``tools.ormcache_multi``.
1850 try:
1851 self._ormcache.clear()
1852 self.pool._any_cache_cleared = True
1853 except AttributeError:
1854 pass
1857 def _read_group_fill_results(self, cr, uid, domain, groupby, remaining_groupbys,
1858 aggregated_fields, count_field,
1859 read_group_result, read_group_order=None, context=None):
1860 """Helper method for filling in empty groups for all possible values of
1861 the field being grouped by"""
1863 # self._group_by_full should map groupable fields to a method that returns
1864 # a list of all aggregated values that we want to display for this field,
1865 # in the form of a m2o-like pair (key,label).
1866 # This is useful to implement kanban views for instance, where all columns
1867 # should be displayed even if they don't contain any record.
1869 # Grab the list of all groups that should be displayed, including all present groups
1870 present_group_ids = [x[groupby][0] for x in read_group_result if x[groupby]]
1871 all_groups, folded = self._group_by_full[groupby](self, cr, uid, present_group_ids, domain,
1872 read_group_order=read_group_order,
1873 access_rights_uid=openerp.SUPERUSER_ID,
1874 context=context)
1876 result_template = dict.fromkeys(aggregated_fields, False)
1877 result_template[groupby + '_count'] = 0
1878 if remaining_groupbys:
1879 result_template['__context'] = {'group_by': remaining_groupbys}
1881 # Merge the left_side (current results as dicts) with the right_side (all
1882 # possible values as m2o pairs). Both lists are supposed to be using the
1883 # same ordering, and can be merged in one pass.
1884 result = []
1885 known_values = {}
1886 def append_left(left_side):
1887 grouped_value = left_side[groupby] and left_side[groupby][0]
1888 if grouped_value not in known_values:
1889 result.append(left_side)
1890 known_values[grouped_value] = left_side
1891 else:
1892 known_values[grouped_value].update({count_field: left_side[count_field]})
1893 def append_right(right_side):
1894 grouped_value = right_side[0]
1895 if grouped_value not in known_values:
1896 line = dict(result_template)
1897 line[groupby] = right_side
1898 line['__domain'] = [(groupby,'=',grouped_value)] + domain
1899 result.append(line)
1900 known_values[grouped_value] = line
1901 while read_group_result or all_groups:
1902 left_side = read_group_result[0] if read_group_result else None
1903 right_side = all_groups[0] if all_groups else None
1904 assert left_side is None or left_side[groupby] is False \
1905 or isinstance(left_side[groupby], (tuple,list)), \
1906 'M2O-like pair expected, got %r' % left_side[groupby]
1907 assert right_side is None or isinstance(right_side, (tuple,list)), \
1908 'M2O-like pair expected, got %r' % right_side
1909 if left_side is None:
1910 append_right(all_groups.pop(0))
1911 elif right_side is None:
1912 append_left(read_group_result.pop(0))
1913 elif left_side[groupby] == right_side:
1914 append_left(read_group_result.pop(0))
1915 all_groups.pop(0) # discard right_side
1916 elif not left_side[groupby] or not left_side[groupby][0]:
1917 # left side == "Undefined" entry, not present on right_side
1918 append_left(read_group_result.pop(0))
1919 else:
1920 append_right(all_groups.pop(0))
1922 if folded:
1923 for r in result:
1924 r['__fold'] = folded.get(r[groupby] and r[groupby][0], False)
1925 return result
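# Illustrative sketch (hypothetical model/method names): a model can map a
# groupable field to a method returning all (id, name) pairs to display,
# so that e.g. empty kanban columns still show up:
#
#     def _read_group_stage_ids(self, cr, uid, ids, domain, read_group_order=None,
#                               access_rights_uid=None, context=None):
#         stage_obj = self.pool['my.stage']
#         stage_ids = stage_obj.search(cr, access_rights_uid or uid, [], context=context)
#         result = stage_obj.name_get(cr, access_rights_uid or uid, stage_ids, context=context)
#         return result, {}  # (all groups, folded flags per group)
#
#     _group_by_full = {'stage_id': _read_group_stage_ids}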
1927 def _read_group_prepare(self, orderby, aggregated_fields, annotated_groupbys, query):
1928 """
1929 Prepares the GROUP BY and ORDER BY terms for the read_group method. Adds the missing JOIN clause
1930 to the query if the ordering must be computed against a many2one field.
1931 :param orderby: the orderby definition in the form "%(field)s %(order)s"
1932 :param aggregated_fields: list of aggregated fields in the query
1933 :param annotated_groupbys: list of dictionaries returned by _read_group_process_groupby
1934 These dictionaries contain the qualified name of each groupby
1935 (fully qualified SQL name for the corresponding field),
1936 and the (non-raw) field name.
1937 :param osv.Query query: the query under construction
1938 :return: (groupby_terms, orderby_terms)
1939 """
1940 orderby_terms = []
1941 groupby_terms = [gb['qualified_field'] for gb in annotated_groupbys]
1942 groupby_fields = [gb['groupby'] for gb in annotated_groupbys]
1943 if not orderby:
1944 return groupby_terms, orderby_terms
1946 self._check_qorder(orderby)
1947 for order_part in orderby.split(','):
1948 order_split = order_part.split()
1949 order_field = order_split[0]
1950 if order_field in groupby_fields:
1952 if self._all_columns[order_field.split(':')[0]].column._type == 'many2one':
1953 order_clause = self._generate_order_by(order_part, query).replace('ORDER BY ', '')
1955 orderby_terms.append(order_clause)
1956 groupby_terms += [order_term.split()[0] for order_term in order_clause.split(',')]
1958 order = '"%s" %s' % (order_field, '' if len(order_split) == 1 else order_split[1])
1959 orderby_terms.append(order)
1960 elif order_field in aggregated_fields:
1961 orderby_terms.append(order_part)
1962 else:
1963 # Cannot order by a field that will not appear in the results (needs to be grouped or aggregated)
1964 _logger.warning('%s: read_group order by `%s` ignored, cannot sort on empty columns (not grouped/aggregated)',
1965 self._name, order_part)
1966 return groupby_terms, orderby_terms
1968 def _read_group_process_groupby(self, gb, query, context):
1969 """
1970 Helper method to collect important information about groupbys: raw
1971 field name, type, time information, qualified name, ...
1972 """
1973 split = gb.split(':')
1974 field_type = self._all_columns[split[0]].column._type
1975 gb_function = split[1] if len(split) == 2 else None
1976 temporal = field_type in ('date', 'datetime')
1977 tz_convert = field_type == 'datetime' and context.get('tz') in pytz.all_timezones
1978 qualified_field = self._inherits_join_calc(split[0], query)
1980 display_formats = {
1981 'day': 'dd MMM YYYY',
1982 'week': "'W'w YYYY",
1983 'month': 'MMMM YYYY',
1984 'quarter': 'QQQ YYYY',
1985 'year': 'YYYY',
1986 }
1987 time_intervals = {
1988 'day': dateutil.relativedelta.relativedelta(days=1),
1989 'week': datetime.timedelta(days=7),
1990 'month': dateutil.relativedelta.relativedelta(months=1),
1991 'quarter': dateutil.relativedelta.relativedelta(months=3),
1992 'year': dateutil.relativedelta.relativedelta(years=1)
1993 }
1994 if tz_convert:
1995 qualified_field = "timezone('%s', timezone('UTC',%s))" % (context.get('tz', 'UTC'), qualified_field)
1996 qualified_field = "date_trunc('%s', %s)" % (gb_function or 'month', qualified_field)
1997 if field_type == 'boolean':
1998 qualified_field = "coalesce(%s,false)" % qualified_field
1999 return {
2000 'field': split[0],
2001 'groupby': gb,
2002 'type': field_type,
2003 'display_format': display_formats[gb_function or 'month'] if temporal else None,
2004 'interval': time_intervals[gb_function or 'month'] if temporal else None,
2005 'tz_convert': tz_convert,
2006 'qualified_field': qualified_field
2007 }
2009 def _read_group_prepare_data(self, key, value, groupby_dict, context):
2010 """
2011 Helper method to sanitize the data received by read_group. None
2012 values are converted to False, and date/datetime values are formatted
2013 and corrected according to the timezones.
2014 """
2015 value = False if value is None else value
2016 gb = groupby_dict.get(key)
2017 if gb and gb['type'] in ('date', 'datetime') and value:
2018 if isinstance(value, basestring):
2019 dt_format = DEFAULT_SERVER_DATETIME_FORMAT if gb['type'] == 'datetime' else DEFAULT_SERVER_DATE_FORMAT
2020 value = datetime.datetime.strptime(value, dt_format)
2021 if gb['tz_convert']:
2022 value = pytz.timezone(context['tz']).localize(value)
2023 return value
2025 def _read_group_get_domain(self, groupby, value):
2026 """
2027 Helper method to construct the domain corresponding to a groupby and
2028 a given value. This is mostly relevant for date/datetime.
2029 """
2030 if groupby['type'] in ('date', 'datetime') and value:
2031 dt_format = DEFAULT_SERVER_DATETIME_FORMAT if groupby['type'] == 'datetime' else DEFAULT_SERVER_DATE_FORMAT
2032 domain_dt_begin = value
2033 domain_dt_end = value + groupby['interval']
2034 if groupby['tz_convert']:
2035 domain_dt_begin = domain_dt_begin.astimezone(pytz.utc)
2036 domain_dt_end = domain_dt_end.astimezone(pytz.utc)
2037 return [(groupby['field'], '>=', domain_dt_begin.strftime(dt_format)),
2038 (groupby['field'], '<', domain_dt_end.strftime(dt_format))]
2039 if groupby['type'] == 'many2one' and value:
2040 value = value[0]
2041 return [(groupby['field'], '=', value)]
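# Illustrative sketch: for a 'create_date:month' groupby and a group value
# of June 2014 (dates assumed, UTC), the returned domain would be:
#
#     [('create_date', '>=', '2014-06-01 00:00:00'),
#      ('create_date', '<', '2014-07-01 00:00:00')]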
2043 def _read_group_format_result(self, data, annotated_groupbys, groupby, groupby_dict, domain, context):
2044 """
2045 Helper method to format the data contained in the dictionary ``data`` by
2046 adding the domain corresponding to its values, the groupbys in the
2047 context, and by properly formatting the date/datetime values.
2048 """
2049 domain_group = [dom for gb in annotated_groupbys for dom in self._read_group_get_domain(gb, data[gb['groupby']])]
2050 for k,v in data.iteritems():
2051 gb = groupby_dict.get(k)
2052 if gb and gb['type'] in ('date', 'datetime') and v:
2053 data[k] = babel.dates.format_date(v, format=gb['display_format'], locale=context.get('lang', 'en_US'))
2055 data['__domain'] = domain_group + domain
2056 if len(groupby) - len(annotated_groupbys) >= 1:
2057 data['__context'] = {'group_by': groupby[len(annotated_groupbys):]}
2058 return data
2061 def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False, lazy=True):
2062 """
2063 Get the list of records in list view grouped by the given ``groupby`` fields
2065 :param cr: database cursor
2066 :param uid: current user id
2067 :param domain: list specifying search criteria [['field_name', 'operator', 'value'], ...]
2068 :param list fields: list of fields present in the list view specified on the object
2069 :param list groupby: list of groupby descriptions by which the records will be grouped.
2070 A groupby description is either a field (then it will be grouped by that field)
2071 or a string 'field:groupby_function'. Right now, the only functions supported
2072 are 'day', 'week', 'month', 'quarter' or 'year', and they only make sense for
2073 date/datetime fields.
2074 :param int offset: optional number of records to skip
2075 :param int limit: optional max number of records to return
2076 :param dict context: context arguments, like lang, time zone.
2077 :param list orderby: optional ``order by`` specification, for
2078 overriding the natural sort ordering of the
2079 groups, see also :py:meth:`~osv.osv.osv.search`
2080 (supported only for many2one fields currently)
2081 :param bool lazy: if true, the results are only grouped by the first groupby and the
2082 remaining groupbys are put in the __context key. If false, all the groupbys are
2083 done in one call.
2084 :return: list of dictionaries (one dictionary per record) containing:
2086 * the values of fields grouped by the fields in ``groupby`` argument
2087 * __domain: list of tuples specifying the search criteria
2088 * __context: dictionary with argument like ``groupby``
2089 :rtype: [{'field_name_1': value, ...}, ...]
2090 :raise AccessError: * if user has no read rights on the requested object
2091 * if user tries to bypass access rules for read on the requested object
2092 """
2093 if context is None:
2094 context = {}
2095 self.check_access_rights(cr, uid, 'read')
2096 query = self._where_calc(cr, uid, domain, context=context)
2097 fields = fields or self._columns.keys()
2099 groupby = [groupby] if isinstance(groupby, basestring) else groupby
2100 groupby_list = groupby[:1] if lazy else groupby
2101 annotated_groupbys = [self._read_group_process_groupby(gb, query, context)
2102 for gb in groupby_list]
2103 groupby_fields = [g['field'] for g in annotated_groupbys]
2104 order = orderby or ','.join(groupby_list)
2105 groupby_dict = {gb['groupby']: gb for gb in annotated_groupbys}
2107 self._apply_ir_rules(cr, uid, query, 'read', context=context)
2108 for gb in groupby_fields:
2109 assert gb in fields, "Fields in 'groupby' must appear in the list of fields to read (perhaps it's missing in the list view?)"
2110 groupby_def = self._columns.get(gb) or (self._inherit_fields.get(gb) and self._inherit_fields.get(gb)[2])
2111 assert groupby_def and groupby_def._classic_write, "Fields in 'groupby' must be regular database-persisted fields (no function or related fields), or function fields with store=True"
2112 if gb not in self._all_columns:
2113 # Don't allow arbitrary values, as this would be a SQL injection vector!
2114 raise except_orm(_('Invalid group_by'),
2115 _('Invalid group_by specification: "%s".\nA group_by specification must be a list of valid fields.')%(gb,))
2117 aggregated_fields = [
2118 f for f in fields
2119 if f not in ('id', 'sequence')
2120 if f not in groupby_fields
2121 if f in self._all_columns
2122 if self._all_columns[f].column._type in ('integer', 'float')
2123 if getattr(self._all_columns[f].column, '_classic_write')]
2125 field_formatter = lambda f: (self._all_columns[f].column.group_operator or 'sum', self._inherits_join_calc(f, query), f)
2126 select_terms = ["%s(%s) AS %s" % field_formatter(f) for f in aggregated_fields]
2128 for gb in annotated_groupbys:
2129 select_terms.append('%s as "%s" ' % (gb['qualified_field'], gb['groupby']))
2131 groupby_terms, orderby_terms = self._read_group_prepare(order, aggregated_fields, annotated_groupbys, query)
2132 from_clause, where_clause, where_clause_params = query.get_sql()
2133 if lazy and (len(groupby_fields) >= 2 or not context.get('group_by_no_leaf')):
2134 count_field = groupby_fields[0] if len(groupby_fields) >= 1 else '_'
2135 else:
2136 count_field = '_'
2137 count_field += '_count'
2139 prefix_terms = lambda prefix, terms: (prefix + " " + ",".join(terms)) if terms else ''
2140 prefix_term = lambda prefix, term: ('%s %s' % (prefix, term)) if term else ''
2142 query = """
2143 SELECT min(%(table)s.id) AS id, count(%(table)s.id) AS %(count_field)s %(extra_fields)s
2144 FROM %(from)s
2145 %(where)s
2146 %(groupby)s
2147 %(orderby)s
2148 %(limit)s
2149 %(offset)s
2150 """ % {
2151 'table': self._table,
2152 'count_field': count_field,
2153 'extra_fields': prefix_terms(',', select_terms),
2154 'from': from_clause,
2155 'where': prefix_term('WHERE', where_clause),
2156 'groupby': prefix_terms('GROUP BY', groupby_terms),
2157 'orderby': prefix_terms('ORDER BY', orderby_terms),
2158 'limit': prefix_term('LIMIT', int(limit) if limit else None),
2159 'offset': prefix_term('OFFSET', int(offset) if limit else None),
2161 cr.execute(query, where_clause_params)
2162 fetched_data = cr.dictfetchall()
2164 if not groupby_fields:
2165 return fetched_data
2167 many2onefields = [gb['field'] for gb in annotated_groupbys if gb['type'] == 'many2one']
2168 if many2onefields:
2169 data_ids = [r['id'] for r in fetched_data]
2170 many2onefields = list(set(many2onefields))
2171 data_dict = {d['id']: d for d in self.read(cr, uid, data_ids, many2onefields, context=context)}
2172 for d in fetched_data:
2173 d.update(data_dict[d['id']])
2175 data = map(lambda r: {k: self._read_group_prepare_data(k,v, groupby_dict, context) for k,v in r.iteritems()}, fetched_data)
2176 result = [self._read_group_format_result(d, annotated_groupbys, groupby, groupby_dict, domain, context) for d in data]
2177 if lazy and groupby_fields[0] in self._group_by_full:
2178 # Right now, read_group only fills results in lazy mode (by default).
2179 # If you need to have the empty groups in 'eager' mode, then the
2180 # method _read_group_fill_results needs to be completely reimplemented
2181 # in a sane way
2182 result = self._read_group_fill_results(cr, uid, domain, groupby_fields[0], groupby[len(annotated_groupbys):],
2183 aggregated_fields, count_field, result, read_group_order=order,
2184 context=context)
2186 return result
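# Usage sketch (hypothetical model and fields): monthly totals of an
# 'amount' column over a filtered set of records:
#
#     groups = model.read_group(cr, uid, [('state', '=', 'done')],
#                               fields=['amount'], groupby=['date:month'])
#     for g in groups:
#         print g['date:month'], g['amount'], g['__domain']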
2187 def _inherits_join_add(self, current_model, parent_model_name, query):
2188 """
2189 Add missing table SELECT and JOIN clause to ``query`` for reaching the parent table (no duplicates)
2190 :param current_model: current model object
2191 :param parent_model_name: name of the parent model for which the clauses should be added
2192 :param query: query object on which the JOIN should be added
2193 """
2194 inherits_field = current_model._inherits[parent_model_name]
2195 parent_model = self.pool[parent_model_name]
2196 parent_alias, parent_alias_statement = query.add_join((current_model._table, parent_model._table, inherits_field, 'id', inherits_field), implicit=True)
2197 return parent_alias
2199 def _inherits_join_calc(self, field, query):
2200 """
2201 Adds missing table select and join clause(s) to ``query`` for reaching
2202 the field coming from an '_inherits' parent table (no duplicates).
2204 :param field: name of inherited field to reach
2205 :param query: query object on which the JOIN should be added
2206 :return: qualified name of field, to be used in SELECT clause
2207 """
2208 current_table = self
2209 parent_alias = '"%s"' % current_table._table
2210 while field in current_table._inherit_fields and field not in current_table._columns:
2211 parent_model_name = current_table._inherit_fields[field][0]
2212 parent_table = self.pool[parent_model_name]
2213 parent_alias = self._inherits_join_add(current_table, parent_model_name, query)
2214 current_table = parent_table
2215 return '%s."%s"' % (parent_alias, field)
2217 def _parent_store_compute(self, cr):
2218 if not self._parent_store:
2219 return
2220 _logger.info('Computing parent left and right for table %s...', self._table)
2221 def browse_rec(root, pos=0):
2223 where = self._parent_name+'='+str(root)
2224 if not root:
2225 where = self._parent_name+' IS NULL'
2226 if self._parent_order:
2227 where += ' order by '+self._parent_order
2228 cr.execute('SELECT id FROM '+self._table+' WHERE '+where)
2229 pos2 = pos + 1
2230 for id in cr.fetchall():
2231 pos2 = browse_rec(id[0], pos2)
2232 cr.execute('update '+self._table+' set parent_left=%s, parent_right=%s where id=%s', (pos, pos2, root))
2233 return pos2 + 1
2234 query = 'SELECT id FROM '+self._table+' WHERE '+self._parent_name+' IS NULL'
2235 if self._parent_order:
2236 query += ' order by ' + self._parent_order
2237 pos = 0
2238 cr.execute(query)
2239 for (root,) in cr.fetchall():
2240 pos = browse_rec(root, pos)
2241 self.invalidate_cache(cr, SUPERUSER_ID, ['parent_left', 'parent_right'])
2242 return True
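# Illustrative sketch of the nested-set numbering computed above, for a
# two-record tree (root A with a single child B):
#
#     A: parent_left=0, parent_right=3
#     B: parent_left=1, parent_right=2
#
# Descendants of A are then all records with
# parent_left > A.parent_left AND parent_left < A.parent_right,
# which is what the child_of domain operator relies on.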
2244 def _update_store(self, cr, f, k):
2245 _logger.info("storing computed values of fields.function '%s'", k)
2246 ss = self._columns[k]._symbol_set
2247 update_query = 'UPDATE "%s" SET "%s"=%s WHERE id=%%s' % (self._table, k, ss[0])
2248 cr.execute('select id from '+self._table)
2249 ids_lst = map(lambda x: x[0], cr.fetchall())
2250 while ids_lst:
2251 iids = ids_lst[:AUTOINIT_RECALCULATE_STORED_FIELDS]
2252 ids_lst = ids_lst[AUTOINIT_RECALCULATE_STORED_FIELDS:]
2253 res = f.get(cr, self, iids, k, SUPERUSER_ID, {})
2254 for key, val in res.items():
2257 # if val is a many2one, just write the ID
2258 if type(val) == tuple:
2259 val = val[0]
2260 if val is not False:
2261 cr.execute(update_query, (ss[1](val), key))
2263 def _check_selection_field_value(self, cr, uid, field, value, context=None):
2264 """Raise except_orm if value is not among the valid values for the selection field"""
2265 if self._columns[field]._type == 'reference':
2266 val_model, val_id_str = value.split(',', 1)
2267 val_id = False
2268 try:
2269 val_id = long(val_id_str)
2270 except ValueError:
2271 pass
2272 if not val_id:
2273 raise except_orm(_('ValidateError'),
2274 _('Invalid value for reference field "%s.%s" (last part must be a non-zero integer): "%s"') % (self._table, field, value))
2275 val = val_model
2276 else:
2277 val = value
2278 if isinstance(self._columns[field].selection, (tuple, list)):
2279 if val in dict(self._columns[field].selection):
2280 return
2281 elif val in dict(self._columns[field].selection(self, cr, uid, context=context)):
2282 return
2283 raise except_orm(_('ValidateError'),
2284 _('The value "%s" for the field "%s.%s" is not in the selection') % (value, self._name, field))
2286 def _check_removed_columns(self, cr, log=False):
2287 # iterate on the database columns to drop the NOT NULL constraints
2288 # of fields which were required but have been removed (or will be added by another module)
2289 columns = [c for c in self._columns if not (isinstance(self._columns[c], fields.function) and not self._columns[c].store)]
2290 columns += MAGIC_COLUMNS
2291 cr.execute("SELECT a.attname, a.attnotnull"
2292 " FROM pg_class c, pg_attribute a"
2293 " WHERE c.relname=%s"
2294 " AND c.oid=a.attrelid"
2295 " AND a.attisdropped=%s"
2296 " AND pg_catalog.format_type(a.atttypid, a.atttypmod) NOT IN ('cid', 'tid', 'oid', 'xid')"
2297 " AND a.attname NOT IN %s", (self._table, False, tuple(columns))),
2299 for column in cr.dictfetchall():
2301 _logger.debug("column %s is in the table %s but not in the corresponding object %s",
2302 column['attname'], self._table, self._name)
2303 if column['attnotnull']:
2304 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, column['attname']))
2305 _schema.debug("Table '%s': column '%s': dropped NOT NULL constraint",
2306 self._table, column['attname'])
2308 def _save_constraint(self, cr, constraint_name, type, definition):
2309 """
2310 Record the creation of a constraint for this model, to make it possible
2311 to delete it later when the module is uninstalled. Type can be either
2312 'f' or 'u', depending on whether the constraint is a foreign key or not.
2313 """
2314 if not self._module:
2315 # no need to save constraints for custom models as they're not part
2316 # of a module
2317 return
2318 assert type in ('f', 'u')
2319 cr.execute("""
2320 SELECT type, definition FROM ir_model_constraint, ir_module_module
2321 WHERE ir_model_constraint.module=ir_module_module.id
2322 AND ir_model_constraint.name=%s
2323 AND ir_module_module.name=%s
2324 """, (constraint_name, self._module))
2325 constraints = cr.dictfetchone()
2326 if not constraints:
2327 cr.execute("""
2328 INSERT INTO ir_model_constraint
2329 (name, date_init, date_update, module, model, type, definition)
2330 VALUES (%s, now() AT TIME ZONE 'UTC', now() AT TIME ZONE 'UTC',
2331 (SELECT id FROM ir_module_module WHERE name=%s),
2332 (SELECT id FROM ir_model WHERE model=%s), %s, %s)""",
2333 (constraint_name, self._module, self._name, type, definition))
2334 elif constraints['type'] != type or (definition and constraints['definition'] != definition):
2335 cr.execute("""
2336 UPDATE ir_model_constraint
2337 SET date_update=now() AT TIME ZONE 'UTC', type=%s, definition=%s
2338 WHERE name=%s AND module = (SELECT id FROM ir_module_module WHERE name=%s)""",
2339 (type, definition, constraint_name, self._module))
2341 def _save_relation_table(self, cr, relation_table):
2342 """
2343 Record the creation of a many2many relation table for this model, to make it possible
2344 to delete it later when the module is uninstalled.
2345 """
2346 cr.execute("""
2347 SELECT 1 FROM ir_model_relation, ir_module_module
2348 WHERE ir_model_relation.module=ir_module_module.id
2349 AND ir_model_relation.name=%s
2350 AND ir_module_module.name=%s
2351 """, (relation_table, self._module))
2353 cr.execute("""INSERT INTO ir_model_relation (name, date_init, date_update, module, model)
2354 VALUES (%s, now() AT TIME ZONE 'UTC', now() AT TIME ZONE 'UTC',
2355 (SELECT id FROM ir_module_module WHERE name=%s),
2356 (SELECT id FROM ir_model WHERE model=%s))""",
2357 (relation_table, self._module, self._name))
2358 self.invalidate_cache(cr, SUPERUSER_ID)
2360 # checked version: for direct m2o starting from `self`
2361 def _m2o_add_foreign_key_checked(self, source_field, dest_model, ondelete):
2362 assert self.is_transient() or not dest_model.is_transient(), \
2363 'Many2One relationships from non-transient Model to TransientModel are forbidden'
2364 if self.is_transient() and not dest_model.is_transient():
2365 # TransientModel relationships to regular Models are annoying
2366 # usually because they could block deletion due to the FKs.
2367 # So unless stated otherwise we default them to ondelete=cascade.
2368 ondelete = ondelete or 'cascade'
2369 fk_def = (self._table, source_field, dest_model._table, ondelete or 'set null')
2370 self._foreign_keys.add(fk_def)
2371 _schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s", *fk_def)
2373 # unchecked version: for custom cases, such as m2m relationships
2374 def _m2o_add_foreign_key_unchecked(self, source_table, source_field, dest_model, ondelete):
2375 fk_def = (source_table, source_field, dest_model._table, ondelete or 'set null')
2376 self._foreign_keys.add(fk_def)
2377 _schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s", *fk_def)
2379 def _drop_constraint(self, cr, source_table, constraint_name):
2380 cr.execute("ALTER TABLE %s DROP CONSTRAINT %s" % (source_table,constraint_name))
2382 def _m2o_fix_foreign_key(self, cr, source_table, source_field, dest_model, ondelete):
2383 # Find FK constraint(s) currently established for the m2o field,
2384 # and see whether they are stale or not
2385 cr.execute("""SELECT confdeltype as ondelete_rule, conname as constraint_name,
2386 cl2.relname as foreign_table
2387 FROM pg_constraint as con, pg_class as cl1, pg_class as cl2,
2388 pg_attribute as att1, pg_attribute as att2
2389 WHERE con.conrelid = cl1.oid
2390 AND cl1.relname = %s
2391 AND con.confrelid = cl2.oid
2392 AND array_lower(con.conkey, 1) = 1
2393 AND con.conkey[1] = att1.attnum
2394 AND att1.attrelid = cl1.oid
2395 AND att1.attname = %s
2396 AND array_lower(con.confkey, 1) = 1
2397 AND con.confkey[1] = att2.attnum
2398 AND att2.attrelid = cl2.oid
2399 AND att2.attname = %s
2400 AND con.contype = 'f'""", (source_table, source_field, 'id'))
2401 constraints = cr.dictfetchall()
2403 if len(constraints) == 1:
2404 # Is it the right constraint?
2405 cons, = constraints
2406 if cons['ondelete_rule'] != POSTGRES_CONFDELTYPES.get((ondelete or 'set null').upper(), 'a')\
2407 or cons['foreign_table'] != dest_model._table:
2408 # Wrong FK: drop it and recreate
2409 _schema.debug("Table '%s': dropping obsolete FK constraint: '%s'",
2410 source_table, cons['constraint_name'])
2411 self._drop_constraint(cr, source_table, cons['constraint_name'])
2412 else:
2413 # it's all good, nothing to do!
2414 return
2415 elif constraints:
2416 # Multiple FKs found for the same field, drop them all, and re-create
2417 for cons in constraints:
2418 _schema.debug("Table '%s': dropping duplicate FK constraints: '%s'",
2419 source_table, cons['constraint_name'])
2420 self._drop_constraint(cr, source_table, cons['constraint_name'])
2422 # (re-)create the FK
2423 self._m2o_add_foreign_key_checked(source_field, dest_model, ondelete)
2426 def _set_default_value_on_column(self, cr, column_name, context=None):
2427 # ideally should use add_default_value but fails
2428 # due to ir.values not being ready
2430 # get old-style default
2431 default = self._defaults.get(column_name)
2432 if callable(default):
2433 default = default(self, cr, SUPERUSER_ID, context)
2435 # get new-style default if no old-style
2436 if default is None:
2437 record = self.new(cr, SUPERUSER_ID, context=context)
2438 field = self._fields[column_name]
2439 field.determine_default(record)
2440 defaults = dict(record._cache)
2441 if column_name in defaults:
2442 default = field.convert_to_write(defaults[column_name])
2444 column = self._columns[column_name]
2445 ss = column._symbol_set
2446 db_default = ss[1](default)
2447 # Write default if non-NULL, except for booleans for which False means
2448 # the same as NULL - this saves us an expensive query on large tables.
2449 write_default = (db_default is not None if column._type != 'boolean'
2450 else db_default)
2451 if write_default:
2452 _logger.debug("Table '%s': setting default value of new column %s to %r",
2453 self._table, column_name, default)
2454 query = 'UPDATE "%s" SET "%s"=%s WHERE "%s" is NULL' % (
2455 self._table, column_name, ss[0], column_name)
2456 cr.execute(query, (db_default,))
2457 # this is a disgrace
2460 def _auto_init(self, cr, context=None):
2462 """
2463 Call _field_create and, unless _auto is False:
2465 - create the corresponding table in database for the model,
2466 - possibly add the parent columns in database,
2467 - possibly add the columns 'create_uid', 'create_date', 'write_uid',
2468 'write_date' in database if _log_access is True (the default),
2469 - report on database columns that no longer exist in _columns,
2470 - remove NOT NULL constraints that no longer apply,
2471 - alter existing database columns to match _columns,
2472 - create database tables to match _columns,
2473 - add database indices to match _columns,
2474 - save in self._foreign_keys a list of foreign keys to create (see
2475 _auto_end).
2476 """
2478 self._foreign_keys = set()
2479 raise_on_invalid_object_name(self._name)
2482 store_compute = False
2483 stored_fields = [] # new-style stored fields with compute
2484 todo_end = []
2485 update_custom_fields = context.get('update_custom_fields', False)
2486 self._field_create(cr, context=context)
2487 create = not self._table_exist(cr)
2489 if self._auto:
2490 if create:
2491 self._create_table(cr)
2492 has_rows = False
2493 else:
2494 cr.execute('SELECT min(id) FROM "%s"' % (self._table,))
2495 has_rows = cr.fetchone()[0] is not None
2498 if self._parent_store:
2499 if not self._parent_columns_exist(cr):
2500 self._create_parent_columns(cr)
2501 store_compute = True
2503 self._check_removed_columns(cr, log=False)
2505 # iterate on the "object columns"
2506 column_data = self._select_column_data(cr)
2508 for k, f in self._columns.iteritems():
2509 if k == 'id': # FIXME: maybe id should be a regular column?
2510 continue
2511 # Don't update custom (also called manual) fields
2512 if f.manual and not update_custom_fields:
2513 continue
2515 if isinstance(f, fields.one2many):
2516 self._o2m_raise_on_missing_reference(cr, f)
2518 elif isinstance(f, fields.many2many):
2519 self._m2m_raise_or_create_relation(cr, f)
2521 else:
2522 res = column_data.get(k)
2524 # The field is not found as-is in database; check whether it
2525 # exists under an old name.
2526 if not res and hasattr(f, 'oldname'):
2527 res = column_data.get(f.oldname)
2529 cr.execute('ALTER TABLE "%s" RENAME "%s" TO "%s"' % (self._table, f.oldname, k))
2531 column_data[k] = res
2532 _schema.debug("Table '%s': renamed column '%s' to '%s'",
2533 self._table, f.oldname, k)
2535 # The field already exists in database. Possibly
2536 # change its type, rename it, drop it or change its
2537 # constraints.
2538 if res:
2539 f_pg_type = res['typname']
2540 f_pg_size = res['size']
2541 f_pg_notnull = res['attnotnull']
2542 if isinstance(f, fields.function) and not f.store and\
2543 not getattr(f, 'nodrop', False):
2544 _logger.info('column %s (%s) converted to a function, removed from table %s',
2545 k, f.string, self._table)
2546 cr.execute('ALTER TABLE "%s" DROP COLUMN "%s" CASCADE' % (self._table, k))
2548 _schema.debug("Table '%s': dropped column '%s' with cascade",
2552 f_obj_type = get_pg_type(f) and get_pg_type(f)[0]
2554 if f_obj_type:
2555 ok = False
2556 casts = [
2557 ('text', 'char', pg_varchar(f.size), '::%s' % pg_varchar(f.size)),
2558 ('varchar', 'text', 'TEXT', ''),
2559 ('int4', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
2560 ('date', 'datetime', 'TIMESTAMP', '::TIMESTAMP'),
2561 ('timestamp', 'date', 'date', '::date'),
2562 ('numeric', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
2563 ('float8', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
2564 ]
2565 if f_pg_type == 'varchar' and f._type == 'char' and f_pg_size and (f.size is None or f_pg_size < f.size):
2566 try:
2567 with cr.savepoint():
2568 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" TYPE %s' % (self._table, k, pg_varchar(f.size)))
2569 except psycopg2.NotSupportedError:
2570 # An in-place ALTER TABLE cannot be done because a view depends on this field.
2571 # Do a manual copy; this will drop the view (it will be recreated later).
2572 cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO temp_change_size' % (self._table, k))
2573 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, pg_varchar(f.size)))
2574 cr.execute('UPDATE "%s" SET "%s"=temp_change_size::%s' % (self._table, k, pg_varchar(f.size)))
2575 cr.execute('ALTER TABLE "%s" DROP COLUMN temp_change_size CASCADE' % (self._table,))
2577 _schema.debug("Table '%s': column '%s' (type varchar) changed size from %s to %s",
2578 self._table, k, f_pg_size or 'unlimited', f.size or 'unlimited')
2579 for c in casts:
2580 if (f_pg_type==c[0]) and (f._type==c[1]):
2581 if f_pg_type != f_obj_type:
2583 cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO __temp_type_cast' % (self._table, k))
2584 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, c[2]))
2585 cr.execute(('UPDATE "%s" SET "%s"= __temp_type_cast'+c[3]) % (self._table, k))
2586 cr.execute('ALTER TABLE "%s" DROP COLUMN __temp_type_cast CASCADE' % (self._table,))
2588 _schema.debug("Table '%s': column '%s' changed type from %s to %s",
2589 self._table, k, c[0], c[1])
2590 break
2592 if f_pg_type != f_obj_type:
2593 if not ok:
2594 i = 0
2595 while True:
2596 newname = k + '_moved' + str(i)
2597 cr.execute("SELECT count(1) FROM pg_class c,pg_attribute a " \
2598 "WHERE c.relname=%s " \
2599 "AND a.attname=%s " \
2600 "AND c.oid=a.attrelid ", (self._table, newname))
2601 if not cr.fetchone()[0]:
2602 break
2603 i += 1
2604 if f_pg_notnull:
2605 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, k))
2606 cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO "%s"' % (self._table, k, newname))
2607 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, get_pg_type(f)[1]))
2608 cr.execute("COMMENT ON COLUMN %s.\"%s\" IS %%s" % (self._table, k), (f.string,))
2609 _schema.debug("Table '%s': column '%s' has changed type (DB=%s, def=%s), data moved to column %s !",
2610 self._table, k, f_pg_type, f._type, newname)
2612 # if the field is required and hasn't got a NOT NULL constraint
2613 if f.required and f_pg_notnull == 0:
2614 if has_rows:
2615 self._set_default_value_on_column(cr, k, context=context)
2616 # add the NOT NULL constraint
2618 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k), log_exceptions=False)
2620 _schema.debug("Table '%s': column '%s': added NOT NULL constraint",
2623 msg = "Table '%s': unable to set a NOT NULL constraint on column '%s' !\n"\
2624 "If you want to have it, you should update the records and execute manually:\n"\
2625 "ALTER TABLE %s ALTER COLUMN %s SET NOT NULL"
2626 _schema.warning(msg, self._table, k, self._table, k)
2628 elif not f.required and f_pg_notnull == 1:
2629 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, k))
2631 _schema.debug("Table '%s': column '%s': dropped NOT NULL constraint",
2634 indexname = '%s_%s_index' % (self._table, k)
2635 cr.execute("SELECT indexname FROM pg_indexes WHERE indexname = %s and tablename = %s", (indexname, self._table))
2636 res2 = cr.dictfetchall()
2637 if not res2 and f.select:
2638 cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (self._table, k, self._table, k))
2640 if f._type == 'text':
2641 # FIXME: for fields.text columns we should try creating GIN indexes instead (seems most suitable for an ERP context)
2642 msg = "Table '%s': Adding (b-tree) index for %s column '%s'."\
2643 "This is probably useless (does not work for fulltext search) and prevents INSERTs of long texts"\
2644 " because there is a length limit for indexable btree values!\n"\
2645 "Use a search view instead if you simply want to make the field searchable."
2646 _schema.warning(msg, self._table, f._type, k)
2647 if res2 and not f.select:
2648 cr.execute('DROP INDEX "%s_%s_index"' % (self._table, k))
2650 msg = "Table '%s': dropping index for column '%s' of type '%s' as it is not required anymore"
2651 _schema.debug(msg, self._table, k, f._type)
2653 if isinstance(f, fields.many2one) or (isinstance(f, fields.function) and f._type == 'many2one' and f.store):
2654 dest_model = self.pool[f._obj]
2655 if dest_model._auto and dest_model._table != 'ir_actions':
2656 self._m2o_fix_foreign_key(cr, self._table, k, dest_model, f.ondelete)
2658 # The field doesn't exist in database. Create it if necessary.
2660 if not isinstance(f, fields.function) or f.store:
2661 # add the missing field
2662 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, get_pg_type(f)[1]))
2663 cr.execute("COMMENT ON COLUMN %s.\"%s\" IS %%s" % (self._table, k), (f.string,))
2664 _schema.debug("Table '%s': added column '%s' with definition=%s",
2665 self._table, k, get_pg_type(f)[1])
2667 # initialize it
2668 if has_rows:
2669 self._set_default_value_on_column(cr, k, context=context)
2671 # remember the functions to call for the stored fields
2672 if isinstance(f, fields.function):
2673 order = 10
2674 if f.store is not True: # i.e. if f.store is a dict
2675 order = f.store[f.store.keys()[0]][2]
2676 todo_end.append((order, self._update_store, (f, k)))
2678 # remember new-style stored fields with compute method
2679 if k in self._fields and self._fields[k].depends:
2680 stored_fields.append(self._fields[k])
2682 # and add constraints if needed
2683 if isinstance(f, fields.many2one) or (isinstance(f, fields.function) and f._type == 'many2one' and f.store):
2684 if f._obj not in self.pool:
2685 raise except_orm('Programming Error', 'There is no reference available for %s' % (f._obj,))
2686 dest_model = self.pool[f._obj]
2687 ref = dest_model._table
2688 # ir_actions is inherited so foreign key doesn't work on it
2689 if dest_model._auto and ref != 'ir_actions':
2690 self._m2o_add_foreign_key_checked(k, dest_model, f.ondelete)
2692 cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (self._table, k, self._table, k))
2696 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k))
2697 _schema.debug("Table '%s': column '%s': added a NOT NULL constraint",
2700 msg = "WARNING: unable to set column %s of table %s not null !\n"\
2701 "Try to re-run: openerp-server --update=module\n"\
2702 "If it doesn't work, update records and execute manually:\n"\
2703 "ALTER TABLE %s ALTER COLUMN %s SET NOT NULL"
2704 _logger.warning(msg, k, self._table, self._table, k, exc_info=True)
2708 cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (self._table,))
2709 create = not bool(cr.fetchone())
2711 cr.commit() # start a new transaction
2713 if self._auto:
2714 self._add_sql_constraints(cr)
2716 if create:
2717 self._execute_sql(cr)
2719 if store_compute:
2720 self._parent_store_compute(cr)
2721 cr.commit()
2723 if stored_fields:
2724 # trigger computation of new-style stored fields with a compute
2725 def func(cr):
2726 _logger.info("Storing computed values of %s fields %s",
2727 self._name, ', '.join(sorted(f.name for f in stored_fields)))
2728 recs = self.browse(cr, SUPERUSER_ID, [], {'active_test': False})
2729 recs = recs.search([])
2730 if recs:
2731 map(recs._recompute_todo, stored_fields)
2732 recs.recompute()
2734 todo_end.append((1000, func, ()))
2736 return todo_end
2738 def _auto_end(self, cr, context=None):
2739 """ Create the foreign keys recorded by _auto_init. """
2740 for t, k, r, d in self._foreign_keys:
2741 cr.execute('ALTER TABLE "%s" ADD FOREIGN KEY ("%s") REFERENCES "%s" ON DELETE %s' % (t, k, r, d))
2742 self._save_constraint(cr, "%s_%s_fkey" % (t, k), 'f', False)
2743 cr.commit()
2744 del self._foreign_keys
2747 def _table_exist(self, cr):
2748 cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (self._table,))
2752 def _create_table(self, cr):
2753 cr.execute('CREATE TABLE "%s" (id SERIAL NOT NULL, PRIMARY KEY(id))' % (self._table,))
2754 cr.execute(("COMMENT ON TABLE \"%s\" IS %%s" % self._table), (self._description,))
2755 _schema.debug("Table '%s': created", self._table)
2758 def _parent_columns_exist(self, cr):
2759 cr.execute("""SELECT c.relname
2760 FROM pg_class c, pg_attribute a
2761 WHERE c.relname=%s AND a.attname=%s AND c.oid=a.attrelid
2762 """, (self._table, 'parent_left'))
2766 def _create_parent_columns(self, cr):
2767 cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_left" INTEGER' % (self._table,))
2768 cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_right" INTEGER' % (self._table,))
2769 if 'parent_left' not in self._columns:
2770 _logger.error('create a column parent_left on object %s: fields.integer(\'Left Parent\', select=1)',
2771 self._name)
2772 _schema.debug("Table '%s': added column '%s' with definition=%s",
2773 self._table, 'parent_left', 'INTEGER')
2774 elif not self._columns['parent_left'].select:
2775 _logger.error('parent_left column on object %s must be indexed! Add select=1 to the field definition)',
2776 self._name)
2777 if 'parent_right' not in self._columns:
2778 _logger.error('create a column parent_right on object %s: fields.integer(\'Right Parent\', select=1)',
2779 self._name)
2780 _schema.debug("Table '%s': added column '%s' with definition=%s",
2781 self._table, 'parent_right', 'INTEGER')
2782 elif not self._columns['parent_right'].select:
2783 _logger.error('parent_right column on object %s must be indexed! Add select=1 to the field definition)',
2784 self._name)
2785 if self._columns[self._parent_name].ondelete not in ('cascade', 'restrict'):
2786 _logger.error("The column %s on object %s must be set as ondelete='cascade' or 'restrict'",
2787 self._parent_name, self._name)
2792 def _select_column_data(self, cr):
2793 # attlen is the number of bytes necessary to represent the type when
2794 # the type has a fixed size. If the type has a varying size attlen is
2795 # -1 and atttypmod is the size limit + 4, or -1 if there is no limit.
2796 cr.execute("SELECT c.relname,a.attname,a.attlen,a.atttypmod,a.attnotnull,a.atthasdef,t.typname,CASE WHEN a.attlen=-1 THEN (CASE WHEN a.atttypmod=-1 THEN 0 ELSE a.atttypmod-4 END) ELSE a.attlen END as size " \
2797 "FROM pg_class c,pg_attribute a,pg_type t " \
2798 "WHERE c.relname=%s " \
2799 "AND c.oid=a.attrelid " \
2800 "AND a.atttypid=t.oid", (self._table,))
2801 return dict(map(lambda x: (x['attname'], x), cr.dictfetchall()))
2804 def _o2m_raise_on_missing_reference(self, cr, f):
2805 # TODO this check should be a method on fields.one2many.
2806 if f._obj in self.pool:
2807 other = self.pool[f._obj]
2808 # TODO the condition could use fields_get_keys().
2809 if f._fields_id not in other._columns.keys():
2810 if f._fields_id not in other._inherit_fields.keys():
2811 raise except_orm('Programming Error', "There is no reference field '%s' found for '%s'" % (f._fields_id, f._obj,))
2813 def _m2m_raise_or_create_relation(self, cr, f):
2814 m2m_tbl, col1, col2 = f._sql_names(self)
2815 # do not create relations for custom fields as they do not belong to a module
2816 # they will be automatically removed when dropping the corresponding ir.model.field
2817 # table names for custom relations all start with x_, see __init__
2818 if not m2m_tbl.startswith('x_'):
2819 self._save_relation_table(cr, m2m_tbl)
2820 cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (m2m_tbl,))
2821 if not cr.dictfetchall():
2822 if f._obj not in self.pool:
2823 raise except_orm('Programming Error', 'Many2Many destination model does not exist: `%s`' % (f._obj,))
2824 dest_model = self.pool[f._obj]
2825 ref = dest_model._table
2826 cr.execute('CREATE TABLE "%s" ("%s" INTEGER NOT NULL, "%s" INTEGER NOT NULL, UNIQUE("%s","%s"))' % (m2m_tbl, col1, col2, col1, col2))
2827 # create foreign key references with ondelete=cascade, unless the targets are SQL views
2828 cr.execute("SELECT relkind FROM pg_class WHERE relkind IN ('v') AND relname=%s", (ref,))
2829 if not cr.fetchall():
2830 self._m2o_add_foreign_key_unchecked(m2m_tbl, col2, dest_model, 'cascade')
2831 cr.execute("SELECT relkind FROM pg_class WHERE relkind IN ('v') AND relname=%s", (self._table,))
2832 if not cr.fetchall():
2833 self._m2o_add_foreign_key_unchecked(m2m_tbl, col1, self, 'cascade')
2835 cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (m2m_tbl, col1, m2m_tbl, col1))
2836 cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (m2m_tbl, col2, m2m_tbl, col2))
2837 cr.execute("COMMENT ON TABLE \"%s\" IS 'RELATION BETWEEN %s AND %s'" % (m2m_tbl, self._table, ref))
2839 _schema.debug("Create table '%s': m2m relation between '%s' and '%s'", m2m_tbl, self._table, ref)
2842 def _add_sql_constraints(self, cr):
2844 """
2845 Modify this model's database table constraints so they match the ones in
2846 ``_sql_constraints``.
2847 """
2849 def unify_cons_text(txt):
2850 return txt.lower().replace(', ',',').replace(' (','(')
2852 for (key, con, _) in self._sql_constraints:
2853 conname = '%s_%s' % (self._table, key)
2855 # select 1 so a row is still returned when there is no imc entry but a pgc one exists
2856 cr.execute("""SELECT definition, 1
2857 FROM ir_model_constraint imc
2858 RIGHT JOIN pg_constraint pgc
2859 ON (pgc.conname = imc.name)
2860 WHERE pgc.conname=%s
2861 """, (conname,))
2862 existing_constraints = cr.dictfetchone()
2866 'query': 'ALTER TABLE "%s" DROP CONSTRAINT "%s"' % (self._table, conname, ),
2867 'msg_ok': "Table '%s': dropped constraint '%s'. Reason: its definition changed from '%%s' to '%s'" % (
2868 self._table, conname, con),
2869 'msg_err': "Table '%s': unable to drop \'%s\' constraint !" % (self._table, con),
2874 'query': 'ALTER TABLE "%s" ADD CONSTRAINT "%s" %s' % (self._table, conname, con,),
2875 'msg_ok': "Table '%s': added constraint '%s' with definition=%s" % (self._table, conname, con),
2876 'msg_err': "Table '%s': unable to add \'%s\' constraint !\n If you want to have it, you should update the records and execute manually:\n%%s" % (
2882 if not existing_constraints:
2883 # constraint does not exist:
2884 sql_actions['add']['execute'] = True
2885 sql_actions['add']['msg_err'] = sql_actions['add']['msg_err'] % (sql_actions['add']['query'], )
2886 elif unify_cons_text(con) != existing_constraints['definition']:
2887 # constraint exists but its definition has changed:
2888 sql_actions['drop']['execute'] = True
2889 sql_actions['drop']['msg_ok'] = sql_actions['drop']['msg_ok'] % (existing_constraints['definition'] or '', )
2890 sql_actions['add']['execute'] = True
2891 sql_actions['add']['msg_err'] = sql_actions['add']['msg_err'] % (sql_actions['add']['query'], )
2893 # we need to add the constraint:
2894 self._save_constraint(cr, conname, 'u', unify_cons_text(con))
2895 sql_actions = list(sql_actions.values())
2896 sql_actions.sort(key=lambda x: x['order'])
2897 for sql_action in [action for action in sql_actions if action['execute']]:
2898 try:
2899 cr.execute(sql_action['query'])
2900 cr.commit()
2901 _schema.debug(sql_action['msg_ok'])
2902 except:
2903 _schema.warning(sql_action['msg_err'])
2904 cr.rollback()
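# Illustrative sketch (hypothetical model): _sql_constraints entries are
# (key, constraint_definition, error_message) triples, e.g.:
#
#     _sql_constraints = [
#         ('name_uniq', 'unique(name)', 'The name must be unique!'),
#         ('amount_pos', 'CHECK(amount >= 0)', 'The amount must be positive.'),
#     ]
#
# yielding constraints named "<table>_name_uniq" and "<table>_amount_pos".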
2907 def _execute_sql(self, cr):
2908 """ Execute the SQL code from the _sql attribute (if any)."""
2909 if hasattr(self, "_sql"):
2910 for line in self._sql.split(';'):
2911 line2 = line.replace('\n', '').strip()
2912 if line2:
2913 cr.execute(line2)
2917 # Update objects that use this one to update their _inherits fields
2920 @classmethod
2921 def _inherits_reload_src(cls):
2922 """ Recompute the _inherit_fields mapping on each _inherits'd child model."""
2923 for model in cls.pool.values():
2924 if cls._name in model._inherits:
2925 model._inherits_reload()
2927 @classmethod
2928 def _inherits_reload(cls):
2929 """ Recompute the _inherit_fields mapping.
2931 This will also call itself on each inherits'd child model.
2934 res = {}
2935 for table in cls._inherits:
2936 other = cls.pool[table]
2937 for col in other._columns.keys():
2938 res[col] = (table, cls._inherits[table], other._columns[col], table)
2939 for col in other._inherit_fields.keys():
2940 res[col] = (table, cls._inherits[table], other._inherit_fields[col][2], other._inherit_fields[col][3])
2941 cls._inherit_fields = res
2942 cls._all_columns = cls._get_column_infos()
2944 # interface columns with new-style fields
2945 for attr, column in cls._columns.items():
2946 if attr not in cls._fields:
2947 cls._add_field(attr, column.to_field())
2949 # interface inherited fields with new-style fields (note that the
2950 # reverse order is used to stay consistent with _all_columns above)
2951 for parent_model, parent_field in reversed(cls._inherits.items()):
2952 for attr, field in cls.pool[parent_model]._fields.iteritems():
2953 if attr not in cls._fields:
2954 cls._add_field(attr, field.copy(
2955 related=(parent_field, attr),
2956 ))
2960 cls._inherits_reload_src()
2962 @classmethod
2963 def _get_column_infos(cls):
2964 """Returns a dict mapping all field names (direct fields and
2965 fields inherited via _inherits) to a ``column_info`` struct
2966 giving detailed column information """
2967 result = {}
2968 # do not swap the two for loops, since local fields may hide inherited ones!
2969 for k, (parent, m2o, col, original_parent) in cls._inherit_fields.iteritems():
2970 result[k] = fields.column_info(k, col, parent, m2o, original_parent)
2971 for k, col in cls._columns.iteritems():
2972 result[k] = fields.column_info(k, col)
2973 return result
2975 @classmethod
2976 def _inherits_check(cls):
2977 for table, field_name in cls._inherits.items():
2978 if field_name not in cls._columns:
2979 _logger.info('Missing many2one field definition for _inherits reference "%s" in "%s", using default one.', field_name, cls._name)
2980 cls._columns[field_name] = fields.many2one(table, string="Automatically created field to link to parent %s" % table,
2981 required=True, ondelete="cascade")
2982 elif not cls._columns[field_name].required or cls._columns[field_name].ondelete.lower() not in ("cascade", "restrict"):
2983 _logger.warning('Field definition for _inherits reference "%s" in "%s" must be marked as "required" with ondelete="cascade" or "restrict", forcing it to required + cascade.', field_name, cls._name)
2984 cls._columns[field_name].required = True
2985 cls._columns[field_name].ondelete = "cascade"
2987 # reflect fields with delegate=True in dictionary cls._inherits
2988 for field in cls._fields.itervalues():
2989 if field.type == 'many2one' and not field.related and field.delegate:
2990 if not field.required:
2991 _logger.warning("Field %s with delegate=True must be required.", field)
2992 field.required = True
2993 if field.ondelete.lower() not in ('cascade', 'restrict'):
2994 field.ondelete = 'cascade'
2995 cls._inherits[field.comodel_name] = field.name
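# Illustrative sketch of delegation inheritance (hypothetical models):
#
#     class my_laptop(osv.osv):
#         _name = 'my.laptop'
#         _inherits = {'my.product': 'product_id'}
#         _columns = {
#             'product_id': fields.many2one('my.product', 'Product',
#                                           required=True, ondelete='cascade'),
#         }
#
# Fields of my.product then behave as inherited fields of my.laptop.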
2998 def _prepare_setup_fields(self):
2999 """ Prepare the setup of fields once the models have been loaded. """
3000 for field in self._fields.itervalues():
3001 field.reset()
3004 def _setup_fields(self, partial=False):
3005 """ Setup the fields (dependency triggers, etc). """
3006 for field in self._fields.itervalues():
3007 if partial and field.manual and \
3008 field.relational and field.comodel_name not in self.pool:
3009 # do not set up manual fields that refer to unknown models
3010 continue
3011 field.setup(self.env)
3013 # group fields by compute to determine field.computed_fields
3014 fields_by_compute = defaultdict(list)
3015 for field in self._fields.itervalues():
3016 if field.compute:
3017 field.computed_fields = fields_by_compute[field.compute]
3018 field.computed_fields.append(field)
3019 else:
3020 field.computed_fields = []
3022 def fields_get(self, cr, user, allfields=None, context=None, write_access=True):
3023 """ fields_get([fields])
3025 Return the definition of each field.
3027 The returned value is a dictionary (indexed by field name) of
3028 dictionaries. The _inherits'd fields are included. The string, help,
3029 and selection (if present) attributes are translated.
3031 :param cr: database cursor
3032 :param user: current user id
3033 :param allfields: optional list of field names to document; all fields if empty or not provided
3034 :param context: context arguments, like lang, time zone
3035 :return: dictionary of field dictionaries, each one describing a field of the business object
3036 :raise AccessError: * if user has no create/write rights on the requested object
3039 recs = self.browse(cr, user, [], context)
3041 res = {}
3042 for fname, field in self._fields.iteritems():
3043 if allfields and fname not in allfields:
3044 continue
3045 if field.groups and not recs.user_has_groups(field.groups):
3046 continue
3047 res[fname] = field.get_description(recs.env)
3049 # if user cannot create or modify records, make all fields readonly
3050 has_access = functools.partial(recs.check_access_rights, raise_exception=False)
3051 if not (has_access('write') or has_access('create')):
3052 for description in res.itervalues():
3053 description['readonly'] = True
3054 description['states'] = {}
3056 return res
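# Usage sketch (hypothetical field names):
#
#     defs = model.fields_get(cr, uid, allfields=['name', 'partner_id'])
#     # defs['name'] -> {'type': 'char', 'string': 'Name', ...},
#     # with 'readonly' forced to True when the user cannot write/create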
3058 def get_empty_list_help(self, cr, user, help, context=None):
3059 """ Generic method giving the help message displayed when having
3060 no result to display in a list or kanban view. By default it returns
3061 the help given in parameter that is generally the help message
3062 defined in the action.
3063 """
3064 return help
3066 def check_field_access_rights(self, cr, user, operation, fields, context=None):
3067 """
3068 Check the user access rights on the given fields. This raises an
3069 AccessError if the user does not have the rights. Otherwise it returns
3070 the fields (as-is if ``fields`` is not falsy, or the readable/writable
3071 fields if ``fields`` is falsy).
3072 """
3073 if user == SUPERUSER_ID:
3074 return fields or list(self._fields)
3077 """ determine whether user has access to field `fname` """
3078 field = self._fields.get(fname)
3079 if field and field.groups:
3080 return self.user_has_groups(cr, user, groups=field.groups, context=context)
3081 else:
3082 return True
3084 if not fields:
3085 fields = filter(valid, self._fields)
3086 else:
3087 invalid_fields = set(filter(lambda name: not valid(name), fields))
3088 if invalid_fields:
3089 _logger.warning('Access Denied by ACLs for operation: %s, uid: %s, model: %s, fields: %s',
3090 operation, user, self._name, ', '.join(invalid_fields))
3092 _('The requested operation cannot be completed due to security restrictions. '
3093 'Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
3094 (self._description, operation))
3098 # add explicit old-style implementation to read()
3100 def read(self, cr, user, ids, fields=None, context=None, load='_classic_read'):
3101 records = self.browse(cr, user, ids, context)
3102 result = BaseModel.read(records, fields, load=load)
3103 return result if isinstance(ids, list) else (bool(result) and result[0])
3105 # new-style implementation of read()
3107 def read(self, fields=None, load='_classic_read'):
3110 Reads the requested fields for the records in `self`, low-level/RPC
3111 method. In Python code, prefer :meth:`~.browse`.
3113 :param fields: list of field names to return (default is all fields)
3114 :return: a list of dictionaries mapping field names to their values,
3115 with one dictionary per record
3116 :raise AccessError: if user has no read rights on some of the given
3117 records
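Example (the recordset, field names and values are illustrative)::

    partners.read(['name', 'parent_id'])
    # [{'id': 1, 'name': "Agrolait", 'parent_id': False},
    #  {'id': 2, 'name': "Michel", 'parent_id': (1, "Agrolait")}]

(with the default ``load='_classic_read'``, many2one values come back as
``(id, name_get)`` pairs; with another value they come back as bare ids)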
3119 # check access rights
3120 self.check_access_rights('read')
3121 fields = self.check_field_access_rights('read', fields)
3123 # split fields into stored and computed fields
3124 stored, computed = [], []
3125 for name in fields:
3126 if name in self._columns:
3127 stored.append(name)
3128 elif name in self._fields:
3129 computed.append(name)
3130 else:
3131 _logger.warning("%s.read() with unknown field '%s'", self._name, name)
3133 # fetch stored fields from the database to the cache
3134 self._read_from_database(stored)
3136 # retrieve results from records; this takes values from the cache and
3137 # computes remaining fields
3139 name_fields = [(name, self._fields[name]) for name in (stored + computed)]
3140 use_name_get = (load == '_classic_read')
3143 values = {'id': record.id}
3144 for name, field in name_fields:
3145 values[name] = field.convert_to_read(record[name], use_name_get)
3146 result.append(values)
3147 except MissingError:
3153 def _prefetch_field(self, field):
3154 """ Read from the database in order to fetch `field` (:class:`Field`
3155 instance) for `self` in cache.
3157 # fetch the records of this model without field_name in their cache
3158 records = self._in_cache_without(field)
3160 if len(records) > PREFETCH_MAX:
3161 records = records[:PREFETCH_MAX] | self
3163 # by default, simply fetch field
3164 fnames = {field.name}
3166 if self.env.in_draft:
3167 # we may be doing an onchange, do not prefetch other fields
3169 elif self.env.field_todo(field):
3170 # field must be recomputed, do not prefetch records to recompute
3171 records -= self.env.field_todo(field)
3172 elif not self._context.get('prefetch_fields', True):
3173 # do not prefetch other fields
3175 elif self._columns[field.name]._prefetch:
3176 # here we can optimize: prefetch all classic and many2one fields
3178 for fname, fcolumn in self._columns.iteritems()
3179 if fcolumn._prefetch
3180 if not fcolumn.groups or self.user_has_groups(fcolumn.groups)
3183 # fetch records with read()
3184 assert self in records and field.name in fnames
3187 result = records.read(list(fnames), load='_classic_write')
3191 # check the cache, and update it if necessary
3192 if not self._cache.contains(field):
3193 for values in result:
3194 record = self.browse(values.pop('id'))
3195 record._cache.update(record._convert_to_cache(values, validate=False))
3196 if not self._cache.contains(field):
3197 e = AccessError("No value found for %s.%s" % (self, field.name))
3198 self._cache[field] = FailedValue(e)
3201 def _read_from_database(self, field_names):
3202 """ Read the given fields of the records in `self` from the database,
3203 and store them in cache. Access errors are also stored in cache.
3206 cr, user, context = env.args
3208 # FIXME: The query construction needs to be rewritten using the internal Query
3209 # object, as in search(), to avoid ambiguous column references when
3210 # reading/sorting on a table that is auto_joined to another table with
3211 # common columns (e.g. the magical columns)
3213 # Construct a clause for the security rules.
3214 # 'tables' holds the list of tables necessary for the SELECT, including
3215 # the ir.rule clauses, and contains at least self._table.
3216 rule_clause, rule_params, tables = env['ir.rule'].domain_get(self._name, 'read')
3218 # determine the fields that are stored as columns in self._table
3219 fields_pre = [f for f in field_names if self._columns[f]._classic_write]
3221 # we need fully-qualified column names in case len(tables) > 1
3223 if isinstance(self._columns.get(f), fields.binary) and \
3224 context.get('bin_size_%s' % f, context.get('bin_size')):
3225 # PG 9.2 introduced a conflicting pg_size_pretty(numeric), hence the explicit ::bigint cast
3226 return 'pg_size_pretty(length(%s."%s")::bigint) as "%s"' % (self._table, f, f)
3228 return '%s."%s"' % (self._table, f)
3229 qual_names = map(qualify, set(fields_pre + ['id']))
3231 query = """ SELECT %(qual_names)s FROM %(tables)s
3232 WHERE %(table)s.id IN %%s AND (%(extra)s)
3233 ORDER BY %(order)s
3234 """ % {
3235 'qual_names': ",".join(qual_names),
3236 'tables': ",".join(tables),
3237 'table': self._table,
3238 'extra': " OR ".join(rule_clause) if rule_clause else "TRUE",
3239 'order': self._parent_order or self._order,
3240 }
3242 result = []
3243 for sub_ids in cr.split_for_in_conditions(self.ids):
3244 cr.execute(query, [tuple(sub_ids)] + rule_params)
3245 result.extend(cr.dictfetchall())
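# Illustrative sketch (table and rule are hypothetical): for a model backed
# by table res_partner with no ir.rule restriction, the query built above
# renders roughly as
#
#     SELECT res_partner."name",res_partner."id" FROM "res_partner"
#     WHERE res_partner.id IN %s AND (TRUE)
#     ORDER BY id
#
# and is executed once per id batch produced by split_for_in_conditions().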
3247 ids = [vals['id'] for vals in result]
3250 # translate the fields if necessary
3251 if context.get('lang'):
3252 ir_translation = env['ir.translation']
3253 for f in fields_pre:
3254 if self._columns[f].translate:
3255 #TODO: optimize out of this loop
3256 res_trans = ir_translation._get_ids(
3257 '%s,%s' % (self._name, f), 'model', context['lang'], ids)
3259 vals[f] = res_trans.get(vals['id'], False) or vals[f]
3261 # apply the symbol_get functions of the fields we just read
3262 for f in fields_pre:
3263 symbol_get = self._columns[f]._symbol_get
3266 vals[f] = symbol_get(vals[f])
3268 # store result in cache for POST fields
3270 record = self.browse(vals['id'])
3271 record._cache.update(record._convert_to_cache(vals, validate=False))
3273 # determine the fields that must be processed now
3274 fields_post = [f for f in field_names if not self._columns[f]._classic_write]
3276 # Compute POST fields, grouped by multi
3277 by_multi = defaultdict(list)
3278 for f in fields_post:
3279 by_multi[self._columns[f]._multi].append(f)
3281 for multi, fs in by_multi.iteritems():
3283 res2 = self._columns[fs[0]].get(cr, self._model, ids, fs, user, context=context, values=result)
3284 assert res2 is not None, \
3285 'The function field "%s" on the "%s" model returned None\n' \
3286 '(a dictionary was expected).' % (fs[0], self._name)
3288 # TOCHECK: why a string is returned instead of a dict on python 2.6
3289 # if isinstance(res2[vals['id']], str): res2[vals['id']] = eval(res2[vals['id']])
3290 multi_fields = res2.get(vals['id'], {})
3293 vals[f] = multi_fields.get(f, [])
3296 res2 = self._columns[f].get(cr, self._model, ids, f, user, context=context, values=result)
3299 vals[f] = res2[vals['id']]
3303 # Warn about deprecated fields now that fields_pre and fields_post are computed
3304 for f in field_names:
3305 column = self._columns[f]
3306 if column.deprecated:
3307 _logger.warning('Field %s.%s is deprecated: %s', self._name, f, column.deprecated)
3309 # store result in cache
3311 record = self.browse(vals.pop('id'))
3312 record._cache.update(record._convert_to_cache(vals, validate=False))
3314 # store failed values in cache for the records that could not be read
3315 fetched = self.browse(ids)
3316 missing = self - fetched
3318 extras = fetched - self
3321 _("Database fetch misses ids ({}) and has extra ids ({}), may be caused by a type incoherence in a previous request").format(
3322 ', '.join(map(repr, missing._ids)),
3323 ', '.join(map(repr, extras._ids)),
3325 # store an access error exception in existing records
3327 _('The requested operation cannot be completed due to security restrictions. Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
3328 (self._name, 'read')
3330 forbidden = missing.exists()
3331 forbidden._cache.update(FailedValue(exc))
3332 # store a missing error exception in non-existing records
3334 _('One of the documents you are trying to access has been deleted, please try again after refreshing.')
3336 (missing - forbidden)._cache.update(FailedValue(exc))
3339 def get_metadata(self):
3341 Returns some metadata about the given records.
3343 :return: list of ownership dictionaries for each requested record
3344 :rtype: list of dictionaries with the following keys:
3347 * create_uid: user who created the record
3348 * create_date: date when the record was created
3349 * write_uid: last user who changed the record
3350 * write_date: date of the last change to the record
3351 * xmlid: XML ID to use to refer to this record (if there is one), in format ``module.name``
3354 if self._log_access:
3355 fields += ['create_uid', 'create_date', 'write_uid', 'write_date']
3356 quoted_table = '"%s"' % self._table
3357 fields_str = ",".join('%s.%s' % (quoted_table, field) for field in fields)
3358 query = '''SELECT %s, __imd.module, __imd.name
3359 FROM %s LEFT JOIN ir_model_data __imd
3360 ON (__imd.model = %%s and __imd.res_id = %s.id)
3361 WHERE %s.id IN %%s''' % (fields_str, quoted_table, quoted_table, quoted_table)
3362 self._cr.execute(query, (self._name, tuple(self.ids)))
3363 res = self._cr.dictfetchall()
3365 uids = set(r[k] for r in res for k in ['write_uid', 'create_uid'] if r.get(k))
3366 names = dict(self.env['res.users'].browse(uids).name_get())
3370 value = r[key] = r[key] or False
3371 if key in ('write_uid', 'create_uid') and value in names:
3372 r[key] = (value, names[value])
3373 r['xmlid'] = ("%(module)s.%(name)s" % r) if r['name'] else False
3374 del r['name'], r['module']
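# Illustrative sketch (values are hypothetical): one element of the list
# returned by get_metadata() after the post-processing above.
#
#     {'id': 7,
#      'create_uid': (1, 'Administrator'),
#      'create_date': '2014-06-04 10:23:11',
#      'write_uid': (1, 'Administrator'),
#      'write_date': '2014-06-05 08:11:40',
#      'xmlid': 'base.partner_root'}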
3377 def _check_concurrency(self, cr, ids, context):
3380 if not (context.get(self.CONCURRENCY_CHECK_FIELD) and self._log_access):
3382 check_clause = "(id = %s AND %s < COALESCE(write_date, create_date, (now() at time zone 'UTC'))::timestamp)"
3383 for sub_ids in cr.split_for_in_conditions(ids):
3386 id_ref = "%s,%s" % (self._name, id)
3387 update_date = context[self.CONCURRENCY_CHECK_FIELD].pop(id_ref, None)
3389 ids_to_check.extend([id, update_date])
3390 if not ids_to_check:
3392 cr.execute("SELECT id FROM %s WHERE %s" % (self._table, " OR ".join([check_clause]*(len(ids_to_check)/2))), tuple(ids_to_check))
3395 # mention the first one only to keep the error message readable
3396 raise except_orm('ConcurrencyException', _('A document was modified since you last viewed it (%s:%d)') % (self._description, res[0]))
3398 def _check_record_rules_result_count(self, cr, uid, ids, result_ids, operation, context=None):
3399 """Verify the returned rows after applying record rules matches
3400 the length of `ids`, and raise an appropriate exception if it does not.
3404 ids, result_ids = set(ids), set(result_ids)
3405 missing_ids = ids - result_ids
3407 # Attempt to distinguish record rule restriction vs deleted records,
3408 # to provide a more specific error message - check if the missing ids are hidden by access rules or actually deleted
3409 cr.execute('SELECT id FROM ' + self._table + ' WHERE id IN %s', (tuple(missing_ids),))
3410 forbidden_ids = [x[0] for x in cr.fetchall()]
3412 # the missing ids are (at least partially) hidden by access rules
3413 if uid == SUPERUSER_ID:
3415 _logger.warning('Access Denied by record rules for operation: %s on record ids: %r, uid: %s, model: %s', operation, forbidden_ids, uid, self._name)
3416 raise except_orm(_('Access Denied'),
3417 _('The requested operation cannot be completed due to security restrictions. Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
3418 (self._description, operation))
3420 # If we get here, the missing_ids are not in the database
3421 if operation in ('read','unlink'):
3422 # No need to warn about deleting an already deleted record.
3423 # And no error when reading a record that was deleted, to prevent spurious
3424 # errors for non-transactional search/read sequences coming from clients
3426 _logger.warning('Failed operation on deleted record(s): %s, uid: %s, model: %s', operation, uid, self._name)
3427 raise except_orm(_('Missing document(s)'),
3428 _('One of the documents you are trying to access has been deleted, please try again after refreshing.'))
3431 def check_access_rights(self, cr, uid, operation, raise_exception=True): # no context on purpose.
3432 """Verifies that the operation given by ``operation`` is allowed for the user
3433 according to the access rights."""
3434 return self.pool.get('ir.model.access').check(cr, uid, self._name, operation, raise_exception)
3436 def check_access_rule(self, cr, uid, ids, operation, context=None):
3437 """Verifies that the operation given by ``operation`` is allowed for the user
3438 according to ir.rules.
3440 :param operation: one of ``write``, ``unlink``
3441 :raise except_orm: * if current ir.rules do not permit this operation.
3442 :return: None if the operation is allowed
3444 if uid == SUPERUSER_ID:
3447 if self.is_transient():
3448 # Only one single implicit access rule for transient models: owner only!
3449 # This is ok to hardcode because we assert that TransientModels always
3450 # have log_access enabled so that the create_uid column is always there.
3451 # And even with _inherits, these fields are always present in the local
3452 # table too, so no need for JOINs.
3453 cr.execute("""SELECT distinct create_uid
3454 FROM %s
3455 WHERE id IN %%s""" % self._table, (tuple(ids),))
3456 uids = [x[0] for x in cr.fetchall()]
3457 if len(uids) != 1 or uids[0] != uid:
3458 raise except_orm(_('Access Denied'),
3459 _('For this kind of document, you may only access records you created yourself.\n\n(Document type: %s)') % (self._description,))
3461 where_clause, where_params, tables = self.pool.get('ir.rule').domain_get(cr, uid, self._name, operation, context=context)
3463 where_clause = ' and ' + ' and '.join(where_clause)
3464 for sub_ids in cr.split_for_in_conditions(ids):
3465 cr.execute('SELECT ' + self._table + '.id FROM ' + ','.join(tables) +
3466 ' WHERE ' + self._table + '.id IN %s' + where_clause,
3467 [sub_ids] + where_params)
3468 returned_ids = [x['id'] for x in cr.dictfetchall()]
3469 self._check_record_rules_result_count(cr, uid, sub_ids, returned_ids, operation, context=context)
3471 def create_workflow(self, cr, uid, ids, context=None):
3472 """Create a workflow instance for each given record IDs."""
3473 from openerp import workflow
3475 workflow.trg_create(uid, self._name, res_id, cr)
3476 # self.invalidate_cache(cr, uid, context=context) ?
3479 def delete_workflow(self, cr, uid, ids, context=None):
3480 """Delete the workflow instances bound to the given record IDs."""
3481 from openerp import workflow
3483 workflow.trg_delete(uid, self._name, res_id, cr)
3484 self.invalidate_cache(cr, uid, context=context)
3487 def step_workflow(self, cr, uid, ids, context=None):
3488 """Reevaluate the workflow instances of the given record IDs."""
3489 from openerp import workflow
3491 workflow.trg_write(uid, self._name, res_id, cr)
3492 # self.invalidate_cache(cr, uid, context=context) ?
3495 def signal_workflow(self, cr, uid, ids, signal, context=None):
3496 """Send given workflow signal and return a dict mapping ids to workflow results"""
3497 from openerp import workflow
3500 result[res_id] = workflow.trg_validate(uid, self._name, res_id, signal, cr)
3501 # self.invalidate_cache(cr, uid, context=context) ?
3504 def redirect_workflow(self, cr, uid, old_new_ids, context=None):
3505 """ Rebind the workflow instance bound to the given 'old' record IDs to
3506 the given 'new' IDs. (``old_new_ids`` is a list of pairs ``(old, new)``.
3508 from openerp import workflow
3509 for old_id, new_id in old_new_ids:
3510 workflow.trg_redirect(uid, self._name, old_id, new_id, cr)
3511 self.invalidate_cache(cr, uid, context=context)
3514 def unlink(self, cr, uid, ids, context=None):
3517 Deletes the records of the current set
3519 :raise AccessError: * if user has no unlink rights on the requested object
3520 * if user tries to bypass access rules for unlink on the requested object
3521 :raise UserError: if the record is default property for other records
3526 if isinstance(ids, (int, long)):
3527 ids = [ids]
3529 result_store = self._store_get_values(cr, uid, ids, self._all_columns.keys(), context)
3531 # for recomputing new-style fields
3532 recs = self.browse(cr, uid, ids, context)
3533 recs.modified(self._fields)
3535 self._check_concurrency(cr, ids, context)
3537 self.check_access_rights(cr, uid, 'unlink')
3539 ir_property = self.pool.get('ir.property')
3541 # Check if the records are used as default properties.
3542 domain = [('res_id', '=', False),
3543 ('value_reference', 'in', ['%s,%s' % (self._name, i) for i in ids]),
3545 if ir_property.search(cr, uid, domain, context=context):
3546 raise except_orm(_('Error'), _('Unable to delete this document because it is used as a default property'))
3548 # Delete the records' properties.
3549 property_ids = ir_property.search(cr, uid, [('res_id', 'in', ['%s,%s' % (self._name, i) for i in ids])], context=context)
3550 ir_property.unlink(cr, uid, property_ids, context=context)
3552 self.delete_workflow(cr, uid, ids, context=context)
3554 self.check_access_rule(cr, uid, ids, 'unlink', context=context)
3555 pool_model_data = self.pool.get('ir.model.data')
3556 ir_values_obj = self.pool.get('ir.values')
3557 ir_attachment_obj = self.pool.get('ir.attachment')
3558 for sub_ids in cr.split_for_in_conditions(ids):
3559 cr.execute('delete from ' + self._table + ' ' \
3560 'where id IN %s', (sub_ids,))
3562 # Remove the ir_model_data reference if the record being deleted was created by an xml/csv file,
3563 # as these are not connected with real database foreign keys, and would be dangling references.
3564 # Note: following steps performed as admin to avoid access rights restrictions, and with no context
3565 # to avoid possible side-effects during admin calls.
3566 # Step 1. Calling unlink of ir_model_data only for the affected IDS
3567 reference_ids = pool_model_data.search(cr, SUPERUSER_ID, [('res_id','in',list(sub_ids)),('model','=',self._name)])
3568 # Step 2. Perform the real deletion of the referenced ir_model_data records
3570 pool_model_data.unlink(cr, SUPERUSER_ID, reference_ids)
3572 # For the same reason, removing the record relevant to ir_values
3573 ir_value_ids = ir_values_obj.search(cr, uid,
3574 ['|',('value','in',['%s,%s' % (self._name, sid) for sid in sub_ids]),'&',('res_id','in',list(sub_ids)),('model','=',self._name)],
3577 ir_values_obj.unlink(cr, uid, ir_value_ids, context=context)
3579 # For the same reason, removing the record relevant to ir_attachment
3580 # The search is performed with raw SQL because the search method of ir_attachment is overridden to hide attachments of deleted records
3581 cr.execute('select id from ir_attachment where res_model = %s and res_id in %s', (self._name, sub_ids))
3582 ir_attachment_ids = [ir_attachment[0] for ir_attachment in cr.fetchall()]
3583 if ir_attachment_ids:
3584 ir_attachment_obj.unlink(cr, uid, ir_attachment_ids, context=context)
3586 # invalidate the *whole* cache, since the orm does not handle all
3587 # changes made in the database, like cascading delete!
3588 recs.invalidate_cache()
3590 for order, obj_name, store_ids, fields in result_store:
3591 if obj_name == self._name:
3592 effective_store_ids = set(store_ids) - set(ids)
3594 effective_store_ids = store_ids
3595 if effective_store_ids:
3596 obj = self.pool[obj_name]
3597 cr.execute('select id from '+obj._table+' where id IN %s', (tuple(effective_store_ids),))
3598 rids = map(lambda x: x[0], cr.fetchall())
3600 obj._store_set_values(cr, uid, rids, fields, context)
3602 # recompute new-style fields
3611 def write(self, vals):
3614 Updates all records in the current set with the provided values.
3616 :param dict vals: fields to update and the value to set on them e.g::
3618 {'foo': 1, 'bar': "Qux"}
3620 will set the field ``foo`` to ``1`` and the field ``bar`` to
3621 ``"Qux"`` if those are valid (otherwise it will trigger an error).
3623 :raise AccessError: * if user has no write rights on the requested object
3624 * if user tries to bypass access rules for write on the requested object
3625 :raise ValidateError: if the user tries to set an invalid value on a selection field
3626 :raise UserError: if a loop would be created in a hierarchy of objects as a result of the operation (such as setting an object as its own parent)
3628 .. _openerp/models/relationals/format:
3630 .. note:: Relational fields use a special "commands" format to manipulate their values
3632 This format is a list of command triplets executed sequentially,
3633 possible command triplets are:
3635 ``(0, _, values: dict)``
3636 links to a new record created from the provided values
3637 ``(1, id, values: dict)``
3638 updates the already-linked record of id ``id`` with the
3641 unlinks and deletes the linked record of id ``id``
3643 unlinks the linked record of id ``id`` without deleting it
3645 links to an existing record of id ``id``
3647 unlinks all records in the relation, equivalent to using
3648 the command ``3`` on every linked record
3650 replaces the existing list of linked records by the provided
3651 ones, equivalent to using ``5`` then ``4`` for each id in
3654 (in command triplets, ``_`` values are ignored and can be
3655 anything, generally ``0`` or ``False``)
3657 Any command can be used on :class:`~openerp.fields.Many2many`,
3658 only ``0``, ``1`` and ``2`` can be used on
3659 :class:`~openerp.fields.One2many`.
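Example (illustrative only; ``partner``, its many2many ``category_id``
and the ids involved are hypothetical)::

    partner.write({
        'name': "Acme",
        'category_id': [
            (4, tag_id),              # link an existing tag
            (0, 0, {'name': "VIP"}),  # create a new tag and link it
        ],
    })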
3664 self._check_concurrency(self._ids)
3665 self.check_access_rights('write')
3667 # No user-driven update of these columns
3668 for field in itertools.chain(MAGIC_COLUMNS, ('parent_left', 'parent_right')):
3669 vals.pop(field, None)
3671 # split up fields into old-style and pure new-style ones
3672 old_vals, new_vals, unknown = {}, {}, []
3673 for key, val in vals.iteritems():
3674 if key in self._columns:
3675 old_vals[key] = val
3676 elif key in self._fields:
3677 new_vals[key] = val
3678 else:
3679 unknown.append(key)
3681 if unknown:
3682 _logger.warning("%s.write() with unknown fields: %s", self._name, ', '.join(sorted(unknown)))
3684 # write old-style fields with (low-level) method _write
3686 self._write(old_vals)
3688 # put the values of pure new-style fields into cache, and inverse them
3691 record._cache.update(record._convert_to_cache(new_vals, update=True))
3692 for key in new_vals:
3693 self._fields[key].determine_inverse(self)
3697 def _write(self, cr, user, ids, vals, context=None):
3698 # low-level implementation of write()
3703 self.check_field_access_rights(cr, user, 'write', vals.keys())
3704 for field in vals.keys():
3706 if field in self._columns:
3707 fobj = self._columns[field]
3708 elif field in self._inherit_fields:
3709 fobj = self._inherit_fields[field][2]
3716 for group in groups:
3717 module = group.split(".")[0]
3718 grp = group.split(".")[1]
3719 cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name=%s and module=%s and model=%s) and uid=%s", \
3720 (grp, module, 'res.groups', user))
3721 readonly = cr.fetchall()
3722 if readonly[0][0] >= 1:
3729 result = self._store_get_values(cr, user, ids, vals.keys(), context) or []
3731 # for recomputing new-style fields
3732 recs = self.browse(cr, user, ids, context)
3733 modified_fields = list(vals)
3734 if self._log_access:
3735 modified_fields += ['write_date', 'write_uid']
3736 recs.modified(modified_fields)
3738 parents_changed = []
3739 parent_order = self._parent_order or self._order
3740 if self._parent_store and (self._parent_name in vals) and not context.get('defer_parent_store_computation'):
3741 # The parent_left/right computation may take up to
3742 # 5 seconds. No need to recompute the values if the
3743 # parent is the same.
3744 # Note: to respect parent_order, nodes must be processed in
3745 # order, so ``parents_changed`` must be ordered properly.
3746 parent_val = vals[self._parent_name]
3748 query = "SELECT id FROM %s WHERE id IN %%s AND (%s != %%s OR %s IS NULL) ORDER BY %s" % \
3749 (self._table, self._parent_name, self._parent_name, parent_order)
3750 cr.execute(query, (tuple(ids), parent_val))
3752 query = "SELECT id FROM %s WHERE id IN %%s AND (%s IS NOT NULL) ORDER BY %s" % \
3753 (self._table, self._parent_name, parent_order)
3754 cr.execute(query, (tuple(ids),))
3755 parents_changed = map(operator.itemgetter(0), cr.fetchall())
3762 totranslate = context.get('lang', False) and (context['lang'] != 'en_US')
3764 field_column = self._all_columns.get(field) and self._all_columns.get(field).column
3765 if field_column and field_column.deprecated:
3766 _logger.warning('Field %s.%s is deprecated: %s', self._name, field, field_column.deprecated)
3767 if field in self._columns:
3768 if self._columns[field]._classic_write and not (hasattr(self._columns[field], '_fnct_inv')):
3769 if (not totranslate) or not self._columns[field].translate:
3770 upd0.append('"'+field+'"='+self._columns[field]._symbol_set[0])
3771 upd1.append(self._columns[field]._symbol_set[1](vals[field]))
3772 direct.append(field)
3774 upd_todo.append(field)
3776 updend.append(field)
3777 if field in self._columns \
3778 and hasattr(self._columns[field], 'selection') \
3780 self._check_selection_field_value(cr, user, field, vals[field], context=context)
3782 if self._log_access:
3783 upd0.append('write_uid=%s')
3784 upd0.append("write_date=(now() at time zone 'UTC')")
3788 self.check_access_rule(cr, user, ids, 'write', context=context)
3789 for sub_ids in cr.split_for_in_conditions(ids):
3790 cr.execute('update ' + self._table + ' set ' + ','.join(upd0) + ' ' \
3791 'where id IN %s', upd1 + [sub_ids])
3792 if cr.rowcount != len(sub_ids):
3793 raise MissingError(_('One of the records you are trying to modify has already been deleted (Document type: %s).') % self._description)
3798 if self._columns[f].translate:
3799 src_trans = self.pool[self._name].read(cr, user, ids, [f])[0][f]
3802 # Inserting value to DB
3803 context_wo_lang = dict(context, lang=None)
3804 self.write(cr, user, ids, {f: vals[f]}, context=context_wo_lang)
3805 self.pool.get('ir.translation')._set_ids(cr, user, self._name+','+f, 'model', context['lang'], ids, vals[f], src_trans)
3807 # call the 'set' method of fields which are not classic_write
3808 upd_todo.sort(lambda x, y: self._columns[x].priority-self._columns[y].priority)
3810 # default elements in context must be removed when calling a one2many or many2many
3811 rel_context = context.copy()
3812 for c in context.items():
3813 if c[0].startswith('default_'):
3814 del rel_context[c[0]]
3816 for field in upd_todo:
3818 result += self._columns[field].set(cr, self, id, field, vals[field], user, context=rel_context) or []
3820 unknown_fields = updend[:]
3821 for table in self._inherits:
3822 col = self._inherits[table]
3824 for sub_ids in cr.split_for_in_conditions(ids):
3825 cr.execute('select distinct "'+col+'" from "'+self._table+'" ' \
3826 'where id IN %s', (sub_ids,))
3827 nids.extend([x[0] for x in cr.fetchall()])
3831 if self._inherit_fields[val][0] == table:
3833 unknown_fields.remove(val)
3835 self.pool[table].write(cr, user, nids, v, context)
3839 'No such field(s) in model %s: %s.',
3840 self._name, ', '.join(unknown_fields))
3842 # check Python constraints
3843 recs._validate_fields(vals)
3845 # TODO: use _order to set dest at the right position and not first node of parent
3846 # We can't defer parent_store computation because the stored function
3847 # fields that are computed may refer (directly or indirectly) to
3848 # parent_left/right (via a child_of domain)
3851 self.pool._init_parent[self._name] = True
3853 order = self._parent_order or self._order
3854 parent_val = vals[self._parent_name]
3856 clause, params = '%s=%%s' % (self._parent_name,), (parent_val,)
3858 clause, params = '%s IS NULL' % (self._parent_name,), ()
3860 for id in parents_changed:
3861 cr.execute('SELECT parent_left, parent_right FROM %s WHERE id=%%s' % (self._table,), (id,))
3862 pleft, pright = cr.fetchone()
3863 distance = pright - pleft + 1
3865 # Positions of current siblings, to locate proper insertion point;
3866 # this can _not_ be fetched outside the loop, as it needs to be refreshed
3867 # after each update, in case several nodes are sequentially inserted one
3868 # next to the other (i.e computed incrementally)
3869 cr.execute('SELECT parent_right, id FROM %s WHERE %s ORDER BY %s' % (self._table, clause, parent_order), params)
3870 parents = cr.fetchall()
3872 # Find Position of the element
3874 for (parent_pright, parent_id) in parents:
3877 position = parent_pright and parent_pright + 1 or 1
3879 # It's the first node of the parent
3884 cr.execute('select parent_left from '+self._table+' where id=%s', (parent_val,))
3885 position = cr.fetchone()[0] + 1
3887 if pleft < position <= pright:
3888 raise except_orm(_('UserError'), _('Recursion detected.'))
3890 if pleft < position:
3891 cr.execute('update '+self._table+' set parent_left=parent_left+%s where parent_left>=%s', (distance, position))
3892 cr.execute('update '+self._table+' set parent_right=parent_right+%s where parent_right>=%s', (distance, position))
3893 cr.execute('update '+self._table+' set parent_left=parent_left+%s, parent_right=parent_right+%s where parent_left>=%s and parent_left<%s', (position-pleft, position-pleft, pleft, pright))
3895 cr.execute('update '+self._table+' set parent_left=parent_left+%s where parent_left>=%s', (distance, position))
3896 cr.execute('update '+self._table+' set parent_right=parent_right+%s where parent_right>=%s', (distance, position))
3897 cr.execute('update '+self._table+' set parent_left=parent_left-%s, parent_right=parent_right-%s where parent_left>=%s and parent_left<%s', (pleft-position+distance, pleft-position+distance, pleft+distance, pright+distance))
3898 recs.invalidate_cache(['parent_left', 'parent_right'])
3900 result += self._store_get_values(cr, user, ids, vals.keys(), context)
3903 # for recomputing new-style fields
3904 recs.modified(modified_fields)
3907 for order, model_name, ids_to_update, fields_to_recompute in result:
3908 key = (model_name, tuple(fields_to_recompute))
3909 done.setdefault(key, {})
3910 # avoid doing the same computation several times
3912 for id in ids_to_update:
3913 if id not in done[key]:
3914 done[key][id] = True
3916 self.pool[model_name]._store_set_values(cr, user, todo, fields_to_recompute, context)
3918 # recompute new-style fields
3919 if context.get('recompute', True):
3922 self.step_workflow(cr, user, ids, context=context)
3926 # TODO: Should set perm to user.xxx
3929 @api.returns('self', lambda value: value.id)
3930 def create(self, vals):
3931 """ create(vals) -> record
3933 Creates a new record for the model.
3935 The new record is initialized using the values from ``vals`` and
3936 if necessary those from :meth:`~.default_get`.
3939 values for the model's fields, as a dictionary::
3941 {'field_name': field_value, ...}
3943 see :meth:`~.write` for details
3944 :return: new record created
3945 :raise AccessError: * if user has no create rights on the requested object
3946 * if user tries to bypass access rules for create on the requested object
3947 :raise ValidateError: if the user tries to set an invalid value on a selection field
3948 :raise UserError: if a loop would be created in a hierarchy of objects as a result of the operation (such as setting an object as its own parent)
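Example (model name and values are illustrative)::

    record = self.env['res.partner'].create({
        'name': "Ada Lovelace",
        'email': "ada@example.com",
    })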
3950 self.check_access_rights('create')
3952 # add missing defaults, and drop fields that may not be set by user
3953 vals = self._add_missing_default_values(vals)
3954 for field in itertools.chain(MAGIC_COLUMNS, ('parent_left', 'parent_right')):
3955 vals.pop(field, None)
3957 # split up fields into old-style and pure new-style ones
3958 old_vals, new_vals, unknown = {}, {}, []
3959 for key, val in vals.iteritems():
3960 if key in self._all_columns:
3961 old_vals[key] = val
3962 elif key in self._fields:
3963 new_vals[key] = val
3964 else:
3965 unknown.append(key)
3967 if unknown:
3968 _logger.warning("%s.create() with unknown fields: %s", self._name, ', '.join(sorted(unknown)))
3970 # create record with old-style fields
3971 record = self.browse(self._create(old_vals))
3973 # put the values of pure new-style fields into cache, and inverse them
3974 record._cache.update(record._convert_to_cache(new_vals))
3975 for key in new_vals:
3976 self._fields[key].determine_inverse(record)
3980 def _create(self, cr, user, vals, context=None):
3981 # low-level implementation of create()
3985 if self.is_transient():
3986 self._transient_vacuum(cr, user)
3989 for v in self._inherits:
3990 if self._inherits[v] not in vals:
3993 tocreate[v] = {'id': vals[self._inherits[v]]}
3996 # list of column assignments defined as tuples like:
3997 # (column_name, format_string, column_value)
3998 # (column_name, sql_formula)
3999 # Those tuples will be used by the string formatting for the INSERT
4001 ('id', "nextval('%s')" % self._sequence),
4006 for v in vals.keys():
4007 if v in self._inherit_fields and v not in self._columns:
4008 (table, col, col_detail, original_parent) = self._inherit_fields[v]
4009 tocreate[table][v] = vals[v]
4012 if (v not in self._inherit_fields) and (v not in self._columns):
4014 unknown_fields.append(v)
4017 'No such field(s) in model %s: %s.',
4018 self._name, ', '.join(unknown_fields))
4020 for table in tocreate:
4021 if self._inherits[table] in vals:
4022 del vals[self._inherits[table]]
4024 record_id = tocreate[table].pop('id', None)
4026 if record_id is None or not record_id:
4027 record_id = self.pool[table].create(cr, user, tocreate[table], context=context)
4029 self.pool[table].write(cr, user, [record_id], tocreate[table], context=context)
4031 updates.append((self._inherits[table], '%s', record_id))
4033 # Start: set boolean fields to False if they are not set (to make searching on them more reliable)
4034 bool_fields = [x for x in self._columns.keys() if self._columns[x]._type=='boolean']
4036 for bool_field in bool_fields:
4037 if bool_field not in vals:
4038 vals[bool_field] = False
4040 for field in vals.keys():
4042 if field in self._columns:
4043 fobj = self._columns[field]
4045 fobj = self._inherit_fields[field][2]
4051 for group in groups:
4052 module = group.split(".")[0]
4053 grp = group.split(".")[1]
4054 cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name=%s and module=%s and model=%s) and uid=%s", \
4055 (grp, module, 'res.groups', user))
4056 readonly = cr.fetchall()
4057 if readonly[0][0] >= 1:
4060 elif readonly[0][0] == 0:
4068 current_field = self._columns[field]
4069 if current_field._classic_write:
4070 updates.append((field, '%s', current_field._symbol_set[1](vals[field])))
4072 #for the function fields that receive a value, we set them directly in the database
4073 #(they may be required), but we also need to trigger the _fct_inv()
4074 if (hasattr(current_field, '_fnct_inv')) and not isinstance(current_field, fields.related):
4075 #TODO: this way of special-casing related fields is really creepy, but it shouldn't be changed
4076 #one week before the release candidate. The only clean way to handle this correctly would be to add
4077 #an attribute making a field 'really readonly', and thus totally ignored by create()... otherwise,
4078 #if for example the related field has a default value (for usability), the _fnct_inv is called and
4079 #may raise an access rights error. Changing this is too big a change for now, and is thus postponed
4080 #until after the release; but definitively, the behavior shouldn't differ between related and function
4082 upd_todo.append(field)
4084 #TODO: this 'if' statement should be removed because there is no good reason to special-case
4085 #related fields. See the TODO comment above for further explanations.
4086 if not isinstance(current_field, fields.related):
4087 upd_todo.append(field)
4088 if field in self._columns \
4089 and hasattr(current_field, 'selection') \
4091 self._check_selection_field_value(cr, user, field, vals[field], context=context)
4092 if self._log_access:
4093 updates.append(('create_uid', '%s', user))
4094 updates.append(('write_uid', '%s', user))
4095 updates.append(('create_date', "(now() at time zone 'UTC')"))
4096 updates.append(('write_date', "(now() at time zone 'UTC')"))
4098 # the list of tuples used in this formatting corresponds to
4099 # tuple(field_name, format, value)
4100 # In some cases, for example (id, create_date, write_date), we do not
4101 # need to read the third value of the tuple, because the real value is
4102 # encoded in the second value (the format).
4104 """INSERT INTO "%s" (%s) VALUES(%s) RETURNING id""" % (
4106 ', '.join('"%s"' % u[0] for u in updates),
4107 ', '.join(u[1] for u in updates)
4109 tuple([u[2] for u in updates if len(u) > 2])
4112 id_new, = cr.fetchone()
4113 recs = self.browse(cr, user, id_new, context)
4114 upd_todo.sort(lambda x, y: self._columns[x].priority-self._columns[y].priority)
4116 if self._parent_store and not context.get('defer_parent_store_computation'):
4118 self.pool._init_parent[self._name] = True
4120 parent = vals.get(self._parent_name, False)
4122 cr.execute('select parent_right from '+self._table+' where '+self._parent_name+'=%s order by '+(self._parent_order or self._order), (parent,))
4124 result_p = cr.fetchall()
4125 for (pleft,) in result_p:
4130 cr.execute('select parent_left from '+self._table+' where id=%s', (parent,))
4131 pleft_old = cr.fetchone()[0]
4134 cr.execute('select max(parent_right) from '+self._table)
4135 pleft = cr.fetchone()[0] or 0
4136 cr.execute('update '+self._table+' set parent_left=parent_left+2 where parent_left>%s', (pleft,))
4137 cr.execute('update '+self._table+' set parent_right=parent_right+2 where parent_right>%s', (pleft,))
4138 cr.execute('update '+self._table+' set parent_left=%s,parent_right=%s where id=%s', (pleft+1, pleft+2, id_new))
4139 recs.invalidate_cache(['parent_left', 'parent_right'])
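# Illustrative worked example for the nested-set update above (values are
# hypothetical): with no parent given and max(parent_right) == 6, the new
# record gets parent_left=7 and parent_right=8; with a parent whose
# parent_left is 3, every interval boundary > 3 is shifted by +2 and the
# new leaf takes the freed interval (4, 5), nested inside its parent's.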
4141 # default elements in context must be removed when calling a one2many or many2many
4142 rel_context = context.copy()
4143 for c in context.items():
4144 if c[0].startswith('default_'):
4145 del rel_context[c[0]]
4148 for field in upd_todo:
4149 result += self._columns[field].set(cr, self, id_new, field, vals[field], user, rel_context) or []
4151 # check Python constraints
4152 recs._validate_fields(vals)
4154 # invalidate and mark new-style fields to recompute
4155 modified_fields = list(vals)
4156 if self._log_access:
4157 modified_fields += ['create_uid', 'create_date', 'write_uid', 'write_date']
4158 recs.modified(modified_fields)
4160 if context.get('recompute', True):
4161 result += self._store_get_values(cr, user, [id_new],
4162 list(set(vals.keys() + self._inherits.values())),
4166 for order, model_name, ids, fields2 in result:
4167 if not (model_name, ids, fields2) in done:
4168 self.pool[model_name]._store_set_values(cr, user, ids, fields2, context)
4169 done.append((model_name, ids, fields2))
4170 # recompute new-style fields
4173 if self._log_create and context.get('recompute', True):
4174 message = self._description + \
4176 self.name_get(cr, user, [id_new], context=context)[0][1] + \
4177 "' " + _("created.")
4178 self.log(cr, user, id_new, message, True, context=context)
4180 self.check_access_rule(cr, user, [id_new], 'create', context=context)
4181 self.create_workflow(cr, user, [id_new], context=context)
4184 def _store_get_values(self, cr, uid, ids, fields, context):
4185 """Returns an ordered list of fields.function to call due to
4186 an update operation on ``fields`` of records with ``ids``,
4187 obtained by calling the 'store' triggers of these fields,
4188 as set up by their 'store' attribute.
4190 :return: [(priority, model_name, [record_ids,], [function_fields,])]
4192 if fields is None: fields = []
4193 stored_functions = self.pool._store_function.get(self._name, [])
4195 # use indexed names for the details of the stored_functions:
4196 model_name_, func_field_to_compute_, target_ids_func_, trigger_fields_, priority_ = range(5)
4198 # only keep store triggers that should be triggered for the ``fields``
4200 triggers_to_compute = (
4201 f for f in stored_functions
4202 if not f[trigger_fields_] or set(fields).intersection(f[trigger_fields_])
4206 target_id_results = {}
4207 for store_trigger in triggers_to_compute:
4208 target_func_id_ = id(store_trigger[target_ids_func_])
4209 if target_func_id_ not in target_id_results:
4210 # use admin user for accessing objects having rules defined on store fields
4211 target_id_results[target_func_id_] = [i for i in store_trigger[target_ids_func_](self, cr, SUPERUSER_ID, ids, context) if i]
4212 target_ids = target_id_results[target_func_id_]
4214 # the compound key must consider the priority and model name
4215 key = (store_trigger[priority_], store_trigger[model_name_])
4216 for target_id in target_ids:
4217 to_compute_map.setdefault(key, {}).setdefault(target_id,set()).add(tuple(store_trigger))
4219 # Here to_compute_map looks like:
4220 # { (10, 'model_a') : { target_id1: [ (trigger_1_tuple, trigger_2_tuple) ], ... }
4221 # (20, 'model_a') : { target_id2: [ (trigger_3_tuple, trigger_4_tuple) ], ... }
4222 # (99, 'model_a') : { target_id1: [ (trigger_5_tuple, trigger_6_tuple) ], ... }
4225 # Now we need to generate the batch function calls list
4227 # { (10, 'model_a') : [(10, 'model_a', [record_ids,], [function_fields,])] }
4229 for ((priority,model), id_map) in to_compute_map.iteritems():
4230 trigger_ids_maps = {}
4231 # function_ids_maps =
4232 # { (function_1_tuple, function_2_tuple) : [target_id1, target_id2, ..] }
4233 for target_id, triggers in id_map.iteritems():
4234 trigger_ids_maps.setdefault(tuple(triggers), []).append(target_id)
4235 for triggers, target_ids in trigger_ids_maps.iteritems():
4236 call_map.setdefault((priority,model),[]).append((priority, model, target_ids,
4237 [t[func_field_to_compute_] for t in triggers]))
4240 result = reduce(operator.add, (call_map[k] for k in sorted(call_map)))
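# Illustrative sketch (model and field names are hypothetical): the list
# built here, ordered by (priority, model), could look like
#
#     [(10, 'sale.order', [1, 2], ['amount_total']),
#      (20, 'sale.order.line', [5, 6], ['price_subtotal'])]
#
# i.e. one batch call per (priority, model, trigger-set) group.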
4243 def _store_set_values(self, cr, uid, ids, fields, context):
4244 """Calls the fields.function's "implementation function" for all ``fields``, on records with ``ids`` (taking care of
4245 respecting ``multi`` attributes), and stores the resulting values in the database directly."""
4250 if self._log_access:
4251 cr.execute('select id,write_date from '+self._table+' where id IN %s', (tuple(ids),))
4255 field_dict.setdefault(r[0], [])
4256 res_date = time.strptime((r[1])[:19], '%Y-%m-%d %H:%M:%S')
4257 write_date = datetime.datetime.fromtimestamp(time.mktime(res_date))
4258 for i in self.pool._store_function.get(self._name, []):
4260 up_write_date = write_date + datetime.timedelta(hours=i[5])
4261 if datetime.datetime.now() < up_write_date:
4263 field_dict[r[0]].append(i[1])
4269 if self._columns[f]._multi not in keys:
4270 keys.append(self._columns[f]._multi)
4271 todo.setdefault(self._columns[f]._multi, [])
4272 todo[self._columns[f]._multi].append(f)
4276 # use admin user for accessing objects having rules defined on store fields
4277 result = self._columns[val[0]].get(cr, self, ids, val, SUPERUSER_ID, context=context)
4278 for id, value in result.items():
4280 for f in value.keys():
4281 if f in field_dict[id]:
4288 if self._columns[v]._type == 'many2one':
4290 value[v] = value[v][0]
4293 upd0.append('"'+v+'"='+self._columns[v]._symbol_set[0])
4294 upd1.append(self._columns[v]._symbol_set[1](value[v]))
4297 cr.execute('update "' + self._table + '" set ' + \
4298 ','.join(upd0) + ' where id = %s', upd1)
4302 # use admin user for accessing objects having rules defined on store fields
4303 result = self._columns[f].get(cr, self, ids, f, SUPERUSER_ID, context=context)
4304 for r in result.keys():
4306 if r in field_dict.keys():
4307 if f in field_dict[r]:
4309 for id, value in result.items():
4310 if self._columns[f]._type == 'many2one':
4315 cr.execute('update "' + self._table + '" set ' + \
4316 '"'+f+'"='+self._columns[f]._symbol_set[0] + ' where id = %s', (self._columns[f]._symbol_set[1](value), id))
4318 # invalidate and mark new-style fields to recompute
4319 self.browse(cr, uid, ids, context).modified(fields)
4323 # TODO: improve handling of NULL values
4324 def _where_calc(self, cr, user, domain, active_test=True, context=None):
4325 """Computes the WHERE clause needed to implement an OpenERP domain.
4326 :param domain: the domain to compute
4328 :param active_test: whether the default filtering of records with ``active``
4329 field set to ``False`` should be applied.
4330 :return: the query expressing the given domain as provided in domain
4331 :rtype: osv.query.Query
4336 # if the object has a field named 'active', filter out all inactive
4337 # records unless they were explicitly asked for
4338 if 'active' in self._all_columns and (active_test and context.get('active_test', True)):
4340 # the item[0] trick below works for domain items and '&'/'|'/'!'
4342 if not any(item[0] == 'active' for item in domain):
4343 domain.insert(0, ('active', '=', 1))
4345 domain = [('active', '=', 1)]
4348 e = expression.expression(cr, user, domain, self, context)
4349 tables = e.get_tables()
4350 where_clause, where_params = e.to_sql()
4351 where_clause = where_clause and [where_clause] or []
4353 where_clause, where_params, tables = [], [], ['"%s"' % self._table]
4355 return Query(tables, where_clause, where_params)
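# Illustrative sketch (model and domain are hypothetical): for a model with
# an 'active' column,
#
#     query = model._where_calc(cr, uid, [('name', 'ilike', 'foo')])
#
# yields roughly Query(tables=['"res_partner"'],
#     where_clause=['("res_partner"."active" = %s)',
#                   '("res_partner"."name" ilike %s)'],
#     where_clause_params=[True, '%foo%'])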
4357 def _check_qorder(self, word):
4358 if not regex_order.match(word):
4359 raise except_orm(_('AccessError'), _('Invalid "order" specified. A valid "order" specification is a comma-separated list of valid field names (optionally followed by asc/desc for the direction)'))
4362 def _apply_ir_rules(self, cr, uid, query, mode='read', context=None):
4363 """Add what's missing in ``query`` to implement all appropriate ir.rules
4364 (i.e. the current model's rules plus, through _inherits, its parents' rules)
4366 :param query: the current query object
4368 if uid == SUPERUSER_ID:
4371 def apply_rule(added_clause, added_params, added_tables, parent_model=None):
4372 """ :param parent_model: name of the parent model, if the added
4373 clause comes from a parent model
4377 # as inherited rules are being applied, we need to add the missing JOIN
4378 # to reach the parent table (if it was not JOINed yet in the query)
4379 parent_alias = self._inherits_join_add(self, parent_model, query)
4380 # inherited rules are applied on the external table -> need to get the alias and replace
4381 parent_table = self.pool[parent_model]._table
4382 added_clause = [clause.replace('"%s"' % parent_table, '"%s"' % parent_alias) for clause in added_clause]
4383 # change references to parent_table to parent_alias, because we now use the alias to refer to the table
4385 for table in added_tables:
4386 # table is just a table name -> switch to the full alias
4387 if table == '"%s"' % parent_table:
4388 new_tables.append('"%s" as "%s"' % (parent_table, parent_alias))
4389 # table is already a full statement -> replace references to the table with its alias; this is correct given the way aliases are generated
4391 new_tables.append(table.replace('"%s"' % parent_table, '"%s"' % parent_alias))
4392 added_tables = new_tables
4393 query.where_clause += added_clause
4394 query.where_clause_params += added_params
4395 for table in added_tables:
4396 if table not in query.tables:
4397 query.tables.append(table)
4401 # apply main rules on the object
4402 rule_obj = self.pool.get('ir.rule')
4403 rule_where_clause, rule_where_clause_params, rule_tables = rule_obj.domain_get(cr, uid, self._name, mode, context=context)
4404 apply_rule(rule_where_clause, rule_where_clause_params, rule_tables)
4406 # apply ir.rules from the parents (through _inherits)
4407 for inherited_model in self._inherits:
4408 rule_where_clause, rule_where_clause_params, rule_tables = rule_obj.domain_get(cr, uid, inherited_model, mode, context=context)
4409 apply_rule(rule_where_clause, rule_where_clause_params, rule_tables,
4410 parent_model=inherited_model)
4412 def _generate_m2o_order_by(self, order_field, query):
4414 Add possibly missing JOIN to ``query`` and generate the ORDER BY clause for m2o fields,
4415 either native m2o fields or function/related fields that are stored, including
4416 intermediate JOINs for inheritance if required.
4418 :return: the qualified field name to use in an ORDER BY clause to sort by ``order_field``
4420 if order_field not in self._columns and order_field in self._inherit_fields:
4421 # also add missing joins for reaching the table containing the m2o field
4422 qualified_field = self._inherits_join_calc(order_field, query)
4423 order_field_column = self._inherit_fields[order_field][2]
4425 qualified_field = '"%s"."%s"' % (self._table, order_field)
4426 order_field_column = self._columns[order_field]
4428 assert order_field_column._type == 'many2one', 'Invalid field passed to _generate_m2o_order_by()'
4429 if not order_field_column._classic_write and not getattr(order_field_column, 'store', False):
4430 _logger.debug("Many2one function/related fields must be stored " \
4431 "to be used as ordering fields! Ignoring sorting for %s.%s",
4432 self._name, order_field)
4435 # figure out the applicable order_by for the m2o
4436 dest_model = self.pool[order_field_column._obj]
4437 m2o_order = dest_model._order
4438 if not regex_order.match(m2o_order):
4439 # _order is complex, can't use it here, so we default to _rec_name
4440 m2o_order = dest_model._rec_name
4442 # extract the field names, to be able to qualify them and add desc/asc
4444 for order_part in m2o_order.split(","):
4445 m2o_order_list.append(order_part.strip().split(" ", 1)[0].strip())
4446 m2o_order = m2o_order_list
4448 # Join the dest m2o table if it's not joined yet. We use [LEFT] OUTER join here
4449 # as we don't want to exclude results that have NULL values for the m2o
4450 src_table, src_field = qualified_field.replace('"', '').split('.', 1)
4451 dst_alias, dst_alias_statement = query.add_join((src_table, dest_model._table, src_field, 'id', src_field), implicit=False, outer=True)
4452 qualify = lambda field: '"%s"."%s"' % (dst_alias, field)
4453 return map(qualify, m2o_order) if isinstance(m2o_order, list) else qualify(m2o_order)
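# Illustrative sketch (field and tables are hypothetical): sorting
# res.partner by its many2one 'parent_id', whose comodel's _order is
# 'name', adds roughly
#
#     LEFT JOIN "res_partner" as "res_partner__parent_id"
#         ON ("res_partner"."parent_id" = "res_partner__parent_id"."id")
#
# to the query and returns '"res_partner__parent_id"."name"' for ORDER BY.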
4455 def _generate_order_by(self, order_spec, query):
4457 Attempt to construct an appropriate ORDER BY clause based on order_spec, which must be
4458 a comma-separated list of valid field names, optionally followed by an ASC or DESC direction.
4460 :raise" except_orm in case order_spec is malformed
4462 order_by_clause = ''
4463 order_spec = order_spec or self._order
4465 order_by_elements = []
4466 self._check_qorder(order_spec)
4467 for order_part in order_spec.split(','):
4468 order_split = order_part.strip().split(' ')
4469 order_field = order_split[0].strip()
4470 order_direction = order_split[1].strip() if len(order_split) == 2 else ''
4473 if order_field == 'id':
4474 order_by_elements.append('"%s"."%s" %s' % (self._table, order_field, order_direction))
4475 elif order_field in self._columns:
4476 order_column = self._columns[order_field]
4477 if order_column._classic_read:
4478 inner_clause = '"%s"."%s"' % (self._table, order_field)
4479 elif order_column._type == 'many2one':
4480 inner_clause = self._generate_m2o_order_by(order_field, query)
4482 continue # ignore non-readable or "non-joinable" fields
4483 elif order_field in self._inherit_fields:
4484 parent_obj = self.pool[self._inherit_fields[order_field][3]]
4485 order_column = parent_obj._columns[order_field]
4486 if order_column._classic_read:
4487 inner_clause = self._inherits_join_calc(order_field, query)
4488 elif order_column._type == 'many2one':
4489 inner_clause = self._generate_m2o_order_by(order_field, query)
4491 continue # ignore non-readable or "non-joinable" fields
4493 raise ValueError(_("Sorting field %s not found on model %s") % (order_field, self._name))
4494 if order_column and order_column._type == 'boolean':
4495 inner_clause = "COALESCE(%s, false)" % inner_clause
4497 if isinstance(inner_clause, list):
4498 for clause in inner_clause:
4499 order_by_elements.append("%s %s" % (clause, order_direction))
4501 order_by_elements.append("%s %s" % (inner_clause, order_direction))
4502 if order_by_elements:
4503 order_by_clause = ",".join(order_by_elements)
4505 return order_by_clause and (' ORDER BY %s ' % order_by_clause) or ''
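# Illustrative sketch (table name is hypothetical): for
# order_spec = 'name desc, id', the method returns roughly
#     ' ORDER BY "res_partner"."name" desc,"res_partner"."id"  '
# with boolean columns additionally wrapped in COALESCE(..., false).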
4507 def _search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False, access_rights_uid=None):
4509 Private implementation of the search() method, allowing the caller to specify the uid to use for the access-rights check.
4510 This is useful for example when filling in the selection list for a drop-down and avoiding access rights errors,
4511 by specifying ``access_rights_uid=1`` to bypass access rights check, but not ir.rules!
4512 This is ok at the security level because this method is private and not callable through XML-RPC.
4514 :param access_rights_uid: optional user ID to use when checking access rights
4515 (not for ir.rules, this is only for ir.model.access)
4519 self.check_access_rights(cr, access_rights_uid or user, 'read')
4521 # For transient models, restrict access to the current user, except for the super-user
4522 if self.is_transient() and self._log_access and user != SUPERUSER_ID:
4523 args = expression.AND(([('create_uid', '=', user)], args or []))
4525 query = self._where_calc(cr, user, args, context=context)
4526 self._apply_ir_rules(cr, user, query, 'read', context=context)
4527 order_by = self._generate_order_by(order, query)
4528 from_clause, where_clause, where_clause_params = query.get_sql()
4530 where_str = where_clause and (" WHERE %s" % where_clause) or ''
4533 # Ignore order, limit and offset when just counting, they don't make sense and could
4534 # hurt performance
4535 query_str = 'SELECT count(1) FROM ' + from_clause + where_str
4536 cr.execute(query_str, where_clause_params)
4540 limit_str = limit and ' limit %d' % limit or ''
4541 offset_str = offset and ' offset %d' % offset or ''
4542 query_str = 'SELECT "%s".id FROM ' % self._table + from_clause + where_str + order_by + limit_str + offset_str
4543 cr.execute(query_str, where_clause_params)
4546 # TDE note: with auto_join, we could have several lines about the same result
4547 # i.e. a lead with several unread messages; we uniquify the result using
4548 # a fast way to do it while preserving order (http://www.peterbe.com/plog/uniqifiers-benchmark)
4549 def _uniquify_list(seq):
4551 return [x for x in seq if x not in seen and not seen.add(x)]
4553 return _uniquify_list([x[0] for x in res])
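# Illustrative sketch (table and clauses are hypothetical): when not
# counting, the statement built above looks roughly like
#
#     SELECT "res_partner".id FROM "res_partner"
#     WHERE ("res_partner"."active" = %s) ORDER BY id limit 80
#
# with the ir.rule clauses merged into the WHERE part by _apply_ir_rules().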
4555 # returns the different values ever entered for one field
4556 # this is used, for example, in the client when the user hits enter on
4557 # a char field
4558 def distinct_field_get(self, cr, uid, field, value, args=None, offset=0, limit=None):
4561 if field in self._inherit_fields:
4562 return self.pool[self._inherit_fields[field][0]].distinct_field_get(cr, uid, field, value, args, offset, limit)
4564 return self._columns[field].search(cr, self, args, field, value, offset, limit, uid)
4566 def copy_data(self, cr, uid, id, default=None, context=None):
4568 Copy the given record's data with all its field values
4570 :param cr: database cursor
4571 :param uid: current user id
4572 :param id: id of the record to copy
4573 :param default: field values to override in the original values of the copied record
4574 :type default: dictionary
4575 :param context: context arguments, like lang, time zone
4576 :type context: dictionary
4577 :return: dictionary containing all the field values
4583 # avoid recursion through already copied records in case of circular relationship
4584 seen_map = context.setdefault('__copy_data_seen', {})
4585 if id in seen_map.setdefault(self._name, []):
4587 seen_map[self._name].append(id)
4591 if 'state' not in default:
4592 if 'state' in self._defaults:
4593 if callable(self._defaults['state']):
4594 default['state'] = self._defaults['state'](self, cr, uid, context)
4596 default['state'] = self._defaults['state']
4598 # build a black list of fields that should not be copied
4599 blacklist = set(MAGIC_COLUMNS + ['parent_left', 'parent_right'])
4600 def blacklist_given_fields(obj):
4601 # blacklist the fields that are given by inheritance
4602 for other, field_to_other in obj._inherits.items():
4603 blacklist.add(field_to_other)
4604 if field_to_other in default:
4605 # all the fields of 'other' are given by the record: default[field_to_other],
4606 # except the ones redefined in self
4607 blacklist.update(set(self.pool[other]._all_columns) - set(self._columns))
4609 blacklist_given_fields(self.pool[other])
4610 # blacklist deprecated fields
4611 for name, field in obj._columns.items():
4612 if field.deprecated:
4615 blacklist_given_fields(self)
4618 fields_to_copy = dict((f, fi) for f, fi in self._all_columns.iteritems()
4621 if f not in blacklist)
4623 data = self.read(cr, uid, [id], fields_to_copy.keys(), context=context)
4624 if data:
4625 data = data[0]
4626 else:
4627 raise IndexError(_("Record #%d of %s not found, cannot copy!") % (id, self._name))
4629 res = dict(default)
4630 for f, colinfo in fields_to_copy.iteritems():
4631 field = colinfo.column
4632 if field._type == 'many2one':
4633 res[f] = data[f] and data[f][0]
4634 elif field._type == 'one2many':
4635 other = self.pool[field._obj]
4636 # duplicate following the order of the ids because we'll rely on
4637 # it later for copying translations in copy_translation()!
4638 lines = [other.copy_data(cr, uid, line_id, context=context) for line_id in sorted(data[f])]
4639 # the lines are duplicated using the wrong (old) parent, but then
4640 # are reassigned to the correct one thanks to the (0, 0, ...)
4641 res[f] = [(0, 0, line) for line in lines if line]
4642 elif field._type == 'many2many':
4643 res[f] = [(6, 0, data[f])]
4644 else:
4645 res[f] = data[f]
4646 return res
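# Hedged sketch (hypothetical field names) of the dictionary shape built above:
#   {'name': 'Foo',
#    'partner_id': 7,                   # many2one -> bare id
#    'line_ids': [(0, 0, {...}), ...],  # one2many -> create commands
#    'tag_ids': [(6, 0, [1, 2, 3])]}    # many2many -> one replace command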
4649 def copy_translations(self, cr, uid, old_id, new_id, context=None):
4650 if context is None:
4651 context = {}
4653 # avoid recursion through already copied records in case of circular relationship
4654 seen_map = context.setdefault('__copy_translations_seen', {})
4655 if old_id in seen_map.setdefault(self._name, []):
4656 return
4657 seen_map[self._name].append(old_id)
4659 trans_obj = self.pool.get('ir.translation')
4660 # TODO it seems fields_get can be replaced by _all_columns (no need for translation)
4661 fields = self.fields_get(cr, uid, context=context)
4663 for field_name, field_def in fields.items():
4664 # removing the lang to compare untranslated values
4665 context_wo_lang = dict(context, lang=None)
4666 old_record, new_record = self.browse(cr, uid, [old_id, new_id], context=context_wo_lang)
4667 # we must recursively copy the translations for o2o and o2m
4668 if field_def['type'] == 'one2many':
4669 target_obj = self.pool[field_def['relation']]
4670 # here we rely on the order of the ids to match the translations
4671 # as foreseen in copy_data()
4672 old_children = sorted(r.id for r in old_record[field_name])
4673 new_children = sorted(r.id for r in new_record[field_name])
4674 for (old_child, new_child) in zip(old_children, new_children):
4675 target_obj.copy_translations(cr, uid, old_child, new_child, context=context)
4676 # and for translatable fields we keep them for copy
4677 elif field_def.get('translate'):
4678 if field_name in self._columns:
4679 trans_name = self._name + "," + field_name
4680 target_id = new_id
4681 source_id = old_id
4682 elif field_name in self._inherit_fields:
4683 trans_name = self._inherit_fields[field_name][0] + "," + field_name
4684 # get the id of the parent record to set the translation
4685 inherit_field_name = self._inherit_fields[field_name][1]
4686 target_id = new_record[inherit_field_name].id
4687 source_id = old_record[inherit_field_name].id
4688 else:
4689 continue
4691 trans_ids = trans_obj.search(cr, uid, [
4692 ('name', '=', trans_name),
4693 ('res_id', '=', source_id)
4694 ])
4695 user_lang = context.get('lang')
4696 for record in trans_obj.read(cr, uid, trans_ids, context=context):
4697 del record['id']
4698 # remove source to avoid triggering _set_src
4699 del record['source']
4700 record.update({'res_id': target_id})
4701 if user_lang and user_lang == record['lang']:
4702 # 'source' to force the call to _set_src
4703 # 'value' needed if value is changed in copy(), want to see the new_value
4704 record['source'] = old_record[field_name]
4705 record['value'] = new_record[field_name]
4706 trans_obj.create(cr, uid, record, context=context)
4708 @api.returns('self', lambda value: value.id)
4709 def copy(self, cr, uid, id, default=None, context=None):
4710 """ copy(default=None)
4712 Duplicate the record with the given id, updating it with default values
4714 :param dict default: dictionary of field values to override in the
4715 original values of the copied record, e.g.: ``{'field_name': overridden_value, ...}``
4716 :returns: new record
4718 """
4719 if context is None:
4720 context = {}
4721 context = context.copy()
4722 data = self.copy_data(cr, uid, id, default, context)
4723 new_id = self.create(cr, uid, data, context)
4724 self.copy_translations(cr, uid, id, new_id, context)
4725 return new_id
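# Hedged usage sketch (old API, hypothetical model and ids):
#   new_id = self.pool['res.partner'].copy(cr, uid, partner_id,
#                                          default={'name': 'Copy of Foo'},
#                                          context=context)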
4728 @api.returns('self')
4729 def exists(self):
4730 """ exists() -> records
4732 Returns the subset of records in `self` that exist, and marks deleted
4733 records as such in cache. It can be used as a test on records::
4735 if not record.exists():
4736 raise Exception()
4738 By convention, new records are returned as existing.
4739 """
4740 ids = filter(None, self._ids) # ids to check in database
4741 if not ids:
4742 return self
4743 query = """SELECT id FROM "%s" WHERE id IN %%s""" % self._table
4744 self._cr.execute(query, (ids,))
4745 ids = ([r[0] for r in self._cr.fetchall()] + # ids in database
4746 [id for id in self._ids if not id]) # new ids
4747 existing = self.browse(ids)
4748 if len(existing) < len(self):
4749 # mark missing records in cache with a failed value
4750 exc = MissingError(_("Record does not exist or has been deleted."))
4751 (self - existing)._cache.update(FailedValue(exc))
4752 return existing
4754 def check_recursion(self, cr, uid, ids, context=None, parent=None):
4755 _logger.warning("You are using deprecated %s.check_recursion(). Please use '_check_recursion()' instead!" % \
4756 self._name)
4757 assert parent is None or parent in self._columns or parent in self._inherit_fields,\
4758 "The 'parent' parameter passed to check_recursion() must be None or a valid field name"
4759 return self._check_recursion(cr, uid, ids, context, parent)
4761 def _check_recursion(self, cr, uid, ids, context=None, parent=None):
4763 Verifies that there is no loop in a hierarchical structure of records,
4764 by following the parent relationship using the **parent** field until a loop
4765 is detected or until a top-level record is found.
4767 :param cr: database cursor
4768 :param uid: current user id
4769 :param ids: list of ids of records to check
4770 :param parent: optional parent field name (default: ``self._parent_name = parent_id``)
4771 :return: **True** if the operation can proceed safely, or **False** if an infinite loop is detected.
4772 """
4773 if not parent:
4774 parent = self._parent_name
4776 # must ignore 'active' flag, ir.rules, etc. => direct SQL query
4777 query = 'SELECT "%s" FROM "%s" WHERE id = %%s' % (parent, self._table)
4778 for id in ids:
4779 current_id = id
4780 while current_id is not None:
4781 cr.execute(query, (current_id,))
4782 result = cr.fetchone()
4783 current_id = result[0] if result else None
4784 if current_id == id:
4785 return False
4786 return True
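# Hedged sketch of the common pattern wiring this check into a model
# (hypothetical message and field name):
#   _constraints = [
#       (osv.osv._check_recursion,
#        'Error! You cannot create recursive categories.', ['parent_id']),
#   ]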
4788 def _check_m2m_recursion(self, cr, uid, ids, field_name):
4790 Verifies that there is no loop in a hierarchical structure of records,
4791 by following the relationships through the given many2many **field_name**,
4792 until a loop is detected or until a top-level record is found.
4794 :param cr: database cursor
4795 :param uid: current user id
4796 :param ids: list of ids of records to check
4797 :param field_name: field to check
4798 :return: **True** if the operation can proceed safely, or **False** if an infinite loop is detected.
4801 field = self._all_columns.get(field_name)
4802 field = field.column if field else None
4803 if not field or field._type != 'many2many' or field._obj != self._name:
4804 # field must be a many2many on itself
4805 raise ValueError('invalid field_name: %r' % (field_name,))
4807 query = 'SELECT distinct "%s" FROM "%s" WHERE "%s" IN %%s' % (field._id2, field._rel, field._id1)
4808 ids_parent = ids[:]
4809 while ids_parent:
4810 ids_parent2 = []
4811 for i in range(0, len(ids_parent), cr.IN_MAX):
4812 j = i + cr.IN_MAX
4813 sub_ids_parent = ids_parent[i:j]
4814 cr.execute(query, (tuple(sub_ids_parent),))
4815 ids_parent2.extend(filter(None, map(lambda x: x[0], cr.fetchall())))
4816 ids_parent = ids_parent2
4817 for i in ids_parent:
4818 if i in ids:
4819 return False
4820 return True
4822 def _get_external_ids(self, cr, uid, ids, *args, **kwargs):
4823 """Retrieve the External ID(s) of any database record.
4825 **Synopsis**: ``_get_external_ids(cr, uid, ids) -> { 'id': ['module.xml_id'] }``
4827 :return: map of ids to the list of their fully qualified External IDs
4828 in the form ``module.key``, or an empty list when there's no External
4829 ID for a record, e.g.::
4831 { 'id': ['module.ext_id', 'module.ext_id_bis'],
4832 'id2': [] }
4833 """
4834 ir_model_data = self.pool.get('ir.model.data')
4835 data_ids = ir_model_data.search(cr, uid, [('model', '=', self._name), ('res_id', 'in', ids)])
4836 data_results = ir_model_data.read(cr, uid, data_ids, ['module', 'name', 'res_id'])
4837 result = {}
4838 for id in ids:
4839 # can't use dict.fromkeys() as the list would be shared!
4840 result[id] = []
4841 for record in data_results:
4842 result[record['res_id']].append('%(module)s.%(name)s' % record)
4843 return result
4845 def get_external_id(self, cr, uid, ids, *args, **kwargs):
4846 """Retrieve the External ID of any database record, if there
4847 is one. This method works as a possible implementation
4848 for a function field, to be able to add it to any
4849 model object easily, referencing it as ``Model.get_external_id``.
4851 When multiple External IDs exist for a record, only one
4852 of them is returned (arbitrarily).
4854 :return: map of ids to their fully qualified XML ID,
4855 defaulting to an empty string when there's none
4856 (to be usable as a function field),
4857 e.g.::
4859 { 'id': 'module.ext_id',
4860 'id2': '' }
4861 """
4862 results = self._get_xml_ids(cr, uid, ids)
4863 for k, v in results.iteritems():
4864 if v:
4865 results[k] = v[0]
4866 else:
4867 results[k] = ''
4868 return results
4870 # backwards compatibility
4871 get_xml_id = get_external_id
4872 _get_xml_ids = _get_external_ids
4874 def print_report(self, cr, uid, ids, name, data, context=None):
4876 Render the report `name` for the given IDs. The report must be defined
4877 for this model, not another.
4879 report = self.pool['ir.actions.report.xml']._lookup_report(cr, name)
4880 assert self._name == report.table
4881 return report.create(cr, uid, ids, data, context)
4884 @classmethod
4885 def is_transient(cls):
4886 """ Return whether the model is transient.
4888 See :class:`TransientModel`.
4891 return cls._transient
4893 def _transient_clean_rows_older_than(self, cr, seconds):
4894 assert self._transient, "Model %s is not transient, it cannot be vacuumed!" % self._name
4895 # Never delete rows used in the last 5 minutes
4896 seconds = max(seconds, 300)
4897 query = ("SELECT id FROM " + self._table + " WHERE"
4898 " COALESCE(write_date, create_date, (now() at time zone 'UTC'))::timestamp"
4899 " < ((now() at time zone 'UTC') - interval %s)")
4900 cr.execute(query, ("%s seconds" % seconds,))
4901 ids = [x[0] for x in cr.fetchall()]
4902 self.unlink(cr, SUPERUSER_ID, ids)
4904 def _transient_clean_old_rows(self, cr, max_count):
4905 # Check how many rows we have in the table
4906 cr.execute("SELECT count(*) AS row_count FROM " + self._table)
4907 res = cr.fetchall()
4908 if res[0][0] <= max_count:
4909 return # max not reached, nothing to do
4910 self._transient_clean_rows_older_than(cr, 300)
4912 def _transient_vacuum(self, cr, uid, force=False):
4913 """Clean the transient records.
4915 This unlinks old records from the transient model's table whenever the
4916 "_transient_max_count" or "_transient_max_hours" conditions (if any) are reached.
4917 Actual cleaning will happen only once every "_transient_check_time" calls.
4918 This means this method can be called frequently (e.g. whenever
4919 a new record is created).
4920 Example with both max_hours and max_count active:
4921 Suppose max_hours = 0.2 (i.e. 12 minutes), max_count = 20, there are 55 rows in the
4922 table, 10 created/changed in the last 5 minutes, an additional 12 created/changed between
4923 5 and 10 minutes ago, and the rest created/changed more than 12 minutes ago.
4924 - age-based vacuum will leave the 22 rows created/changed in the last 12 minutes
4925 - count-based vacuum will wipe out another 12 rows. Not just 2, otherwise each addition
4926 would immediately cause the maximum to be reached again.
4927 - the 10 rows that have been created/changed in the last 5 minutes will NOT be deleted
4929 assert self._transient, "Model %s is not transient, it cannot be vacuumed!" % self._name
4930 _transient_check_time = 20 # run the vacuum only once every 20 calls (arbitrary)
4931 self._transient_check_count += 1
4932 if not force and (self._transient_check_count < _transient_check_time):
4933 return True # no vacuum cleaning this time
4934 self._transient_check_count = 0
4936 # Age-based expiration
4937 if self._transient_max_hours:
4938 self._transient_clean_rows_older_than(cr, self._transient_max_hours * 60 * 60)
4940 # Count-based expiration
4941 if self._transient_max_count:
4942 self._transient_clean_old_rows(cr, self._transient_max_count)
4943 return True
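# Hedged configuration sketch: a transient model tunes the vacuum through
# class attributes (values hypothetical):
#   class my_wizard(TransientModel):
#       _name = 'my.wizard'
#       _transient_max_count = 200   # count-based expiration threshold
#       _transient_max_hours = 1.0   # age-based expiration threshold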
4946 def resolve_2many_commands(self, cr, uid, field_name, commands, fields=None, context=None):
4947 """ Serializes one2many and many2many commands into record dictionaries
4948 (as if all the records came from the database via a read()). This
4949 method is aimed at onchange methods on one2many and many2many fields.
4951 Because commands might be creation commands, not all record dicts
4952 will contain an ``id`` field. Commands matching an existing record
4953 will have an ``id``.
4955 :param field_name: name of the one2many or many2many field matching the commands
4956 :type field_name: str
4957 :param commands: one2many or many2many commands to execute on ``field_name``
4958 :type commands: list((int|False, int|False, dict|False))
4959 :param fields: list of fields to read from the database, when applicable
4960 :type fields: list(str)
4961 :returns: records in a shape similar to that returned by ``read()``
4962 (except records may be missing the ``id`` field if they don't exist in db)
4965 result = [] # result (list of dict)
4966 record_ids = [] # ids of records to read
4967 updates = {} # {id: dict} of updates on particular records
4969 for command in commands or []:
4970 if not isinstance(command, (list, tuple)):
4971 record_ids.append(command)
4972 elif command[0] == 0:
4973 result.append(command[2])
4974 elif command[0] == 1:
4975 record_ids.append(command[1])
4976 updates.setdefault(command[1], {}).update(command[2])
4977 elif command[0] in (2, 3):
4978 record_ids = [id for id in record_ids if id != command[1]]
4979 elif command[0] == 4:
4980 record_ids.append(command[1])
4981 elif command[0] == 5:
4982 result, record_ids = [], []
4983 elif command[0] == 6:
4984 result, record_ids = [], list(command[2])
4986 # read the records and apply the updates
4987 other_model = self.pool[self._all_columns[field_name].column._obj]
4988 for record in other_model.read(cr, uid, record_ids, fields=fields, context=context):
4989 record.update(updates.get(record['id'], {}))
4990 result.append(record)
4991 return result
4994 # for backward compatibility
4995 resolve_o2m_commands_to_record_dicts = resolve_2many_commands
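# Hedged illustration (hypothetical field and ids) of what
# resolve_2many_commands() returns for a mixed command list:
#   model.resolve_2many_commands(cr, uid, 'line_ids', [
#       (0, 0, {'name': 'new line'}),  # create -> dict kept as-is, no 'id'
#       (1, 7, {'name': 'renamed'}),   # update -> record 7 is read, then overridden
#       (4, 8, False),                 # link   -> record 8 is read unchanged
#   ], fields=['name'])
#   # -> [{'name': 'new line'}, {'id': 7, 'name': 'renamed'}, {'id': 8, 'name': ...}]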
4997 def search_read(self, cr, uid, domain=None, fields=None, offset=0, limit=None, order=None, context=None):
4999 Performs a ``search()`` followed by a ``read()``.
5001 :param cr: database cursor
5002 :param uid: current user id
5003 :param domain: Search domain, see ``args`` parameter in ``search()``. Defaults to an empty domain that will match all records.
5004 :param fields: List of fields to read, see ``fields`` parameter in ``read()``. Defaults to all fields.
5005 :param offset: Number of records to skip, see ``offset`` parameter in ``search()``. Defaults to 0.
5006 :param limit: Maximum number of records to return, see ``limit`` parameter in ``search()``. Defaults to no limit.
5007 :param order: Columns to sort result, see ``order`` parameter in ``search()``. Defaults to no sort.
5008 :param context: context arguments.
5009 :return: List of dictionaries containing the asked fields.
5010 :rtype: List of dictionaries.
5013 record_ids = self.search(cr, uid, domain or [], offset=offset, limit=limit, order=order, context=context)
5014 if not record_ids:
5015 return []
5017 if fields and fields == ['id']:
5018 # shortcut read if we only want the ids
5019 return [{'id': id} for id in record_ids]
5021 # read() ignores active_test, but it would forward it to any downstream search call
5022 # (e.g. for x2m or function fields), and this is not the desired behavior, the flag
5023 # was presumably only meant for the main search().
5024 # TODO: Move this to read() directly?
5025 read_ctx = dict(context or {})
5026 read_ctx.pop('active_test', None)
5028 result = self.read(cr, uid, record_ids, fields, context=read_ctx)
5029 if len(result) <= 1:
5030 return result
5032 # reorder read
5033 index = dict((r['id'], r) for r in result)
5034 return [index[x] for x in record_ids if x in index]
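# Hedged usage sketch (hypothetical fields):
#   partners = self.pool['res.partner'].search_read(cr, uid,
#       domain=[('is_company', '=', True)],
#       fields=['name', 'country_id'], limit=10, context=context)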
5036 def _register_hook(self, cr):
5037 """ stuff to do right after the registry is built """
5038 pass
5040 @classmethod
5041 def _patch_method(cls, name, method):
5042 """ Monkey-patch a method for all instances of this model. This replaces
5043 the method called `name` by `method` in the given class.
5044 The original method is then accessible via ``method.origin``, and it
5045 can be restored with :meth:`~._revert_method`.
5050 def do_write(self, values):
5051 # do stuff, and call the original method
5052 return do_write.origin(self, values)
5054 # patch method write of model
5055 model._patch_method('write', do_write)
5057 # this will call do_write
5058 records = model.search([...])
5061 # restore the original method
5062 model._revert_method('write')
5064 origin = getattr(cls, name)
5065 method.origin = origin
5066 # propagate decorators from origin to method, and apply api decorator
5067 wrapped = api.guess(api.propagate(origin, method))
5068 wrapped.origin = origin
5069 setattr(cls, name, wrapped)
5071 @classmethod
5072 def _revert_method(cls, name):
5073 """ Revert the original method called `name` in the given class.
5074 See :meth:`~._patch_method`.
5076 method = getattr(cls, name)
5077 setattr(cls, name, method.origin)
5082 # An instance represents an ordered collection of records in a given
5083 # execution environment. The instance object refers to the environment, and
5084 # the records themselves are represented by their cache dictionary. The 'id'
5085 # of each record is found in its corresponding cache dictionary.
5087 # This design has the following advantages:
5088 # - cache access is direct and thus fast;
5089 # - one can consider records without an 'id' (see new records);
5090 # - the global cache is only an index to "resolve" a record 'id'.
5093 @classmethod
5094 def _browse(cls, env, ids):
5095 """ Create an instance attached to `env`; `ids` is a tuple of record
5096 ids.
5097 """
5098 records = object.__new__(cls)
5099 records.env = env
5100 records._ids = ids
5101 env.prefetch[cls._name].update(ids)
5102 return records
5104 @api.v7
5105 def browse(self, cr, uid, arg=None, context=None):
5106 ids = _normalize_ids(arg)
5107 #assert all(isinstance(id, IdType) for id in ids), "Browsing invalid ids: %s" % ids
5108 return self._browse(Environment(cr, uid, context or {}), ids)
5110 @api.v8
5111 def browse(self, arg=None):
5112 """ browse([ids]) -> records
5114 Returns a recordset for the ids provided as parameter in the current
5115 environment.
5117 Can take no ids, a single id or a sequence of ids.
5119 ids = _normalize_ids(arg)
5120 #assert all(isinstance(id, IdType) for id in ids), "Browsing invalid ids: %s" % ids
5121 return self._browse(self.env, ids)
5124 # Internal properties, for manipulating the instance's implementation
5129 """ List of actual record ids in this recordset (ignores placeholder
5130 ids for records to create)
5132 return filter(None, list(self._ids))
5134 # backward-compatibility with former browse records
5135 _cr = property(lambda self: self.env.cr)
5136 _uid = property(lambda self: self.env.uid)
5137 _context = property(lambda self: self.env.context)
5140 # Conversion methods
5143 def ensure_one(self):
5144 """ Verifies that the current recorset holds a single record. Raises
5145 an exception otherwise.
5146 """
5147 if len(self) == 1:
5148 return self
5149 raise except_orm("ValueError", "Expected singleton: %s" % self)
5151 def with_env(self, env):
5152 """ Returns a new version of this recordset attached to the provided
5155 :type env: :class:`~openerp.api.Environment`
5157 return self._browse(env, self._ids)
5159 def sudo(self, user=SUPERUSER_ID):
5160 """ sudo([user=SUPERUSER])
5162 Returns a new version of this recordset attached to the provided
5163 user.
5165 return self.with_env(self.env(user=user))
5167 def with_context(self, *args, **kwargs):
5168 """ with_context([context][, **overrides]) -> records
5170 Returns a new version of this recordset attached to an extended
5173 The extended context is either the provided ``context`` in which
5174 ``overrides`` are merged or the *current* context in which
5175 ``overrides`` are merged e.g.::
5177 # current context is {'key1': True}
5178 r2 = records.with_context({}, key2=True)
5179 # -> r2._context is {'key2': True}
5180 r2 = records.with_context(key2=True)
5181 # -> r2._context is {'key1': True, 'key2': True}
5183 context = dict(args[0] if args else self._context, **kwargs)
5184 return self.with_env(self.env(context=context))
5186 def _convert_to_cache(self, values, update=False, validate=True):
5187 """ Convert the `values` dictionary into cached values.
5189 :param update: whether the conversion is made for updating `self`;
5190 this is necessary for interpreting the commands of *2many fields
5191 :param validate: whether values must be checked
5193 fields = self._fields
5194 target = self if update else self.browse()
5195 return {
5196 name: fields[name].convert_to_cache(value, target, validate=validate)
5197 for name, value in values.iteritems()
5198 if name in fields
5199 }
5201 def _convert_to_write(self, values):
5202 """ Convert the `values` dictionary into the format of :meth:`write`. """
5203 fields = self._fields
5204 result = {}
5205 for name, value in values.iteritems():
5206 if name in fields:
5207 value = fields[name].convert_to_write(value)
5208 if not isinstance(value, NewId):
5209 result[name] = value
5210 return result
5213 # Record traversal and update
5216 def _mapped_func(self, func):
5217 """ Apply function `func` on all records in `self`, and return the
5218 result as a list or a recordset (if `func` returns recordsets).
5220 vals = [func(rec) for rec in self]
5221 val0 = vals[0] if vals else func(self)
5222 if isinstance(val0, BaseModel):
5223 return reduce(operator.or_, vals, val0)
5224 return vals
5226 def mapped(self, func):
5227 """ Apply `func` on all records in `self`, and return the result as a
5228 list or a recordset (if `func` return recordsets). In the latter
5229 case, the order of the returned recordset is arbritrary.
5231 :param func: a function or a dot-separated sequence of field names
5233 if isinstance(func, basestring):
5234 recs = self
5235 for name in func.split('.'):
5236 recs = recs._mapped_func(operator.itemgetter(name))
5237 return recs
5239 return self._mapped_func(func)
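# Hedged usage sketch (hypothetical field names); both calls return the
# names of the partners of all records in the recordset:
#   records.mapped('partner_id.name')
#   records.mapped(lambda rec: rec.partner_id.name)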
5241 def _mapped_cache(self, name_seq):
5242 """ Same as `~.mapped`, but `name_seq` is a dot-separated sequence of
5243 field names, and only cached values are used.
5244 """
5245 recs = self
5246 for name in name_seq.split('.'):
5247 field = recs._fields[name]
5248 null = field.null(self.env)
5249 recs = recs.mapped(lambda rec: rec._cache.get(field, null))
5250 return recs
5252 def filtered(self, func):
5253 """ Select the records in `self` such that `func(rec)` is true, and
5254 return them as a recordset.
5256 :param func: a function or a dot-separated sequence of field names
5258 if isinstance(func, basestring):
5259 name = func
5260 func = lambda rec: filter(None, rec.mapped(name))
5261 return self.browse([rec.id for rec in self if func(rec)])
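# Hedged usage sketch (hypothetical field names):
#   records.filtered(lambda r: r.state == 'done')
#   records.filtered('partner_id.is_company')  # records whose partner is a company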
5263 def sorted(self, key=None):
5264 """ Return the recordset `self` ordered by `key` """
5265 if key is None:
5266 return self.search([('id', 'in', self.ids)])
5267 else:
5268 return self.browse(map(int, sorted(self, key=key)))
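# Hedged usage sketch: without a key, records come back in the model's
# default search order; with one (hypothetical field):
#   records.sorted(key=lambda r: r.name)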
5270 def update(self, values):
5271 """ Update record `self[0]` with `values`. """
5272 for name, value in values.iteritems():
5273 self[name] = value
5276 # New records - represent records that do not exist in the database yet;
5277 # they are used to compute default values and perform onchanges.
5281 def new(self, values={}):
5282 """ new([values]) -> record
5284 Return a new record instance attached to the current environment and
5285 initialized with the provided ``values``. The record is *not* created
5286 in database, it only exists in memory.
5288 record = self.browse([NewId()])
5289 record._cache.update(record._convert_to_cache(values, update=True))
5291 if record.env.in_onchange:
5292 # The cache update does not set inverse fields, so do it manually.
5293 # This is useful for computing a function field on secondary
5294 # records, if that field depends on the main record.
5295 for name in values:
5296 field = self._fields.get(name)
5297 if field:
5298 for invf in field.inverse_fields:
5299 invf._update(record[name], record)
5301 return record
5304 # Dirty flag, to mark records modified (in draft mode)
5309 """ Return whether any record in `self` is dirty. """
5310 dirty = self.env.dirty
5311 return any(record in dirty for record in self)
5313 @_dirty.setter
5314 def _dirty(self, value):
5315 """ Mark the records in `self` as dirty. """
5316 if value:
5317 map(self.env.dirty.add, self)
5318 else:
5319 map(self.env.dirty.discard, self)
5325 def __nonzero__(self):
5326 """ Test whether `self` is nonempty. """
5327 return bool(getattr(self, '_ids', True))
5330 """ Return the size of `self`. """
5331 return len(self._ids)
5334 """ Return an iterator over `self`. """
5335 for id in self._ids:
5336 yield self._browse(self.env, (id,))
5338 def __contains__(self, item):
5339 """ Test whether `item` (record or field name) is an element of `self`.
5340 In the first case, the test is fully equivalent to::
5342 any(item == record for record in self)
5344 if isinstance(item, BaseModel) and self._name == item._name:
5345 return len(item) == 1 and item.id in self._ids
5346 elif isinstance(item, basestring):
5347 return item in self._fields
5348 else:
5349 raise except_orm("ValueError", "Mixing apples and oranges: %s in %s" % (item, self))
5351 def __add__(self, other):
5352 """ Return the concatenation of two recordsets. """
5353 if not isinstance(other, BaseModel) or self._name != other._name:
5354 raise except_orm("ValueError", "Mixing apples and oranges: %s + %s" % (self, other))
5355 return self.browse(self._ids + other._ids)
5357 def __sub__(self, other):
5358 """ Return the recordset of all the records in `self` that are not in `other`. """
5359 if not isinstance(other, BaseModel) or self._name != other._name:
5360 raise except_orm("ValueError", "Mixing apples and oranges: %s - %s" % (self, other))
5361 other_ids = set(other._ids)
5362 return self.browse([id for id in self._ids if id not in other_ids])
5364 def __and__(self, other):
5365 """ Return the intersection of two recordsets.
5366 Note that recordset order is not preserved.
5368 if not isinstance(other, BaseModel) or self._name != other._name:
5369 raise except_orm("ValueError", "Mixing apples and oranges: %s & %s" % (self, other))
5370 return self.browse(set(self._ids) & set(other._ids))
5372 def __or__(self, other):
5373 """ Return the union of two recordsets.
5374 Note that recordset order is not preserved.
5376 if not isinstance(other, BaseModel) or self._name != other._name:
5377 raise except_orm("ValueError", "Mixing apples and oranges: %s | %s" % (self, other))
5378 return self.browse(set(self._ids) | set(other._ids))
5380 def __eq__(self, other):
5381 """ Test whether two recordsets are equivalent (up to reordering). """
5382 if not isinstance(other, BaseModel):
5384 _logger.warning("Comparing apples and oranges: %s == %s", self, other)
5386 return self._name == other._name and set(self._ids) == set(other._ids)
5388 def __ne__(self, other):
5389 return not self == other
5391 def __lt__(self, other):
5392 if not isinstance(other, BaseModel) or self._name != other._name:
5393 raise except_orm("ValueError", "Mixing apples and oranges: %s < %s" % (self, other))
5394 return set(self._ids) < set(other._ids)
5396 def __le__(self, other):
5397 if not isinstance(other, BaseModel) or self._name != other._name:
5398 raise except_orm("ValueError", "Mixing apples and oranges: %s <= %s" % (self, other))
5399 return set(self._ids) <= set(other._ids)
5401 def __gt__(self, other):
5402 if not isinstance(other, BaseModel) or self._name != other._name:
5403 raise except_orm("ValueError", "Mixing apples and oranges: %s > %s" % (self, other))
5404 return set(self._ids) > set(other._ids)
5406 def __ge__(self, other):
5407 if not isinstance(other, BaseModel) or self._name != other._name:
5408 raise except_orm("ValueError", "Mixing apples and oranges: %s >= %s" % (self, other))
5409 return set(self._ids) >= set(other._ids)
5415 return "%s%s" % (self._name, getattr(self, '_ids', ""))
5417 def __unicode__(self):
5418 return unicode(str(self))
5422 def __hash__(self):
5423 if hasattr(self, '_ids'):
5424 return hash((self._name, frozenset(self._ids)))
5425 else:
5426 return hash(self._name)
5428 def __getitem__(self, key):
5429 """ If `key` is an integer or a slice, return the corresponding record
5430 selection as an instance (attached to `self.env`).
5431 Otherwise read the field `key` of the first record in `self`.
5435 inst = model.search(dom) # inst is a recordset
5436 r4 = inst[3] # fourth record in inst
5437 rs = inst[10:20] # subset of inst
5438 nm = rs['name'] # name of first record in inst
5440 if isinstance(key, basestring):
5441 # important: one must call the field's getter
5442 return self._fields[key].__get__(self, type(self))
5443 elif isinstance(key, slice):
5444 return self._browse(self.env, self._ids[key])
5445 else:
5446 return self._browse(self.env, (self._ids[key],))
5448 def __setitem__(self, key, value):
5449 """ Assign the field `key` to `value` in record `self`. """
5450 # important: one must call the field's setter
5451 return self._fields[key].__set__(self, value)
5454 # Cache and recomputation management
5459 """ Return the cache of `self`, mapping field names to values. """
5460 return RecordCache(self)
5463 def _in_cache_without(self, field):
5464 """ Make sure `self` is present in cache (for prefetching), and return
5465 the records of model `self` in cache that have no value for `field`
5466 (:class:`Field` instance).
5467 """
5468 env = self.env
5469 prefetch_ids = env.prefetch[self._name]
5470 prefetch_ids.update(self._ids)
5471 ids = filter(None, prefetch_ids - set(env.cache[field]))
5472 return self.browse(ids)
5476 """ Clear the records cache.
5479 The record cache is automatically invalidated.
5481 self.invalidate_cache()
5484 def invalidate_cache(self, fnames=None, ids=None):
5485 """ Invalidate the record caches after some records have been modified.
5486 If both `fnames` and `ids` are ``None``, the whole cache is cleared.
5488 :param fnames: the list of modified fields, or ``None`` for all fields
5489 :param ids: the list of modified record ids, or ``None`` for all
5490 """
5491 if fnames is None:
5492 if ids is None:
5493 return self.env.invalidate_all()
5494 fields = self._fields.values()
5495 else:
5496 fields = map(self._fields.__getitem__, fnames)
5498 # invalidate fields and inverse fields, too
5499 spec = [(f, ids) for f in fields] + \
5500 [(invf, None) for f in fields for invf in f.inverse_fields]
5501 self.env.invalidate(spec)
5504 def modified(self, fnames):
5505 """ Notify that fields have been modified on `self`. This invalidates
5506 the cache, and prepares the recomputation of stored function fields
5507 (new-style fields only).
5509 :param fnames: iterable of field names that have been modified on
5510 all records in `self`
5511 """
5512 # each field knows what to invalidate and recompute
5513 spec = []
5514 for fname in fnames:
5515 spec += self._fields[fname].modified(self)
5517 cached_fields = {
5518 field
5519 for env in self.env.all
5520 for field in env.cache
5521 }
5522 # invalidate non-stored fields.function which are currently cached
5523 spec += [(f, None) for f in self.pool.pure_function_fields
5524 if f in cached_fields]
5526 self.env.invalidate(spec)
5528 def _recompute_check(self, field):
5529 """ If `field` must be recomputed on some record in `self`, return the
5530 corresponding records that must be recomputed.
5532 return self.env.check_todo(field, self)
5534 def _recompute_todo(self, field):
5535 """ Mark `field` to be recomputed. """
5536 self.env.add_todo(field, self)
5538 def _recompute_done(self, field):
5539 """ Mark `field` as recomputed. """
5540 self.env.remove_todo(field, self)
5543 def recompute(self):
5544 """ Recompute stored function fields. The fields and records to
5545 recompute have been determined by method :meth:`modified`.
5547 while self.env.has_todo():
5548 field, recs = self.env.get_todo()
5549 # evaluate the fields to recompute, and save them to database
5550 for rec, rec1 in zip(recs, recs.with_context(recompute=False)):
5551 try:
5552 values = rec._convert_to_write({
5553 f.name: rec[f.name] for f in field.computed_fields
5554 })
5555 rec1._write(values)
5556 except MissingError:
5557 pass
5558 # mark the computed fields as done
5559 map(recs._recompute_done, field.computed_fields)
5562 # Generic onchange method
5565 def _has_onchange(self, field, other_fields):
5566 """ Return whether `field` should trigger an onchange event in the
5567 presence of `other_fields`.
5569 # test whether self has an onchange method for field, or field is a
5570 # dependency of any field in other_fields
5571 return field.name in self._onchange_methods or \
5572 any(dep in other_fields for dep in field.dependents)
5575 def _onchange_spec(self, view_info=None):
5576 """ Return the onchange spec from a view description; if not given, the
5577 result of ``self.fields_view_get()`` is used.
5578 """
5580 result = {}
5581 # for traversing the XML arch and populating result
5582 def process(node, info, prefix):
5583 if node.tag == 'field':
5584 name = node.attrib['name']
5585 names = "%s.%s" % (prefix, name) if prefix else name
5586 if not result.get(names):
5587 result[names] = node.attrib.get('on_change')
5588 # traverse the subviews included in relational fields
5589 for subinfo in info['fields'][name].get('views', {}).itervalues():
5590 process(etree.fromstring(subinfo['arch']), subinfo, names)
5591 else:
5592 for child in node:
5593 process(child, info, prefix)
5595 if view_info is None:
5596 view_info = self.fields_view_get()
5597 process(etree.fromstring(view_info['arch']), view_info, '')
5598 return result
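# Hedged illustration: for a form view containing
#   <field name="partner_id" on_change="onchange_partner(partner_id)"/>
# the returned spec would look like
#   {'partner_id': 'onchange_partner(partner_id)'}
# with fields of embedded subviews prefixed, e.g. 'line_ids.product_id'.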
5600 def _onchange_eval(self, field_name, onchange, result):
5601 """ Apply onchange method(s) for field `field_name` with spec `onchange`
5602 on record `self`. Value assignments are applied on `self`, while
5603 domain and warning messages are put in dictionary `result`.
5605 onchange = onchange.strip()
5608 if onchange in ("1", "true"):
5609 for method in self._onchange_methods.get(field_name, ()):
5610 method_res = method(self)
5611 if not method_res:
5612 continue
5613 if 'domain' in method_res:
5614 result.setdefault('domain', {}).update(method_res['domain'])
5615 if 'warning' in method_res:
5616 result['warning'] = method_res['warning']
5617 return
5619 # onchange V7
5620 match = onchange_v7.match(onchange)
5621 if match:
5622 method, params = match.groups()
5624 # evaluate params -> tuple
5625 global_vars = {'context': self._context, 'uid': self._uid}
5626 if self._context.get('field_parent'):
5627 class RawRecord(object):
5628 def __init__(self, record):
5629 self._record = record
5630 def __getattr__(self, name):
5631 field = self._record._fields[name]
5632 value = self._record[name]
5633 return field.convert_to_onchange(value)
5634 record = self[self._context['field_parent']]
5635 global_vars['parent'] = RawRecord(record)
5636 field_vars = {
5637 key: self._fields[key].convert_to_onchange(val)
5638 for key, val in self._cache.iteritems()
5639 }
5640 params = eval("[%s]" % params, global_vars, field_vars)
5642 # call onchange method
5643 args = (self._cr, self._uid, self._origin.ids) + tuple(params)
5644 method_res = getattr(self._model, method)(*args)
5645 if not isinstance(method_res, dict):
5646 return
5647 if 'value' in method_res:
5648 method_res['value'].pop('id', None)
5649 self.update(self._convert_to_cache(method_res['value'], validate=False))
5650 if 'domain' in method_res:
5651 result.setdefault('domain', {}).update(method_res['domain'])
5652 if 'warning' in method_res:
5653 result['warning'] = method_res['warning']
5656 def onchange(self, values, field_name, field_onchange):
5657 """ Perform an onchange on the given field.
5659 :param values: dictionary mapping field names to values, giving the
5660 current state of modification
5661 :param field_name: name of the modified field
5662 :param field_onchange: dictionary mapping field names to their
5663 on_change attribute
5664 """
5665 env = self.env
5667 if field_name and field_name not in self._fields:
5668 return {}
5670 # determine subfields for field.convert_to_write() below
5671 secondary = []
5672 subfields = defaultdict(set)
5673 for dotname in field_onchange:
5674 if '.' in dotname:
5675 secondary.append(dotname)
5676 name, subname = dotname.split('.')
5677 subfields[name].add(subname)
5679 # create a new record with values, and attach `self` to it
5680 with env.do_in_onchange():
5681 record = self.new(values)
5682 values = dict(record._cache)
5683 # attach `self` with a different context (for cache consistency)
5684 record._origin = self.with_context(__onchange=True)
5686 # determine which field should be triggered an onchange
5687 todo = set([field_name]) if field_name else set(values)
5688 done = set()
5690 # dummy assignment: trigger invalidations on the record
5691 for name in todo:
5692 value = record[name]
5693 field = self._fields[name]
5694 if not field_name and field.type == 'many2one' and field.delegate and not value:
5695 # do not nullify all fields of parent record for new records
5696 continue
5697 record[name] = value
5699 result = {'value': {}}
5701 while todo:
5702 name = todo.pop()
5703 if name in done:
5704 continue
5705 done.add(name)
5707 with env.do_in_onchange():
5708 # apply field-specific onchange methods
5709 if field_onchange.get(name):
5710 record._onchange_eval(name, field_onchange[name], result)
5712 # force re-evaluation of function fields on secondary records
5713 for field_seq in secondary:
5714 record.mapped(field_seq)
5716 # determine which fields have been modified
5717 for name, oldval in values.iteritems():
5718 field = self._fields[name]
5719 newval = record[name]
5720 if field.type in ('one2many', 'many2many'):
5721 if newval != oldval or newval._dirty:
5722 # put new value in result
5723 result['value'][name] = field.convert_to_write(
5724 newval, record._origin, subfields.get(name),
5725 )
5726 else:
5728 # keep result: newval may have been dirty before
5729 pass
5730 else:
5731 if newval != oldval:
5732 # put new value in result
5733 result['value'][name] = field.convert_to_write(
5734 newval, record._origin, subfields.get(name),
5735 )
5736 else:
5738 # clean up result to not return another value
5739 result['value'].pop(name, None)
5741 # At the moment, the client does not support updates on a *2many field
5742 # while this one is modified by the user.
5743 if field_name and self._fields[field_name].type in ('one2many', 'many2many'):
5744 result['value'].pop(field_name, None)
5746 return result
5749 class RecordCache(MutableMapping):
5750 """ Implements a proxy dictionary to read/update the cache of a record.
5751 Upon iteration, it looks like a dictionary mapping field names to
5752 values. However, fields may be used as keys as well.
5754 def __init__(self, records):
5755 self._recs = records
5757 def contains(self, field):
5758 """ Return whether `records[0]` has a value for `field` in cache. """
5759 if isinstance(field, basestring):
5760 field = self._recs._fields[field]
5761 return self._recs.id in self._recs.env.cache[field]
5763 def __contains__(self, field):
5764 """ Return whether `records[0]` has a regular value for `field` in cache. """
5765 if isinstance(field, basestring):
5766 field = self._recs._fields[field]
5767 dummy = SpecialValue(None)
5768 value = self._recs.env.cache[field].get(self._recs.id, dummy)
5769 return not isinstance(value, SpecialValue)
5771 def __getitem__(self, field):
5772 """ Return the cached value of `field` for `records[0]`. """
5773 if isinstance(field, basestring):
5774 field = self._recs._fields[field]
5775 value = self._recs.env.cache[field][self._recs.id]
5776 return value.get() if isinstance(value, SpecialValue) else value
5778 def __setitem__(self, field, value):
5779 """ Assign the cached value of `field` for all records in `records`. """
5780 if isinstance(field, basestring):
5781 field = self._recs._fields[field]
5782 values = dict.fromkeys(self._recs._ids, value)
5783 self._recs.env.cache[field].update(values)
5785 def update(self, *args, **kwargs):
5786 """ Update the cache of all records in `records`. If the argument is a
5787 `SpecialValue`, update all fields (except "magic" columns).
5789 if args and isinstance(args[0], SpecialValue):
5790 values = dict.fromkeys(self._recs._ids, args[0])
5791 for name, field in self._recs._fields.iteritems():
5792 if name != 'id':
5793 self._recs.env.cache[field].update(values)
5794 else:
5795 return super(RecordCache, self).update(*args, **kwargs)
5797 def __delitem__(self, field):
5798 """ Remove the cached value of `field` for all `records`. """
5799 if isinstance(field, basestring):
5800 field = self._recs._fields[field]
5801 field_cache = self._recs.env.cache[field]
5802 for id in self._recs._ids:
5803 field_cache.pop(id, None)
5806 """ Iterate over the field names with a regular value in cache. """
5807 cache, id = self._recs.env.cache, self._recs.id
5808 dummy = SpecialValue(None)
5809 for name, field in self._recs._fields.iteritems():
5810 if name != 'id' and not isinstance(cache[field].get(id, dummy), SpecialValue):
5811 yield name
5813 def __len__(self):
5814 """ Return the number of fields with a regular value in cache. """
5815 return sum(1 for name in self)
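# Hedged usage sketch: a record's _cache behaves like a dict keyed by field
# name (or Field instance):
#   if 'name' in record._cache:        # is a regular value cached?
#       cached = record._cache['name']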
5817 class Model(BaseModel):
5818 """Main super-class for regular database-persisted OpenERP models.
5820 OpenERP models are created by inheriting from this class::
5822 class user(Model):
5823 ...
5825 The system will later instantiate the class once per database (on
5826 which the class' module is installed).
5829 _register = False # not visible in ORM registry, meant to be python-inherited only
5830 _transient = False # True in a TransientModel
5832 class TransientModel(BaseModel):
5833 """Model super-class for transient records, meant to be temporarily
5834 persisted, and regularly vaccuum-cleaned.
5836 A TransientModel has a simplified access rights management,
5837 all users can create new records, and may only access the
5838 records they created. The super-user has unrestricted access
5839 to all TransientModel records.
5842 _register = False # not visible in ORM registry, meant to be python-inherited only
5845 class AbstractModel(BaseModel):
5846 """Abstract Model super-class for creating an abstract class meant to be
5847 inherited by regular models (Models or TransientModels) but not meant to
5848 be usable on its own, or persisted.
5850 Technical note: we don't want to make AbstractModel the super-class of
5851 Model or BaseModel because it would not make sense to put the main
5852 definition of persistence methods such as create() in it, and still we
5853 should be able to override them within an AbstractModel.
5855 _auto = False # don't create any database backend for AbstractModels
5856 _register = False # not visible in ORM registry, meant to be python-inherited only
5859 def itemgetter_tuple(items):
5860 """ Fixes the inconsistency of operator.itemgetter, which does not return
5861 a tuple when len(items) == 1: this version always returns an n-tuple, n = len(items)
5862 """
5863 if len(items) == 0:
5864 return lambda a: ()
5865 if len(items) == 1:
5866 return lambda gettable: (gettable[items[0]],)
5867 return operator.itemgetter(*items)
5869 def convert_pgerror_23502(model, fields, info, e):
5870 m = re.match(r'^null value in column "(?P<field>\w+)" violates '
5871 r'not-null constraint\n',
5872 str(e))
5873 field_name = m and m.group('field')
5874 if not m or field_name not in fields:
5875 return {'message': unicode(e)}
5876 message = _(u"Missing required value for the field '%s'.") % field_name
5877 field = fields.get(field_name)
5879 message = _(u"Missing required value for the field '%s' (%s)") % (field['string'], field_name)
5880 return {
5881 'message': message,
5882 'field': field_name,
5883 }
5885 def convert_pgerror_23505(model, fields, info, e):
5886 m = re.match(r'^duplicate key (?P<field>\w+) violates unique constraint',
5887 str(e))
5888 field_name = m and m.group('field')
5889 if not m or field_name not in fields:
5890 return {'message': unicode(e)}
5891 message = _(u"The value for the field '%s' already exists.") % field_name
5892 field = fields.get(field_name)
5894 message = _(u"%s This might be '%s' in the current model, or a field "
5895 u"of the same name in an o2m.") % (message, field['string'])
5896 return {
5897 'message': message,
5898 'field': field_name,
5899 }
5901 PGERROR_TO_OE = defaultdict(
5902 # shape of mapped converters
5903 lambda: (lambda model, fvg, info, pgerror: {'message': unicode(pgerror)}), {
5904 # not_null_violation
5905 '23502': convert_pgerror_23502,
5906 # unique constraint error
5907 '23505': convert_pgerror_23505,
5908 })
5910 def _normalize_ids(arg, atoms={int, long, str, unicode, NewId}):
5911 """ Normalizes the ids argument for ``browse`` (v7 and v8) to a tuple.
5913 Various implementations were tested on the corpus of all browse() calls
5914 performed during a full crawler run (after having installed all website_*
5915 modules) and this one was the most efficient overall.
5917 A possible bit of correctness was sacrificed by not doing any test on
5918 Iterable and just assuming that any non-atomic type was an iterable of
5919 ids.
5920 """
5923 # much of the corpus is falsy objects (empty list, tuple or set, None)
5924 if not arg:
5925 return ()
5927 # `type in set` is significantly faster (because more restrictive) than
5928 # isinstance(arg, set) or issubclass(type, set); and for new-style classes
5929 # obj.__class__ is equivalent to but faster than type(obj). Not relevant
5930 # (and looks much worse) in most cases, but over millions of calls it
5931 # does have a very minor effect.
5932 if arg.__class__ in atoms:
5933 return arg,
5935 return tuple(arg)
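# Hedged illustration of the resulting behaviour:
#   _normalize_ids(None)    ->  ()
#   _normalize_ids(7)       ->  (7,)
#   _normalize_ids([1, 2])  ->  (1, 2)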
5937 # keep those imports here to avoid dependency cycle errors
5938 from .osv import expression
5939 from .fields import Field, SpecialValue, FailedValue
5941 # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: