1 # -*- coding: utf-8 -*-
2 ##############################################################################
4 # OpenERP, Open Source Management Solution
5 # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
7 # This program is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU Affero General Public License as
9 # published by the Free Software Foundation, either version 3 of the
10 # License, or (at your option) any later version.
12 # This program is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU Affero General Public License for more details.
17 # You should have received a copy of the GNU Affero General Public License
18 # along with this program. If not, see <http://www.gnu.org/licenses/>.
20 ##############################################################################
24 Object Relational Mapping module:
25 * Hierarchical structure
26 * Constraints consistency and validation
27 * Object metadata depends on its status
28 * Optimised processing by complex query (multiple actions at once)
29 * Default field values
30 * Permissions optimisation
31 * Persistent object: PostgreSQL DB
33 * Multi-level caching system
34 * Two different inheritance mechanisms
35 * Rich set of field types:
36 - classical (varchar, integer, boolean, ...)
37 - relational (one2many, many2one, many2many)
52 from collections import defaultdict, MutableMapping
53 from inspect import getmembers
56 import dateutil.relativedelta
58 from lxml import etree
61 from . import SUPERUSER_ID
64 from .api import Environment
65 from .exceptions import except_orm, AccessError, MissingError, ValidationError
66 from .osv import fields
67 from .osv.query import Query
68 from .tools import lazy_property
69 from .tools.config import config
70 from .tools.misc import CountingStream, DEFAULT_SERVER_DATETIME_FORMAT, DEFAULT_SERVER_DATE_FORMAT
71 from .tools.safe_eval import safe_eval as eval
72 from .tools.translate import _
74 _logger = logging.getLogger(__name__)
75 _schema = logging.getLogger(__name__ + '.schema')
77 regex_order = re.compile('^( *([a-z0-9:_]+|"[a-z0-9:_]+")( *desc| *asc)?( *, *|))+$', re.I)
78 regex_object_name = re.compile(r'^[a-z0-9_.]+$')
79 onchange_v7 = re.compile(r"^(\w+)\((.*)\)$")
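# Example (illustrative, not part of the original source): inputs accepted by the
# regexes above.
#   >>> bool(regex_order.match('name desc, id'))
#   True
#   >>> bool(regex_object_name.match('res.partner'))
#   True
#   >>> onchange_v7.match('onchange_partner_id(partner_id)').groups()
#   ('onchange_partner_id', 'partner_id')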
81 AUTOINIT_RECALCULATE_STORED_FIELDS = 1000
84 def check_object_name(name):
85 """ Check if the given name is a valid openerp object name.
87 The _name attribute in osv and osv_memory object is subject to
88 some restrictions. This function returns True or False whether
89 the given name is allowed or not.
91 TODO: this is an approximation. The goal in this approximation
92 is to disallow uppercase characters (in some places, we quote
93 table/column names and in others not, which leads to this kind
96 of error, e.g. psycopg2.ProgrammingError: relation "xxx" does not exist).
98 The same restriction should apply to both osv and osv_memory
99 objects for consistency.
102 if regex_object_name.match(name) is None:
106 def raise_on_invalid_object_name(name):
107 if not check_object_name(name):
108 msg = "The _name attribute %s is not valid." % name
110 raise except_orm('ValueError', msg)
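# Example (illustrative): check_object_name() only accepts lowercase names, so
# raise_on_invalid_object_name() raises for model names containing uppercase
# characters (assuming the elided return statements return False/True as the
# docstring describes).
#   >>> check_object_name('res.partner')
#   True
#   >>> check_object_name('Res.Partner')
#   False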
112 POSTGRES_CONFDELTYPES = {
120 def intersect(la, lb):
121 return filter(lambda x: x in lb, la)
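# Example (illustrative): intersect() keeps the elements of `la` that also occur
# in `lb`, preserving the order of `la`.
#   >>> intersect(['name', 'email', 'phone'], ['phone', 'name'])
#   ['name', 'phone']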
124 """ Test whether functions `f` and `g` are identical or have the same name """
125 return f == g or getattr(f, '__name__', 0) == getattr(g, '__name__', 1)
127 def fix_import_export_id_paths(fieldname):
129 Fixes the id fields in imports and exports, and splits field paths
132 :param str fieldname: name of the field to import/export
133 :return: split field name
136 fixed_db_id = re.sub(r'([^/])\.id', r'\1/.id', fieldname)
137 fixed_external_id = re.sub(r'([^/]):id', r'\1/id', fixed_db_id)
138 return fixed_external_id.split('/')
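# Example (illustrative): ":id" and ".id" suffixes become path components of their
# own, and "/" separates sub-field steps.
#   >>> fix_import_export_id_paths('order_line/product_id:id')
#   ['order_line', 'product_id', 'id']
#   >>> fix_import_export_id_paths('partner_id.id')
#   ['partner_id', '.id']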
140 def pg_varchar(size=0):
141 """ Returns the VARCHAR declaration for the provided size:
143 * If no size (or an empty or negative size is provided) return an
145 * Otherwise return a VARCHAR(n)
147 :type int size: varchar size, optional
151 if not isinstance(size, int):
152 raise TypeError("VARCHAR parameter should be an int, got %s"
155 return 'VARCHAR(%d)' % size
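# Example (illustrative): a positive integer size yields a sized VARCHAR; in the
# full implementation a falsy size falls back to a plain, size-less 'varchar'.
#   >>> pg_varchar(16)
#   'VARCHAR(16)'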
158 FIELDS_TO_PGTYPES = {
159 fields.boolean: 'bool',
160 fields.integer: 'int4',
164 fields.datetime: 'timestamp',
165 fields.binary: 'bytea',
166 fields.many2one: 'int4',
167 fields.serialized: 'text',
170 def get_pg_type(f, type_override=None):
172 :param fields._column f: field to get a Postgres type for
173 :param type type_override: use the provided type for dispatching instead of the field's own type
174 :returns: (postgres_identification_type, postgres_type_specification)
177 field_type = type_override or type(f)
179 if field_type in FIELDS_TO_PGTYPES:
180 pg_type = (FIELDS_TO_PGTYPES[field_type], FIELDS_TO_PGTYPES[field_type])
181 elif issubclass(field_type, fields.float):
183 pg_type = ('numeric', 'NUMERIC')
185 pg_type = ('float8', 'DOUBLE PRECISION')
186 elif issubclass(field_type, (fields.char, fields.reference)):
187 pg_type = ('varchar', pg_varchar(f.size))
188 elif issubclass(field_type, fields.selection):
189 if (isinstance(f.selection, list) and isinstance(f.selection[0][0], int))\
190 or getattr(f, 'size', None) == -1:
191 pg_type = ('int4', 'INTEGER')
193 pg_type = ('varchar', pg_varchar(getattr(f, 'size', None)))
194 elif issubclass(field_type, fields.function):
195 if f._type == 'selection':
196 pg_type = ('varchar', pg_varchar())
198 pg_type = get_pg_type(f, getattr(fields, f._type))
200 _logger.warning('%s type not supported!', field_type)
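# Example (illustrative sketch): a sized char column maps to a varchar type pair.
#   >>> get_pg_type(fields.char('Name', size=64))
#   ('varchar', 'VARCHAR(64)')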
206 class MetaModel(api.Meta):
207 """ Metaclass for the models.
209 This class is used as the metaclass for the class :class:`BaseModel` to
210 discover the models defined in a module (without instantiating them).
211 If the automatic discovery is not needed, it is possible to set the model's
212 ``_register`` attribute to False.
216 module_to_models = {}
218 def __init__(self, name, bases, attrs):
219 if not self._register:
220 self._register = True
221 super(MetaModel, self).__init__(name, bases, attrs)
224 if not hasattr(self, '_module'):
225 # The (OpenERP) module name can be in the `openerp.addons` namespace
226 # or not. For instance, module `sale` can be imported as
227 # `openerp.addons.sale` (the right way) or `sale` (for backward compatibility).
229 module_parts = self.__module__.split('.')
230 if len(module_parts) > 2 and module_parts[:2] == ['openerp', 'addons']:
231 module_name = self.__module__.split('.')[2]
233 module_name = self.__module__.split('.')[0]
234 self._module = module_name
236 # Remember which models to instantiate for this module.
238 self.module_to_models.setdefault(self._module, []).append(self)
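# Example (illustrative): how the declaring module is inferred from __module__.
#   'openerp.addons.sale.sale'  ->  cls._module == 'sale'
#   'my_module.models'          ->  cls._module == 'my_module'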
242 """ Pseudo-ids for new records. """
243 def __nonzero__(self):
246 IdType = (int, long, basestring, NewId)
249 # special columns automatically created by the ORM
250 LOG_ACCESS_COLUMNS = ['create_uid', 'create_date', 'write_uid', 'write_date']
251 MAGIC_COLUMNS = ['id'] + LOG_ACCESS_COLUMNS
253 class BaseModel(object):
254 """ Base class for OpenERP models.
256 OpenERP models are created by inheriting from this class' subclasses:
258 * :class:`Model` for regular database-persisted models
260 * :class:`TransientModel` for temporary data, stored in the database but
261 automatically vacuumed every so often
263 * :class:`AbstractModel` for abstract super classes meant to be shared by
264 multiple inheriting models
266 The system automatically instantiates every model once per database. Those
267 instances represent the available models on each database, and depend on
268 which modules are installed on that database. The actual class of each
269 instance is built from the Python classes that create and inherit from the
272 Every model instance is a "recordset", i.e., an ordered collection of
273 records of the model. Recordsets are returned by methods like
274 :meth:`~.browse`, :meth:`~.search`, or field accesses. Records have no
275 explicit representation: a record is represented as a recordset of one
278 To create a class that should not be instantiated, the _register class
279 attribute may be set to False.
281 __metaclass__ = MetaModel
282 _auto = True # create database backend
283 _register = False # Set to false if the model shouldn't be automatically discovered.
290 _parent_name = 'parent_id'
291 _parent_store = False
292 _parent_order = False
298 _translate = True # set to False to disable translations export for this model
300 # dict of {field:method}, with method returning the (name_get of records, {id: fold})
301 # to include in the _read_group, if grouped on this field
305 _transient = False # True in a TransientModel
308 # { 'parent_model': 'm2o_field', ... }
311 # Mapping from inherits'd field name to a 4-tuple (m, r, f, n) where m is the
312 # model from which it is inherits'd, r is the (local) field towards m, f
313 # is the _column object itself, and n is the original (i.e. top-most)
316 # { 'field_name': ('parent_model', 'm2o_field_to_reach_parent',
317 # field_column_obj, original_parent_model), ... }
320 # Mapping field name/column_info object
321 # This is similar to _inherit_fields but:
322 # 1. includes self fields,
323 # 2. uses column_info instead of a triple.
328 _sql_constraints = []
330 # model dependencies, for models backed up by sql views:
331 # {model_name: field_names, ...}
334 CONCURRENCY_CHECK_FIELD = '__last_update'
336 def log(self, cr, uid, id, message, secondary=False, context=None):
337 return _logger.warning("log() is deprecated. Please use OpenChatter notification system instead of the res.log mechanism.")
339 def view_init(self, cr, uid, fields_list, context=None):
340 """Override this method to do specific things when a view on the object is opened."""
343 def _field_create(self, cr, context=None):
344 """ Create entries in ir_model_fields for all the model's fields.
346 If necessary, also create an entry in ir_model, and if called from the
347 modules loading scheme (by receiving 'module' in the context), also
348 create entries in ir_model_data (for the model and the fields).
350 - create an entry in ir_model (if there is not already one),
351 - create an entry in ir_model_data (if there is not already one, and if
352 'module' is in the context),
353 - update ir_model_fields with the fields found in _columns
354 (TODO there is some redundancy as _columns is updated from
355 ir_model_fields in __init__).
360 cr.execute("SELECT id FROM ir_model WHERE model=%s", (self._name,))
362 cr.execute('SELECT nextval(%s)', ('ir_model_id_seq',))
363 model_id = cr.fetchone()[0]
364 cr.execute("INSERT INTO ir_model (id,model, name, info,state) VALUES (%s, %s, %s, %s, %s)", (model_id, self._name, self._description, self.__doc__, 'base'))
366 model_id = cr.fetchone()[0]
367 if 'module' in context:
368 name_id = 'model_'+self._name.replace('.', '_')
369 cr.execute('select * from ir_model_data where name=%s and module=%s', (name_id, context['module']))
371 cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, (now() at time zone 'UTC'), (now() at time zone 'UTC'), %s, %s, %s)", \
372 (name_id, context['module'], 'ir.model', model_id)
375 cr.execute("SELECT * FROM ir_model_fields WHERE model=%s", (self._name,))
377 for rec in cr.dictfetchall():
378 cols[rec['name']] = rec
380 ir_model_fields_obj = self.pool.get('ir.model.fields')
382 # sparse field should be created at the end, as it depends on its serialized field already existing
383 model_fields = sorted(self._columns.items(), key=lambda x: 1 if x[1]._type == 'sparse' else 0)
384 for (k, f) in model_fields:
386 'model_id': model_id,
389 'field_description': f.string,
391 'relation': f._obj or '',
392 'select_level': tools.ustr(int(f.select)),
393 'readonly': (f.readonly and 1) or 0,
394 'required': (f.required and 1) or 0,
395 'selectable': (f.selectable and 1) or 0,
396 'translate': (f.translate and 1) or 0,
397 'relation_field': f._fields_id if isinstance(f, fields.one2many) else '',
398 'serialization_field_id': None,
400 if getattr(f, 'serialization_field', None):
401 # resolve link to serialization_field if specified by name
402 serialization_field_id = ir_model_fields_obj.search(cr, SUPERUSER_ID, [('model','=',vals['model']), ('name', '=', f.serialization_field)])
403 if not serialization_field_id:
404 raise except_orm(_('Error'), _("Serialization field `%s` not found for sparse field `%s`!") % (f.serialization_field, k))
405 vals['serialization_field_id'] = serialization_field_id[0]
407 # When it is a custom field, it does not contain f.select
408 if context.get('field_state', 'base') == 'manual':
409 if context.get('field_name', '') == k:
410 vals['select_level'] = context.get('select', '0')
411 # set the value so that the problem does NOT occur next time
413 vals['select_level'] = cols[k]['select_level']
416 cr.execute('select nextval(%s)', ('ir_model_fields_id_seq',))
417 id = cr.fetchone()[0]
419 cr.execute("""INSERT INTO ir_model_fields (
420 id, model_id, model, name, field_description, ttype,
421 relation,state,select_level,relation_field, translate, serialization_field_id
423 %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s
425 id, vals['model_id'], vals['model'], vals['name'], vals['field_description'], vals['ttype'],
426 vals['relation'], 'base',
427 vals['select_level'], vals['relation_field'], bool(vals['translate']), vals['serialization_field_id']
429 if 'module' in context:
430 name1 = 'field_' + self._table + '_' + k
431 cr.execute("select name from ir_model_data where name=%s", (name1,))
433 name1 = name1 + "_" + str(id)
434 cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, (now() at time zone 'UTC'), (now() at time zone 'UTC'), %s, %s, %s)", \
435 (name1, context['module'], 'ir.model.fields', id)
438 for key, val in vals.items():
439 if cols[k][key] != vals[key]:
440 cr.execute('update ir_model_fields set field_description=%s where model=%s and name=%s', (vals['field_description'], vals['model'], vals['name']))
441 cr.execute("""UPDATE ir_model_fields SET
442 model_id=%s, field_description=%s, ttype=%s, relation=%s,
443 select_level=%s, readonly=%s ,required=%s, selectable=%s, relation_field=%s, translate=%s, serialization_field_id=%s
445 model=%s AND name=%s""", (
446 vals['model_id'], vals['field_description'], vals['ttype'],
448 vals['select_level'], bool(vals['readonly']), bool(vals['required']), bool(vals['selectable']), vals['relation_field'], bool(vals['translate']), vals['serialization_field_id'], vals['model'], vals['name']
451 self.invalidate_cache(cr, SUPERUSER_ID)
454 def _add_field(cls, name, field):
455 """ Add the given `field` under the given `name` in the class """
456 field.set_class_name(cls, name)
458 # add field in _fields (for reflection)
459 cls._fields[name] = field
461 # add field as an attribute, unless another kind of value already exists
462 if isinstance(getattr(cls, name, field), Field):
463 setattr(cls, name, field)
465 _logger.warning("In model %r, member %r is not a field", cls._name, name)
468 cls._columns[name] = field.to_column()
470 # remove potential column that may be overridden by field
471 cls._columns.pop(name, None)
474 def _add_magic_fields(cls):
475 """ Introduce magic fields on the current class
477 * id is a "normal" field (with a specific getter)
478 * create_uid, create_date, write_uid and write_date have become "normal" fields
480 * $CONCURRENCY_CHECK_FIELD is a computed field with its computing
481 method defined dynamically. Uses ``str(datetime.datetime.utcnow())``
482 to get the same structure as the previous
483 ``(now() at time zone 'UTC')::timestamp``::
485 # select (now() at time zone 'UTC')::timestamp;
487 ----------------------------
488 2013-06-18 08:30:37.292809
490 >>> str(datetime.datetime.utcnow())
491 '2013-06-18 08:31:32.821177'
493 def add(name, field):
494 """ add `field` with the given `name` if it does not exist yet """
495 if name not in cls._columns and name not in cls._fields:
496 cls._add_field(name, field)
501 # this field 'id' must override any other column or field
502 cls._add_field('id', fields.Id(automatic=True))
504 add('display_name', fields.Char(string='Display Name', automatic=True,
505 compute='_compute_display_name'))
508 add('create_uid', fields.Many2one('res.users', string='Created by', automatic=True))
509 add('create_date', fields.Datetime(string='Created on', automatic=True))
510 add('write_uid', fields.Many2one('res.users', string='Last Updated by', automatic=True))
511 add('write_date', fields.Datetime(string='Last Updated on', automatic=True))
512 last_modified_name = 'compute_concurrency_field_with_access'
514 last_modified_name = 'compute_concurrency_field'
516 # this field must override any other column or field
517 cls._add_field(cls.CONCURRENCY_CHECK_FIELD, fields.Datetime(
518 string='Last Modified on', compute=last_modified_name, automatic=True))
521 def compute_concurrency_field(self):
522 self[self.CONCURRENCY_CHECK_FIELD] = \
523 datetime.datetime.utcnow().strftime(DEFAULT_SERVER_DATETIME_FORMAT)
526 @api.depends('create_date', 'write_date')
527 def compute_concurrency_field_with_access(self):
528 self[self.CONCURRENCY_CHECK_FIELD] = \
529 self.write_date or self.create_date or \
530 datetime.datetime.utcnow().strftime(DEFAULT_SERVER_DATETIME_FORMAT)
533 # Goal: try to apply inheritance at the instantiation level and
534 # put objects in the pool var
537 def _build_model(cls, pool, cr):
538 """ Instanciate a given model.
540 This class method instanciates the class of some model (i.e. a class
541 deriving from osv or osv_memory). The class might be the class passed
542 in argument or, if it inherits from another class, a class constructed
543 by combining the two classes.
547 # IMPORTANT: the registry contains an instance for each model. The class
548 # of each model carries inferred metadata that is shared among the
549 # model's instances for this registry, but not among registries. Hence
550 # we cannot use that "registry class" for combining model classes by
551 # inheritance, since it confuses the metadata inference process.
553 # Keep links to non-inherited constraints in cls; this is useful for
554 # instance when exporting translations
555 cls._local_constraints = cls.__dict__.get('_constraints', [])
556 cls._local_sql_constraints = cls.__dict__.get('_sql_constraints', [])
558 # determine inherited models
559 parents = getattr(cls, '_inherit', [])
560 parents = [parents] if isinstance(parents, basestring) else (parents or [])
562 # determine the model's name
563 name = cls._name or (len(parents) == 1 and parents[0]) or cls.__name__
565 # determine the module that introduced the model
566 original_module = pool[name]._original_module if name in parents else cls._module
568 # build the class hierarchy for the model
569 for parent in parents:
570 if parent not in pool:
571 raise TypeError('The model "%s" specifies a non-existing parent class "%s"\n'
572 'You may need to add a dependency on the parent class\' module.' % (name, parent))
573 parent_model = pool[parent]
575 # do not use the class of parent_model, since that class contains
576 # inferred metadata; use its ancestor instead
577 parent_class = type(parent_model).__base__
579 # don't inherit custom fields
580 columns = dict((key, val)
581 for key, val in parent_class._columns.iteritems()
584 columns.update(cls._columns)
586 defaults = dict(parent_class._defaults)
587 defaults.update(cls._defaults)
589 inherits = dict(parent_class._inherits)
590 inherits.update(cls._inherits)
592 depends = dict(parent_class._depends)
593 for m, fs in cls._depends.iteritems():
594 depends[m] = depends.get(m, []) + fs
596 old_constraints = parent_class._constraints
597 new_constraints = cls._constraints
598 # filter out from old_constraints the ones overridden by a
599 # constraint with the same function name in new_constraints
600 constraints = new_constraints + [oldc
601 for oldc in old_constraints
602 if not any(newc[2] == oldc[2] and same_name(newc[0], oldc[0])
603 for newc in new_constraints)
606 sql_constraints = cls._sql_constraints + \
607 parent_class._sql_constraints
613 '_defaults': defaults,
614 '_inherits': inherits,
616 '_constraints': constraints,
617 '_sql_constraints': sql_constraints,
619 cls = type(name, (cls, parent_class), attrs)
621 # introduce the "registry class" of the model;
622 # duplicate some attributes so that the ORM can modify them
626 '_columns': dict(cls._columns),
627 '_defaults': dict(cls._defaults),
628 '_inherits': dict(cls._inherits),
629 '_depends': dict(cls._depends),
630 '_constraints': list(cls._constraints),
631 '_sql_constraints': list(cls._sql_constraints),
632 '_original_module': original_module,
634 cls = type(cls._name, (cls,), attrs)
636 # float fields are registry-dependent (digit attribute); duplicate them
638 for key, col in cls._columns.items():
639 if col._type == 'float':
640 cls._columns[key] = copy.copy(col)
642 # instantiate the model, and initialize it
643 model = object.__new__(cls)
644 model.__init__(pool, cr)
648 def _init_function_fields(cls, pool, cr):
649 # initialize the list of non-stored function fields for this model
650 pool._pure_function_fields[cls._name] = []
652 # process store of low-level function fields
653 for fname, column in cls._columns.iteritems():
654 if hasattr(column, 'digits_change'):
655 column.digits_change(cr)
656 # filter out existing store about this field
657 pool._store_function[cls._name] = [
659 for stored in pool._store_function.get(cls._name, [])
660 if (stored[0], stored[1]) != (cls._name, fname)
662 if not isinstance(column, fields.function):
665 # register it on the pool for invalidation
666 pool._pure_function_fields[cls._name].append(fname)
668 # process store parameter
671 get_ids = lambda self, cr, uid, ids, c={}: ids
672 store = {cls._name: (get_ids, None, column.priority, None)}
673 for model, spec in store.iteritems():
675 (fnct, fields2, order, length) = spec
677 (fnct, fields2, order) = spec
680 raise except_orm('Error',
681 ('Invalid function definition %s in object %s !\nYou must use the definition: store={object:(fnct, fields, priority, time length)}.' % (fname, cls._name)))
682 pool._store_function.setdefault(model, [])
683 t = (cls._name, fname, fnct, tuple(fields2) if fields2 else None, order, length)
684 if t not in pool._store_function[model]:
685 pool._store_function[model].append(t)
686 pool._store_function[model].sort(key=lambda x: x[4])
689 def _init_manual_fields(cls, pool, cr):
690 # Check whether the query is already done
691 if pool.fields_by_model is not None:
692 manual_fields = pool.fields_by_model.get(cls._name, [])
694 cr.execute('SELECT * FROM ir_model_fields WHERE model=%s AND state=%s', (cls._name, 'manual'))
695 manual_fields = cr.dictfetchall()
697 for field in manual_fields:
698 if field['name'] in cls._columns:
701 'string': field['field_description'],
702 'required': bool(field['required']),
703 'readonly': bool(field['readonly']),
704 'domain': eval(field['domain']) if field['domain'] else None,
705 'size': field['size'] or None,
706 'ondelete': field['on_delete'],
707 'translate': (field['translate']),
710 #'select': int(field['select_level'])
712 if field['serialization_field_id']:
713 cr.execute('SELECT name FROM ir_model_fields WHERE id=%s', (field['serialization_field_id'],))
714 attrs.update({'serialization_field': cr.fetchone()[0], 'type': field['ttype']})
715 if field['ttype'] in ['many2one', 'one2many', 'many2many']:
716 attrs.update({'relation': field['relation']})
717 cls._columns[field['name']] = fields.sparse(**attrs)
718 elif field['ttype'] == 'selection':
719 cls._columns[field['name']] = fields.selection(eval(field['selection']), **attrs)
720 elif field['ttype'] == 'reference':
721 cls._columns[field['name']] = fields.reference(selection=eval(field['selection']), **attrs)
722 elif field['ttype'] == 'many2one':
723 cls._columns[field['name']] = fields.many2one(field['relation'], **attrs)
724 elif field['ttype'] == 'one2many':
725 cls._columns[field['name']] = fields.one2many(field['relation'], field['relation_field'], **attrs)
726 elif field['ttype'] == 'many2many':
727 _rel1 = field['relation'].replace('.', '_')
728 _rel2 = field['model'].replace('.', '_')
729 _rel_name = 'x_%s_%s_%s_rel' % (_rel1, _rel2, field['name'])
730 cls._columns[field['name']] = fields.many2many(field['relation'], _rel_name, 'id1', 'id2', **attrs)
732 cls._columns[field['name']] = getattr(fields, field['ttype'])(**attrs)
735 def _init_constraints_onchanges(cls):
736 # store sql constraint error messages
737 for (key, _, msg) in cls._sql_constraints:
738 cls.pool._sql_error[cls._table + '_' + key] = msg
740 # collect constraint and onchange methods
741 cls._constraint_methods = []
742 cls._onchange_methods = defaultdict(list)
743 for attr, func in getmembers(cls, callable):
744 if hasattr(func, '_constrains'):
745 if not all(name in cls._fields for name in func._constrains):
746 _logger.warning("@constrains%r parameters must be field names", func._constrains)
747 cls._constraint_methods.append(func)
748 if hasattr(func, '_onchange'):
749 if not all(name in cls._fields for name in func._onchange):
750 _logger.warning("@onchange%r parameters must be field names", func._onchange)
751 for name in func._onchange:
752 cls._onchange_methods[name].append(func)
755 # In the past, this method was registering the model class in the server.
756 # This job is now done entirely by the metaclass MetaModel.
758 # Do not create an instance here. Model instances are created by method _build_model().
762 def __init__(self, pool, cr):
763 """ Initialize a model and make it part of the given registry.
765 - copy the stored fields' functions in the registry,
766 - retrieve custom fields and add them in the model,
767 - ensure there is a many2one for each _inherits'd parent,
768 - update the children's _columns,
769 - give a chance to each field to initialize itself.
774 # link the class to the registry, and update the registry
776 cls._model = self # backward compatibility
777 pool.add(cls._name, self)
779 # determine description, table, sequence and log_access
780 if not cls._description:
781 cls._description = cls._name
783 cls._table = cls._name.replace('.', '_')
784 if not cls._sequence:
785 cls._sequence = cls._table + '_id_seq'
786 if not hasattr(cls, '_log_access'):
787 # If _log_access is not specified, it is the same value as _auto.
788 cls._log_access = cls._auto
791 if cls.is_transient():
792 cls._transient_check_count = 0
793 cls._transient_max_count = config.get('osv_memory_count_limit')
794 cls._transient_max_hours = config.get('osv_memory_age_limit')
795 assert cls._log_access, \
796 "TransientModels must have log_access turned on, " \
797 "in order to implement their access rights policy"
799 # retrieve new-style fields and duplicate them (to avoid clashes with
800 # inheritance between different models)
802 for attr, field in getmembers(cls, Field.__instancecheck__):
803 if not field._origin:
804 cls._add_field(attr, field.copy())
806 # introduce magic fields
807 cls._add_magic_fields()
809 # register stuff about low-level function fields and custom fields
810 cls._init_function_fields(pool, cr)
811 cls._init_manual_fields(pool, cr)
814 cls._inherits_check()
815 cls._inherits_reload()
817 # register constraints and onchange methods
818 cls._init_constraints_onchanges()
821 for k in cls._defaults:
822 assert k in cls._fields, \
823 "Model %s has a default for nonexiting field %s" % (cls._name, k)
826 for column in cls._columns.itervalues():
831 assert cls._rec_name in cls._fields, \
832 "Invalid rec_name %s for model %s" % (cls._rec_name, cls._name)
833 elif 'name' in cls._fields:
834 cls._rec_name = 'name'
836 # prepare ormcache, which must be shared by all instances of the model
839 def __export_xml_id(self):
840 """ Return a valid xml_id for the record `self`. """
841 ir_model_data = self.sudo().env['ir.model.data']
842 data = ir_model_data.search([('model', '=', self._name), ('res_id', '=', self.id)])
845 return '%s.%s' % (data[0].module, data[0].name)
850 name = '%s_%s' % (self._table, self.id)
851 while ir_model_data.search([('module', '=', '__export__'), ('name', '=', name)]):
853 name = '%s_%s_%s' % (self._table, self.id, postfix)
854 ir_model_data.create({
857 'module': '__export__',
860 return '__export__.' + name
863 def __export_rows(self, fields):
864 """ Export fields of the records in `self`.
866 :param fields: list of lists of fields to traverse
867 :return: list of lists of corresponding values
871 # main line of record, initially empty
872 current = [''] * len(fields)
873 lines.append(current)
875 # list of primary fields followed by secondary field(s)
878 # process column by column
879 for i, path in enumerate(fields):
884 if name in primary_done:
888 current[i] = str(record.id)
890 current[i] = record.__export_xml_id()
892 field = record._fields[name]
895 # this part could be simpler, but it has to be done this way
896 # in order to reproduce the former behavior
897 if not isinstance(value, BaseModel):
898 current[i] = field.convert_to_export(value, self.env)
900 primary_done.append(name)
902 # This is a special case, its strange behavior is intended!
903 if field.type == 'many2many' and len(path) > 1 and path[1] == 'id':
904 xml_ids = [r.__export_xml_id() for r in value]
905 current[i] = ','.join(xml_ids) or False
908 # recursively export the fields that follow name
909 fields2 = [(p[1:] if p and p[0] == name else []) for p in fields]
910 lines2 = value.__export_rows(fields2)
912 # merge first line with record's main line
913 for j, val in enumerate(lines2[0]):
916 # check value of current field
918 # assign xml_ids, and forget about remaining lines
919 xml_ids = [item[1] for item in value.name_get()]
920 current[i] = ','.join(xml_ids)
922 # append the other lines at the end
930 def export_data(self, fields_to_export, raw_data=False):
931 """ Export fields for selected objects
933 :param fields_to_export: list of fields
934 :param raw_data: True to return value in native Python type
935 :return: dictionary with a *datas* matrix
937 This method is used when exporting data via the client menu.
939 fields_to_export = map(fix_import_export_id_paths, fields_to_export)
941 self = self.with_context(export_raw_data=True)
942 return {'datas': self.__export_rows(fields_to_export)}
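# Example usage (illustrative; model and values are hypothetical):
#   result = partners.export_data(['id', 'name', 'company_id/id'])
#   # result['datas'] is a row-major matrix, e.g.
#   # [['__export__.res_partner_7', 'Agrolait', 'base.main_company']]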
944 def import_data(self, cr, uid, fields, datas, mode='init', current_module='', noupdate=False, context=None, filename=None):
947 Use :meth:`~load` instead
949 Import given data in given module
951 This method is used when importing data via client menu.
953 Example of fields to import for a sale.order::
956 partner_id, (=name_search)
957 order_line/.id, (=database_id)
959 order_line/product_id/id, (=xml id)
960 order_line/price_unit,
961 order_line/product_uom_qty,
962 order_line/product_uom/id (=xml_id)
964 This method returns a 4-tuple with the following structure::
966 (return_code, errored_resource, error_message, unused)
968 * The first item is a return code, it is ``-1`` in case of
969 import error, or the last imported row number in case of success
970 * The second item contains the record data dict that failed to import
971 in case of error, otherwise it's 0
972 * The third item contains an error message string in case of error,
974 * The last item is currently unused, with no specific semantics
976 :param fields: list of fields to import
977 :param datas: data to import
978 :param mode: 'init' or 'update' for record creation
979 :param current_module: module name
980 :param noupdate: flag for record creation
981 :param filename: optional file to store partial import state for recovery
982 :returns: 4-tuple in the form (return_code, errored_resource, error_message, unused)
983 :rtype: (int, dict or 0, str or 0, str or 0)
985 context = dict(context) if context is not None else {}
986 context['_import_current_module'] = current_module
988 fields = map(fix_import_export_id_paths, fields)
989 ir_model_data_obj = self.pool.get('ir.model.data')
992 if m['type'] == 'error':
993 raise Exception(m['message'])
995 if config.get('import_partial') and filename:
996 with open(config.get('import_partial'), 'rb') as partial_import_file:
997 data = pickle.load(partial_import_file)
998 position = data.get(filename, 0)
1002 for res_id, xml_id, res, info in self._convert_records(cr, uid,
1003 self._extract_records(cr, uid, fields, datas,
1004 context=context, log=log),
1005 context=context, log=log):
1006 ir_model_data_obj._update(cr, uid, self._name,
1007 current_module, res, mode=mode, xml_id=xml_id,
1008 noupdate=noupdate, res_id=res_id, context=context)
1009 position = info.get('rows', {}).get('to', 0) + 1
1010 if config.get('import_partial') and filename and (not (position%100)):
1011 with open(config.get('import_partial'), 'rb') as partial_import:
1012 data = pickle.load(partial_import)
1013 data[filename] = position
1014 with open(config.get('import_partial'), 'wb') as partial_import:
1015 pickle.dump(data, partial_import)
1016 if context.get('defer_parent_store_computation'):
1017 self._parent_store_compute(cr)
1019 except Exception, e:
1021 return -1, {}, 'Line %d : %s' % (position + 1, tools.ustr(e)), ''
1023 if context.get('defer_parent_store_computation'):
1024 self._parent_store_compute(cr)
1025 return position, 0, 0, 0
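# Example usage (illustrative; this is the deprecated API, field and data values
# are hypothetical):
#   code, record, message, _unused = self.pool['res.partner'].import_data(
#       cr, uid, ['name', 'email'], [['Alice', 'alice@example.com']])
#   # code is -1 on error, otherwise the last imported row number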
1027 def load(self, cr, uid, fields, data, context=None):
1029 Attempts to load the data matrix, and returns a list of ids (or
1030 ``False`` if there was an error and no id could be generated) and a
1033 The ids are those of the records created and saved (in database), in
1034 the same order they were extracted from the file. They can be passed
1035 directly to :meth:`~read`
1037 :param fields: list of fields to import, at the same index as the corresponding data
1038 :type fields: list(str)
1039 :param data: row-major matrix of data to import
1040 :type data: list(list(str))
1041 :param dict context:
1042 :returns: {ids: list(int)|False, messages: [Message]}
1044 cr.execute('SAVEPOINT model_load')
1047 fields = map(fix_import_export_id_paths, fields)
1048 ModelData = self.pool['ir.model.data'].clear_caches()
1050 fg = self.fields_get(cr, uid, context=context)
1057 for id, xid, record, info in self._convert_records(cr, uid,
1058 self._extract_records(cr, uid, fields, data,
1059 context=context, log=messages.append),
1060 context=context, log=messages.append):
1062 cr.execute('SAVEPOINT model_load_save')
1063 except psycopg2.InternalError, e:
1064 # broken transaction, exit and hope the source error was
1066 if not any(message['type'] == 'error' for message in messages):
1067 messages.append(dict(info, type='error',message=
1068 u"Unknown database error: '%s'" % e))
1071 ids.append(ModelData._update(cr, uid, self._name,
1072 current_module, record, mode=mode, xml_id=xid,
1073 noupdate=noupdate, res_id=id, context=context))
1074 cr.execute('RELEASE SAVEPOINT model_load_save')
1075 except psycopg2.Warning, e:
1076 messages.append(dict(info, type='warning', message=str(e)))
1077 cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
1078 except psycopg2.Error, e:
1079 messages.append(dict(
1081 **PGERROR_TO_OE[e.pgcode](self, fg, info, e)))
1082 # Failed to write, log to messages, rollback savepoint (to
1083 # avoid broken transaction) and keep going
1084 cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
1085 except Exception, e:
1086 message = (_('Unknown error during import:') +
1087 ' %s: %s' % (type(e), unicode(e)))
1088 moreinfo = _('Resolve other errors first')
1089 messages.append(dict(info, type='error',
1092 # Failed for some reason, perhaps due to invalid data supplied,
1093 # rollback savepoint and keep going
1094 cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
1095 if any(message['type'] == 'error' for message in messages):
1096 cr.execute('ROLLBACK TO SAVEPOINT model_load')
1098 return {'ids': ids, 'messages': messages}
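# Example usage (illustrative; field and data values are hypothetical):
#   result = self.pool['res.partner'].load(cr, uid,
#       ['name', 'email'],
#       [['Alice', 'alice@example.com'], ['Bob', 'bob@example.com']])
#   # result == {'ids': [id1, id2], 'messages': []} when both rows import cleanly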
1100 def _extract_records(self, cr, uid, fields_, data,
1101 context=None, log=lambda a: None):
1102 """ Generates record dicts from the data sequence.
1104 The result is a generator of dicts mapping field names to raw
1105 (unconverted, unvalidated) values.
1107 For relational fields, if sub-fields were provided the value will be
1108 a list of sub-records
1110 The following sub-fields may be set on the record (by key):
1111 * None is the name_get for the record (to use with name_create/name_search)
1112 * "id" is the External ID for the record
1113 * ".id" is the Database ID for the record
1115 columns = dict((k, v.column) for k, v in self._all_columns.iteritems())
1116 # Fake columns to avoid special cases in extractor
1117 columns[None] = fields.char('rec_name')
1118 columns['id'] = fields.char('External ID')
1119 columns['.id'] = fields.integer('Database ID')
1121 # m2o fields can't be on multiple lines so exclude them from the
1122 # is_relational field rows filter, but special-case it later on to
1123 # be handled with relational fields (as it can have subfields)
1124 is_relational = lambda field: columns[field]._type in ('one2many', 'many2many', 'many2one')
1125 get_o2m_values = itemgetter_tuple(
1126 [index for index, field in enumerate(fields_)
1127 if columns[field[0]]._type == 'one2many'])
1128 get_nono2m_values = itemgetter_tuple(
1129 [index for index, field in enumerate(fields_)
1130 if columns[field[0]]._type != 'one2many'])
1131 # Checks if the provided row has any non-empty non-relational field
1132 def only_o2m_values(row, f=get_nono2m_values, g=get_o2m_values):
1133 return any(g(row)) and not any(f(row))
1137 if index >= len(data): return
1140 # copy non-relational fields to record dict
1141 record = dict((field[0], value)
1142 for field, value in itertools.izip(fields_, row)
1143 if not is_relational(field[0]))
1145 # Get all following rows which have relational values attached to
1146 # the current record (no non-relational values)
1147 record_span = itertools.takewhile(
1148 only_o2m_values, itertools.islice(data, index + 1, None))
1149 # stitch record row back on for relational fields
1150 record_span = list(itertools.chain([row], record_span))
1151 for relfield in set(
1152 field[0] for field in fields_
1153 if is_relational(field[0])):
1154 column = columns[relfield]
1155 # FIXME: how to not use _obj without relying on fields_get?
1156 Model = self.pool[column._obj]
1158 # get only cells for this sub-field, should be strictly
1159 # non-empty, field path [None] is for name_get column
1160 indices, subfields = zip(*((index, field[1:] or [None])
1161 for index, field in enumerate(fields_)
1162 if field[0] == relfield))
1164 # return all rows which have at least one value for the
1165 # subfields of relfield
1166 relfield_data = filter(any, map(itemgetter_tuple(indices), record_span))
1167 record[relfield] = [subrecord
1168 for subrecord, _subinfo in Model._extract_records(
1169 cr, uid, subfields, relfield_data,
1170 context=context, log=log)]
1172 yield record, {'rows': {
1174 'to': index + len(record_span) - 1
1176 index += len(record_span)
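# Example (illustrative; assumes 'name' is a plain char field): with
# fields_ == [['id'], ['name']] and data == [['__export__.res_partner_1', 'Alice']],
# the generator yields one record dict along with the span of source rows used:
#   ({'id': '__export__.res_partner_1', 'name': 'Alice'}, {'rows': {...}})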
1178 def _convert_records(self, cr, uid, records,
1179 context=None, log=lambda a: None):
1180 """ Converts records from the source iterable (recursive dicts of
1181 strings) into forms which can be written to the database (via
1182 self.create or (ir.model.data)._update)
1184 :returns: a list of triplets of (id, xid, record)
1185 :rtype: list((int|None, str|None, dict))
1187 if context is None: context = {}
1188 Converter = self.pool['ir.fields.converter']
1189 columns = dict((k, v.column) for k, v in self._all_columns.iteritems())
1190 Translation = self.pool['ir.translation']
1192 (f, (Translation._get_source(cr, uid, self._name + ',' + f, 'field',
1193 context.get('lang'))
1195 for f, column in columns.iteritems())
1197 convert = Converter.for_model(cr, uid, self, context=context)
1199 def _log(base, field, exception):
1200 type = 'warning' if isinstance(exception, Warning) else 'error'
1201 # logs the logical (not human-readable) field name for automated
1202 # processing of response, but injects human readable in message
1203 record = dict(base, type=type, field=field,
1204 message=unicode(exception.args[0]) % base)
1205 if len(exception.args) > 1 and exception.args[1]:
1206 record.update(exception.args[1])
1209 stream = CountingStream(records)
1210 for record, extras in stream:
1213 # name_get/name_create
1214 if None in record: pass
1221 dbid = int(record['.id'])
1223 # in case of overridden id column
1224 dbid = record['.id']
1225 if not self.search(cr, uid, [('id', '=', dbid)], context=context):
1228 record=stream.index,
1230 message=_(u"Unknown database identifier '%s'") % dbid))
1233 converted = convert(record, lambda field, err:\
1234 _log(dict(extras, record=stream.index, field=field_names[field]), field, err))
1236 yield dbid, xid, converted, dict(extras, record=stream.index)
1239 def _validate_fields(self, field_names):
1240 field_names = set(field_names)
1242 # old-style constraint methods
1243 trans = self.env['ir.translation']
1244 cr, uid, context = self.env.args
1247 for fun, msg, names in self._constraints:
1249 # validation must be context-independent; call `fun` without context
1250 valid = not (set(names) & field_names) or fun(self._model, cr, uid, ids)
1252 except Exception, e:
1253 _logger.debug('Exception while validating constraint', exc_info=True)
1255 extra_error = tools.ustr(e)
1258 res_msg = msg(self._model, cr, uid, ids, context=context)
1259 if isinstance(res_msg, tuple):
1260 template, params = res_msg
1261 res_msg = template % params
1263 res_msg = trans._get_source(self._name, 'constraint', self.env.lang, msg)
1265 res_msg += "\n\n%s\n%s" % (_('Error details:'), extra_error)
1267 _("Field(s) `%s` failed against a constraint: %s") %
1268 (', '.join(names), res_msg)
1271 raise ValidationError('\n'.join(errors))
1273 # new-style constraint methods
1274 for check in self._constraint_methods:
1275 if set(check._constrains) & field_names:
1278 except ValidationError, e:
1280 except Exception, e:
1281 raise ValidationError("Error while validating constraint\n\n%s" % tools.ustr(e))
1283 def default_get(self, cr, uid, fields_list, context=None):
1284 """ default_get(fields) -> default_values
1286 Return default values for the fields in `fields_list`. Default
1287 values are determined by the context, user defaults, and the model itself.
1290 :param fields_list: a list of field names
1291 :return: a dictionary mapping each field name to its corresponding
1292 default value; the keys of the dictionary are the fields in
1293 `fields_list` that have a default value different from ``False``.
1295 This method should not be overridden. In order to change the
1296 mechanism for determining default values, you should override method
1297 :meth:`add_default_value` instead.
1299 # trigger view init hook
1300 self.view_init(cr, uid, fields_list, context)
1302 # use a new record to determine default values; evaluate fields on the
1303 # new record and put default values in result
1304 record = self.new(cr, uid, {}, context=context)
1306 for name in fields_list:
1307 if name in self._fields:
1308 value = record[name]
1309 if name in record._cache:
1310 result[name] = value # it really is a default value
1312 # convert default values to the expected format
1313 result = self._convert_to_write(result)
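# Example usage (illustrative; field names are hypothetical). A 'default_' key in
# the context takes precedence over the other default sources, and only non-False
# defaults end up in the result:
#   defaults = model.default_get(cr, uid, ['name', 'active'],
#                                context={'default_name': 'New record'})
#   # => {'name': 'New record', 'active': True}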
1316 def add_default_value(self, field):
1317 """ Set the default value of `field` to the new record `self`.
1318 The value must be assigned to `self`.
1320 assert not self.id, "Expected new record: %s" % self
1321 cr, uid, context = self.env.args
1324 # 1. look up context
1325 key = 'default_' + name
1327 self[name] = context[key]
1330 # 2. look up ir_values
1331 # Note: performance is good, because get_defaults_dict is cached!
1332 ir_values_dict = self.env['ir.values'].get_defaults_dict(self._name)
1333 if name in ir_values_dict:
1334 self[name] = ir_values_dict[name]
1337 # 3. look up property fields
1338 # TODO: get rid of this one
1339 column = self._columns.get(name)
1340 if isinstance(column, fields.property):
1341 self[name] = self.env['ir.property'].get(name, self._name)
1344 # 4. look up _defaults
1345 if name in self._defaults:
1346 value = self._defaults[name]
1348 value = value(self._model, cr, uid, context)
1352 # 5. delegate to field
1353 field.determine_default(self)
1355 def fields_get_keys(self, cr, user, context=None):
1356 res = self._columns.keys()
1357 # TODO I believe this loop can be replaced by
1358 # res.extend(self._inherit_fields.keys())
1359 for parent in self._inherits:
1360 res.extend(self.pool[parent].fields_get_keys(cr, user, context))
1363 def _rec_name_fallback(self, cr, uid, context=None):
1364 rec_name = self._rec_name
1365 if rec_name not in self._columns:
1366 rec_name = self._columns.keys()[0] if len(self._columns.keys()) > 0 else "id"
1370 # Override this method if you need a window title that depends on the context
1372 def view_header_get(self, cr, user, view_id=None, view_type='form', context=None):
1375 def user_has_groups(self, cr, uid, groups, context=None):
1376 """Return true if the user is at least member of one of the groups
1377 in groups_str. Typically used to resolve `groups` attribute
1378 in view and model definitions.
1380 :param str groups: comma-separated list of fully-qualified group
1381 external IDs, e.g.: ``base.group_user,base.group_system``
1382 :return: True if the current user is a member of one of the given groups
1385 return any(self.pool['res.users'].has_group(cr, uid, group_ext_id)
1386 for group_ext_id in groups.split(','))
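# Example usage (illustrative): gate behaviour on group membership using
# fully-qualified group external IDs.
#   if self.user_has_groups(cr, uid, 'base.group_user,base.group_system'):
#       pass  # at least one of the two groups matched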
1388 def _get_default_form_view(self, cr, user, context=None):
1389 """ Generates a default single-line form view using all fields
1390 of the current model except the m2m and o2m ones.
1392 :param cr: database cursor
1393 :param int user: user id
1394 :param dict context: connection context
1395 :returns: a form view as an lxml document
1396 :rtype: etree._Element
1398 view = etree.Element('form', string=self._description)
1399 group = etree.SubElement(view, 'group', col="4")
1400 for fname, field in self._fields.iteritems():
1401 if field.automatic or field.type in ('one2many', 'many2many'):
1404 etree.SubElement(group, 'field', name=fname)
1405 if field.type == 'text':
1406 etree.SubElement(group, 'newline')
1409 def _get_default_search_view(self, cr, user, context=None):
1410 """ Generates a single-field search view, based on _rec_name.
1412 :param cr: database cursor
1413 :param int user: user id
1414 :param dict context: connection context
1415 :returns: a tree view as an lxml document
1416 :rtype: etree._Element
1418 view = etree.Element('search', string=self._description)
1419 etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))
1422 def _get_default_tree_view(self, cr, user, context=None):
1423 """ Generates a single-field tree view, based on _rec_name.
1425 :param cr: database cursor
1426 :param int user: user id
1427 :param dict context: connection context
1428 :returns: a tree view as an lxml document
1429 :rtype: etree._Element
1431 view = etree.Element('tree', string=self._description)
1432 etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))
1435 def _get_default_calendar_view(self, cr, user, context=None):
1436 """ Generates a default calendar view by trying to infer
1437 calendar fields from a number of pre-set attribute names
1439 :param cr: database cursor
1440 :param int user: user id
1441 :param dict context: connection context
1442 :returns: a calendar view
1443 :rtype: etree._Element
1445 def set_first_of(seq, in_, to):
1446 """Sets the first value of `seq` also found in `in_` to
1447 the `to` attribute of the view being closed over.
1449 Returns whether it found a suitable value (and set it on
1450 the attribute) or not.
1458 view = etree.Element('calendar', string=self._description)
1459 etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))
1461 if self._date_name not in self._columns:
1463 for dt in ['date', 'date_start', 'x_date', 'x_date_start']:
1464 if dt in self._columns:
1465 self._date_name = dt
1470 raise except_orm(_('Invalid Object Architecture!'), _("Insufficient fields for Calendar View!"))
1471 view.set('date_start', self._date_name)
1473 set_first_of(["user_id", "partner_id", "x_user_id", "x_partner_id"],
1474 self._columns, 'color')
1476 if not set_first_of(["date_stop", "date_end", "x_date_stop", "x_date_end"],
1477 self._columns, 'date_stop'):
1478 if not set_first_of(["date_delay", "planned_hours", "x_date_delay", "x_planned_hours"],
1479 self._columns, 'date_delay'):
1481 _('Invalid Object Architecture!'),
1482 _("Insufficient fields to generate a Calendar View for %s, missing a date_stop or a date_delay" % self._name))
1486 def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
1487 """ fields_view_get([view_id | view_type='form'])
1489 Get the detailed composition of the requested view like fields, model, view architecture
1491 :param view_id: id of the view or None
1492 :param view_type: type of the view to return if view_id is None ('form', 'tree', ...)
1493 :param toolbar: true to include contextual actions
1494 :param submenu: deprecated
1495 :return: dictionary describing the composition of the requested view (including inherited views and extensions)
1496 :raise AttributeError:
1497 * if the inherited view has unknown position to work with other than 'before', 'after', 'inside', 'replace'
1498 * if some tag other than 'position' is found in parent view
1499 :raise Invalid ArchitectureError: if there is a view type other than form, tree, calendar, search, etc. defined on the structure
1503 View = self.pool['ir.ui.view']
1506 'model': self._name,
1507 'field_parent': False,
1510 # try to find a view_id if none provided
1512 # <view_type>_view_ref in context can be used to override the default view
1513 view_ref_key = view_type + '_view_ref'
1514 view_ref = context.get(view_ref_key)
1517 module, view_ref = view_ref.split('.', 1)
1518 cr.execute("SELECT res_id FROM ir_model_data WHERE model='ir.ui.view' AND module=%s AND name=%s", (module, view_ref))
1519 view_ref_res = cr.fetchone()
1521 view_id = view_ref_res[0]
1523 _logger.warning('%r requires a fully-qualified external id (got: %r for model %s). '
1524 'Please use the complete `module.view_id` form instead.', view_ref_key, view_ref,
1528 # otherwise try to find the lowest priority matching ir.ui.view
1529 view_id = View.default_view(cr, uid, self._name, view_type, context=context)
1531 # context for post-processing might be overridden
1534 # read the view with inherited views applied
1535 root_view = View.read_combined(cr, uid, view_id, fields=['id', 'name', 'field_parent', 'type', 'model', 'arch'], context=context)
1536 result['arch'] = root_view['arch']
1537 result['name'] = root_view['name']
1538 result['type'] = root_view['type']
1539 result['view_id'] = root_view['id']
1540 result['field_parent'] = root_view['field_parent']
1541 # override context for postprocessing
1542 if root_view.get('model') != self._name:
1543 ctx = dict(context, base_model_name=root_view.get('model'))
1545 # fallback on default views methods if no ir.ui.view could be found
1547 get_func = getattr(self, '_get_default_%s_view' % view_type)
1548 arch_etree = get_func(cr, uid, context)
1549 result['arch'] = etree.tostring(arch_etree, encoding='utf-8')
1550 result['type'] = view_type
1551 result['name'] = 'default'
1552 except AttributeError:
1553 raise except_orm(_('Invalid Architecture!'), _("No default view of type '%s' could be found !") % view_type)
1555 # Apply post processing, groups and modifiers etc...
1556 xarch, xfields = View.postprocess_and_fields(cr, uid, self._name, etree.fromstring(result['arch']), view_id, context=ctx)
1557 result['arch'] = xarch
1558 result['fields'] = xfields
1560 # Add related action information if asked
1562 toclean = ('report_sxw_content', 'report_rml_content', 'report_sxw', 'report_rml', 'report_sxw_content_data', 'report_rml_content_data')
1568 ir_values_obj = self.pool.get('ir.values')
1569 resprint = ir_values_obj.get(cr, uid, 'action', 'client_print_multi', [(self._name, False)], False, context)
1570 resaction = ir_values_obj.get(cr, uid, 'action', 'client_action_multi', [(self._name, False)], False, context)
1571 resrelate = ir_values_obj.get(cr, uid, 'action', 'client_action_relate', [(self._name, False)], False, context)
1572 resaction = [clean(action) for action in resaction if view_type == 'tree' or not action[2].get('multi')]
1573 resprint = [clean(print_) for print_ in resprint if view_type == 'tree' or not print_[2].get('multi')]
1574 # When multi="True" is set, the action is displayed only in the "More" menu of the list view
1575 resrelate = [clean(action) for action in resrelate
1576 if (action[2].get('multi') and view_type == 'tree') or (not action[2].get('multi') and view_type == 'form')]
1578 for x in itertools.chain(resprint, resaction, resrelate):
1579 x['string'] = x['name']
1581 result['toolbar'] = {
1583 'action': resaction,
1588 def get_formview_id(self, cr, uid, id, context=None):
1589 """ Return an view id to open the document with. This method is meant to be
1590 overridden in addons that want to give specific view ids for example.
1592 :param int id: id of the document to open
1596 def get_formview_action(self, cr, uid, id, context=None):
1597 """ Return an action to open the document. This method is meant to be
1598 overridden in addons that want to give specific view ids for example.
1600 :param int id: id of the document to open
1602 view_id = self.get_formview_id(cr, uid, id, context=context)
1604 'type': 'ir.actions.act_window',
1605 'res_model': self._name,
1606 'view_type': 'form',
1607 'view_mode': 'form',
1608 'views': [(view_id, 'form')],
1609 'target': 'current',
1613 def get_access_action(self, cr, uid, id, context=None):
1614 """ Return an action to open the document. This method is meant to be
1615 overridden in addons that want to give specific access to the document.
1616 By default it opens the formview of the document.
1618 :param int id: id of the document to open
1620 return self.get_formview_action(cr, uid, id, context=context)
1622 def _view_look_dom_arch(self, cr, uid, node, view_id, context=None):
1623 return self.pool['ir.ui.view'].postprocess_and_fields(
1624 cr, uid, self._name, node, view_id, context=context)
1626 def search_count(self, cr, user, args, context=None):
1627 """ search_count(args) -> int
1629 Returns the number of records in the current model matching :ref:`the
1630 provided domain <reference/orm/domains>`.
1632 res = self.search(cr, user, args, context=context, count=True)
1633 if isinstance(res, list):
1637 @api.returns('self')
1638 def search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False):
1639 """ search(args[, offset=0][, limit=None][, order=None][, count=False])
1641 Searches for records based on the ``args``
1642 :ref:`search domain <reference/orm/domains>`.
1644 :param args: :ref:`A search domain <reference/orm/domains>`. Use an empty
1645 list to match all records.
1646 :param int offset: number of results to ignore (default: none)
1647 :param int limit: maximum number of records to return (default: all)
1648 :param str order: sort string
1649 :param bool count: if ``True``, the call should return the number of
1650 records matching ``args`` rather than the records
1652 :returns: at most ``limit`` records matching the search criteria
1654 :raise AccessError: * if user tries to bypass access rules for read on the requested object.
1656 return self._search(cr, user, args, offset=offset, limit=limit, order=order, context=context, count=count)
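# Example usage (illustrative; domain values are hypothetical):
#   ids = self.pool['res.partner'].search(cr, uid,
#       [('name', 'ilike', 'foo')], limit=10, order='name asc')
#   count = self.pool['res.partner'].search(cr, uid, [], count=True)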
1659 # display_name, name_get, name_create, name_search
1662 @api.depends(lambda self: (self._rec_name,) if self._rec_name else ())
1663 def _compute_display_name(self):
1664 for i, got_name in enumerate(self.name_get()):
1665 self[i].display_name = got_name[1]
1669 """ name_get() -> [(id, name), ...]
1671 Returns a textual representation for the records in ``self``.
1672 By default this is the value of the ``display_name`` field.
1674 :return: list of pairs ``(id, text_repr)`` for each record
1678 name = self._rec_name
1679 if name in self._fields:
1680 convert = self._fields[name].convert_to_display_name
1682 result.append((record.id, convert(record[name])))
1685 result.append((record.id, "%s,%s" % (record._name, record.id)))
1690 def name_create(self, name):
1691 """ name_create(name) -> record
1693 Create a new record by calling :meth:`~.create` with only one value
1694 provided: the display name of the new record.
1696 The new record will be initialized with any default values
1697 applicable to this model, or provided through the context. The usual
1698 behavior of :meth:`~.create` applies.
1700 :param name: display name of the record to create
1702 :return: the :meth:`~.name_get` pair value of the created record
1705 record = self.create({self._rec_name: name})
1706 return record.name_get()[0]
1708 _logger.warning("Cannot execute name_create, no _rec_name defined on %s", self._name)
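# Editor's note: a hedged sketch of name_create(); it wraps create() with the
# _rec_name field only. The model name is an assumption; its _rec_name is 'name'.
#
#     id_, display_name = self.pool['res.partner.category'].name_create(
#         cr, uid, 'Prospects', context=context)
#     # roughly equivalent to create({'name': 'Prospects'}) followed by name_get()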
1712 def name_search(self, name='', args=None, operator='ilike', limit=100):
1713 """ name_search(name='', args=None, operator='ilike', limit=100) -> records
1715 Search for records that have a display name matching the given
1716 `name` pattern when compared with the given `operator`, while also
1717 matching the optional search domain (`args`).
1719 This is used for example to provide suggestions based on a partial
1720 value for a relational field. It may sometimes be seen as the inverse
1721 function of :meth:`~.name_get`, but it is not guaranteed to be.
1723 This method is equivalent to calling :meth:`~.search` with a search
1724 domain based on ``display_name`` and then :meth:`~.name_get` on the
1725 result of the search.
1727 :param str name: the name pattern to match
1728 :param list args: optional search domain (see :meth:`~.search` for
1729 syntax), specifying further restrictions
1730 :param str operator: domain operator for matching `name`, such as
1731 ``'like'`` or ``'='``.
1732 :param int limit: optional max number of records to return
1734 :return: list of pairs ``(id, text_repr)`` for all matching records.
1736 return self._name_search(name, args, operator, limit=limit)
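# Editor's note: a hedged usage sketch; model and values are illustrative.
# name_search() is what relational widgets typically call for autocompletion.
#
#     self.pool['res.country'].name_search(cr, uid, name='bel', args=[],
#                                          operator='ilike', limit=8, context=context)
#     # -> e.g. [(20, 'Belgium'), (21, 'Belize')]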
1738 def _name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100, name_get_uid=None):
1739 # private implementation of name_search, allows passing a dedicated user
1740 # for the name_get part to solve some access rights issues
1741 args = list(args or [])
1742 # optimize out the default criterion of ``ilike ''`` that matches everything
1743 if not self._rec_name:
1744 _logger.warning("Cannot execute name_search, no _rec_name defined on %s", self._name)
1745 elif not (name == '' and operator == 'ilike'):
1746 args += [(self._rec_name, operator, name)]
1747 access_rights_uid = name_get_uid or user
1748 ids = self._search(cr, user, args, limit=limit, context=context, access_rights_uid=access_rights_uid)
1749 res = self.name_get(cr, access_rights_uid, ids, context)
1752 def read_string(self, cr, uid, id, langs, fields=None, context=None):
1755 self.pool.get('ir.translation').check_access_rights(cr, uid, 'read')
1757 fields = self._columns.keys() + self._inherit_fields.keys()
1758 #FIXME: collect all calls to _get_source into one SQL call.
1760 res[lang] = {'code': lang}
1762 if f in self._columns:
1763 res_trans = self.pool.get('ir.translation')._get_source(cr, uid, self._name+','+f, 'field', lang)
1765 res[lang][f] = res_trans
1767 res[lang][f] = self._columns[f].string
1768 for table in self._inherits:
1769 cols = intersect(self._inherit_fields.keys(), fields)
1770 res2 = self.pool[table].read_string(cr, uid, id, langs, cols, context)
1773 res[lang]['code'] = lang
1774 for f in res2[lang]:
1775 res[lang][f] = res2[lang][f]
1778 def write_string(self, cr, uid, id, langs, vals, context=None):
1779 self.pool.get('ir.translation').check_access_rights(cr, uid, 'write')
1780 #FIXME: try to only call the translation in one SQL
1783 if field in self._columns:
1784 src = self._columns[field].string
1785 self.pool.get('ir.translation')._set_ids(cr, uid, self._name+','+field, 'field', lang, [0], vals[field], src)
1786 for table in self._inherits:
1787 cols = intersect(self._inherit_fields.keys(), vals)
1789 self.pool[table].write_string(cr, uid, id, langs, vals, context)
1792 def _add_missing_default_values(self, cr, uid, values, context=None):
1793 # avoid overriding inherited values when parent is set
1795 for tables, parent_field in self._inherits.items():
1796 if parent_field in values:
1797 avoid_tables.append(tables)
1799 # compute missing fields
1800 missing_defaults = set()
1801 for field in self._columns.keys():
1802 if not field in values:
1803 missing_defaults.add(field)
1804 for field in self._inherit_fields.keys():
1805 if (field not in values) and (self._inherit_fields[field][0] not in avoid_tables):
1806 missing_defaults.add(field)
1807 # discard magic fields
1808 missing_defaults -= set(MAGIC_COLUMNS)
1810 if missing_defaults:
1811 # override defaults with the provided values, never allow the other way around
1812 defaults = self.default_get(cr, uid, list(missing_defaults), context)
1814 if ((dv in self._columns and self._columns[dv]._type == 'many2many') \
1815 or (dv in self._inherit_fields and self._inherit_fields[dv][2]._type == 'many2many')) \
1816 and defaults[dv] and isinstance(defaults[dv][0], (int, long)):
1817 defaults[dv] = [(6, 0, defaults[dv])]
1818 if (dv in self._columns and self._columns[dv]._type == 'one2many' \
1819 or (dv in self._inherit_fields and self._inherit_fields[dv][2]._type == 'one2many')) \
1820 and isinstance(defaults[dv], (list, tuple)) and defaults[dv] and isinstance(defaults[dv][0], dict):
1821 defaults[dv] = [(0, 0, x) for x in defaults[dv]]
1822 defaults.update(values)
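# Editor's note: a hedged illustration of the normalisation performed above on
# x2many defaults; the field names are hypothetical.
#
#     # a many2many default given as a plain list of ids ...
#     {'tag_ids': [1, 2, 3]}
#     # ... becomes a (6, 0, ids) "replace" command:
#     {'tag_ids': [(6, 0, [1, 2, 3])]}
#
#     # a one2many default given as a list of value dicts ...
#     {'line_ids': [{'name': 'a'}, {'name': 'b'}]}
#     # ... becomes (0, 0, values) "create" commands:
#     {'line_ids': [(0, 0, {'name': 'a'}), (0, 0, {'name': 'b'})]}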
1826 def clear_caches(self):
1827 """ Clear the caches
1829 This clears the caches associated to methods decorated with
1830 ``tools.ormcache`` or ``tools.ormcache_multi``.
1833 self._ormcache.clear()
1834 self.pool._any_cache_cleared = True
1835 except AttributeError:
1839 def _read_group_fill_results(self, cr, uid, domain, groupby, remaining_groupbys, aggregated_fields,
1840 read_group_result, read_group_order=None, context=None):
1841 """Helper method for filling in empty groups for all possible values of
1842 the field being grouped by"""
1844 # self._group_by_full should map groupable fields to a method that returns
1845 # a list of all aggregated values that we want to display for this field,
1846 # in the form of a m2o-like pair (key,label).
1847 # This is useful to implement kanban views for instance, where all columns
1848 # should be displayed even if they don't contain any record.
1850 # Grab the list of all groups that should be displayed, including all present groups
1851 present_group_ids = [x[groupby][0] for x in read_group_result if x[groupby]]
1852 all_groups,folded = self._group_by_full[groupby](self, cr, uid, present_group_ids, domain,
1853 read_group_order=read_group_order,
1854 access_rights_uid=openerp.SUPERUSER_ID,
1857 result_template = dict.fromkeys(aggregated_fields, False)
1858 result_template[groupby + '_count'] = 0
1859 if remaining_groupbys:
1860 result_template['__context'] = {'group_by': remaining_groupbys}
1862 # Merge the left_side (current results as dicts) with the right_side (all
1863 # possible values as m2o pairs). Both lists are supposed to be using the
1864 # same ordering, and can be merged in one pass.
1867 def append_left(left_side):
1868 grouped_value = left_side[groupby] and left_side[groupby][0]
1869 if not grouped_value in known_values:
1870 result.append(left_side)
1871 known_values[grouped_value] = left_side
1873 count_attr = groupby + '_count'
1874 known_values[grouped_value].update({count_attr: left_side[count_attr]})
1875 def append_right(right_side):
1876 grouped_value = right_side[0]
1877 if not grouped_value in known_values:
1878 line = dict(result_template)
1879 line[groupby] = right_side
1880 line['__domain'] = [(groupby,'=',grouped_value)] + domain
1882 known_values[grouped_value] = line
1883 while read_group_result or all_groups:
1884 left_side = read_group_result[0] if read_group_result else None
1885 right_side = all_groups[0] if all_groups else None
1886 assert left_side is None or left_side[groupby] is False \
1887 or isinstance(left_side[groupby], (tuple,list)), \
1888 'M2O-like pair expected, got %r' % left_side[groupby]
1889 assert right_side is None or isinstance(right_side, (tuple,list)), \
1890 'M2O-like pair expected, got %r' % right_side
1891 if left_side is None:
1892 append_right(all_groups.pop(0))
1893 elif right_side is None:
1894 append_left(read_group_result.pop(0))
1895 elif left_side[groupby] == right_side:
1896 append_left(read_group_result.pop(0))
1897 all_groups.pop(0) # discard right_side
1898 elif not left_side[groupby] or not left_side[groupby][0]:
1899 # left side == "Undefined" entry, not present on right_side
1900 append_left(read_group_result.pop(0))
1902 append_right(all_groups.pop(0))
1906 r['__fold'] = folded.get(r[groupby] and r[groupby][0], False)
1909 def _read_group_prepare(self, orderby, aggregated_fields, annotated_groupbys, query):
1911 Prepares the GROUP BY and ORDER BY terms for the read_group method. Adds the missing JOIN clause
1912 to the query if order should be computed against m2o field.
1913 :param orderby: the orderby definition in the form "%(field)s %(order)s"
1914 :param aggregated_fields: list of aggregated fields in the query
1915 :param annotated_groupbys: list of dictionaries returned by _read_group_process_groupby
1916 These dictionaries contain the qualified name of each groupby
1917 (fully qualified SQL name for the corresponding field),
1918 and the (non raw) field name.
1919 :param osv.Query query: the query under construction
1920 :return: (groupby_terms, orderby_terms)
1923 groupby_terms = [gb['qualified_field'] for gb in annotated_groupbys]
1924 groupby_fields = [gb['groupby'] for gb in annotated_groupbys]
1926 return groupby_terms, orderby_terms
1928 self._check_qorder(orderby)
1929 for order_part in orderby.split(','):
1930 order_split = order_part.split()
1931 order_field = order_split[0]
1932 if order_field in groupby_fields:
1934 if self._all_columns[order_field.split(':')[0]].column._type == 'many2one':
1935 order_clause = self._generate_order_by(order_part, query).replace('ORDER BY ', '')
1937 orderby_terms.append(order_clause)
1938 groupby_terms += [order_term.split()[0] for order_term in order_clause.split(',')]
1940 order = '"%s" %s' % (order_field, '' if len(order_split) == 1 else order_split[1])
1941 orderby_terms.append(order)
1942 elif order_field in aggregated_fields:
1943 orderby_terms.append(order_part)
1945 # Cannot order by a field that will not appear in the results (needs to be grouped or aggregated)
1946 _logger.warn('%s: read_group order by `%s` ignored, cannot sort on empty columns (not grouped/aggregated)',
1947 self._name, order_part)
1948 return groupby_terms, orderby_terms
1950 def _read_group_process_groupby(self, gb, query, context):
1952 Helper method to collect important information about groupbys: raw
1953 field name, type, time information, qualified name, ...
1955 split = gb.split(':')
1956 field_type = self._all_columns[split[0]].column._type
1957 gb_function = split[1] if len(split) == 2 else None
1958 temporal = field_type in ('date', 'datetime')
1959 tz_convert = field_type == 'datetime' and context.get('tz') in pytz.all_timezones
1960 qualified_field = self._inherits_join_calc(split[0], query)
1963 'day': 'dd MMM YYYY',
1964 'week': "'W'w YYYY",
1965 'month': 'MMMM YYYY',
1966 'quarter': 'QQQ YYYY',
1970 'day': dateutil.relativedelta.relativedelta(days=1),
1971 'week': datetime.timedelta(days=7),
1972 'month': dateutil.relativedelta.relativedelta(months=1),
1973 'quarter': dateutil.relativedelta.relativedelta(months=3),
1974 'year': dateutil.relativedelta.relativedelta(years=1)
1977 qualified_field = "timezone('%s', timezone('UTC',%s))" % (context.get('tz', 'UTC'), qualified_field)
1978 qualified_field = "date_trunc('%s', %s)" % (gb_function or 'month', qualified_field)
1979 if field_type == 'boolean':
1980 qualified_field = "coalesce(%s,false)" % qualified_field
1985 'display_format': display_formats[gb_function or 'month'] if temporal else None,
1986 'interval': time_intervals[gb_function or 'month'] if temporal else None,
1987 'tz_convert': tz_convert,
1988 'qualified_field': qualified_field
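# Editor's note: a hedged example of a groupby specification handled above;
# the field name 'create_date' is an assumption.
#
#     'create_date:month'
#     # -> field_type 'datetime', gb_function 'month',
#     #    qualified_field date_trunc('month', ...) (wrapped in timezone()
#     #    calls first when a valid tz is present in the context),
#     #    display_format 'MMMM YYYY', interval relativedelta(months=1)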
1991 def _read_group_prepare_data(self, key, value, groupby_dict, context):
1993 Helper method to sanitize the data received by read_group. The None
1994 values are converted to False, and the date/datetime values are
1995 formatted and corrected according to the timezones.
1997 value = False if value is None else value
1998 gb = groupby_dict.get(key)
1999 if gb and gb['type'] in ('date', 'datetime') and value:
2000 if isinstance(value, basestring):
2001 dt_format = DEFAULT_SERVER_DATETIME_FORMAT if gb['type'] == 'datetime' else DEFAULT_SERVER_DATE_FORMAT
2002 value = datetime.datetime.strptime(value, dt_format)
2003 if gb['tz_convert']:
2004 value = pytz.timezone(context['tz']).localize(value)
2007 def _read_group_get_domain(self, groupby, value):
2009 Helper method to construct the domain corresponding to a groupby and
2010 a given value. This is mostly relevant for date/datetime.
2012 if groupby['type'] in ('date', 'datetime') and value:
2013 dt_format = DEFAULT_SERVER_DATETIME_FORMAT if groupby['type'] == 'datetime' else DEFAULT_SERVER_DATE_FORMAT
2014 domain_dt_begin = value
2015 domain_dt_end = value + groupby['interval']
2016 if groupby['tz_convert']:
2017 domain_dt_begin = domain_dt_begin.astimezone(pytz.utc)
2018 domain_dt_end = domain_dt_end.astimezone(pytz.utc)
2019 return [(groupby['field'], '>=', domain_dt_begin.strftime(dt_format)),
2020 (groupby['field'], '<', domain_dt_end.strftime(dt_format))]
2021 if groupby['type'] == 'many2one' and value:
2023 return [(groupby['field'], '=', value)]
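# Editor's note: a hedged example of the domain built above for a monthly
# datetime group; the field name and dates are illustrative.
#
#     # grouping 'create_date:month' with value datetime(2014, 7, 1) yields
#     [('create_date', '>=', '2014-07-01 00:00:00'),
#      ('create_date', '<', '2014-08-01 00:00:00')]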
2025 def _read_group_format_result(self, data, annotated_groupbys, groupby, groupby_dict, domain, context):
2027 Helper method to format the data contained in the dictionary ``data`` by
2028 adding the domain corresponding to its values, the groupbys in the
2029 context and by properly formatting the date/datetime values.
2031 domain_group = [dom for gb in annotated_groupbys for dom in self._read_group_get_domain(gb, data[gb['groupby']])]
2032 for k,v in data.iteritems():
2033 gb = groupby_dict.get(k)
2034 if gb and gb['type'] in ('date', 'datetime') and v:
2035 data[k] = babel.dates.format_date(v, format=gb['display_format'], locale=context.get('lang', 'en_US'))
2037 data['__domain'] = domain_group + domain
2038 if len(groupby) - len(annotated_groupbys) >= 1:
2039 data['__context'] = { 'group_by': groupby[len(annotated_groupbys):]}
2043 def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False, lazy=True):
2045 Get the list of records in list view grouped by the given ``groupby`` fields
2047 :param cr: database cursor
2048 :param uid: current user id
2049 :param domain: list specifying search criteria [['field_name', 'operator', 'value'], ...]
2050 :param list fields: list of fields present in the list view specified on the object
2051 :param list groupby: list of groupby descriptions by which the records will be grouped.
2052 A groupby description is either a field (then it will be grouped by that field)
2053 or a string 'field:groupby_function'. Right now, the only functions supported
2054 are 'day', 'week', 'month', 'quarter' or 'year', and they only make sense for
2055 date/datetime fields.
2056 :param int offset: optional number of records to skip
2057 :param int limit: optional max number of records to return
2058 :param dict context: context arguments, like lang, time zone.
2059 :param str orderby: optional ``order by`` specification, for
2060 overriding the natural sort ordering of the
2061 groups, see also :py:meth:`~osv.osv.osv.search`
2062 (supported only for many2one fields currently)
2063 :param bool lazy: if true, the results are only grouped by the first groupby and the
2064 remaining groupbys are put in the __context key. If false, all the groupbys are
2066 :return: list of dictionaries (one dictionary per record) containing:
2068 * the values of fields grouped by the fields in ``groupby`` argument
2069 * __domain: list of tuples specifying the search criteria
2070 * __context: dictionary with argument like ``groupby``
2071 :rtype: [{'field_name_1': value, ...}, ...]
2072 :raise AccessError: * if user has no read rights on the requested object
2073 * if user tries to bypass access rules for read on the requested object
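# Editor's note: a hedged usage sketch of read_group(); the model, fields and
# groupby values are assumptions for illustration.
#
#     self.pool['sale.order'].read_group(
#         cr, uid,
#         domain=[('state', '!=', 'cancel')],
#         fields=['amount_total', 'partner_id'],
#         groupby=['partner_id'],
#         context=context)
#     # -> one dict per group, e.g.
#     #    {'partner_id': (7, 'Agrolait'), 'partner_id_count': 3,
#     #     'amount_total': 1250.0, '__domain': [('partner_id', '=', 7), ...]}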
2077 self.check_access_rights(cr, uid, 'read')
2078 query = self._where_calc(cr, uid, domain, context=context)
2079 fields = fields or self._columns.keys()
2081 groupby = [groupby] if isinstance(groupby, basestring) else groupby
2082 groupby_list = groupby[:1] if lazy else groupby
2083 annotated_groupbys = [self._read_group_process_groupby(gb, query, context)
2084 for gb in groupby_list]
2085 groupby_fields = [g['field'] for g in annotated_groupbys]
2086 order = orderby or ','.join([g for g in groupby_list])
2087 groupby_dict = {gb['groupby']: gb for gb in annotated_groupbys}
2089 self._apply_ir_rules(cr, uid, query, 'read', context=context)
2090 for gb in groupby_fields:
2091 assert gb in fields, "Fields in 'groupby' must appear in the list of fields to read (perhaps it's missing in the list view?)"
2092 groupby_def = self._columns.get(gb) or (self._inherit_fields.get(gb) and self._inherit_fields.get(gb)[2])
2093 assert groupby_def and groupby_def._classic_write, "Fields in 'groupby' must be regular database-persisted fields (no function or related fields), or function fields with store=True"
2094 if not (gb in self._all_columns):
2095 # Don't allow arbitrary values, as this would be a SQL injection vector!
2096 raise except_orm(_('Invalid group_by'),
2097 _('Invalid group_by specification: "%s".\nA group_by specification must be a list of valid fields.')%(gb,))
2099 aggregated_fields = [
2101 if f not in ('id', 'sequence')
2102 if f not in groupby_fields
2103 if f in self._all_columns
2104 if self._all_columns[f].column._type in ('integer', 'float')
2105 if getattr(self._all_columns[f].column, '_classic_write')]
2107 field_formatter = lambda f: (self._all_columns[f].column.group_operator or 'sum', self._inherits_join_calc(f, query), f)
2108 select_terms = ["%s(%s) AS %s" % field_formatter(f) for f in aggregated_fields]
2110 for gb in annotated_groupbys:
2111 select_terms.append('%s as "%s" ' % (gb['qualified_field'], gb['groupby']))
2113 groupby_terms, orderby_terms = self._read_group_prepare(order, aggregated_fields, annotated_groupbys, query)
2114 from_clause, where_clause, where_clause_params = query.get_sql()
2115 if lazy and (len(groupby_fields) >= 2 or not context.get('group_by_no_leaf')):
2116 count_field = groupby_fields[0] if len(groupby_fields) >= 1 else '_'
2120 prefix_terms = lambda prefix, terms: (prefix + " " + ",".join(terms)) if terms else ''
2121 prefix_term = lambda prefix, term: ('%s %s' % (prefix, term)) if term else ''
2124 SELECT min(%(table)s.id) AS id, count(%(table)s.id) AS %(count_field)s_count %(extra_fields)s
2132 'table': self._table,
2133 'count_field': count_field,
2134 'extra_fields': prefix_terms(',', select_terms),
2135 'from': from_clause,
2136 'where': prefix_term('WHERE', where_clause),
2137 'groupby': prefix_terms('GROUP BY', groupby_terms),
2138 'orderby': prefix_terms('ORDER BY', orderby_terms),
2139 'limit': prefix_term('LIMIT', int(limit) if limit else None),
2140 'offset': prefix_term('OFFSET', int(offset) if limit else None),
2142 cr.execute(query, where_clause_params)
2143 fetched_data = cr.dictfetchall()
2145 if not groupby_fields:
2148 many2onefields = [gb['field'] for gb in annotated_groupbys if gb['type'] == 'many2one']
2150 data_ids = [r['id'] for r in fetched_data]
2151 many2onefields = list(set(many2onefields))
2152 data_dict = {d['id']: d for d in self.read(cr, uid, data_ids, many2onefields, context=context)}
2153 for d in fetched_data:
2154 d.update(data_dict[d['id']])
2156 data = map(lambda r: {k: self._read_group_prepare_data(k,v, groupby_dict, context) for k,v in r.iteritems()}, fetched_data)
2157 result = [self._read_group_format_result(d, annotated_groupbys, groupby, groupby_dict, domain, context) for d in data]
2158 if lazy and groupby_fields[0] in self._group_by_full:
2159 # Right now, read_group only fills results in lazy mode (by default).
2160 # If you need to have the empty groups in 'eager' mode, then the
2161 # method _read_group_fill_results needs to be completely reimplemented
2163 result = self._read_group_fill_results(cr, uid, domain, groupby_fields[0], groupby[len(annotated_groupbys):],
2164 aggregated_fields, result, read_group_order=order,
2168 def _inherits_join_add(self, current_model, parent_model_name, query):
2170 Add missing table SELECT and JOIN clause to ``query`` for reaching the parent table (no duplicates)
2171 :param current_model: current model object
2172 :param parent_model_name: name of the parent model for which the clauses should be added
2173 :param query: query object on which the JOIN should be added
2175 inherits_field = current_model._inherits[parent_model_name]
2176 parent_model = self.pool[parent_model_name]
2177 parent_alias, parent_alias_statement = query.add_join((current_model._table, parent_model._table, inherits_field, 'id', inherits_field), implicit=True)
2180 def _inherits_join_calc(self, field, query):
2182 Adds missing table select and join clause(s) to ``query`` for reaching
2183 the field coming from an '_inherits' parent table (no duplicates).
2185 :param field: name of inherited field to reach
2186 :param query: query object on which the JOIN should be added
2187 :return: qualified name of field, to be used in SELECT clause
2189 current_table = self
2190 parent_alias = '"%s"' % current_table._table
2191 while field in current_table._inherit_fields and not field in current_table._columns:
2192 parent_model_name = current_table._inherit_fields[field][0]
2193 parent_table = self.pool[parent_model_name]
2194 parent_alias = self._inherits_join_add(current_table, parent_model_name, query)
2195 current_table = parent_table
2196 return '%s."%s"' % (parent_alias, field)
2198 def _parent_store_compute(self, cr):
2199 if not self._parent_store:
2201 _logger.info('Computing parent left and right for table %s...', self._table)
2202 def browse_rec(root, pos=0):
2204 where = self._parent_name+'='+str(root)
2206 where = self._parent_name+' IS NULL'
2207 if self._parent_order:
2208 where += ' order by '+self._parent_order
2209 cr.execute('SELECT id FROM '+self._table+' WHERE '+where)
2211 for id in cr.fetchall():
2212 pos2 = browse_rec(id[0], pos2)
2213 cr.execute('update '+self._table+' set parent_left=%s, parent_right=%s where id=%s', (pos, pos2, root))
2215 query = 'SELECT id FROM '+self._table+' WHERE '+self._parent_name+' IS NULL'
2216 if self._parent_order:
2217 query += ' order by ' + self._parent_order
2220 for (root,) in cr.fetchall():
2221 pos = browse_rec(root, pos)
2222 self.invalidate_cache(cr, SUPERUSER_ID, ['parent_left', 'parent_right'])
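# Editor's note: a hedged illustration of the nested-set values computed above
# for a small, invented hierarchy:
#
#     A (parent_left=0, parent_right=5)
#       B (parent_left=1, parent_right=2)   # child of A
#       C (parent_left=3, parent_right=4)   # child of A
#
#     # the descendants of A can then be found with a single range test of the
#     # form: parent_left > A.parent_left AND parent_left < A.parent_right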
2225 def _update_store(self, cr, f, k):
2226 _logger.info("storing computed values of fields.function '%s'", k)
2227 ss = self._columns[k]._symbol_set
2228 update_query = 'UPDATE "%s" SET "%s"=%s WHERE id=%%s' % (self._table, k, ss[0])
2229 cr.execute('select id from '+self._table)
2230 ids_lst = map(lambda x: x[0], cr.fetchall())
2232 iids = ids_lst[:AUTOINIT_RECALCULATE_STORED_FIELDS]
2233 ids_lst = ids_lst[AUTOINIT_RECALCULATE_STORED_FIELDS:]
2234 res = f.get(cr, self, iids, k, SUPERUSER_ID, {})
2235 for key, val in res.items():
2238 # if val is a many2one, just write the ID
2239 if type(val) == tuple:
2241 if val is not False:
2242 cr.execute(update_query, (ss[1](val), key))
2244 def _check_selection_field_value(self, cr, uid, field, value, context=None):
2245 """Raise except_orm if value is not among the valid values for the selection field"""
2246 if self._columns[field]._type == 'reference':
2247 val_model, val_id_str = value.split(',', 1)
2250 val_id = long(val_id_str)
2254 raise except_orm(_('ValidateError'),
2255 _('Invalid value for reference field "%s.%s" (last part must be a non-zero integer): "%s"') % (self._table, field, value))
2259 if isinstance(self._columns[field].selection, (tuple, list)):
2260 if val in dict(self._columns[field].selection):
2262 elif val in dict(self._columns[field].selection(self, cr, uid, context=context)):
2264 raise except_orm(_('ValidateError'),
2265 _('The value "%s" for the field "%s.%s" is not in the selection') % (value, self._name, field))
2267 def _check_removed_columns(self, cr, log=False):
2268 # iterate on the database columns to drop the NOT NULL constraints
2269 # of fields which were required but have been removed (or will be added by another module)
2270 columns = [c for c in self._columns if not (isinstance(self._columns[c], fields.function) and not self._columns[c].store)]
2271 columns += MAGIC_COLUMNS
2272 cr.execute("SELECT a.attname, a.attnotnull"
2273 " FROM pg_class c, pg_attribute a"
2274 " WHERE c.relname=%s"
2275 " AND c.oid=a.attrelid"
2276 " AND a.attisdropped=%s"
2277 " AND pg_catalog.format_type(a.atttypid, a.atttypmod) NOT IN ('cid', 'tid', 'oid', 'xid')"
2278 " AND a.attname NOT IN %s", (self._table, False, tuple(columns))),
2280 for column in cr.dictfetchall():
2282 _logger.debug("column %s is in the table %s but not in the corresponding object %s",
2283 column['attname'], self._table, self._name)
2284 if column['attnotnull']:
2285 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, column['attname']))
2286 _schema.debug("Table '%s': column '%s': dropped NOT NULL constraint",
2287 self._table, column['attname'])
2289 def _save_constraint(self, cr, constraint_name, type):
2291 Record the creation of a constraint for this model, to make it possible
2292 to delete it later when the module is uninstalled. Type can be either
2293 'f' or 'u' depending on whether the constraint is a foreign key or not.
2295 if not self._module:
2296 # no need to save constraints for custom models as they're not part
2299 assert type in ('f', 'u')
2301 SELECT 1 FROM ir_model_constraint, ir_module_module
2302 WHERE ir_model_constraint.module=ir_module_module.id
2303 AND ir_model_constraint.name=%s
2304 AND ir_module_module.name=%s
2305 """, (constraint_name, self._module))
2308 INSERT INTO ir_model_constraint
2309 (name, date_init, date_update, module, model, type)
2310 VALUES (%s, now() AT TIME ZONE 'UTC', now() AT TIME ZONE 'UTC',
2311 (SELECT id FROM ir_module_module WHERE name=%s),
2312 (SELECT id FROM ir_model WHERE model=%s), %s)""",
2313 (constraint_name, self._module, self._name, type))
2315 def _save_relation_table(self, cr, relation_table):
2317 Record the creation of a many2many relation table for this model, to make it possible
2318 to delete it later when the module is uninstalled.
2321 SELECT 1 FROM ir_model_relation, ir_module_module
2322 WHERE ir_model_relation.module=ir_module_module.id
2323 AND ir_model_relation.name=%s
2324 AND ir_module_module.name=%s
2325 """, (relation_table, self._module))
2327 cr.execute("""INSERT INTO ir_model_relation (name, date_init, date_update, module, model)
2328 VALUES (%s, now() AT TIME ZONE 'UTC', now() AT TIME ZONE 'UTC',
2329 (SELECT id FROM ir_module_module WHERE name=%s),
2330 (SELECT id FROM ir_model WHERE model=%s))""",
2331 (relation_table, self._module, self._name))
2332 self.invalidate_cache(cr, SUPERUSER_ID)
2334 # checked version: for direct m2o starting from `self`
2335 def _m2o_add_foreign_key_checked(self, source_field, dest_model, ondelete):
2336 assert self.is_transient() or not dest_model.is_transient(), \
2337 'Many2One relationships from non-transient Model to TransientModel are forbidden'
2338 if self.is_transient() and not dest_model.is_transient():
2339 # TransientModel relationships to regular Models are annoying
2340 # usually because they could block deletion due to the FKs.
2341 # So unless stated otherwise we default them to ondelete=cascade.
2342 ondelete = ondelete or 'cascade'
2343 fk_def = (self._table, source_field, dest_model._table, ondelete or 'set null')
2344 self._foreign_keys.add(fk_def)
2345 _schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s", *fk_def)
2347 # unchecked version: for custom cases, such as m2m relationships
2348 def _m2o_add_foreign_key_unchecked(self, source_table, source_field, dest_model, ondelete):
2349 fk_def = (source_table, source_field, dest_model._table, ondelete or 'set null')
2350 self._foreign_keys.add(fk_def)
2351 _schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s", *fk_def)
2353 def _drop_constraint(self, cr, source_table, constraint_name):
2354 cr.execute("ALTER TABLE %s DROP CONSTRAINT %s" % (source_table,constraint_name))
2356 def _m2o_fix_foreign_key(self, cr, source_table, source_field, dest_model, ondelete):
2357 # Find FK constraint(s) currently established for the m2o field,
2358 # and see whether they are stale or not
2359 cr.execute("""SELECT confdeltype as ondelete_rule, conname as constraint_name,
2360 cl2.relname as foreign_table
2361 FROM pg_constraint as con, pg_class as cl1, pg_class as cl2,
2362 pg_attribute as att1, pg_attribute as att2
2363 WHERE con.conrelid = cl1.oid
2364 AND cl1.relname = %s
2365 AND con.confrelid = cl2.oid
2366 AND array_lower(con.conkey, 1) = 1
2367 AND con.conkey[1] = att1.attnum
2368 AND att1.attrelid = cl1.oid
2369 AND att1.attname = %s
2370 AND array_lower(con.confkey, 1) = 1
2371 AND con.confkey[1] = att2.attnum
2372 AND att2.attrelid = cl2.oid
2373 AND att2.attname = %s
2374 AND con.contype = 'f'""", (source_table, source_field, 'id'))
2375 constraints = cr.dictfetchall()
2377 if len(constraints) == 1:
2378 # Is it the right constraint?
2380 if cons['ondelete_rule'] != POSTGRES_CONFDELTYPES.get((ondelete or 'set null').upper(), 'a')\
2381 or cons['foreign_table'] != dest_model._table:
2382 # Wrong FK: drop it and recreate
2383 _schema.debug("Table '%s': dropping obsolete FK constraint: '%s'",
2384 source_table, cons['constraint_name'])
2385 self._drop_constraint(cr, source_table, cons['constraint_name'])
2387 # it's all good, nothing to do!
2390 # Multiple FKs found for the same field, drop them all, and re-create
2391 for cons in constraints:
2392 _schema.debug("Table '%s': dropping duplicate FK constraints: '%s'",
2393 source_table, cons['constraint_name'])
2394 self._drop_constraint(cr, source_table, cons['constraint_name'])
2396 # (re-)create the FK
2397 self._m2o_add_foreign_key_checked(source_field, dest_model, ondelete)
2400 def _set_default_value_on_column(self, cr, column_name, context=None):
2401 # ideally should use add_default_value but fails
2402 # due to ir.values not being ready
2404 # get old-style default
2405 default = self._defaults.get(column_name)
2406 if callable(default):
2407 default = default(self, cr, SUPERUSER_ID, context)
2409 # get new_style default if no old-style
2411 record = self.new(cr, SUPERUSER_ID, context=context)
2412 field = self._fields[column_name]
2413 field.determine_default(record)
2414 defaults = dict(record._cache)
2415 if column_name in defaults:
2416 default = field.convert_to_write(defaults[column_name])
2418 column = self._columns[column_name]
2419 ss = column._symbol_set
2420 db_default = ss[1](default)
2421 # Write default if non-NULL, except for booleans for which False means
2422 # the same as NULL - this saves us an expensive query on large tables.
2423 write_default = (db_default is not None if column._type != 'boolean'
2426 _logger.debug("Table '%s': setting default value of new column %s to %r",
2427 self._table, column_name, default)
2428 query = 'UPDATE "%s" SET "%s"=%s WHERE "%s" is NULL' % (
2429 self._table, column_name, ss[0], column_name)
2430 cr.execute(query, (db_default,))
2431 # this is a disgrace
2434 def _auto_init(self, cr, context=None):
2437 Call _field_create and, unless _auto is False:
2439 - create the corresponding table in database for the model,
2440 - possibly add the parent columns in database,
2441 - possibly add the columns 'create_uid', 'create_date', 'write_uid',
2442 'write_date' in database if _log_access is True (the default),
2443 - report on database columns no more existing in _columns,
2444 - remove no more existing not null constraints,
2445 - alter existing database columns to match _columns,
2446 - create database tables to match _columns,
2447 - add database indices to match _columns,
2448 - save in self._foreign_keys a list of foreign keys to create (see
2452 self._foreign_keys = set()
2453 raise_on_invalid_object_name(self._name)
2456 store_compute = False
2457 stored_fields = [] # new-style stored fields with compute
2459 update_custom_fields = context.get('update_custom_fields', False)
2460 self._field_create(cr, context=context)
2461 create = not self._table_exist(cr)
2465 self._create_table(cr)
2468 cr.execute('SELECT min(id) FROM "%s"' % (self._table,))
2469 has_rows = cr.fetchone()[0] is not None
2472 if self._parent_store:
2473 if not self._parent_columns_exist(cr):
2474 self._create_parent_columns(cr)
2475 store_compute = True
2477 self._check_removed_columns(cr, log=False)
2479 # iterate on the "object columns"
2480 column_data = self._select_column_data(cr)
2482 for k, f in self._columns.iteritems():
2483 if k == 'id': # FIXME: maybe id should be a regular column?
2485 # Don't update custom (also called manual) fields
2486 if f.manual and not update_custom_fields:
2489 if isinstance(f, fields.one2many):
2490 self._o2m_raise_on_missing_reference(cr, f)
2492 elif isinstance(f, fields.many2many):
2493 self._m2m_raise_or_create_relation(cr, f)
2496 res = column_data.get(k)
2498 # The field is not found as-is in database, try if it
2499 # exists with an old name.
2500 if not res and hasattr(f, 'oldname'):
2501 res = column_data.get(f.oldname)
2503 cr.execute('ALTER TABLE "%s" RENAME "%s" TO "%s"' % (self._table, f.oldname, k))
2505 column_data[k] = res
2506 _schema.debug("Table '%s': renamed column '%s' to '%s'",
2507 self._table, f.oldname, k)
2509 # The field already exists in database. Possibly
2510 # change its type, rename it, drop it or change its
2513 f_pg_type = res['typname']
2514 f_pg_size = res['size']
2515 f_pg_notnull = res['attnotnull']
2516 if isinstance(f, fields.function) and not f.store and\
2517 not getattr(f, 'nodrop', False):
2518 _logger.info('column %s (%s) converted to a function, removed from table %s',
2519 k, f.string, self._table)
2520 cr.execute('ALTER TABLE "%s" DROP COLUMN "%s" CASCADE' % (self._table, k))
2522 _schema.debug("Table '%s': dropped column '%s' with cascade",
2526 f_obj_type = get_pg_type(f) and get_pg_type(f)[0]
2531 ('text', 'char', pg_varchar(f.size), '::%s' % pg_varchar(f.size)),
2532 ('varchar', 'text', 'TEXT', ''),
2533 ('int4', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
2534 ('date', 'datetime', 'TIMESTAMP', '::TIMESTAMP'),
2535 ('timestamp', 'date', 'date', '::date'),
2536 ('numeric', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
2537 ('float8', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
2539 if f_pg_type == 'varchar' and f._type == 'char' and f_pg_size and (f.size is None or f_pg_size < f.size):
2541 with cr.savepoint():
2542 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" TYPE %s' % (self._table, k, pg_varchar(f.size)))
2543 except psycopg2.NotSupportedError:
2544 # In-place ALTER TABLE cannot be done because a view depends on this field.
2545 # Do a manual copy. This will drop the view (it will be recreated later).
2546 cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO temp_change_size' % (self._table, k))
2547 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, pg_varchar(f.size)))
2548 cr.execute('UPDATE "%s" SET "%s"=temp_change_size::%s' % (self._table, k, pg_varchar(f.size)))
2549 cr.execute('ALTER TABLE "%s" DROP COLUMN temp_change_size CASCADE' % (self._table,))
2551 _schema.debug("Table '%s': column '%s' (type varchar) changed size from %s to %s",
2552 self._table, k, f_pg_size or 'unlimited', f.size or 'unlimited')
2554 if (f_pg_type==c[0]) and (f._type==c[1]):
2555 if f_pg_type != f_obj_type:
2557 cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO __temp_type_cast' % (self._table, k))
2558 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, c[2]))
2559 cr.execute(('UPDATE "%s" SET "%s"= __temp_type_cast'+c[3]) % (self._table, k))
2560 cr.execute('ALTER TABLE "%s" DROP COLUMN __temp_type_cast CASCADE' % (self._table,))
2562 _schema.debug("Table '%s': column '%s' changed type from %s to %s",
2563 self._table, k, c[0], c[1])
2566 if f_pg_type != f_obj_type:
2570 newname = k + '_moved' + str(i)
2571 cr.execute("SELECT count(1) FROM pg_class c,pg_attribute a " \
2572 "WHERE c.relname=%s " \
2573 "AND a.attname=%s " \
2574 "AND c.oid=a.attrelid ", (self._table, newname))
2575 if not cr.fetchone()[0]:
2579 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, k))
2580 cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO "%s"' % (self._table, k, newname))
2581 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, get_pg_type(f)[1]))
2582 cr.execute("COMMENT ON COLUMN %s.\"%s\" IS %%s" % (self._table, k), (f.string,))
2583 _schema.debug("Table '%s': column '%s' has changed type (DB=%s, def=%s), data moved to column %s !",
2584 self._table, k, f_pg_type, f._type, newname)
2586 # if the field is required and hasn't got a NOT NULL constraint
2587 if f.required and f_pg_notnull == 0:
2589 self._set_default_value_on_column(cr, k, context=context)
2590 # add the NOT NULL constraint
2592 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k), log_exceptions=False)
2594 _schema.debug("Table '%s': column '%s': added NOT NULL constraint",
2597 msg = "Table '%s': unable to set a NOT NULL constraint on column '%s' !\n"\
2598 "If you want to have it, you should update the records and execute manually:\n"\
2599 "ALTER TABLE %s ALTER COLUMN %s SET NOT NULL"
2600 _schema.warning(msg, self._table, k, self._table, k)
2602 elif not f.required and f_pg_notnull == 1:
2603 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, k))
2605 _schema.debug("Table '%s': column '%s': dropped NOT NULL constraint",
2608 indexname = '%s_%s_index' % (self._table, k)
2609 cr.execute("SELECT indexname FROM pg_indexes WHERE indexname = %s and tablename = %s", (indexname, self._table))
2610 res2 = cr.dictfetchall()
2611 if not res2 and f.select:
2612 cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (self._table, k, self._table, k))
2614 if f._type == 'text':
2615 # FIXME: for fields.text columns we should try creating GIN indexes instead (seems most suitable for an ERP context)
2616 msg = "Table '%s': Adding (b-tree) index for %s column '%s'."\
2617 "This is probably useless (does not work for fulltext search) and prevents INSERTs of long texts"\
2618 " because there is a length limit for indexable btree values!\n"\
2619 "Use a search view instead if you simply want to make the field searchable."
2620 _schema.warning(msg, self._table, f._type, k)
2621 if res2 and not f.select:
2622 cr.execute('DROP INDEX "%s_%s_index"' % (self._table, k))
2624 msg = "Table '%s': dropping index for column '%s' of type '%s' as it is not required anymore"
2625 _schema.debug(msg, self._table, k, f._type)
2627 if isinstance(f, fields.many2one) or (isinstance(f, fields.function) and f._type == 'many2one' and f.store):
2628 dest_model = self.pool[f._obj]
2629 if dest_model._auto and dest_model._table != 'ir_actions':
2630 self._m2o_fix_foreign_key(cr, self._table, k, dest_model, f.ondelete)
2632 # The field doesn't exist in database. Create it if necessary.
2634 if not isinstance(f, fields.function) or f.store:
2635 # add the missing field
2636 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, get_pg_type(f)[1]))
2637 cr.execute("COMMENT ON COLUMN %s.\"%s\" IS %%s" % (self._table, k), (f.string,))
2638 _schema.debug("Table '%s': added column '%s' with definition=%s",
2639 self._table, k, get_pg_type(f)[1])
2643 self._set_default_value_on_column(cr, k, context=context)
2645 # remember the functions to call for the stored fields
2646 if isinstance(f, fields.function):
2648 if f.store is not True: # i.e. if f.store is a dict
2649 order = f.store[f.store.keys()[0]][2]
2650 todo_end.append((order, self._update_store, (f, k)))
2652 # remember new-style stored fields with compute method
2653 if k in self._fields and self._fields[k].depends:
2654 stored_fields.append(self._fields[k])
2656 # and add constraints if needed
2657 if isinstance(f, fields.many2one) or (isinstance(f, fields.function) and f._type == 'many2one' and f.store):
2658 if f._obj not in self.pool:
2659 raise except_orm('Programming Error', 'There is no reference available for %s' % (f._obj,))
2660 dest_model = self.pool[f._obj]
2661 ref = dest_model._table
2662 # ir_actions is inherited so foreign key doesn't work on it
2663 if ref != 'ir_actions':
2664 self._m2o_add_foreign_key_checked(k, dest_model, f.ondelete)
2666 cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (self._table, k, self._table, k))
2670 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k))
2671 _schema.debug("Table '%s': column '%s': added a NOT NULL constraint",
2674 msg = "WARNING: unable to set column %s of table %s not null !\n"\
2675 "Try to re-run: openerp-server --update=module\n"\
2676 "If it doesn't work, update records and execute manually:\n"\
2677 "ALTER TABLE %s ALTER COLUMN %s SET NOT NULL"
2678 _logger.warning(msg, k, self._table, self._table, k, exc_info=True)
2682 cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (self._table,))
2683 create = not bool(cr.fetchone())
2685 cr.commit() # start a new transaction
2688 self._add_sql_constraints(cr)
2691 self._execute_sql(cr)
2694 self._parent_store_compute(cr)
2698 # trigger computation of new-style stored fields with a compute
2700 _logger.info("Storing computed values of %s fields %s",
2701 self._name, ', '.join(sorted(f.name for f in stored_fields)))
2702 recs = self.browse(cr, SUPERUSER_ID, [], {'active_test': False})
2703 recs = recs.search([])
2705 map(recs._recompute_todo, stored_fields)
2708 todo_end.append((1000, func, ()))
2712 def _auto_end(self, cr, context=None):
2713 """ Create the foreign keys recorded by _auto_init. """
2714 for t, k, r, d in self._foreign_keys:
2715 cr.execute('ALTER TABLE "%s" ADD FOREIGN KEY ("%s") REFERENCES "%s" ON DELETE %s' % (t, k, r, d))
2716 self._save_constraint(cr, "%s_%s_fkey" % (t, k), 'f')
2718 del self._foreign_keys
2721 def _table_exist(self, cr):
2722 cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (self._table,))
2726 def _create_table(self, cr):
2727 cr.execute('CREATE TABLE "%s" (id SERIAL NOT NULL, PRIMARY KEY(id))' % (self._table,))
2728 cr.execute(("COMMENT ON TABLE \"%s\" IS %%s" % self._table), (self._description,))
2729 _schema.debug("Table '%s': created", self._table)
2732 def _parent_columns_exist(self, cr):
2733 cr.execute("""SELECT c.relname
2734 FROM pg_class c, pg_attribute a
2735 WHERE c.relname=%s AND a.attname=%s AND c.oid=a.attrelid
2736 """, (self._table, 'parent_left'))
2740 def _create_parent_columns(self, cr):
2741 cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_left" INTEGER' % (self._table,))
2742 cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_right" INTEGER' % (self._table,))
2743 if 'parent_left' not in self._columns:
2744 _logger.error('create a column parent_left on object %s: fields.integer(\'Left Parent\', select=1)',
2746 _schema.debug("Table '%s': added column '%s' with definition=%s",
2747 self._table, 'parent_left', 'INTEGER')
2748 elif not self._columns['parent_left'].select:
2749 _logger.error('parent_left column on object %s must be indexed! Add select=1 to the field definition)',
2751 if 'parent_right' not in self._columns:
2752 _logger.error('create a column parent_right on object %s: fields.integer(\'Right Parent\', select=1)',
2754 _schema.debug("Table '%s': added column '%s' with definition=%s",
2755 self._table, 'parent_right', 'INTEGER')
2756 elif not self._columns['parent_right'].select:
2757 _logger.error('parent_right column on object %s must be indexed! Add select=1 to the field definition)',
2759 if self._columns[self._parent_name].ondelete not in ('cascade', 'restrict'):
2760 _logger.error("The column %s on object %s must be set as ondelete='cascade' or 'restrict'",
2761 self._parent_name, self._name)
2766 def _select_column_data(self, cr):
2767 # attlen is the number of bytes necessary to represent the type when
2768 # the type has a fixed size. If the type has a varying size attlen is
2769 # -1 and atttypmod is the size limit + 4, or -1 if there is no limit.
2770 cr.execute("SELECT c.relname,a.attname,a.attlen,a.atttypmod,a.attnotnull,a.atthasdef,t.typname,CASE WHEN a.attlen=-1 THEN (CASE WHEN a.atttypmod=-1 THEN 0 ELSE a.atttypmod-4 END) ELSE a.attlen END as size " \
2771 "FROM pg_class c,pg_attribute a,pg_type t " \
2772 "WHERE c.relname=%s " \
2773 "AND c.oid=a.attrelid " \
2774 "AND a.atttypid=t.oid", (self._table,))
2775 return dict(map(lambda x: (x['attname'], x),cr.dictfetchall()))
2778 def _o2m_raise_on_missing_reference(self, cr, f):
2779 # TODO this check should be a method on fields.one2many.
2780 if f._obj in self.pool:
2781 other = self.pool[f._obj]
2782 # TODO the condition could use fields_get_keys().
2783 if f._fields_id not in other._columns.keys():
2784 if f._fields_id not in other._inherit_fields.keys():
2785 raise except_orm('Programming Error', "There is no reference field '%s' found for '%s'" % (f._fields_id, f._obj,))
2787 def _m2m_raise_or_create_relation(self, cr, f):
2788 m2m_tbl, col1, col2 = f._sql_names(self)
2789 # do not create relations for custom fields as they do not belong to a module
2790 # they will be automatically removed when dropping the corresponding ir.model.field
2791 # table names for custom relations all start with x_, see __init__
2792 if not m2m_tbl.startswith('x_'):
2793 self._save_relation_table(cr, m2m_tbl)
2794 cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (m2m_tbl,))
2795 if not cr.dictfetchall():
2796 if f._obj not in self.pool:
2797 raise except_orm('Programming Error', 'Many2Many destination model does not exist: `%s`' % (f._obj,))
2798 dest_model = self.pool[f._obj]
2799 ref = dest_model._table
2800 cr.execute('CREATE TABLE "%s" ("%s" INTEGER NOT NULL, "%s" INTEGER NOT NULL, UNIQUE("%s","%s"))' % (m2m_tbl, col1, col2, col1, col2))
2801 # create foreign key references with ondelete=cascade, unless the targets are SQL views
2802 cr.execute("SELECT relkind FROM pg_class WHERE relkind IN ('v') AND relname=%s", (ref,))
2803 if not cr.fetchall():
2804 self._m2o_add_foreign_key_unchecked(m2m_tbl, col2, dest_model, 'cascade')
2805 cr.execute("SELECT relkind FROM pg_class WHERE relkind IN ('v') AND relname=%s", (self._table,))
2806 if not cr.fetchall():
2807 self._m2o_add_foreign_key_unchecked(m2m_tbl, col1, self, 'cascade')
2809 cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (m2m_tbl, col1, m2m_tbl, col1))
2810 cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (m2m_tbl, col2, m2m_tbl, col2))
2811 cr.execute("COMMENT ON TABLE \"%s\" IS 'RELATION BETWEEN %s AND %s'" % (m2m_tbl, self._table, ref))
2813 _schema.debug("Create table '%s': m2m relation between '%s' and '%s'", m2m_tbl, self._table, ref)
2816 def _add_sql_constraints(self, cr):
2819 Modify this model's database table constraints so they match the one in
2823 def unify_cons_text(txt):
2824 return txt.lower().replace(', ',',').replace(' (','(')
2826 for (key, con, _) in self._sql_constraints:
2827 conname = '%s_%s' % (self._table, key)
2829 self._save_constraint(cr, conname, 'u')
2830 cr.execute("SELECT conname, pg_catalog.pg_get_constraintdef(oid, true) as condef FROM pg_constraint where conname=%s", (conname,))
2831 existing_constraints = cr.dictfetchall()
2835 'query': 'ALTER TABLE "%s" DROP CONSTRAINT "%s"' % (self._table, conname, ),
2836 'msg_ok': "Table '%s': dropped constraint '%s'. Reason: its definition changed from '%%s' to '%s'" % (
2837 self._table, conname, con),
2838 'msg_err': "Table '%s': unable to drop \'%s\' constraint !" % (self._table, con),
2843 'query': 'ALTER TABLE "%s" ADD CONSTRAINT "%s" %s' % (self._table, conname, con,),
2844 'msg_ok': "Table '%s': added constraint '%s' with definition=%s" % (self._table, conname, con),
2845 'msg_err': "Table '%s': unable to add \'%s\' constraint !\n If you want to have it, you should update the records and execute manually:\n%%s" % (
2851 if not existing_constraints:
2852 # constraint does not exist:
2853 sql_actions['add']['execute'] = True
2854 sql_actions['add']['msg_err'] = sql_actions['add']['msg_err'] % (sql_actions['add']['query'], )
2855 elif unify_cons_text(con) not in [unify_cons_text(item['condef']) for item in existing_constraints]:
2856 # constraint exists but its definition has changed:
2857 sql_actions['drop']['execute'] = True
2858 sql_actions['drop']['msg_ok'] = sql_actions['drop']['msg_ok'] % (existing_constraints[0]['condef'].lower(), )
2859 sql_actions['add']['execute'] = True
2860 sql_actions['add']['msg_err'] = sql_actions['add']['msg_err'] % (sql_actions['add']['query'], )
2862 # execute the SQL actions flagged above, in order:
2863 sql_actions = [item for item in sql_actions.values()]
2864 sql_actions.sort(key=lambda x: x['order'])
2865 for sql_action in [action for action in sql_actions if action['execute']]:
2867 cr.execute(sql_action['query'])
2869 _schema.debug(sql_action['msg_ok'])
2871 _schema.warning(sql_action['msg_err'])
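# Editor's note: a hedged sketch of the _sql_constraints declaration that this
# method reflects into the database; the model and constraint are illustrative.
#
#     class my_line(osv.osv):
#         _name = 'my.module.line'
#         _sql_constraints = [
#             ('qty_positive', 'CHECK(quantity >= 0)',
#              'The quantity must be positive.'),
#         ]
#     # _add_sql_constraints() would create constraint "my_module_line_qty_positive"
#     # on table my_module_line, or drop and re-create it if its definition changed.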
2875 def _execute_sql(self, cr):
2876 """ Execute the SQL code from the _sql attribute (if any)."""
2877 if hasattr(self, "_sql"):
2878 for line in self._sql.split(';'):
2879 line2 = line.replace('\n', '').strip()
2885 # Update objects that use this one to update their _inherits fields
2889 def _inherits_reload_src(cls):
2890 """ Recompute the _inherit_fields mapping on each _inherits'd child model."""
2891 for model in cls.pool.values():
2892 if cls._name in model._inherits:
2893 model._inherits_reload()
2896 def _inherits_reload(cls):
2897 """ Recompute the _inherit_fields mapping.
2899 This will also call itself on each inherits'd child model.
2903 for table in cls._inherits:
2904 other = cls.pool[table]
2905 for col in other._columns.keys():
2906 res[col] = (table, cls._inherits[table], other._columns[col], table)
2907 for col in other._inherit_fields.keys():
2908 res[col] = (table, cls._inherits[table], other._inherit_fields[col][2], other._inherit_fields[col][3])
2909 cls._inherit_fields = res
2910 cls._all_columns = cls._get_column_infos()
2912 # interface columns with new-style fields
2913 for attr, column in cls._columns.items():
2914 if attr not in cls._fields:
2915 cls._add_field(attr, column.to_field())
2917 # interface inherited fields with new-style fields (note that the
2918 # reverse order is for being consistent with _all_columns above)
2919 for parent_model, parent_field in reversed(cls._inherits.items()):
2920 for attr, field in cls.pool[parent_model]._fields.iteritems():
2921 if attr not in cls._fields:
2922 new_field = field.copy(related=(parent_field, attr), _origin=field)
2923 cls._add_field(attr, new_field)
2925 cls._inherits_reload_src()
2928 def _get_column_infos(cls):
2929 """Returns a dict mapping all fields names (direct fields and
2930 inherited field via _inherits) to a ``column_info`` struct
2931 giving detailed columns """
2933 # do not swap the for loops, since local fields may hide inherited ones!
2934 for k, (parent, m2o, col, original_parent) in cls._inherit_fields.iteritems():
2935 result[k] = fields.column_info(k, col, parent, m2o, original_parent)
2936 for k, col in cls._columns.iteritems():
2937 result[k] = fields.column_info(k, col)
2941 def _inherits_check(cls):
2942 for table, field_name in cls._inherits.items():
2943 if field_name not in cls._columns:
2944 _logger.info('Missing many2one field definition for _inherits reference "%s" in "%s", using default one.', field_name, cls._name)
2945 cls._columns[field_name] = fields.many2one(table, string="Automatically created field to link to parent %s" % table,
2946 required=True, ondelete="cascade")
2947 elif not cls._columns[field_name].required or cls._columns[field_name].ondelete.lower() not in ("cascade", "restrict"):
2948 _logger.warning('Field definition for _inherits reference "%s" in "%s" must be marked as "required" with ondelete="cascade" or "restrict", forcing it to required + cascade.', field_name, cls._name)
2949 cls._columns[field_name].required = True
2950 cls._columns[field_name].ondelete = "cascade"
2952 # reflect fields with delegate=True in dictionary cls._inherits
2953 for field in cls._fields.itervalues():
2954 if field.type == 'many2one' and not field.related and field.delegate:
2955 if not field.required:
2956 _logger.warning("Field %s with delegate=True must be required.", field)
2957 field.required = True
2958 if field.ondelete.lower() not in ('cascade', 'restrict'):
2959 field.ondelete = 'cascade'
2960 cls._inherits[field.comodel_name] = field.name
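# Editor's note: a hedged sketch of an _inherits declaration that the checks
# above validate; the model names are illustrative.
#
#     class my_publication(osv.osv):
#         _name = 'my.publication'
#         _inherits = {'product.product': 'product_id'}
#         _columns = {
#             'product_id': fields.many2one('product.product', 'Product',
#                                           required=True, ondelete='cascade'),
#         }
#     # every my.publication record delegates the product fields to the linked
#     # product.product record through product_id.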
2963 def _prepare_setup_fields(self):
2964 """ Prepare the setup of fields once the models have been loaded. """
2965 for field in self._fields.itervalues():
2969 def _setup_fields(self, partial=False):
2970 """ Setup the fields (dependency triggers, etc). """
2971 for field in self._fields.itervalues():
2972 if partial and field.manual and \
2973 field.relational and field.comodel_name not in self.pool:
2974 # do not set up manual fields that refer to unknown models
2976 field.setup(self.env)
2978 # group fields by compute to determine field.computed_fields
2979 fields_by_compute = defaultdict(list)
2980 for field in self._fields.itervalues():
2982 field.computed_fields = fields_by_compute[field.compute]
2983 field.computed_fields.append(field)
2985 field.computed_fields = []
2987 def fields_get(self, cr, user, allfields=None, context=None, write_access=True):
2988 """ fields_get([fields])
2990 Return the definition of each field.
2992 The returned value is a dictionary (indexed by field name) of
2993 dictionaries. The _inherits'd fields are included. The string, help,
2994 and selection (if present) attributes are translated.
2996 :param cr: database cursor
2997 :param user: current user id
2998 :param allfields: list of fields
2999 :param context: context arguments, like lang, time zone
3000 :return: dictionary of field dictionaries, each one describing a field of the business object
3001 :raise AccessError: * if user has no create/write rights on the requested object
3004 recs = self.browse(cr, user, [], context)
3007 for fname, field in self._fields.iteritems():
3008 if allfields and fname not in allfields:
3010 if field.groups and not recs.user_has_groups(field.groups):
3012 res[fname] = field.get_description(recs.env)
3014 # if user cannot create or modify records, make all fields readonly
3015 has_access = functools.partial(recs.check_access_rights, raise_exception=False)
3016 if not (has_access('write') or has_access('create')):
3017 for description in res.itervalues():
3018 description['readonly'] = True
3019 description['states'] = {}
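# Editor's note: a hedged example of a fields_get() call and a fragment of its
# result; the model and returned values are illustrative.
#
#     self.pool['res.partner'].fields_get(cr, uid, ['name'], context=context)
#     # -> {'name': {'type': 'char', 'string': 'Name', 'required': True, ...}}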
3023 def get_empty_list_help(self, cr, user, help, context=None):
3024 """ Generic method giving the help message displayed when having
3025 no result to display in a list or kanban view. By default it returns
3026 the help given in parameter that is generally the help message
3027 defined in the action.
3031 def check_field_access_rights(self, cr, user, operation, fields, context=None):
3033 Check the user access rights on the given fields. This raises Access
3034 Denied if the user does not have the rights. Otherwise it returns the
3035 fields (as-is if ``fields`` is not falsy, or the readable/writable
3036 fields if ``fields`` is falsy).
3038 if user == SUPERUSER_ID:
3039 return fields or list(self._fields)
3042 """ determine whether user has access to field `fname` """
3043 field = self._fields.get(fname)
3044 if field and field.groups:
3045 return self.user_has_groups(cr, user, groups=field.groups, context=context)
3050 fields = filter(valid, self._fields)
3052 invalid_fields = set(filter(lambda name: not valid(name), fields))
3054 _logger.warning('Access Denied by ACLs for operation: %s, uid: %s, model: %s, fields: %s',
3055 operation, user, self._name, ', '.join(invalid_fields))
3057 _('The requested operation cannot be completed due to security restrictions. '
3058 'Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
3059 (self._description, operation))
3063 # add explicit old-style implementation to read()
3065 def read(self, cr, user, ids, fields=None, context=None, load='_classic_read'):
3066 records = self.browse(cr, user, ids, context)
3067 result = BaseModel.read(records, fields, load=load)
3068 return result if isinstance(ids, list) else (bool(result) and result[0])
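# Editor's note: a hedged illustration of the two calling conventions above;
# the model, ids and field names are illustrative.
#
#     # old API: explicit cursor and uid, returns a list of dicts
#     self.pool['res.partner'].read(cr, uid, [7], ['name', 'country_id'], context=context)
#     # new API: called directly on a recordset
#     records.read(['name', 'country_id'])
#     # -> e.g. [{'id': 7, 'name': 'Agrolait', 'country_id': (20, 'Belgium')}]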
3070 # new-style implementation of read()
3072 def read(self, fields=None, load='_classic_read'):
3075 Reads the requested fields for the records in `self`, low-level/RPC
3076 method. In Python code, prefer :meth:`~.browse`.
3078 :param fields: list of field names to return (default is all fields)
3079 :return: a list of dictionaries mapping field names to their values,
3080 with one dictionary per record
3081 :raise AccessError: if user has no read rights on some of the given
3084 # check access rights
3085 self.check_access_rights('read')
3086 fields = self.check_field_access_rights('read', fields)
3088 # split fields into stored and computed fields
3089 stored, computed = [], []
3091 if name in self._columns:
3093 elif name in self._fields:
3094 computed.append(name)
3096 _logger.warning("%s.read() with unknown field '%s'", self._name, name)
3098 # fetch stored fields from the database to the cache
3099 self._read_from_database(stored)
3101 # retrieve results from records; this takes values from the cache and
3102 # computes remaining fields
3104 name_fields = [(name, self._fields[name]) for name in (stored + computed)]
3105 use_name_get = (load == '_classic_read')
3108 values = {'id': record.id}
3109 for name, field in name_fields:
3110 values[name] = field.convert_to_read(record[name], use_name_get)
3111 result.append(values)
3112 except MissingError:
3118 def _prefetch_field(self, field):
3119 """ Read from the database in order to fetch `field` (:class:`Field`
3120 instance) for `self` in cache.
3122 # fetch the records of this model without `field` in their cache
3123 records = self._in_cache_without(field)
3125 # by default, simply fetch field
3126 fnames = {field.name}
3128 if self.env.in_draft:
3129 # we may be doing an onchange, do not prefetch other fields
3131 elif field in self.env.todo:
3132 # field must be recomputed, do not prefetch records to recompute
3133 records -= self.env.todo[field]
3134 elif self._columns[field.name]._prefetch:
3135 # here we can optimize: prefetch all classic and many2one fields
3137 for fname, fcolumn in self._columns.iteritems()
3138 if fcolumn._prefetch)
3140 # fetch records with read()
3141 assert self in records and field.name in fnames
3144 result = records.read(list(fnames), load='_classic_write')
3148 # check the cache, and update it if necessary
3149 if not self._cache.contains(field):
3150 for values in result:
3151 record = self.browse(values.pop('id'))
3152 record._cache.update(record._convert_to_cache(values, validate=False))
3153 if not self._cache.contains(field):
3154 e = AccessError("No value found for %s.%s" % (self, field.name))
3155 self._cache[field] = FailedValue(e)
3158 def _read_from_database(self, field_names):
3159 """ Read the given fields of the records in `self` from the database,
3160 and store them in cache. Access errors are also stored in cache.
3163 cr, user, context = env.args
3165 # FIXME: The query construction needs to be rewritten using the internal Query
3166 # object, as in search(), to avoid ambiguous column references when
3167 # reading/sorting on a table that is auto_joined to another table with
3168 # common columns (e.g. the magical columns)
3170 # Construct a clause for the security rules.
3171 # 'tables' holds the list of tables necessary for the SELECT, including
3172 # the ir.rule clauses, and contains at least self._table.
3173 rule_clause, rule_params, tables = env['ir.rule'].domain_get(self._name, 'read')
3175 # determine the fields that are stored as columns in self._table
3176 fields_pre = [f for f in field_names if self._columns[f]._classic_write]
3178 # we need fully-qualified column names in case len(tables) > 1
3180 if isinstance(self._columns.get(f), fields.binary) and \
3181 context.get('bin_size_%s' % f, context.get('bin_size')):
3182 # PG 9.2 introduces conflicting pg_size_pretty(numeric) -> need ::cast
3183 return 'pg_size_pretty(length(%s."%s")::bigint) as "%s"' % (self._table, f, f)
3185 return '%s."%s"' % (self._table, f)
3186 qual_names = map(qualify, set(fields_pre + ['id']))
3188 query = """ SELECT %(qual_names)s FROM %(tables)s
3189 WHERE %(table)s.id IN %%s AND (%(extra)s)
3192 'qual_names': ",".join(qual_names),
3193 'tables': ",".join(tables),
3194 'table': self._table,
3195 'extra': " OR ".join(rule_clause) if rule_clause else "TRUE",
3196 'order': self._parent_order or self._order,
3200 for sub_ids in cr.split_for_in_conditions(self.ids):
3201 cr.execute(query, [tuple(sub_ids)] + rule_params)
3202 result.extend(cr.dictfetchall())
3204 ids = [vals['id'] for vals in result]
3207 # translate the fields if necessary
3208 if context.get('lang'):
3209 ir_translation = env['ir.translation']
3210 for f in fields_pre:
3211 if self._columns[f].translate:
3212 #TODO: optimize out of this loop
3213 res_trans = ir_translation._get_ids(
3214 '%s,%s' % (self._name, f), 'model', context['lang'], ids)
3216 vals[f] = res_trans.get(vals['id'], False) or vals[f]
3218 # apply the symbol_get functions of the fields we just read
3219 for f in fields_pre:
3220 symbol_get = self._columns[f]._symbol_get
3223 vals[f] = symbol_get(vals[f])
3225 # store result in cache for POST fields
3227 record = self.browse(vals['id'])
3228 record._cache.update(record._convert_to_cache(vals, validate=False))
3230 # determine the fields that must be processed now
3231 fields_post = [f for f in field_names if not self._columns[f]._classic_write]
3233 # Compute POST fields, grouped by multi
3234 by_multi = defaultdict(list)
3235 for f in fields_post:
3236 by_multi[self._columns[f]._multi].append(f)
3238 for multi, fs in by_multi.iteritems():
3240 res2 = self._columns[fs[0]].get(cr, self._model, ids, fs, user, context=context, values=result)
3241 assert res2 is not None, \
3242 'The function field "%s" on the "%s" model returned None\n' \
3243 '(a dictionary was expected).' % (fs[0], self._name)
3245 # TOCHECK: why we got a string instead of a dict in python2.6
3246 # if isinstance(res2[vals['id']], str): res2[vals['id']] = eval(res2[vals['id']])
3247 multi_fields = res2.get(vals['id'], {})
3250 vals[f] = multi_fields.get(f, [])
3253 res2 = self._columns[f].get(cr, self._model, ids, f, user, context=context, values=result)
3256 vals[f] = res2[vals['id']]
3260 # Warn about deprecated fields now that fields_pre and fields_post are computed
3261 for f in field_names:
3262 column = self._columns[f]
3263 if column.deprecated:
3264 _logger.warning('Field %s.%s is deprecated: %s', self._name, f, column.deprecated)
3266 # store result in cache
3268 record = self.browse(vals.pop('id'))
3269 record._cache.update(record._convert_to_cache(vals, validate=False))
3271 # store failed values in cache for the records that could not be read
3272 fetched = self.browse(ids)
3273 missing = self - fetched
3275 extras = fetched - self
3278 _("Database fetch misses ids ({}) and has extra ids ({}), may be caused by a type incoherence in a previous request").format(
3279 ', '.join(map(repr, missing._ids)),
3280 ', '.join(map(repr, extras._ids)),
3282 # store an access error exception in existing records
3284 _('The requested operation cannot be completed due to security restrictions. Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
3285 (self._name, 'read')
3287 forbidden = missing.exists()
3288 forbidden._cache.update(FailedValue(exc))
3289 # store a missing error exception in non-existing records
3291 _('One of the documents you are trying to access has been deleted, please try again after refreshing.')
3293 (missing - forbidden)._cache.update(FailedValue(exc))
3296 def get_metadata(self):
3298 Returns some metadata about the given records.
3300 :return: list of ownership dictionaries for each requested record
3301 :rtype: list of dictionaries with the following keys:
3304 * create_uid: user who created the record
3305 * create_date: date when the record was created
3306 * write_uid: last user who changed the record
3307 * write_date: date of the last change to the record
3308 * xmlid: XML ID to use to refer to this record (if there is one), in format ``module.name``
3311 if self._log_access:
3312 fields += ['create_uid', 'create_date', 'write_uid', 'write_date']
3313 quoted_table = '"%s"' % self._table
3314 fields_str = ",".join('%s.%s' % (quoted_table, field) for field in fields)
3315 query = '''SELECT %s, __imd.module, __imd.name
3316 FROM %s LEFT JOIN ir_model_data __imd
3317 ON (__imd.model = %%s and __imd.res_id = %s.id)
3318 WHERE %s.id IN %%s''' % (fields_str, quoted_table, quoted_table, quoted_table)
3319 self._cr.execute(query, (self._name, tuple(self.ids)))
3320 res = self._cr.dictfetchall()
3322 uids = set(r[k] for r in res for k in ['write_uid', 'create_uid'] if r.get(k))
3323 names = dict(self.env['res.users'].browse(uids).name_get())
3327 value = r[key] = r[key] or False
3328 if key in ('write_uid', 'create_uid') and value in names:
3329 r[key] = (value, names[value])
3330 r['xmlid'] = ("%(module)s.%(name)s" % r) if r['name'] else False
3331 del r['name'], r['module']
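# Illustrative usage (assumption, not in the original source): reading the
# ownership metadata of a couple of records of a hypothetical model.
#
#     for meta in env['res.partner'].browse([1, 2]).get_metadata():
#         print meta['xmlid'], meta['create_uid'], meta['write_date']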
3334 def _check_concurrency(self, cr, ids, context):
3337 if not (context.get(self.CONCURRENCY_CHECK_FIELD) and self._log_access):
3339 check_clause = "(id = %s AND %s < COALESCE(write_date, create_date, (now() at time zone 'UTC'))::timestamp)"
3340 for sub_ids in cr.split_for_in_conditions(ids):
3343 id_ref = "%s,%s" % (self._name, id)
3344 update_date = context[self.CONCURRENCY_CHECK_FIELD].pop(id_ref, None)
3346 ids_to_check.extend([id, update_date])
3347 if not ids_to_check:
3349 cr.execute("SELECT id FROM %s WHERE %s" % (self._table, " OR ".join([check_clause]*(len(ids_to_check)/2))), tuple(ids_to_check))
3352 # mention the first one only to keep the error message readable
3353 raise except_orm('ConcurrencyException', _('A document was modified since you last viewed it (%s:%d)') % (self._description, res[0]))
3355 def _check_record_rules_result_count(self, cr, uid, ids, result_ids, operation, context=None):
3356 """Verify the returned rows after applying record rules matches
3357 the length of `ids`, and raise an appropriate exception if it does not.
3361 ids, result_ids = set(ids), set(result_ids)
3362 missing_ids = ids - result_ids
3364 # Attempt to distinguish record rule restriction vs deleted records,
3365 # to provide a more specific error message - check if the missing ids are still present in the database
3366 cr.execute('SELECT id FROM ' + self._table + ' WHERE id IN %s', (tuple(missing_ids),))
3367 forbidden_ids = [x[0] for x in cr.fetchall()]
3369 # the missing ids are (at least partially) hidden by access rules
3370 if uid == SUPERUSER_ID:
3372 _logger.warning('Access Denied by record rules for operation: %s on record ids: %r, uid: %s, model: %s', operation, forbidden_ids, uid, self._name)
3373 raise except_orm(_('Access Denied'),
3374 _('The requested operation cannot be completed due to security restrictions. Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
3375 (self._description, operation))
3377 # If we get here, the missing_ids are not in the database
3378 if operation in ('read','unlink'):
3379 # No need to warn about deleting an already deleted record.
3380 # And no error when reading a record that was deleted, to prevent spurious
3381 # errors for non-transactional search/read sequences coming from clients
3383 _logger.warning('Failed operation on deleted record(s): %s, uid: %s, model: %s', operation, uid, self._name)
3384 raise except_orm(_('Missing document(s)'),
3385 _('One of the documents you are trying to access has been deleted, please try again after refreshing.'))
3388 def check_access_rights(self, cr, uid, operation, raise_exception=True): # no context on purpose.
3389 """Verifies that the operation given by ``operation`` is allowed for the user
3390 according to the access rights."""
3391 return self.pool.get('ir.model.access').check(cr, uid, self._name, operation, raise_exception)
3393 def check_access_rule(self, cr, uid, ids, operation, context=None):
3394 """Verifies that the operation given by ``operation`` is allowed for the user
3395 according to ir.rules.
3397 :param operation: one of ``write``, ``unlink``
3398 :raise except_orm: * if current ir.rules do not permit this operation.
3399 :return: None if the operation is allowed
3401 if uid == SUPERUSER_ID:
3404 if self.is_transient():
3405 # Only one single implicit access rule for transient models: owner only!
3406 # This is ok to hardcode because we assert that TransientModels always
3407 # have log_access enabled so that the create_uid column is always there.
3408 # And even with _inherits, these fields are always present in the local
3409 # table too, so no need for JOINs.
3410 cr.execute("""SELECT distinct create_uid
3412 WHERE id IN %%s""" % self._table, (tuple(ids),))
3413 uids = [x[0] for x in cr.fetchall()]
3414 if len(uids) != 1 or uids[0] != uid:
3415 raise except_orm(_('Access Denied'),
3416 _('For this kind of document, you may only access records you created yourself.\n\n(Document type: %s)') % (self._description,))
3418 where_clause, where_params, tables = self.pool.get('ir.rule').domain_get(cr, uid, self._name, operation, context=context)
3420 where_clause = ' and ' + ' and '.join(where_clause)
3421 for sub_ids in cr.split_for_in_conditions(ids):
3422 cr.execute('SELECT ' + self._table + '.id FROM ' + ','.join(tables) +
3423 ' WHERE ' + self._table + '.id IN %s' + where_clause,
3424 [sub_ids] + where_params)
3425 returned_ids = [x['id'] for x in cr.dictfetchall()]
3426 self._check_record_rules_result_count(cr, uid, sub_ids, returned_ids, operation, context=context)
3428 def create_workflow(self, cr, uid, ids, context=None):
3429 """Create a workflow instance for each given record IDs."""
3430 from openerp import workflow
3432 workflow.trg_create(uid, self._name, res_id, cr)
3433 # self.invalidate_cache(cr, uid, context=context) ?
3436 def delete_workflow(self, cr, uid, ids, context=None):
3437 """Delete the workflow instances bound to the given record IDs."""
3438 from openerp import workflow
3440 workflow.trg_delete(uid, self._name, res_id, cr)
3441 self.invalidate_cache(cr, uid, context=context)
3444 def step_workflow(self, cr, uid, ids, context=None):
3445 """Reevaluate the workflow instances of the given record IDs."""
3446 from openerp import workflow
3448 workflow.trg_write(uid, self._name, res_id, cr)
3449 # self.invalidate_cache(cr, uid, context=context) ?
3452 def signal_workflow(self, cr, uid, ids, signal, context=None):
3453 """Send given workflow signal and return a dict mapping ids to workflow results"""
3454 from openerp import workflow
3457 result[res_id] = workflow.trg_validate(uid, self._name, res_id, signal, cr)
3458 # self.invalidate_cache(cr, uid, context=context) ?
3461 def redirect_workflow(self, cr, uid, old_new_ids, context=None):
3462 """ Rebind the workflow instance bound to the given 'old' record IDs to
3463 the given 'new' IDs (``old_new_ids`` is a list of pairs ``(old, new)``).
3465 from openerp import workflow
3466 for old_id, new_id in old_new_ids:
3467 workflow.trg_redirect(uid, self._name, old_id, new_id, cr)
3468 self.invalidate_cache(cr, uid, context=context)
3471 def unlink(self, cr, uid, ids, context=None):
3474 Deletes the records of the current set
3476 :raise AccessError: * if user has no unlink rights on the requested object
3477 * if user tries to bypass access rules for unlink on the requested object
3478 :raise UserError: if the record is a default property for other records
3483 if isinstance(ids, (int, long)):
3486 result_store = self._store_get_values(cr, uid, ids, self._all_columns.keys(), context)
3488 # for recomputing new-style fields
3489 recs = self.browse(cr, uid, ids, context)
3490 recs.modified(self._fields)
3492 self._check_concurrency(cr, ids, context)
3494 self.check_access_rights(cr, uid, 'unlink')
3496 ir_property = self.pool.get('ir.property')
3498 # Check if the records are used as default properties.
3499 domain = [('res_id', '=', False),
3500 ('value_reference', 'in', ['%s,%s' % (self._name, i) for i in ids]),
3502 if ir_property.search(cr, uid, domain, context=context):
3503 raise except_orm(_('Error'), _('Unable to delete this document because it is used as a default property'))
3505 # Delete the records' properties.
3506 property_ids = ir_property.search(cr, uid, [('res_id', 'in', ['%s,%s' % (self._name, i) for i in ids])], context=context)
3507 ir_property.unlink(cr, uid, property_ids, context=context)
3509 self.delete_workflow(cr, uid, ids, context=context)
3511 self.check_access_rule(cr, uid, ids, 'unlink', context=context)
3512 pool_model_data = self.pool.get('ir.model.data')
3513 ir_values_obj = self.pool.get('ir.values')
3514 for sub_ids in cr.split_for_in_conditions(ids):
3515 cr.execute('delete from ' + self._table + ' ' \
3516 'where id IN %s', (sub_ids,))
3518 # Removing the ir_model_data reference if the record being deleted is a record created by xml/csv file,
3519 # as these are not connected with real database foreign keys, and would be dangling references.
3520 # Note: following steps performed as admin to avoid access rights restrictions, and with no context
3521 # to avoid possible side-effects during admin calls.
3522 # Step 1. Calling unlink of ir_model_data only for the affected IDs
3523 reference_ids = pool_model_data.search(cr, SUPERUSER_ID, [('res_id','in',list(sub_ids)),('model','=',self._name)])
3524 # Step 2. Marching towards the real deletion of referenced records
3526 pool_model_data.unlink(cr, SUPERUSER_ID, reference_ids)
3528 # For the same reason, removing the record relevant to ir_values
3529 ir_value_ids = ir_values_obj.search(cr, uid,
3530 ['|',('value','in',['%s,%s' % (self._name, sid) for sid in sub_ids]),'&',('res_id','in',list(sub_ids)),('model','=',self._name)],
3533 ir_values_obj.unlink(cr, uid, ir_value_ids, context=context)
3535 # invalidate the *whole* cache, since the orm does not handle all
3536 # changes made in the database, like cascading delete!
3537 recs.invalidate_cache()
3539 for order, obj_name, store_ids, fields in result_store:
3540 if obj_name == self._name:
3541 effective_store_ids = set(store_ids) - set(ids)
3543 effective_store_ids = store_ids
3544 if effective_store_ids:
3545 obj = self.pool[obj_name]
3546 cr.execute('select id from '+obj._table+' where id IN %s', (tuple(effective_store_ids),))
3547 rids = map(lambda x: x[0], cr.fetchall())
3549 obj._store_set_values(cr, uid, rids, fields, context)
3551 # recompute new-style fields
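# Hedged usage sketch (not part of the original source): deleting records
# through the old-style API, assuming a hypothetical model and existing ids.
#
#     partner_obj = registry['res.partner']
#     partner_obj.unlink(cr, uid, [7, 8], context=context)
#     # raises AccessError if the user lacks unlink rights or violates a record rule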
3560 def write(self, vals):
3563 Updates all records in the current set with the provided values.
3565 :param dict vals: fields to update and the value to set on them e.g::
3567 {'foo': 1, 'bar': "Qux"}
3569 will set the field ``foo`` to ``1`` and the field ``bar`` to
3570 ``"Qux"`` if those are valid (otherwise it will trigger an error).
3572 :raise AccessError: * if user has no write rights on the requested object
3573 * if user tries to bypass access rules for write on the requested object
3574 :raise ValidateError: if user tries to enter an invalid value for a field that is not in the selection
3575 :raise UserError: if a loop would be created in a hierarchy of objects as a result of the operation (such as setting an object as its own parent)
3577 .. _openerp/models/relationals/format:
3579 .. note:: Relational fields use a special "commands" format to manipulate their values
3581 This format is a list of command triplets executed sequentially,
3582 possible command triplets are:
3584 ``(0, _, values: dict)``
3585 links to a new record created from the provided values
3586 ``(1, id, values: dict)``
3587 updates the already-linked record of id ``id`` with the
3590 unlinks and deletes the linked record of id ``id``
3592 unlinks the linked record of id ``id`` without deleting it
3594 links to an existing record of id ``id``
3596 unlinks all records in the relation, equivalent to using
3597 the command ``3`` on every linked record
3599 replaces the existing list of linked records by the provided
3600 ones, equivalent to using ``5`` then ``4`` for each id in
3603 (in command triplets, ``_`` values are ignored and can be
3604 anything, generally ``0`` or ``False``)
3606 Any command can be used on :class:`~openerp.fields.Many2many`;
3607 only ``0``, ``1`` and ``2`` can be used on
3608 :class:`~openerp.fields.One2many`.
3613 self._check_concurrency(self._ids)
3614 self.check_access_rights('write')
3616 # No user-driven update of these columns
3617 for field in itertools.chain(MAGIC_COLUMNS, ('parent_left', 'parent_right')):
3618 vals.pop(field, None)
3620 # split up fields into old-style and pure new-style ones
3621 old_vals, new_vals, unknown = {}, {}, []
3622 for key, val in vals.iteritems():
3623 if key in self._columns:
3625 elif key in self._fields:
3631 _logger.warning("%s.write() with unknown fields: %s", self._name, ', '.join(sorted(unknown)))
3633 # write old-style fields with (low-level) method _write
3635 self._write(old_vals)
3637 # put the values of pure new-style fields into cache, and inverse them
3640 record._cache.update(record._convert_to_cache(new_vals, update=True))
3641 for key in new_vals:
3642 self._fields[key].determine_inverse(self)
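# Illustrative example (added; not in the original source): a new-style
# write() using the relational "commands" format documented above, on a
# hypothetical record with a one2many field 'child_ids' and a many2many
# field 'category_id'.
#
#     partner.write({
#         'name': 'Acme & Co',
#         'child_ids': [
#             (0, 0, {'name': 'New contact'}),      # create and link a new record
#             (1, 42, {'email': 'x@example.com'}),  # update the linked record 42
#             (2, 43, False),                       # unlink and delete record 43
#         ],
#         'category_id': [(6, 0, [1, 5])],          # replace the whole relation
#     })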
3646 def _write(self, cr, user, ids, vals, context=None):
3647 # low-level implementation of write()
3652 self.check_field_access_rights(cr, user, 'write', vals.keys())
3653 for field in vals.keys():
3655 if field in self._columns:
3656 fobj = self._columns[field]
3657 elif field in self._inherit_fields:
3658 fobj = self._inherit_fields[field][2]
3665 for group in groups:
3666 module = group.split(".")[0]
3667 grp = group.split(".")[1]
3668 cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name=%s and module=%s and model=%s) and uid=%s", \
3669 (grp, module, 'res.groups', user))
3670 readonly = cr.fetchall()
3671 if readonly[0][0] >= 1:
3678 result = self._store_get_values(cr, user, ids, vals.keys(), context) or []
3680 # for recomputing new-style fields
3681 recs = self.browse(cr, user, ids, context)
3682 modified_fields = list(vals)
3683 if self._log_access:
3684 modified_fields += ['write_date', 'write_uid']
3685 recs.modified(modified_fields)
3687 parents_changed = []
3688 parent_order = self._parent_order or self._order
3689 if self._parent_store and (self._parent_name in vals) and not context.get('defer_parent_store_computation'):
3690 # The parent_left/right computation may take up to
3691 # 5 seconds. No need to recompute the values if the
3692 # parent is the same.
3693 # Note: to respect parent_order, nodes must be processed in
3694 # order, so ``parents_changed`` must be ordered properly.
3695 parent_val = vals[self._parent_name]
3697 query = "SELECT id FROM %s WHERE id IN %%s AND (%s != %%s OR %s IS NULL) ORDER BY %s" % \
3698 (self._table, self._parent_name, self._parent_name, parent_order)
3699 cr.execute(query, (tuple(ids), parent_val))
3701 query = "SELECT id FROM %s WHERE id IN %%s AND (%s IS NOT NULL) ORDER BY %s" % \
3702 (self._table, self._parent_name, parent_order)
3703 cr.execute(query, (tuple(ids),))
3704 parents_changed = map(operator.itemgetter(0), cr.fetchall())
3711 totranslate = context.get('lang', False) and (context['lang'] != 'en_US')
3713 field_column = self._all_columns.get(field) and self._all_columns.get(field).column
3714 if field_column and field_column.deprecated:
3715 _logger.warning('Field %s.%s is deprecated: %s', self._name, field, field_column.deprecated)
3716 if field in self._columns:
3717 if self._columns[field]._classic_write and not (hasattr(self._columns[field], '_fnct_inv')):
3718 if (not totranslate) or not self._columns[field].translate:
3719 upd0.append('"'+field+'"='+self._columns[field]._symbol_set[0])
3720 upd1.append(self._columns[field]._symbol_set[1](vals[field]))
3721 direct.append(field)
3723 upd_todo.append(field)
3725 updend.append(field)
3726 if field in self._columns \
3727 and hasattr(self._columns[field], 'selection') \
3729 self._check_selection_field_value(cr, user, field, vals[field], context=context)
3731 if self._log_access:
3732 upd0.append('write_uid=%s')
3733 upd0.append("write_date=(now() at time zone 'UTC')")
3737 self.check_access_rule(cr, user, ids, 'write', context=context)
3738 for sub_ids in cr.split_for_in_conditions(ids):
3739 cr.execute('update ' + self._table + ' set ' + ','.join(upd0) + ' ' \
3740 'where id IN %s', upd1 + [sub_ids])
3741 if cr.rowcount != len(sub_ids):
3742 raise MissingError(_('One of the records you are trying to modify has already been deleted (Document type: %s).') % self._description)
3747 if self._columns[f].translate:
3748 src_trans = self.pool[self._name].read(cr, user, ids, [f])[0][f]
3751 # Inserting value to DB
3752 context_wo_lang = dict(context, lang=None)
3753 self.write(cr, user, ids, {f: vals[f]}, context=context_wo_lang)
3754 self.pool.get('ir.translation')._set_ids(cr, user, self._name+','+f, 'model', context['lang'], ids, vals[f], src_trans)
3756 # call the 'set' method of fields which are not classic_write
3757 upd_todo.sort(lambda x, y: self._columns[x].priority-self._columns[y].priority)
3759 # default elements in context must be removed when calling a one2many or many2many
3760 rel_context = context.copy()
3761 for c in context.items():
3762 if c[0].startswith('default_'):
3763 del rel_context[c[0]]
3765 for field in upd_todo:
3767 result += self._columns[field].set(cr, self, id, field, vals[field], user, context=rel_context) or []
3769 unknown_fields = updend[:]
3770 for table in self._inherits:
3771 col = self._inherits[table]
3773 for sub_ids in cr.split_for_in_conditions(ids):
3774 cr.execute('select distinct "'+col+'" from "'+self._table+'" ' \
3775 'where id IN %s', (sub_ids,))
3776 nids.extend([x[0] for x in cr.fetchall()])
3780 if self._inherit_fields[val][0] == table:
3782 unknown_fields.remove(val)
3784 self.pool[table].write(cr, user, nids, v, context)
3788 'No such field(s) in model %s: %s.',
3789 self._name, ', '.join(unknown_fields))
3791 # check Python constraints
3792 recs._validate_fields(vals)
3794 # TODO: use _order to set dest at the right position and not first node of parent
3795 # We can't defer parent_store computation because the stored function
3796 # fields that are computed may refer (directly or indirectly) to
3797 # parent_left/right (via a child_of domain)
3800 self.pool._init_parent[self._name] = True
3802 order = self._parent_order or self._order
3803 parent_val = vals[self._parent_name]
3805 clause, params = '%s=%%s' % (self._parent_name,), (parent_val,)
3807 clause, params = '%s IS NULL' % (self._parent_name,), ()
3809 for id in parents_changed:
3810 cr.execute('SELECT parent_left, parent_right FROM %s WHERE id=%%s' % (self._table,), (id,))
3811 pleft, pright = cr.fetchone()
3812 distance = pright - pleft + 1
3814 # Positions of current siblings, to locate proper insertion point;
3815 # this can _not_ be fetched outside the loop, as it needs to be refreshed
3816 # after each update, in case several nodes are sequentially inserted one
3817 # next to the other (i.e computed incrementally)
3818 cr.execute('SELECT parent_right, id FROM %s WHERE %s ORDER BY %s' % (self._table, clause, parent_order), params)
3819 parents = cr.fetchall()
3821 # Find Position of the element
3823 for (parent_pright, parent_id) in parents:
3826 position = parent_pright and parent_pright + 1 or 1
3828 # It's the first node of the parent
3833 cr.execute('select parent_left from '+self._table+' where id=%s', (parent_val,))
3834 position = cr.fetchone()[0] + 1
3836 if pleft < position <= pright:
3837 raise except_orm(_('UserError'), _('Recursivity Detected.'))
3839 if pleft < position:
3840 cr.execute('update '+self._table+' set parent_left=parent_left+%s where parent_left>=%s', (distance, position))
3841 cr.execute('update '+self._table+' set parent_right=parent_right+%s where parent_right>=%s', (distance, position))
3842 cr.execute('update '+self._table+' set parent_left=parent_left+%s, parent_right=parent_right+%s where parent_left>=%s and parent_left<%s', (position-pleft, position-pleft, pleft, pright))
3844 cr.execute('update '+self._table+' set parent_left=parent_left+%s where parent_left>=%s', (distance, position))
3845 cr.execute('update '+self._table+' set parent_right=parent_right+%s where parent_right>=%s', (distance, position))
3846 cr.execute('update '+self._table+' set parent_left=parent_left-%s, parent_right=parent_right-%s where parent_left>=%s and parent_left<%s', (pleft-position+distance, pleft-position+distance, pleft+distance, pright+distance))
3847 recs.invalidate_cache(['parent_left', 'parent_right'])
3849 result += self._store_get_values(cr, user, ids, vals.keys(), context)
3852 # for recomputing new-style fields
3853 recs.modified(modified_fields)
3856 for order, model_name, ids_to_update, fields_to_recompute in result:
3857 key = (model_name, tuple(fields_to_recompute))
3858 done.setdefault(key, {})
3859 # avoid to do several times the same computation
3861 for id in ids_to_update:
3862 if id not in done[key]:
3863 done[key][id] = True
3865 self.pool[model_name]._store_set_values(cr, user, todo, fields_to_recompute, context)
3867 # recompute new-style fields
3868 if context.get('recompute', True):
3871 self.step_workflow(cr, user, ids, context=context)
3875 # TODO: Should set perm to user.xxx
3878 @api.returns('self', lambda value: value.id)
3879 def create(self, vals):
3880 """ create(vals) -> record
3882 Creates a new record for the model.
3884 The new record is initialized using the values from ``vals`` and
3885 if necessary those from :meth:`~.default_get`.
3888 values for the model's fields, as a dictionary::
3890 {'field_name': field_value, ...}
3892 see :meth:`~.write` for details
3893 :return: new record created
3894 :raise AccessError: * if user has no create rights on the requested object
3895 * if user tries to bypass access rules for create on the requested object
3896 :raise ValidateError: if user tries to enter an invalid value for a field that is not in the selection
3897 :raise UserError: if a loop would be created in a hierarchy of objects as a result of the operation (such as setting an object as its own parent)
3899 self.check_access_rights('create')
3901 # add missing defaults, and drop fields that may not be set by user
3902 vals = self._add_missing_default_values(vals)
3903 for field in itertools.chain(MAGIC_COLUMNS, ('parent_left', 'parent_right')):
3904 vals.pop(field, None)
3906 # split up fields into old-style and pure new-style ones
3907 old_vals, new_vals, unknown = {}, {}, []
3908 for key, val in vals.iteritems():
3909 if key in self._all_columns:
3911 elif key in self._fields:
3917 _logger.warning("%s.create() with unknown fields: %s", self._name, ', '.join(sorted(unknown)))
3919 # create record with old-style fields
3920 record = self.browse(self._create(old_vals))
3922 # put the values of pure new-style fields into cache, and inverse them
3923 record._cache.update(record._convert_to_cache(new_vals))
3924 for key in new_vals:
3925 self._fields[key].determine_inverse(record)
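# Hedged usage sketch (not in the original source): creating a record with
# the new-style API; values missing from `vals` fall back to default_get().
#
#     partner = env['res.partner'].create({'name': 'Acme', 'is_company': True})
#     assert partner.id    # the returned record is browsable immediately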
3929 def _create(self, cr, user, vals, context=None):
3930 # low-level implementation of create()
3934 if self.is_transient():
3935 self._transient_vacuum(cr, user)
3938 for v in self._inherits:
3939 if self._inherits[v] not in vals:
3942 tocreate[v] = {'id': vals[self._inherits[v]]}
3945 # list of column assignments defined as tuples like:
3946 # (column_name, format_string, column_value)
3947 # (column_name, sql_formula)
3948 # Those tuples will be used by the string formatting for the INSERT statement below.
3950 ('id', "nextval('%s')" % self._sequence),
3955 for v in vals.keys():
3956 if v in self._inherit_fields and v not in self._columns:
3957 (table, col, col_detail, original_parent) = self._inherit_fields[v]
3958 tocreate[table][v] = vals[v]
3961 if (v not in self._inherit_fields) and (v not in self._columns):
3963 unknown_fields.append(v)
3966 'No such field(s) in model %s: %s.',
3967 self._name, ', '.join(unknown_fields))
3969 for table in tocreate:
3970 if self._inherits[table] in vals:
3971 del vals[self._inherits[table]]
3973 record_id = tocreate[table].pop('id', None)
3975 if record_id is None or not record_id:
3976 record_id = self.pool[table].create(cr, user, tocreate[table], context=context)
3978 self.pool[table].write(cr, user, [record_id], tocreate[table], context=context)
3980 updates.append((self._inherits[table], '%s', record_id))
3982 # Start: Set bool fields to False if they are not touched (to make search more powerful)
3983 bool_fields = [x for x in self._columns.keys() if self._columns[x]._type=='boolean']
3985 for bool_field in bool_fields:
3986 if bool_field not in vals:
3987 vals[bool_field] = False
3989 for field in vals.keys():
3991 if field in self._columns:
3992 fobj = self._columns[field]
3994 fobj = self._inherit_fields[field][2]
4000 for group in groups:
4001 module = group.split(".")[0]
4002 grp = group.split(".")[1]
4003 cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name='%s' and module='%s' and model='%s') and uid=%s" % \
4004 (grp, module, 'res.groups', user))
4005 readonly = cr.fetchall()
4006 if readonly[0][0] >= 1:
4009 elif readonly[0][0] == 0:
4017 current_field = self._columns[field]
4018 if current_field._classic_write:
4019 updates.append((field, '%s', current_field._symbol_set[1](vals[field])))
4021 # for the function fields that receive a value, we set them directly in the database
4022 # (they may be required), but we also need to trigger the _fnct_inv()
4023 if (hasattr(current_field, '_fnct_inv')) and not isinstance(current_field, fields.related):
4024 # TODO: this way of special-casing related fields is really creepy, but it shouldn't be changed
4025 # one week before the release candidate. The only good way to handle this correctly seems to be to add an
4026 # attribute making a field 'really readonly' and thus totally ignored by create()... otherwise,
4027 # if, for example, the related field has a default value (for usability), then _fnct_inv is called and it
4028 # may raise an access rights error. Changing this is too big a change for now, and is thus postponed
4029 # until after the release; but, definitively, the behavior shouldn't differ between related and function fields.
4031 upd_todo.append(field)
4033 # TODO: this `if` statement should be removed because there is no good reason to special-case related
4034 # fields. See the TODO comment above for further explanation.
4035 if not isinstance(current_field, fields.related):
4036 upd_todo.append(field)
4037 if field in self._columns \
4038 and hasattr(current_field, 'selection') \
4040 self._check_selection_field_value(cr, user, field, vals[field], context=context)
4041 if self._log_access:
4042 updates.append(('create_uid', '%s', user))
4043 updates.append(('write_uid', '%s', user))
4044 updates.append(('create_date', "(now() at time zone 'UTC')"))
4045 updates.append(('write_date', "(now() at time zone 'UTC')"))
4047 # the list of tuples used in this formatting corresponds to
4048 # tuple(field_name, format, value)
4049 # In some cases, for example (id, create_date, write_date), we do not
4050 # need to read the third value of the tuple, because the real value is
4051 # encoded in the second value (the format).
4053 """INSERT INTO "%s" (%s) VALUES(%s) RETURNING id""" % (
4055 ', '.join('"%s"' % u[0] for u in updates),
4056 ', '.join(u[1] for u in updates)
4058 tuple([u[2] for u in updates if len(u) > 2])
4061 id_new, = cr.fetchone()
4062 recs = self.browse(cr, user, id_new, context)
4063 upd_todo.sort(lambda x, y: self._columns[x].priority-self._columns[y].priority)
4065 if self._parent_store and not context.get('defer_parent_store_computation'):
4067 self.pool._init_parent[self._name] = True
4069 parent = vals.get(self._parent_name, False)
4071 cr.execute('select parent_right from '+self._table+' where '+self._parent_name+'=%s order by '+(self._parent_order or self._order), (parent,))
4073 result_p = cr.fetchall()
4074 for (pleft,) in result_p:
4079 cr.execute('select parent_left from '+self._table+' where id=%s', (parent,))
4080 pleft_old = cr.fetchone()[0]
4083 cr.execute('select max(parent_right) from '+self._table)
4084 pleft = cr.fetchone()[0] or 0
4085 cr.execute('update '+self._table+' set parent_left=parent_left+2 where parent_left>%s', (pleft,))
4086 cr.execute('update '+self._table+' set parent_right=parent_right+2 where parent_right>%s', (pleft,))
4087 cr.execute('update '+self._table+' set parent_left=%s,parent_right=%s where id=%s', (pleft+1, pleft+2, id_new))
4088 recs.invalidate_cache(['parent_left', 'parent_right'])
4090 # default elements in context must be removed when calling a one2many or many2many
4091 rel_context = context.copy()
4092 for c in context.items():
4093 if c[0].startswith('default_'):
4094 del rel_context[c[0]]
4097 for field in upd_todo:
4098 result += self._columns[field].set(cr, self, id_new, field, vals[field], user, rel_context) or []
4100 # check Python constraints
4101 recs._validate_fields(vals)
4103 # invalidate and mark new-style fields to recompute
4104 modified_fields = list(vals)
4105 if self._log_access:
4106 modified_fields += ['create_uid', 'create_date', 'write_uid', 'write_date']
4107 recs.modified(modified_fields)
4109 if context.get('recompute', True):
4110 result += self._store_get_values(cr, user, [id_new],
4111 list(set(vals.keys() + self._inherits.values())),
4115 for order, model_name, ids, fields2 in result:
4116 if not (model_name, ids, fields2) in done:
4117 self.pool[model_name]._store_set_values(cr, user, ids, fields2, context)
4118 done.append((model_name, ids, fields2))
4119 # recompute new-style fields
4122 if self._log_create and context.get('recompute', True):
4123 message = self._description + \
4125 self.name_get(cr, user, [id_new], context=context)[0][1] + \
4126 "' " + _("created.")
4127 self.log(cr, user, id_new, message, True, context=context)
4129 self.check_access_rule(cr, user, [id_new], 'create', context=context)
4130 self.create_workflow(cr, user, [id_new], context=context)
4133 def _store_get_values(self, cr, uid, ids, fields, context):
4134 """Returns an ordered list of fields.function to call due to
4135 an update operation on ``fields`` of records with ``ids``,
4136 obtained by calling the 'store' triggers of these fields,
4137 as set up by their 'store' attribute.
4139 :return: [(priority, model_name, [record_ids,], [function_fields,])]
4141 if fields is None: fields = []
4142 stored_functions = self.pool._store_function.get(self._name, [])
4144 # use indexed names for the details of the stored_functions:
4145 model_name_, func_field_to_compute_, target_ids_func_, trigger_fields_, priority_ = range(5)
4147 # only keep store triggers that should be triggered for the ``fields``
4149 triggers_to_compute = (
4150 f for f in stored_functions
4151 if not f[trigger_fields_] or set(fields).intersection(f[trigger_fields_])
4155 target_id_results = {}
4156 for store_trigger in triggers_to_compute:
4157 target_func_id_ = id(store_trigger[target_ids_func_])
4158 if target_func_id_ not in target_id_results:
4159 # use admin user for accessing objects having rules defined on store fields
4160 target_id_results[target_func_id_] = [i for i in store_trigger[target_ids_func_](self, cr, SUPERUSER_ID, ids, context) if i]
4161 target_ids = target_id_results[target_func_id_]
4163 # the compound key must consider the priority and model name
4164 key = (store_trigger[priority_], store_trigger[model_name_])
4165 for target_id in target_ids:
4166 to_compute_map.setdefault(key, {}).setdefault(target_id,set()).add(tuple(store_trigger))
4168 # Here to_compute_map looks like:
4169 # { (10, 'model_a') : { target_id1: [ (trigger_1_tuple, trigger_2_tuple) ], ... }
4170 # (20, 'model_a') : { target_id2: [ (trigger_3_tuple, trigger_4_tuple) ], ... }
4171 # (99, 'model_a') : { target_id1: [ (trigger_5_tuple, trigger_6_tuple) ], ... }
4174 # Now we need to generate the batch function calls list
4176 # { (10, 'model_a') : [(10, 'model_a', [record_ids,], [function_fields,])] }
4178 for ((priority,model), id_map) in to_compute_map.iteritems():
4179 trigger_ids_maps = {}
4180 # function_ids_maps =
4181 # { (function_1_tuple, function_2_tuple) : [target_id1, target_id2, ..] }
4182 for target_id, triggers in id_map.iteritems():
4183 trigger_ids_maps.setdefault(tuple(triggers), []).append(target_id)
4184 for triggers, target_ids in trigger_ids_maps.iteritems():
4185 call_map.setdefault((priority,model),[]).append((priority, model, target_ids,
4186 [t[func_field_to_compute_] for t in triggers]))
4189 result = reduce(operator.add, (call_map[k] for k in sorted(call_map)))
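# Shape of the value computed above (illustrative sample with made-up model
# and field names, not taken from the original source):
#
#     [(10, 'account.move', [1, 2, 3], ['amount_total']),
#      (20, 'account.move.line', [7], ['balance', 'debit'])]
#
# i.e. one batch per (priority, model, trigger set), ready to be fed to
# _store_set_values().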
4192 def _store_set_values(self, cr, uid, ids, fields, context):
4193 """Calls the fields.function's "implementation function" for all ``fields``, on records with ``ids`` (taking care of
4194 respecting ``multi`` attributes), and stores the resulting values in the database directly."""
4199 if self._log_access:
4200 cr.execute('select id,write_date from '+self._table+' where id IN %s', (tuple(ids),))
4204 field_dict.setdefault(r[0], [])
4205 res_date = time.strptime((r[1])[:19], '%Y-%m-%d %H:%M:%S')
4206 write_date = datetime.datetime.fromtimestamp(time.mktime(res_date))
4207 for i in self.pool._store_function.get(self._name, []):
4209 up_write_date = write_date + datetime.timedelta(hours=i[5])
4210 if datetime.datetime.now() < up_write_date:
4212 field_dict[r[0]].append(i[1])
4218 if self._columns[f]._multi not in keys:
4219 keys.append(self._columns[f]._multi)
4220 todo.setdefault(self._columns[f]._multi, [])
4221 todo[self._columns[f]._multi].append(f)
4225 # use admin user for accessing objects having rules defined on store fields
4226 result = self._columns[val[0]].get(cr, self, ids, val, SUPERUSER_ID, context=context)
4227 for id, value in result.items():
4229 for f in value.keys():
4230 if f in field_dict[id]:
4237 if self._columns[v]._type == 'many2one':
4239 value[v] = value[v][0]
4242 upd0.append('"'+v+'"='+self._columns[v]._symbol_set[0])
4243 upd1.append(self._columns[v]._symbol_set[1](value[v]))
4246 cr.execute('update "' + self._table + '" set ' + \
4247 ','.join(upd0) + ' where id = %s', upd1)
4251 # use admin user for accessing objects having rules defined on store fields
4252 result = self._columns[f].get(cr, self, ids, f, SUPERUSER_ID, context=context)
4253 for r in result.keys():
4255 if r in field_dict.keys():
4256 if f in field_dict[r]:
4258 for id, value in result.items():
4259 if self._columns[f]._type == 'many2one':
4264 cr.execute('update "' + self._table + '" set ' + \
4265 '"'+f+'"='+self._columns[f]._symbol_set[0] + ' where id = %s', (self._columns[f]._symbol_set[1](value), id))
4267 # invalidate and mark new-style fields to recompute
4268 self.browse(cr, uid, ids, context).modified(fields)
4272 # TODO: improve handling of NULL
4273 def _where_calc(self, cr, user, domain, active_test=True, context=None):
4274 """Computes the WHERE clause needed to implement an OpenERP domain.
4275 :param domain: the domain to compute
4277 :param active_test: whether the default filtering of records with ``active``
4278 field set to ``False`` should be applied.
4279 :return: the query expressing the given domain as provided in domain
4280 :rtype: osv.query.Query
4285 # if the object has a field named 'active', filter out all inactive
4286 # records unless they were explicitly asked for
4287 if 'active' in self._all_columns and (active_test and context.get('active_test', True)):
4289 # the item[0] trick below works for domain items and '&'/'|'/'!'
4291 if not any(item[0] == 'active' for item in domain):
4292 domain.insert(0, ('active', '=', 1))
4294 domain = [('active', '=', 1)]
4297 e = expression.expression(cr, user, domain, self, context)
4298 tables = e.get_tables()
4299 where_clause, where_params = e.to_sql()
4300 where_clause = where_clause and [where_clause] or []
4302 where_clause, where_params, tables = [], [], ['"%s"' % self._table]
4304 return Query(tables, where_clause, where_params)
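# Minimal sketch (assumption, not part of the original source): turning a
# domain into SQL fragments through _where_calc(), for a hypothetical model.
#
#     query = model._where_calc(cr, uid, [('name', 'like', 'A%')])
#     from_clause, where_clause, params = query.get_sql()
#     # where_clause is a parameterized SQL condition, params its values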
4306 def _check_qorder(self, word):
4307 if not regex_order.match(word):
4308 raise except_orm(_('AccessError'), _('Invalid "order" specified. A valid "order" specification is a comma-separated list of valid field names (optionally followed by asc/desc for the direction)'))
4311 def _apply_ir_rules(self, cr, uid, query, mode='read', context=None):
4312 """Add what's missing in ``query`` to implement all appropriate ir.rules
4313 (using the ``model_name``'s rules or the current model's rules if ``model_name`` is None)
4315 :param query: the current query object
4317 if uid == SUPERUSER_ID:
4320 def apply_rule(added_clause, added_params, added_tables, parent_model=None):
4321 """ :param parent_model: name of the parent model, if the added
4322 clause comes from a parent model
4326 # as inherited rules are being applied, we need to add the missing JOIN
4327 # to reach the parent table (if it was not JOINed yet in the query)
4328 parent_alias = self._inherits_join_add(self, parent_model, query)
4329 # inherited rules are applied on the external table -> need to get the alias and replace
4330 parent_table = self.pool[parent_model]._table
4331 added_clause = [clause.replace('"%s"' % parent_table, '"%s"' % parent_alias) for clause in added_clause]
4332 # change references to parent_table to parent_alias, because we now use the alias to refer to the table
4334 for table in added_tables:
4335 # table is just a table name -> switch to the full alias
4336 if table == '"%s"' % parent_table:
4337 new_tables.append('"%s" as "%s"' % (parent_table, parent_alias))
4338 # table is already a full statement -> replace the reference to the table with its alias (this is correct given the way aliases are generated)
4340 new_tables.append(table.replace('"%s"' % parent_table, '"%s"' % parent_alias))
4341 added_tables = new_tables
4342 query.where_clause += added_clause
4343 query.where_clause_params += added_params
4344 for table in added_tables:
4345 if table not in query.tables:
4346 query.tables.append(table)
4350 # apply main rules on the object
4351 rule_obj = self.pool.get('ir.rule')
4352 rule_where_clause, rule_where_clause_params, rule_tables = rule_obj.domain_get(cr, uid, self._name, mode, context=context)
4353 apply_rule(rule_where_clause, rule_where_clause_params, rule_tables)
4355 # apply ir.rules from the parents (through _inherits)
4356 for inherited_model in self._inherits:
4357 rule_where_clause, rule_where_clause_params, rule_tables = rule_obj.domain_get(cr, uid, inherited_model, mode, context=context)
4358 apply_rule(rule_where_clause, rule_where_clause_params, rule_tables,
4359 parent_model=inherited_model)
4361 def _generate_m2o_order_by(self, order_field, query):
4363 Add possibly missing JOIN to ``query`` and generate the ORDER BY clause for m2o fields,
4364 either native m2o fields or function/related fields that are stored, including
4365 intermediate JOINs for inheritance if required.
4367 :return: the qualified field name to use in an ORDER BY clause to sort by ``order_field``
4369 if order_field not in self._columns and order_field in self._inherit_fields:
4370 # also add missing joins for reaching the table containing the m2o field
4371 qualified_field = self._inherits_join_calc(order_field, query)
4372 order_field_column = self._inherit_fields[order_field][2]
4374 qualified_field = '"%s"."%s"' % (self._table, order_field)
4375 order_field_column = self._columns[order_field]
4377 assert order_field_column._type == 'many2one', 'Invalid field passed to _generate_m2o_order_by()'
4378 if not order_field_column._classic_write and not getattr(order_field_column, 'store', False):
4379 _logger.debug("Many2one function/related fields must be stored " \
4380 "to be used as ordering fields! Ignoring sorting for %s.%s",
4381 self._name, order_field)
4384 # figure out the applicable order_by for the m2o
4385 dest_model = self.pool[order_field_column._obj]
4386 m2o_order = dest_model._order
4387 if not regex_order.match(m2o_order):
4388 # _order is complex, can't use it here, so we default to _rec_name
4389 m2o_order = dest_model._rec_name
4391 # extract the field names, to be able to qualify them and add desc/asc
4393 for order_part in m2o_order.split(","):
4394 m2o_order_list.append(order_part.strip().split(" ", 1)[0].strip())
4395 m2o_order = m2o_order_list
4397 # Join the dest m2o table if it's not joined yet. We use [LEFT] OUTER join here
4398 # as we don't want to exclude results that have NULL values for the m2o
4399 src_table, src_field = qualified_field.replace('"', '').split('.', 1)
4400 dst_alias, dst_alias_statement = query.add_join((src_table, dest_model._table, src_field, 'id', src_field), implicit=False, outer=True)
4401 qualify = lambda field: '"%s"."%s"' % (dst_alias, field)
4402 return map(qualify, m2o_order) if isinstance(m2o_order, list) else qualify(m2o_order)
4404 def _generate_order_by(self, order_spec, query):
4406 Attempt to construct an appropriate ORDER BY clause based on order_spec, which must be
4407 a comma-separated list of valid field names, optionally followed by an ASC or DESC direction.
4409 :raise: except_orm in case order_spec is malformed
4411 order_by_clause = ''
4412 order_spec = order_spec or self._order
4414 order_by_elements = []
4415 self._check_qorder(order_spec)
4416 for order_part in order_spec.split(','):
4417 order_split = order_part.strip().split(' ')
4418 order_field = order_split[0].strip()
4419 order_direction = order_split[1].strip() if len(order_split) == 2 else ''
4422 if order_field == 'id':
4423 order_by_elements.append('"%s"."%s" %s' % (self._table, order_field, order_direction))
4424 elif order_field in self._columns:
4425 order_column = self._columns[order_field]
4426 if order_column._classic_read:
4427 inner_clause = '"%s"."%s"' % (self._table, order_field)
4428 elif order_column._type == 'many2one':
4429 inner_clause = self._generate_m2o_order_by(order_field, query)
4431 continue # ignore non-readable or "non-joinable" fields
4432 elif order_field in self._inherit_fields:
4433 parent_obj = self.pool[self._inherit_fields[order_field][3]]
4434 order_column = parent_obj._columns[order_field]
4435 if order_column._classic_read:
4436 inner_clause = self._inherits_join_calc(order_field, query)
4437 elif order_column._type == 'many2one':
4438 inner_clause = self._generate_m2o_order_by(order_field, query)
4440 continue # ignore non-readable or "non-joinable" fields
4442 raise ValueError(_("Sorting field %s not found on model %s") % (order_field, self._name))
4443 if order_column and order_column._type == 'boolean':
4444 inner_clause = "COALESCE(%s, false)" % inner_clause
4446 if isinstance(inner_clause, list):
4447 for clause in inner_clause:
4448 order_by_elements.append("%s %s" % (clause, order_direction))
4450 order_by_elements.append("%s %s" % (inner_clause, order_direction))
4451 if order_by_elements:
4452 order_by_clause = ",".join(order_by_elements)
4454 return order_by_clause and (' ORDER BY %s ' % order_by_clause) or ''
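# Illustrative example (assumption, not in the original source): a valid
# order_spec and, roughly, the clause it produces for a model stored in the
# 'res_partner' table.
#
#     self._generate_order_by('name desc, id', query)
#     # -> ' ORDER BY "res_partner"."name" desc,"res_partner"."id"  '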
4456 def _search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False, access_rights_uid=None):
4458 Private implementation of the search() method, allowing the caller to specify the uid to use for the access rights check.
4459 This is useful, for example, when filling in the selection list for a drop-down while avoiding access rights errors,
4460 by specifying ``access_rights_uid=1`` to bypass the access rights check (but not ir.rules!).
4461 This is acceptable security-wise because this method is private and not callable through XML-RPC.
4463 :param access_rights_uid: optional user ID to use when checking access rights
4464 (not for ir.rules, this is only for ir.model.access)
4468 self.check_access_rights(cr, access_rights_uid or user, 'read')
4470 # For transient models, restrict access to the current user, except for the super-user
4471 if self.is_transient() and self._log_access and user != SUPERUSER_ID:
4472 args = expression.AND(([('create_uid', '=', user)], args or []))
4474 query = self._where_calc(cr, user, args, context=context)
4475 self._apply_ir_rules(cr, user, query, 'read', context=context)
4476 order_by = self._generate_order_by(order, query)
4477 from_clause, where_clause, where_clause_params = query.get_sql()
4479 where_str = where_clause and (" WHERE %s" % where_clause) or ''
4482 # Ignore order, limit and offset when just counting, they don't make sense and could
4484 query_str = 'SELECT count(1) FROM ' + from_clause + where_str
4485 cr.execute(query_str, where_clause_params)
4489 limit_str = limit and ' limit %d' % limit or ''
4490 offset_str = offset and ' offset %d' % offset or ''
4491 query_str = 'SELECT "%s".id FROM ' % self._table + from_clause + where_str + order_by + limit_str + offset_str
4492 cr.execute(query_str, where_clause_params)
4495 # TDE note: with auto_join, we could have several lines about the same result
4496 # i.e. a lead with several unread messages; we uniquify the result using
4497 # a fast way to do it while preserving order (http://www.peterbe.com/plog/uniqifiers-benchmark)
4498 def _uniquify_list(seq):
4500 return [x for x in seq if x not in seen and not seen.add(x)]
4502 return _uniquify_list([x[0] for x in res])
4504 # returns the different values ever entered for one field
4505 # this is used, for example, in the client when the user hits enter on
4507 def distinct_field_get(self, cr, uid, field, value, args=None, offset=0, limit=None):
4510 if field in self._inherit_fields:
4511 return self.pool[self._inherit_fields[field][0]].distinct_field_get(cr, uid, field, value, args, offset, limit)
4513 return self._columns[field].search(cr, self, args, field, value, offset, limit, uid)
4515 def copy_data(self, cr, uid, id, default=None, context=None):
4517 Copy the given record's data with all its field values
4519 :param cr: database cursor
4520 :param uid: current user id
4521 :param id: id of the record to copy
4522 :param default: field values to override in the original values of the copied record
4523 :type default: dictionary
4524 :param context: context arguments, like lang, time zone
4525 :type context: dictionary
4526 :return: dictionary containing all the field values
4532 # avoid recursion through already copied records in case of circular relationship
4533 seen_map = context.setdefault('__copy_data_seen', {})
4534 if id in seen_map.setdefault(self._name, []):
4536 seen_map[self._name].append(id)
4540 if 'state' not in default:
4541 if 'state' in self._defaults:
4542 if callable(self._defaults['state']):
4543 default['state'] = self._defaults['state'](self, cr, uid, context)
4545 default['state'] = self._defaults['state']
4547 # build a blacklist of fields that should not be copied
4548 blacklist = set(MAGIC_COLUMNS + ['parent_left', 'parent_right'])
4549 def blacklist_given_fields(obj):
4550 # blacklist the fields that are given by inheritance
4551 for other, field_to_other in obj._inherits.items():
4552 blacklist.add(field_to_other)
4553 if field_to_other in default:
4554 # all the fields of 'other' are given by the record: default[field_to_other],
4555 # except the ones redefined in self
4556 blacklist.update(set(self.pool[other]._all_columns) - set(self._columns))
4558 blacklist_given_fields(self.pool[other])
4559 # blacklist deprecated fields
4560 for name, field in obj._columns.items():
4561 if field.deprecated:
4564 blacklist_given_fields(self)
4567 fields_to_copy = dict((f,fi) for f, fi in self._all_columns.iteritems()
4570 if f not in blacklist)
4572 data = self.read(cr, uid, [id], fields_to_copy.keys(), context=context)
4576 raise IndexError(_("Record #%d of %s not found, cannot copy!") % (id, self._name))
4579 for f, colinfo in fields_to_copy.iteritems():
4580 field = colinfo.column
4581 if field._type == 'many2one':
4582 res[f] = data[f] and data[f][0]
4583 elif field._type == 'one2many':
4584 other = self.pool[field._obj]
4585 # duplicate following the order of the ids because we'll rely on
4586 # it later for copying translations in copy_translations()!
4587 lines = [other.copy_data(cr, uid, line_id, context=context) for line_id in sorted(data[f])]
4588 # the lines are duplicated using the wrong (old) parent, but then
4589 # are reassigned to the correct one thanks to the (0, 0, ...)
4590 res[f] = [(0, 0, line) for line in lines if line]
4591 elif field._type == 'many2many':
4592 res[f] = [(6, 0, data[f])]
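# Illustrative result (not in the original source): for a record with a
# many2one 'company_id', a one2many 'child_ids' and a many2many
# 'category_id', copy_data() returns something along the lines of:
#
#     {'name': 'Acme',
#      'company_id': 1,                              # m2o collapsed to its id
#      'child_ids': [(0, 0, {...}), (0, 0, {...})],  # o2m re-created via command 0
#      'category_id': [(6, 0, [3, 9])]}              # m2m replaced via command 6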
4598 def copy_translations(self, cr, uid, old_id, new_id, context=None):
4602 # avoid recursion through already copied records in case of circular relationship
4603 seen_map = context.setdefault('__copy_translations_seen',{})
4604 if old_id in seen_map.setdefault(self._name,[]):
4606 seen_map[self._name].append(old_id)
4608 trans_obj = self.pool.get('ir.translation')
4609 # TODO it seems fields_get can be replaced by _all_columns (no need for translation)
4610 fields = self.fields_get(cr, uid, context=context)
4612 for field_name, field_def in fields.items():
4613 # removing the lang to compare untranslated values
4614 context_wo_lang = dict(context, lang=None)
4615 old_record, new_record = self.browse(cr, uid, [old_id, new_id], context=context_wo_lang)
4616 # we must recursively copy the translations for o2o and o2m
4617 if field_def['type'] == 'one2many':
4618 target_obj = self.pool[field_def['relation']]
4619 # here we rely on the order of the ids to match the translations
4620 # as foreseen in copy_data()
4621 old_children = sorted(r.id for r in old_record[field_name])
4622 new_children = sorted(r.id for r in new_record[field_name])
4623 for (old_child, new_child) in zip(old_children, new_children):
4624 target_obj.copy_translations(cr, uid, old_child, new_child, context=context)
4625 # and for translatable fields we keep them for copy
4626 elif field_def.get('translate'):
4627 if field_name in self._columns:
4628 trans_name = self._name + "," + field_name
4629 target_id = new_id
4630 source_id = old_id
4631 elif field_name in self._inherit_fields:
4632 trans_name = self._inherit_fields[field_name][0] + "," + field_name
4633 # get the id of the parent record to set the translation
4634 inherit_field_name = self._inherit_fields[field_name][1]
4635 target_id = new_record[inherit_field_name].id
4636 source_id = old_record[inherit_field_name].id
4637 else:
4638 continue
4640 trans_ids = trans_obj.search(cr, uid, [
4641 ('name', '=', trans_name),
4642 ('res_id', '=', source_id)
4643 ])
4644 user_lang = context.get('lang')
4645 for record in trans_obj.read(cr, uid, trans_ids, context=context):
4647 # remove source to avoid triggering _set_src
4648 del record['source']
4649 record.update({'res_id': target_id})
4650 if user_lang and user_lang == record['lang']:
4651 # 'source' to force the call to _set_src
4652 # 'value' needed if value is changed in copy(), want to see the new_value
4653 record['source'] = old_record[field_name]
4654 record['value'] = new_record[field_name]
4655 trans_obj.create(cr, uid, record, context=context)
4657 @api.returns('self', lambda value: value.id)
4658 def copy(self, cr, uid, id, default=None, context=None):
4659 """ copy(default=None)
4661 Duplicate the record with the given id, updating it with the provided default values.
4663 :param dict default: dictionary of field values to override in the
4664 original values of the copied record, e.g.: ``{'field_name': overridden_value, ...}``
4665 :returns: new record
4670 context = context.copy()
4671 data = self.copy_data(cr, uid, id, default, context)
4672 new_id = self.create(cr, uid, data, context)
4673 self.copy_translations(cr, uid, id, new_id, context)
4674 return new_id
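# Illustrative sketch (not part of the original source): duplicating a record
# with the old-style API while overriding one field; the model name
# 'res.partner' and the field values are examples only.
#
#     partner_obj = self.pool['res.partner']
#     new_id = partner_obj.copy(cr, uid, 42,
#                               default={'name': 'Copy of Agrolait'},
#                               context=context)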
4677 @api.returns('self')
4679 """ exists() -> records
4681 Returns the subset of records in `self` that exist, and marks deleted
4682 records as such in cache. It can be used as a test on records::
4684 if record.exists():
4685 ...
4687 By convention, new records are returned as existing.
4689 ids = filter(None, self._ids) # ids to check in database
4692 query = """SELECT id FROM "%s" WHERE id IN %%s""" % self._table
4693 self._cr.execute(query, (ids,))
4694 ids = ([r[0] for r in self._cr.fetchall()] + # ids in database
4695 [id for id in self._ids if not id]) # new ids
4696 existing = self.browse(ids)
4697 if len(existing) < len(self):
4698 # mark missing records in cache with a failed value
4699 exc = MissingError(_("Record does not exist or has been deleted."))
4700 (self - existing)._cache.update(FailedValue(exc))
4701 return existing
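# Illustrative sketch (not part of the original source): a typical defensive
# use of exists(), dropping ids of records deleted by a concurrent transaction
# before further processing.
#
#     records = records.exists()
#     for record in records:
#         ...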
4703 def check_recursion(self, cr, uid, ids, context=None, parent=None):
4704 _logger.warning("You are using deprecated %s.check_recursion(). Please use the '_check_recursion()' instead!" % \
4706 assert parent is None or parent in self._columns or parent in self._inherit_fields,\
4707 "The 'parent' parameter passed to check_recursion() must be None or a valid field name"
4708 return self._check_recursion(cr, uid, ids, context, parent)
4710 def _check_recursion(self, cr, uid, ids, context=None, parent=None):
4712 Verifies that there is no loop in a hierarchical structure of records,
4713 by following the parent relationship using the **parent** field until a loop
4714 is detected or until a top-level record is found.
4716 :param cr: database cursor
4717 :param uid: current user id
4718 :param ids: list of ids of records to check
4719 :param parent: optional parent field name (default: ``self._parent_name = parent_id``)
4720 :return: **True** if the operation can proceed safely, or **False** if an infinite loop is detected.
4722 if not parent:
4723 parent = self._parent_name
4725 # must ignore 'active' flag, ir.rules, etc. => direct SQL query
4726 query = 'SELECT "%s" FROM "%s" WHERE id = %%s' % (parent, self._table)
4727 for id in ids:
4728 current_id = id
4729 while current_id is not None:
4730 cr.execute(query, (current_id,))
4731 result = cr.fetchone()
4732 current_id = result[0] if result else None
4733 if current_id == id:
4734 return False
4735 return True
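# Illustrative sketch (not part of the original source): _check_recursion is
# typically wired up as an old-style _constraints entry on a hierarchical
# model; the message and the 'parent_id' field name are examples only.
#
#     _constraints = [
#         (lambda self, cr, uid, ids, context=None:
#              self._check_recursion(cr, uid, ids, context=context),
#          'Error! You cannot create a recursive hierarchy.',
#          ['parent_id']),
#     ]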
4737 def _check_m2m_recursion(self, cr, uid, ids, field_name):
4739 Verifies that there is no loop in a hierarchical structure of records,
4740 by following the relationship described by the many2many **field_name**
4741 until a loop is detected or until a top-level record is found.
4743 :param cr: database cursor
4744 :param uid: current user id
4745 :param ids: list of ids of records to check
4746 :param field_name: field to check
4747 :return: **True** if the operation can proceed safely, or **False** if an infinite loop is detected.
4750 field = self._all_columns.get(field_name)
4751 field = field.column if field else None
4752 if not field or field._type != 'many2many' or field._obj != self._name:
4753 # field must be a many2many on itself
4754 raise ValueError('invalid field_name: %r' % (field_name,))
4756 query = 'SELECT distinct "%s" FROM "%s" WHERE "%s" IN %%s' % (field._id2, field._rel, field._id1)
4757 ids_parent = ids[:]
4758 while ids_parent:
4759 ids_parent2 = []
4760 for i in range(0, len(ids_parent), cr.IN_MAX):
4761 j = i + cr.IN_MAX
4762 sub_ids_parent = ids_parent[i:j]
4763 cr.execute(query, (tuple(sub_ids_parent),))
4764 ids_parent2.extend(filter(None, map(lambda x: x[0], cr.fetchall())))
4765 ids_parent = ids_parent2
4766 for i in ids_parent:
4767 if i in ids:
4768 return False
4769 return True
4771 def _get_external_ids(self, cr, uid, ids, *args, **kwargs):
4772 """Retrieve the External ID(s) of any database record.
4774 **Synopsis**: ``_get_external_ids(cr, uid, ids) -> { 'id': ['module.xml_id'] }``
4776 :return: map of ids to the list of their fully qualified External IDs
4777 in the form ``module.key``, or an empty list when there's no External
4778 ID for a record, e.g.::
4780 { 'id': ['module.ext_id', 'module.ext_id_bis'],
4783 ir_model_data = self.pool.get('ir.model.data')
4784 data_ids = ir_model_data.search(cr, uid, [('model', '=', self._name), ('res_id', 'in', ids)])
4785 data_results = ir_model_data.read(cr, uid, data_ids, ['module', 'name', 'res_id'])
4788 # can't use dict.fromkeys() as the list would be shared!
4790 for record in data_results:
4791 result[record['res_id']].append('%(module)s.%(name)s' % record)
4794 def get_external_id(self, cr, uid, ids, *args, **kwargs):
4795 """Retrieve the External ID of any database record, if there
4796 is one. This method works as a possible implementation
4797 for a function field, to be able to add it to any
4798 model object easily, referencing it as ``Model.get_external_id``.
4800 When multiple External IDs exist for a record, only one
4801 of them is returned (randomly).
4803 :return: map of ids to their fully qualified XML ID,
4804 defaulting to an empty string when there's none
4805 (to be usable as a function field),
4808 { 'id': 'module.ext_id',
4811 results = self._get_xml_ids(cr, uid, ids)
4812 for k, v in results.iteritems():
4819 # backwards compatibility
4820 get_xml_id = get_external_id
4821 _get_xml_ids = _get_external_ids
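# Illustrative sketch (not part of the original source): looking up External
# IDs with the old-style API; the model name, id and returned value are
# examples only.
#
#     partner_obj = self.pool['res.partner']
#     partner_obj.get_external_id(cr, uid, [1])
#     # => {1: 'base.main_partner'}   (empty string when no External ID exists)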
4823 def print_report(self, cr, uid, ids, name, data, context=None):
4825 Render the report `name` for the given IDs. The report must be defined
4826 for this model, not another.
4828 report = self.pool['ir.actions.report.xml']._lookup_report(cr, name)
4829 assert self._name == report.table
4830 return report.create(cr, uid, ids, data, context)
4834 def is_transient(cls):
4835 """ Return whether the model is transient.
4837 See :class:`TransientModel`.
4840 return cls._transient
4842 def _transient_clean_rows_older_than(self, cr, seconds):
4843 assert self._transient, "Model %s is not transient, it cannot be vacuumed!" % self._name
4844 # Never delete rows used in last 5 minutes
4845 seconds = max(seconds, 300)
4846 query = ("SELECT id FROM " + self._table + " WHERE"
4847 " COALESCE(write_date, create_date, (now() at time zone 'UTC'))::timestamp"
4848 " < ((now() at time zone 'UTC') - interval %s)")
4849 cr.execute(query, ("%s seconds" % seconds,))
4850 ids = [x[0] for x in cr.fetchall()]
4851 self.unlink(cr, SUPERUSER_ID, ids)
4853 def _transient_clean_old_rows(self, cr, max_count):
4854 # Check how many rows we have in the table
4855 cr.execute("SELECT count(*) AS row_count FROM " + self._table)
4857 if res[0][0] <= max_count:
4858 return # max not reached, nothing to do
4859 self._transient_clean_rows_older_than(cr, 300)
4861 def _transient_vacuum(self, cr, uid, force=False):
4862 """Clean the transient records.
4864 This unlinks old records from the transient model tables whenever the
4865 "_transient_max_count" or "_max_age" conditions (if any) are reached.
4866 Actual cleaning will happen only once every "_transient_check_time" calls.
4867 This means this method can be called frequently (e.g. whenever
4868 a new record is created).
4869 Example with both max_hours and max_count active:
4870 Suppose max_hours = 0.2 (e.g. 12 minutes), max_count = 20, there are 55 rows in the
4871 table, 10 created/changed in the last 5 minutes, an additional 12 created/changed between
4872 5 and 10 minutes ago, the rest created/changed more than 12 minutes ago.
4873 - age based vacuum will leave the 22 rows created/changed in the last 12 minutes
4874 - count based vacuum will wipe out another 12 rows. Not just 2, otherwise each addition
4875 would immediately cause the maximum to be reached again.
4876 - the 10 rows that have been created/changed the last 5 minutes will NOT be deleted
4878 assert self._transient, "Model %s is not transient, it cannot be vacuumed!" % self._name
4879 _transient_check_time = 20 # arbitrary limit on vacuum executions
4880 self._transient_check_count += 1
4881 if not force and (self._transient_check_count < _transient_check_time):
4882 return True # no vacuum cleaning this time
4883 self._transient_check_count = 0
4885 # Age-based expiration
4886 if self._transient_max_hours:
4887 self._transient_clean_rows_older_than(cr, self._transient_max_hours * 60 * 60)
4889 # Count-based expiration
4890 if self._transient_max_count:
4891 self._transient_clean_old_rows(cr, self._transient_max_count)
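# Illustrative sketch (not part of the original source): a transient model that
# opts into both vacuum policies; the model name and the limits are examples.
#
#     class example_wizard(TransientModel):
#         _name = 'example.wizard'
#         _transient_max_count = 200    # count-based expiration threshold
#         _transient_max_hours = 1.0    # age-based expiration threshold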
4895 def resolve_2many_commands(self, cr, uid, field_name, commands, fields=None, context=None):
4896 """ Serializes one2many and many2many commands into record dictionaries
4897 (as if all the records came from the database via a read()). This
4898 method is aimed at onchange methods on one2many and many2many fields.
4900 Because commands might be creation commands, not all record dicts
4901 will contain an ``id`` field. Commands matching an existing record
4902 will have an ``id``.
4904 :param field_name: name of the one2many or many2many field matching the commands
4905 :type field_name: str
4906 :param commands: one2many or many2many commands to execute on ``field_name``
4907 :type commands: list((int|False, int|False, dict|False))
4908 :param fields: list of fields to read from the database, when applicable
4909 :type fields: list(str)
4910 :returns: records in a shape similar to that returned by ``read()``
4911 (except records may be missing the ``id`` field if they don't exist in db)
4914 result = [] # result (list of dict)
4915 record_ids = [] # ids of records to read
4916 updates = {} # {id: dict} of updates on particular records
4918 for command in commands or []:
4919 if not isinstance(command, (list, tuple)):
4920 record_ids.append(command)
4921 elif command[0] == 0:
4922 result.append(command[2])
4923 elif command[0] == 1:
4924 record_ids.append(command[1])
4925 updates.setdefault(command[1], {}).update(command[2])
4926 elif command[0] in (2, 3):
4927 record_ids = [id for id in record_ids if id != command[1]]
4928 elif command[0] == 4:
4929 record_ids.append(command[1])
4930 elif command[0] == 5:
4931 result, record_ids = [], []
4932 elif command[0] == 6:
4933 result, record_ids = [], list(command[2])
4935 # read the records and apply the updates
4936 other_model = self.pool[self._all_columns[field_name].column._obj]
4937 for record in other_model.read(cr, uid, record_ids, fields=fields, context=context):
4938 record.update(updates.get(record['id'], {}))
4939 result.append(record)
4940 return result
4943 # for backward compatibility
4944 resolve_o2m_commands_to_record_dicts = resolve_2many_commands
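# Illustrative sketch (not part of the original source): how x2many commands
# are turned into record dictionaries; the field name 'line_ids' and the
# values are examples only.
#
#     commands = [
#         (0, 0, {'name': 'new line'}),   # creation: kept as-is, no 'id' key
#         (1, 7, {'name': 'renamed'}),    # update: read record 7, then apply the values
#         (4, 8),                         # link: read record 8 unchanged
#     ]
#     self.resolve_2many_commands(cr, uid, 'line_ids', commands,
#                                 fields=['name'], context=context)
#     # => [{'name': 'new line'}, {'id': 7, 'name': 'renamed'}, {'id': 8, 'name': ...}]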
4946 def search_read(self, cr, uid, domain=None, fields=None, offset=0, limit=None, order=None, context=None):
4948 Performs a ``search()`` followed by a ``read()``.
4950 :param cr: database cursor
4951 :param uid: current user id
4952 :param domain: Search domain, see ``args`` parameter in ``search()``. Defaults to an empty domain that will match all records.
4953 :param fields: List of fields to read, see ``fields`` parameter in ``read()``. Defaults to all fields.
4954 :param offset: Number of records to skip, see ``offset`` parameter in ``search()``. Defaults to 0.
4955 :param limit: Maximum number of records to return, see ``limit`` parameter in ``search()``. Defaults to no limit.
4956 :param order: Columns to sort result, see ``order`` parameter in ``search()``. Defaults to no sort.
4957 :param context: context arguments.
4958 :return: List of dictionaries containing the asked fields.
4959 :rtype: List of dictionaries.
4962 record_ids = self.search(cr, uid, domain or [], offset=offset, limit=limit, order=order, context=context)
4966 if fields and fields == ['id']:
4967 # shortcut read if we only want the ids
4968 return [{'id': id} for id in record_ids]
4970 # read() ignores active_test, but it would forward it to any downstream search call
4971 # (e.g. for x2m or function fields), and this is not the desired behavior, the flag
4972 # was presumably only meant for the main search().
4973 # TODO: Move this to read() directly?
4974 read_ctx = dict(context or {})
4975 read_ctx.pop('active_test', None)
4977 result = self.read(cr, uid, record_ids, fields, context=read_ctx)
4978 if len(result) <= 1:
4979 return result
4982 index = dict((r['id'], r) for r in result)
4983 return [index[x] for x in record_ids if x in index]
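# Illustrative sketch (not part of the original source): fetching a few fields
# in a single call; the model name, domain and fields are examples only.
#
#     partner_obj = self.pool['res.partner']
#     partner_obj.search_read(cr, uid,
#                             domain=[('customer', '=', True)],
#                             fields=['name', 'country_id'],
#                             limit=5, order='name', context=context)
#     # => [{'id': 7, 'name': 'Agrolait', 'country_id': (21, 'Belgium')}, ...]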
4985 def _register_hook(self, cr):
4986 """ stuff to do right after the registry is built """
4990 def _patch_method(cls, name, method):
4991 """ Monkey-patch a method for all instances of this model. This replaces
4992 the method called `name` by `method` in the given class.
4993 The original method is then accessible via ``method.origin``, and it
4994 can be restored with :meth:`~._revert_method`.
4999 def do_write(self, values):
5000 # do stuff, and call the original method
5001 return do_write.origin(self, values)
5003 # patch method write of model
5004 model._patch_method('write', do_write)
5006 # this will call do_write
5007 records = model.search([...])
5010 # restore the original method
5011 model._revert_method('write')
5013 origin = getattr(cls, name)
5014 method.origin = origin
5015 # propagate decorators from origin to method, and apply api decorator
5016 wrapped = api.guess(api.propagate(origin, method))
5017 wrapped.origin = origin
5018 setattr(cls, name, wrapped)
5021 def _revert_method(cls, name):
5022 """ Revert the original method called `name` in the given class.
5023 See :meth:`~._patch_method`.
5025 method = getattr(cls, name)
5026 setattr(cls, name, method.origin)
5031 # An instance represents an ordered collection of records in a given
5032 # execution environment. The instance object refers to the environment, and
5033 # the records themselves are represented by their cache dictionary. The 'id'
5034 # of each record is found in its corresponding cache dictionary.
5036 # This design has the following advantages:
5037 # - cache access is direct and thus fast;
5038 # - one can consider records without an 'id' (see new records);
5039 # - the global cache is only an index to "resolve" a record 'id'.
5043 def _browse(cls, env, ids):
5044 """ Create an instance attached to `env`; `ids` is a tuple of record
5047 records = object.__new__(cls)
5050 env.prefetch[cls._name].update(ids)
5054 def browse(self, cr, uid, arg=None, context=None):
5055 ids = _normalize_ids(arg)
5056 #assert all(isinstance(id, IdType) for id in ids), "Browsing invalid ids: %s" % ids
5057 return self._browse(Environment(cr, uid, context or {}), ids)
5060 def browse(self, arg=None):
5061 """ browse([ids]) -> records
5063 Returns a recordset for the ids provided as parameter in the current
5066 Can take no ids, a single id or a sequence of ids.
5068 ids = _normalize_ids(arg)
5069 #assert all(isinstance(id, IdType) for id in ids), "Browsing invalid ids: %s" % ids
5070 return self._browse(self.env, ids)
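# Illustrative sketch (not part of the original source): the accepted argument
# forms of the new-style browse(); the model name and ids are examples only.
#
#     partners = self.env['res.partner']
#     partners.browse()            # empty recordset
#     partners.browse(7)           # one record
#     partners.browse([7, 8, 9])   # recordset of three records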
5073 # Internal properties, for manipulating the instance's implementation
5078 """ List of actual record ids in this recordset (ignores placeholder
5079 ids for records to create)
5081 return filter(None, list(self._ids))
5083 # backward-compatibility with former browse records
5084 _cr = property(lambda self: self.env.cr)
5085 _uid = property(lambda self: self.env.uid)
5086 _context = property(lambda self: self.env.context)
5089 # Conversion methods
5092 def ensure_one(self):
5093 """ Verifies that the current recorset holds a single record. Raises
5094 an exception otherwise.
5098 raise except_orm("ValueError", "Expected singleton: %s" % self)
5100 def with_env(self, env):
5101 """ Returns a new version of this recordset attached to the provided
5104 :type env: :class:`~openerp.api.Environment`
5106 return self._browse(env, self._ids)
5108 def sudo(self, user=SUPERUSER_ID):
5109 """ sudo([user=SUPERUSER])
5111 Returns a new version of this recordset attached to the provided
5114 return self.with_env(self.env(user=user))
5116 def with_context(self, *args, **kwargs):
5117 """ with_context([context][, **overrides]) -> records
5119 Returns a new version of this recordset attached to an extended
5122 The extended context is either the provided ``context`` in which
5123 ``overrides`` are merged or the *current* context in which
5124 ``overrides`` are merged e.g.::
5126 # current context is {'key1': True}
5127 r2 = records.with_context({}, key2=True)
5128 # -> r2._context is {'key2': True}
5129 r2 = records.with_context(key2=True)
5130 # -> r2._context is {'key1': True, 'key2': True}
5132 context = dict(args[0] if args else self._context, **kwargs)
5133 return self.with_env(self.env(context=context))
5135 def _convert_to_cache(self, values, update=False, validate=True):
5136 """ Convert the `values` dictionary into cached values.
5138 :param update: whether the conversion is made for updating `self`;
5139 this is necessary for interpreting the commands of *2many fields
5140 :param validate: whether values must be checked
5142 fields = self._fields
5143 target = self if update else self.browse()
5145 name: fields[name].convert_to_cache(value, target, validate=validate)
5146 for name, value in values.iteritems()
5150 def _convert_to_write(self, values):
5151 """ Convert the `values` dictionary into the format of :meth:`write`. """
5152 fields = self._fields
5154 for name, value in values.iteritems():
5156 value = fields[name].convert_to_write(value)
5157 if not isinstance(value, NewId):
5158 result[name] = value
5162 # Record traversal and update
5165 def _mapped_func(self, func):
5166 """ Apply function `func` on all records in `self`, and return the
5167 result as a list or a recordset (if `func` returns recordsets).
5169 vals = [func(rec) for rec in self]
5170 val0 = vals[0] if vals else func(self)
5171 if isinstance(val0, BaseModel):
5172 return reduce(operator.or_, vals, val0)
5175 def mapped(self, func):
5176 """ Apply `func` on all records in `self`, and return the result as a
5177 list or a recordset (if `func` returns recordsets). In the latter
5178 case, the order of the returned recordset is arbitrary.
5180 :param func: a function or a dot-separated sequence of field names
5182 if isinstance(func, basestring):
5183 recs = self
5184 for name in func.split('.'):
5185 recs = recs._mapped_func(operator.itemgetter(name))
5186 return recs
5187 else:
5188 return self._mapped_func(func)
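# Illustrative sketch (not part of the original source): the two accepted forms
# of mapped(); the recordset and field names are examples only.
#
#     orders.mapped('partner_id.name')          # list of partner names
#     orders.mapped(lambda o: o.amount_total)   # list of values computed per record
#     orders.mapped('order_line')               # union recordset of all lines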
5190 def _mapped_cache(self, name_seq):
5191 """ Same as `~.mapped`, but `name_seq` is a dot-separated sequence of
5192 field names, and only cached values are used.
5195 for name in name_seq.split('.'):
5196 field = recs._fields[name]
5197 null = field.null(self.env)
5198 recs = recs.mapped(lambda rec: rec._cache.get(field, null))
5201 def filtered(self, func):
5202 """ Select the records in `self` such that `func(rec)` is true, and
5203 return them as a recordset.
5205 :param func: a function or a dot-separated sequence of field names
5207 if isinstance(func, basestring):
5208 name = func
5209 func = lambda rec: filter(None, rec.mapped(name))
5210 return self.browse([rec.id for rec in self if func(rec)])
5212 def sorted(self, key=None):
5213 """ Return the recordset `self` ordered by `key` """
5214 if key is None:
5215 return self.search([('id', 'in', self.ids)])
5216 else:
5217 return self.browse(map(int, sorted(self, key=key)))
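# Illustrative sketch (not part of the original source): filtered() and
# sorted() on a recordset; the recordset and field names are examples only.
#
#     confirmed = orders.filtered(lambda o: o.state == 'confirmed')
#     with_partner = orders.filtered('partner_id')   # keep records whose field is truthy
#     by_date = orders.sorted(key=lambda o: o.date_order)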
5219 def update(self, values):
5220 """ Update record `self[0]` with `values`. """
5221 for name, value in values.iteritems():
5225 # New records - represent records that do not exist in the database yet;
5226 # they are used to compute default values and perform onchanges.
5230 def new(self, values={}):
5231 """ new([values]) -> record
5233 Return a new record instance attached to the current environment and
5234 initialized with the provided ``values``. The record is *not* created
5235 in database, it only exists in memory.
5237 record = self.browse([NewId()])
5238 record._cache.update(record._convert_to_cache(values, update=True))
5240 if record.env.in_onchange:
5241 # The cache update does not set inverse fields, so do it manually.
5242 # This is useful for computing a function field on secondary
5243 # records, if that field depends on the main record.
5245 field = self._fields.get(name)
5247 for invf in field.inverse_fields:
5248 invf._update(record[name], record)
5253 # Dirty flag, to mark records modified (in draft mode)
5258 """ Return whether any record in `self` is dirty. """
5259 dirty = self.env.dirty
5260 return any(record in dirty for record in self)
5263 def _dirty(self, value):
5264 """ Mark the records in `self` as dirty. """
5265 if value:
5266 map(self.env.dirty.add, self)
5267 else:
5268 map(self.env.dirty.discard, self)
5274 def __nonzero__(self):
5275 """ Test whether `self` is nonempty. """
5276 return bool(getattr(self, '_ids', True))
5279 """ Return the size of `self`. """
5280 return len(self._ids)
5283 """ Return an iterator over `self`. """
5284 for id in self._ids:
5285 yield self._browse(self.env, (id,))
5287 def __contains__(self, item):
5288 """ Test whether `item` is a subset of `self` or a field name. """
5289 if isinstance(item, BaseModel):
5290 if self._name == item._name:
5291 return set(item._ids) <= set(self._ids)
5292 raise except_orm("ValueError", "Mixing apples and oranges: %s in %s" % (item, self))
5293 if isinstance(item, basestring):
5294 return item in self._fields
5295 return item in self.ids
5297 def __add__(self, other):
5298 """ Return the concatenation of two recordsets. """
5299 if not isinstance(other, BaseModel) or self._name != other._name:
5300 raise except_orm("ValueError", "Mixing apples and oranges: %s + %s" % (self, other))
5301 return self.browse(self._ids + other._ids)
5303 def __sub__(self, other):
5304 """ Return the recordset of all the records in `self` that are not in `other`. """
5305 if not isinstance(other, BaseModel) or self._name != other._name:
5306 raise except_orm("ValueError", "Mixing apples and oranges: %s - %s" % (self, other))
5307 other_ids = set(other._ids)
5308 return self.browse([id for id in self._ids if id not in other_ids])
5310 def __and__(self, other):
5311 """ Return the intersection of two recordsets.
5312 Note that recordset order is not preserved.
5314 if not isinstance(other, BaseModel) or self._name != other._name:
5315 raise except_orm("ValueError", "Mixing apples and oranges: %s & %s" % (self, other))
5316 return self.browse(set(self._ids) & set(other._ids))
5318 def __or__(self, other):
5319 """ Return the union of two recordsets.
5320 Note that recordset order is not preserved.
5322 if not isinstance(other, BaseModel) or self._name != other._name:
5323 raise except_orm("ValueError", "Mixing apples and oranges: %s | %s" % (self, other))
5324 return self.browse(set(self._ids) | set(other._ids))
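# Illustrative sketch (not part of the original source): combining recordsets
# of the same model; `first` and `second` stand for arbitrary recordsets.
#
#     both = first + second         # concatenation, keeps order and duplicates
#     only_first = first - second   # records of `first` not in `second`
#     common = first & second       # intersection (order not preserved)
#     either = first | second       # union (order not preserved)
#     record in first               # membership test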
5326 def __eq__(self, other):
5327 """ Test whether two recordsets are equivalent (up to reordering). """
5328 if not isinstance(other, BaseModel):
5330 _logger.warning("Comparing apples and oranges: %s == %s", self, other)
5332 return self._name == other._name and set(self._ids) == set(other._ids)
5334 def __ne__(self, other):
5335 return not self == other
5337 def __lt__(self, other):
5338 if not isinstance(other, BaseModel) or self._name != other._name:
5339 raise except_orm("ValueError", "Mixing apples and oranges: %s < %s" % (self, other))
5340 return set(self._ids) < set(other._ids)
5342 def __le__(self, other):
5343 if not isinstance(other, BaseModel) or self._name != other._name:
5344 raise except_orm("ValueError", "Mixing apples and oranges: %s <= %s" % (self, other))
5345 return set(self._ids) <= set(other._ids)
5347 def __gt__(self, other):
5348 if not isinstance(other, BaseModel) or self._name != other._name:
5349 raise except_orm("ValueError", "Mixing apples and oranges: %s > %s" % (self, other))
5350 return set(self._ids) > set(other._ids)
5352 def __ge__(self, other):
5353 if not isinstance(other, BaseModel) or self._name != other._name:
5354 raise except_orm("ValueError", "Mixing apples and oranges: %s >= %s" % (self, other))
5355 return set(self._ids) >= set(other._ids)
5361 return "%s%s" % (self._name, getattr(self, '_ids', ""))
5363 def __unicode__(self):
5364 return unicode(str(self))
5369 if hasattr(self, '_ids'):
5370 return hash((self._name, frozenset(self._ids)))
5372 return hash(self._name)
5374 def __getitem__(self, key):
5375 """ If `key` is an integer or a slice, return the corresponding record
5376 selection as an instance (attached to `self.env`).
5377 Otherwise read the field `key` of the first record in `self`.
5381 inst = model.search(dom) # inst is a recordset
5382 r4 = inst[3] # fourth record in inst
5383 rs = inst[10:20] # subset of inst
5384 nm = rs['name'] # name of first record in inst
5386 if isinstance(key, basestring):
5387 # important: one must call the field's getter
5388 return self._fields[key].__get__(self, type(self))
5389 elif isinstance(key, slice):
5390 return self._browse(self.env, self._ids[key])
5391 else:
5392 return self._browse(self.env, (self._ids[key],))
5394 def __setitem__(self, key, value):
5395 """ Assign the field `key` to `value` in record `self`. """
5396 # important: one must call the field's setter
5397 return self._fields[key].__set__(self, value)
5400 # Cache and recomputation management
5405 """ Return the cache of `self`, mapping field names to values. """
5406 return RecordCache(self)
5409 def _in_cache_without(self, field):
5410 """ Make sure `self` is present in cache (for prefetching), and return
5411 the records of model `self` in cache that have no value for `field`
5412 (:class:`Field` instance).
5415 prefetch_ids = env.prefetch[self._name]
5416 prefetch_ids.update(self._ids)
5417 ids = filter(None, prefetch_ids - set(env.cache[field]))
5418 return self.browse(ids)
5422 """ Clear the records cache.
5425 The record cache is automatically invalidated.
5427 self.invalidate_cache()
5430 def invalidate_cache(self, fnames=None, ids=None):
5431 """ Invalidate the record caches after some records have been modified.
5432 If both `fnames` and `ids` are ``None``, the whole cache is cleared.
5434 :param fnames: the list of modified fields, or ``None`` for all fields
5435 :param ids: the list of modified record ids, or ``None`` for all
5439 return self.env.invalidate_all()
5440 fields = self._fields.values()
5442 fields = map(self._fields.__getitem__, fnames)
5444 # invalidate fields and inverse fields, too
5445 spec = [(f, ids) for f in fields] + \
5446 [(invf, None) for f in fields for invf in f.inverse_fields]
5447 self.env.invalidate(spec)
5450 def modified(self, fnames):
5451 """ Notify that fields have been modified on `self`. This invalidates
5452 the cache, and prepares the recomputation of stored function fields
5453 (new-style fields only).
5455 :param fnames: iterable of field names that have been modified on
5458 # each field knows what to invalidate and recompute
5459 spec = []
5460 for fname in fnames:
5461 spec += self._fields[fname].modified(self)
5463 cached_fields = {
5464 field
5465 for env in self.env.all
5466 for field in env.cache
5467 }
5468 # invalidate non-stored fields.function which are currently cached
5469 spec += [(f, None) for f in self.pool.pure_function_fields
5470 if f in cached_fields]
5472 self.env.invalidate(spec)
5474 def _recompute_check(self, field):
5475 """ If `field` must be recomputed on some record in `self`, return the
5476 corresponding records that must be recomputed.
5478 for env in [self.env] + list(iter(self.env.all)):
5479 if env.todo.get(field) and env.todo[field] & self:
5480 return env.todo[field]
5482 def _recompute_todo(self, field):
5483 """ Mark `field` to be recomputed. """
5484 todo = self.env.todo
5485 todo[field] = (todo.get(field) or self.browse()) | self
5487 def _recompute_done(self, field):
5488 """ Mark `field` as being recomputed. """
5489 todo = self.env.todo
5491 recs = todo.pop(field) - self
5496 def recompute(self):
5497 """ Recompute stored function fields. The fields and records to
5498 recompute have been determined by method :meth:`modified`.
5500 for env in list(iter(self.env.all)):
5502 field, recs = next(env.todo.iteritems())
5503 # evaluate the fields to recompute, and save them to database
5504 for rec, rec1 in zip(recs, recs.with_context(recompute=False)):
5506 values = rec._convert_to_write({
5507 f.name: rec[f.name] for f in field.computed_fields
5510 except MissingError:
5512 # mark the computed fields as done
5513 map(recs._recompute_done, field.computed_fields)
5516 # Generic onchange method
5519 def _has_onchange(self, field, other_fields):
5520 """ Return whether `field` should trigger an onchange event in the
5521 presence of `other_fields`.
5523 # test whether self has an onchange method for field, or field is a
5524 # dependency of any field in other_fields
5525 return field.name in self._onchange_methods or \
5526 any(dep in other_fields for dep in field.dependents)
5529 def _onchange_spec(self, view_info=None):
5530 """ Return the onchange spec from a view description; if not given, the
5531 result of ``self.fields_view_get()`` is used.
5535 # for traversing the XML arch and populating result
5536 def process(node, info, prefix):
5537 if node.tag == 'field':
5538 name = node.attrib['name']
5539 names = "%s.%s" % (prefix, name) if prefix else name
5540 if not result.get(names):
5541 result[names] = node.attrib.get('on_change')
5542 # traverse the subviews included in relational fields
5543 for subinfo in info['fields'][name].get('views', {}).itervalues():
5544 process(etree.fromstring(subinfo['arch']), subinfo, names)
5547 process(child, info, prefix)
5549 if view_info is None:
5550 view_info = self.fields_view_get()
5551 process(etree.fromstring(view_info['arch']), view_info, '')
5554 def _onchange_eval(self, field_name, onchange, result):
5555 """ Apply onchange method(s) for field `field_name` with spec `onchange`
5556 on record `self`. Value assignments are applied on `self`, while
5557 domain and warning messages are put in dictionary `result`.
5559 onchange = onchange.strip()
5562 if onchange in ("1", "true"):
5563 for method in self._onchange_methods.get(field_name, ()):
5564 method_res = method(self)
5567 if 'domain' in method_res:
5568 result.setdefault('domain', {}).update(method_res['domain'])
5569 if 'warning' in method_res:
5570 result['warning'] = method_res['warning']
5574 match = onchange_v7.match(onchange)
5576 method, params = match.groups()
5578 # evaluate params -> tuple
5579 global_vars = {'context': self._context, 'uid': self._uid}
5580 if self._context.get('field_parent'):
5581 class RawRecord(object):
5582 def __init__(self, record):
5583 self._record = record
5584 def __getattr__(self, name):
5585 field = self._record._fields[name]
5586 value = self._record[name]
5587 return field.convert_to_onchange(value)
5588 record = self[self._context['field_parent']]
5589 global_vars['parent'] = RawRecord(record)
5590 field_vars = {
5591 key: self._fields[key].convert_to_onchange(val)
5592 for key, val in self._cache.iteritems()
5593 }
5594 params = eval("[%s]" % params, global_vars, field_vars)
5596 # call onchange method
5597 args = (self._cr, self._uid, self._origin.ids) + tuple(params)
5598 method_res = getattr(self._model, method)(*args)
5599 if not isinstance(method_res, dict):
5600 return
5601 if 'value' in method_res:
5602 method_res['value'].pop('id', None)
5603 self.update(self._convert_to_cache(method_res['value'], validate=False))
5604 if 'domain' in method_res:
5605 result.setdefault('domain', {}).update(method_res['domain'])
5606 if 'warning' in method_res:
5607 result['warning'] = method_res['warning']
5610 def onchange(self, values, field_name, field_onchange):
5611 """ Perform an onchange on the given field.
5613 :param values: dictionary mapping field names to values, giving the
5614 current state of modification
5615 :param field_name: name of the modified field_name
5616 :param field_onchange: dictionary mapping field names to their
5621 if field_name and field_name not in self._fields:
5624 # determine subfields for field.convert_to_write() below
5626 subfields = defaultdict(set)
5627 for dotname in field_onchange:
5629 secondary.append(dotname)
5630 name, subname = dotname.split('.')
5631 subfields[name].add(subname)
5633 # create a new record with values, and attach `self` to it
5634 with env.do_in_onchange():
5635 record = self.new(values)
5636 values = dict(record._cache)
5637 # attach `self` with a different context (for cache consistency)
5638 record._origin = self.with_context(__onchange=True)
5640 # determine which fields should trigger an onchange
5641 todo = set([field_name]) if field_name else set(values)
5644 # dummy assignment: trigger invalidations on the record
5646 value = record[name]
5647 field = self._fields[name]
5648 if not field_name and field.type == 'many2one' and field.delegate and not value:
5649 # do not nullify all fields of parent record for new records
5651 record[name] = value
5653 result = {'value': {}}
5661 with env.do_in_onchange():
5662 # apply field-specific onchange methods
5663 if field_onchange.get(name):
5664 record._onchange_eval(name, field_onchange[name], result)
5666 # force re-evaluation of function fields on secondary records
5667 for field_seq in secondary:
5668 record.mapped(field_seq)
5670 # determine which fields have been modified
5671 for name, oldval in values.iteritems():
5672 newval = record[name]
5673 if newval != oldval or getattr(newval, '_dirty', False):
5674 field = self._fields[name]
5675 result['value'][name] = field.convert_to_write(
5676 newval, record._origin, subfields.get(name),
5680 # At the moment, the client does not support updates on a *2many field
5681 # while this one is modified by the user.
5682 if field_name and self._fields[field_name].type in ('one2many', 'many2many'):
5683 result['value'].pop(field_name, None)
5685 return result
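# Illustrative sketch (not part of the original source): a new-style onchange
# method as dispatched by onchange() above; the model and field names are
# examples only.
#
#     class example_order(Model):
#         _name = 'example.order'
#
#         @api.onchange('partner_id')
#         def _onchange_partner_id(self):
#             # assignments on self are sent back to the client as 'value'
#             self.note = 'Partner is %s' % self.partner_id.name
#             return {'warning': {'title': 'Check', 'message': 'Partner changed.'}}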
5688 class RecordCache(MutableMapping):
5689 """ Implements a proxy dictionary to read/update the cache of a record.
5690 Upon iteration, it looks like a dictionary mapping field names to
5691 values. However, fields may be used as keys as well.
5693 def __init__(self, records):
5694 self._recs = records
5696 def contains(self, field):
5697 """ Return whether `records[0]` has a value for `field` in cache. """
5698 if isinstance(field, basestring):
5699 field = self._recs._fields[field]
5700 return self._recs.id in self._recs.env.cache[field]
5702 def __contains__(self, field):
5703 """ Return whether `records[0]` has a regular value for `field` in cache. """
5704 if isinstance(field, basestring):
5705 field = self._recs._fields[field]
5706 dummy = SpecialValue(None)
5707 value = self._recs.env.cache[field].get(self._recs.id, dummy)
5708 return not isinstance(value, SpecialValue)
5710 def __getitem__(self, field):
5711 """ Return the cached value of `field` for `records[0]`. """
5712 if isinstance(field, basestring):
5713 field = self._recs._fields[field]
5714 value = self._recs.env.cache[field][self._recs.id]
5715 return value.get() if isinstance(value, SpecialValue) else value
5717 def __setitem__(self, field, value):
5718 """ Assign the cached value of `field` for all records in `records`. """
5719 if isinstance(field, basestring):
5720 field = self._recs._fields[field]
5721 values = dict.fromkeys(self._recs._ids, value)
5722 self._recs.env.cache[field].update(values)
5724 def update(self, *args, **kwargs):
5725 """ Update the cache of all records in `records`. If the argument is a
5726 `SpecialValue`, update all fields (except "magic" columns).
5728 if args and isinstance(args[0], SpecialValue):
5729 values = dict.fromkeys(self._recs._ids, args[0])
5730 for name, field in self._recs._fields.iteritems():
5732 self._recs.env.cache[field].update(values)
5734 return super(RecordCache, self).update(*args, **kwargs)
5736 def __delitem__(self, field):
5737 """ Remove the cached value of `field` for all `records`. """
5738 if isinstance(field, basestring):
5739 field = self._recs._fields[field]
5740 field_cache = self._recs.env.cache[field]
5741 for id in self._recs._ids:
5742 field_cache.pop(id, None)
5745 """ Iterate over the field names with a regular value in cache. """
5746 cache, id = self._recs.env.cache, self._recs.id
5747 dummy = SpecialValue(None)
5748 for name, field in self._recs._fields.iteritems():
5749 if name != 'id' and not isinstance(cache[field].get(id, dummy), SpecialValue):
5753 """ Return the number of fields with a regular value in cache. """
5754 return sum(1 for name in self)
5756 class Model(BaseModel):
5757 """Main super-class for regular database-persisted OpenERP models.
5759 OpenERP models are created by inheriting from this class::
5761 class user(Model):
5762 ...
5764 The system will later instantiate the class once per database (on
5765 which the class' module is installed).
5768 _register = False # not visible in ORM registry, meant to be python-inherited only
5769 _transient = False # True in a TransientModel
5771 class TransientModel(BaseModel):
5772 """Model super-class for transient records, meant to be temporarily
5773 persisted, and regularly vacuum-cleaned.
5775 A TransientModel has a simplified access rights management,
5776 all users can create new records, and may only access the
5777 records they created. The super-user has unrestricted access
5778 to all TransientModel records.
5781 _register = False # not visible in ORM registry, meant to be python-inherited only
5784 class AbstractModel(BaseModel):
5785 """Abstract Model super-class for creating an abstract class meant to be
5786 inherited by regular models (Models or TransientModels) but not meant to
5787 be usable on its own, or persisted.
5789 Technical note: we don't want to make AbstractModel the super-class of
5790 Model or BaseModel because it would not make sense to put the main
5791 definition of persistence methods such as create() in it, and still we
5792 should be able to override them within an AbstractModel.
5794 _auto = False # don't create any database backend for AbstractModels
5795 _register = False # not visible in ORM registry, meant to be python-inherited only
5798 def itemgetter_tuple(items):
5799 """ Fixes itemgetter inconsistency (useful in some cases) of not returning
5800 a tuple if len(items) == 1: always returns an n-tuple where n = len(items)
5805 return lambda gettable: (gettable[items[0]],)
5806 return operator.itemgetter(*items)
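# Illustrative sketch (not part of the original source):
#
#     itemgetter_tuple([1])(['a', 'b', 'c'])     # => ('b',)
#     itemgetter_tuple([0, 2])(['a', 'b', 'c'])  # => ('a', 'c')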
5808 def convert_pgerror_23502(model, fields, info, e):
5809 m = re.match(r'^null value in column "(?P<field>\w+)" violates '
5810 r'not-null constraint\n',
5812 field_name = m and m.group('field')
5813 if not m or field_name not in fields:
5814 return {'message': unicode(e)}
5815 message = _(u"Missing required value for the field '%s'.") % field_name
5816 field = fields.get(field_name)
5818 message = _(u"Missing required value for the field '%s' (%s)") % (field['string'], field_name)
5821 'field': field_name,
5824 def convert_pgerror_23505(model, fields, info, e):
5825 m = re.match(r'^duplicate key (?P<field>\w+) violates unique constraint',
5827 field_name = m and m.group('field')
5828 if not m or field_name not in fields:
5829 return {'message': unicode(e)}
5830 message = _(u"The value for the field '%s' already exists.") % field_name
5831 field = fields.get(field_name)
5833 message = _(u"%s This might be '%s' in the current model, or a field "
5834 u"of the same name in an o2m.") % (message, field['string'])
5837 'field': field_name,
5840 PGERROR_TO_OE = defaultdict(
5841 # shape of mapped converters
5842 lambda: (lambda model, fvg, info, pgerror: {'message': unicode(pgerror)}), {
5843 # not_null_violation
5844 '23502': convert_pgerror_23502,
5845 # unique constraint error
5846 '23505': convert_pgerror_23505,
5849 def _normalize_ids(arg, atoms={int, long, str, unicode, NewId}):
5850 """ Normalizes the ids argument for ``browse`` (v7 and v8) to a tuple.
5852 Various implementations were tested on the corpus of all browse() calls
5853 performed during a full crawler run (after having installed all website_*
5854 modules) and this one was the most efficient overall.
5856 A possible bit of correctness was sacrificed by not doing any test on
5857 Iterable and just assuming that any non-atomic type was an iterable of
5858 ids.
5862 # much of the corpus is falsy objects (empty list, tuple or set, None)
5863 if not arg:
5864 return ()
5866 # `type in set` is significantly faster (because more restrictive) than
5867 # isinstance(arg, set) or issubclass(type, set); and for new-style classes
5868 # obj.__class__ is equivalent to but faster than type(obj). Not relevant
5869 # (and looks much worse) in most cases, but over millions of calls it
5870 # does have a very minor effect.
5871 if arg.__class__ in atoms:
5872 return arg,
5874 return tuple(arg)
5876 # keep those imports here to avoid dependency cycle errors
5877 from .osv import expression
5878 from .fields import Field, SpecialValue, FailedValue
5880 # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: