1 # -*- coding: utf-8 -*-
2 ##############################################################################
4 # OpenERP, Open Source Management Solution
5 # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
7 # This program is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU Affero General Public License as
9 # published by the Free Software Foundation, either version 3 of the
10 # License, or (at your option) any later version.
12 # This program is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU Affero General Public License for more details.
17 # You should have received a copy of the GNU Affero General Public License
18 # along with this program. If not, see <http://www.gnu.org/licenses/>.
20 ##############################################################################
24 Object Relational Mapping module:
25 * Hierarchical structure
26 * Constraint consistency and validation
27 * Object metadata depends on its status
28 * Optimised processing by complex query (multiple actions at once)
29 * Default field values
30 * Permissions optimisation
31 * Persistent objects: PostgreSQL database
33 * Multi-level caching system
34 * Two different inheritance mechanisms
35 * Rich set of field types:
36 - classical (varchar, integer, boolean, ...)
37 - relational (one2many, many2one, many2many)
51 from collections import defaultdict, MutableMapping
52 from inspect import getmembers
55 import dateutil.relativedelta
57 from lxml import etree
60 from . import SUPERUSER_ID
63 from .api import Environment
64 from .exceptions import except_orm, AccessError, MissingError, ValidationError
65 from .osv import fields
66 from .osv.query import Query
67 from .tools import lazy_property, ormcache
68 from .tools.config import config
69 from .tools.misc import CountingStream, DEFAULT_SERVER_DATETIME_FORMAT, DEFAULT_SERVER_DATE_FORMAT
70 from .tools.safe_eval import safe_eval as eval
71 from .tools.translate import _
73 _logger = logging.getLogger(__name__)
74 _schema = logging.getLogger(__name__ + '.schema')
76 regex_order = re.compile('^( *([a-z0-9:_]+|"[a-z0-9:_]+")( *desc| *asc)?( *, *|))+$', re.I)
77 regex_object_name = re.compile(r'^[a-z0-9_.]+$')
78 onchange_v7 = re.compile(r"^(\w+)\((.*)\)$")
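# Illustrative note (not part of the original module): regex_order accepts order
# specs such as "name desc, id", while onchange_v7 splits a v7-style on_change
# attribute into its method name and argument string, e.g.
# >>> onchange_v7.match("onchange_partner_id(partner_id, company_id)").groups()
# ('onchange_partner_id', 'partner_id, company_id')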
80 AUTOINIT_RECALCULATE_STORED_FIELDS = 1000
83 def check_object_name(name):
84 """ Check if the given name is a valid openerp object name.
86 The _name attribute in osv and osv_memory objects is subject to
87 some restrictions. This function returns True or False depending on
88 whether the given name is allowed.
90 TODO: this is an approximation. The goal of this approximation
91 is to disallow uppercase characters (in some places we quote
92 table/column names and in others not, which leads to errors such as
95 psycopg2.ProgrammingError: relation "xxx" does not exist).
97 The same restriction should apply to both osv and osv_memory
98 objects for consistency.
101 if regex_object_name.match(name) is None:
105 def raise_on_invalid_object_name(name):
106 if not check_object_name(name):
107 msg = "The _name attribute %s is not valid." % name
109 raise except_orm('ValueError', msg)
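# Illustrative sketch (not part of the original module): check_object_name only
# accepts lowercase dotted identifiers, e.g.
# >>> check_object_name('res.partner')
# True
# >>> check_object_name('Res.Partner')   # uppercase is rejected
# False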
111 POSTGRES_CONFDELTYPES = {
119 def intersect(la, lb):
120 return filter(lambda x: x in lb, la)
123 """ Test whether functions `f` and `g` are identical or have the same name """
124 return f == g or getattr(f, '__name__', 0) == getattr(g, '__name__', 1)
126 def fix_import_export_id_paths(fieldname):
128 Fixes the id fields in imports and exports, and splits field paths
131 :param str fieldname: name of the field to import/export
132 :return: split field name
135 fixed_db_id = re.sub(r'([^/])\.id', r'\1/.id', fieldname)
136 fixed_external_id = re.sub(r'([^/]):id', r'\1/id', fixed_db_id)
137 return fixed_external_id.split('/')
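# Illustrative sketch (not part of the original module): how import/export field
# paths are normalized and split, using sample values.
# >>> fix_import_export_id_paths('order_line/product_id:id')
# ['order_line', 'product_id', 'id']
# >>> fix_import_export_id_paths('partner_id.id')
# ['partner_id', '.id']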
139 def pg_varchar(size=0):
140 """ Returns the VARCHAR declaration for the provided size:
142 * If no size (or an empty or negative size) is provided, return an unsized VARCHAR
144 * Otherwise return a VARCHAR(n)
146 :param int size: varchar size, optional
150 if not isinstance(size, int):
151 raise TypeError("VARCHAR parameter should be an int, got %s"
% type(size))
154 return 'VARCHAR(%d)' % size
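# Illustrative sketch (not part of the original module): a positive size yields a
# sized declaration, e.g.
# >>> pg_varchar(16)
# 'VARCHAR(16)'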
157 FIELDS_TO_PGTYPES = {
158 fields.boolean: 'bool',
159 fields.integer: 'int4',
163 fields.datetime: 'timestamp',
164 fields.binary: 'bytea',
165 fields.many2one: 'int4',
166 fields.serialized: 'text',
169 def get_pg_type(f, type_override=None):
171 :param fields._column f: field to get a Postgres type for
172 :param type type_override: use the provided type for dispatching instead of the field's own type
173 :returns: (postgres_identification_type, postgres_type_specification)
176 field_type = type_override or type(f)
178 if field_type in FIELDS_TO_PGTYPES:
179 pg_type = (FIELDS_TO_PGTYPES[field_type], FIELDS_TO_PGTYPES[field_type])
180 elif issubclass(field_type, fields.float):
181 # Explicit support for "falsy" digits (0, False) to indicate a
182 # NUMERIC field with no fixed precision. The values will be saved
183 # in the database with all significant digits.
184 # FLOAT8 type is still the default when there is no precision because
185 # it is faster for most operations (sums, etc.)
186 if f.digits is not None:
187 pg_type = ('numeric', 'NUMERIC')
189 pg_type = ('float8', 'DOUBLE PRECISION')
190 elif issubclass(field_type, (fields.char, fields.reference)):
191 pg_type = ('varchar', pg_varchar(f.size))
192 elif issubclass(field_type, fields.selection):
193 if (isinstance(f.selection, list) and isinstance(f.selection[0][0], int))\
194 or getattr(f, 'size', None) == -1:
195 pg_type = ('int4', 'INTEGER')
197 pg_type = ('varchar', pg_varchar(getattr(f, 'size', None)))
198 elif issubclass(field_type, fields.function):
199 if f._type == 'selection':
200 pg_type = ('varchar', pg_varchar())
202 pg_type = get_pg_type(f, getattr(fields, f._type))
204 _logger.warning('%s type not supported!', field_type)
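# Illustrative sketch (not part of the original module): get_pg_type returns a
# (base type, column declaration) pair; for example a fields.char(size=64) column
# would map to ('varchar', 'VARCHAR(64)') and a fields.integer column to
# ('int4', 'int4').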
210 class MetaModel(api.Meta):
211 """ Metaclass for the models.
213 This class is used as the metaclass for the class :class:`BaseModel` to
214 discover the models defined in a module (without instantiating them).
215 If the automatic discovery is not needed, it is possible to set the model's
216 ``_register`` attribute to False.
220 module_to_models = {}
222 def __init__(self, name, bases, attrs):
223 if not self._register:
224 self._register = True
225 super(MetaModel, self).__init__(name, bases, attrs)
228 if not hasattr(self, '_module'):
229 # The (OpenERP) module name can be in the `openerp.addons` namespace
230 # or not. For instance, module `sale` can be imported as
231 # `openerp.addons.sale` (the right way) or `sale` (for backward compatibility).
233 module_parts = self.__module__.split('.')
234 if len(module_parts) > 2 and module_parts[:2] == ['openerp', 'addons']:
235 module_name = self.__module__.split('.')[2]
237 module_name = self.__module__.split('.')[0]
238 self._module = module_name
240 # Remember which models to instantiate for this module.
242 self.module_to_models.setdefault(self._module, []).append(self)
244 # check for new-api conversion error: a trailing comma left after a field definition
245 for key, val in attrs.iteritems():
246 if type(val) is tuple and len(val) == 1 and isinstance(val[0], Field):
247 _logger.error("Trailing comma after field definition: %s.%s", self, key)
249 # transform columns into new-style fields (enables field inheritance)
250 for name, column in self._columns.iteritems():
251 if name in self.__dict__:
252 _logger.warning("In class %s, field %r overriding an existing value", self, name)
253 setattr(self, name, column.to_field())
257 """ Pseudo-ids for new records. """
258 def __nonzero__(self):
261 IdType = (int, long, basestring, NewId)
264 # maximum number of prefetched records
267 # special columns automatically created by the ORM
268 LOG_ACCESS_COLUMNS = ['create_uid', 'create_date', 'write_uid', 'write_date']
269 MAGIC_COLUMNS = ['id'] + LOG_ACCESS_COLUMNS
271 class BaseModel(object):
272 """ Base class for OpenERP models.
274 OpenERP models are created by inheriting from this class' subclasses:
276 * :class:`Model` for regular database-persisted models
278 * :class:`TransientModel` for temporary data, stored in the database but
279 automatically vacuumed every so often
281 * :class:`AbstractModel` for abstract super classes meant to be shared by
282 multiple inheriting models
284 The system automatically instantiates every model once per database. Those
285 instances represent the available models on each database, and depend on
286 which modules are installed on that database. The actual class of each
287 instance is built from the Python classes that create and inherit from the corresponding model.
290 Every model instance is a "recordset", i.e., an ordered collection of
291 records of the model. Recordsets are returned by methods like
292 :meth:`~.browse`, :meth:`~.search`, or field accesses. Records have no
293 explicit representation: a record is represented as a recordset of one
296 To create a class that should not be instantiated, the _register class
297 attribute may be set to False.
299 __metaclass__ = MetaModel
300 _auto = True # create database backend
301 _register = False # Set to false if the model shouldn't be automatically discovered.
308 _parent_name = 'parent_id'
309 _parent_store = False
310 _parent_order = False
316 _translate = True # set to False to disable translations export for this model
318 # dict of {field:method}, with method returning the (name_get of records, {id: fold})
319 # to include in the _read_group, if grouped on this field
323 _transient = False # True in a TransientModel
326 # { 'parent_model': 'm2o_field', ... }
329 # Mapping from inherits'd field name to triple (m, r, f, n) where m is the
330 # model from which it is inherits'd, r is the (local) field towards m, f
331 # is the _column object itself, and n is the original (i.e. top-most) parent model.
334 # { 'field_name': ('parent_model', 'm2o_field_to_reach_parent',
335 # field_column_obj, original_parent_model), ... }
338 # Mapping field name/column_info object
339 # This is similar to _inherit_fields but:
340 # 1. includes self fields,
341 # 2. uses column_info instead of a triple.
342 # Warning: _all_columns is deprecated, use _fields instead
347 _sql_constraints = []
349 # model dependencies, for models backed up by sql views:
350 # {model_name: field_names, ...}
353 CONCURRENCY_CHECK_FIELD = '__last_update'
355 def log(self, cr, uid, id, message, secondary=False, context=None):
356 return _logger.warning("log() is deprecated. Please use OpenChatter notification system instead of the res.log mechanism.")
358 def view_init(self, cr, uid, fields_list, context=None):
359 """Override this method to do specific things when a view on the object is opened."""
362 def _field_create(self, cr, context=None):
363 """ Create entries in ir_model_fields for all the model's fields.
365 If necessary, also create an entry in ir_model, and if called from the
366 modules loading scheme (by receiving 'module' in the context), also
367 create entries in ir_model_data (for the model and the fields).
369 - create an entry in ir_model (if there is not already one),
370 - create an entry in ir_model_data (if there is not already one, and if
371 'module' is in the context),
372 - update ir_model_fields with the fields found in _columns
373 (TODO there is some redundancy as _columns is updated from
374 ir_model_fields in __init__).
379 cr.execute("SELECT id FROM ir_model WHERE model=%s", (self._name,))
381 cr.execute('SELECT nextval(%s)', ('ir_model_id_seq',))
382 model_id = cr.fetchone()[0]
383 cr.execute("INSERT INTO ir_model (id,model, name, info,state) VALUES (%s, %s, %s, %s, %s)", (model_id, self._name, self._description, self.__doc__, 'base'))
385 model_id = cr.fetchone()[0]
386 if 'module' in context:
387 name_id = 'model_'+self._name.replace('.', '_')
388 cr.execute('select * from ir_model_data where name=%s and module=%s', (name_id, context['module']))
390 cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, (now() at time zone 'UTC'), (now() at time zone 'UTC'), %s, %s, %s)", \
391 (name_id, context['module'], 'ir.model', model_id)
394 cr.execute("SELECT * FROM ir_model_fields WHERE model=%s", (self._name,))
396 for rec in cr.dictfetchall():
397 cols[rec['name']] = rec
399 ir_model_fields_obj = self.pool.get('ir.model.fields')
401 # sparse field should be created at the end, as it depends on its serialized field already existing
402 model_fields = sorted(self._columns.items(), key=lambda x: 1 if x[1]._type == 'sparse' else 0)
403 for (k, f) in model_fields:
405 'model_id': model_id,
408 'field_description': f.string,
410 'relation': f._obj or '',
411 'select_level': tools.ustr(int(f.select)),
412 'readonly': (f.readonly and 1) or 0,
413 'required': (f.required and 1) or 0,
414 'selectable': (f.selectable and 1) or 0,
415 'translate': (f.translate and 1) or 0,
416 'relation_field': f._fields_id if isinstance(f, fields.one2many) else '',
417 'serialization_field_id': None,
419 if getattr(f, 'serialization_field', None):
420 # resolve link to serialization_field if specified by name
421 serialization_field_id = ir_model_fields_obj.search(cr, SUPERUSER_ID, [('model','=',vals['model']), ('name', '=', f.serialization_field)])
422 if not serialization_field_id:
423 raise except_orm(_('Error'), _("Serialization field `%s` not found for sparse field `%s`!") % (f.serialization_field, k))
424 vals['serialization_field_id'] = serialization_field_id[0]
426 # When it's a custom field, it does not contain f.select
427 if context.get('field_state', 'base') == 'manual':
428 if context.get('field_name', '') == k:
429 vals['select_level'] = context.get('select', '0')
430 # set the value so that the problem does NOT occur next time
432 vals['select_level'] = cols[k]['select_level']
435 cr.execute('select nextval(%s)', ('ir_model_fields_id_seq',))
436 id = cr.fetchone()[0]
438 cr.execute("""INSERT INTO ir_model_fields (
439 id, model_id, model, name, field_description, ttype,
440 relation,state,select_level,relation_field, translate, serialization_field_id
442 %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s
444 id, vals['model_id'], vals['model'], vals['name'], vals['field_description'], vals['ttype'],
445 vals['relation'], 'base',
446 vals['select_level'], vals['relation_field'], bool(vals['translate']), vals['serialization_field_id']
448 if 'module' in context:
449 name1 = 'field_' + self._table + '_' + k
450 cr.execute("select name from ir_model_data where name=%s", (name1,))
452 name1 = name1 + "_" + str(id)
453 cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, (now() at time zone 'UTC'), (now() at time zone 'UTC'), %s, %s, %s)", \
454 (name1, context['module'], 'ir.model.fields', id)
457 for key, val in vals.items():
458 if cols[k][key] != vals[key]:
459 cr.execute('update ir_model_fields set field_description=%s where model=%s and name=%s', (vals['field_description'], vals['model'], vals['name']))
460 cr.execute("""UPDATE ir_model_fields SET
461 model_id=%s, field_description=%s, ttype=%s, relation=%s,
462 select_level=%s, readonly=%s ,required=%s, selectable=%s, relation_field=%s, translate=%s, serialization_field_id=%s
464 model=%s AND name=%s""", (
465 vals['model_id'], vals['field_description'], vals['ttype'],
467 vals['select_level'], bool(vals['readonly']), bool(vals['required']), bool(vals['selectable']), vals['relation_field'], bool(vals['translate']), vals['serialization_field_id'], vals['model'], vals['name']
470 self.invalidate_cache(cr, SUPERUSER_ID)
473 def _add_field(cls, name, field):
474 """ Add the given `field` under the given `name` in the class """
475 # add field as an attribute and in cls._fields (for reflection)
476 if not isinstance(getattr(cls, name, field), Field):
477 _logger.warning("In model %r, field %r overriding existing value", cls._name, name)
478 setattr(cls, name, field)
479 cls._fields[name] = field
481 # basic setup of field
482 field.set_class_name(cls, name)
484 if field.store or field.column:
485 cls._columns[name] = field.to_column()
487 # remove potential column that may be overridden by field
488 cls._columns.pop(name, None)
491 def _pop_field(cls, name):
492 """ Remove the field with the given `name` from the model.
493 This method should only be used for manual fields.
495 field = cls._fields.pop(name)
496 cls._columns.pop(name, None)
497 cls._all_columns.pop(name, None)
498 if hasattr(cls, name):
503 def _add_magic_fields(cls):
504 """ Introduce magic fields on the current class
506 * id is a "normal" field (with a specific getter)
507 * create_uid, create_date, write_uid and write_date have become "normal" fields
509 * $CONCURRENCY_CHECK_FIELD is a computed field with its computing
510 method defined dynamically. Uses ``str(datetime.datetime.utcnow())``
511 to get the same structure as the previous
512 ``(now() at time zone 'UTC')::timestamp``::
514 # select (now() at time zone 'UTC')::timestamp;
516 ----------------------------
517 2013-06-18 08:30:37.292809
519 >>> str(datetime.datetime.utcnow())
520 '2013-06-18 08:31:32.821177'
522 def add(name, field):
523 """ add `field` with the given `name` if it does not exist yet """
524 if name not in cls._fields:
525 cls._add_field(name, field)
530 # this field 'id' must override any other column or field
531 cls._add_field('id', fields.Id(automatic=True))
533 add('display_name', fields.Char(string='Display Name', automatic=True,
534 compute='_compute_display_name'))
537 add('create_uid', fields.Many2one('res.users', string='Created by', automatic=True))
538 add('create_date', fields.Datetime(string='Created on', automatic=True))
539 add('write_uid', fields.Many2one('res.users', string='Last Updated by', automatic=True))
540 add('write_date', fields.Datetime(string='Last Updated on', automatic=True))
541 last_modified_name = 'compute_concurrency_field_with_access'
543 last_modified_name = 'compute_concurrency_field'
545 # this field must override any other column or field
546 cls._add_field(cls.CONCURRENCY_CHECK_FIELD, fields.Datetime(
547 string='Last Modified on', compute=last_modified_name, automatic=True))
550 def compute_concurrency_field(self):
551 self[self.CONCURRENCY_CHECK_FIELD] = \
552 datetime.datetime.utcnow().strftime(DEFAULT_SERVER_DATETIME_FORMAT)
555 @api.depends('create_date', 'write_date')
556 def compute_concurrency_field_with_access(self):
557 self[self.CONCURRENCY_CHECK_FIELD] = \
558 self.write_date or self.create_date or \
559 datetime.datetime.utcnow().strftime(DEFAULT_SERVER_DATETIME_FORMAT)
562 # Goal: try to apply inheritance at the instantiation level and
563 # put objects in the pool var
566 def _build_model(cls, pool, cr):
567 """ Instanciate a given model.
569 This class method instanciates the class of some model (i.e. a class
570 deriving from osv or osv_memory). The class might be the class passed
571 in argument or, if it inherits from another class, a class constructed
572 by combining the two classes.
576 # IMPORTANT: the registry contains an instance for each model. The class
577 # of each model carries inferred metadata that is shared among the
578 # model's instances for this registry, but not among registries. Hence
579 # we cannot use that "registry class" for combining model classes by
580 # inheritance, since it confuses the metadata inference process.
582 # Keep links to non-inherited constraints in cls; this is useful for
583 # instance when exporting translations
584 cls._local_constraints = cls.__dict__.get('_constraints', [])
585 cls._local_sql_constraints = cls.__dict__.get('_sql_constraints', [])
587 # determine inherited models
588 parents = getattr(cls, '_inherit', [])
589 parents = [parents] if isinstance(parents, basestring) else (parents or [])
591 # determine the model's name
592 name = cls._name or (len(parents) == 1 and parents[0]) or cls.__name__
594 # determine the module that introduced the model
595 original_module = pool[name]._original_module if name in parents else cls._module
597 # build the class hierarchy for the model
598 for parent in parents:
599 if parent not in pool:
600 raise TypeError('The model "%s" specifies a non-existing parent class "%s"\n'
601 'You may need to add a dependency on the parent class\' module.' % (name, parent))
602 parent_model = pool[parent]
604 # do not use the class of parent_model, since that class contains
605 # inferred metadata; use its ancestor instead
606 parent_class = type(parent_model).__base__
608 # don't inherit custom fields
609 columns = dict((key, val)
610 for key, val in parent_class._columns.iteritems()
613 columns.update(cls._columns)
615 inherits = dict(parent_class._inherits)
616 inherits.update(cls._inherits)
618 depends = dict(parent_class._depends)
619 for m, fs in cls._depends.iteritems():
620 depends[m] = depends.get(m, []) + fs
622 old_constraints = parent_class._constraints
623 new_constraints = cls._constraints
624 # filter out from old_constraints the ones overridden by a
625 # constraint with the same function name in new_constraints
626 constraints = new_constraints + [oldc
627 for oldc in old_constraints
628 if not any(newc[2] == oldc[2] and same_name(newc[0], oldc[0])
629 for newc in new_constraints)
632 sql_constraints = cls._sql_constraints + \
633 parent_class._sql_constraints
639 '_inherits': inherits,
641 '_constraints': constraints,
642 '_sql_constraints': sql_constraints,
644 cls = type(name, (cls, parent_class), attrs)
646 # introduce the "registry class" of the model;
647 # duplicate some attributes so that the ORM can modify them
651 '_columns': dict(cls._columns),
652 '_defaults': {}, # filled by Field._determine_default()
653 '_inherits': dict(cls._inherits),
654 '_depends': dict(cls._depends),
655 '_constraints': list(cls._constraints),
656 '_sql_constraints': list(cls._sql_constraints),
657 '_original_module': original_module,
659 cls = type(cls._name, (cls,), attrs)
661 # instantiate the model, and initialize it
662 model = object.__new__(cls)
663 model.__init__(pool, cr)
667 def _init_function_fields(cls, pool, cr):
668 # initialize the list of non-stored function fields for this model
669 pool._pure_function_fields[cls._name] = []
671 # process store of low-level function fields
672 for fname, column in cls._columns.iteritems():
673 if hasattr(column, 'digits_change'):
674 column.digits_change(cr)
675 # filter out existing store about this field
676 pool._store_function[cls._name] = [
678 for stored in pool._store_function.get(cls._name, [])
679 if (stored[0], stored[1]) != (cls._name, fname)
681 if not isinstance(column, fields.function):
684 # register it on the pool for invalidation
685 pool._pure_function_fields[cls._name].append(fname)
687 # process store parameter
690 get_ids = lambda self, cr, uid, ids, c={}: ids
691 store = {cls._name: (get_ids, None, column.priority, None)}
692 for model, spec in store.iteritems():
694 (fnct, fields2, order, length) = spec
696 (fnct, fields2, order) = spec
699 raise except_orm('Error',
700 ('Invalid function definition %s in object %s !\nYou must use the definition: store={object:(fnct, fields, priority, time length)}.' % (fname, cls._name)))
701 pool._store_function.setdefault(model, [])
702 t = (cls._name, fname, fnct, tuple(fields2) if fields2 else None, order, length)
703 if t not in pool._store_function[model]:
704 pool._store_function[model].append(t)
705 pool._store_function[model].sort(key=lambda x: x[4])
708 def _init_manual_fields(cls, cr):
709 # Check whether the query is already done
710 if cls.pool.fields_by_model is not None:
711 manual_fields = cls.pool.fields_by_model.get(cls._name, [])
713 cr.execute('SELECT * FROM ir_model_fields WHERE model=%s AND state=%s', (cls._name, 'manual'))
714 manual_fields = cr.dictfetchall()
716 for field in manual_fields:
717 if field['name'] in cls._fields:
721 'string': field['field_description'],
722 'required': bool(field['required']),
723 'readonly': bool(field['readonly']),
725 # FIXME: ignore field['serialization_field_id']
726 if field['ttype'] in ('char', 'text', 'html'):
727 attrs['translate'] = bool(field['translate'])
728 attrs['size'] = field['size'] or None
729 elif field['ttype'] in ('selection', 'reference'):
730 attrs['selection'] = eval(field['selection'])
731 elif field['ttype'] == 'many2one':
732 attrs['comodel_name'] = field['relation']
733 attrs['ondelete'] = field['on_delete']
734 attrs['domain'] = eval(field['domain']) if field['domain'] else None
735 elif field['ttype'] == 'one2many':
736 attrs['comodel_name'] = field['relation']
737 attrs['inverse_name'] = field['relation_field']
738 attrs['domain'] = eval(field['domain']) if field['domain'] else None
739 elif field['ttype'] == 'many2many':
740 attrs['comodel_name'] = field['relation']
741 _rel1 = field['relation'].replace('.', '_')
742 _rel2 = field['model'].replace('.', '_')
743 attrs['relation'] = 'x_%s_%s_%s_rel' % (_rel1, _rel2, field['name'])
744 attrs['column1'] = 'id1'
745 attrs['column2'] = 'id2'
746 attrs['domain'] = eval(field['domain']) if field['domain'] else None
747 cls._add_field(field['name'], Field.by_type[field['ttype']](**attrs))
750 def _init_constraints_onchanges(cls):
751 # store sql constraint error messages
752 for (key, _, msg) in cls._sql_constraints:
753 cls.pool._sql_error[cls._table + '_' + key] = msg
755 # collect constraint and onchange methods
756 cls._constraint_methods = []
757 cls._onchange_methods = defaultdict(list)
758 for attr, func in getmembers(cls, callable):
759 if hasattr(func, '_constrains'):
760 cls._constraint_methods.append(func)
761 if hasattr(func, '_onchange'):
762 for name in func._onchange:
763 cls._onchange_methods[name].append(func)
766 # In the past, this method was registering the model class in the server.
767 # This job is now done entirely by the metaclass MetaModel.
769 # Do not create an instance here. Model instances are created by method _build_model().
773 def __init__(self, pool, cr):
774 """ Initialize a model and make it part of the given registry.
776 - copy the stored fields' functions in the registry,
777 - retrieve custom fields and add them in the model,
778 - ensure there is a many2one for each _inherits'd parent,
779 - update the children's _columns,
780 - give a chance to each field to initialize itself.
785 # link the class to the registry, and update the registry
787 cls._model = self # backward compatibility
788 pool.add(cls._name, self)
790 # determine description, table, sequence and log_access
791 if not cls._description:
792 cls._description = cls._name
794 cls._table = cls._name.replace('.', '_')
795 if not cls._sequence:
796 cls._sequence = cls._table + '_id_seq'
797 if not hasattr(cls, '_log_access'):
798 # If _log_access is not specified, it is the same value as _auto.
799 cls._log_access = cls._auto
802 if cls.is_transient():
803 cls._transient_check_count = 0
804 cls._transient_max_count = config.get('osv_memory_count_limit')
805 cls._transient_max_hours = config.get('osv_memory_age_limit')
806 assert cls._log_access, \
807 "TransientModels must have log_access turned on, " \
808 "in order to implement their access rights policy"
810 # retrieve new-style fields (from above registry class) and duplicate
811 # them (to avoid clashes with inheritance between different models)
813 above = cls.__bases__[0]
814 for attr, field in getmembers(above, Field.__instancecheck__):
815 cls._add_field(attr, field.new())
817 # introduce magic fields
818 cls._add_magic_fields()
820 # register stuff about low-level function fields and custom fields
821 cls._init_function_fields(pool, cr)
823 # register constraints and onchange methods
824 cls._init_constraints_onchanges()
826 # prepare ormcache, which must be shared by all instances of the model
831 def _is_an_ordinary_table(self):
832 self.env.cr.execute("""\
836 AND relkind = %s""", [self._table, 'r'])
837 return bool(self.env.cr.fetchone())
839 def __export_xml_id(self):
840 """ Return a valid xml_id for the record `self`. """
841 if not self._is_an_ordinary_table():
843 "You can not export the column ID of model %s, because the "
844 "table %s is not an ordinary table."
845 % (self._name, self._table))
846 ir_model_data = self.sudo().env['ir.model.data']
847 data = ir_model_data.search([('model', '=', self._name), ('res_id', '=', self.id)])
850 return '%s.%s' % (data[0].module, data[0].name)
855 name = '%s_%s' % (self._table, self.id)
856 while ir_model_data.search([('module', '=', '__export__'), ('name', '=', name)]):
858 name = '%s_%s_%s' % (self._table, self.id, postfix)
859 ir_model_data.create({
862 'module': '__export__',
865 return '__export__.' + name
868 def __export_rows(self, fields):
869 """ Export fields of the records in `self`.
871 :param fields: list of lists of fields to traverse
872 :return: list of lists of corresponding values
876 # main line of record, initially empty
877 current = [''] * len(fields)
878 lines.append(current)
880 # list of primary fields followed by secondary field(s)
883 # process column by column
884 for i, path in enumerate(fields):
889 if name in primary_done:
893 current[i] = str(record.id)
895 current[i] = record.__export_xml_id()
897 field = record._fields[name]
900 # this part could be simpler, but it has to be done this way
901 # in order to reproduce the former behavior
902 if not isinstance(value, BaseModel):
903 current[i] = field.convert_to_export(value, self.env)
905 primary_done.append(name)
907 # This is a special case, its strange behavior is intended!
908 if field.type == 'many2many' and len(path) > 1 and path[1] == 'id':
909 xml_ids = [r.__export_xml_id() for r in value]
910 current[i] = ','.join(xml_ids) or False
913 # recursively export the fields that follow name
914 fields2 = [(p[1:] if p and p[0] == name else []) for p in fields]
915 lines2 = value.__export_rows(fields2)
917 # merge first line with record's main line
918 for j, val in enumerate(lines2[0]):
921 # check value of current field
923 # assign xml_ids, and forget about remaining lines
924 xml_ids = [item[1] for item in value.name_get()]
925 current[i] = ','.join(xml_ids)
927 # append the other lines at the end
935 def export_data(self, fields_to_export, raw_data=False):
936 """ Export fields for selected objects
938 :param fields_to_export: list of fields
939 :param raw_data: True to return value in native Python type
940 :rtype: dictionary with a *datas* matrix
942 This method is used when exporting data via client menu
944 fields_to_export = map(fix_import_export_id_paths, fields_to_export)
946 self = self.with_context(export_raw_data=True)
947 return {'datas': self.__export_rows(fields_to_export)}
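# Illustrative sketch (not part of the original module): exporting two columns for
# one record; the model, ids and values below are assumptions, not actual data.
# >>> partners = self.env['res.partner'].browse([7])
# >>> partners.export_data(['id', 'name'])
# {'datas': [['__export__.res_partner_7', 'Agrolait']]}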
949 def import_data(self, cr, uid, fields, datas, mode='init', current_module='', noupdate=False, context=None, filename=None):
952 Use :meth:`~load` instead
954 Import the given data into the given module
956 This method is used when importing data via client menu.
958 Example of fields to import for a sale.order::
961 partner_id, (=name_search)
962 order_line/.id, (=database_id)
964 order_line/product_id/id, (=xml id)
965 order_line/price_unit,
966 order_line/product_uom_qty,
967 order_line/product_uom/id (=xml_id)
969 This method returns a 4-tuple with the following structure::
971 (return_code, errored_resource, error_message, unused)
973 * The first item is a return code, it is ``-1`` in case of
974 import error, or the last imported row number in case of success
975 * The second item contains the record data dict that failed to import
976 in case of error, otherwise it's 0
977 * The third item contains an error message string in case of error,
979 * The last item is currently unused, with no specific semantics
981 :param fields: list of fields to import
982 :param datas: data to import
983 :param mode: 'init' or 'update' for record creation
984 :param current_module: module name
985 :param noupdate: flag for record creation
986 :param filename: optional file to store partial import state for recovery
987 :returns: 4-tuple in the form (return_code, errored_resource, error_message, unused)
988 :rtype: (int, dict or 0, str or 0, str or 0)
990 context = dict(context) if context is not None else {}
991 context['_import_current_module'] = current_module
993 fields = map(fix_import_export_id_paths, fields)
994 ir_model_data_obj = self.pool.get('ir.model.data')
997 if m['type'] == 'error':
998 raise Exception(m['message'])
1000 if config.get('import_partial') and filename:
1001 with open(config.get('import_partial'), 'rb') as partial_import_file:
1002 data = pickle.load(partial_import_file)
1003 position = data.get(filename, 0)
1007 for res_id, xml_id, res, info in self._convert_records(cr, uid,
1008 self._extract_records(cr, uid, fields, datas,
1009 context=context, log=log),
1010 context=context, log=log):
1011 ir_model_data_obj._update(cr, uid, self._name,
1012 current_module, res, mode=mode, xml_id=xml_id,
1013 noupdate=noupdate, res_id=res_id, context=context)
1014 position = info.get('rows', {}).get('to', 0) + 1
1015 if config.get('import_partial') and filename and (not (position%100)):
1016 with open(config.get('import_partial'), 'rb') as partial_import:
1017 data = pickle.load(partial_import)
1018 data[filename] = position
1019 with open(config.get('import_partial'), 'wb') as partial_import:
1020 pickle.dump(data, partial_import)
1021 if context.get('defer_parent_store_computation'):
1022 self._parent_store_compute(cr)
1024 except Exception, e:
1026 return -1, {}, 'Line %d : %s' % (position + 1, tools.ustr(e)), ''
1028 if context.get('defer_parent_store_computation'):
1029 self._parent_store_compute(cr)
1030 return position, 0, 0, 0
1032 def load(self, cr, uid, fields, data, context=None):
1034 Attempts to load the data matrix, and returns a list of ids (or
1035 ``False`` if there was an error and no id could be generated) and a list of messages.
1038 The ids are those of the records created and saved (in database), in
1039 the same order they were extracted from the file. They can be passed
1040 directly to :meth:`~read`
1042 :param fields: list of fields to import, at the same index as the corresponding data
1043 :type fields: list(str)
1044 :param data: row-major matrix of data to import
1045 :type data: list(list(str))
1046 :param dict context:
1047 :returns: {ids: list(int)|False, messages: [Message]}
1049 cr.execute('SAVEPOINT model_load')
1052 fields = map(fix_import_export_id_paths, fields)
1053 ModelData = self.pool['ir.model.data'].clear_caches()
1055 fg = self.fields_get(cr, uid, context=context)
1062 for id, xid, record, info in self._convert_records(cr, uid,
1063 self._extract_records(cr, uid, fields, data,
1064 context=context, log=messages.append),
1065 context=context, log=messages.append):
1067 cr.execute('SAVEPOINT model_load_save')
1068 except psycopg2.InternalError, e:
1069 # broken transaction, exit and hope the source error was
1071 if not any(message['type'] == 'error' for message in messages):
1072 messages.append(dict(info, type='error',message=
1073 u"Unknown database error: '%s'" % e))
1076 ids.append(ModelData._update(cr, uid, self._name,
1077 current_module, record, mode=mode, xml_id=xid,
1078 noupdate=noupdate, res_id=id, context=context))
1079 cr.execute('RELEASE SAVEPOINT model_load_save')
1080 except psycopg2.Warning, e:
1081 messages.append(dict(info, type='warning', message=str(e)))
1082 cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
1083 except psycopg2.Error, e:
1084 messages.append(dict(
1086 **PGERROR_TO_OE[e.pgcode](self, fg, info, e)))
1087 # Failed to write, log to messages, rollback savepoint (to
1088 # avoid broken transaction) and keep going
1089 cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
1090 except Exception, e:
1091 message = (_('Unknown error during import:') +
1092 ' %s: %s' % (type(e), unicode(e)))
1093 moreinfo = _('Resolve other errors first')
1094 messages.append(dict(info, type='error',
1097 # Failed for some reason, perhaps due to invalid data supplied,
1098 # rollback savepoint and keep going
1099 cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
1100 if any(message['type'] == 'error' for message in messages):
1101 cr.execute('ROLLBACK TO SAVEPOINT model_load')
1103 return {'ids': ids, 'messages': messages}
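# Illustrative sketch (not part of the original module): loading a two-column
# row-major matrix; the model, values and resulting id are assumptions.
# >>> self.pool['res.partner'].load(cr, uid,
# ...     ['name', 'email'],
# ...     [['Gemini Furniture', 'gemini@example.com']])
# {'ids': [42], 'messages': []}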
1105 def _extract_records(self, cr, uid, fields_, data,
1106 context=None, log=lambda a: None):
1107 """ Generates record dicts from the data sequence.
1109 The result is a generator of dicts mapping field names to raw
1110 (unconverted, unvalidated) values.
1112 For relational fields, if sub-fields were provided the value will be
1113 a list of sub-records
1115 The following sub-fields may be set on the record (by key):
1116 * None is the name_get for the record (to use with name_create/name_search)
1117 * "id" is the External ID for the record
1118 * ".id" is the Database ID for the record
1120 from openerp.fields import Char, Integer
1121 fields = dict(self._fields)
1122 # Fake fields to avoid special cases in extractor
1123 fields[None] = Char('rec_name')
1124 fields['id'] = Char('External ID')
1125 fields['.id'] = Integer('Database ID')
1127 # m2o fields can't be on multiple lines so exclude them from the
1128 # is_relational field rows filter, but special-case it later on to
1129 # be handled with relational fields (as it can have subfields)
1130 is_relational = lambda field: fields[field].relational
1131 get_o2m_values = itemgetter_tuple(
1132 [index for index, field in enumerate(fields_)
1133 if fields[field[0]].type == 'one2many'])
1134 get_nono2m_values = itemgetter_tuple(
1135 [index for index, field in enumerate(fields_)
1136 if fields[field[0]].type != 'one2many'])
1137 # Checks if the provided row has any non-empty non-relational field
1138 def only_o2m_values(row, f=get_nono2m_values, g=get_o2m_values):
1139 return any(g(row)) and not any(f(row))
1143 if index >= len(data): return
1146 # copy non-relational fields to record dict
1147 record = dict((field[0], value)
1148 for field, value in itertools.izip(fields_, row)
1149 if not is_relational(field[0]))
1151 # Get all following rows which have relational values attached to
1152 # the current record (no non-relational values)
1153 record_span = itertools.takewhile(
1154 only_o2m_values, itertools.islice(data, index + 1, None))
1155 # stitch record row back on for relational fields
1156 record_span = list(itertools.chain([row], record_span))
1157 for relfield in set(
1158 field[0] for field in fields_
1159 if is_relational(field[0])):
1160 # FIXME: how to not use _obj without relying on fields_get?
1161 Model = self.pool[fields[relfield].comodel_name]
1163 # get only cells for this sub-field, should be strictly
1164 # non-empty, field path [None] is for name_get field
1165 indices, subfields = zip(*((index, field[1:] or [None])
1166 for index, field in enumerate(fields_)
1167 if field[0] == relfield))
1169 # return all rows which have at least one value for the
1170 # subfields of relfield
1171 relfield_data = filter(any, map(itemgetter_tuple(indices), record_span))
1172 record[relfield] = [subrecord
1173 for subrecord, _subinfo in Model._extract_records(
1174 cr, uid, subfields, relfield_data,
1175 context=context, log=log)]
1177 yield record, {'rows': {
1179 'to': index + len(record_span) - 1
1181 index += len(record_span)
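# Illustrative sketch (not part of the original module): given the field paths
# ['name', 'order_line/product_uom_qty'] and two data rows where the second row
# only fills the relational column, a single record spanning both rows is
# yielded, e.g. {'name': 'SO001', 'order_line': [{'product_uom_qty': '1'},
# {'product_uom_qty': '2'}]} (model and values are assumptions).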
1183 def _convert_records(self, cr, uid, records,
1184 context=None, log=lambda a: None):
1185 """ Converts records from the source iterable (recursive dicts of
1186 strings) into forms which can be written to the database (via
1187 self.create or (ir.model.data)._update)
1189 :returns: a list of triplets of (id, xid, record)
1190 :rtype: list((int|None, str|None, dict))
1192 if context is None: context = {}
1193 Converter = self.pool['ir.fields.converter']
1194 Translation = self.pool['ir.translation']
1195 fields = dict(self._fields)
1197 (f, (Translation._get_source(cr, uid, self._name + ',' + f, 'field',
1198 context.get('lang'))
1200 for f, field in fields.iteritems())
1202 convert = Converter.for_model(cr, uid, self, context=context)
1204 def _log(base, field, exception):
1205 type = 'warning' if isinstance(exception, Warning) else 'error'
1206 # logs the logical (not human-readable) field name for automated
1207 # processing of the response, but injects the human-readable name in the message
1208 record = dict(base, type=type, field=field,
1209 message=unicode(exception.args[0]) % base)
1210 if len(exception.args) > 1 and exception.args[1]:
1211 record.update(exception.args[1])
1214 stream = CountingStream(records)
1215 for record, extras in stream:
1218 # name_get/name_create
1219 if None in record: pass
1226 dbid = int(record['.id'])
1228 # in case of overridden id column
1229 dbid = record['.id']
1230 if not self.search(cr, uid, [('id', '=', dbid)], context=context):
1233 record=stream.index,
1235 message=_(u"Unknown database identifier '%s'") % dbid))
1238 converted = convert(record, lambda field, err:\
1239 _log(dict(extras, record=stream.index, field=field_names[field]), field, err))
1241 yield dbid, xid, converted, dict(extras, record=stream.index)
1244 def _validate_fields(self, field_names):
1245 field_names = set(field_names)
1247 # old-style constraint methods
1248 trans = self.env['ir.translation']
1249 cr, uid, context = self.env.args
1252 for fun, msg, names in self._constraints:
1254 # validation must be context-independent; call `fun` without context
1255 valid = not (set(names) & field_names) or fun(self._model, cr, uid, ids)
1257 except Exception, e:
1258 _logger.debug('Exception while validating constraint', exc_info=True)
1260 extra_error = tools.ustr(e)
1263 res_msg = msg(self._model, cr, uid, ids, context=context)
1264 if isinstance(res_msg, tuple):
1265 template, params = res_msg
1266 res_msg = template % params
1268 res_msg = trans._get_source(self._name, 'constraint', self.env.lang, msg)
1270 res_msg += "\n\n%s\n%s" % (_('Error details:'), extra_error)
1272 _("Field(s) `%s` failed against a constraint: %s") %
1273 (', '.join(names), res_msg)
1276 raise ValidationError('\n'.join(errors))
1278 # new-style constraint methods
1279 for check in self._constraint_methods:
1280 if set(check._constrains) & field_names:
1283 except ValidationError, e:
1285 except Exception, e:
1286 raise ValidationError("Error while validating constraint\n\n%s" % tools.ustr(e))
1289 def default_get(self, fields_list):
1290 """ default_get(fields) -> default_values
1292 Return default values for the fields in `fields_list`. Default
1293 values are determined by the context, user defaults, and the model itself.
1296 :param fields_list: a list of field names
1297 :return: a dictionary mapping each field name to its corresponding
1298 default value, if it has one.
1301 # trigger view init hook
1302 self.view_init(fields_list)
1305 parent_fields = defaultdict(list)
1307 for name in fields_list:
1308 # 1. look up context
1309 key = 'default_' + name
1310 if key in self._context:
1311 defaults[name] = self._context[key]
1314 # 2. look up ir_values
1315 # Note: performance is good, because get_defaults_dict is cached!
1316 ir_values_dict = self.env['ir.values'].get_defaults_dict(self._name)
1317 if name in ir_values_dict:
1318 defaults[name] = ir_values_dict[name]
1321 field = self._fields.get(name)
1323 # 3. look up property fields
1324 # TODO: get rid of this one
1325 if field and field.company_dependent:
1326 defaults[name] = self.env['ir.property'].get(name, self._name)
1329 # 4. look up field.default
1330 if field and field.default:
1331 defaults[name] = field.default(self)
1334 # 5. delegate to parent model
1335 if field and field.inherited:
1336 field = field.related_field
1337 parent_fields[field.model_name].append(field.name)
1339 # convert default values to the right format
1340 defaults = self._convert_to_cache(defaults, validate=False)
1341 defaults = self._convert_to_write(defaults)
1343 # add default values for inherited fields
1344 for model, names in parent_fields.iteritems():
1345 defaults.update(self.env[model].default_get(names))
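# Illustrative sketch (not part of the original module): a context default takes
# precedence over the other sources; model and field names are assumptions.
# >>> self.env['res.partner'].with_context(default_name='Foo').default_get(['name'])
# {'name': 'Foo'}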
1349 def fields_get_keys(self, cr, user, context=None):
1350 res = self._columns.keys()
1351 # TODO I believe this loop can be replaced by
1352 # res.extend(self._inherit_fields.keys())
1353 for parent in self._inherits:
1354 res.extend(self.pool[parent].fields_get_keys(cr, user, context))
1357 def _rec_name_fallback(self, cr, uid, context=None):
1358 rec_name = self._rec_name
1359 if rec_name not in self._columns:
1360 rec_name = self._columns.keys()[0] if len(self._columns.keys()) > 0 else "id"
1364 # Overload this method if you need a window title which depends on the context
1366 def view_header_get(self, cr, user, view_id=None, view_type='form', context=None):
1369 def user_has_groups(self, cr, uid, groups, context=None):
1370 """Return true if the user is at least member of one of the groups
1371 in groups_str. Typically used to resolve `groups` attribute
1372 in view and model definitions.
1374 :param str groups: comma-separated list of fully-qualified group
1375 external IDs, e.g.: ``base.group_user,base.group_system``
1376 :return: True if the current user is a member of one of the
1379 return any(self.pool['res.users'].has_group(cr, uid, group_ext_id)
1380 for group_ext_id in groups.split(','))
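# Illustrative sketch (not part of the original module): a typical membership
# check using two well-known group external IDs; the result depends on the user.
# >>> self.user_has_groups(cr, uid, 'base.group_user,base.group_system')
# True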
1382 def _get_default_form_view(self, cr, user, context=None):
1383 """ Generates a default single-line form view using all fields
1384 of the current model except the m2m and o2m ones.
1386 :param cr: database cursor
1387 :param int user: user id
1388 :param dict context: connection context
1389 :returns: a form view as an lxml document
1390 :rtype: etree._Element
1392 view = etree.Element('form', string=self._description)
1393 group = etree.SubElement(view, 'group', col="4")
1394 for fname, field in self._fields.iteritems():
1395 if field.automatic or field.type in ('one2many', 'many2many'):
1398 etree.SubElement(group, 'field', name=fname)
1399 if field.type == 'text':
1400 etree.SubElement(group, 'newline')
1403 def _get_default_search_view(self, cr, user, context=None):
1404 """ Generates a single-field search view, based on _rec_name.
1406 :param cr: database cursor
1407 :param int user: user id
1408 :param dict context: connection context
1409 :returns: a search view as an lxml document
1410 :rtype: etree._Element
1412 view = etree.Element('search', string=self._description)
1413 etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))
1416 def _get_default_tree_view(self, cr, user, context=None):
1417 """ Generates a single-field tree view, based on _rec_name.
1419 :param cr: database cursor
1420 :param int user: user id
1421 :param dict context: connection context
1422 :returns: a tree view as an lxml document
1423 :rtype: etree._Element
1425 view = etree.Element('tree', string=self._description)
1426 etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))
1429 def _get_default_calendar_view(self, cr, user, context=None):
1430 """ Generates a default calendar view by trying to infer
1431 calendar fields from a number of pre-set attribute names
1433 :param cr: database cursor
1434 :param int user: user id
1435 :param dict context: connection context
1436 :returns: a calendar view
1437 :rtype: etree._Element
1439 def set_first_of(seq, in_, to):
1440 """Sets the first value of `seq` also found in `in_` to
1441 the `to` attribute of the view being closed over.
1443 Returns whether it found a suitable value (and set it on
1444 the attribute) or not
1452 view = etree.Element('calendar', string=self._description)
1453 etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))
1455 if self._date_name not in self._columns:
1457 for dt in ['date', 'date_start', 'x_date', 'x_date_start']:
1458 if dt in self._columns:
1459 self._date_name = dt
1464 raise except_orm(_('Invalid Object Architecture!'), _("Insufficient fields for Calendar View!"))
1465 view.set('date_start', self._date_name)
1467 set_first_of(["user_id", "partner_id", "x_user_id", "x_partner_id"],
1468 self._columns, 'color')
1470 if not set_first_of(["date_stop", "date_end", "x_date_stop", "x_date_end"],
1471 self._columns, 'date_stop'):
1472 if not set_first_of(["date_delay", "planned_hours", "x_date_delay", "x_planned_hours"],
1473 self._columns, 'date_delay'):
1475 _('Invalid Object Architecture!'),
1476 _("Insufficient fields to generate a Calendar View for %s, missing a date_stop or a date_delay" % self._name))
1480 def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
1481 """ fields_view_get([view_id | view_type='form'])
1483 Get the detailed composition of the requested view like fields, model, view architecture
1485 :param view_id: id of the view or None
1486 :param view_type: type of the view to return if view_id is None ('form', 'tree', ...)
1487 :param toolbar: true to include contextual actions
1488 :param submenu: deprecated
1489 :return: dictionary describing the composition of the requested view (including inherited views and extensions)
1490 :raise AttributeError:
1491 * if the inherited view specifies a position other than 'before', 'after', 'inside' or 'replace'
1492 * if some tag other than 'position' is found in parent view
1493 :raise Invalid ArchitectureError: if a view type other than form, tree, calendar, search, etc. is defined on the structure
1497 View = self.pool['ir.ui.view']
1500 'model': self._name,
1501 'field_parent': False,
1504 # try to find a view_id if none provided
1506 # <view_type>_view_ref in context can be used to override the default view
1507 view_ref_key = view_type + '_view_ref'
1508 view_ref = context.get(view_ref_key)
1511 module, view_ref = view_ref.split('.', 1)
1512 cr.execute("SELECT res_id FROM ir_model_data WHERE model='ir.ui.view' AND module=%s AND name=%s", (module, view_ref))
1513 view_ref_res = cr.fetchone()
1515 view_id = view_ref_res[0]
1517 _logger.warning('%r requires a fully-qualified external id (got: %r for model %s). '
1518 'Please use the complete `module.view_id` form instead.', view_ref_key, view_ref,
1522 # otherwise try to find the lowest priority matching ir.ui.view
1523 view_id = View.default_view(cr, uid, self._name, view_type, context=context)
1525 # context for post-processing might be overridden
1528 # read the view with inherited views applied
1529 root_view = View.read_combined(cr, uid, view_id, fields=['id', 'name', 'field_parent', 'type', 'model', 'arch'], context=context)
1530 result['arch'] = root_view['arch']
1531 result['name'] = root_view['name']
1532 result['type'] = root_view['type']
1533 result['view_id'] = root_view['id']
1534 result['field_parent'] = root_view['field_parent']
1535 # override context for postprocessing
1536 if root_view.get('model') != self._name:
1537 ctx = dict(context, base_model_name=root_view.get('model'))
1539 # fall back on default view methods if no ir.ui.view could be found
1541 get_func = getattr(self, '_get_default_%s_view' % view_type)
1542 arch_etree = get_func(cr, uid, context)
1543 result['arch'] = etree.tostring(arch_etree, encoding='utf-8')
1544 result['type'] = view_type
1545 result['name'] = 'default'
1546 except AttributeError:
1547 raise except_orm(_('Invalid Architecture!'), _("No default view of type '%s' could be found !") % view_type)
1549 # Apply post processing, groups and modifiers etc...
1550 xarch, xfields = View.postprocess_and_fields(cr, uid, self._name, etree.fromstring(result['arch']), view_id, context=ctx)
1551 result['arch'] = xarch
1552 result['fields'] = xfields
1554 # Add related action information if asked
1556 toclean = ('report_sxw_content', 'report_rml_content', 'report_sxw', 'report_rml', 'report_sxw_content_data', 'report_rml_content_data')
1562 ir_values_obj = self.pool.get('ir.values')
1563 resprint = ir_values_obj.get(cr, uid, 'action', 'client_print_multi', [(self._name, False)], False, context)
1564 resaction = ir_values_obj.get(cr, uid, 'action', 'client_action_multi', [(self._name, False)], False, context)
1565 resrelate = ir_values_obj.get(cr, uid, 'action', 'client_action_relate', [(self._name, False)], False, context)
1566 resaction = [clean(action) for action in resaction if view_type == 'tree' or not action[2].get('multi')]
1567 resprint = [clean(print_) for print_ in resprint if view_type == 'tree' or not print_[2].get('multi')]
1568 # When multi="True" is set, the action is displayed only in the "More" menu of the list view
1569 resrelate = [clean(action) for action in resrelate
1570 if (action[2].get('multi') and view_type == 'tree') or (not action[2].get('multi') and view_type == 'form')]
1572 for x in itertools.chain(resprint, resaction, resrelate):
1573 x['string'] = x['name']
1575 result['toolbar'] = {
1577 'action': resaction,
1582 def get_formview_id(self, cr, uid, id, context=None):
1583 """ Return an view id to open the document with. This method is meant to be
1584 overridden in addons that want to give specific view ids for example.
1586 :param int id: id of the document to open
1590 def get_formview_action(self, cr, uid, id, context=None):
1591 """ Return an action to open the document. This method is meant to be
1592 overridden in addons that want to give specific view ids for example.
1594 :param int id: id of the document to open
1596 view_id = self.get_formview_id(cr, uid, id, context=context)
1598 'type': 'ir.actions.act_window',
1599 'res_model': self._name,
1600 'view_type': 'form',
1601 'view_mode': 'form',
1602 'views': [(view_id, 'form')],
1603 'target': 'current',
1607 def get_access_action(self, cr, uid, id, context=None):
1608 """ Return an action to open the document. This method is meant to be
1609 overridden in addons that want to give specific access to the document.
1610 By default it opens the formview of the document.
1612 :param int id: id of the document to open
1614 return self.get_formview_action(cr, uid, id, context=context)
1616 def _view_look_dom_arch(self, cr, uid, node, view_id, context=None):
1617 return self.pool['ir.ui.view'].postprocess_and_fields(
1618 cr, uid, self._name, node, view_id, context=context)
1620 def search_count(self, cr, user, args, context=None):
1621 """ search_count(args) -> int
1623 Returns the number of records in the current model matching :ref:`the
1624 provided domain <reference/orm/domains>`.
1626 res = self.search(cr, user, args, context=context, count=True)
1627 if isinstance(res, list):
1631 @api.returns('self')
1632 def search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False):
1633 """ search(args[, offset=0][, limit=None][, order=None])
1635 Searches for records based on the ``args``
1636 :ref:`search domain <reference/orm/domains>`.
1638 :param args: :ref:`A search domain <reference/orm/domains>`. Use an empty
1639 list to match all records.
1640 :param int offset: number of results to ignore (default: none)
1641 :param int limit: maximum number of records to return (default: all)
1642 :param str order: sort string
1643 :returns: at most ``limit`` records matching the search criteria
1645 :raise AccessError: * if user tries to bypass access rules for read on the requested object.
1647 return self._search(cr, user, args, offset=offset, limit=limit, order=order, context=context, count=count)
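# Illustrative sketch (not part of the original module): an old-API domain search;
# the model and domain are assumptions.
# >>> ids = self.pool['res.partner'].search(cr, uid,
# ...     [('is_company', '=', True)], limit=5, order='name')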
1650 # display_name, name_get, name_create, name_search
1653 @api.depends(lambda self: (self._rec_name,) if self._rec_name else ())
1654 def _compute_display_name(self):
1655 names = dict(self.name_get())
1657 record.display_name = names.get(record.id, False)
1661 """ name_get() -> [(id, name), ...]
1663 Returns a textual representation for the records in ``self``.
1664 By default this is the value of the ``display_name`` field.
1666 :return: list of pairs ``(id, text_repr)`` for each record
1670 name = self._rec_name
1671 if name in self._fields:
1672 convert = self._fields[name].convert_to_display_name
1674 result.append((record.id, convert(record[name])))
1677 result.append((record.id, "%s,%s" % (record._name, record.id)))
1682 def name_create(self, name):
1683 """ name_create(name) -> record
1685 Create a new record by calling :meth:`~.create` with only one value
1686 provided: the display name of the new record.
1688 The new record will be initialized with any default values
1689 applicable to this model, or provided through the context. The usual
1690 behavior of :meth:`~.create` applies.
1692 :param name: display name of the record to create
1694 :return: the :meth:`~.name_get` pair value of the created record
1697 record = self.create({self._rec_name: name})
1698 return record.name_get()[0]
1700 _logger.warning("Cannot execute name_create, no _rec_name defined on %s", self._name)
1704 def name_search(self, name='', args=None, operator='ilike', limit=100):
1705 """ name_search(name='', args=None, operator='ilike', limit=100) -> records
1707 Search for records that have a display name matching the given
1708 `name` pattern when compared with the given `operator`, while also
1709 matching the optional search domain (`args`).
1711 This is used for example to provide suggestions based on a partial
1712 value for a relational field. It may sometimes be seen as the inverse
1713 function of :meth:`~.name_get`, but it is not guaranteed to be one.
1715 This method is equivalent to calling :meth:`~.search` with a search
1716 domain based on ``display_name`` and then :meth:`~.name_get` on the
1717 result of the search.
1719 :param str name: the name pattern to match
1720 :param list args: optional search domain (see :meth:`~.search` for
1721 syntax), specifying further restrictions
1722 :param str operator: domain operator for matching `name`, such as
1723 ``'like'`` or ``'='``.
1724 :param int limit: optional max number of records to return
1726 :return: list of pairs ``(id, text_repr)`` for all matching records.
1728 return self._name_search(name, args, operator, limit=limit)
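# Illustrative usage sketch, not part of the original module: name_search() is
# what widgets typically call to autocomplete a many2one value. Assuming a
# 'res.country' model, a partial name returns (id, display_name) pairs.
#
#     matches = self.pool['res.country'].name_search(
#         cr, uid, 'bel', operator='ilike', limit=10, context=context)
#     # e.g. [(21, 'Belgium'), (22, 'Belize')]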
1730 def _name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100, name_get_uid=None):
1731 # private implementation of name_search, allows passing a dedicated user
1732 # for the name_get part to solve some access rights issues
1733 args = list(args or [])
1734 # optimize out the default criterion of ``ilike ''`` that matches everything
1735 if not self._rec_name:
1736 _logger.warning("Cannot execute name_search, no _rec_name defined on %s", self._name)
1737 elif not (name == '' and operator == 'ilike'):
1738 args += [(self._rec_name, operator, name)]
1739 access_rights_uid = name_get_uid or user
1740 ids = self._search(cr, user, args, limit=limit, context=context, access_rights_uid=access_rights_uid)
1741 res = self.name_get(cr, access_rights_uid, ids, context)
1744 def read_string(self, cr, uid, id, langs, fields=None, context=None):
1747 self.pool.get('ir.translation').check_access_rights(cr, uid, 'read')
1749 fields = self._columns.keys() + self._inherit_fields.keys()
1750 #FIXME: collect all calls to _get_source into one SQL call.
1752 res[lang] = {'code': lang}
1754 if f in self._columns:
1755 res_trans = self.pool.get('ir.translation')._get_source(cr, uid, self._name+','+f, 'field', lang)
1757 res[lang][f] = res_trans
1759 res[lang][f] = self._columns[f].string
1760 for table in self._inherits:
1761 cols = intersect(self._inherit_fields.keys(), fields)
1762 res2 = self.pool[table].read_string(cr, uid, id, langs, cols, context)
1765 res[lang]['code'] = lang
1766 for f in res2[lang]:
1767 res[lang][f] = res2[lang][f]
1770 def write_string(self, cr, uid, id, langs, vals, context=None):
1771 self.pool.get('ir.translation').check_access_rights(cr, uid, 'write')
1772 #FIXME: try to only call the translation in one SQL
1775 if field in self._columns:
1776 src = self._columns[field].string
1777 self.pool.get('ir.translation')._set_ids(cr, uid, self._name+','+field, 'field', lang, [0], vals[field], src)
1778 for table in self._inherits:
1779 cols = intersect(self._inherit_fields.keys(), vals)
1781 self.pool[table].write_string(cr, uid, id, langs, vals, context)
1784 def _add_missing_default_values(self, cr, uid, values, context=None):
1785 # avoid overriding inherited values when parent is set
1787 for tables, parent_field in self._inherits.items():
1788 if parent_field in values:
1789 avoid_tables.append(tables)
1791 # compute missing fields
1792 missing_defaults = set()
1793 for field in self._columns.keys():
1794 if not field in values:
1795 missing_defaults.add(field)
1796 for field in self._inherit_fields.keys():
1797 if (field not in values) and (self._inherit_fields[field][0] not in avoid_tables):
1798 missing_defaults.add(field)
1799 # discard magic fields
1800 missing_defaults -= set(MAGIC_COLUMNS)
1802 if missing_defaults:
1803 # override defaults with the provided values, never allow the other way around
1804 defaults = self.default_get(cr, uid, list(missing_defaults), context)
1806 if ((dv in self._columns and self._columns[dv]._type == 'many2many') \
1807 or (dv in self._inherit_fields and self._inherit_fields[dv][2]._type == 'many2many')) \
1808 and defaults[dv] and isinstance(defaults[dv][0], (int, long)):
1809 defaults[dv] = [(6, 0, defaults[dv])]
1810 if (dv in self._columns and self._columns[dv]._type == 'one2many' \
1811 or (dv in self._inherit_fields and self._inherit_fields[dv][2]._type == 'one2many')) \
1812 and isinstance(defaults[dv], (list, tuple)) and defaults[dv] and isinstance(defaults[dv][0], dict):
1813 defaults[dv] = [(0, 0, x) for x in defaults[dv]]
1814 defaults.update(values)
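# Sketch of the conversion performed above (illustration only, hypothetical
# field names): raw defaults coming from default_get() are normalized into the
# x2many "command" format expected by create()/write().
#
#     {'tag_ids': [1, 2, 3]}             ->  {'tag_ids': [(6, 0, [1, 2, 3])]}            # many2many
#     {'line_ids': [{'note': 'hello'}]}  ->  {'line_ids': [(0, 0, {'note': 'hello'})]}   # one2many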
1818 def clear_caches(self):
1819 """ Clear the caches
1821 This clears the caches associated to methods decorated with
1822 ``tools.ormcache`` or ``tools.ormcache_multi``.
1825 self._ormcache.clear()
1826 self.pool._any_cache_cleared = True
1827 except AttributeError:
1831 def _read_group_fill_results(self, cr, uid, domain, groupby, remaining_groupbys,
1832 aggregated_fields, count_field,
1833 read_group_result, read_group_order=None, context=None):
1834 """Helper method for filling in empty groups for all possible values of
1835 the field being grouped by"""
1837 # self._group_by_full should map groupable fields to a method that returns
1838 # a list of all aggregated values that we want to display for this field,
1839 # in the form of a m2o-like pair (key,label).
1840 # This is useful to implement kanban views for instance, where all columns
1841 # should be displayed even if they don't contain any record.
1843 # Grab the list of all groups that should be displayed, including all present groups
1844 present_group_ids = [x[groupby][0] for x in read_group_result if x[groupby]]
1845 all_groups,folded = self._group_by_full[groupby](self, cr, uid, present_group_ids, domain,
1846 read_group_order=read_group_order,
1847 access_rights_uid=openerp.SUPERUSER_ID,
1850 result_template = dict.fromkeys(aggregated_fields, False)
1851 result_template[groupby + '_count'] = 0
1852 if remaining_groupbys:
1853 result_template['__context'] = {'group_by': remaining_groupbys}
1855 # Merge the left_side (current results as dicts) with the right_side (all
1856 # possible values as m2o pairs). Both lists are supposed to be using the
1857 # same ordering, and can be merged in one pass.
1860 def append_left(left_side):
1861 grouped_value = left_side[groupby] and left_side[groupby][0]
1862 if not grouped_value in known_values:
1863 result.append(left_side)
1864 known_values[grouped_value] = left_side
1866 known_values[grouped_value].update({count_field: left_side[count_field]})
1867 def append_right(right_side):
1868 grouped_value = right_side[0]
1869 if not grouped_value in known_values:
1870 line = dict(result_template)
1871 line[groupby] = right_side
1872 line['__domain'] = [(groupby,'=',grouped_value)] + domain
1874 known_values[grouped_value] = line
1875 while read_group_result or all_groups:
1876 left_side = read_group_result[0] if read_group_result else None
1877 right_side = all_groups[0] if all_groups else None
1878 assert left_side is None or left_side[groupby] is False \
1879 or isinstance(left_side[groupby], (tuple,list)), \
1880 'M2O-like pair expected, got %r' % left_side[groupby]
1881 assert right_side is None or isinstance(right_side, (tuple,list)), \
1882 'M2O-like pair expected, got %r' % right_side
1883 if left_side is None:
1884 append_right(all_groups.pop(0))
1885 elif right_side is None:
1886 append_left(read_group_result.pop(0))
1887 elif left_side[groupby] == right_side:
1888 append_left(read_group_result.pop(0))
1889 all_groups.pop(0) # discard right_side
1890 elif not left_side[groupby] or not left_side[groupby][0]:
1891 # left side == "Undefined" entry, not present on right_side
1892 append_left(read_group_result.pop(0))
1894 append_right(all_groups.pop(0))
1898 r['__fold'] = folded.get(r[groupby] and r[groupby][0], False)
1901 def _read_group_prepare(self, orderby, aggregated_fields, annotated_groupbys, query):
1903 Prepares the GROUP BY and ORDER BY terms for the read_group method. Adds the missing JOIN clause
1904 to the query if the order should be computed against an m2o field.
1905 :param orderby: the orderby definition in the form "%(field)s %(order)s"
1906 :param aggregated_fields: list of aggregated fields in the query
1907 :param annotated_groupbys: list of dictionaries returned by _read_group_process_groupby
1908 These dictionaries contain the qualified name of each groupby
1909 (fully qualified SQL name for the corresponding field),
1910 and the (non raw) field name.
1911 :param osv.Query query: the query under construction
1912 :return: (groupby_terms, orderby_terms)
1915 groupby_terms = [gb['qualified_field'] for gb in annotated_groupbys]
1916 groupby_fields = [gb['groupby'] for gb in annotated_groupbys]
1918 return groupby_terms, orderby_terms
1920 self._check_qorder(orderby)
1921 for order_part in orderby.split(','):
1922 order_split = order_part.split()
1923 order_field = order_split[0]
1924 if order_field in groupby_fields:
1926 if self._fields[order_field.split(':')[0]].type == 'many2one':
1927 order_clause = self._generate_order_by(order_part, query).replace('ORDER BY ', '')
1929 orderby_terms.append(order_clause)
1930 groupby_terms += [order_term.split()[0] for order_term in order_clause.split(',')]
1932 order = '"%s" %s' % (order_field, '' if len(order_split) == 1 else order_split[1])
1933 orderby_terms.append(order)
1934 elif order_field in aggregated_fields:
1935 orderby_terms.append(order_part)
1937 # Cannot order by a field that will not appear in the results (needs to be grouped or aggregated)
1938 _logger.warn('%s: read_group order by `%s` ignored, cannot sort on empty columns (not grouped/aggregated)',
1939 self._name, order_part)
1940 return groupby_terms, orderby_terms
1942 def _read_group_process_groupby(self, gb, query, context):
1944 Helper method to collect important information about groupbys: raw
1945 field name, type, time information, qualified name, ...
1947 split = gb.split(':')
1948 field_type = self._fields[split[0]].type
1949 gb_function = split[1] if len(split) == 2 else None
1950 temporal = field_type in ('date', 'datetime')
1951 tz_convert = field_type == 'datetime' and context.get('tz') in pytz.all_timezones
1952 qualified_field = self._inherits_join_calc(split[0], query)
1955 # Careful with week/year formats:
1956 # - yyyy (lower) must always be used, *except* for week+year formats
1957 # - YYYY (upper) must always be used for week+year format
1958 # e.g. 2006-01-01 is W52 2005 in some locales (de_DE),
1959 # and W1 2006 for others
1961 # Mixing both formats, e.g. 'MMM YYYY' would yield wrong results,
1962 # such as 2006-01-01 being formatted as "January 2005" in some locales.
1963 # Cfr: http://babel.pocoo.org/docs/dates/#date-fields
1964 'day': 'dd MMM yyyy', # yyyy = normal year
1965 'week': "'W'w YYYY", # w YYYY = ISO week-year
1966 'month': 'MMMM yyyy',
1967 'quarter': 'QQQ yyyy',
1971 'day': dateutil.relativedelta.relativedelta(days=1),
1972 'week': datetime.timedelta(days=7),
1973 'month': dateutil.relativedelta.relativedelta(months=1),
1974 'quarter': dateutil.relativedelta.relativedelta(months=3),
1975 'year': dateutil.relativedelta.relativedelta(years=1)
1978 qualified_field = "timezone('%s', timezone('UTC',%s))" % (context.get('tz', 'UTC'), qualified_field)
1979 qualified_field = "date_trunc('%s', %s)" % (gb_function or 'month', qualified_field)
1980 if field_type == 'boolean':
1981 qualified_field = "coalesce(%s,false)" % qualified_field
1986 'display_format': display_formats[gb_function or 'month'] if temporal else None,
1987 'interval': time_intervals[gb_function or 'month'] if temporal else None,
1988 'tz_convert': tz_convert,
1989 'qualified_field': qualified_field
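# Illustration (assumed values, not from this file): for a groupby spec such as
# 'date_order:month' on a datetime field, the returned annotation is roughly
#
#     {'field': 'date_order', 'groupby': 'date_order:month', 'type': 'datetime',
#      'display_format': 'MMMM yyyy',
#      'interval': dateutil.relativedelta.relativedelta(months=1),
#      'tz_convert': True,
#      'qualified_field': "date_trunc('month', timezone('UTC', ...))"}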
1992 def _read_group_prepare_data(self, key, value, groupby_dict, context):
1994 Helper method to sanitize the data received by read_group. The None
1995 values are converted to False, and the date/datetime values are
1996 formatted and corrected according to the timezone.
1998 value = False if value is None else value
1999 gb = groupby_dict.get(key)
2000 if gb and gb['type'] in ('date', 'datetime') and value:
2001 if isinstance(value, basestring):
2002 dt_format = DEFAULT_SERVER_DATETIME_FORMAT if gb['type'] == 'datetime' else DEFAULT_SERVER_DATE_FORMAT
2003 value = datetime.datetime.strptime(value, dt_format)
2004 if gb['tz_convert']:
2005 value = pytz.timezone(context['tz']).localize(value)
2008 def _read_group_get_domain(self, groupby, value):
2010 Helper method to construct the domain corresponding to a groupby and
2011 a given value. This is mostly relevant for date/datetime.
2013 if groupby['type'] in ('date', 'datetime') and value:
2014 dt_format = DEFAULT_SERVER_DATETIME_FORMAT if groupby['type'] == 'datetime' else DEFAULT_SERVER_DATE_FORMAT
2015 domain_dt_begin = value
2016 domain_dt_end = value + groupby['interval']
2017 if groupby['tz_convert']:
2018 domain_dt_begin = domain_dt_begin.astimezone(pytz.utc)
2019 domain_dt_end = domain_dt_end.astimezone(pytz.utc)
2020 return [(groupby['field'], '>=', domain_dt_begin.strftime(dt_format)),
2021 (groupby['field'], '<', domain_dt_end.strftime(dt_format))]
2022 if groupby['type'] == 'many2one' and value:
2024 return [(groupby['field'], '=', value)]
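# Illustration (hypothetical field name and dates): for a 'date_order:month'
# group whose value starts on 2015-03-01, the generated domain restricts the
# records to that month, e.g.
#
#     [('date_order', '>=', '2015-03-01 00:00:00'),
#      ('date_order', '<', '2015-04-01 00:00:00')]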
2026 def _read_group_format_result(self, data, annotated_groupbys, groupby, groupby_dict, domain, context):
2028 Helper method to format the data contained in the dictionary data by
2029 adding the domain corresponding to its values, the groupbys in the
2030 context and by properly formatting the date/datetime values.
2032 domain_group = [dom for gb in annotated_groupbys for dom in self._read_group_get_domain(gb, data[gb['groupby']])]
2033 for k,v in data.iteritems():
2034 gb = groupby_dict.get(k)
2035 if gb and gb['type'] in ('date', 'datetime') and v:
2036 data[k] = babel.dates.format_date(v, format=gb['display_format'], locale=context.get('lang', 'en_US'))
2038 data['__domain'] = domain_group + domain
2039 if len(groupby) - len(annotated_groupbys) >= 1:
2040 data['__context'] = { 'group_by': groupby[len(annotated_groupbys):]}
2044 def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False, lazy=True):
2046 Get the list of records in list view grouped by the given ``groupby`` fields
2048 :param cr: database cursor
2049 :param uid: current user id
2050 :param domain: list specifying search criteria [['field_name', 'operator', 'value'], ...]
2051 :param list fields: list of fields present in the list view specified on the object
2052 :param list groupby: list of groupby descriptions by which the records will be grouped.
2053 A groupby description is either a field (then it will be grouped by that field)
2054 or a string 'field:groupby_function'. Right now, the only functions supported
2055 are 'day', 'week', 'month', 'quarter' or 'year', and they only make sense for
2056 date/datetime fields.
2057 :param int offset: optional number of records to skip
2058 :param int limit: optional max number of records to return
2059 :param dict context: context arguments, like lang, time zone.
2060 :param list orderby: optional ``order by`` specification, for
2061 overriding the natural sort ordering of the
2062 groups, see also :py:meth:`~osv.osv.osv.search`
2063 (supported only for many2one fields currently)
2064 :param bool lazy: if true, the results are only grouped by the first groupby and the
2065 remaining groupbys are put in the __context key. If false, all the groupbys are done in one call.
2067 :return: list of dictionaries (one dictionary for each record) containing:
2069 * the values of fields grouped by the fields in ``groupby`` argument
2070 * __domain: list of tuples specifying the search criteria
2071 * __context: dictionary with argument like ``groupby``
2072 :rtype: [{'field_name_1': value, ...}, ...]
2073 :raise AccessError: * if user has no read rights on the requested object
2074 * if user tries to bypass access rules for read on the requested object
2078 self.check_access_rights(cr, uid, 'read')
2079 query = self._where_calc(cr, uid, domain, context=context)
2080 fields = fields or self._columns.keys()
2082 groupby = [groupby] if isinstance(groupby, basestring) else groupby
2083 groupby_list = groupby[:1] if lazy else groupby
2084 annotated_groupbys = [self._read_group_process_groupby(gb, query, context)
2085 for gb in groupby_list]
2086 groupby_fields = [g['field'] for g in annotated_groupbys]
2087 order = orderby or ','.join([g for g in groupby_list])
2088 groupby_dict = {gb['groupby']: gb for gb in annotated_groupbys}
2090 self._apply_ir_rules(cr, uid, query, 'read', context=context)
2091 for gb in groupby_fields:
2092 assert gb in fields, "Fields in 'groupby' must appear in the list of fields to read (perhaps it's missing in the list view?)"
2093 groupby_def = self._columns.get(gb) or (self._inherit_fields.get(gb) and self._inherit_fields.get(gb)[2])
2094 assert groupby_def and groupby_def._classic_write, "Fields in 'groupby' must be regular database-persisted fields (no function or related fields), or function fields with store=True"
2095 if not (gb in self._fields):
2096 # Don't allow arbitrary values, as this would be a SQL injection vector!
2097 raise except_orm(_('Invalid group_by'),
2098 _('Invalid group_by specification: "%s".\nA group_by specification must be a list of valid fields.')%(gb,))
2100 aggregated_fields = [
2102 if f not in ('id', 'sequence')
2103 if f not in groupby_fields
2104 if f in self._fields
2105 if self._fields[f].type in ('integer', 'float')
2106 if getattr(self._fields[f].base_field.column, '_classic_write')
2109 field_formatter = lambda f: (self._fields[f].group_operator or 'sum', self._inherits_join_calc(f, query), f)
2110 select_terms = ["%s(%s) AS %s" % field_formatter(f) for f in aggregated_fields]
2112 for gb in annotated_groupbys:
2113 select_terms.append('%s as "%s" ' % (gb['qualified_field'], gb['groupby']))
2115 groupby_terms, orderby_terms = self._read_group_prepare(order, aggregated_fields, annotated_groupbys, query)
2116 from_clause, where_clause, where_clause_params = query.get_sql()
2117 if lazy and (len(groupby_fields) >= 2 or not context.get('group_by_no_leaf')):
2118 count_field = groupby_fields[0] if len(groupby_fields) >= 1 else '_'
2121 count_field += '_count'
2123 prefix_terms = lambda prefix, terms: (prefix + " " + ",".join(terms)) if terms else ''
2124 prefix_term = lambda prefix, term: ('%s %s' % (prefix, term)) if term else ''
2127 SELECT min(%(table)s.id) AS id, count(%(table)s.id) AS %(count_field)s %(extra_fields)s
2135 'table': self._table,
2136 'count_field': count_field,
2137 'extra_fields': prefix_terms(',', select_terms),
2138 'from': from_clause,
2139 'where': prefix_term('WHERE', where_clause),
2140 'groupby': prefix_terms('GROUP BY', groupby_terms),
2141 'orderby': prefix_terms('ORDER BY', orderby_terms),
2142 'limit': prefix_term('LIMIT', int(limit) if limit else None),
2143 'offset': prefix_term('OFFSET', int(offset) if limit else None),
2145 cr.execute(query, where_clause_params)
2146 fetched_data = cr.dictfetchall()
2148 if not groupby_fields:
2151 many2onefields = [gb['field'] for gb in annotated_groupbys if gb['type'] == 'many2one']
2153 data_ids = [r['id'] for r in fetched_data]
2154 many2onefields = list(set(many2onefields))
2155 data_dict = {d['id']: d for d in self.read(cr, uid, data_ids, many2onefields, context=context)}
2156 for d in fetched_data:
2157 d.update(data_dict[d['id']])
2159 data = map(lambda r: {k: self._read_group_prepare_data(k,v, groupby_dict, context) for k,v in r.iteritems()}, fetched_data)
2160 result = [self._read_group_format_result(d, annotated_groupbys, groupby, groupby_dict, domain, context) for d in data]
2161 if lazy and groupby_fields[0] in self._group_by_full:
2162 # Right now, read_group only fills results in lazy mode (by default).
2163 # If you need to have the empty groups in 'eager' mode, then the
2164 # method _read_group_fill_results needs to be completely reimplemented.
2166 result = self._read_group_fill_results(cr, uid, domain, groupby_fields[0], groupby[len(annotated_groupbys):],
2167 aggregated_fields, count_field, result, read_group_order=order,
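# Illustrative usage sketch, not part of the original module: grouping records
# by month and summing a numeric field, assuming a 'sale.order' model with
# 'date_order' and 'amount_total' fields.
#
#     groups = self.pool['sale.order'].read_group(
#         cr, uid, [('state', '=', 'done')], ['amount_total'],
#         ['date_order:month'], context=context)
#     # each group carries 'amount_total', 'date_order_count', '__domain', ...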
2171 def _inherits_join_add(self, current_model, parent_model_name, query):
2173 Add missing table SELECT and JOIN clause to ``query`` for reaching the parent table (no duplicates)
2174 :param current_model: current model object
2175 :param parent_model_name: name of the parent model for which the clauses should be added
2176 :param query: query object on which the JOIN should be added
2178 inherits_field = current_model._inherits[parent_model_name]
2179 parent_model = self.pool[parent_model_name]
2180 parent_alias, parent_alias_statement = query.add_join((current_model._table, parent_model._table, inherits_field, 'id', inherits_field), implicit=True)
2183 def _inherits_join_calc(self, field, query):
2185 Adds missing table select and join clause(s) to ``query`` for reaching
2186 the field coming from an '_inherits' parent table (no duplicates).
2188 :param field: name of inherited field to reach
2189 :param query: query object on which the JOIN should be added
2190 :return: qualified name of field, to be used in SELECT clause
2192 current_table = self
2193 parent_alias = '"%s"' % current_table._table
2194 while field in current_table._inherit_fields and not field in current_table._columns:
2195 parent_model_name = current_table._inherit_fields[field][0]
2196 parent_table = self.pool[parent_model_name]
2197 parent_alias = self._inherits_join_add(current_table, parent_model_name, query)
2198 current_table = parent_table
2199 return '%s."%s"' % (parent_alias, field)
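# Illustration (assumed models): on a model such as 'product.product', which
# _inherits from 'product.template' via 'product_tmpl_id', asking for the
# inherited 'name' field would add the corresponding JOIN to `query` and return
# a qualified name similar to
#
#     '"product_template"."name"'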
2201 def _parent_store_compute(self, cr):
2202 if not self._parent_store:
2204 _logger.info('Computing parent left and right for table %s...', self._table)
2205 def browse_rec(root, pos=0):
2207 where = self._parent_name+'='+str(root)
2209 where = self._parent_name+' IS NULL'
2210 if self._parent_order:
2211 where += ' order by '+self._parent_order
2212 cr.execute('SELECT id FROM '+self._table+' WHERE '+where)
2214 for id in cr.fetchall():
2215 pos2 = browse_rec(id[0], pos2)
2216 cr.execute('update '+self._table+' set parent_left=%s, parent_right=%s where id=%s', (pos, pos2, root))
2218 query = 'SELECT id FROM '+self._table+' WHERE '+self._parent_name+' IS NULL'
2219 if self._parent_order:
2220 query += ' order by ' + self._parent_order
2223 for (root,) in cr.fetchall():
2224 pos = browse_rec(root, pos)
2225 self.invalidate_cache(cr, SUPERUSER_ID, ['parent_left', 'parent_right'])
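# Note (general background, not specific to this file): parent_left/parent_right
# implement a nested-set encoding of the hierarchy, so that 'child_of' searches
# can be answered with a single range condition instead of a recursive query,
# conceptually:
#
#     SELECT id FROM <table>
#      WHERE parent_left >= <root.parent_left> AND parent_left < <root.parent_right>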
2228 def _update_store(self, cr, f, k):
2229 _logger.info("storing computed values of fields.function '%s'", k)
2230 ss = self._columns[k]._symbol_set
2231 update_query = 'UPDATE "%s" SET "%s"=%s WHERE id=%%s' % (self._table, k, ss[0])
2232 cr.execute('select id from '+self._table)
2233 ids_lst = map(lambda x: x[0], cr.fetchall())
2235 iids = ids_lst[:AUTOINIT_RECALCULATE_STORED_FIELDS]
2236 ids_lst = ids_lst[AUTOINIT_RECALCULATE_STORED_FIELDS:]
2237 res = f.get(cr, self, iids, k, SUPERUSER_ID, {})
2238 for key, val in res.items():
2241 # if val is a many2one, just write the ID
2242 if type(val) == tuple:
2244 if val is not False:
2245 cr.execute(update_query, (ss[1](val), key))
2248 def _check_selection_field_value(self, field, value):
2249 """ Check whether value is among the valid values for the given
2250 selection/reference field, and raise an exception if not.
2252 field = self._fields[field]
2253 field.convert_to_cache(value, self)
2255 def _check_removed_columns(self, cr, log=False):
2256 # iterate on the database columns to drop the NOT NULL constraints
2257 # of fields which were required but have been removed (or will be added by another module)
2258 columns = [c for c in self._columns if not (isinstance(self._columns[c], fields.function) and not self._columns[c].store)]
2259 columns += MAGIC_COLUMNS
2260 cr.execute("SELECT a.attname, a.attnotnull"
2261 " FROM pg_class c, pg_attribute a"
2262 " WHERE c.relname=%s"
2263 " AND c.oid=a.attrelid"
2264 " AND a.attisdropped=%s"
2265 " AND pg_catalog.format_type(a.atttypid, a.atttypmod) NOT IN ('cid', 'tid', 'oid', 'xid')"
2266 " AND a.attname NOT IN %s", (self._table, False, tuple(columns))),
2268 for column in cr.dictfetchall():
2270 _logger.debug("column %s is in the table %s but not in the corresponding object %s",
2271 column['attname'], self._table, self._name)
2272 if column['attnotnull']:
2273 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, column['attname']))
2274 _schema.debug("Table '%s': column '%s': dropped NOT NULL constraint",
2275 self._table, column['attname'])
2277 def _save_constraint(self, cr, constraint_name, type):
2279 Record the creation of a constraint for this model, to make it possible
2280 to delete it later when the module is uninstalled. Type can be either
2281 'f' or 'u' depending on the constraint being a foreign key or not.
2283 if not self._module:
2284 # no need to save constraints for custom models as they're not part of a module
2287 assert type in ('f', 'u')
2289 SELECT 1 FROM ir_model_constraint, ir_module_module
2290 WHERE ir_model_constraint.module=ir_module_module.id
2291 AND ir_model_constraint.name=%s
2292 AND ir_module_module.name=%s
2293 """, (constraint_name, self._module))
2296 INSERT INTO ir_model_constraint
2297 (name, date_init, date_update, module, model, type)
2298 VALUES (%s, now() AT TIME ZONE 'UTC', now() AT TIME ZONE 'UTC',
2299 (SELECT id FROM ir_module_module WHERE name=%s),
2300 (SELECT id FROM ir_model WHERE model=%s), %s)""",
2301 (constraint_name, self._module, self._name, type))
2303 def _save_relation_table(self, cr, relation_table):
2305 Record the creation of a many2many for this model, to make it possible
2306 to delete it later when the module is uninstalled.
2309 SELECT 1 FROM ir_model_relation, ir_module_module
2310 WHERE ir_model_relation.module=ir_module_module.id
2311 AND ir_model_relation.name=%s
2312 AND ir_module_module.name=%s
2313 """, (relation_table, self._module))
2315 cr.execute("""INSERT INTO ir_model_relation (name, date_init, date_update, module, model)
2316 VALUES (%s, now() AT TIME ZONE 'UTC', now() AT TIME ZONE 'UTC',
2317 (SELECT id FROM ir_module_module WHERE name=%s),
2318 (SELECT id FROM ir_model WHERE model=%s))""",
2319 (relation_table, self._module, self._name))
2320 self.invalidate_cache(cr, SUPERUSER_ID)
2322 # checked version: for direct m2o starting from `self`
2323 def _m2o_add_foreign_key_checked(self, source_field, dest_model, ondelete):
2324 assert self.is_transient() or not dest_model.is_transient(), \
2325 'Many2One relationships from non-transient Model to TransientModel are forbidden'
2326 if self.is_transient() and not dest_model.is_transient():
2327 # TransientModel relationships to regular Models are annoying
2328 # usually because they could block deletion due to the FKs.
2329 # So unless stated otherwise we default them to ondelete=cascade.
2330 ondelete = ondelete or 'cascade'
2331 fk_def = (self._table, source_field, dest_model._table, ondelete or 'set null')
2332 self._foreign_keys.add(fk_def)
2333 _schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s", *fk_def)
2335 # unchecked version: for custom cases, such as m2m relationships
2336 def _m2o_add_foreign_key_unchecked(self, source_table, source_field, dest_model, ondelete):
2337 fk_def = (source_table, source_field, dest_model._table, ondelete or 'set null')
2338 self._foreign_keys.add(fk_def)
2339 _schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s", *fk_def)
2341 def _drop_constraint(self, cr, source_table, constraint_name):
2342 cr.execute("ALTER TABLE %s DROP CONSTRAINT %s" % (source_table,constraint_name))
2344 def _m2o_fix_foreign_key(self, cr, source_table, source_field, dest_model, ondelete):
2345 # Find FK constraint(s) currently established for the m2o field,
2346 # and see whether they are stale or not
2347 cr.execute("""SELECT confdeltype as ondelete_rule, conname as constraint_name,
2348 cl2.relname as foreign_table
2349 FROM pg_constraint as con, pg_class as cl1, pg_class as cl2,
2350 pg_attribute as att1, pg_attribute as att2
2351 WHERE con.conrelid = cl1.oid
2352 AND cl1.relname = %s
2353 AND con.confrelid = cl2.oid
2354 AND array_lower(con.conkey, 1) = 1
2355 AND con.conkey[1] = att1.attnum
2356 AND att1.attrelid = cl1.oid
2357 AND att1.attname = %s
2358 AND array_lower(con.confkey, 1) = 1
2359 AND con.confkey[1] = att2.attnum
2360 AND att2.attrelid = cl2.oid
2361 AND att2.attname = %s
2362 AND con.contype = 'f'""", (source_table, source_field, 'id'))
2363 constraints = cr.dictfetchall()
2365 if len(constraints) == 1:
2366 # Is it the right constraint?
2368 if cons['ondelete_rule'] != POSTGRES_CONFDELTYPES.get((ondelete or 'set null').upper(), 'a')\
2369 or cons['foreign_table'] != dest_model._table:
2370 # Wrong FK: drop it and recreate
2371 _schema.debug("Table '%s': dropping obsolete FK constraint: '%s'",
2372 source_table, cons['constraint_name'])
2373 self._drop_constraint(cr, source_table, cons['constraint_name'])
2375 # it's all good, nothing to do!
2378 # Multiple FKs found for the same field, drop them all, and re-create
2379 for cons in constraints:
2380 _schema.debug("Table '%s': dropping duplicate FK constraints: '%s'",
2381 source_table, cons['constraint_name'])
2382 self._drop_constraint(cr, source_table, cons['constraint_name'])
2384 # (re-)create the FK
2385 self._m2o_add_foreign_key_checked(source_field, dest_model, ondelete)
2388 def _set_default_value_on_column(self, cr, column_name, context=None):
2389 # ideally, we should use default_get(), but it fails due to ir.values
2393 default = self._defaults.get(column_name)
2394 if callable(default):
2395 default = default(self, cr, SUPERUSER_ID, context)
2397 column = self._columns[column_name]
2398 ss = column._symbol_set
2399 db_default = ss[1](default)
2400 # Write default if non-NULL, except for booleans for which False means
2401 # the same as NULL - this saves us an expensive query on large tables.
2402 write_default = (db_default is not None if column._type != 'boolean'
2405 _logger.debug("Table '%s': setting default value of new column %s to %r",
2406 self._table, column_name, default)
2407 query = 'UPDATE "%s" SET "%s"=%s WHERE "%s" is NULL' % (
2408 self._table, column_name, ss[0], column_name)
2409 cr.execute(query, (db_default,))
2410 # this is a disgrace
2413 def _auto_init(self, cr, context=None):
2416 Call _field_create and, unless _auto is False:
2418 - create the corresponding table in database for the model,
2419 - possibly add the parent columns in database,
2420 - possibly add the columns 'create_uid', 'create_date', 'write_uid',
2421 'write_date' in database if _log_access is True (the default),
2422 - report on database columns that no longer exist in _columns,
2423 - drop NOT NULL constraints that no longer apply,
2424 - alter existing database columns to match _columns,
2425 - create database tables to match _columns,
2426 - add database indices to match _columns,
2427 - save in self._foreign_keys a list of foreign keys to create (see
2431 self._foreign_keys = set()
2432 raise_on_invalid_object_name(self._name)
2435 store_compute = False
2436 stored_fields = [] # new-style stored fields with compute
2438 update_custom_fields = context.get('update_custom_fields', False)
2439 self._field_create(cr, context=context)
2440 create = not self._table_exist(cr)
2444 self._create_table(cr)
2447 cr.execute('SELECT 1 FROM "%s" LIMIT 1' % self._table)
2448 has_rows = cr.rowcount
2451 if self._parent_store:
2452 if not self._parent_columns_exist(cr):
2453 self._create_parent_columns(cr)
2454 store_compute = True
2456 self._check_removed_columns(cr, log=False)
2458 # iterate on the "object columns"
2459 column_data = self._select_column_data(cr)
2461 for k, f in self._columns.iteritems():
2462 if k == 'id': # FIXME: maybe id should be a regular column?
2464 # Don't update custom (also called manual) fields
2465 if f.manual and not update_custom_fields:
2468 if isinstance(f, fields.one2many):
2469 self._o2m_raise_on_missing_reference(cr, f)
2471 elif isinstance(f, fields.many2many):
2472 self._m2m_raise_or_create_relation(cr, f)
2475 res = column_data.get(k)
2477 # The field is not found as-is in database; check whether it
2478 # exists under an old name.
2479 if not res and hasattr(f, 'oldname'):
2480 res = column_data.get(f.oldname)
2482 cr.execute('ALTER TABLE "%s" RENAME "%s" TO "%s"' % (self._table, f.oldname, k))
2484 column_data[k] = res
2485 _schema.debug("Table '%s': renamed column '%s' to '%s'",
2486 self._table, f.oldname, k)
2488 # The field already exists in database. Possibly
2489 # change its type, rename it, drop it or change its
2492 f_pg_type = res['typname']
2493 f_pg_size = res['size']
2494 f_pg_notnull = res['attnotnull']
2495 if isinstance(f, fields.function) and not f.store and\
2496 not getattr(f, 'nodrop', False):
2497 _logger.info('column %s (%s) converted to a function, removed from table %s',
2498 k, f.string, self._table)
2499 cr.execute('ALTER TABLE "%s" DROP COLUMN "%s" CASCADE' % (self._table, k))
2501 _schema.debug("Table '%s': dropped column '%s' with cascade",
2505 f_obj_type = get_pg_type(f) and get_pg_type(f)[0]
2510 ('text', 'char', pg_varchar(f.size), '::%s' % pg_varchar(f.size)),
2511 ('varchar', 'text', 'TEXT', ''),
2512 ('int4', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
2513 ('date', 'datetime', 'TIMESTAMP', '::TIMESTAMP'),
2514 ('timestamp', 'date', 'date', '::date'),
2515 ('numeric', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
2516 ('float8', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
2518 if f_pg_type == 'varchar' and f._type == 'char' and f_pg_size and (f.size is None or f_pg_size < f.size):
2520 with cr.savepoint():
2521 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" TYPE %s' % (self._table, k, pg_varchar(f.size)))
2522 except psycopg2.NotSupportedError:
2523 # An in-place ALTER TABLE cannot be done because a view depends on this field.
2524 # Do a manual copy. This will drop the view (it will be recreated later).
2525 cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO temp_change_size' % (self._table, k))
2526 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, pg_varchar(f.size)))
2527 cr.execute('UPDATE "%s" SET "%s"=temp_change_size::%s' % (self._table, k, pg_varchar(f.size)))
2528 cr.execute('ALTER TABLE "%s" DROP COLUMN temp_change_size CASCADE' % (self._table,))
2530 _schema.debug("Table '%s': column '%s' (type varchar) changed size from %s to %s",
2531 self._table, k, f_pg_size or 'unlimited', f.size or 'unlimited')
2533 if (f_pg_type==c[0]) and (f._type==c[1]):
2534 if f_pg_type != f_obj_type:
2536 cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO __temp_type_cast' % (self._table, k))
2537 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, c[2]))
2538 cr.execute(('UPDATE "%s" SET "%s"= __temp_type_cast'+c[3]) % (self._table, k))
2539 cr.execute('ALTER TABLE "%s" DROP COLUMN __temp_type_cast CASCADE' % (self._table,))
2541 _schema.debug("Table '%s': column '%s' changed type from %s to %s",
2542 self._table, k, c[0], c[1])
2545 if f_pg_type != f_obj_type:
2549 newname = k + '_moved' + str(i)
2550 cr.execute("SELECT count(1) FROM pg_class c,pg_attribute a " \
2551 "WHERE c.relname=%s " \
2552 "AND a.attname=%s " \
2553 "AND c.oid=a.attrelid ", (self._table, newname))
2554 if not cr.fetchone()[0]:
2558 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, k))
2559 cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO "%s"' % (self._table, k, newname))
2560 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, get_pg_type(f)[1]))
2561 cr.execute("COMMENT ON COLUMN %s.\"%s\" IS %%s" % (self._table, k), (f.string,))
2562 _schema.debug("Table '%s': column '%s' has changed type (DB=%s, def=%s), data moved to column %s !",
2563 self._table, k, f_pg_type, f._type, newname)
2565 # if the field is required and hasn't got a NOT NULL constraint
2566 if f.required and f_pg_notnull == 0:
2568 self._set_default_value_on_column(cr, k, context=context)
2569 # add the NOT NULL constraint
2571 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k), log_exceptions=False)
2573 _schema.debug("Table '%s': column '%s': added NOT NULL constraint",
2576 msg = "Table '%s': unable to set a NOT NULL constraint on column '%s' !\n"\
2577 "If you want to have it, you should update the records and execute manually:\n"\
2578 "ALTER TABLE %s ALTER COLUMN %s SET NOT NULL"
2579 _schema.warning(msg, self._table, k, self._table, k)
2581 elif not f.required and f_pg_notnull == 1:
2582 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, k))
2584 _schema.debug("Table '%s': column '%s': dropped NOT NULL constraint",
2587 indexname = '%s_%s_index' % (self._table, k)
2588 cr.execute("SELECT indexname FROM pg_indexes WHERE indexname = %s and tablename = %s", (indexname, self._table))
2589 res2 = cr.dictfetchall()
2590 if not res2 and f.select:
2591 cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (self._table, k, self._table, k))
2593 if f._type == 'text':
2594 # FIXME: for fields.text columns we should try creating GIN indexes instead (seems most suitable for an ERP context)
2595 msg = "Table '%s': Adding (b-tree) index for %s column '%s'."\
2596 "This is probably useless (does not work for fulltext search) and prevents INSERTs of long texts"\
2597 " because there is a length limit for indexable btree values!\n"\
2598 "Use a search view instead if you simply want to make the field searchable."
2599 _schema.warning(msg, self._table, f._type, k)
2600 if res2 and not f.select:
2601 cr.execute('DROP INDEX "%s_%s_index"' % (self._table, k))
2603 msg = "Table '%s': dropping index for column '%s' of type '%s' as it is not required anymore"
2604 _schema.debug(msg, self._table, k, f._type)
2606 if isinstance(f, fields.many2one) or (isinstance(f, fields.function) and f._type == 'many2one' and f.store):
2607 dest_model = self.pool[f._obj]
2608 if dest_model._auto and dest_model._table != 'ir_actions':
2609 self._m2o_fix_foreign_key(cr, self._table, k, dest_model, f.ondelete)
2611 # The field doesn't exist in database. Create it if necessary.
2613 if not isinstance(f, fields.function) or f.store:
2614 # add the missing field
2615 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, get_pg_type(f)[1]))
2616 cr.execute("COMMENT ON COLUMN %s.\"%s\" IS %%s" % (self._table, k), (f.string,))
2617 _schema.debug("Table '%s': added column '%s' with definition=%s",
2618 self._table, k, get_pg_type(f)[1])
2622 self._set_default_value_on_column(cr, k, context=context)
2624 # remember the functions to call for the stored fields
2625 if isinstance(f, fields.function):
2627 if f.store is not True: # i.e. if f.store is a dict
2628 order = f.store[f.store.keys()[0]][2]
2629 todo_end.append((order, self._update_store, (f, k)))
2631 # remember new-style stored fields with compute method
2632 if k in self._fields and self._fields[k].depends:
2633 stored_fields.append(self._fields[k])
2635 # and add constraints if needed
2636 if isinstance(f, fields.many2one) or (isinstance(f, fields.function) and f._type == 'many2one' and f.store):
2637 if f._obj not in self.pool:
2638 raise except_orm('Programming Error', 'There is no reference available for %s' % (f._obj,))
2639 dest_model = self.pool[f._obj]
2640 ref = dest_model._table
2641 # ir_actions is inherited so foreign key doesn't work on it
2642 if dest_model._auto and ref != 'ir_actions':
2643 self._m2o_add_foreign_key_checked(k, dest_model, f.ondelete)
2645 cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (self._table, k, self._table, k))
2649 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k))
2650 _schema.debug("Table '%s': column '%s': added a NOT NULL constraint",
2653 msg = "WARNING: unable to set column %s of table %s not null !\n"\
2654 "Try to re-run: openerp-server --update=module\n"\
2655 "If it doesn't work, update records and execute manually:\n"\
2656 "ALTER TABLE %s ALTER COLUMN %s SET NOT NULL"
2657 _logger.warning(msg, k, self._table, self._table, k, exc_info=True)
2661 cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (self._table,))
2662 create = not bool(cr.fetchone())
2664 cr.commit() # start a new transaction
2667 self._add_sql_constraints(cr)
2670 self._execute_sql(cr)
2673 self._parent_store_compute(cr)
2677 # trigger computation of new-style stored fields with a compute method
2679 _logger.info("Storing computed values of %s fields %s",
2680 self._name, ', '.join(sorted(f.name for f in stored_fields)))
2681 recs = self.browse(cr, SUPERUSER_ID, [], {'active_test': False})
2682 recs = recs.search([])
2684 map(recs._recompute_todo, stored_fields)
2687 todo_end.append((1000, func, ()))
2691 def _auto_end(self, cr, context=None):
2692 """ Create the foreign keys recorded by _auto_init. """
2693 for t, k, r, d in self._foreign_keys:
2694 cr.execute('ALTER TABLE "%s" ADD FOREIGN KEY ("%s") REFERENCES "%s" ON DELETE %s' % (t, k, r, d))
2695 self._save_constraint(cr, "%s_%s_fkey" % (t, k), 'f')
2697 del self._foreign_keys
2700 def _table_exist(self, cr):
2701 cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (self._table,))
2705 def _create_table(self, cr):
2706 cr.execute('CREATE TABLE "%s" (id SERIAL NOT NULL, PRIMARY KEY(id))' % (self._table,))
2707 cr.execute(("COMMENT ON TABLE \"%s\" IS %%s" % self._table), (self._description,))
2708 _schema.debug("Table '%s': created", self._table)
2711 def _parent_columns_exist(self, cr):
2712 cr.execute("""SELECT c.relname
2713 FROM pg_class c, pg_attribute a
2714 WHERE c.relname=%s AND a.attname=%s AND c.oid=a.attrelid
2715 """, (self._table, 'parent_left'))
2719 def _create_parent_columns(self, cr):
2720 cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_left" INTEGER' % (self._table,))
2721 cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_right" INTEGER' % (self._table,))
2722 if 'parent_left' not in self._columns:
2723 _logger.error('create a column parent_left on object %s: fields.integer(\'Left Parent\', select=1)',
2725 _schema.debug("Table '%s': added column '%s' with definition=%s",
2726 self._table, 'parent_left', 'INTEGER')
2727 elif not self._columns['parent_left'].select:
2728 _logger.error('parent_left column on object %s must be indexed! Add select=1 to the field definition)',
2730 if 'parent_right' not in self._columns:
2731 _logger.error('create a column parent_right on object %s: fields.integer(\'Right Parent\', select=1)',
2733 _schema.debug("Table '%s': added column '%s' with definition=%s",
2734 self._table, 'parent_right', 'INTEGER')
2735 elif not self._columns['parent_right'].select:
2736 _logger.error('parent_right column on object %s must be indexed! Add select=1 to the field definition)',
2738 if self._columns[self._parent_name].ondelete not in ('cascade', 'restrict'):
2739 _logger.error("The column %s on object %s must be set as ondelete='cascade' or 'restrict'",
2740 self._parent_name, self._name)
2745 def _select_column_data(self, cr):
2746 # attlen is the number of bytes necessary to represent the type when
2747 # the type has a fixed size. If the type has a varying size attlen is
2748 # -1 and atttypmod is the size limit + 4, or -1 if there is no limit.
2749 cr.execute("SELECT c.relname,a.attname,a.attlen,a.atttypmod,a.attnotnull,a.atthasdef,t.typname,CASE WHEN a.attlen=-1 THEN (CASE WHEN a.atttypmod=-1 THEN 0 ELSE a.atttypmod-4 END) ELSE a.attlen END as size " \
2750 "FROM pg_class c,pg_attribute a,pg_type t " \
2751 "WHERE c.relname=%s " \
2752 "AND c.oid=a.attrelid " \
2753 "AND a.atttypid=t.oid", (self._table,))
2754 return dict(map(lambda x: (x['attname'], x),cr.dictfetchall()))
2757 def _o2m_raise_on_missing_reference(self, cr, f):
2758 # TODO this check should be a method on fields.one2many.
2759 if f._obj in self.pool:
2760 other = self.pool[f._obj]
2761 # TODO the condition could use fields_get_keys().
2762 if f._fields_id not in other._columns.keys():
2763 if f._fields_id not in other._inherit_fields.keys():
2764 raise except_orm('Programming Error', "There is no reference field '%s' found for '%s'" % (f._fields_id, f._obj,))
2766 def _m2m_raise_or_create_relation(self, cr, f):
2767 m2m_tbl, col1, col2 = f._sql_names(self)
2768 # do not create relations for custom fields as they do not belong to a module
2769 # they will be automatically removed when dropping the corresponding ir.model.field
2770 # table names for custom relations all start with x_, see __init__
2771 if not m2m_tbl.startswith('x_'):
2772 self._save_relation_table(cr, m2m_tbl)
2773 cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (m2m_tbl,))
2774 if not cr.dictfetchall():
2775 if f._obj not in self.pool:
2776 raise except_orm('Programming Error', 'Many2Many destination model does not exist: `%s`' % (f._obj,))
2777 dest_model = self.pool[f._obj]
2778 ref = dest_model._table
2779 cr.execute('CREATE TABLE "%s" ("%s" INTEGER NOT NULL, "%s" INTEGER NOT NULL, UNIQUE("%s","%s"))' % (m2m_tbl, col1, col2, col1, col2))
2780 # create foreign key references with ondelete=cascade, unless the targets are SQL views
2781 cr.execute("SELECT relkind FROM pg_class WHERE relkind IN ('v') AND relname=%s", (ref,))
2782 if not cr.fetchall():
2783 self._m2o_add_foreign_key_unchecked(m2m_tbl, col2, dest_model, 'cascade')
2784 cr.execute("SELECT relkind FROM pg_class WHERE relkind IN ('v') AND relname=%s", (self._table,))
2785 if not cr.fetchall():
2786 self._m2o_add_foreign_key_unchecked(m2m_tbl, col1, self, 'cascade')
2788 cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (m2m_tbl, col1, m2m_tbl, col1))
2789 cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (m2m_tbl, col2, m2m_tbl, col2))
2790 cr.execute("COMMENT ON TABLE \"%s\" IS 'RELATION BETWEEN %s AND %s'" % (m2m_tbl, self._table, ref))
2792 _schema.debug("Create table '%s': m2m relation between '%s' and '%s'", m2m_tbl, self._table, ref)
2795 def _add_sql_constraints(self, cr):
2798 Modify this model's database table constraints so they match the ones in _sql_constraints.
2802 def unify_cons_text(txt):
2803 return txt.lower().replace(', ',',').replace(' (','(')
2805 for (key, con, _) in self._sql_constraints:
2806 conname = '%s_%s' % (self._table, key)
2808 self._save_constraint(cr, conname, 'u')
2809 cr.execute("SELECT conname, pg_catalog.pg_get_constraintdef(oid, true) as condef FROM pg_constraint where conname=%s", (conname,))
2810 existing_constraints = cr.dictfetchall()
2814 'query': 'ALTER TABLE "%s" DROP CONSTRAINT "%s"' % (self._table, conname, ),
2815 'msg_ok': "Table '%s': dropped constraint '%s'. Reason: its definition changed from '%%s' to '%s'" % (
2816 self._table, conname, con),
2817 'msg_err': "Table '%s': unable to drop \'%s\' constraint !" % (self._table, con),
2822 'query': 'ALTER TABLE "%s" ADD CONSTRAINT "%s" %s' % (self._table, conname, con,),
2823 'msg_ok': "Table '%s': added constraint '%s' with definition=%s" % (self._table, conname, con),
2824 'msg_err': "Table '%s': unable to add \'%s\' constraint !\n If you want to have it, you should update the records and execute manually:\n%%s" % (
2830 if not existing_constraints:
2831 # constraint does not exist:
2832 sql_actions['add']['execute'] = True
2833 sql_actions['add']['msg_err'] = sql_actions['add']['msg_err'] % (sql_actions['add']['query'], )
2834 elif unify_cons_text(con) not in [unify_cons_text(item['condef']) for item in existing_constraints]:
2835 # constraint exists but its definition has changed:
2836 sql_actions['drop']['execute'] = True
2837 sql_actions['drop']['msg_ok'] = sql_actions['drop']['msg_ok'] % (existing_constraints[0]['condef'].lower(), )
2838 sql_actions['add']['execute'] = True
2839 sql_actions['add']['msg_err'] = sql_actions['add']['msg_err'] % (sql_actions['add']['query'], )
2841 # we need to add the constraint:
2842 sql_actions = [item for item in sql_actions.values()]
2843 sql_actions.sort(key=lambda x: x['order'])
2844 for sql_action in [action for action in sql_actions if action['execute']]:
2846 cr.execute(sql_action['query'])
2848 _schema.debug(sql_action['msg_ok'])
2850 _schema.warning(sql_action['msg_err'])
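# Illustrative sketch (hypothetical model, not from this file): the constraints
# reflected by _add_sql_constraints are declared on models as
# (key, SQL definition, error message) triples, e.g.
#
#     _sql_constraints = [
#         ('code_uniq', 'unique(code)', 'The code must be unique!'),
#         ('qty_positive', 'CHECK(quantity >= 0)', 'The quantity must be positive.'),
#     ]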
2854 def _execute_sql(self, cr):
2855 """ Execute the SQL code from the _sql attribute (if any)."""
2856 if hasattr(self, "_sql"):
2857 for line in self._sql.split(';'):
2858 line2 = line.replace('\n', '').strip()
2864 # Update objects that use this one to update their _inherits fields
2868 def _inherits_reload(cls):
2869 """ Recompute the _inherit_fields mapping, and inherited fields. """
2872 for parent_model, parent_field in cls._inherits.iteritems():
2873 parent = cls.pool[parent_model]
2874 # old-api struct for _inherit_fields
2875 for name, column in parent._columns.iteritems():
2876 struct[name] = (parent_model, parent_field, column, parent_model)
2877 for name, source in parent._inherit_fields.iteritems():
2878 struct[name] = (parent_model, parent_field, source[2], source[3])
2879 # new-api fields for _fields
2880 for name, field in parent._fields.iteritems():
2881 fields[name] = field.new(
2883 related=(parent_field, name),
2888 cls._inherit_fields = struct
2889 cls._all_columns = cls._get_column_infos()
2891 # add inherited fields that are not redefined locally
2892 for name, field in fields.iteritems():
2893 if name not in cls._fields:
2894 cls._add_field(name, field)
2897 def _get_column_infos(cls):
2898 """Returns a dict mapping all fields names (direct fields and
2899 inherited field via _inherits) to a ``column_info`` struct
2900 giving detailed columns """
2902 # do not invert the for loops, since local fields may hide inherited ones!
2903 for k, (parent, m2o, col, original_parent) in cls._inherit_fields.iteritems():
2904 result[k] = fields.column_info(k, col, parent, m2o, original_parent)
2905 for k, col in cls._columns.iteritems():
2906 result[k] = fields.column_info(k, col)
2910 def _inherits_check(cls):
2911 for table, field_name in cls._inherits.items():
2912 if field_name not in cls._columns:
2913 _logger.info('Missing many2one field definition for _inherits reference "%s" in "%s", using default one.', field_name, cls._name)
2914 cls._columns[field_name] = fields.many2one(table, string="Automatically created field to link to parent %s" % table,
2915 required=True, ondelete="cascade")
2916 elif not cls._columns[field_name].required or cls._columns[field_name].ondelete.lower() not in ("cascade", "restrict"):
2917 _logger.warning('Field definition for _inherits reference "%s" in "%s" must be marked as "required" with ondelete="cascade" or "restrict", forcing it to required + cascade.', field_name, cls._name)
2918 cls._columns[field_name].required = True
2919 cls._columns[field_name].ondelete = "cascade"
2921 # reflect fields with delegate=True in dictionary cls._inherits
2922 for field in cls._fields.itervalues():
2923 if field.type == 'many2one' and not field.related and field.delegate:
2924 if not field.required:
2925 _logger.warning("Field %s with delegate=True must be required.", field)
2926 field.required = True
2927 if field.ondelete.lower() not in ('cascade', 'restrict'):
2928 field.ondelete = 'cascade'
2929 cls._inherits[field.comodel_name] = field.name
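# Illustrative sketch (hypothetical field, not from this file): with the new
# API, declaring a delegated many2one is enough for it to be reflected into
# cls._inherits by the loop above, e.g. using openerp.fields:
#
#     partner_id = fields.Many2one('res.partner', required=True,
#                                  ondelete='cascade', delegate=True)
#
# which results in cls._inherits['res.partner'] = 'partner_id'.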
2932 def _prepare_setup_fields(self):
2933 """ Prepare the setup of fields once the models have been loaded. """
2934 type(self)._setup_done = False
2935 for name, field in self._fields.items():
2937 del self._fields[name]
2942 def _setup_fields(self):
2943 """ Setup the fields (dependency triggers, etc). """
2947 cls._setup_done = True
2949 # first make sure that parent models are all set up
2950 for parent in self._inherits:
2951 self.env[parent]._setup_fields()
2953 # retrieve custom fields
2954 if not self._context.get('_setup_fields_partial'):
2955 cls._init_manual_fields(self._cr)
2957 # retrieve inherited fields
2958 cls._inherits_check()
2959 cls._inherits_reload()
2962 for field in cls._fields.itervalues():
2963 field.setup(self.env)
2965 # update columns (fields may have changed)
2966 for name, field in cls._fields.iteritems():
2968 cls._columns[name] = field.to_column()
2970 # group fields by compute to determine field.computed_fields
2971 fields_by_compute = defaultdict(list)
2972 for field in cls._fields.itervalues():
2974 field.computed_fields = fields_by_compute[field.compute]
2975 field.computed_fields.append(field)
2977 field.computed_fields = []
2980 for func in cls._constraint_methods:
2981 if not all(name in cls._fields for name in func._constrains):
2982 _logger.warning("@constrains%r parameters must be field names", func._constrains)
2983 for name in cls._onchange_methods:
2984 if name not in cls._fields:
2985 func = cls._onchange_methods[name]
2986 _logger.warning("@onchange%r parameters must be field names", func._onchange)
2989 for name in cls._defaults:
2990 assert name in cls._fields, \
2991 "Model %s has a default for nonexiting field %s" % (cls._name, name)
2995 assert cls._rec_name in cls._fields, \
2996 "Invalid rec_name %s for model %s" % (cls._rec_name, cls._name)
2997 elif 'name' in cls._fields:
2998 cls._rec_name = 'name'
2999 elif 'x_name' in cls._fields:
3000 cls._rec_name = 'x_name'
3002 def fields_get(self, cr, user, allfields=None, context=None, write_access=True, attributes=None):
3003 """ fields_get([fields][, attributes])
3005 Return the definition of each field.
3007 The returned value is a dictionary (indexed by field name) of
3008 dictionaries. The _inherits'd fields are included. The string, help,
3009 and selection (if present) attributes are translated.
3011 :param allfields: list of fields to document, all if empty or not provided
3012 :param attributes: list of description attributes to return for each field, all if empty or not provided
3014 recs = self.browse(cr, user, [], context)
3016 has_access = functools.partial(recs.check_access_rights, raise_exception=False)
3017 readonly = not (has_access('write') or has_access('create'))
3020 for fname, field in self._fields.iteritems():
3021 if allfields and fname not in allfields:
3023 if not field.setup_done:
3025 if field.groups and not recs.user_has_groups(field.groups):
3028 description = field.get_description(recs.env)
3030 description['readonly'] = True
3031 description['states'] = {}
3033 description = {k: v for k, v in description.iteritems()
3035 res[fname] = description
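# Illustrative usage sketch, not part of the original module: retrieving field
# definitions for a couple of fields, assuming a 'res.partner' model.
#
#     defs = self.pool['res.partner'].fields_get(
#         cr, uid, allfields=['name', 'email'], context=context)
#     # defs['name']['type'], defs['name']['string'], ...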
3039 def get_empty_list_help(self, cr, user, help, context=None):
3040 """ Generic method giving the help message displayed when having
3041 no result to display in a list or kanban view. By default it returns
3042 the help given in parameter that is generally the help message
3043 defined in the action.
3047 def check_field_access_rights(self, cr, user, operation, fields, context=None):
3049 Check the user access rights on the given fields. This raises Access
3050 Denied if the user does not have the rights. Otherwise it returns the
3051 fields (as-is if ``fields`` is not falsy, or the readable/writable
3052 fields otherwise).
3054 if user == SUPERUSER_ID:
3055 return fields or list(self._fields)
3058 """ determine whether user has access to field `fname` """
3059 field = self._fields.get(fname)
3060 if field and field.groups:
3061 return self.user_has_groups(cr, user, groups=field.groups, context=context)
3066 fields = filter(valid, self._fields)
3068 invalid_fields = set(filter(lambda name: not valid(name), fields))
3070 _logger.warning('Access Denied by ACLs for operation: %s, uid: %s, model: %s, fields: %s',
3071 operation, user, self._name, ', '.join(invalid_fields))
3073 _('The requested operation cannot be completed due to security restrictions. '
3074 'Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
3075 (self._description, operation))
3079 # add explicit old-style implementation to read()
3081 def read(self, cr, user, ids, fields=None, context=None, load='_classic_read'):
3082 records = self.browse(cr, user, ids, context)
3083 result = BaseModel.read(records, fields, load=load)
3084 return result if isinstance(ids, list) else (bool(result) and result[0])
3086 # new-style implementation of read()
3088 def read(self, fields=None, load='_classic_read'):
3091 Reads the requested fields for the records in `self`, low-level/RPC
3092 method. In Python code, prefer :meth:`~.browse`.
3094 :param fields: list of field names to return (default is all fields)
3095 :return: a list of dictionaries mapping field names to their values,
3096 with one dictionary per record
3097 :raise AccessError: if user has no read rights on some of the given
3100 # check access rights
3101 self.check_access_rights('read')
3102 fields = self.check_field_access_rights('read', fields)
3104 # split fields into stored and computed fields
3105 stored, computed = [], []
3107 if name in self._columns:
3109 elif name in self._fields:
3110 computed.append(name)
3112 _logger.warning("%s.read() with unknown field '%s'", self._name, name)
3114 # fetch stored fields from the database to the cache
3115 self._read_from_database(stored)
3117 # retrieve results from records; this takes values from the cache and
3118 # computes remaining fields
3120 name_fields = [(name, self._fields[name]) for name in (stored + computed)]
3121 use_name_get = (load == '_classic_read')
3124 values = {'id': record.id}
3125 for name, field in name_fields:
3126 values[name] = field.convert_to_read(record[name], use_name_get)
3127 result.append(values)
3128 except MissingError:
3134 def _prefetch_field(self, field):
3135 """ Read from the database in order to fetch `field` (:class:`Field`
3136 instance) for `self` in cache.
3138 # fetch the records of this model without field_name in their cache
3139 records = self._in_cache_without(field)
3141 if len(records) > PREFETCH_MAX:
3142 records = records[:PREFETCH_MAX] | self
3144 # determine which fields can be prefetched
3145 if not self.env.in_draft and \
3146 self._context.get('prefetch_fields', True) and \
3147 self._columns[field.name]._prefetch:
3148 # prefetch all classic and many2one fields that the user can access
3150 for fname, fcolumn in self._columns.iteritems()
3151 if fcolumn._prefetch
3152 if not fcolumn.groups or self.user_has_groups(fcolumn.groups)
3155 fnames = {field.name}
3157 # important: never prefetch fields to recompute!
3158 get_recs_todo = self.env.field_todo
3159 for fname in list(fnames):
3160 if get_recs_todo(self._fields[fname]):
3161 if fname == field.name:
3162 records -= get_recs_todo(field)
3164 fnames.discard(fname)
3166 # fetch records with read()
3167 assert self in records and field.name in fnames
3170 result = records.read(list(fnames), load='_classic_write')
3174 # check the cache, and update it if necessary
3175 if not self._cache.contains(field):
3176 for values in result:
3177 record = self.browse(values.pop('id'))
3178 record._cache.update(record._convert_to_cache(values, validate=False))
3179 if not self._cache.contains(field):
3180 e = AccessError("No value found for %s.%s" % (self, field.name))
3181 self._cache[field] = FailedValue(e)
3184 def _read_from_database(self, field_names):
3185 """ Read the given fields of the records in `self` from the database,
3186 and store them in cache. Access errors are also stored in cache.
3189 cr, user, context = env.args
3191 # FIXME: The query construction needs to be rewritten using the internal Query
3192 # object, as in search(), to avoid ambiguous column references when
3193 # reading/sorting on a table that is auto_joined to another table with
3194 # common columns (e.g. the magical columns)
3196 # Construct a clause for the security rules.
3197 # 'tables' holds the list of tables necessary for the SELECT, including
3198 # the ir.rule clauses, and contains at least self._table.
3199 rule_clause, rule_params, tables = env['ir.rule'].domain_get(self._name, 'read')
3201 # determine the fields that are stored as columns in self._table
3202 fields_pre = [f for f in field_names if self._columns[f]._classic_write]
3204 # we need fully-qualified column names in case len(tables) > 1
3206 if isinstance(self._columns.get(f), fields.binary) and \
3207 context.get('bin_size_%s' % f, context.get('bin_size')):
3208 # PG 9.2 introduces conflicting pg_size_pretty(numeric) -> need ::cast
3209 return 'pg_size_pretty(length(%s."%s")::bigint) as "%s"' % (self._table, f, f)
3211 return '%s."%s"' % (self._table, f)
3212 qual_names = map(qualify, set(fields_pre + ['id']))
3214 query = """ SELECT %(qual_names)s FROM %(tables)s
3215 WHERE %(table)s.id IN %%s AND (%(extra)s)
3218 'qual_names': ",".join(qual_names),
3219 'tables': ",".join(tables),
3220 'table': self._table,
3221 'extra': " OR ".join(rule_clause) if rule_clause else "TRUE",
3222 'order': self._parent_order or self._order,
3226 for sub_ids in cr.split_for_in_conditions(self.ids):
3227 cr.execute(query, [tuple(sub_ids)] + rule_params)
3228 result.extend(cr.dictfetchall())
3230 ids = [vals['id'] for vals in result]
3233 # translate the fields if necessary
3234 if context.get('lang'):
3235 ir_translation = env['ir.translation']
3236 for f in fields_pre:
3237 if self._columns[f].translate:
3238 #TODO: optimize out of this loop
3239 res_trans = ir_translation._get_ids(
3240 '%s,%s' % (self._name, f), 'model', context['lang'], ids)
3242 vals[f] = res_trans.get(vals['id'], False) or vals[f]
3244 # apply the symbol_get functions of the fields we just read
3245 for f in fields_pre:
3246 symbol_get = self._columns[f]._symbol_get
3249 vals[f] = symbol_get(vals[f])
3251 # store result in cache for POST fields
3253 record = self.browse(vals['id'])
3254 record._cache.update(record._convert_to_cache(vals, validate=False))
3256 # determine the fields that must be processed now
3257 fields_post = [f for f in field_names if not self._columns[f]._classic_write]
3259 # Compute POST fields, grouped by multi
3260 by_multi = defaultdict(list)
3261 for f in fields_post:
3262 by_multi[self._columns[f]._multi].append(f)
3264 for multi, fs in by_multi.iteritems():
3266 res2 = self._columns[fs[0]].get(cr, self._model, ids, fs, user, context=context, values=result)
3267 assert res2 is not None, \
3268 'The function field "%s" on the "%s" model returned None\n' \
3269 '(a dictionary was expected).' % (fs[0], self._name)
3271 # TOCHECK: why do we get a string instead of a dict in python 2.6?
3272 # if isinstance(res2[vals['id']], str): res2[vals['id']] = eval(res2[vals['id']])
3273 multi_fields = res2.get(vals['id'], {})
3276 vals[f] = multi_fields.get(f, [])
3279 res2 = self._columns[f].get(cr, self._model, ids, f, user, context=context, values=result)
3282 vals[f] = res2[vals['id']]
3286 # Warn about deprecated fields now that fields_pre and fields_post are computed
3287 for f in field_names:
3288 column = self._columns[f]
3289 if column.deprecated:
3290 _logger.warning('Field %s.%s is deprecated: %s', self._name, f, column.deprecated)
3292 # store result in cache
3294 record = self.browse(vals.pop('id'))
3295 record._cache.update(record._convert_to_cache(vals, validate=False))
3297 # store failed values in cache for the records that could not be read
3298 fetched = self.browse(ids)
3299 missing = self - fetched
3301 extras = fetched - self
3304 _("Database fetch misses ids ({}) and has extra ids ({}), may be caused by a type incoherence in a previous request").format(
3305 ', '.join(map(repr, missing._ids)),
3306 ', '.join(map(repr, extras._ids)),
3308 # store an access error exception in existing records
3310 _('The requested operation cannot be completed due to security restrictions. Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
3311 (self._name, 'read')
3313 forbidden = missing.exists()
3314 forbidden._cache.update(FailedValue(exc))
3315 # store a missing error exception in non-existing records
3317 _('One of the documents you are trying to access has been deleted, please try again after refreshing.')
3319 (missing - forbidden)._cache.update(FailedValue(exc))
3322 def get_metadata(self):
3324 Returns some metadata about the given records.
3326 :return: list of ownership dictionaries for each requested record
3327 :rtype: list of dictionaries with the following keys:
3330 * create_uid: user who created the record
3331 * create_date: date when the record was created
3332 * write_uid: last user who changed the record
3333 * write_date: date of the last change to the record
3334 * xmlid: XML ID to use to refer to this record (if there is one), in format ``module.name``
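An entry of the returned list looks roughly like this (all values are
illustrative)::

    {'id': 7,
     'create_uid': (1, 'Administrator'),
     'create_date': '2014-06-04 10:00:00',
     'write_uid': (1, 'Administrator'),
     'write_date': '2014-06-05 08:30:00',
     'xmlid': 'base.some_record'}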
3337 if self._log_access:
3338 fields += ['create_uid', 'create_date', 'write_uid', 'write_date']
3339 quoted_table = '"%s"' % self._table
3340 fields_str = ",".join('%s.%s' % (quoted_table, field) for field in fields)
3341 query = '''SELECT %s, __imd.module, __imd.name
3342 FROM %s LEFT JOIN ir_model_data __imd
3343 ON (__imd.model = %%s and __imd.res_id = %s.id)
3344 WHERE %s.id IN %%s''' % (fields_str, quoted_table, quoted_table, quoted_table)
3345 self._cr.execute(query, (self._name, tuple(self.ids)))
3346 res = self._cr.dictfetchall()
3348 uids = set(r[k] for r in res for k in ['write_uid', 'create_uid'] if r.get(k))
3349 names = dict(self.env['res.users'].browse(uids).name_get())
3353 value = r[key] = r[key] or False
3354 if key in ('write_uid', 'create_uid') and value in names:
3355 r[key] = (value, names[value])
3356 r['xmlid'] = ("%(module)s.%(name)s" % r) if r['name'] else False
3357 del r['name'], r['module']
3360 def _check_concurrency(self, cr, ids, context):
3363 if not (context.get(self.CONCURRENCY_CHECK_FIELD) and self._log_access):
3365 check_clause = "(id = %s AND %s < COALESCE(write_date, create_date, (now() at time zone 'UTC'))::timestamp)"
3366 for sub_ids in cr.split_for_in_conditions(ids):
3369 id_ref = "%s,%s" % (self._name, id)
3370 update_date = context[self.CONCURRENCY_CHECK_FIELD].pop(id_ref, None)
3372 ids_to_check.extend([id, update_date])
3373 if not ids_to_check:
3375 cr.execute("SELECT id FROM %s WHERE %s" % (self._table, " OR ".join([check_clause]*(len(ids_to_check)/2))), tuple(ids_to_check))
3378 # mention the first one only to keep the error message readable
3379 raise except_orm('ConcurrencyException', _('A document was modified since you last viewed it (%s:%d)') % (self._description, res[0]))
3381 def _check_record_rules_result_count(self, cr, uid, ids, result_ids, operation, context=None):
3382 """Verify the returned rows after applying record rules matches
3383 the length of `ids`, and raise an appropriate exception if it does not.
3387 ids, result_ids = set(ids), set(result_ids)
3388 missing_ids = ids - result_ids
3390 # Attempt to distinguish record rule restriction vs deleted records,
3391 # to provide a more specific error message - check whether the missing ids are still present in the database
3392 cr.execute('SELECT id FROM ' + self._table + ' WHERE id IN %s', (tuple(missing_ids),))
3393 forbidden_ids = [x[0] for x in cr.fetchall()]
3395 # the missing ids are (at least partially) hidden by access rules
3396 if uid == SUPERUSER_ID:
3398 _logger.warning('Access Denied by record rules for operation: %s on record ids: %r, uid: %s, model: %s', operation, forbidden_ids, uid, self._name)
3399 raise except_orm(_('Access Denied'),
3400 _('The requested operation cannot be completed due to security restrictions. Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
3401 (self._description, operation))
3403 # If we get here, the missing_ids are not in the database
3404 if operation in ('read','unlink'):
3405 # No need to warn about deleting an already deleted record.
3406 # And no error when reading a record that was deleted, to prevent spurious
3407 # errors for non-transactional search/read sequences coming from clients
3409 _logger.warning('Failed operation on deleted record(s): %s, uid: %s, model: %s', operation, uid, self._name)
3410 raise except_orm(_('Missing document(s)'),
3411 _('One of the documents you are trying to access has been deleted, please try again after refreshing.'))
3414 def check_access_rights(self, cr, uid, operation, raise_exception=True): # no context on purpose.
3415 """Verifies that the operation given by ``operation`` is allowed for the user
3416 according to the access rights."""
3417 return self.pool.get('ir.model.access').check(cr, uid, self._name, operation, raise_exception)
3419 def check_access_rule(self, cr, uid, ids, operation, context=None):
3420 """Verifies that the operation given by ``operation`` is allowed for the user
3421 according to ir.rules.
3423 :param operation: one of ``write``, ``unlink``
3424 :raise except_orm: * if current ir.rules do not permit this operation.
3425 :return: None if the operation is allowed
3427 if uid == SUPERUSER_ID:
3430 if self.is_transient():
3431 # Only one single implicit access rule for transient models: owner only!
3432 # This is ok to hardcode because we assert that TransientModels always
3433 # have log_access enabled so that the create_uid column is always there.
3434 # And even with _inherits, these fields are always present in the local
3435 # table too, so no need for JOINs.
3436 cr.execute("""SELECT distinct create_uid
3438 WHERE id IN %%s""" % self._table, (tuple(ids),))
3439 uids = [x[0] for x in cr.fetchall()]
3440 if len(uids) != 1 or uids[0] != uid:
3441 raise except_orm(_('Access Denied'),
3442 _('For this kind of document, you may only access records you created yourself.\n\n(Document type: %s)') % (self._description,))
3444 where_clause, where_params, tables = self.pool.get('ir.rule').domain_get(cr, uid, self._name, operation, context=context)
3446 where_clause = ' and ' + ' and '.join(where_clause)
3447 for sub_ids in cr.split_for_in_conditions(ids):
3448 cr.execute('SELECT ' + self._table + '.id FROM ' + ','.join(tables) +
3449 ' WHERE ' + self._table + '.id IN %s' + where_clause,
3450 [sub_ids] + where_params)
3451 returned_ids = [x['id'] for x in cr.dictfetchall()]
3452 self._check_record_rules_result_count(cr, uid, sub_ids, returned_ids, operation, context=context)
3454 def create_workflow(self, cr, uid, ids, context=None):
3455 """Create a workflow instance for each given record IDs."""
3456 from openerp import workflow
3458 workflow.trg_create(uid, self._name, res_id, cr)
3459 # self.invalidate_cache(cr, uid, context=context) ?
3462 def delete_workflow(self, cr, uid, ids, context=None):
3463 """Delete the workflow instances bound to the given record IDs."""
3464 from openerp import workflow
3466 workflow.trg_delete(uid, self._name, res_id, cr)
3467 self.invalidate_cache(cr, uid, context=context)
3470 def step_workflow(self, cr, uid, ids, context=None):
3471 """Reevaluate the workflow instances of the given record IDs."""
3472 from openerp import workflow
3474 workflow.trg_write(uid, self._name, res_id, cr)
3475 # self.invalidate_cache(cr, uid, context=context) ?
3478 def signal_workflow(self, cr, uid, ids, signal, context=None):
3479 """Send given workflow signal and return a dict mapping ids to workflow results"""
3480 from openerp import workflow
3483 result[res_id] = workflow.trg_validate(uid, self._name, res_id, signal, cr)
3484 # self.invalidate_cache(cr, uid, context=context) ?
3487 def redirect_workflow(self, cr, uid, old_new_ids, context=None):
3488 """ Rebind the workflow instance bound to the given 'old' record IDs to
3489 the given 'new' IDs. (``old_new_ids`` is a list of pairs ``(old, new)``.
3491 from openerp import workflow
3492 for old_id, new_id in old_new_ids:
3493 workflow.trg_redirect(uid, self._name, old_id, new_id, cr)
3494 self.invalidate_cache(cr, uid, context=context)
3497 def unlink(self, cr, uid, ids, context=None):
3500 Deletes the records of the current set
3502 :raise AccessError: * if user has no unlink rights on the requested object
3503 * if user tries to bypass access rules for unlink on the requested object
3504 :raise UserError: if the record is default property for other records
3509 if isinstance(ids, (int, long)):
3512 result_store = self._store_get_values(cr, uid, ids, self._fields.keys(), context)
3514 # for recomputing new-style fields
3515 recs = self.browse(cr, uid, ids, context)
3516 recs.modified(self._fields)
3518 self._check_concurrency(cr, ids, context)
3520 self.check_access_rights(cr, uid, 'unlink')
3522 ir_property = self.pool.get('ir.property')
3524 # Check if the records are used as default properties.
3525 domain = [('res_id', '=', False),
3526 ('value_reference', 'in', ['%s,%s' % (self._name, i) for i in ids]),
3528 if ir_property.search(cr, uid, domain, context=context):
3529 raise except_orm(_('Error'), _('Unable to delete this document because it is used as a default property'))
3531 # Delete the records' properties.
3532 property_ids = ir_property.search(cr, uid, [('res_id', 'in', ['%s,%s' % (self._name, i) for i in ids])], context=context)
3533 ir_property.unlink(cr, uid, property_ids, context=context)
3535 self.delete_workflow(cr, uid, ids, context=context)
3537 self.check_access_rule(cr, uid, ids, 'unlink', context=context)
3538 pool_model_data = self.pool.get('ir.model.data')
3539 ir_values_obj = self.pool.get('ir.values')
3540 ir_attachment_obj = self.pool.get('ir.attachment')
3541 for sub_ids in cr.split_for_in_conditions(ids):
3542 cr.execute('delete from ' + self._table + ' ' \
3543 'where id IN %s', (sub_ids,))
3545 # Removing the ir_model_data reference if the record being deleted is a record created by xml/csv file,
3546 # as these are not connected with real database foreign keys, and would be dangling references.
3547 # Note: following steps performed as admin to avoid access rights restrictions, and with no context
3548 # to avoid possible side-effects during admin calls.
3549 # Step 1. Calling unlink of ir_model_data only for the affected IDS
3550 reference_ids = pool_model_data.search(cr, SUPERUSER_ID, [('res_id','in',list(sub_ids)),('model','=',self._name)])
3551 # Step 2. Perform the actual deletion of the referenced records
3553 pool_model_data.unlink(cr, SUPERUSER_ID, reference_ids)
3555 # For the same reason, removing the record relevant to ir_values
3556 ir_value_ids = ir_values_obj.search(cr, uid,
3557 ['|',('value','in',['%s,%s' % (self._name, sid) for sid in sub_ids]),'&',('res_id','in',list(sub_ids)),('model','=',self._name)],
3560 ir_values_obj.unlink(cr, uid, ir_value_ids, context=context)
3562 # For the same reason, removing the record relevant to ir_attachment
3563 # The search is performed with sql as the search method of ir_attachment is overridden to hide attachments of deleted records
3564 cr.execute('select id from ir_attachment where res_model = %s and res_id in %s', (self._name, sub_ids))
3565 ir_attachment_ids = [ir_attachment[0] for ir_attachment in cr.fetchall()]
3566 if ir_attachment_ids:
3567 ir_attachment_obj.unlink(cr, uid, ir_attachment_ids, context=context)
3569 # invalidate the *whole* cache, since the orm does not handle all
3570 # changes made in the database, like cascading delete!
3571 recs.invalidate_cache()
3573 for order, obj_name, store_ids, fields in result_store:
3574 if obj_name == self._name:
3575 effective_store_ids = set(store_ids) - set(ids)
3577 effective_store_ids = store_ids
3578 if effective_store_ids:
3579 obj = self.pool[obj_name]
3580 cr.execute('select id from '+obj._table+' where id IN %s', (tuple(effective_store_ids),))
3581 rids = map(lambda x: x[0], cr.fetchall())
3583 obj._store_set_values(cr, uid, rids, fields, context)
3585 # recompute new-style fields
3594 def write(self, vals):
3597 Updates all records in the current set with the provided values.
3599 :param dict vals: fields to update and the value to set on them e.g::
3601 {'foo': 1, 'bar': "Qux"}
3603 will set the field ``foo`` to ``1`` and the field ``bar`` to
3604 ``"Qux"`` if those are valid (otherwise it will trigger an error).
3606 :raise AccessError: * if user has no write rights on the requested object
3607 * if user tries to bypass access rules for write on the requested object
3608 :raise ValidateError: if the user tries to enter an invalid value for a field that is not in the selection
3609 :raise UserError: if a loop would be created in a hierarchy of objects as a result of the operation (such as setting an object as its own parent)
3611 * For numeric fields (:class:`~openerp.fields.Integer`,
3612 :class:`~openerp.fields.Float`) the value should be of the
3614 * For :class:`~openerp.fields.Boolean`, the value should be a
3615 :class:`python:bool`
3616 * For :class:`~openerp.fields.Selection`, the value should match the
3617 selection values (generally :class:`python:str`, sometimes
3618 :class:`python:int`)
3619 * For :class:`~openerp.fields.Many2one`, the value should be the
3620 database identifier of the record to set
3621 * Other non-relational fields use a string for value
3625 for historical and compatibility reasons,
3626 :class:`~openerp.fields.Date` and
3627 :class:`~openerp.fields.Datetime` fields use strings as values
3628 (written and read) rather than :class:`~python:datetime.date` or
3629 :class:`~python:datetime.datetime`. These date strings are
3630 UTC-only and formatted according to
3631 :const:`openerp.tools.misc.DEFAULT_SERVER_DATE_FORMAT` and
3632 :const:`openerp.tools.misc.DEFAULT_SERVER_DATETIME_FORMAT`
3633 * .. _openerp/models/relationals/format:
3635 :class:`~openerp.fields.One2many` and
3636 :class:`~openerp.fields.Many2many` use a special "commands" format to
3637 manipulate the set of records stored in/associated with the field.
3639 This format is a list of triplets executed sequentially, where each
3640 triplet is a command to execute on the set of records. Not all
3641 commands apply in all situations. Possible commands are listed below; an illustrative example follows the list:
3644 adds a new record created from the provided ``value`` dict.
3646 updates an existing record of id ``id`` with the values in
3647 ``values``. Can not be used in :meth:`~.create`.
3649 removes the record of id ``id`` from the set, then deletes it
3650 (from the database). Can not be used in :meth:`~.create`.
3652 removes the record of id ``id`` from the set, but does not
3653 delete it. Can not be used on
3654 :class:`~openerp.fields.One2many`. Can not be used in
3657 adds an existing record of id ``id`` to the set. Can not be
3658 used on :class:`~openerp.fields.One2many`.
3660 removes all records from the set, equivalent to using the
3661 command ``3`` on every record explicitly. Can not be used on
3662 :class:`~openerp.fields.One2many`. Can not be used in
3665 replaces all existing records in the set by the ``ids`` list,
3666 equivalent to using the command ``5`` followed by a command
3667 ``4`` for each ``id`` in ``ids``. Can not be used on
3668 :class:`~openerp.fields.One2many`.
3670 .. note:: Values marked as ``_`` in the list above are ignored and
3671 can be anything, generally ``0`` or ``False``.
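For example (illustrative only; the field names, ``line_id``, ``old_line_id``
and the values are hypothetical)::

    record.write({
        'name': 'New name',
        'date': '2014-07-01',                       # dates are strings
        'line_ids': [
            (0, 0, {'product_id': 42, 'qty': 1.0}), # create a new line
            (1, line_id, {'qty': 2.0}),             # update an existing line
            (2, old_line_id),                       # delete a line
        ],
    })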
3676 self._check_concurrency(self._ids)
3677 self.check_access_rights('write')
3679 # No user-driven update of these columns
3680 for field in itertools.chain(MAGIC_COLUMNS, ('parent_left', 'parent_right')):
3681 vals.pop(field, None)
3683 # split up fields into old-style and pure new-style ones
3684 old_vals, new_vals, unknown = {}, {}, []
3685 for key, val in vals.iteritems():
3686 field = self._fields.get(key)
3688 if field.column or field.inherited:
3690 if field.inverse and not field.inherited:
3696 _logger.warning("%s.write() with unknown fields: %s", self._name, ', '.join(sorted(unknown)))
3698 # write old-style fields with (low-level) method _write
3700 self._write(old_vals)
3702 # put the values of pure new-style fields into cache, and inverse them
3705 record._cache.update(record._convert_to_cache(new_vals, update=True))
3706 for key in new_vals:
3707 self._fields[key].determine_inverse(self)
3711 def _write(self, cr, user, ids, vals, context=None):
3712 # low-level implementation of write()
3717 self.check_field_access_rights(cr, user, 'write', vals.keys())
3718 deleted_related = defaultdict(list)
3719 for field in vals.keys():
3721 if field in self._columns:
3722 fobj = self._columns[field]
3723 elif field in self._inherit_fields:
3724 fobj = self._inherit_fields[field][2]
3727 if fobj._type in ['one2many', 'many2many'] and vals[field]:
3728 for wtuple in vals[field]:
3729 if isinstance(wtuple, (tuple, list)) and wtuple[0] == 2:
3730 deleted_related[fobj._obj].append(wtuple[1])
3735 for group in groups:
3736 module = group.split(".")[0]
3737 grp = group.split(".")[1]
3738 cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name=%s and module=%s and model=%s) and uid=%s", \
3739 (grp, module, 'res.groups', user))
3740 readonly = cr.fetchall()
3741 if readonly[0][0] >= 1:
3748 result = self._store_get_values(cr, user, ids, vals.keys(), context) or []
3750 # for recomputing new-style fields
3751 recs = self.browse(cr, user, ids, context)
3752 modified_fields = list(vals)
3753 if self._log_access:
3754 modified_fields += ['write_date', 'write_uid']
3755 recs.modified(modified_fields)
3757 parents_changed = []
3758 parent_order = self._parent_order or self._order
3759 if self._parent_store and (self._parent_name in vals) and not context.get('defer_parent_store_computation'):
3760 # The parent_left/right computation may take up to
3761 # 5 seconds. No need to recompute the values if the
3762 # parent is the same.
3763 # Note: to respect parent_order, nodes must be processed in
3764 # order, so ``parents_changed`` must be ordered properly.
3765 parent_val = vals[self._parent_name]
3767 query = "SELECT id FROM %s WHERE id IN %%s AND (%s != %%s OR %s IS NULL) ORDER BY %s" % \
3768 (self._table, self._parent_name, self._parent_name, parent_order)
3769 cr.execute(query, (tuple(ids), parent_val))
3771 query = "SELECT id FROM %s WHERE id IN %%s AND (%s IS NOT NULL) ORDER BY %s" % \
3772 (self._table, self._parent_name, parent_order)
3773 cr.execute(query, (tuple(ids),))
3774 parents_changed = map(operator.itemgetter(0), cr.fetchall())
3776 updates = [] # list of (column, expr) or (column, pattern, value)
3780 totranslate = context.get('lang', False) and (context['lang'] != 'en_US')
3782 ffield = self._fields.get(field)
3783 if ffield and ffield.deprecated:
3784 _logger.warning('Field %s.%s is deprecated: %s', self._name, field, ffield.deprecated)
3785 if field in self._columns:
3786 column = self._columns[field]
3787 if hasattr(column, 'selection') and vals[field]:
3788 self._check_selection_field_value(cr, user, field, vals[field], context=context)
3789 if column._classic_write and not hasattr(column, '_fnct_inv'):
3790 if (not totranslate) or not column.translate:
3791 updates.append((field, '%s', column._symbol_set[1](vals[field])))
3792 direct.append(field)
3794 upd_todo.append(field)
3796 updend.append(field)
3798 if self._log_access:
3799 updates.append(('write_uid', '%s', user))
3800 updates.append(('write_date', "(now() at time zone 'UTC')"))
3801 direct.append('write_uid')
3802 direct.append('write_date')
3805 self.check_access_rule(cr, user, ids, 'write', context=context)
3806 query = 'UPDATE "%s" SET %s WHERE id IN %%s' % (
3807 self._table, ','.join('"%s"=%s' % u[:2] for u in updates),
3809 params = tuple(u[2] for u in updates if len(u) > 2)
3810 for sub_ids in cr.split_for_in_conditions(ids):
3811 cr.execute(query, params + (sub_ids,))
3812 if cr.rowcount != len(sub_ids):
3813 raise MissingError(_('One of the records you are trying to modify has already been deleted (Document type: %s).') % self._description)
3818 if self._columns[f].translate:
3819 src_trans = self.pool[self._name].read(cr, user, ids, [f])[0][f]
3822 # Inserting value to DB
3823 context_wo_lang = dict(context, lang=None)
3824 self.write(cr, user, ids, {f: vals[f]}, context=context_wo_lang)
3825 self.pool.get('ir.translation')._set_ids(cr, user, self._name+','+f, 'model', context['lang'], ids, vals[f], src_trans)
3827 # invalidate and mark new-style fields to recompute; do this before
3828 # setting other fields, because it can require the value of computed
3829 # fields, e.g., a one2many checking constraints on records
3830 recs.modified(direct)
3832 # call the 'set' method of fields which are not classic_write
3833 upd_todo.sort(lambda x, y: self._columns[x].priority-self._columns[y].priority)
3835 # default values in the context must be removed when calling a one2many or many2many
3836 rel_context = context.copy()
3837 for c in context.items():
3838 if c[0].startswith('default_'):
3839 del rel_context[c[0]]
3841 for field in upd_todo:
3843 result += self._columns[field].set(cr, self, id, field, vals[field], user, context=rel_context) or []
3845 # for recomputing new-style fields
3846 recs.modified(upd_todo)
3848 unknown_fields = updend[:]
3849 for table in self._inherits:
3850 col = self._inherits[table]
3852 for sub_ids in cr.split_for_in_conditions(ids):
3853 cr.execute('select distinct "'+col+'" from "'+self._table+'" ' \
3854 'where id IN %s', (sub_ids,))
3855 nids.extend([x[0] for x in cr.fetchall()])
3859 if self._inherit_fields[val][0] == table:
3861 unknown_fields.remove(val)
3863 self.pool[table].write(cr, user, nids, v, context)
3867 'No such field(s) in model %s: %s.',
3868 self._name, ', '.join(unknown_fields))
3870 # check Python constraints
3871 recs._validate_fields(vals)
3873 # TODO: use _order to set dest at the right position and not first node of parent
3874 # We can't defer parent_store computation because the stored function
3875 # fields that are computed may refer (directly or indirectly) to
3876 # parent_left/right (via a child_of domain)
3879 self.pool._init_parent[self._name] = True
3881 order = self._parent_order or self._order
3882 parent_val = vals[self._parent_name]
3884 clause, params = '%s=%%s' % (self._parent_name,), (parent_val,)
3886 clause, params = '%s IS NULL' % (self._parent_name,), ()
3888 for id in parents_changed:
3889 cr.execute('SELECT parent_left, parent_right FROM %s WHERE id=%%s' % (self._table,), (id,))
3890 pleft, pright = cr.fetchone()
3891 distance = pright - pleft + 1
3893 # Positions of current siblings, to locate proper insertion point;
3894 # this can _not_ be fetched outside the loop, as it needs to be refreshed
3895 # after each update, in case several nodes are sequentially inserted one
3896 # next to the other (i.e. computed incrementally)
3897 cr.execute('SELECT parent_right, id FROM %s WHERE %s ORDER BY %s' % (self._table, clause, parent_order), params)
3898 parents = cr.fetchall()
3900 # Find Position of the element
3902 for (parent_pright, parent_id) in parents:
3905 position = parent_pright and parent_pright + 1 or 1
3907 # It's the first node of the parent
3912 cr.execute('select parent_left from '+self._table+' where id=%s', (parent_val,))
3913 position = cr.fetchone()[0] + 1
3915 if pleft < position <= pright:
3916 raise except_orm(_('UserError'), _('Recursivity Detected.'))
3918 if pleft < position:
3919 cr.execute('update '+self._table+' set parent_left=parent_left+%s where parent_left>=%s', (distance, position))
3920 cr.execute('update '+self._table+' set parent_right=parent_right+%s where parent_right>=%s', (distance, position))
3921 cr.execute('update '+self._table+' set parent_left=parent_left+%s, parent_right=parent_right+%s where parent_left>=%s and parent_left<%s', (position-pleft, position-pleft, pleft, pright))
3923 cr.execute('update '+self._table+' set parent_left=parent_left+%s where parent_left>=%s', (distance, position))
3924 cr.execute('update '+self._table+' set parent_right=parent_right+%s where parent_right>=%s', (distance, position))
3925 cr.execute('update '+self._table+' set parent_left=parent_left-%s, parent_right=parent_right-%s where parent_left>=%s and parent_left<%s', (pleft-position+distance, pleft-position+distance, pleft+distance, pright+distance))
3926 recs.invalidate_cache(['parent_left', 'parent_right'])
3928 result += self._store_get_values(cr, user, ids, vals.keys(), context)
3932 for order, model_name, ids_to_update, fields_to_recompute in result:
3933 key = (model_name, tuple(fields_to_recompute))
3934 done.setdefault(key, {})
3935 # avoid doing the same computation several times
3937 for id in ids_to_update:
3938 if id not in done[key]:
3939 done[key][id] = True
3940 if id not in deleted_related[model_name]:
3942 self.pool[model_name]._store_set_values(cr, user, todo, fields_to_recompute, context)
3944 # recompute new-style fields
3945 if context.get('recompute', True):
3948 self.step_workflow(cr, user, ids, context=context)
3952 # TODO: Should set perm to user.xxx
3955 @api.returns('self', lambda value: value.id)
3956 def create(self, vals):
3957 """ create(vals) -> record
3959 Creates a new record for the model.
3961 The new record is initialized using the values from ``vals`` and
3962 if necessary those from :meth:`~.default_get`.
3965 values for the model's fields, as a dictionary::
3967 {'field_name': field_value, ...}
3969 see :meth:`~.write` for details
3970 :return: new record created
3971 :raise AccessError: * if user has no create rights on the requested object
3972 * if user tries to bypass access rules for create on the requested object
3973 :raise ValidateError: if the user tries to enter an invalid value for a field that is not in the selection
3974 :raise UserError: if a loop would be created in a hierarchy of objects as a result of the operation (such as setting an object as its own parent)
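A minimal illustrative call, assuming the standard ``res.partner`` model::

    partner = self.env['res.partner'].create({
        'name': 'ACME',
        'child_ids': [(0, 0, {'name': 'Contact'})],  # commands format, see write()
    })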
3976 self.check_access_rights('create')
3978 # add missing defaults, and drop fields that may not be set by user
3979 vals = self._add_missing_default_values(vals)
3980 for field in itertools.chain(MAGIC_COLUMNS, ('parent_left', 'parent_right')):
3981 vals.pop(field, None)
3983 # split up fields into old-style and pure new-style ones
3984 old_vals, new_vals, unknown = {}, {}, []
3985 for key, val in vals.iteritems():
3986 field = self._fields.get(key)
3988 if field.column or field.inherited:
3990 if field.inverse and not field.inherited:
3996 _logger.warning("%s.create() with unknown fields: %s", self._name, ', '.join(sorted(unknown)))
3998 # create record with old-style fields
3999 record = self.browse(self._create(old_vals))
4001 # put the values of pure new-style fields into cache, and inverse them
4002 record._cache.update(record._convert_to_cache(new_vals))
4003 for key in new_vals:
4004 self._fields[key].determine_inverse(record)
4008 def _create(self, cr, user, vals, context=None):
4009 # low-level implementation of create()
4013 if self.is_transient():
4014 self._transient_vacuum(cr, user)
4017 for v in self._inherits:
4018 if self._inherits[v] not in vals:
4021 tocreate[v] = {'id': vals[self._inherits[v]]}
4024 # list of column assignments defined as tuples like:
4025 # (column_name, format_string, column_value)
4026 # (column_name, sql_formula)
4027 # Those tuples will be used by the string formatting for the INSERT
4029 ('id', "nextval('%s')" % self._sequence),
4034 for v in vals.keys():
4035 if v in self._inherit_fields and v not in self._columns:
4036 (table, col, col_detail, original_parent) = self._inherit_fields[v]
4037 tocreate[table][v] = vals[v]
4040 if (v not in self._inherit_fields) and (v not in self._columns):
4042 unknown_fields.append(v)
4045 'No such field(s) in model %s: %s.',
4046 self._name, ', '.join(unknown_fields))
4048 for table in tocreate:
4049 if self._inherits[table] in vals:
4050 del vals[self._inherits[table]]
4052 record_id = tocreate[table].pop('id', None)
4054 if record_id is None or not record_id:
4055 record_id = self.pool[table].create(cr, user, tocreate[table], context=context)
4057 self.pool[table].write(cr, user, [record_id], tocreate[table], context=context)
4059 updates.append((self._inherits[table], '%s', record_id))
4061 # Start: set boolean fields to False if they are not touched (to make search more powerful)
4062 bool_fields = [x for x in self._columns.keys() if self._columns[x]._type=='boolean']
4064 for bool_field in bool_fields:
4065 if bool_field not in vals:
4066 vals[bool_field] = False
4068 for field in vals.keys():
4070 if field in self._columns:
4071 fobj = self._columns[field]
4073 fobj = self._inherit_fields[field][2]
4079 for group in groups:
4080 module = group.split(".")[0]
4081 grp = group.split(".")[1]
4082 cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name='%s' and module='%s' and model='%s') and uid=%s" % \
4083 (grp, module, 'res.groups', user))
4084 readonly = cr.fetchall()
4085 if readonly[0][0] >= 1:
4088 elif readonly[0][0] == 0:
4096 current_field = self._columns[field]
4097 if current_field._classic_write:
4098 updates.append((field, '%s', current_field._symbol_set[1](vals[field])))
4100 #for the function fields that receive a value, we set them directly in the database
4101 #(they may be required), but we also need to trigger the _fct_inv()
4102 if (hasattr(current_field, '_fnct_inv')) and not isinstance(current_field, fields.related):
4103 #TODO: this way of special-casing the related fields is really creepy, but it shouldn't be changed
4104 #one week before the release candidate. It seems the only good way to handle this correctly is to add an
4105 #attribute to make a field 'really readonly' and thus totally ignored by create()... otherwise,
4106 #if, for example, the related field has a default value (for usability), then the fct_inv is called and it
4107 #may raise an access rights error. Changing this is too big a change for now, and is thus postponed
4108 #until after the release but, definitely, the behavior shouldn't be different for related and function
4110 upd_todo.append(field)
4112 #TODO: this 'if' statement should be removed because there is no good reason to special-case the related
4113 #fields. See the above TODO comment for further explanations.
4114 if not isinstance(current_field, fields.related):
4115 upd_todo.append(field)
4116 if field in self._columns \
4117 and hasattr(current_field, 'selection') \
4119 self._check_selection_field_value(cr, user, field, vals[field], context=context)
4120 if self._log_access:
4121 updates.append(('create_uid', '%s', user))
4122 updates.append(('write_uid', '%s', user))
4123 updates.append(('create_date', "(now() at time zone 'UTC')"))
4124 updates.append(('write_date', "(now() at time zone 'UTC')"))
4126 # the list of tuples used in this formatting corresponds to
4127 # tuple(field_name, format, value)
4128 # In some cases, for example (id, create_date, write_date), we do not
4129 # need to read the third value of the tuple, because the real value is
4130 # encoded in the second value (the format).
4132 """INSERT INTO "%s" (%s) VALUES(%s) RETURNING id""" % (
4134 ', '.join('"%s"' % u[0] for u in updates),
4135 ', '.join(u[1] for u in updates)
4137 tuple([u[2] for u in updates if len(u) > 2])
4140 id_new, = cr.fetchone()
4141 recs = self.browse(cr, user, id_new, context)
4143 if self._parent_store and not context.get('defer_parent_store_computation'):
4145 self.pool._init_parent[self._name] = True
4147 parent = vals.get(self._parent_name, False)
4149 cr.execute('select parent_right from '+self._table+' where '+self._parent_name+'=%s order by '+(self._parent_order or self._order), (parent,))
4151 result_p = cr.fetchall()
4152 for (pleft,) in result_p:
4157 cr.execute('select parent_left from '+self._table+' where id=%s', (parent,))
4158 pleft_old = cr.fetchone()[0]
4161 cr.execute('select max(parent_right) from '+self._table)
4162 pleft = cr.fetchone()[0] or 0
4163 cr.execute('update '+self._table+' set parent_left=parent_left+2 where parent_left>%s', (pleft,))
4164 cr.execute('update '+self._table+' set parent_right=parent_right+2 where parent_right>%s', (pleft,))
4165 cr.execute('update '+self._table+' set parent_left=%s,parent_right=%s where id=%s', (pleft+1, pleft+2, id_new))
4166 recs.invalidate_cache(['parent_left', 'parent_right'])
4168 # invalidate and mark new-style fields to recompute; do this before
4169 # setting other fields, because it can require the value of computed
4170 # fields, e.g., a one2many checking constraints on records
4171 recs.modified([u[0] for u in updates])
4173 # call the 'set' method of fields which are not classic_write
4174 upd_todo.sort(lambda x, y: self._columns[x].priority-self._columns[y].priority)
4176 # default values in the context must be removed when calling a one2many or many2many
4177 rel_context = context.copy()
4178 for c in context.items():
4179 if c[0].startswith('default_'):
4180 del rel_context[c[0]]
4183 for field in upd_todo:
4184 result += self._columns[field].set(cr, self, id_new, field, vals[field], user, rel_context) or []
4186 # for recomputing new-style fields
4187 recs.modified(upd_todo)
4189 # check Python constraints
4190 recs._validate_fields(vals)
4192 if context.get('recompute', True):
4193 result += self._store_get_values(cr, user, [id_new],
4194 list(set(vals.keys() + self._inherits.values())),
4198 for order, model_name, ids, fields2 in result:
4199 if not (model_name, ids, fields2) in done:
4200 self.pool[model_name]._store_set_values(cr, user, ids, fields2, context)
4201 done.append((model_name, ids, fields2))
4202 # recompute new-style fields
4205 if self._log_create and context.get('recompute', True):
4206 message = self._description + \
4208 self.name_get(cr, user, [id_new], context=context)[0][1] + \
4209 "' " + _("created.")
4210 self.log(cr, user, id_new, message, True, context=context)
4212 self.check_access_rule(cr, user, [id_new], 'create', context=context)
4213 self.create_workflow(cr, user, [id_new], context=context)
4216 def _store_get_values(self, cr, uid, ids, fields, context):
4217 """Returns an ordered list of fields.function to call due to
4218 an update operation on ``fields`` of records with ``ids``,
4219 obtained by calling the 'store' triggers of these fields,
4220 as set up by their 'store' attribute.
4222 :return: [(priority, model_name, [record_ids,], [function_fields,])]
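The returned structure might look like this (the model and field names are
hypothetical)::

    [(10, 'account.move.line', [1, 2, 3], ['balance', 'residual'])]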
4224 if fields is None: fields = []
4225 stored_functions = self.pool._store_function.get(self._name, [])
4227 # use indexed names for the details of the stored_functions:
4228 model_name_, func_field_to_compute_, target_ids_func_, trigger_fields_, priority_ = range(5)
4230 # only keep store triggers that should be triggered for the ``fields``
4232 triggers_to_compute = (
4233 f for f in stored_functions
4234 if not f[trigger_fields_] or set(fields).intersection(f[trigger_fields_])
4238 target_id_results = {}
4239 for store_trigger in triggers_to_compute:
4240 target_func_id_ = id(store_trigger[target_ids_func_])
4241 if target_func_id_ not in target_id_results:
4242 # use admin user for accessing objects having rules defined on store fields
4243 target_id_results[target_func_id_] = [i for i in store_trigger[target_ids_func_](self, cr, SUPERUSER_ID, ids, context) if i]
4244 target_ids = target_id_results[target_func_id_]
4246 # the compound key must consider the priority and model name
4247 key = (store_trigger[priority_], store_trigger[model_name_])
4248 for target_id in target_ids:
4249 to_compute_map.setdefault(key, {}).setdefault(target_id,set()).add(tuple(store_trigger))
4251 # Here to_compute_map looks like:
4252 # { (10, 'model_a') : { target_id1: [ (trigger_1_tuple, trigger_2_tuple) ], ... }
4253 # (20, 'model_a') : { target_id2: [ (trigger_3_tuple, trigger_4_tuple) ], ... }
4254 # (99, 'model_a') : { target_id1: [ (trigger_5_tuple, trigger_6_tuple) ], ... }
4257 # Now we need to generate the batch function calls list
4259 # { (10, 'model_a') : [(10, 'model_a', [record_ids,], [function_fields,])] }
4261 for ((priority,model), id_map) in to_compute_map.iteritems():
4262 trigger_ids_maps = {}
4263 # trigger_ids_maps =
4264 # { (function_1_tuple, function_2_tuple) : [target_id1, target_id2, ..] }
4265 for target_id, triggers in id_map.iteritems():
4266 trigger_ids_maps.setdefault(tuple(triggers), []).append(target_id)
4267 for triggers, target_ids in trigger_ids_maps.iteritems():
4268 call_map.setdefault((priority,model),[]).append((priority, model, target_ids,
4269 [t[func_field_to_compute_] for t in triggers]))
4272 result = reduce(operator.add, (call_map[k] for k in sorted(call_map)))
4275 def _store_set_values(self, cr, uid, ids, fields, context):
4276 """Calls the fields.function's "implementation function" for all ``fields``, on records with ``ids`` (taking care of
4277 respecting ``multi`` attributes), and stores the resulting values in the database directly."""
4282 if self._log_access:
4283 cr.execute('select id,write_date from '+self._table+' where id IN %s', (tuple(ids),))
4287 field_dict.setdefault(r[0], [])
4288 res_date = time.strptime((r[1])[:19], '%Y-%m-%d %H:%M:%S')
4289 write_date = datetime.datetime.fromtimestamp(time.mktime(res_date))
4290 for i in self.pool._store_function.get(self._name, []):
4292 up_write_date = write_date + datetime.timedelta(hours=i[5])
4293 if datetime.datetime.now() < up_write_date:
4295 field_dict[r[0]].append(i[1])
4301 if self._columns[f]._multi not in keys:
4302 keys.append(self._columns[f]._multi)
4303 todo.setdefault(self._columns[f]._multi, [])
4304 todo[self._columns[f]._multi].append(f)
4308 # use admin user for accessing objects having rules defined on store fields
4309 result = self._columns[val[0]].get(cr, self, ids, val, SUPERUSER_ID, context=context)
4310 for id, value in result.items():
4312 for f in value.keys():
4313 if f in field_dict[id]:
4315 updates = [] # list of (column, pattern, value)
4319 column = self._columns[v]
4320 if column._type == 'many2one':
4322 value[v] = value[v][0]
4325 updates.append((v, '%s', column._symbol_set[1](value[v])))
4327 query = 'UPDATE "%s" SET %s WHERE id = %%s' % (
4328 self._table, ','.join('"%s"=%s' % u[:2] for u in updates),
4330 params = tuple(u[2] for u in updates)
4331 cr.execute(query, params + (id,))
4335 column = self._columns[f]
4336 # use admin user for accessing objects having rules defined on store fields
4337 result = column.get(cr, self, ids, f, SUPERUSER_ID, context=context)
4338 for r in result.keys():
4340 if r in field_dict.keys():
4341 if f in field_dict[r]:
4343 for id, value in result.items():
4344 if column._type == 'many2one':
4349 query = 'UPDATE "%s" SET "%s"=%%s WHERE id = %%s' % (
4352 cr.execute(query, (column._symbol_set[1](value), id))
4354 # invalidate and mark new-style fields to recompute
4355 self.browse(cr, uid, ids, context).modified(fields)
4359 # TODO: improve with NULL
4360 def _where_calc(self, cr, user, domain, active_test=True, context=None):
4361 """Computes the WHERE clause needed to implement an OpenERP domain.
4362 :param domain: the domain to compute
4364 :param active_test: whether the default filtering of records with ``active``
4365 field set to ``False`` should be applied.
4366 :return: the query expressing the given domain as provided in domain
4367 :rtype: osv.query.Query
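For example, a domain such as ``[('name', 'like', 'foo%'), ('active', '=', True)]``
(the field names are illustrative) yields a Query whose WHERE clause is roughly
``("my_table"."name" like %s) AND ("my_table"."active" = %s)`` together with the
corresponding parameter list.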
4372 # if the object has a field named 'active', filter out all inactive
4373 # records unless they were explicitly asked for
4374 if 'active' in self._fields and active_test and context.get('active_test', True):
4376 # the item[0] trick below works for domain items and '&'/'|'/'!'
4378 if not any(item[0] == 'active' for item in domain):
4379 domain.insert(0, ('active', '=', 1))
4381 domain = [('active', '=', 1)]
4384 e = expression.expression(cr, user, domain, self, context)
4385 tables = e.get_tables()
4386 where_clause, where_params = e.to_sql()
4387 where_clause = where_clause and [where_clause] or []
4389 where_clause, where_params, tables = [], [], ['"%s"' % self._table]
4391 return Query(tables, where_clause, where_params)
4393 def _check_qorder(self, word):
4394 if not regex_order.match(word):
4395 raise except_orm(_('AccessError'), _('Invalid "order" specified. A valid "order" specification is a comma-separated list of valid field names (optionally followed by asc/desc for the direction)'))
4398 def _apply_ir_rules(self, cr, uid, query, mode='read', context=None):
4399 """Add what's missing in ``query`` to implement all appropriate ir.rules
4400 (using the ``model_name``'s rules or the current model's rules if ``model_name`` is None)
4402 :param query: the current query object
4404 if uid == SUPERUSER_ID:
4407 def apply_rule(added_clause, added_params, added_tables, parent_model=None):
4408 """ :param parent_model: name of the parent model, if the added
4409 clause comes from a parent model
4413 # as inherited rules are being applied, we need to add the missing JOIN
4414 # to reach the parent table (if it was not JOINed yet in the query)
4415 parent_alias = self._inherits_join_add(self, parent_model, query)
4416 # inherited rules are applied on the external table -> need to get the alias and replace
4417 parent_table = self.pool[parent_model]._table
4418 added_clause = [clause.replace('"%s"' % parent_table, '"%s"' % parent_alias) for clause in added_clause]
4419 # change references to parent_table to parent_alias, because we now use the alias to refer to the table
4421 for table in added_tables:
4422 # table is just a table name -> switch to the full alias
4423 if table == '"%s"' % parent_table:
4424 new_tables.append('"%s" as "%s"' % (parent_table, parent_alias))
4425 # table is already a full statement -> replace the reference to the table with its alias; this is correct given the way aliases are generated
4427 new_tables.append(table.replace('"%s"' % parent_table, '"%s"' % parent_alias))
4428 added_tables = new_tables
4429 query.where_clause += added_clause
4430 query.where_clause_params += added_params
4431 for table in added_tables:
4432 if table not in query.tables:
4433 query.tables.append(table)
4437 # apply main rules on the object
4438 rule_obj = self.pool.get('ir.rule')
4439 rule_where_clause, rule_where_clause_params, rule_tables = rule_obj.domain_get(cr, uid, self._name, mode, context=context)
4440 apply_rule(rule_where_clause, rule_where_clause_params, rule_tables)
4442 # apply ir.rules from the parents (through _inherits)
4443 for inherited_model in self._inherits:
4444 rule_where_clause, rule_where_clause_params, rule_tables = rule_obj.domain_get(cr, uid, inherited_model, mode, context=context)
4445 apply_rule(rule_where_clause, rule_where_clause_params, rule_tables,
4446 parent_model=inherited_model)
4448 def _generate_m2o_order_by(self, order_field, query):
4450 Add possibly missing JOIN to ``query`` and generate the ORDER BY clause for m2o fields,
4451 either native m2o fields or function/related fields that are stored, including
4452 intermediate JOINs for inheritance if required.
4454 :return: the qualified field name to use in an ORDER BY clause to sort by ``order_field``
4456 if order_field not in self._columns and order_field in self._inherit_fields:
4457 # also add missing joins for reaching the table containing the m2o field
4458 qualified_field = self._inherits_join_calc(order_field, query)
4459 order_field_column = self._inherit_fields[order_field][2]
4461 qualified_field = '"%s"."%s"' % (self._table, order_field)
4462 order_field_column = self._columns[order_field]
4464 assert order_field_column._type == 'many2one', 'Invalid field passed to _generate_m2o_order_by()'
4465 if not order_field_column._classic_write and not getattr(order_field_column, 'store', False):
4466 _logger.debug("Many2one function/related fields must be stored " \
4467 "to be used as ordering fields! Ignoring sorting for %s.%s",
4468 self._name, order_field)
4471 # figure out the applicable order_by for the m2o
4472 dest_model = self.pool[order_field_column._obj]
4473 m2o_order = dest_model._order
4474 if not regex_order.match(m2o_order):
4475 # _order is complex, can't use it here, so we default to _rec_name
4476 m2o_order = dest_model._rec_name
4478 # extract the field names, to be able to qualify them and add desc/asc
4480 for order_part in m2o_order.split(","):
4481 m2o_order_list.append(order_part.strip().split(" ", 1)[0].strip())
4482 m2o_order = m2o_order_list
4484 # Join the dest m2o table if it's not joined yet. We use [LEFT] OUTER join here
4485 # as we don't want to exclude results that have NULL values for the m2o
4486 src_table, src_field = qualified_field.replace('"', '').split('.', 1)
4487 dst_alias, dst_alias_statement = query.add_join((src_table, dest_model._table, src_field, 'id', src_field), implicit=False, outer=True)
4488 qualify = lambda field: '"%s"."%s"' % (dst_alias, field)
4489 return map(qualify, m2o_order) if isinstance(m2o_order, list) else qualify(m2o_order)
4491 def _generate_order_by(self, order_spec, query):
4493 Attempt to construct an appropriate ORDER BY clause based on order_spec, which must be
4494 a comma-separated list of valid field names, optionally followed by an ASC or DESC direction.
4496 :raise" except_orm in case order_spec is malformed
4498 order_by_clause = ''
4499 order_spec = order_spec or self._order
4501 order_by_elements = []
4502 self._check_qorder(order_spec)
4503 for order_part in order_spec.split(','):
4504 order_split = order_part.strip().split(' ')
4505 order_field = order_split[0].strip()
4506 order_direction = order_split[1].strip() if len(order_split) == 2 else ''
4509 if order_field == 'id':
4510 order_by_elements.append('"%s"."%s" %s' % (self._table, order_field, order_direction))
4511 elif order_field in self._columns:
4512 order_column = self._columns[order_field]
4513 if order_column._classic_read:
4514 inner_clause = '"%s"."%s"' % (self._table, order_field)
4515 elif order_column._type == 'many2one':
4516 inner_clause = self._generate_m2o_order_by(order_field, query)
4518 continue # ignore non-readable or "non-joinable" fields
4519 elif order_field in self._inherit_fields:
4520 parent_obj = self.pool[self._inherit_fields[order_field][3]]
4521 order_column = parent_obj._columns[order_field]
4522 if order_column._classic_read:
4523 inner_clause = self._inherits_join_calc(order_field, query)
4524 elif order_column._type == 'many2one':
4525 inner_clause = self._generate_m2o_order_by(order_field, query)
4527 continue # ignore non-readable or "non-joinable" fields
4529 raise ValueError( _("Sorting field %s not found on model %s") %( order_field, self._name))
4530 if order_column and order_column._type == 'boolean':
4531 inner_clause = "COALESCE(%s, false)" % inner_clause
4533 if isinstance(inner_clause, list):
4534 for clause in inner_clause:
4535 order_by_elements.append("%s %s" % (clause, order_direction))
4537 order_by_elements.append("%s %s" % (inner_clause, order_direction))
4538 if order_by_elements:
4539 order_by_clause = ",".join(order_by_elements)
4541 return order_by_clause and (' ORDER BY %s ' % order_by_clause) or ''
4543 def _search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False, access_rights_uid=None):
4545 Private implementation of search() method, allowing specifying the uid to use for the access right check.
4546 This is useful for example when filling in the selection list for a drop-down and avoiding access rights errors,
4547 by specifying ``access_rights_uid=1`` to bypass access rights check, but not ir.rules!
4548 This is ok at the security level because this method is private and not callable through XML-RPC.
4550 :param access_rights_uid: optional user ID to use when checking access rights
4551 (not for ir.rules, this is only for ir.model.access)
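A minimal illustrative call (the domain and limit are arbitrary, and ``name``
is a hypothetical field)::

    ids = self._search(cr, uid, [('name', 'ilike', 'foo')], limit=10,
                       access_rights_uid=SUPERUSER_ID)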
4555 self.check_access_rights(cr, access_rights_uid or user, 'read')
4557 # For transient models, restrict access to the current user, except for the super-user
4558 if self.is_transient() and self._log_access and user != SUPERUSER_ID:
4559 args = expression.AND(([('create_uid', '=', user)], args or []))
4561 query = self._where_calc(cr, user, args, context=context)
4562 self._apply_ir_rules(cr, user, query, 'read', context=context)
4563 order_by = self._generate_order_by(order, query)
4564 from_clause, where_clause, where_clause_params = query.get_sql()
4566 where_str = where_clause and (" WHERE %s" % where_clause) or ''
4569 # Ignore order, limit and offset when just counting, they don't make sense and could
4571 query_str = 'SELECT count(1) FROM ' + from_clause + where_str
4572 cr.execute(query_str, where_clause_params)
4576 limit_str = limit and ' limit %d' % limit or ''
4577 offset_str = offset and ' offset %d' % offset or ''
4578 query_str = 'SELECT "%s".id FROM ' % self._table + from_clause + where_str + order_by + limit_str + offset_str
4579 cr.execute(query_str, where_clause_params)
4582 # TDE note: with auto_join, we could have several lines about the same result
4583 # i.e. a lead with several unread messages; we uniquify the result using
4584 # a fast way to do it while preserving order (http://www.peterbe.com/plog/uniqifiers-benchmark)
4585 def _uniquify_list(seq):
4587 return [x for x in seq if x not in seen and not seen.add(x)]
4589 return _uniquify_list([x[0] for x in res])
4591 # returns the different values ever entered for one field
4592 # this is used, for example, in the client when the user hits enter on
4594 def distinct_field_get(self, cr, uid, field, value, args=None, offset=0, limit=None):
4597 if field in self._inherit_fields:
4598 return self.pool[self._inherit_fields[field][0]].distinct_field_get(cr, uid, field, value, args, offset, limit)
4600 return self._columns[field].search(cr, self, args, field, value, offset, limit, uid)
4602 def copy_data(self, cr, uid, id, default=None, context=None):
4604 Copy given record's data with all its fields values
4606 :param cr: database cursor
4607 :param uid: current user id
4608 :param id: id of the record to copy
4609 :param default: field values to override in the original values of the copied record
4610 :type default: dictionary
4611 :param context: context arguments, like lang, time zone
4612 :type context: dictionary
4613 :return: dictionary containing all the field values
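The returned dictionary can be fed directly to :meth:`~.create`; x2many values
come back in the commands format described in :meth:`~.write`, e.g.
``[(0, 0, {...})]`` for copied one2many lines and ``[(6, 0, ids)]`` for
many2many links.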
4619 # avoid recursion through already copied records in case of circular relationship
4620 seen_map = context.setdefault('__copy_data_seen', {})
        if id in seen_map.setdefault(self._name, []):
            return
4623 seen_map[self._name].append(id)
4627 if 'state' not in default:
4628 if 'state' in self._defaults:
4629 if callable(self._defaults['state']):
4630 default['state'] = self._defaults['state'](self, cr, uid, context)
                else:
                    default['state'] = self._defaults['state']
4634 # build a black list of fields that should not be copied
4635 blacklist = set(MAGIC_COLUMNS + ['parent_left', 'parent_right'])
4636 whitelist = set(name for name, field in self._fields.iteritems() if not field.inherited)
4638 def blacklist_given_fields(obj):
4639 # blacklist the fields that are given by inheritance
4640 for other, field_to_other in obj._inherits.items():
4641 blacklist.add(field_to_other)
4642 if field_to_other in default:
4643 # all the fields of 'other' are given by the record: default[field_to_other],
4644 # except the ones redefined in self
4645 blacklist.update(set(self.pool[other]._fields) - whitelist)
                else:
                    blacklist_given_fields(self.pool[other])
4648 # blacklist deprecated fields
4649 for name, field in obj._fields.iteritems():
                if field.deprecated:
                    blacklist.add(name)
4653 blacklist_given_fields(self)
4656 fields_to_copy = dict((f,fi) for f, fi in self._fields.iteritems()
4659 if f not in blacklist)
4661 data = self.read(cr, uid, [id], fields_to_copy.keys(), context=context)
        if data:
            data = data[0]
        else:
            raise IndexError(_("Record #%d of %s not found, cannot copy!") % (id, self._name))
        res = dict(default)
        for f, field in fields_to_copy.iteritems():
4669 if field.type == 'many2one':
4670 res[f] = data[f] and data[f][0]
4671 elif field.type == 'one2many':
4672 other = self.pool[field.comodel_name]
4673 # duplicate following the order of the ids because we'll rely on
4674 # it later for copying translations in copy_translation()!
4675 lines = [other.copy_data(cr, uid, line_id, context=context) for line_id in sorted(data[f])]
4676 # the lines are duplicated using the wrong (old) parent, but then
4677 # are reassigned to the correct one thanks to the (0, 0, ...)
4678 res[f] = [(0, 0, line) for line in lines if line]
            elif field.type == 'many2many':
                res[f] = [(6, 0, data[f])]
            else:
                res[f] = data[f]

        return res
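    # Sketch of the shape copy_data() produces (illustrative only; the model
    # below with 'partner_id', 'line_ids' and 'tag_ids' is invented):
    #
    #   {'name': 'Foo',
    #    'partner_id': 7,                      # many2one -> bare id
    #    'line_ids': [(0, 0, {'qty': 1}),      # one2many -> create commands
    #                 (0, 0, {'qty': 2})],
    #    'tag_ids': [(6, 0, [3, 5])]}          # many2many -> replace command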
4686 def copy_translations(self, cr, uid, old_id, new_id, context=None):
4690 # avoid recursion through already copied records in case of circular relationship
4691 seen_map = context.setdefault('__copy_translations_seen',{})
        if old_id in seen_map.setdefault(self._name, []):
            return
4694 seen_map[self._name].append(old_id)
4696 trans_obj = self.pool.get('ir.translation')
4698 for field_name, field in self._fields.iteritems():
4701 # removing the lang to compare untranslated values
4702 context_wo_lang = dict(context, lang=None)
4703 old_record, new_record = self.browse(cr, uid, [old_id, new_id], context=context_wo_lang)
4704 # we must recursively copy the translations for o2o and o2m
4705 if field.type == 'one2many':
4706 target_obj = self.pool[field.comodel_name]
4707 # here we rely on the order of the ids to match the translations
4708 # as foreseen in copy_data()
4709 old_children = sorted(r.id for r in old_record[field_name])
4710 new_children = sorted(r.id for r in new_record[field_name])
4711 for (old_child, new_child) in zip(old_children, new_children):
4712 target_obj.copy_translations(cr, uid, old_child, new_child, context=context)
4713 # and for translatable fields we keep them for copy
4714 elif getattr(field, 'translate', False):
4715 if field_name in self._columns:
                    trans_name = self._name + "," + field_name
                    target_id = new_id
                    source_id = old_id
4719 elif field_name in self._inherit_fields:
4720 trans_name = self._inherit_fields[field_name][0] + "," + field_name
4721 # get the id of the parent record to set the translation
4722 inherit_field_name = self._inherit_fields[field_name][1]
4723 target_id = new_record[inherit_field_name].id
4724 source_id = old_record[inherit_field_name].id
4728 trans_ids = trans_obj.search(cr, uid, [
4729 ('name', '=', trans_name),
                    ('res_id', '=', source_id)
                ])
4732 user_lang = context.get('lang')
4733 for record in trans_obj.read(cr, uid, trans_ids, context=context):
4735 # remove source to avoid triggering _set_src
4736 del record['source']
4737 record.update({'res_id': target_id})
4738 if user_lang and user_lang == record['lang']:
4739 # 'source' to force the call to _set_src
4740 # 'value' needed if value is changed in copy(), want to see the new_value
4741 record['source'] = old_record[field_name]
4742 record['value'] = new_record[field_name]
4743 trans_obj.create(cr, uid, record, context=context)
4745 @api.returns('self', lambda value: value.id)
4746 def copy(self, cr, uid, id, default=None, context=None):
4747 """ copy(default=None)
4749 Duplicate record with given id updating it with default values
        :param dict default: dictionary of field values to override in the
            original values of the copied record, e.g: ``{'field_name': overridden_value, ...}``
4753 :returns: new record
        """
        if context is None:
            context = {}
        context = context.copy()
4759 data = self.copy_data(cr, uid, id, default, context)
4760 new_id = self.create(cr, uid, data, context)
        self.copy_translations(cr, uid, id, new_id, context)
        return new_id
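    # Hedged usage sketch (model name and default values are illustrative):
    #
    #   partner_obj = self.pool['res.partner']
    #   new_id = partner_obj.copy(cr, uid, old_id,
    #                             default={'name': 'Copy of original'},
    #                             context=context)
    #
    # copy_data() builds the values, create() inserts the new row, and
    # copy_translations() then re-attaches the ir.translation entries.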
4765 @api.returns('self')
4767 """ exists() -> records
4769 Returns the subset of records in `self` that exist, and marks deleted
4770 records as such in cache. It can be used as a test on records::
4775 By convention, new records are returned as existing.
        """
        ids = filter(None, self._ids)           # ids to check in database
        if not ids:
            return self
4780 query = """SELECT id FROM "%s" WHERE id IN %%s""" % self._table
        # psycopg2 expects a tuple for an ``IN %s`` parameter
        self._cr.execute(query, (tuple(ids),))
4782 ids = ([r[0] for r in self._cr.fetchall()] + # ids in database
4783 [id for id in self._ids if not id]) # new ids
4784 existing = self.browse(ids)
4785 if len(existing) < len(self):
4786 # mark missing records in cache with a failed value
4787 exc = MissingError(_("Record does not exist or has been deleted."))
            (self - existing)._cache.update(FailedValue(exc))
        return existing
4791 def check_recursion(self, cr, uid, ids, context=None, parent=None):
        _logger.warning("You are using deprecated %s.check_recursion(). Please use the '_check_recursion()' instead!" % \
                        self._name)
4794 assert parent is None or parent in self._columns or parent in self._inherit_fields,\
4795 "The 'parent' parameter passed to check_recursion() must be None or a valid field name"
4796 return self._check_recursion(cr, uid, ids, context, parent)
4798 def _check_recursion(self, cr, uid, ids, context=None, parent=None):
4800 Verifies that there is no loop in a hierarchical structure of records,
4801 by following the parent relationship using the **parent** field until a loop
4802 is detected or until a top-level record is found.
4804 :param cr: database cursor
4805 :param uid: current user id
4806 :param ids: list of ids of records to check
4807 :param parent: optional parent field name (default: ``self._parent_name = parent_id``)
4808 :return: **True** if the operation can proceed safely, or **False** if an infinite loop is detected.
        """
        if not parent:
            parent = self._parent_name
4813 # must ignore 'active' flag, ir.rules, etc. => direct SQL query
4814 query = 'SELECT "%s" FROM "%s" WHERE id = %%s' % (parent, self._table)
        for id in ids:
            current_id = id
            while current_id is not None:
4818 cr.execute(query, (current_id,))
4819 result = cr.fetchone()
4820 current_id = result[0] if result else None
                if current_id == id:
                    return False
        return True
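    # Typical (illustrative) use as a model constraint on a hierarchical
    # structure; the 'parent_id' field and message are hypothetical:
    #
    #   _constraints = [
    #       (osv.osv._check_recursion,
    #        'Error! You cannot create recursive categories.',
    #        ['parent_id']),
    #   ]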
4825 def _check_m2m_recursion(self, cr, uid, ids, field_name):
        Verifies that there is no loop among records linked through the given
        many2many field, by following that relation from record to record
        until a loop is detected or all reachable records have been visited.
4831 :param cr: database cursor
4832 :param uid: current user id
4833 :param ids: list of ids of records to check
4834 :param field_name: field to check
4835 :return: **True** if the operation can proceed safely, or **False** if an infinite loop is detected.
4838 field = self._fields.get(field_name)
4839 if not (field and field.type == 'many2many' and
4840 field.comodel_name == self._name and field.store):
4841 # field must be a many2many on itself
4842 raise ValueError('invalid field_name: %r' % (field_name,))
4844 query = 'SELECT distinct "%s" FROM "%s" WHERE "%s" IN %%s' % \
4845 (field.column2, field.relation, field.column1)
4849 for i in range(0, len(ids_parent), cr.IN_MAX):
                sub_ids_parent = ids_parent[i:i + cr.IN_MAX]
4852 cr.execute(query, (tuple(sub_ids_parent),))
4853 ids_parent2.extend(filter(None, map(lambda x: x[0], cr.fetchall())))
4854 ids_parent = ids_parent2
            for i in ids_parent:
                if i in ids:
                    return False
        return True
4860 def _get_external_ids(self, cr, uid, ids, *args, **kwargs):
4861 """Retrieve the External ID(s) of any database record.
4863 **Synopsis**: ``_get_xml_ids(cr, uid, ids) -> { 'id': ['module.xml_id'] }``
4865 :return: map of ids to the list of their fully qualified External IDs
4866 in the form ``module.key``, or an empty list when there's no External
4867 ID for a record, e.g.::
4869 { 'id': ['module.ext_id', 'module.ext_id_bis'],
4872 ir_model_data = self.pool.get('ir.model.data')
4873 data_ids = ir_model_data.search(cr, uid, [('model', '=', self._name), ('res_id', 'in', ids)])
4874 data_results = ir_model_data.read(cr, uid, data_ids, ['module', 'name', 'res_id'])
        result = {}
        for id in ids:
            # can't use dict.fromkeys() as the list would be shared!
            result[id] = []
        for record in data_results:
            result[record['res_id']].append('%(module)s.%(name)s' % record)
        return result
4883 def get_external_id(self, cr, uid, ids, *args, **kwargs):
4884 """Retrieve the External ID of any database record, if there
4885 is one. This method works as a possible implementation
4886 for a function field, to be able to add it to any
4887 model object easily, referencing it as ``Model.get_external_id``.
4889 When multiple External IDs exist for a record, only one
4890 of them is returned (randomly).
4892 :return: map of ids to their fully qualified XML ID,
4893 defaulting to an empty string when there's none
4894 (to be usable as a function field),
4897 { 'id': 'module.ext_id',
              'id2': '' }
        """
        results = self._get_xml_ids(cr, uid, ids)
        for k, v in results.iteritems():
            if v:
                results[k] = v[0]
            else:
                results[k] = ''
        return results
4908 # backwards compatibility
4909 get_xml_id = get_external_id
4910 _get_xml_ids = _get_external_ids
4912 def print_report(self, cr, uid, ids, name, data, context=None):
4914 Render the report `name` for the given IDs. The report must be defined
4915 for this model, not another.
4917 report = self.pool['ir.actions.report.xml']._lookup_report(cr, name)
4918 assert self._name == report.table
4919 return report.create(cr, uid, ids, data, context)
    @classmethod
    def is_transient(cls):
4924 """ Return whether the model is transient.
4926 See :class:`TransientModel`.
4929 return cls._transient
4931 def _transient_clean_rows_older_than(self, cr, seconds):
4932 assert self._transient, "Model %s is not transient, it cannot be vacuumed!" % self._name
4933 # Never delete rows used in last 5 minutes
4934 seconds = max(seconds, 300)
4935 query = ("SELECT id FROM " + self._table + " WHERE"
4936 " COALESCE(write_date, create_date, (now() at time zone 'UTC'))::timestamp"
4937 " < ((now() at time zone 'UTC') - interval %s)")
4938 cr.execute(query, ("%s seconds" % seconds,))
4939 ids = [x[0] for x in cr.fetchall()]
4940 self.unlink(cr, SUPERUSER_ID, ids)
4942 def _transient_clean_old_rows(self, cr, max_count):
4943 # Check how many rows we have in the table
4944 cr.execute("SELECT count(*) AS row_count FROM " + self._table)
        res = cr.fetchall()
        if res[0][0] <= max_count:
4947 return # max not reached, nothing to do
4948 self._transient_clean_rows_older_than(cr, 300)
4950 def _transient_vacuum(self, cr, uid, force=False):
4951 """Clean the transient records.
4953 This unlinks old records from the transient model tables whenever the
4954 "_transient_max_count" or "_max_age" conditions (if any) are reached.
4955 Actual cleaning will happen only once every "_transient_check_time" calls.
        This means this method can be called frequently (e.g. whenever
4957 a new record is created).
4958 Example with both max_hours and max_count active:
4959 Suppose max_hours = 0.2 (e.g. 12 minutes), max_count = 20, there are 55 rows in the
4960 table, 10 created/changed in the last 5 minutes, an additional 12 created/changed between
        5 and 10 minutes ago, and the rest created/changed more than 12 minutes ago.
4962 - age based vacuum will leave the 22 rows created/changed in the last 12 minutes
4963 - count based vacuum will wipe out another 12 rows. Not just 2, otherwise each addition
4964 would immediately cause the maximum to be reached again.
4965 - the 10 rows that have been created/changed the last 5 minutes will NOT be deleted
4967 assert self._transient, "Model %s is not transient, it cannot be vacuumed!" % self._name
4968 _transient_check_time = 20 # arbitrary limit on vacuum executions
4969 self._transient_check_count += 1
4970 if not force and (self._transient_check_count < _transient_check_time):
4971 return True # no vacuum cleaning this time
4972 self._transient_check_count = 0
4974 # Age-based expiration
4975 if self._transient_max_hours:
4976 self._transient_clean_rows_older_than(cr, self._transient_max_hours * 60 * 60)
4978 # Count-based expiration
4979 if self._transient_max_count:
4980 self._transient_clean_old_rows(cr, self._transient_max_count)
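    # Illustrative configuration sketch (class, model and field names are
    # invented): a wizard tunes the vacuum thresholds described above.
    #
    #   class my_import_wizard(TransientModel):
    #       _name = 'my.import.wizard'
    #       _transient_max_count = 200      # count-based expiration
    #       _transient_max_hours = 2.0      # age-based expiration
    #       _columns = {'data': fields.binary('File')}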
4984 def resolve_2many_commands(self, cr, uid, field_name, commands, fields=None, context=None):
4985 """ Serializes one2many and many2many commands into record dictionaries
4986 (as if all the records came from the database via a read()). This
4987 method is aimed at onchange methods on one2many and many2many fields.
4989 Because commands might be creation commands, not all record dicts
4990 will contain an ``id`` field. Commands matching an existing record
4991 will have an ``id``.
4993 :param field_name: name of the one2many or many2many field matching the commands
4994 :type field_name: str
4995 :param commands: one2many or many2many commands to execute on ``field_name``
4996 :type commands: list((int|False, int|False, dict|False))
4997 :param fields: list of fields to read from the database, when applicable
4998 :type fields: list(str)
4999 :returns: records in a shape similar to that returned by ``read()``
5000 (except records may be missing the ``id`` field if they don't exist in db)
5003 result = [] # result (list of dict)
5004 record_ids = [] # ids of records to read
5005 updates = {} # {id: dict} of updates on particular records
5007 for command in commands or []:
5008 if not isinstance(command, (list, tuple)):
5009 record_ids.append(command)
5010 elif command[0] == 0:
5011 result.append(command[2])
5012 elif command[0] == 1:
5013 record_ids.append(command[1])
5014 updates.setdefault(command[1], {}).update(command[2])
5015 elif command[0] in (2, 3):
5016 record_ids = [id for id in record_ids if id != command[1]]
5017 elif command[0] == 4:
5018 record_ids.append(command[1])
5019 elif command[0] == 5:
5020 result, record_ids = [], []
5021 elif command[0] == 6:
5022 result, record_ids = [], list(command[2])
5024 # read the records and apply the updates
5025 other_model = self.pool[self._fields[field_name].comodel_name]
5026 for record in other_model.read(cr, uid, record_ids, fields=fields, context=context):
5027 record.update(updates.get(record['id'], {}))
            result.append(record)

        return result
5032 # for backward compatibility
5033 resolve_o2m_commands_to_record_dicts = resolve_2many_commands
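    # Illustrative example (values invented): given commands such as
    #
    #   commands = [(0, 0, {'qty': 1}),     # new line -> no id in the result
    #               (1, 42, {'qty': 5}),    # update record 42, then read it
    #               (4, 43, False)]         # link record 43, read as-is
    #
    # resolve_2many_commands(cr, uid, 'line_ids', commands, ['qty']) would
    # return something like:
    #
    #   [{'qty': 1}, {'id': 42, 'qty': 5}, {'id': 43, 'qty': 0}]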
5035 def search_read(self, cr, uid, domain=None, fields=None, offset=0, limit=None, order=None, context=None):
5037 Performs a ``search()`` followed by a ``read()``.
5039 :param cr: database cursor
        :param uid: current user id
5041 :param domain: Search domain, see ``args`` parameter in ``search()``. Defaults to an empty domain that will match all records.
5042 :param fields: List of fields to read, see ``fields`` parameter in ``read()``. Defaults to all fields.
5043 :param offset: Number of records to skip, see ``offset`` parameter in ``search()``. Defaults to 0.
5044 :param limit: Maximum number of records to return, see ``limit`` parameter in ``search()``. Defaults to no limit.
5045 :param order: Columns to sort result, see ``order`` parameter in ``search()``. Defaults to no sort.
5046 :param context: context arguments.
5047 :return: List of dictionaries containing the asked fields.
5048 :rtype: List of dictionaries.
        """
        record_ids = self.search(cr, uid, domain or [], offset=offset, limit=limit, order=order, context=context)
        if not record_ids:
            return []
5055 if fields and fields == ['id']:
5056 # shortcut read if we only want the ids
5057 return [{'id': id} for id in record_ids]
5059 # read() ignores active_test, but it would forward it to any downstream search call
5060 # (e.g. for x2m or function fields), and this is not the desired behavior, the flag
5061 # was presumably only meant for the main search().
5062 # TODO: Move this to read() directly?
5063 read_ctx = dict(context or {})
5064 read_ctx.pop('active_test', None)
5066 result = self.read(cr, uid, record_ids, fields, context=read_ctx)
        if len(result) <= 1:
            return result

        # reorder the read() results to match the order returned by search()
5071 index = dict((r['id'], r) for r in result)
5072 return [index[x] for x in record_ids if x in index]
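    # Hedged usage sketch (model, fields and domain are illustrative):
    #
    #   partners = self.pool['res.partner'].search_read(
    #       cr, uid,
    #       domain=[('customer', '=', True)],
    #       fields=['name', 'email'],
    #       limit=10, order='name',
    #       context=context)
    #   # -> [{'id': 1, 'name': 'Agrolait', 'email': '...'}, ...]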
5074 def _register_hook(self, cr):
5075 """ stuff to do right after the registry is built """
    @classmethod
    def _patch_method(cls, name, method):
5080 """ Monkey-patch a method for all instances of this model. This replaces
5081 the method called `name` by `method` in the given class.
5082 The original method is then accessible via ``method.origin``, and it
5083 can be restored with :meth:`~._revert_method`.
5088 def do_write(self, values):
5089 # do stuff, and call the original method
5090 return do_write.origin(self, values)
5092 # patch method write of model
5093 model._patch_method('write', do_write)
5095 # this will call do_write
5096 records = model.search([...])
5099 # restore the original method
5100 model._revert_method('write')
5102 origin = getattr(cls, name)
5103 method.origin = origin
5104 # propagate decorators from origin to method, and apply api decorator
5105 wrapped = api.guess(api.propagate(origin, method))
5106 wrapped.origin = origin
5107 setattr(cls, name, wrapped)
    @classmethod
    def _revert_method(cls, name):
5111 """ Revert the original method called `name` in the given class.
5112 See :meth:`~._patch_method`.
5114 method = getattr(cls, name)
5115 setattr(cls, name, method.origin)
5120 # An instance represents an ordered collection of records in a given
5121 # execution environment. The instance object refers to the environment, and
5122 # the records themselves are represented by their cache dictionary. The 'id'
5123 # of each record is found in its corresponding cache dictionary.
5125 # This design has the following advantages:
5126 # - cache access is direct and thus fast;
5127 # - one can consider records without an 'id' (see new records);
5128 # - the global cache is only an index to "resolve" a record 'id'.
    @classmethod
    def _browse(cls, env, ids):
        """ Create an instance attached to `env`; `ids` is a tuple of record ids. """
        records = object.__new__(cls)
        records.env = env
        records._ids = ids
        env.prefetch[cls._name].update(ids)
        return records
    @api.v7
    def browse(self, cr, uid, arg=None, context=None):
5144 ids = _normalize_ids(arg)
5145 #assert all(isinstance(id, IdType) for id in ids), "Browsing invalid ids: %s" % ids
5146 return self._browse(Environment(cr, uid, context or {}), ids)
    @api.v8
    def browse(self, arg=None):
5150 """ browse([ids]) -> records
5152 Returns a recordset for the ids provided as parameter in the current
5155 Can take no ids, a single id or a sequence of ids.
5157 ids = _normalize_ids(arg)
5158 #assert all(isinstance(id, IdType) for id in ids), "Browsing invalid ids: %s" % ids
5159 return self._browse(self.env, ids)
5162 # Internal properties, for manipulating the instance's implementation
5167 """ List of actual record ids in this recordset (ignores placeholder
5168 ids for records to create)
5170 return filter(None, list(self._ids))
5172 # backward-compatibility with former browse records
5173 _cr = property(lambda self: self.env.cr)
5174 _uid = property(lambda self: self.env.uid)
5175 _context = property(lambda self: self.env.context)
5178 # Conversion methods
5181 def ensure_one(self):
5182 """ Verifies that the current recorset holds a single record. Raises
5183 an exception otherwise.
5187 raise except_orm("ValueError", "Expected singleton: %s" % self)
5189 def with_env(self, env):
5190 """ Returns a new version of this recordset attached to the provided
5193 :type env: :class:`~openerp.api.Environment`
5195 return self._browse(env, self._ids)
5197 def sudo(self, user=SUPERUSER_ID):
5198 """ sudo([user=SUPERUSER])
        Returns a new version of this recordset attached to the provided
        user.
        """
        return self.with_env(self.env(user=user))
5205 def with_context(self, *args, **kwargs):
5206 """ with_context([context][, **overrides]) -> records
        Returns a new version of this recordset attached to an extended
        context.

        The extended context is either the provided ``context`` in which
5212 ``overrides`` are merged or the *current* context in which
5213 ``overrides`` are merged e.g.::
5215 # current context is {'key1': True}
5216 r2 = records.with_context({}, key2=True)
5217 # -> r2._context is {'key2': True}
5218 r2 = records.with_context(key2=True)
5219 # -> r2._context is {'key1': True, 'key2': True}
5221 context = dict(args[0] if args else self._context, **kwargs)
5222 return self.with_env(self.env(context=context))
5224 def _convert_to_cache(self, values, update=False, validate=True):
5225 """ Convert the `values` dictionary into cached values.
5227 :param update: whether the conversion is made for updating `self`;
5228 this is necessary for interpreting the commands of *2many fields
5229 :param validate: whether values must be checked
5231 fields = self._fields
5232 target = self if update else self.browse()
        return {
            name: fields[name].convert_to_cache(value, target, validate=validate)
            for name, value in values.iteritems()
            if name in fields
        }
5239 def _convert_to_write(self, values):
5240 """ Convert the `values` dictionary into the format of :meth:`write`. """
        fields = self._fields
        result = {}
        for name, value in values.iteritems():
            if name in fields:
                value = fields[name].convert_to_write(value)
                if not isinstance(value, NewId):
                    result[name] = value
        return result
5251 # Record traversal and update
5254 def _mapped_func(self, func):
5255 """ Apply function `func` on all records in `self`, and return the
5256 result as a list or a recordset (if `func` returns recordsets).
        if self:
            vals = [func(rec) for rec in self]
            return reduce(operator.or_, vals) if isinstance(vals[0], BaseModel) else vals
        else:
            vals = func(self)
            return vals if isinstance(vals, BaseModel) else []
5265 def mapped(self, func):
5266 """ Apply `func` on all records in `self`, and return the result as a
5267 list or a recordset (if `func` return recordsets). In the latter
5268 case, the order of the returned recordset is arbritrary.
5270 :param func: a function or a dot-separated sequence of field names
        """
        if isinstance(func, basestring):
            recs = self
            for name in func.split('.'):
                recs = recs._mapped_func(operator.itemgetter(name))
            return recs
        else:
            return self._mapped_func(func)
5280 def _mapped_cache(self, name_seq):
5281 """ Same as `~.mapped`, but `name_seq` is a dot-separated sequence of
5282 field names, and only cached values are used.
        """
        recs = self
        for name in name_seq.split('.'):
            field = recs._fields[name]
            null = field.null(self.env)
            recs = recs.mapped(lambda rec: rec._cache.get(field, null))
        return recs
5291 def filtered(self, func):
5292 """ Select the records in `self` such that `func(rec)` is true, and
5293 return them as a recordset.
5295 :param func: a function or a dot-separated sequence of field names
        """
        if isinstance(func, basestring):
            name = func
            func = lambda rec: filter(None, rec.mapped(name))
5300 return self.browse([rec.id for rec in self if func(rec)])
5302 def sorted(self, key=None, reverse=False):
5303 """ Return the recordset `self` ordered by `key`.
5305 :param key: either a function of one argument that returns a
5306 comparison key for each record, or ``None``, in which case
5307 records are ordered according the default model's order
5309 :param reverse: if ``True``, return the result in reverse order
        """
        if key is None:
            recs = self.search([('id', 'in', self.ids)])
            return self.browse(reversed(recs._ids)) if reverse else recs
        else:
            return self.browse(map(int, sorted(self, key=key, reverse=reverse)))
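    # Illustrative chaining of the traversal helpers above (new-style API;
    # the model and field names are invented):
    #
    #   partners = env['res.partner'].search([])
    #   emails    = partners.mapped('email')                 # list of strings
    #   banks     = partners.mapped('bank_ids')              # merged recordset
    #   with_mail = partners.filtered(lambda r: r.email)     # subset
    #   by_name   = partners.sorted(key=lambda r: r.name)    # reordered copy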
5317 def update(self, values):
5318 """ Update record `self[0]` with `values`. """
        for name, value in values.iteritems():
            self[name] = value
5323 # New records - represent records that do not exist in the database yet;
5324 # they are used to perform onchanges.
5328 def new(self, values={}):
5329 """ new([values]) -> record
5331 Return a new record instance attached to the current environment and
5332 initialized with the provided ``value``. The record is *not* created
5333 in database, it only exists in memory.
5335 record = self.browse([NewId()])
5336 record._cache.update(record._convert_to_cache(values, update=True))
5338 if record.env.in_onchange:
5339 # The cache update does not set inverse fields, so do it manually.
5340 # This is useful for computing a function field on secondary
5341 # records, if that field depends on the main record.
            for name in values:
                field = self._fields.get(name)
                if field:
                    for invf in field.inverse_fields:
                        invf._update(record[name], record)

        return record
5351 # Dirty flags, to mark record fields modified (in draft mode)
5354 def _is_dirty(self):
5355 """ Return whether any record in `self` is dirty. """
5356 dirty = self.env.dirty
5357 return any(record in dirty for record in self)
5359 def _get_dirty(self):
5360 """ Return the list of field names for which `self` is dirty. """
5361 dirty = self.env.dirty
5362 return list(dirty.get(self, ()))
5364 def _set_dirty(self, field_name):
5365 """ Mark the records in `self` as dirty for the given `field_name`. """
5366 dirty = self.env.dirty
        for record in self:
            dirty[record].add(field_name)
5374 def __nonzero__(self):
5375 """ Test whether `self` is nonempty. """
5376 return bool(getattr(self, '_ids', True))
5379 """ Return the size of `self`. """
5380 return len(self._ids)
5383 """ Return an iterator over `self`. """
5384 for id in self._ids:
5385 yield self._browse(self.env, (id,))
5387 def __contains__(self, item):
5388 """ Test whether `item` (record or field name) is an element of `self`.
5389 In the first case, the test is fully equivalent to::
5391 any(item == record for record in self)
5393 if isinstance(item, BaseModel) and self._name == item._name:
5394 return len(item) == 1 and item.id in self._ids
5395 elif isinstance(item, basestring):
            return item in self._fields
        else:
            raise except_orm("ValueError", "Mixing apples and oranges: %s in %s" % (item, self))
5400 def __add__(self, other):
5401 """ Return the concatenation of two recordsets. """
5402 if not isinstance(other, BaseModel) or self._name != other._name:
5403 raise except_orm("ValueError", "Mixing apples and oranges: %s + %s" % (self, other))
5404 return self.browse(self._ids + other._ids)
5406 def __sub__(self, other):
5407 """ Return the recordset of all the records in `self` that are not in `other`. """
5408 if not isinstance(other, BaseModel) or self._name != other._name:
5409 raise except_orm("ValueError", "Mixing apples and oranges: %s - %s" % (self, other))
5410 other_ids = set(other._ids)
5411 return self.browse([id for id in self._ids if id not in other_ids])
5413 def __and__(self, other):
5414 """ Return the intersection of two recordsets.
5415 Note that recordset order is not preserved.
5417 if not isinstance(other, BaseModel) or self._name != other._name:
5418 raise except_orm("ValueError", "Mixing apples and oranges: %s & %s" % (self, other))
5419 return self.browse(set(self._ids) & set(other._ids))
5421 def __or__(self, other):
5422 """ Return the union of two recordsets.
5423 Note that recordset order is not preserved.
5425 if not isinstance(other, BaseModel) or self._name != other._name:
5426 raise except_orm("ValueError", "Mixing apples and oranges: %s | %s" % (self, other))
5427 return self.browse(set(self._ids) | set(other._ids))
5429 def __eq__(self, other):
5430 """ Test whether two recordsets are equivalent (up to reordering). """
5431 if not isinstance(other, BaseModel):
            if other:
                _logger.warning("Comparing apples and oranges: %s == %s", self, other)
            return False
5435 return self._name == other._name and set(self._ids) == set(other._ids)
5437 def __ne__(self, other):
5438 return not self == other
5440 def __lt__(self, other):
5441 if not isinstance(other, BaseModel) or self._name != other._name:
5442 raise except_orm("ValueError", "Mixing apples and oranges: %s < %s" % (self, other))
5443 return set(self._ids) < set(other._ids)
5445 def __le__(self, other):
5446 if not isinstance(other, BaseModel) or self._name != other._name:
5447 raise except_orm("ValueError", "Mixing apples and oranges: %s <= %s" % (self, other))
5448 return set(self._ids) <= set(other._ids)
5450 def __gt__(self, other):
5451 if not isinstance(other, BaseModel) or self._name != other._name:
5452 raise except_orm("ValueError", "Mixing apples and oranges: %s > %s" % (self, other))
5453 return set(self._ids) > set(other._ids)
5455 def __ge__(self, other):
5456 if not isinstance(other, BaseModel) or self._name != other._name:
5457 raise except_orm("ValueError", "Mixing apples and oranges: %s >= %s" % (self, other))
5458 return set(self._ids) >= set(other._ids)
5464 return "%s%s" % (self._name, getattr(self, '_ids', ""))
5466 def __unicode__(self):
5467 return unicode(str(self))
    def __hash__(self):
        if hasattr(self, '_ids'):
            return hash((self._name, frozenset(self._ids)))
        else:
            return hash(self._name)
5477 def __getitem__(self, key):
5478 """ If `key` is an integer or a slice, return the corresponding record
5479 selection as an instance (attached to `self.env`).
5480 Otherwise read the field `key` of the first record in `self`.
5484 inst = model.search(dom) # inst is a recordset
5485 r4 = inst[3] # fourth record in inst
5486 rs = inst[10:20] # subset of inst
5487 nm = rs['name'] # name of first record in inst
5489 if isinstance(key, basestring):
5490 # important: one must call the field's getter
5491 return self._fields[key].__get__(self, type(self))
5492 elif isinstance(key, slice):
5493 return self._browse(self.env, self._ids[key])
        else:
            return self._browse(self.env, (self._ids[key],))
5497 def __setitem__(self, key, value):
5498 """ Assign the field `key` to `value` in record `self`. """
5499 # important: one must call the field's setter
5500 return self._fields[key].__set__(self, value)
5503 # Cache and recomputation management
5508 """ Return the cache of `self`, mapping field names to values. """
5509 return RecordCache(self)
5512 def _in_cache_without(self, field):
5513 """ Make sure `self` is present in cache (for prefetching), and return
5514 the records of model `self` in cache that have no value for `field`
5515 (:class:`Field` instance).
        """
        env = self.env
        prefetch_ids = env.prefetch[self._name]
5519 prefetch_ids.update(self._ids)
5520 ids = filter(None, prefetch_ids - set(env.cache[field]))
5521 return self.browse(ids)
5525 """ Clear the records cache.
5528 The record cache is automatically invalidated.
5530 self.invalidate_cache()
5533 def invalidate_cache(self, fnames=None, ids=None):
5534 """ Invalidate the record caches after some records have been modified.
5535 If both `fnames` and `ids` are ``None``, the whole cache is cleared.
5537 :param fnames: the list of modified fields, or ``None`` for all fields
5538 :param ids: the list of modified record ids, or ``None`` for all
        """
        if fnames is None:
            if ids is None:
                return self.env.invalidate_all()
            fields = self._fields.values()
        else:
            fields = map(self._fields.__getitem__, fnames)
5547 # invalidate fields and inverse fields, too
5548 spec = [(f, ids) for f in fields] + \
5549 [(invf, None) for f in fields for invf in f.inverse_fields]
5550 self.env.invalidate(spec)
5553 def modified(self, fnames):
5554 """ Notify that fields have been modified on `self`. This invalidates
5555 the cache, and prepares the recomputation of stored function fields
5556 (new-style fields only).
5558 :param fnames: iterable of field names that have been modified on
5561 # each field knows what to invalidate and recompute
        spec = []
        for fname in fnames:
            spec += self._fields[fname].modified(self)

        cached_fields = {
            field
            for env in self.env.all
            for field in env.cache
        }
        # invalidate non-stored fields.function which are currently cached
        spec += [(f, None) for f in self.pool.pure_function_fields
                 if f in cached_fields]
5575 self.env.invalidate(spec)
5577 def _recompute_check(self, field):
5578 """ If `field` must be recomputed on some record in `self`, return the
5579 corresponding records that must be recomputed.
5581 return self.env.check_todo(field, self)
5583 def _recompute_todo(self, field):
5584 """ Mark `field` to be recomputed. """
5585 self.env.add_todo(field, self)
5587 def _recompute_done(self, field):
5588 """ Mark `field` as recomputed. """
5589 self.env.remove_todo(field, self)
5592 def recompute(self):
5593 """ Recompute stored function fields. The fields and records to
5594 recompute have been determined by method :meth:`modified`.
5596 while self.env.has_todo():
5597 field, recs = self.env.get_todo()
5598 # evaluate the fields to recompute, and save them to database
5599 for rec, rec1 in zip(recs, recs.with_context(recompute=False)):
                try:
                    values = rec._convert_to_write({
                        f.name: rec[f.name] for f in field.computed_fields
                    })
                    rec1._write(values)
                except MissingError:
                    pass
5607 # mark the computed fields as done
5608 map(recs._recompute_done, field.computed_fields)
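    # Sketch of how modified()/recompute() are typically exercised by a stored
    # computed field (new-style API; the model, fields and module names below
    # are illustrative, not part of this file):
    #
    #   from openerp import models, fields, api
    #
    #   class ExampleOrder(models.Model):
    #       _name = 'example.order'
    #       amount = fields.Float(compute='_compute_amount', store=True)
    #
    #       @api.depends('line_ids.subtotal')
    #       def _compute_amount(self):
    #           for rec in self:
    #               rec.amount = sum(rec.line_ids.mapped('subtotal'))
    #
    # Writing to a line's 'subtotal' calls modified(), which marks 'amount' as
    # to-do; a later recompute() evaluates it and saves it through _write().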
5611 # Generic onchange method
5614 def _has_onchange(self, field, other_fields):
5615 """ Return whether `field` should trigger an onchange event in the
5616 presence of `other_fields`.
5618 # test whether self has an onchange method for field, or field is a
5619 # dependency of any field in other_fields
5620 return field.name in self._onchange_methods or \
5621 any(dep in other_fields for dep in field.dependents)
5624 def _onchange_spec(self, view_info=None):
5625 """ Return the onchange spec from a view description; if not given, the
5626 result of ``self.fields_view_get()`` is used.
        """
        result = {}

        # for traversing the XML arch and populating result
5631 def process(node, info, prefix):
5632 if node.tag == 'field':
5633 name = node.attrib['name']
5634 names = "%s.%s" % (prefix, name) if prefix else name
5635 if not result.get(names):
5636 result[names] = node.attrib.get('on_change')
5637 # traverse the subviews included in relational fields
5638 for subinfo in info['fields'][name].get('views', {}).itervalues():
5639 process(etree.fromstring(subinfo['arch']), subinfo, names)
            else:
                for child in node:
                    process(child, info, prefix)
5644 if view_info is None:
5645 view_info = self.fields_view_get()
        process(etree.fromstring(view_info['arch']), view_info, '')
        return result
5649 def _onchange_eval(self, field_name, onchange, result):
5650 """ Apply onchange method(s) for field `field_name` with spec `onchange`
5651 on record `self`. Value assignments are applied on `self`, while
5652 domain and warning messages are put in dictionary `result`.
5654 onchange = onchange.strip()
5657 if onchange in ("1", "true"):
5658 for method in self._onchange_methods.get(field_name, ()):
5659 method_res = method(self)
5662 if 'domain' in method_res:
5663 result.setdefault('domain', {}).update(method_res['domain'])
                if 'warning' in method_res:
                    result['warning'] = method_res['warning']
            return
5669 match = onchange_v7.match(onchange)
        if match:
            method, params = match.groups()
5673 # evaluate params -> tuple
5674 global_vars = {'context': self._context, 'uid': self._uid}
5675 if self._context.get('field_parent'):
5676 class RawRecord(object):
5677 def __init__(self, record):
5678 self._record = record
5679 def __getattr__(self, name):
5680 field = self._record._fields[name]
5681 value = self._record[name]
5682 return field.convert_to_onchange(value)
5683 record = self[self._context['field_parent']]
5684 global_vars['parent'] = RawRecord(record)
            field_vars = {
                key: self._fields[key].convert_to_onchange(val)
                for key, val in self._cache.iteritems()
            }
            params = eval("[%s]" % params, global_vars, field_vars)
5691 # call onchange method
5692 args = (self._cr, self._uid, self._origin.ids) + tuple(params)
5693 method_res = getattr(self._model, method)(*args)
            if not isinstance(method_res, dict):
                return
5696 if 'value' in method_res:
5697 method_res['value'].pop('id', None)
5698 self.update(self._convert_to_cache(method_res['value'], validate=False))
5699 if 'domain' in method_res:
5700 result.setdefault('domain', {}).update(method_res['domain'])
5701 if 'warning' in method_res:
5702 result['warning'] = method_res['warning']
5705 def onchange(self, values, field_name, field_onchange):
5706 """ Perform an onchange on the given field.
5708 :param values: dictionary mapping field names to values, giving the
5709 current state of modification
5710 :param field_name: name of the modified field_name
            :param field_onchange: dictionary mapping field names to their
                on_change attribute
        """
        env = self.env

        if field_name and field_name not in self._fields:
            return {}

        # determine subfields for field.convert_to_write() below
        secondary = []
        subfields = defaultdict(set)
        for dotname in field_onchange:
            if '.' in dotname:
                secondary.append(dotname)
                name, subname = dotname.split('.')
                subfields[name].add(subname)
5728 # create a new record with values, and attach `self` to it
5729 with env.do_in_onchange():
5730 record = self.new(values)
5731 values = dict(record._cache)
5732 # attach `self` with a different context (for cache consistency)
5733 record._origin = self.with_context(__onchange=True)
5735 # determine which field should be triggered an onchange
5736 todo = set([field_name]) if field_name else set(values)
        done = set()

        # dummy assignment: trigger invalidations on the record
        for name in todo:
            value = record[name]
5742 field = self._fields[name]
5743 if not field_name and field.type == 'many2one' and field.delegate and not value:
                # do not nullify all fields of parent record for new records
                continue
            record[name] = value
5748 result = {'value': {}}
5756 with env.do_in_onchange():
5757 # apply field-specific onchange methods
5758 if field_onchange.get(name):
5759 record._onchange_eval(name, field_onchange[name], result)
5761 # force re-evaluation of function fields on secondary records
5762 for field_seq in secondary:
5763 record.mapped(field_seq)
5765 # determine which fields have been modified
5766 for name, oldval in values.iteritems():
5767 field = self._fields[name]
5768 newval = record[name]
5769 if field.type in ('one2many', 'many2many'):
5770 if newval != oldval or newval._is_dirty():
5771 # put new value in result
5772 result['value'][name] = field.convert_to_write(
5773 newval, record._origin, subfields.get(name),
5777 # keep result: newval may have been dirty before
5780 if newval != oldval:
5781 # put new value in result
5782 result['value'][name] = field.convert_to_write(
5783 newval, record._origin, subfields.get(name),
5787 # clean up result to not return another value
5788 result['value'].pop(name, None)
5790 # At the moment, the client does not support updates on a *2many field
5791 # while this one is modified by the user.
5792 if field_name and self._fields[field_name].type in ('one2many', 'many2many'):
            result['value'].pop(field_name, None)

        return result
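    # Hedged sketch of a new-style onchange method that this generic
    # dispatcher ends up calling (model and field names are invented):
    #
    #   @api.onchange('partner_id')
    #   def _onchange_partner(self):
    #       self.pricelist_id = self.partner_id.property_product_pricelist
    #       return {'warning': {'title': 'Check pricelist',
    #                           'message': 'Pricelist was reset.'}}
    #
    # The assignments update the virtual record created by new(); the returned
    # dictionary feeds the 'warning'/'domain' keys of the result above.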
5798 class RecordCache(MutableMapping):
5799 """ Implements a proxy dictionary to read/update the cache of a record.
5800 Upon iteration, it looks like a dictionary mapping field names to
5801 values. However, fields may be used as keys as well.
5803 def __init__(self, records):
5804 self._recs = records
5806 def contains(self, field):
5807 """ Return whether `records[0]` has a value for `field` in cache. """
5808 if isinstance(field, basestring):
5809 field = self._recs._fields[field]
5810 return self._recs.id in self._recs.env.cache[field]
5812 def __contains__(self, field):
5813 """ Return whether `records[0]` has a regular value for `field` in cache. """
5814 if isinstance(field, basestring):
5815 field = self._recs._fields[field]
5816 dummy = SpecialValue(None)
5817 value = self._recs.env.cache[field].get(self._recs.id, dummy)
5818 return not isinstance(value, SpecialValue)
5820 def __getitem__(self, field):
5821 """ Return the cached value of `field` for `records[0]`. """
5822 if isinstance(field, basestring):
5823 field = self._recs._fields[field]
5824 value = self._recs.env.cache[field][self._recs.id]
5825 return value.get() if isinstance(value, SpecialValue) else value
5827 def __setitem__(self, field, value):
5828 """ Assign the cached value of `field` for all records in `records`. """
5829 if isinstance(field, basestring):
5830 field = self._recs._fields[field]
5831 values = dict.fromkeys(self._recs._ids, value)
5832 self._recs.env.cache[field].update(values)
5834 def update(self, *args, **kwargs):
5835 """ Update the cache of all records in `records`. If the argument is a
5836 `SpecialValue`, update all fields (except "magic" columns).
5838 if args and isinstance(args[0], SpecialValue):
5839 values = dict.fromkeys(self._recs._ids, args[0])
5840 for name, field in self._recs._fields.iteritems():
                if name != 'id':
                    self._recs.env.cache[field].update(values)
        else:
            return super(RecordCache, self).update(*args, **kwargs)
5846 def __delitem__(self, field):
5847 """ Remove the cached value of `field` for all `records`. """
5848 if isinstance(field, basestring):
5849 field = self._recs._fields[field]
5850 field_cache = self._recs.env.cache[field]
5851 for id in self._recs._ids:
5852 field_cache.pop(id, None)
5855 """ Iterate over the field names with a regular value in cache. """
5856 cache, id = self._recs.env.cache, self._recs.id
5857 dummy = SpecialValue(None)
5858 for name, field in self._recs._fields.iteritems():
            if name != 'id' and not isinstance(cache[field].get(id, dummy), SpecialValue):
                yield name
5863 """ Return the number of fields with a regular value in cache. """
5864 return sum(1 for name in self)
5866 class Model(BaseModel):
5867 """Main super-class for regular database-persisted OpenERP models.
5869 OpenERP models are created by inheriting from this class::
5874 The system will later instantiate the class once per database (on
5875 which the class' module is installed).
5878 _register = False # not visible in ORM registry, meant to be python-inherited only
5879 _transient = False # True in a TransientModel
5881 class TransientModel(BaseModel):
5882 """Model super-class for transient records, meant to be temporarily
5883 persisted, and regularly vaccuum-cleaned.
5885 A TransientModel has a simplified access rights management,
5886 all users can create new records, and may only access the
5887 records they created. The super-user has unrestricted access
5888 to all TransientModel records.
    _register = False # not visible in ORM registry, meant to be python-inherited only
    _transient = True
5894 class AbstractModel(BaseModel):
5895 """Abstract Model super-class for creating an abstract class meant to be
5896 inherited by regular models (Models or TransientModels) but not meant to
5897 be usable on its own, or persisted.
5899 Technical note: we don't want to make AbstractModel the super-class of
5900 Model or BaseModel because it would not make sense to put the main
5901 definition of persistence methods such as create() in it, and still we
5902 should be able to override them within an AbstractModel.
5904 _auto = False # don't create any database backend for AbstractModels
5905 _register = False # not visible in ORM registry, meant to be python-inherited only
5908 def itemgetter_tuple(items):
5909 """ Fixes itemgetter inconsistency (useful in some cases) of not returning
5910 a tuple if len(items) == 1: always returns an n-tuple where n = len(items)
    """
    if len(items) == 0:
        return lambda a: ()
    if len(items) == 1:
        return lambda gettable: (gettable[items[0]],)
5916 return operator.itemgetter(*items)
5918 def convert_pgerror_23502(model, fields, info, e):
5919 m = re.match(r'^null value in column "(?P<field>\w+)" violates '
                 r'not-null constraint\n',
                 str(e))
5922 field_name = m and m.group('field')
5923 if not m or field_name not in fields:
5924 return {'message': unicode(e)}
5925 message = _(u"Missing required value for the field '%s'.") % field_name
5926 field = fields.get(field_name)
    if field:
        message = _(u"Missing required value for the field '%s' (%s)") % (field['string'], field_name)
    return {
        'message': message,
        'field': field_name,
    }
5934 def convert_pgerror_23505(model, fields, info, e):
5935 m = re.match(r'^duplicate key (?P<field>\w+) violates unique constraint',
                 str(e))
    field_name = m and m.group('field')
5938 if not m or field_name not in fields:
5939 return {'message': unicode(e)}
5940 message = _(u"The value for the field '%s' already exists.") % field_name
5941 field = fields.get(field_name)
    if field:
        message = _(u"%s This might be '%s' in the current model, or a field "
                    u"of the same name in an o2m.") % (message, field['string'])
    return {
        'message': message,
        'field': field_name,
    }
5950 PGERROR_TO_OE = defaultdict(
5951 # shape of mapped converters
5952 lambda: (lambda model, fvg, info, pgerror: {'message': unicode(pgerror)}), {
5953 # not_null_violation
5954 '23502': convert_pgerror_23502,
5955 # unique constraint error
    '23505': convert_pgerror_23505,
})
5959 def _normalize_ids(arg, atoms={int, long, str, unicode, NewId}):
5960 """ Normalizes the ids argument for ``browse`` (v7 and v8) to a tuple.
5962 Various implementations were tested on the corpus of all browse() calls
5963 performed during a full crawler run (after having installed all website_*
5964 modules) and this one was the most efficient overall.
5966 A possible bit of correctness was sacrificed by not doing any test on
    Iterable and just assuming that any non-atomic type was an iterable of
    some kind.
    """
    # much of the corpus is falsy objects (empty list, tuple or set, None)
    if not arg:
        return ()
5976 # `type in set` is significantly faster (because more restrictive) than
5977 # isinstance(arg, set) or issubclass(type, set); and for new-style classes
5978 # obj.__class__ is equivalent to but faster than type(obj). Not relevant
5979 # (and looks much worse) in most cases, but over millions of calls it
5980 # does have a very minor effect.
    if arg.__class__ in atoms:
        return arg,

    return tuple(arg)
5986 # keep those imports here to avoid dependency cycle errors
5987 from .osv import expression
5988 from .fields import Field, SpecialValue, FailedValue
5990 # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: