1 # -*- coding: utf-8 -*-
2 ##############################################################################
4 # OpenERP, Open Source Management Solution
5 # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
7 # This program is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU Affero General Public License as
9 # published by the Free Software Foundation, either version 3 of the
10 # License, or (at your option) any later version.
12 # This program is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU Affero General Public License for more details.
17 # You should have received a copy of the GNU Affero General Public License
18 # along with this program. If not, see <http://www.gnu.org/licenses/>.
20 ##############################################################################
24 Object Relational Mapping module:
25 * Hierarchical structure
26 * Constraints consistency and validation
27 * Object metadata depends on its status
28 * Optimised processing by complex query (multiple actions at once)
29 * Default field values
30 * Permissions optimisation
31 * Persistant object: DB postgresql
33 * Multi-level caching system
34 * Two different inheritance mechanisms
35 * Rich set of field types:
36 - classical (varchar, integer, boolean, ...)
37 - relational (one2many, many2one, many2many)
51 from collections import defaultdict, MutableMapping
52 from inspect import getmembers
55 import dateutil.relativedelta
57 from lxml import etree
60 from . import SUPERUSER_ID
63 from .api import Environment
64 from .exceptions import except_orm, AccessError, MissingError, ValidationError
65 from .osv import fields
66 from .osv.query import Query
67 from .tools import lazy_property, ormcache
68 from .tools.config import config
69 from .tools.misc import CountingStream, DEFAULT_SERVER_DATETIME_FORMAT, DEFAULT_SERVER_DATE_FORMAT
70 from .tools.safe_eval import safe_eval as eval
71 from .tools.translate import _
73 _logger = logging.getLogger(__name__)
74 _schema = logging.getLogger(__name__ + '.schema')
76 regex_order = re.compile('^( *([a-z0-9:_]+|"[a-z0-9:_]+")( *desc| *asc)?( *, *|))+$', re.I)
77 regex_object_name = re.compile(r'^[a-z0-9_.]+$')
78 onchange_v7 = re.compile(r"^(\w+)\((.*)\)$")
80 AUTOINIT_RECALCULATE_STORED_FIELDS = 1000
83 def check_object_name(name):
84 """ Check if the given name is a valid openerp object name.
86 The _name attribute in osv and osv_memory object is subject to
87 some restrictions. This function returns True or False whether
88 the given name is allowed or not.
90 TODO: this is an approximation. The goal in this approximation
91 is to disallow uppercase characters (in some places, we quote
92 table/column names and in other not, which leads to this kind
95 psycopg2.ProgrammingError: relation "xxx" does not exist).
97 The same restriction should apply to both osv and osv_memory
98 objects for consistency.
101 if regex_object_name.match(name) is None:
105 def raise_on_invalid_object_name(name):
106 if not check_object_name(name):
107 msg = "The _name attribute %s is not valid." % name
109 raise except_orm('ValueError', msg)
111 POSTGRES_CONFDELTYPES = {
119 def intersect(la, lb):
120 return filter(lambda x: x in lb, la)
123 """ Test whether functions `f` and `g` are identical or have the same name """
124 return f == g or getattr(f, '__name__', 0) == getattr(g, '__name__', 1)
126 def fix_import_export_id_paths(fieldname):
128 Fixes the id fields in import and exports, and splits field paths
131 :param str fieldname: name of the field to import/export
132 :return: split field name
135 fixed_db_id = re.sub(r'([^/])\.id', r'\1/.id', fieldname)
136 fixed_external_id = re.sub(r'([^/]):id', r'\1/id', fixed_db_id)
137 return fixed_external_id.split('/')
139 def pg_varchar(size=0):
140 """ Returns the VARCHAR declaration for the provided size:
142 * If no size (or an empty or negative size is provided) return an
144 * Otherwise return a VARCHAR(n)
146 :type int size: varchar size, optional
150 if not isinstance(size, int):
151 raise TypeError("VARCHAR parameter should be an int, got %s"
154 return 'VARCHAR(%d)' % size
157 FIELDS_TO_PGTYPES = {
158 fields.boolean: 'bool',
159 fields.integer: 'int4',
163 fields.datetime: 'timestamp',
164 fields.binary: 'bytea',
165 fields.many2one: 'int4',
166 fields.serialized: 'text',
169 def get_pg_type(f, type_override=None):
171 :param fields._column f: field to get a Postgres type for
172 :param type type_override: use the provided type for dispatching instead of the field's own type
173 :returns: (postgres_identification_type, postgres_type_specification)
176 field_type = type_override or type(f)
178 if field_type in FIELDS_TO_PGTYPES:
179 pg_type = (FIELDS_TO_PGTYPES[field_type], FIELDS_TO_PGTYPES[field_type])
180 elif issubclass(field_type, fields.float):
182 pg_type = ('numeric', 'NUMERIC')
184 pg_type = ('float8', 'DOUBLE PRECISION')
185 elif issubclass(field_type, (fields.char, fields.reference)):
186 pg_type = ('varchar', pg_varchar(f.size))
187 elif issubclass(field_type, fields.selection):
188 if (isinstance(f.selection, list) and isinstance(f.selection[0][0], int))\
189 or getattr(f, 'size', None) == -1:
190 pg_type = ('int4', 'INTEGER')
192 pg_type = ('varchar', pg_varchar(getattr(f, 'size', None)))
193 elif issubclass(field_type, fields.function):
194 if f._type == 'selection':
195 pg_type = ('varchar', pg_varchar())
197 pg_type = get_pg_type(f, getattr(fields, f._type))
199 _logger.warning('%s type not supported!', field_type)
205 class MetaModel(api.Meta):
206 """ Metaclass for the models.
208 This class is used as the metaclass for the class :class:`BaseModel` to
209 discover the models defined in a module (without instanciating them).
210 If the automatic discovery is not needed, it is possible to set the model's
211 ``_register`` attribute to False.
215 module_to_models = {}
217 def __init__(self, name, bases, attrs):
218 if not self._register:
219 self._register = True
220 super(MetaModel, self).__init__(name, bases, attrs)
223 if not hasattr(self, '_module'):
224 # The (OpenERP) module name can be in the `openerp.addons` namespace
225 # or not. For instance, module `sale` can be imported as
226 # `openerp.addons.sale` (the right way) or `sale` (for backward
228 module_parts = self.__module__.split('.')
229 if len(module_parts) > 2 and module_parts[:2] == ['openerp', 'addons']:
230 module_name = self.__module__.split('.')[2]
232 module_name = self.__module__.split('.')[0]
233 self._module = module_name
235 # Remember which models to instanciate for this module.
237 self.module_to_models.setdefault(self._module, []).append(self)
239 # transform columns into new-style fields (enables field inheritance)
240 for name, column in self._columns.iteritems():
241 if not hasattr(self, name):
242 setattr(self, name, column.to_field())
246 """ Pseudo-ids for new records. """
247 def __nonzero__(self):
250 IdType = (int, long, basestring, NewId)
253 # maximum number of prefetched records
256 # special columns automatically created by the ORM
257 LOG_ACCESS_COLUMNS = ['create_uid', 'create_date', 'write_uid', 'write_date']
258 MAGIC_COLUMNS = ['id'] + LOG_ACCESS_COLUMNS
260 class BaseModel(object):
261 """ Base class for OpenERP models.
263 OpenERP models are created by inheriting from this class' subclasses:
265 * :class:`Model` for regular database-persisted models
267 * :class:`TransientModel` for temporary data, stored in the database but
268 automatically vaccuumed every so often
270 * :class:`AbstractModel` for abstract super classes meant to be shared by
271 multiple inheriting model
273 The system automatically instantiates every model once per database. Those
274 instances represent the available models on each database, and depend on
275 which modules are installed on that database. The actual class of each
276 instance is built from the Python classes that create and inherit from the
279 Every model instance is a "recordset", i.e., an ordered collection of
280 records of the model. Recordsets are returned by methods like
281 :meth:`~.browse`, :meth:`~.search`, or field accesses. Records have no
282 explicit representation: a record is represented as a recordset of one
285 To create a class that should not be instantiated, the _register class
286 attribute may be set to False.
288 __metaclass__ = MetaModel
289 _auto = True # create database backend
290 _register = False # Set to false if the model shouldn't be automatically discovered.
297 _parent_name = 'parent_id'
298 _parent_store = False
299 _parent_order = False
305 _translate = True # set to False to disable translations export for this model
307 # dict of {field:method}, with method returning the (name_get of records, {id: fold})
308 # to include in the _read_group, if grouped on this field
312 _transient = False # True in a TransientModel
315 # { 'parent_model': 'm2o_field', ... }
318 # Mapping from inherits'd field name to triple (m, r, f, n) where m is the
319 # model from which it is inherits'd, r is the (local) field towards m, f
320 # is the _column object itself, and n is the original (i.e. top-most)
323 # { 'field_name': ('parent_model', 'm2o_field_to_reach_parent',
324 # field_column_obj, origina_parent_model), ... }
327 # Mapping field name/column_info object
328 # This is similar to _inherit_fields but:
329 # 1. includes self fields,
330 # 2. uses column_info instead of a triple.
335 _sql_constraints = []
337 # model dependencies, for models backed up by sql views:
338 # {model_name: field_names, ...}
341 CONCURRENCY_CHECK_FIELD = '__last_update'
343 def log(self, cr, uid, id, message, secondary=False, context=None):
344 return _logger.warning("log() is deprecated. Please use OpenChatter notification system instead of the res.log mechanism.")
346 def view_init(self, cr, uid, fields_list, context=None):
347 """Override this method to do specific things when a view on the object is opened."""
350 def _field_create(self, cr, context=None):
351 """ Create entries in ir_model_fields for all the model's fields.
353 If necessary, also create an entry in ir_model, and if called from the
354 modules loading scheme (by receiving 'module' in the context), also
355 create entries in ir_model_data (for the model and the fields).
357 - create an entry in ir_model (if there is not already one),
358 - create an entry in ir_model_data (if there is not already one, and if
359 'module' is in the context),
360 - update ir_model_fields with the fields found in _columns
361 (TODO there is some redundancy as _columns is updated from
362 ir_model_fields in __init__).
367 cr.execute("SELECT id FROM ir_model WHERE model=%s", (self._name,))
369 cr.execute('SELECT nextval(%s)', ('ir_model_id_seq',))
370 model_id = cr.fetchone()[0]
371 cr.execute("INSERT INTO ir_model (id,model, name, info,state) VALUES (%s, %s, %s, %s, %s)", (model_id, self._name, self._description, self.__doc__, 'base'))
373 model_id = cr.fetchone()[0]
374 if 'module' in context:
375 name_id = 'model_'+self._name.replace('.', '_')
376 cr.execute('select * from ir_model_data where name=%s and module=%s', (name_id, context['module']))
378 cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, (now() at time zone 'UTC'), (now() at time zone 'UTC'), %s, %s, %s)", \
379 (name_id, context['module'], 'ir.model', model_id)
382 cr.execute("SELECT * FROM ir_model_fields WHERE model=%s", (self._name,))
384 for rec in cr.dictfetchall():
385 cols[rec['name']] = rec
387 ir_model_fields_obj = self.pool.get('ir.model.fields')
389 # sparse field should be created at the end, as it depends on its serialized field already existing
390 model_fields = sorted(self._columns.items(), key=lambda x: 1 if x[1]._type == 'sparse' else 0)
391 for (k, f) in model_fields:
393 'model_id': model_id,
396 'field_description': f.string,
398 'relation': f._obj or '',
399 'select_level': tools.ustr(int(f.select)),
400 'readonly': (f.readonly and 1) or 0,
401 'required': (f.required and 1) or 0,
402 'selectable': (f.selectable and 1) or 0,
403 'translate': (f.translate and 1) or 0,
404 'relation_field': f._fields_id if isinstance(f, fields.one2many) else '',
405 'serialization_field_id': None,
407 if getattr(f, 'serialization_field', None):
408 # resolve link to serialization_field if specified by name
409 serialization_field_id = ir_model_fields_obj.search(cr, SUPERUSER_ID, [('model','=',vals['model']), ('name', '=', f.serialization_field)])
410 if not serialization_field_id:
411 raise except_orm(_('Error'), _("Serialization field `%s` not found for sparse field `%s`!") % (f.serialization_field, k))
412 vals['serialization_field_id'] = serialization_field_id[0]
414 # When its a custom field,it does not contain f.select
415 if context.get('field_state', 'base') == 'manual':
416 if context.get('field_name', '') == k:
417 vals['select_level'] = context.get('select', '0')
418 #setting value to let the problem NOT occur next time
420 vals['select_level'] = cols[k]['select_level']
423 cr.execute('select nextval(%s)', ('ir_model_fields_id_seq',))
424 id = cr.fetchone()[0]
426 cr.execute("""INSERT INTO ir_model_fields (
427 id, model_id, model, name, field_description, ttype,
428 relation,state,select_level,relation_field, translate, serialization_field_id
430 %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s
432 id, vals['model_id'], vals['model'], vals['name'], vals['field_description'], vals['ttype'],
433 vals['relation'], 'base',
434 vals['select_level'], vals['relation_field'], bool(vals['translate']), vals['serialization_field_id']
436 if 'module' in context:
437 name1 = 'field_' + self._table + '_' + k
438 cr.execute("select name from ir_model_data where name=%s", (name1,))
440 name1 = name1 + "_" + str(id)
441 cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, (now() at time zone 'UTC'), (now() at time zone 'UTC'), %s, %s, %s)", \
442 (name1, context['module'], 'ir.model.fields', id)
445 for key, val in vals.items():
446 if cols[k][key] != vals[key]:
447 cr.execute('update ir_model_fields set field_description=%s where model=%s and name=%s', (vals['field_description'], vals['model'], vals['name']))
448 cr.execute("""UPDATE ir_model_fields SET
449 model_id=%s, field_description=%s, ttype=%s, relation=%s,
450 select_level=%s, readonly=%s ,required=%s, selectable=%s, relation_field=%s, translate=%s, serialization_field_id=%s
452 model=%s AND name=%s""", (
453 vals['model_id'], vals['field_description'], vals['ttype'],
455 vals['select_level'], bool(vals['readonly']), bool(vals['required']), bool(vals['selectable']), vals['relation_field'], bool(vals['translate']), vals['serialization_field_id'], vals['model'], vals['name']
458 self.invalidate_cache(cr, SUPERUSER_ID)
461 def _add_field(cls, name, field):
462 """ Add the given `field` under the given `name` in the class """
463 field.set_class_name(cls, name)
465 # add field in _fields (for reflection)
466 cls._fields[name] = field
468 # add field as an attribute, unless another kind of value already exists
469 if isinstance(getattr(cls, name, field), Field):
470 setattr(cls, name, field)
472 _logger.warning("In model %r, member %r is not a field", cls._name, name)
475 cls._columns[name] = field.to_column()
477 # remove potential column that may be overridden by field
478 cls._columns.pop(name, None)
481 def _pop_field(cls, name):
482 """ Remove the field with the given `name` from the model.
483 This method should only be used for manual fields.
485 field = cls._fields.pop(name)
486 cls._columns.pop(name, None)
487 cls._all_columns.pop(name, None)
488 if hasattr(cls, name):
493 def _add_magic_fields(cls):
494 """ Introduce magic fields on the current class
496 * id is a "normal" field (with a specific getter)
497 * create_uid, create_date, write_uid and write_date have become
499 * $CONCURRENCY_CHECK_FIELD is a computed field with its computing
500 method defined dynamically. Uses ``str(datetime.datetime.utcnow())``
501 to get the same structure as the previous
502 ``(now() at time zone 'UTC')::timestamp``::
504 # select (now() at time zone 'UTC')::timestamp;
506 ----------------------------
507 2013-06-18 08:30:37.292809
509 >>> str(datetime.datetime.utcnow())
510 '2013-06-18 08:31:32.821177'
512 def add(name, field):
513 """ add `field` with the given `name` if it does not exist yet """
514 if name not in cls._columns and name not in cls._fields:
515 cls._add_field(name, field)
520 # this field 'id' must override any other column or field
521 cls._add_field('id', fields.Id(automatic=True))
523 add('display_name', fields.Char(string='Display Name', automatic=True,
524 compute='_compute_display_name'))
527 add('create_uid', fields.Many2one('res.users', string='Created by', automatic=True))
528 add('create_date', fields.Datetime(string='Created on', automatic=True))
529 add('write_uid', fields.Many2one('res.users', string='Last Updated by', automatic=True))
530 add('write_date', fields.Datetime(string='Last Updated on', automatic=True))
531 last_modified_name = 'compute_concurrency_field_with_access'
533 last_modified_name = 'compute_concurrency_field'
535 # this field must override any other column or field
536 cls._add_field(cls.CONCURRENCY_CHECK_FIELD, fields.Datetime(
537 string='Last Modified on', compute=last_modified_name, automatic=True))
540 def compute_concurrency_field(self):
541 self[self.CONCURRENCY_CHECK_FIELD] = \
542 datetime.datetime.utcnow().strftime(DEFAULT_SERVER_DATETIME_FORMAT)
545 @api.depends('create_date', 'write_date')
546 def compute_concurrency_field_with_access(self):
547 self[self.CONCURRENCY_CHECK_FIELD] = \
548 self.write_date or self.create_date or \
549 datetime.datetime.utcnow().strftime(DEFAULT_SERVER_DATETIME_FORMAT)
552 # Goal: try to apply inheritance at the instanciation level and
553 # put objects in the pool var
556 def _build_model(cls, pool, cr):
557 """ Instanciate a given model.
559 This class method instanciates the class of some model (i.e. a class
560 deriving from osv or osv_memory). The class might be the class passed
561 in argument or, if it inherits from another class, a class constructed
562 by combining the two classes.
566 # IMPORTANT: the registry contains an instance for each model. The class
567 # of each model carries inferred metadata that is shared among the
568 # model's instances for this registry, but not among registries. Hence
569 # we cannot use that "registry class" for combining model classes by
570 # inheritance, since it confuses the metadata inference process.
572 # Keep links to non-inherited constraints in cls; this is useful for
573 # instance when exporting translations
574 cls._local_constraints = cls.__dict__.get('_constraints', [])
575 cls._local_sql_constraints = cls.__dict__.get('_sql_constraints', [])
577 # determine inherited models
578 parents = getattr(cls, '_inherit', [])
579 parents = [parents] if isinstance(parents, basestring) else (parents or [])
581 # determine the model's name
582 name = cls._name or (len(parents) == 1 and parents[0]) or cls.__name__
584 # determine the module that introduced the model
585 original_module = pool[name]._original_module if name in parents else cls._module
587 # build the class hierarchy for the model
588 for parent in parents:
589 if parent not in pool:
590 raise TypeError('The model "%s" specifies an unexisting parent class "%s"\n'
591 'You may need to add a dependency on the parent class\' module.' % (name, parent))
592 parent_model = pool[parent]
594 # do no use the class of parent_model, since that class contains
595 # inferred metadata; use its ancestor instead
596 parent_class = type(parent_model).__base__
598 # don't inherit custom fields
599 columns = dict((key, val)
600 for key, val in parent_class._columns.iteritems()
603 columns.update(cls._columns)
605 defaults = dict(parent_class._defaults)
606 defaults.update(cls._defaults)
608 inherits = dict(parent_class._inherits)
609 inherits.update(cls._inherits)
611 depends = dict(parent_class._depends)
612 for m, fs in cls._depends.iteritems():
613 depends[m] = depends.get(m, []) + fs
615 old_constraints = parent_class._constraints
616 new_constraints = cls._constraints
617 # filter out from old_constraints the ones overridden by a
618 # constraint with the same function name in new_constraints
619 constraints = new_constraints + [oldc
620 for oldc in old_constraints
621 if not any(newc[2] == oldc[2] and same_name(newc[0], oldc[0])
622 for newc in new_constraints)
625 sql_constraints = cls._sql_constraints + \
626 parent_class._sql_constraints
632 '_defaults': defaults,
633 '_inherits': inherits,
635 '_constraints': constraints,
636 '_sql_constraints': sql_constraints,
638 cls = type(name, (cls, parent_class), attrs)
640 # introduce the "registry class" of the model;
641 # duplicate some attributes so that the ORM can modify them
645 '_columns': dict(cls._columns),
646 '_defaults': dict(cls._defaults),
647 '_inherits': dict(cls._inherits),
648 '_depends': dict(cls._depends),
649 '_constraints': list(cls._constraints),
650 '_sql_constraints': list(cls._sql_constraints),
651 '_original_module': original_module,
653 cls = type(cls._name, (cls,), attrs)
655 # instantiate the model, and initialize it
656 model = object.__new__(cls)
657 model.__init__(pool, cr)
661 def _init_function_fields(cls, pool, cr):
662 # initialize the list of non-stored function fields for this model
663 pool._pure_function_fields[cls._name] = []
665 # process store of low-level function fields
666 for fname, column in cls._columns.iteritems():
667 if hasattr(column, 'digits_change'):
668 column.digits_change(cr)
669 # filter out existing store about this field
670 pool._store_function[cls._name] = [
672 for stored in pool._store_function.get(cls._name, [])
673 if (stored[0], stored[1]) != (cls._name, fname)
675 if not isinstance(column, fields.function):
678 # register it on the pool for invalidation
679 pool._pure_function_fields[cls._name].append(fname)
681 # process store parameter
684 get_ids = lambda self, cr, uid, ids, c={}: ids
685 store = {cls._name: (get_ids, None, column.priority, None)}
686 for model, spec in store.iteritems():
688 (fnct, fields2, order, length) = spec
690 (fnct, fields2, order) = spec
693 raise except_orm('Error',
694 ('Invalid function definition %s in object %s !\nYou must use the definition: store={object:(fnct, fields, priority, time length)}.' % (fname, cls._name)))
695 pool._store_function.setdefault(model, [])
696 t = (cls._name, fname, fnct, tuple(fields2) if fields2 else None, order, length)
697 if t not in pool._store_function[model]:
698 pool._store_function[model].append(t)
699 pool._store_function[model].sort(key=lambda x: x[4])
702 def _init_manual_fields(cls, pool, cr):
703 # Check whether the query is already done
704 if pool.fields_by_model is not None:
705 manual_fields = pool.fields_by_model.get(cls._name, [])
707 cr.execute('SELECT * FROM ir_model_fields WHERE model=%s AND state=%s', (cls._name, 'manual'))
708 manual_fields = cr.dictfetchall()
710 for field in manual_fields:
711 if field['name'] in cls._columns:
714 'string': field['field_description'],
715 'required': bool(field['required']),
716 'readonly': bool(field['readonly']),
717 'domain': eval(field['domain']) if field['domain'] else None,
718 'size': field['size'] or None,
719 'ondelete': field['on_delete'],
720 'translate': (field['translate']),
723 #'select': int(field['select_level'])
725 if field['serialization_field_id']:
726 cr.execute('SELECT name FROM ir_model_fields WHERE id=%s', (field['serialization_field_id'],))
727 attrs.update({'serialization_field': cr.fetchone()[0], 'type': field['ttype']})
728 if field['ttype'] in ['many2one', 'one2many', 'many2many']:
729 attrs.update({'relation': field['relation']})
730 cls._columns[field['name']] = fields.sparse(**attrs)
731 elif field['ttype'] == 'selection':
732 cls._columns[field['name']] = fields.selection(eval(field['selection']), **attrs)
733 elif field['ttype'] == 'reference':
734 cls._columns[field['name']] = fields.reference(selection=eval(field['selection']), **attrs)
735 elif field['ttype'] == 'many2one':
736 cls._columns[field['name']] = fields.many2one(field['relation'], **attrs)
737 elif field['ttype'] == 'one2many':
738 cls._columns[field['name']] = fields.one2many(field['relation'], field['relation_field'], **attrs)
739 elif field['ttype'] == 'many2many':
740 _rel1 = field['relation'].replace('.', '_')
741 _rel2 = field['model'].replace('.', '_')
742 _rel_name = 'x_%s_%s_%s_rel' % (_rel1, _rel2, field['name'])
743 cls._columns[field['name']] = fields.many2many(field['relation'], _rel_name, 'id1', 'id2', **attrs)
745 cls._columns[field['name']] = getattr(fields, field['ttype'])(**attrs)
748 def _init_constraints_onchanges(cls):
749 # store sql constraint error messages
750 for (key, _, msg) in cls._sql_constraints:
751 cls.pool._sql_error[cls._table + '_' + key] = msg
753 # collect constraint and onchange methods
754 cls._constraint_methods = []
755 cls._onchange_methods = defaultdict(list)
756 for attr, func in getmembers(cls, callable):
757 if hasattr(func, '_constrains'):
758 if not all(name in cls._fields for name in func._constrains):
759 _logger.warning("@constrains%r parameters must be field names", func._constrains)
760 cls._constraint_methods.append(func)
761 if hasattr(func, '_onchange'):
762 if not all(name in cls._fields for name in func._onchange):
763 _logger.warning("@onchange%r parameters must be field names", func._onchange)
764 for name in func._onchange:
765 cls._onchange_methods[name].append(func)
768 # In the past, this method was registering the model class in the server.
769 # This job is now done entirely by the metaclass MetaModel.
771 # Do not create an instance here. Model instances are created by method
775 def __init__(self, pool, cr):
776 """ Initialize a model and make it part of the given registry.
778 - copy the stored fields' functions in the registry,
779 - retrieve custom fields and add them in the model,
780 - ensure there is a many2one for each _inherits'd parent,
781 - update the children's _columns,
782 - give a chance to each field to initialize itself.
787 # link the class to the registry, and update the registry
789 cls._model = self # backward compatibility
790 pool.add(cls._name, self)
792 # determine description, table, sequence and log_access
793 if not cls._description:
794 cls._description = cls._name
796 cls._table = cls._name.replace('.', '_')
797 if not cls._sequence:
798 cls._sequence = cls._table + '_id_seq'
799 if not hasattr(cls, '_log_access'):
800 # If _log_access is not specified, it is the same value as _auto.
801 cls._log_access = cls._auto
804 if cls.is_transient():
805 cls._transient_check_count = 0
806 cls._transient_max_count = config.get('osv_memory_count_limit')
807 cls._transient_max_hours = config.get('osv_memory_age_limit')
808 assert cls._log_access, \
809 "TransientModels must have log_access turned on, " \
810 "in order to implement their access rights policy"
812 # retrieve new-style fields and duplicate them (to avoid clashes with
813 # inheritance between different models)
815 for attr, field in getmembers(cls, Field.__instancecheck__):
816 if not field.inherited:
817 cls._add_field(attr, field.copy())
819 # introduce magic fields
820 cls._add_magic_fields()
822 # register stuff about low-level function fields and custom fields
823 cls._init_function_fields(pool, cr)
824 cls._init_manual_fields(pool, cr)
827 cls._inherits_check()
828 cls._inherits_reload()
830 # register constraints and onchange methods
831 cls._init_constraints_onchanges()
834 for k in cls._defaults:
835 assert k in cls._fields, \
836 "Model %s has a default for non-existing field %s" % (cls._name, k)
839 for column in cls._columns.itervalues():
844 assert cls._rec_name in cls._fields, \
845 "Invalid rec_name %s for model %s" % (cls._rec_name, cls._name)
846 elif 'name' in cls._fields:
847 cls._rec_name = 'name'
849 # prepare ormcache, which must be shared by all instances of the model
854 def _is_an_ordinary_table(self):
855 self.env.cr.execute("""\
859 AND relkind = %s""", [self._table, 'r'])
860 return bool(self.env.cr.fetchone())
862 def __export_xml_id(self):
863 """ Return a valid xml_id for the record `self`. """
864 if not self._is_an_ordinary_table():
866 "You can not export the column ID of model %s, because the "
867 "table %s is not an ordinary table."
868 % (self._name, self._table))
869 ir_model_data = self.sudo().env['ir.model.data']
870 data = ir_model_data.search([('model', '=', self._name), ('res_id', '=', self.id)])
873 return '%s.%s' % (data[0].module, data[0].name)
878 name = '%s_%s' % (self._table, self.id)
879 while ir_model_data.search([('module', '=', '__export__'), ('name', '=', name)]):
881 name = '%s_%s_%s' % (self._table, self.id, postfix)
882 ir_model_data.create({
885 'module': '__export__',
888 return '__export__.' + name
891 def __export_rows(self, fields):
892 """ Export fields of the records in `self`.
894 :param fields: list of lists of fields to traverse
895 :return: list of lists of corresponding values
899 # main line of record, initially empty
900 current = [''] * len(fields)
901 lines.append(current)
903 # list of primary fields followed by secondary field(s)
906 # process column by column
907 for i, path in enumerate(fields):
912 if name in primary_done:
916 current[i] = str(record.id)
918 current[i] = record.__export_xml_id()
920 field = record._fields[name]
923 # this part could be simpler, but it has to be done this way
924 # in order to reproduce the former behavior
925 if not isinstance(value, BaseModel):
926 current[i] = field.convert_to_export(value, self.env)
928 primary_done.append(name)
930 # This is a special case, its strange behavior is intended!
931 if field.type == 'many2many' and len(path) > 1 and path[1] == 'id':
932 xml_ids = [r.__export_xml_id() for r in value]
933 current[i] = ','.join(xml_ids) or False
936 # recursively export the fields that follow name
937 fields2 = [(p[1:] if p and p[0] == name else []) for p in fields]
938 lines2 = value.__export_rows(fields2)
940 # merge first line with record's main line
941 for j, val in enumerate(lines2[0]):
944 # check value of current field
946 # assign xml_ids, and forget about remaining lines
947 xml_ids = [item[1] for item in value.name_get()]
948 current[i] = ','.join(xml_ids)
950 # append the other lines at the end
958 def export_data(self, fields_to_export, raw_data=False):
959 """ Export fields for selected objects
961 :param fields_to_export: list of fields
962 :param raw_data: True to return value in native Python type
963 :rtype: dictionary with a *datas* matrix
965 This method is used when exporting data via client menu
967 fields_to_export = map(fix_import_export_id_paths, fields_to_export)
969 self = self.with_context(export_raw_data=True)
970 return {'datas': self.__export_rows(fields_to_export)}
972 def import_data(self, cr, uid, fields, datas, mode='init', current_module='', noupdate=False, context=None, filename=None):
975 Use :meth:`~load` instead
977 Import given data in given module
979 This method is used when importing data via client menu.
981 Example of fields to import for a sale.order::
984 partner_id, (=name_search)
985 order_line/.id, (=database_id)
987 order_line/product_id/id, (=xml id)
988 order_line/price_unit,
989 order_line/product_uom_qty,
990 order_line/product_uom/id (=xml_id)
992 This method returns a 4-tuple with the following structure::
994 (return_code, errored_resource, error_message, unused)
996 * The first item is a return code, it is ``-1`` in case of
997 import error, or the last imported row number in case of success
998 * The second item contains the record data dict that failed to import
999 in case of error, otherwise it's 0
1000 * The third item contains an error message string in case of error,
1002 * The last item is currently unused, with no specific semantics
1004 :param fields: list of fields to import
1005 :param datas: data to import
1006 :param mode: 'init' or 'update' for record creation
1007 :param current_module: module name
1008 :param noupdate: flag for record creation
1009 :param filename: optional file to store partial import state for recovery
1010 :returns: 4-tuple in the form (return_code, errored_resource, error_message, unused)
1011 :rtype: (int, dict or 0, str or 0, str or 0)
1013 context = dict(context) if context is not None else {}
1014 context['_import_current_module'] = current_module
1016 fields = map(fix_import_export_id_paths, fields)
1017 ir_model_data_obj = self.pool.get('ir.model.data')
1020 if m['type'] == 'error':
1021 raise Exception(m['message'])
1023 if config.get('import_partial') and filename:
1024 with open(config.get('import_partial'), 'rb') as partial_import_file:
1025 data = pickle.load(partial_import_file)
1026 position = data.get(filename, 0)
1030 for res_id, xml_id, res, info in self._convert_records(cr, uid,
1031 self._extract_records(cr, uid, fields, datas,
1032 context=context, log=log),
1033 context=context, log=log):
1034 ir_model_data_obj._update(cr, uid, self._name,
1035 current_module, res, mode=mode, xml_id=xml_id,
1036 noupdate=noupdate, res_id=res_id, context=context)
1037 position = info.get('rows', {}).get('to', 0) + 1
1038 if config.get('import_partial') and filename and (not (position%100)):
1039 with open(config.get('import_partial'), 'rb') as partial_import:
1040 data = pickle.load(partial_import)
1041 data[filename] = position
1042 with open(config.get('import_partial'), 'wb') as partial_import:
1043 pickle.dump(data, partial_import)
1044 if context.get('defer_parent_store_computation'):
1045 self._parent_store_compute(cr)
1047 except Exception, e:
1049 return -1, {}, 'Line %d : %s' % (position + 1, tools.ustr(e)), ''
1051 if context.get('defer_parent_store_computation'):
1052 self._parent_store_compute(cr)
1053 return position, 0, 0, 0
1055 def load(self, cr, uid, fields, data, context=None):
1057 Attempts to load the data matrix, and returns a list of ids (or
1058 ``False`` if there was an error and no id could be generated) and a
1061 The ids are those of the records created and saved (in database), in
1062 the same order they were extracted from the file. They can be passed
1063 directly to :meth:`~read`
1065 :param fields: list of fields to import, at the same index as the corresponding data
1066 :type fields: list(str)
1067 :param data: row-major matrix of data to import
1068 :type data: list(list(str))
1069 :param dict context:
1070 :returns: {ids: list(int)|False, messages: [Message]}
1072 cr.execute('SAVEPOINT model_load')
1075 fields = map(fix_import_export_id_paths, fields)
1076 ModelData = self.pool['ir.model.data'].clear_caches()
1078 fg = self.fields_get(cr, uid, context=context)
1085 for id, xid, record, info in self._convert_records(cr, uid,
1086 self._extract_records(cr, uid, fields, data,
1087 context=context, log=messages.append),
1088 context=context, log=messages.append):
1090 cr.execute('SAVEPOINT model_load_save')
1091 except psycopg2.InternalError, e:
1092 # broken transaction, exit and hope the source error was
1094 if not any(message['type'] == 'error' for message in messages):
1095 messages.append(dict(info, type='error',message=
1096 u"Unknown database error: '%s'" % e))
1099 ids.append(ModelData._update(cr, uid, self._name,
1100 current_module, record, mode=mode, xml_id=xid,
1101 noupdate=noupdate, res_id=id, context=context))
1102 cr.execute('RELEASE SAVEPOINT model_load_save')
1103 except psycopg2.Warning, e:
1104 messages.append(dict(info, type='warning', message=str(e)))
1105 cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
1106 except psycopg2.Error, e:
1107 messages.append(dict(
1109 **PGERROR_TO_OE[e.pgcode](self, fg, info, e)))
1110 # Failed to write, log to messages, rollback savepoint (to
1111 # avoid broken transaction) and keep going
1112 cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
1113 except Exception, e:
1114 message = (_('Unknown error during import:') +
1115 ' %s: %s' % (type(e), unicode(e)))
1116 moreinfo = _('Resolve other errors first')
1117 messages.append(dict(info, type='error',
1120 # Failed for some reason, perhaps due to invalid data supplied,
1121 # rollback savepoint and keep going
1122 cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
1123 if any(message['type'] == 'error' for message in messages):
1124 cr.execute('ROLLBACK TO SAVEPOINT model_load')
1126 return {'ids': ids, 'messages': messages}
1128 def _extract_records(self, cr, uid, fields_, data,
1129 context=None, log=lambda a: None):
1130 """ Generates record dicts from the data sequence.
1132 The result is a generator of dicts mapping field names to raw
1133 (unconverted, unvalidated) values.
1135 For relational fields, if sub-fields were provided the value will be
1136 a list of sub-records
1138 The following sub-fields may be set on the record (by key):
1139 * None is the name_get for the record (to use with name_create/name_search)
1140 * "id" is the External ID for the record
1141 * ".id" is the Database ID for the record
1143 columns = dict((k, v.column) for k, v in self._all_columns.iteritems())
1144 # Fake columns to avoid special cases in extractor
1145 columns[None] = fields.char('rec_name')
1146 columns['id'] = fields.char('External ID')
1147 columns['.id'] = fields.integer('Database ID')
1149 # m2o fields can't be on multiple lines so exclude them from the
1150 # is_relational field rows filter, but special-case it later on to
1151 # be handled with relational fields (as it can have subfields)
1152 is_relational = lambda field: columns[field]._type in ('one2many', 'many2many', 'many2one')
1153 get_o2m_values = itemgetter_tuple(
1154 [index for index, field in enumerate(fields_)
1155 if columns[field[0]]._type == 'one2many'])
1156 get_nono2m_values = itemgetter_tuple(
1157 [index for index, field in enumerate(fields_)
1158 if columns[field[0]]._type != 'one2many'])
1159 # Checks if the provided row has any non-empty non-relational field
1160 def only_o2m_values(row, f=get_nono2m_values, g=get_o2m_values):
1161 return any(g(row)) and not any(f(row))
1165 if index >= len(data): return
1168 # copy non-relational fields to record dict
1169 record = dict((field[0], value)
1170 for field, value in itertools.izip(fields_, row)
1171 if not is_relational(field[0]))
1173 # Get all following rows which have relational values attached to
1174 # the current record (no non-relational values)
1175 record_span = itertools.takewhile(
1176 only_o2m_values, itertools.islice(data, index + 1, None))
1177 # stitch record row back on for relational fields
1178 record_span = list(itertools.chain([row], record_span))
1179 for relfield in set(
1180 field[0] for field in fields_
1181 if is_relational(field[0])):
1182 column = columns[relfield]
1183 # FIXME: how to not use _obj without relying on fields_get?
1184 Model = self.pool[column._obj]
1186 # get only cells for this sub-field, should be strictly
1187 # non-empty, field path [None] is for name_get column
1188 indices, subfields = zip(*((index, field[1:] or [None])
1189 for index, field in enumerate(fields_)
1190 if field[0] == relfield))
1192 # return all rows which have at least one value for the
1193 # subfields of relfield
1194 relfield_data = filter(any, map(itemgetter_tuple(indices), record_span))
1195 record[relfield] = [subrecord
1196 for subrecord, _subinfo in Model._extract_records(
1197 cr, uid, subfields, relfield_data,
1198 context=context, log=log)]
1200 yield record, {'rows': {
1202 'to': index + len(record_span) - 1
1204 index += len(record_span)
1206 def _convert_records(self, cr, uid, records,
1207 context=None, log=lambda a: None):
1208 """ Converts records from the source iterable (recursive dicts of
1209 strings) into forms which can be written to the database (via
1210 self.create or (ir.model.data)._update)
1212 :returns: a list of triplets of (id, xid, record)
1213 :rtype: list((int|None, str|None, dict))
1215 if context is None: context = {}
1216 Converter = self.pool['ir.fields.converter']
1217 columns = dict((k, v.column) for k, v in self._all_columns.iteritems())
1218 Translation = self.pool['ir.translation']
1220 (f, (Translation._get_source(cr, uid, self._name + ',' + f, 'field',
1221 context.get('lang'))
1223 for f, column in columns.iteritems())
1225 convert = Converter.for_model(cr, uid, self, context=context)
1227 def _log(base, field, exception):
1228 type = 'warning' if isinstance(exception, Warning) else 'error'
1229 # logs the logical (not human-readable) field name for automated
1230 # processing of response, but injects human readable in message
1231 record = dict(base, type=type, field=field,
1232 message=unicode(exception.args[0]) % base)
1233 if len(exception.args) > 1 and exception.args[1]:
1234 record.update(exception.args[1])
1237 stream = CountingStream(records)
1238 for record, extras in stream:
1241 # name_get/name_create
1242 if None in record: pass
1249 dbid = int(record['.id'])
1251 # in case of overridden id column
1252 dbid = record['.id']
1253 if not self.search(cr, uid, [('id', '=', dbid)], context=context):
1256 record=stream.index,
1258 message=_(u"Unknown database identifier '%s'") % dbid))
1261 converted = convert(record, lambda field, err:\
1262 _log(dict(extras, record=stream.index, field=field_names[field]), field, err))
1264 yield dbid, xid, converted, dict(extras, record=stream.index)
1267 def _validate_fields(self, field_names):
1268 field_names = set(field_names)
1270 # old-style constraint methods
1271 trans = self.env['ir.translation']
1272 cr, uid, context = self.env.args
1275 for fun, msg, names in self._constraints:
1277 # validation must be context-independent; call `fun` without context
1278 valid = not (set(names) & field_names) or fun(self._model, cr, uid, ids)
1280 except Exception, e:
1281 _logger.debug('Exception while validating constraint', exc_info=True)
1283 extra_error = tools.ustr(e)
1286 res_msg = msg(self._model, cr, uid, ids, context=context)
1287 if isinstance(res_msg, tuple):
1288 template, params = res_msg
1289 res_msg = template % params
1291 res_msg = trans._get_source(self._name, 'constraint', self.env.lang, msg)
1293 res_msg += "\n\n%s\n%s" % (_('Error details:'), extra_error)
1295 _("Field(s) `%s` failed against a constraint: %s") %
1296 (', '.join(names), res_msg)
1299 raise ValidationError('\n'.join(errors))
1301 # new-style constraint methods
1302 for check in self._constraint_methods:
1303 if set(check._constrains) & field_names:
1306 except ValidationError, e:
1308 except Exception, e:
1309 raise ValidationError("Error while validating constraint\n\n%s" % tools.ustr(e))
1311 def default_get(self, cr, uid, fields_list, context=None):
1312 """ default_get(fields) -> default_values
1314 Return default values for the fields in `fields_list`. Default
1315 values are determined by the context, user defaults, and the model
1318 :param fields_list: a list of field names
1319 :return: a dictionary mapping each field name to its corresponding
1320 default value; the keys of the dictionary are the fields in
1321 `fields_list` that have a default value different from ``False``.
1323 This method should not be overridden. In order to change the
1324 mechanism for determining default values, you should override method
1325 :meth:`add_default_value` instead.
1327 # trigger view init hook
1328 self.view_init(cr, uid, fields_list, context)
1330 # use a new record to determine default values; evaluate fields on the
1331 # new record and put default values in result
1332 record = self.new(cr, uid, {}, context=context)
1334 for name in fields_list:
1335 if name in self._fields:
1336 value = record[name]
1337 if name in record._cache:
1338 result[name] = value # it really is a default value
1340 # convert default values to the expected format
1341 result = self._convert_to_write(result)
1344 def add_default_value(self, field):
1345 """ Set the default value of `field` to the new record `self`.
1346 The value must be assigned to `self`.
1348 assert not self.id, "Expected new record: %s" % self
1349 cr, uid, context = self.env.args
1352 # 1. look up context
1353 key = 'default_' + name
1355 self[name] = context[key]
1358 # 2. look up ir_values
1359 # Note: performance is good, because get_defaults_dict is cached!
1360 ir_values_dict = self.env['ir.values'].get_defaults_dict(self._name)
1361 if name in ir_values_dict:
1362 self[name] = ir_values_dict[name]
1365 # 3. look up property fields
1366 # TODO: get rid of this one
1367 column = self._columns.get(name)
1368 if isinstance(column, fields.property):
1369 self[name] = self.env['ir.property'].get(name, self._name)
1372 # 4. look up _defaults
1373 if name in self._defaults:
1374 value = self._defaults[name]
1376 value = value(self._model, cr, uid, context)
1380 # 5. delegate to field
1381 field.determine_default(self)
1383 def fields_get_keys(self, cr, user, context=None):
1384 res = self._columns.keys()
1385 # TODO I believe this loop can be replace by
1386 # res.extend(self._inherit_fields.key())
1387 for parent in self._inherits:
1388 res.extend(self.pool[parent].fields_get_keys(cr, user, context))
1391 def _rec_name_fallback(self, cr, uid, context=None):
1392 rec_name = self._rec_name
1393 if rec_name not in self._columns:
1394 rec_name = self._columns.keys()[0] if len(self._columns.keys()) > 0 else "id"
1398 # Overload this method if you need a window title which depends on the context
1400 def view_header_get(self, cr, user, view_id=None, view_type='form', context=None):
1403 def user_has_groups(self, cr, uid, groups, context=None):
1404 """Return true if the user is at least member of one of the groups
1405 in groups_str. Typically used to resolve `groups` attribute
1406 in view and model definitions.
1408 :param str groups: comma-separated list of fully-qualified group
1409 external IDs, e.g.: ``base.group_user,base.group_system``
1410 :return: True if the current user is a member of one of the
1413 return any(self.pool['res.users'].has_group(cr, uid, group_ext_id)
1414 for group_ext_id in groups.split(','))
1416 def _get_default_form_view(self, cr, user, context=None):
1417 """ Generates a default single-line form view using all fields
1418 of the current model except the m2m and o2m ones.
1420 :param cr: database cursor
1421 :param int user: user id
1422 :param dict context: connection context
1423 :returns: a form view as an lxml document
1424 :rtype: etree._Element
1426 view = etree.Element('form', string=self._description)
1427 group = etree.SubElement(view, 'group', col="4")
1428 for fname, field in self._fields.iteritems():
1429 if field.automatic or field.type in ('one2many', 'many2many'):
1432 etree.SubElement(group, 'field', name=fname)
1433 if field.type == 'text':
1434 etree.SubElement(group, 'newline')
1437 def _get_default_search_view(self, cr, user, context=None):
1438 """ Generates a single-field search view, based on _rec_name.
1440 :param cr: database cursor
1441 :param int user: user id
1442 :param dict context: connection context
1443 :returns: a tree view as an lxml document
1444 :rtype: etree._Element
1446 view = etree.Element('search', string=self._description)
1447 etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))
1450 def _get_default_tree_view(self, cr, user, context=None):
1451 """ Generates a single-field tree view, based on _rec_name.
1453 :param cr: database cursor
1454 :param int user: user id
1455 :param dict context: connection context
1456 :returns: a tree view as an lxml document
1457 :rtype: etree._Element
1459 view = etree.Element('tree', string=self._description)
1460 etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))
1463 def _get_default_calendar_view(self, cr, user, context=None):
1464 """ Generates a default calendar view by trying to infer
1465 calendar fields from a number of pre-set attribute names
1467 :param cr: database cursor
1468 :param int user: user id
1469 :param dict context: connection context
1470 :returns: a calendar view
1471 :rtype: etree._Element
1473 def set_first_of(seq, in_, to):
1474 """Sets the first value of `seq` also found in `in_` to
1475 the `to` attribute of the view being closed over.
1477 Returns whether it's found a suitable value (and set it on
1478 the attribute) or not
1486 view = etree.Element('calendar', string=self._description)
1487 etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))
1489 if self._date_name not in self._columns:
1491 for dt in ['date', 'date_start', 'x_date', 'x_date_start']:
1492 if dt in self._columns:
1493 self._date_name = dt
1498 raise except_orm(_('Invalid Object Architecture!'), _("Insufficient fields for Calendar View!"))
1499 view.set('date_start', self._date_name)
1501 set_first_of(["user_id", "partner_id", "x_user_id", "x_partner_id"],
1502 self._columns, 'color')
1504 if not set_first_of(["date_stop", "date_end", "x_date_stop", "x_date_end"],
1505 self._columns, 'date_stop'):
1506 if not set_first_of(["date_delay", "planned_hours", "x_date_delay", "x_planned_hours"],
1507 self._columns, 'date_delay'):
1509 _('Invalid Object Architecture!'),
1510 _("Insufficient fields to generate a Calendar View for %s, missing a date_stop or a date_delay" % self._name))
1514 def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
1515 """ fields_view_get([view_id | view_type='form'])
1517 Get the detailed composition of the requested view like fields, model, view architecture
1519 :param view_id: id of the view or None
1520 :param view_type: type of the view to return if view_id is None ('form', tree', ...)
1521 :param toolbar: true to include contextual actions
1522 :param submenu: deprecated
1523 :return: dictionary describing the composition of the requested view (including inherited views and extensions)
1524 :raise AttributeError:
1525 * if the inherited view has unknown position to work with other than 'before', 'after', 'inside', 'replace'
1526 * if some tag other than 'position' is found in parent view
1527 :raise Invalid ArchitectureError: if there is view type other than form, tree, calendar, search etc defined on the structure
1531 View = self.pool['ir.ui.view']
1534 'model': self._name,
1535 'field_parent': False,
1538 # try to find a view_id if none provided
1540 # <view_type>_view_ref in context can be used to overrride the default view
1541 view_ref_key = view_type + '_view_ref'
1542 view_ref = context.get(view_ref_key)
1545 module, view_ref = view_ref.split('.', 1)
1546 cr.execute("SELECT res_id FROM ir_model_data WHERE model='ir.ui.view' AND module=%s AND name=%s", (module, view_ref))
1547 view_ref_res = cr.fetchone()
1549 view_id = view_ref_res[0]
1551 _logger.warning('%r requires a fully-qualified external id (got: %r for model %s). '
1552 'Please use the complete `module.view_id` form instead.', view_ref_key, view_ref,
1556 # otherwise try to find the lowest priority matching ir.ui.view
1557 view_id = View.default_view(cr, uid, self._name, view_type, context=context)
1559 # context for post-processing might be overriden
1562 # read the view with inherited views applied
1563 root_view = View.read_combined(cr, uid, view_id, fields=['id', 'name', 'field_parent', 'type', 'model', 'arch'], context=context)
1564 result['arch'] = root_view['arch']
1565 result['name'] = root_view['name']
1566 result['type'] = root_view['type']
1567 result['view_id'] = root_view['id']
1568 result['field_parent'] = root_view['field_parent']
1569 # override context fro postprocessing
1570 if root_view.get('model') != self._name:
1571 ctx = dict(context, base_model_name=root_view.get('model'))
1573 # fallback on default views methods if no ir.ui.view could be found
1575 get_func = getattr(self, '_get_default_%s_view' % view_type)
1576 arch_etree = get_func(cr, uid, context)
1577 result['arch'] = etree.tostring(arch_etree, encoding='utf-8')
1578 result['type'] = view_type
1579 result['name'] = 'default'
1580 except AttributeError:
1581 raise except_orm(_('Invalid Architecture!'), _("No default view of type '%s' could be found !") % view_type)
1583 # Apply post processing, groups and modifiers etc...
1584 xarch, xfields = View.postprocess_and_fields(cr, uid, self._name, etree.fromstring(result['arch']), view_id, context=ctx)
1585 result['arch'] = xarch
1586 result['fields'] = xfields
1588 # Add related action information if aksed
1590 toclean = ('report_sxw_content', 'report_rml_content', 'report_sxw', 'report_rml', 'report_sxw_content_data', 'report_rml_content_data')
1596 ir_values_obj = self.pool.get('ir.values')
1597 resprint = ir_values_obj.get(cr, uid, 'action', 'client_print_multi', [(self._name, False)], False, context)
1598 resaction = ir_values_obj.get(cr, uid, 'action', 'client_action_multi', [(self._name, False)], False, context)
1599 resrelate = ir_values_obj.get(cr, uid, 'action', 'client_action_relate', [(self._name, False)], False, context)
1600 resaction = [clean(action) for action in resaction if view_type == 'tree' or not action[2].get('multi')]
1601 resprint = [clean(print_) for print_ in resprint if view_type == 'tree' or not print_[2].get('multi')]
1602 #When multi="True" set it will display only in More of the list view
1603 resrelate = [clean(action) for action in resrelate
1604 if (action[2].get('multi') and view_type == 'tree') or (not action[2].get('multi') and view_type == 'form')]
1606 for x in itertools.chain(resprint, resaction, resrelate):
1607 x['string'] = x['name']
1609 result['toolbar'] = {
1611 'action': resaction,
1616 def get_formview_id(self, cr, uid, id, context=None):
1617 """ Return an view id to open the document with. This method is meant to be
1618 overridden in addons that want to give specific view ids for example.
1620 :param int id: id of the document to open
1624 def get_formview_action(self, cr, uid, id, context=None):
1625 """ Return an action to open the document. This method is meant to be
1626 overridden in addons that want to give specific view ids for example.
1628 :param int id: id of the document to open
1630 view_id = self.get_formview_id(cr, uid, id, context=context)
1632 'type': 'ir.actions.act_window',
1633 'res_model': self._name,
1634 'view_type': 'form',
1635 'view_mode': 'form',
1636 'views': [(view_id, 'form')],
1637 'target': 'current',
1641 def get_access_action(self, cr, uid, id, context=None):
1642 """ Return an action to open the document. This method is meant to be
1643 overridden in addons that want to give specific access to the document.
1644 By default it opens the formview of the document.
1646 :paramt int id: id of the document to open
1648 return self.get_formview_action(cr, uid, id, context=context)
1650 def _view_look_dom_arch(self, cr, uid, node, view_id, context=None):
1651 return self.pool['ir.ui.view'].postprocess_and_fields(
1652 cr, uid, self._name, node, view_id, context=context)
1654 def search_count(self, cr, user, args, context=None):
1655 """ search_count(args) -> int
1657 Returns the number of records in the current model matching :ref:`the
1658 provided domain <reference/orm/domains>`.
1660 res = self.search(cr, user, args, context=context, count=True)
1661 if isinstance(res, list):
1665 @api.returns('self')
1666 def search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False):
1667 """ search(args[, offset=0][, limit=None][, order=None][, count=False])
1669 Searches for records based on the ``args``
1670 :ref:`search domain <reference/orm/domains>`.
1672 :param args: :ref:`A search domain <reference/orm/domains>`. Use an empty
1673 list to match all records.
1674 :param int offset: number of results to ignore (default: none)
1675 :param int limit: maximum number of records to return (default: all)
1676 :param str order: sort string
1677 :param bool count: if ``True``, the call should return the number of
1678 records matching ``args`` rather than the records
1680 :returns: at most ``limit`` records matching the search criteria
1682 :raise AccessError: * if user tries to bypass access rules for read on the requested object.
1684 return self._search(cr, user, args, offset=offset, limit=limit, order=order, context=context, count=count)
1687 # display_name, name_get, name_create, name_search
1690 @api.depends(lambda self: (self._rec_name,) if self._rec_name else ())
1691 def _compute_display_name(self):
1692 names = dict(self.name_get())
1694 record.display_name = names.get(record.id, False)
1698 """ name_get() -> [(id, name), ...]
1700 Returns a textual representation for the records in ``self``.
1701 By default this is the value of the ``display_name`` field.
1703 :return: list of pairs ``(id, text_repr)`` for each records
1707 name = self._rec_name
1708 if name in self._fields:
1709 convert = self._fields[name].convert_to_display_name
1711 result.append((record.id, convert(record[name])))
1714 result.append((record.id, "%s,%s" % (record._name, record.id)))
1719 def name_create(self, name):
1720 """ name_create(name) -> record
1722 Create a new record by calling :meth:`~.create` with only one value
1723 provided: the display name of the new record.
1725 The new record will be initialized with any default values
1726 applicable to this model, or provided through the context. The usual
1727 behavior of :meth:`~.create` applies.
1729 :param name: display name of the record to create
1731 :return: the :meth:`~.name_get` pair value of the created record
1734 record = self.create({self._rec_name: name})
1735 return record.name_get()[0]
1737 _logger.warning("Cannot execute name_create, no _rec_name defined on %s", self._name)
1741 def name_search(self, name='', args=None, operator='ilike', limit=100):
1742 """ name_search(name='', args=None, operator='ilike', limit=100) -> records
1744 Search for records that have a display name matching the given
1745 `name` pattern when compared with the given `operator`, while also
1746 matching the optional search domain (`args`).
1748 This is used for example to provide suggestions based on a partial
1749 value for a relational field. Sometimes be seen as the inverse
1750 function of :meth:`~.name_get`, but it is not guaranteed to be.
1752 This method is equivalent to calling :meth:`~.search` with a search
1753 domain based on ``display_name`` and then :meth:`~.name_get` on the
1754 result of the search.
1756 :param str name: the name pattern to match
1757 :param list args: optional search domain (see :meth:`~.search` for
1758 syntax), specifying further restrictions
1759 :param str operator: domain operator for matching `name`, such as
1760 ``'like'`` or ``'='``.
1761 :param int limit: optional max number of records to return
1763 :return: list of pairs ``(id, text_repr)`` for all matching records.
1765 return self._name_search(name, args, operator, limit=limit)
1767 def _name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100, name_get_uid=None):
1768 # private implementation of name_search, allows passing a dedicated user
1769 # for the name_get part to solve some access rights issues
1770 args = list(args or [])
1771 # optimize out the default criterion of ``ilike ''`` that matches everything
1772 if not self._rec_name:
1773 _logger.warning("Cannot execute name_search, no _rec_name defined on %s", self._name)
1774 elif not (name == '' and operator == 'ilike'):
1775 args += [(self._rec_name, operator, name)]
1776 access_rights_uid = name_get_uid or user
1777 ids = self._search(cr, user, args, limit=limit, context=context, access_rights_uid=access_rights_uid)
1778 res = self.name_get(cr, access_rights_uid, ids, context)
1781 def read_string(self, cr, uid, id, langs, fields=None, context=None):
1784 self.pool.get('ir.translation').check_access_rights(cr, uid, 'read')
1786 fields = self._columns.keys() + self._inherit_fields.keys()
1787 #FIXME: collect all calls to _get_source into one SQL call.
1789 res[lang] = {'code': lang}
1791 if f in self._columns:
1792 res_trans = self.pool.get('ir.translation')._get_source(cr, uid, self._name+','+f, 'field', lang)
1794 res[lang][f] = res_trans
1796 res[lang][f] = self._columns[f].string
1797 for table in self._inherits:
1798 cols = intersect(self._inherit_fields.keys(), fields)
1799 res2 = self.pool[table].read_string(cr, uid, id, langs, cols, context)
1802 res[lang]['code'] = lang
1803 for f in res2[lang]:
1804 res[lang][f] = res2[lang][f]
1807 def write_string(self, cr, uid, id, langs, vals, context=None):
1808 self.pool.get('ir.translation').check_access_rights(cr, uid, 'write')
1809 #FIXME: try to only call the translation in one SQL
1812 if field in self._columns:
1813 src = self._columns[field].string
1814 self.pool.get('ir.translation')._set_ids(cr, uid, self._name+','+field, 'field', lang, [0], vals[field], src)
1815 for table in self._inherits:
1816 cols = intersect(self._inherit_fields.keys(), vals)
1818 self.pool[table].write_string(cr, uid, id, langs, vals, context)
1821 def _add_missing_default_values(self, cr, uid, values, context=None):
1822 # avoid overriding inherited values when parent is set
1824 for tables, parent_field in self._inherits.items():
1825 if parent_field in values:
1826 avoid_tables.append(tables)
1828 # compute missing fields
1829 missing_defaults = set()
1830 for field in self._columns.keys():
1831 if not field in values:
1832 missing_defaults.add(field)
1833 for field in self._inherit_fields.keys():
1834 if (field not in values) and (self._inherit_fields[field][0] not in avoid_tables):
1835 missing_defaults.add(field)
1836 # discard magic fields
1837 missing_defaults -= set(MAGIC_COLUMNS)
1839 if missing_defaults:
1840 # override defaults with the provided values, never allow the other way around
1841 defaults = self.default_get(cr, uid, list(missing_defaults), context)
1843 if ((dv in self._columns and self._columns[dv]._type == 'many2many') \
1844 or (dv in self._inherit_fields and self._inherit_fields[dv][2]._type == 'many2many')) \
1845 and defaults[dv] and isinstance(defaults[dv][0], (int, long)):
1846 defaults[dv] = [(6, 0, defaults[dv])]
1847 if (dv in self._columns and self._columns[dv]._type == 'one2many' \
1848 or (dv in self._inherit_fields and self._inherit_fields[dv][2]._type == 'one2many')) \
1849 and isinstance(defaults[dv], (list, tuple)) and defaults[dv] and isinstance(defaults[dv][0], dict):
1850 defaults[dv] = [(0, 0, x) for x in defaults[dv]]
1851 defaults.update(values)
1855 def clear_caches(self):
1856 """ Clear the caches
1858 This clears the caches associated to methods decorated with
1859 ``tools.ormcache`` or ``tools.ormcache_multi``.
1862 self._ormcache.clear()
1863 self.pool._any_cache_cleared = True
1864 except AttributeError:
1868 def _read_group_fill_results(self, cr, uid, domain, groupby, remaining_groupbys,
1869 aggregated_fields, count_field,
1870 read_group_result, read_group_order=None, context=None):
1871 """Helper method for filling in empty groups for all possible values of
1872 the field being grouped by"""
1874 # self._group_by_full should map groupable fields to a method that returns
1875 # a list of all aggregated values that we want to display for this field,
1876 # in the form of a m2o-like pair (key,label).
1877 # This is useful to implement kanban views for instance, where all columns
1878 # should be displayed even if they don't contain any record.
1880 # Grab the list of all groups that should be displayed, including all present groups
1881 present_group_ids = [x[groupby][0] for x in read_group_result if x[groupby]]
1882 all_groups,folded = self._group_by_full[groupby](self, cr, uid, present_group_ids, domain,
1883 read_group_order=read_group_order,
1884 access_rights_uid=openerp.SUPERUSER_ID,
1887 result_template = dict.fromkeys(aggregated_fields, False)
1888 result_template[groupby + '_count'] = 0
1889 if remaining_groupbys:
1890 result_template['__context'] = {'group_by': remaining_groupbys}
1892 # Merge the left_side (current results as dicts) with the right_side (all
1893 # possible values as m2o pairs). Both lists are supposed to be using the
1894 # same ordering, and can be merged in one pass.
1897 def append_left(left_side):
1898 grouped_value = left_side[groupby] and left_side[groupby][0]
1899 if not grouped_value in known_values:
1900 result.append(left_side)
1901 known_values[grouped_value] = left_side
1903 known_values[grouped_value].update({count_field: left_side[count_field]})
1904 def append_right(right_side):
1905 grouped_value = right_side[0]
1906 if not grouped_value in known_values:
1907 line = dict(result_template)
1908 line[groupby] = right_side
1909 line['__domain'] = [(groupby,'=',grouped_value)] + domain
1911 known_values[grouped_value] = line
1912 while read_group_result or all_groups:
1913 left_side = read_group_result[0] if read_group_result else None
1914 right_side = all_groups[0] if all_groups else None
1915 assert left_side is None or left_side[groupby] is False \
1916 or isinstance(left_side[groupby], (tuple,list)), \
1917 'M2O-like pair expected, got %r' % left_side[groupby]
1918 assert right_side is None or isinstance(right_side, (tuple,list)), \
1919 'M2O-like pair expected, got %r' % right_side
1920 if left_side is None:
1921 append_right(all_groups.pop(0))
1922 elif right_side is None:
1923 append_left(read_group_result.pop(0))
1924 elif left_side[groupby] == right_side:
1925 append_left(read_group_result.pop(0))
1926 all_groups.pop(0) # discard right_side
1927 elif not left_side[groupby] or not left_side[groupby][0]:
1928 # left side == "Undefined" entry, not present on right_side
1929 append_left(read_group_result.pop(0))
1931 append_right(all_groups.pop(0))
1935 r['__fold'] = folded.get(r[groupby] and r[groupby][0], False)
1938 def _read_group_prepare(self, orderby, aggregated_fields, annotated_groupbys, query):
1940 Prepares the GROUP BY and ORDER BY terms for the read_group method. Adds the missing JOIN clause
1941 to the query if order should be computed against m2o field.
1942 :param orderby: the orderby definition in the form "%(field)s %(order)s"
1943 :param aggregated_fields: list of aggregated fields in the query
1944 :param annotated_groupbys: list of dictionaries returned by _read_group_process_groupby
1945 These dictionaries contains the qualified name of each groupby
1946 (fully qualified SQL name for the corresponding field),
1947 and the (non raw) field name.
1948 :param osv.Query query: the query under construction
1949 :return: (groupby_terms, orderby_terms)
1952 groupby_terms = [gb['qualified_field'] for gb in annotated_groupbys]
1953 groupby_fields = [gb['groupby'] for gb in annotated_groupbys]
1955 return groupby_terms, orderby_terms
1957 self._check_qorder(orderby)
1958 for order_part in orderby.split(','):
1959 order_split = order_part.split()
1960 order_field = order_split[0]
1961 if order_field in groupby_fields:
1963 if self._all_columns[order_field.split(':')[0]].column._type == 'many2one':
1964 order_clause = self._generate_order_by(order_part, query).replace('ORDER BY ', '')
1966 orderby_terms.append(order_clause)
1967 groupby_terms += [order_term.split()[0] for order_term in order_clause.split(',')]
1969 order = '"%s" %s' % (order_field, '' if len(order_split) == 1 else order_split[1])
1970 orderby_terms.append(order)
1971 elif order_field in aggregated_fields:
1972 orderby_terms.append(order_part)
1974 # Cannot order by a field that will not appear in the results (needs to be grouped or aggregated)
1975 _logger.warn('%s: read_group order by `%s` ignored, cannot sort on empty columns (not grouped/aggregated)',
1976 self._name, order_part)
1977 return groupby_terms, orderby_terms
1979 def _read_group_process_groupby(self, gb, query, context):
1981 Helper method to collect important information about groupbys: raw
1982 field name, type, time informations, qualified name, ...
1984 split = gb.split(':')
1985 field_type = self._all_columns[split[0]].column._type
1986 gb_function = split[1] if len(split) == 2 else None
1987 temporal = field_type in ('date', 'datetime')
1988 tz_convert = field_type == 'datetime' and context.get('tz') in pytz.all_timezones
1989 qualified_field = self._inherits_join_calc(split[0], query)
1992 'day': 'dd MMM YYYY',
1993 'week': "'W'w YYYY",
1994 'month': 'MMMM YYYY',
1995 'quarter': 'QQQ YYYY',
1999 'day': dateutil.relativedelta.relativedelta(days=1),
2000 'week': datetime.timedelta(days=7),
2001 'month': dateutil.relativedelta.relativedelta(months=1),
2002 'quarter': dateutil.relativedelta.relativedelta(months=3),
2003 'year': dateutil.relativedelta.relativedelta(years=1)
2006 qualified_field = "timezone('%s', timezone('UTC',%s))" % (context.get('tz', 'UTC'), qualified_field)
2007 qualified_field = "date_trunc('%s', %s)" % (gb_function or 'month', qualified_field)
2008 if field_type == 'boolean':
2009 qualified_field = "coalesce(%s,false)" % qualified_field
2014 'display_format': display_formats[gb_function or 'month'] if temporal else None,
2015 'interval': time_intervals[gb_function or 'month'] if temporal else None,
2016 'tz_convert': tz_convert,
2017 'qualified_field': qualified_field
2020 def _read_group_prepare_data(self, key, value, groupby_dict, context):
2022 Helper method to sanitize the data received by read_group. The None
2023 values are converted to False, and the date/datetime are formatted,
2024 and corrected according to the timezones.
2026 value = False if value is None else value
2027 gb = groupby_dict.get(key)
2028 if gb and gb['type'] in ('date', 'datetime') and value:
2029 if isinstance(value, basestring):
2030 dt_format = DEFAULT_SERVER_DATETIME_FORMAT if gb['type'] == 'datetime' else DEFAULT_SERVER_DATE_FORMAT
2031 value = datetime.datetime.strptime(value, dt_format)
2032 if gb['tz_convert']:
2033 value = pytz.timezone(context['tz']).localize(value)
2036 def _read_group_get_domain(self, groupby, value):
2038 Helper method to construct the domain corresponding to a groupby and
2039 a given value. This is mostly relevant for date/datetime.
2041 if groupby['type'] in ('date', 'datetime') and value:
2042 dt_format = DEFAULT_SERVER_DATETIME_FORMAT if groupby['type'] == 'datetime' else DEFAULT_SERVER_DATE_FORMAT
2043 domain_dt_begin = value
2044 domain_dt_end = value + groupby['interval']
2045 if groupby['tz_convert']:
2046 domain_dt_begin = domain_dt_begin.astimezone(pytz.utc)
2047 domain_dt_end = domain_dt_end.astimezone(pytz.utc)
2048 return [(groupby['field'], '>=', domain_dt_begin.strftime(dt_format)),
2049 (groupby['field'], '<', domain_dt_end.strftime(dt_format))]
2050 if groupby['type'] == 'many2one' and value:
2052 return [(groupby['field'], '=', value)]
2054 def _read_group_format_result(self, data, annotated_groupbys, groupby, groupby_dict, domain, context):
2056 Helper method to format the data contained in the dictianary data by
2057 adding the domain corresponding to its values, the groupbys in the
2058 context and by properly formatting the date/datetime values.
2060 domain_group = [dom for gb in annotated_groupbys for dom in self._read_group_get_domain(gb, data[gb['groupby']])]
2061 for k,v in data.iteritems():
2062 gb = groupby_dict.get(k)
2063 if gb and gb['type'] in ('date', 'datetime') and v:
2064 data[k] = babel.dates.format_date(v, format=gb['display_format'], locale=context.get('lang', 'en_US'))
2066 data['__domain'] = domain_group + domain
2067 if len(groupby) - len(annotated_groupbys) >= 1:
2068 data['__context'] = { 'group_by': groupby[len(annotated_groupbys):]}
2072 def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False, lazy=True):
2074 Get the list of records in list view grouped by the given ``groupby`` fields
2076 :param cr: database cursor
2077 :param uid: current user id
2078 :param domain: list specifying search criteria [['field_name', 'operator', 'value'], ...]
2079 :param list fields: list of fields present in the list view specified on the object
2080 :param list groupby: list of groupby descriptions by which the records will be grouped.
2081 A groupby description is either a field (then it will be grouped by that field)
2082 or a string 'field:groupby_function'. Right now, the only functions supported
2083 are 'day', 'week', 'month', 'quarter' or 'year', and they only make sense for
2084 date/datetime fields.
2085 :param int offset: optional number of records to skip
2086 :param int limit: optional max number of records to return
2087 :param dict context: context arguments, like lang, time zone.
2088 :param list orderby: optional ``order by`` specification, for
2089 overriding the natural sort ordering of the
2090 groups, see also :py:meth:`~osv.osv.osv.search`
2091 (supported only for many2one fields currently)
2092 :param bool lazy: if true, the results are only grouped by the first groupby and the
2093 remaining groupbys are put in the __context key. If false, all the groupbys are
2095 :return: list of dictionaries(one dictionary for each record) containing:
2097 * the values of fields grouped by the fields in ``groupby`` argument
2098 * __domain: list of tuples specifying the search criteria
2099 * __context: dictionary with argument like ``groupby``
2100 :rtype: [{'field_name_1': value, ...]
2101 :raise AccessError: * if user has no read rights on the requested object
2102 * if user tries to bypass access rules for read on the requested object
2106 self.check_access_rights(cr, uid, 'read')
2107 query = self._where_calc(cr, uid, domain, context=context)
2108 fields = fields or self._columns.keys()
2110 groupby = [groupby] if isinstance(groupby, basestring) else groupby
2111 groupby_list = groupby[:1] if lazy else groupby
2112 annotated_groupbys = [self._read_group_process_groupby(gb, query, context)
2113 for gb in groupby_list]
2114 groupby_fields = [g['field'] for g in annotated_groupbys]
2115 order = orderby or ','.join([g for g in groupby_list])
2116 groupby_dict = {gb['groupby']: gb for gb in annotated_groupbys}
2118 self._apply_ir_rules(cr, uid, query, 'read', context=context)
2119 for gb in groupby_fields:
2120 assert gb in fields, "Fields in 'groupby' must appear in the list of fields to read (perhaps it's missing in the list view?)"
2121 groupby_def = self._columns.get(gb) or (self._inherit_fields.get(gb) and self._inherit_fields.get(gb)[2])
2122 assert groupby_def and groupby_def._classic_write, "Fields in 'groupby' must be regular database-persisted fields (no function or related fields), or function fields with store=True"
2123 if not (gb in self._all_columns):
2124 # Don't allow arbitrary values, as this would be a SQL injection vector!
2125 raise except_orm(_('Invalid group_by'),
2126 _('Invalid group_by specification: "%s".\nA group_by specification must be a list of valid fields.')%(gb,))
2128 aggregated_fields = [
2130 if f not in ('id', 'sequence')
2131 if f not in groupby_fields
2132 if f in self._all_columns
2133 if self._all_columns[f].column._type in ('integer', 'float')
2134 if getattr(self._all_columns[f].column, '_classic_write')]
2136 field_formatter = lambda f: (self._all_columns[f].column.group_operator or 'sum', self._inherits_join_calc(f, query), f)
2137 select_terms = ["%s(%s) AS %s" % field_formatter(f) for f in aggregated_fields]
2139 for gb in annotated_groupbys:
2140 select_terms.append('%s as "%s" ' % (gb['qualified_field'], gb['groupby']))
2142 groupby_terms, orderby_terms = self._read_group_prepare(order, aggregated_fields, annotated_groupbys, query)
2143 from_clause, where_clause, where_clause_params = query.get_sql()
2144 if lazy and (len(groupby_fields) >= 2 or not context.get('group_by_no_leaf')):
2145 count_field = groupby_fields[0] if len(groupby_fields) >= 1 else '_'
2148 count_field += '_count'
2150 prefix_terms = lambda prefix, terms: (prefix + " " + ",".join(terms)) if terms else ''
2151 prefix_term = lambda prefix, term: ('%s %s' % (prefix, term)) if term else ''
2154 SELECT min(%(table)s.id) AS id, count(%(table)s.id) AS %(count_field)s %(extra_fields)s
2162 'table': self._table,
2163 'count_field': count_field,
2164 'extra_fields': prefix_terms(',', select_terms),
2165 'from': from_clause,
2166 'where': prefix_term('WHERE', where_clause),
2167 'groupby': prefix_terms('GROUP BY', groupby_terms),
2168 'orderby': prefix_terms('ORDER BY', orderby_terms),
2169 'limit': prefix_term('LIMIT', int(limit) if limit else None),
2170 'offset': prefix_term('OFFSET', int(offset) if limit else None),
2172 cr.execute(query, where_clause_params)
2173 fetched_data = cr.dictfetchall()
2175 if not groupby_fields:
2178 many2onefields = [gb['field'] for gb in annotated_groupbys if gb['type'] == 'many2one']
2180 data_ids = [r['id'] for r in fetched_data]
2181 many2onefields = list(set(many2onefields))
2182 data_dict = {d['id']: d for d in self.read(cr, uid, data_ids, many2onefields, context=context)}
2183 for d in fetched_data:
2184 d.update(data_dict[d['id']])
2186 data = map(lambda r: {k: self._read_group_prepare_data(k,v, groupby_dict, context) for k,v in r.iteritems()}, fetched_data)
2187 result = [self._read_group_format_result(d, annotated_groupbys, groupby, groupby_dict, domain, context) for d in data]
2188 if lazy and groupby_fields[0] in self._group_by_full:
2189 # Right now, read_group only fill results in lazy mode (by default).
2190 # If you need to have the empty groups in 'eager' mode, then the
2191 # method _read_group_fill_results need to be completely reimplemented
2193 result = self._read_group_fill_results(cr, uid, domain, groupby_fields[0], groupby[len(annotated_groupbys):],
2194 aggregated_fields, count_field, result, read_group_order=order,
2198 def _inherits_join_add(self, current_model, parent_model_name, query):
2200 Add missing table SELECT and JOIN clause to ``query`` for reaching the parent table (no duplicates)
2201 :param current_model: current model object
2202 :param parent_model_name: name of the parent model for which the clauses should be added
2203 :param query: query object on which the JOIN should be added
2205 inherits_field = current_model._inherits[parent_model_name]
2206 parent_model = self.pool[parent_model_name]
2207 parent_alias, parent_alias_statement = query.add_join((current_model._table, parent_model._table, inherits_field, 'id', inherits_field), implicit=True)
2210 def _inherits_join_calc(self, field, query):
2212 Adds missing table select and join clause(s) to ``query`` for reaching
2213 the field coming from an '_inherits' parent table (no duplicates).
2215 :param field: name of inherited field to reach
2216 :param query: query object on which the JOIN should be added
2217 :return: qualified name of field, to be used in SELECT clause
2219 current_table = self
2220 parent_alias = '"%s"' % current_table._table
2221 while field in current_table._inherit_fields and not field in current_table._columns:
2222 parent_model_name = current_table._inherit_fields[field][0]
2223 parent_table = self.pool[parent_model_name]
2224 parent_alias = self._inherits_join_add(current_table, parent_model_name, query)
2225 current_table = parent_table
2226 return '%s."%s"' % (parent_alias, field)
2228 def _parent_store_compute(self, cr):
2229 if not self._parent_store:
2231 _logger.info('Computing parent left and right for table %s...', self._table)
2232 def browse_rec(root, pos=0):
2234 where = self._parent_name+'='+str(root)
2236 where = self._parent_name+' IS NULL'
2237 if self._parent_order:
2238 where += ' order by '+self._parent_order
2239 cr.execute('SELECT id FROM '+self._table+' WHERE '+where)
2241 for id in cr.fetchall():
2242 pos2 = browse_rec(id[0], pos2)
2243 cr.execute('update '+self._table+' set parent_left=%s, parent_right=%s where id=%s', (pos, pos2, root))
2245 query = 'SELECT id FROM '+self._table+' WHERE '+self._parent_name+' IS NULL'
2246 if self._parent_order:
2247 query += ' order by ' + self._parent_order
2250 for (root,) in cr.fetchall():
2251 pos = browse_rec(root, pos)
2252 self.invalidate_cache(cr, SUPERUSER_ID, ['parent_left', 'parent_right'])
2255 def _update_store(self, cr, f, k):
2256 _logger.info("storing computed values of fields.function '%s'", k)
2257 ss = self._columns[k]._symbol_set
2258 update_query = 'UPDATE "%s" SET "%s"=%s WHERE id=%%s' % (self._table, k, ss[0])
2259 cr.execute('select id from '+self._table)
2260 ids_lst = map(lambda x: x[0], cr.fetchall())
2262 iids = ids_lst[:AUTOINIT_RECALCULATE_STORED_FIELDS]
2263 ids_lst = ids_lst[AUTOINIT_RECALCULATE_STORED_FIELDS:]
2264 res = f.get(cr, self, iids, k, SUPERUSER_ID, {})
2265 for key, val in res.items():
2268 # if val is a many2one, just write the ID
2269 if type(val) == tuple:
2271 if val is not False:
2272 cr.execute(update_query, (ss[1](val), key))
2274 def _check_selection_field_value(self, cr, uid, field, value, context=None):
2275 """Raise except_orm if value is not among the valid values for the selection field"""
2276 if self._columns[field]._type == 'reference':
2277 val_model, val_id_str = value.split(',', 1)
2280 val_id = long(val_id_str)
2284 raise except_orm(_('ValidateError'),
2285 _('Invalid value for reference field "%s.%s" (last part must be a non-zero integer): "%s"') % (self._table, field, value))
2289 if isinstance(self._columns[field].selection, (tuple, list)):
2290 if val in dict(self._columns[field].selection):
2292 elif val in dict(self._columns[field].selection(self, cr, uid, context=context)):
2294 raise except_orm(_('ValidateError'),
2295 _('The value "%s" for the field "%s.%s" is not in the selection') % (value, self._name, field))
2297 def _check_removed_columns(self, cr, log=False):
2298 # iterate on the database columns to drop the NOT NULL constraints
2299 # of fields which were required but have been removed (or will be added by another module)
2300 columns = [c for c in self._columns if not (isinstance(self._columns[c], fields.function) and not self._columns[c].store)]
2301 columns += MAGIC_COLUMNS
2302 cr.execute("SELECT a.attname, a.attnotnull"
2303 " FROM pg_class c, pg_attribute a"
2304 " WHERE c.relname=%s"
2305 " AND c.oid=a.attrelid"
2306 " AND a.attisdropped=%s"
2307 " AND pg_catalog.format_type(a.atttypid, a.atttypmod) NOT IN ('cid', 'tid', 'oid', 'xid')"
2308 " AND a.attname NOT IN %s", (self._table, False, tuple(columns))),
2310 for column in cr.dictfetchall():
2312 _logger.debug("column %s is in the table %s but not in the corresponding object %s",
2313 column['attname'], self._table, self._name)
2314 if column['attnotnull']:
2315 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, column['attname']))
2316 _schema.debug("Table '%s': column '%s': dropped NOT NULL constraint",
2317 self._table, column['attname'])
2319 def _save_constraint(self, cr, constraint_name, type, definition):
2321 Record the creation of a constraint for this model, to make it possible
2322 to delete it later when the module is uninstalled. Type can be either
2323 'f' or 'u' depending on the constraint being a foreign key or not.
2325 if not self._module:
2326 # no need to save constraints for custom models as they're not part
2329 assert type in ('f', 'u')
2331 SELECT type, definition FROM ir_model_constraint, ir_module_module
2332 WHERE ir_model_constraint.module=ir_module_module.id
2333 AND ir_model_constraint.name=%s
2334 AND ir_module_module.name=%s
2335 """, (constraint_name, self._module))
2336 constraints = cr.dictfetchone()
2339 INSERT INTO ir_model_constraint
2340 (name, date_init, date_update, module, model, type, definition)
2341 VALUES (%s, now() AT TIME ZONE 'UTC', now() AT TIME ZONE 'UTC',
2342 (SELECT id FROM ir_module_module WHERE name=%s),
2343 (SELECT id FROM ir_model WHERE model=%s), %s, %s)""",
2344 (constraint_name, self._module, self._name, type, definition))
2345 elif constraints['type'] != type or (definition and constraints['definition'] != definition):
2347 UPDATE ir_model_constraint
2348 SET date_update=now() AT TIME ZONE 'UTC', type=%s, definition=%s
2349 WHERE name=%s AND module = (SELECT id FROM ir_module_module WHERE name=%s)""",
2350 (type, definition, constraint_name, self._module))
2352 def _save_relation_table(self, cr, relation_table):
2354 Record the creation of a many2many for this model, to make it possible
2355 to delete it later when the module is uninstalled.
2358 SELECT 1 FROM ir_model_relation, ir_module_module
2359 WHERE ir_model_relation.module=ir_module_module.id
2360 AND ir_model_relation.name=%s
2361 AND ir_module_module.name=%s
2362 """, (relation_table, self._module))
2364 cr.execute("""INSERT INTO ir_model_relation (name, date_init, date_update, module, model)
2365 VALUES (%s, now() AT TIME ZONE 'UTC', now() AT TIME ZONE 'UTC',
2366 (SELECT id FROM ir_module_module WHERE name=%s),
2367 (SELECT id FROM ir_model WHERE model=%s))""",
2368 (relation_table, self._module, self._name))
2369 self.invalidate_cache(cr, SUPERUSER_ID)
2371 # checked version: for direct m2o starting from `self`
2372 def _m2o_add_foreign_key_checked(self, source_field, dest_model, ondelete):
2373 assert self.is_transient() or not dest_model.is_transient(), \
2374 'Many2One relationships from non-transient Model to TransientModel are forbidden'
2375 if self.is_transient() and not dest_model.is_transient():
2376 # TransientModel relationships to regular Models are annoying
2377 # usually because they could block deletion due to the FKs.
2378 # So unless stated otherwise we default them to ondelete=cascade.
2379 ondelete = ondelete or 'cascade'
2380 fk_def = (self._table, source_field, dest_model._table, ondelete or 'set null')
2381 self._foreign_keys.add(fk_def)
2382 _schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s", *fk_def)
2384 # unchecked version: for custom cases, such as m2m relationships
2385 def _m2o_add_foreign_key_unchecked(self, source_table, source_field, dest_model, ondelete):
2386 fk_def = (source_table, source_field, dest_model._table, ondelete or 'set null')
2387 self._foreign_keys.add(fk_def)
2388 _schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s", *fk_def)
2390 def _drop_constraint(self, cr, source_table, constraint_name):
2391 cr.execute("ALTER TABLE %s DROP CONSTRAINT %s" % (source_table,constraint_name))
2393 def _m2o_fix_foreign_key(self, cr, source_table, source_field, dest_model, ondelete):
2394 # Find FK constraint(s) currently established for the m2o field,
2395 # and see whether they are stale or not
2396 cr.execute("""SELECT confdeltype as ondelete_rule, conname as constraint_name,
2397 cl2.relname as foreign_table
2398 FROM pg_constraint as con, pg_class as cl1, pg_class as cl2,
2399 pg_attribute as att1, pg_attribute as att2
2400 WHERE con.conrelid = cl1.oid
2401 AND cl1.relname = %s
2402 AND con.confrelid = cl2.oid
2403 AND array_lower(con.conkey, 1) = 1
2404 AND con.conkey[1] = att1.attnum
2405 AND att1.attrelid = cl1.oid
2406 AND att1.attname = %s
2407 AND array_lower(con.confkey, 1) = 1
2408 AND con.confkey[1] = att2.attnum
2409 AND att2.attrelid = cl2.oid
2410 AND att2.attname = %s
2411 AND con.contype = 'f'""", (source_table, source_field, 'id'))
2412 constraints = cr.dictfetchall()
2414 if len(constraints) == 1:
2415 # Is it the right constraint?
2417 if cons['ondelete_rule'] != POSTGRES_CONFDELTYPES.get((ondelete or 'set null').upper(), 'a')\
2418 or cons['foreign_table'] != dest_model._table:
2419 # Wrong FK: drop it and recreate
2420 _schema.debug("Table '%s': dropping obsolete FK constraint: '%s'",
2421 source_table, cons['constraint_name'])
2422 self._drop_constraint(cr, source_table, cons['constraint_name'])
2424 # it's all good, nothing to do!
2427 # Multiple FKs found for the same field, drop them all, and re-create
2428 for cons in constraints:
2429 _schema.debug("Table '%s': dropping duplicate FK constraints: '%s'",
2430 source_table, cons['constraint_name'])
2431 self._drop_constraint(cr, source_table, cons['constraint_name'])
2433 # (re-)create the FK
2434 self._m2o_add_foreign_key_checked(source_field, dest_model, ondelete)
2437 def _set_default_value_on_column(self, cr, column_name, context=None):
2438 # ideally should use add_default_value but fails
2439 # due to ir.values not being ready
2441 # get old-style default
2442 default = self._defaults.get(column_name)
2443 if callable(default):
2444 default = default(self, cr, SUPERUSER_ID, context)
2446 # get new_style default if no old-style
2448 record = self.new(cr, SUPERUSER_ID, context=context)
2449 field = self._fields[column_name]
2450 field.determine_default(record)
2451 defaults = dict(record._cache)
2452 if column_name in defaults:
2453 default = field.convert_to_write(defaults[column_name])
2455 column = self._columns[column_name]
2456 ss = column._symbol_set
2457 db_default = ss[1](default)
2458 # Write default if non-NULL, except for booleans for which False means
2459 # the same as NULL - this saves us an expensive query on large tables.
2460 write_default = (db_default is not None if column._type != 'boolean'
2463 _logger.debug("Table '%s': setting default value of new column %s to %r",
2464 self._table, column_name, default)
2465 query = 'UPDATE "%s" SET "%s"=%s WHERE "%s" is NULL' % (
2466 self._table, column_name, ss[0], column_name)
2467 cr.execute(query, (db_default,))
2468 # this is a disgrace
2471 def _auto_init(self, cr, context=None):
2474 Call _field_create and, unless _auto is False:
2476 - create the corresponding table in database for the model,
2477 - possibly add the parent columns in database,
2478 - possibly add the columns 'create_uid', 'create_date', 'write_uid',
2479 'write_date' in database if _log_access is True (the default),
2480 - report on database columns no more existing in _columns,
2481 - remove no more existing not null constraints,
2482 - alter existing database columns to match _columns,
2483 - create database tables to match _columns,
2484 - add database indices to match _columns,
2485 - save in self._foreign_keys a list a foreign keys to create (see
2489 self._foreign_keys = set()
2490 raise_on_invalid_object_name(self._name)
2493 store_compute = False
2494 stored_fields = [] # new-style stored fields with compute
2496 update_custom_fields = context.get('update_custom_fields', False)
2497 self._field_create(cr, context=context)
2498 create = not self._table_exist(cr)
2502 self._create_table(cr)
2505 cr.execute('SELECT 1 FROM "%s" LIMIT 1' % self._table)
2506 has_rows = cr.rowcount
2509 if self._parent_store:
2510 if not self._parent_columns_exist(cr):
2511 self._create_parent_columns(cr)
2512 store_compute = True
2514 self._check_removed_columns(cr, log=False)
2516 # iterate on the "object columns"
2517 column_data = self._select_column_data(cr)
2519 for k, f in self._columns.iteritems():
2520 if k == 'id': # FIXME: maybe id should be a regular column?
2522 # Don't update custom (also called manual) fields
2523 if f.manual and not update_custom_fields:
2526 if isinstance(f, fields.one2many):
2527 self._o2m_raise_on_missing_reference(cr, f)
2529 elif isinstance(f, fields.many2many):
2530 self._m2m_raise_or_create_relation(cr, f)
2533 res = column_data.get(k)
2535 # The field is not found as-is in database, try if it
2536 # exists with an old name.
2537 if not res and hasattr(f, 'oldname'):
2538 res = column_data.get(f.oldname)
2540 cr.execute('ALTER TABLE "%s" RENAME "%s" TO "%s"' % (self._table, f.oldname, k))
2542 column_data[k] = res
2543 _schema.debug("Table '%s': renamed column '%s' to '%s'",
2544 self._table, f.oldname, k)
2546 # The field already exists in database. Possibly
2547 # change its type, rename it, drop it or change its
2550 f_pg_type = res['typname']
2551 f_pg_size = res['size']
2552 f_pg_notnull = res['attnotnull']
2553 if isinstance(f, fields.function) and not f.store and\
2554 not getattr(f, 'nodrop', False):
2555 _logger.info('column %s (%s) converted to a function, removed from table %s',
2556 k, f.string, self._table)
2557 cr.execute('ALTER TABLE "%s" DROP COLUMN "%s" CASCADE' % (self._table, k))
2559 _schema.debug("Table '%s': dropped column '%s' with cascade",
2563 f_obj_type = get_pg_type(f) and get_pg_type(f)[0]
2568 ('text', 'char', pg_varchar(f.size), '::%s' % pg_varchar(f.size)),
2569 ('varchar', 'text', 'TEXT', ''),
2570 ('int4', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
2571 ('date', 'datetime', 'TIMESTAMP', '::TIMESTAMP'),
2572 ('timestamp', 'date', 'date', '::date'),
2573 ('numeric', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
2574 ('float8', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
2576 if f_pg_type == 'varchar' and f._type == 'char' and f_pg_size and (f.size is None or f_pg_size < f.size):
2578 with cr.savepoint():
2579 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" TYPE %s' % (self._table, k, pg_varchar(f.size)))
2580 except psycopg2.NotSupportedError:
2581 # In place alter table cannot be done because a view is depending of this field.
2582 # Do a manual copy. This will drop the view (that will be recreated later)
2583 cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO temp_change_size' % (self._table, k))
2584 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, pg_varchar(f.size)))
2585 cr.execute('UPDATE "%s" SET "%s"=temp_change_size::%s' % (self._table, k, pg_varchar(f.size)))
2586 cr.execute('ALTER TABLE "%s" DROP COLUMN temp_change_size CASCADE' % (self._table,))
2588 _schema.debug("Table '%s': column '%s' (type varchar) changed size from %s to %s",
2589 self._table, k, f_pg_size or 'unlimited', f.size or 'unlimited')
2591 if (f_pg_type==c[0]) and (f._type==c[1]):
2592 if f_pg_type != f_obj_type:
2594 cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO __temp_type_cast' % (self._table, k))
2595 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, c[2]))
2596 cr.execute(('UPDATE "%s" SET "%s"= __temp_type_cast'+c[3]) % (self._table, k))
2597 cr.execute('ALTER TABLE "%s" DROP COLUMN __temp_type_cast CASCADE' % (self._table,))
2599 _schema.debug("Table '%s': column '%s' changed type from %s to %s",
2600 self._table, k, c[0], c[1])
2603 if f_pg_type != f_obj_type:
2607 newname = k + '_moved' + str(i)
2608 cr.execute("SELECT count(1) FROM pg_class c,pg_attribute a " \
2609 "WHERE c.relname=%s " \
2610 "AND a.attname=%s " \
2611 "AND c.oid=a.attrelid ", (self._table, newname))
2612 if not cr.fetchone()[0]:
2616 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, k))
2617 cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO "%s"' % (self._table, k, newname))
2618 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, get_pg_type(f)[1]))
2619 cr.execute("COMMENT ON COLUMN %s.\"%s\" IS %%s" % (self._table, k), (f.string,))
2620 _schema.debug("Table '%s': column '%s' has changed type (DB=%s, def=%s), data moved to column %s !",
2621 self._table, k, f_pg_type, f._type, newname)
2623 # if the field is required and hasn't got a NOT NULL constraint
2624 if f.required and f_pg_notnull == 0:
2626 self._set_default_value_on_column(cr, k, context=context)
2627 # add the NOT NULL constraint
2629 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k), log_exceptions=False)
2631 _schema.debug("Table '%s': column '%s': added NOT NULL constraint",
2634 msg = "Table '%s': unable to set a NOT NULL constraint on column '%s' !\n"\
2635 "If you want to have it, you should update the records and execute manually:\n"\
2636 "ALTER TABLE %s ALTER COLUMN %s SET NOT NULL"
2637 _schema.warning(msg, self._table, k, self._table, k)
2639 elif not f.required and f_pg_notnull == 1:
2640 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, k))
2642 _schema.debug("Table '%s': column '%s': dropped NOT NULL constraint",
2645 indexname = '%s_%s_index' % (self._table, k)
2646 cr.execute("SELECT indexname FROM pg_indexes WHERE indexname = %s and tablename = %s", (indexname, self._table))
2647 res2 = cr.dictfetchall()
2648 if not res2 and f.select:
2649 cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (self._table, k, self._table, k))
2651 if f._type == 'text':
2652 # FIXME: for fields.text columns we should try creating GIN indexes instead (seems most suitable for an ERP context)
2653 msg = "Table '%s': Adding (b-tree) index for %s column '%s'."\
2654 "This is probably useless (does not work for fulltext search) and prevents INSERTs of long texts"\
2655 " because there is a length limit for indexable btree values!\n"\
2656 "Use a search view instead if you simply want to make the field searchable."
2657 _schema.warning(msg, self._table, f._type, k)
2658 if res2 and not f.select:
2659 cr.execute('DROP INDEX "%s_%s_index"' % (self._table, k))
2661 msg = "Table '%s': dropping index for column '%s' of type '%s' as it is not required anymore"
2662 _schema.debug(msg, self._table, k, f._type)
2664 if isinstance(f, fields.many2one) or (isinstance(f, fields.function) and f._type == 'many2one' and f.store):
2665 dest_model = self.pool[f._obj]
2666 if dest_model._auto and dest_model._table != 'ir_actions':
2667 self._m2o_fix_foreign_key(cr, self._table, k, dest_model, f.ondelete)
2669 # The field doesn't exist in database. Create it if necessary.
2671 if not isinstance(f, fields.function) or f.store:
2672 # add the missing field
2673 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, get_pg_type(f)[1]))
2674 cr.execute("COMMENT ON COLUMN %s.\"%s\" IS %%s" % (self._table, k), (f.string,))
2675 _schema.debug("Table '%s': added column '%s' with definition=%s",
2676 self._table, k, get_pg_type(f)[1])
2680 self._set_default_value_on_column(cr, k, context=context)
2682 # remember the functions to call for the stored fields
2683 if isinstance(f, fields.function):
2685 if f.store is not True: # i.e. if f.store is a dict
2686 order = f.store[f.store.keys()[0]][2]
2687 todo_end.append((order, self._update_store, (f, k)))
2689 # remember new-style stored fields with compute method
2690 if k in self._fields and self._fields[k].depends:
2691 stored_fields.append(self._fields[k])
2693 # and add constraints if needed
2694 if isinstance(f, fields.many2one) or (isinstance(f, fields.function) and f._type == 'many2one' and f.store):
2695 if f._obj not in self.pool:
2696 raise except_orm('Programming Error', 'There is no reference available for %s' % (f._obj,))
2697 dest_model = self.pool[f._obj]
2698 ref = dest_model._table
2699 # ir_actions is inherited so foreign key doesn't work on it
2700 if dest_model._auto and ref != 'ir_actions':
2701 self._m2o_add_foreign_key_checked(k, dest_model, f.ondelete)
2703 cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (self._table, k, self._table, k))
2707 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k))
2708 _schema.debug("Table '%s': column '%s': added a NOT NULL constraint",
2711 msg = "WARNING: unable to set column %s of table %s not null !\n"\
2712 "Try to re-run: openerp-server --update=module\n"\
2713 "If it doesn't work, update records and execute manually:\n"\
2714 "ALTER TABLE %s ALTER COLUMN %s SET NOT NULL"
2715 _logger.warning(msg, k, self._table, self._table, k, exc_info=True)
2719 cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (self._table,))
2720 create = not bool(cr.fetchone())
2722 cr.commit() # start a new transaction
2725 self._add_sql_constraints(cr)
2728 self._execute_sql(cr)
2731 self._parent_store_compute(cr)
2735 # trigger computation of new-style stored fields with a compute
2737 _logger.info("Storing computed values of %s fields %s",
2738 self._name, ', '.join(sorted(f.name for f in stored_fields)))
2739 recs = self.browse(cr, SUPERUSER_ID, [], {'active_test': False})
2740 recs = recs.search([])
2742 map(recs._recompute_todo, stored_fields)
2745 todo_end.append((1000, func, ()))
2749 def _auto_end(self, cr, context=None):
2750 """ Create the foreign keys recorded by _auto_init. """
2751 for t, k, r, d in self._foreign_keys:
2752 cr.execute('ALTER TABLE "%s" ADD FOREIGN KEY ("%s") REFERENCES "%s" ON DELETE %s' % (t, k, r, d))
2753 self._save_constraint(cr, "%s_%s_fkey" % (t, k), 'f', False)
2755 del self._foreign_keys
2758 def _table_exist(self, cr):
2759 cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (self._table,))
2763 def _create_table(self, cr):
2764 cr.execute('CREATE TABLE "%s" (id SERIAL NOT NULL, PRIMARY KEY(id))' % (self._table,))
2765 cr.execute(("COMMENT ON TABLE \"%s\" IS %%s" % self._table), (self._description,))
2766 _schema.debug("Table '%s': created", self._table)
2769 def _parent_columns_exist(self, cr):
2770 cr.execute("""SELECT c.relname
2771 FROM pg_class c, pg_attribute a
2772 WHERE c.relname=%s AND a.attname=%s AND c.oid=a.attrelid
2773 """, (self._table, 'parent_left'))
2777 def _create_parent_columns(self, cr):
2778 cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_left" INTEGER' % (self._table,))
2779 cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_right" INTEGER' % (self._table,))
2780 if 'parent_left' not in self._columns:
2781 _logger.error('create a column parent_left on object %s: fields.integer(\'Left Parent\', select=1)',
2783 _schema.debug("Table '%s': added column '%s' with definition=%s",
2784 self._table, 'parent_left', 'INTEGER')
2785 elif not self._columns['parent_left'].select:
2786 _logger.error('parent_left column on object %s must be indexed! Add select=1 to the field definition)',
2788 if 'parent_right' not in self._columns:
2789 _logger.error('create a column parent_right on object %s: fields.integer(\'Right Parent\', select=1)',
2791 _schema.debug("Table '%s': added column '%s' with definition=%s",
2792 self._table, 'parent_right', 'INTEGER')
2793 elif not self._columns['parent_right'].select:
2794 _logger.error('parent_right column on object %s must be indexed! Add select=1 to the field definition)',
2796 if self._columns[self._parent_name].ondelete not in ('cascade', 'restrict'):
2797 _logger.error("The column %s on object %s must be set as ondelete='cascade' or 'restrict'",
2798 self._parent_name, self._name)
2803 def _select_column_data(self, cr):
2804 # attlen is the number of bytes necessary to represent the type when
2805 # the type has a fixed size. If the type has a varying size attlen is
2806 # -1 and atttypmod is the size limit + 4, or -1 if there is no limit.
2807 cr.execute("SELECT c.relname,a.attname,a.attlen,a.atttypmod,a.attnotnull,a.atthasdef,t.typname,CASE WHEN a.attlen=-1 THEN (CASE WHEN a.atttypmod=-1 THEN 0 ELSE a.atttypmod-4 END) ELSE a.attlen END as size " \
2808 "FROM pg_class c,pg_attribute a,pg_type t " \
2809 "WHERE c.relname=%s " \
2810 "AND c.oid=a.attrelid " \
2811 "AND a.atttypid=t.oid", (self._table,))
2812 return dict(map(lambda x: (x['attname'], x),cr.dictfetchall()))
2815 def _o2m_raise_on_missing_reference(self, cr, f):
2816 # TODO this check should be a method on fields.one2many.
2817 if f._obj in self.pool:
2818 other = self.pool[f._obj]
2819 # TODO the condition could use fields_get_keys().
2820 if f._fields_id not in other._columns.keys():
2821 if f._fields_id not in other._inherit_fields.keys():
2822 raise except_orm('Programming Error', "There is no reference field '%s' found for '%s'" % (f._fields_id, f._obj,))
2824 def _m2m_raise_or_create_relation(self, cr, f):
2825 m2m_tbl, col1, col2 = f._sql_names(self)
2826 # do not create relations for custom fields as they do not belong to a module
2827 # they will be automatically removed when dropping the corresponding ir.model.field
2828 # table name for custom relation all starts with x_, see __init__
2829 if not m2m_tbl.startswith('x_'):
2830 self._save_relation_table(cr, m2m_tbl)
2831 cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (m2m_tbl,))
2832 if not cr.dictfetchall():
2833 if f._obj not in self.pool:
2834 raise except_orm('Programming Error', 'Many2Many destination model does not exist: `%s`' % (f._obj,))
2835 dest_model = self.pool[f._obj]
2836 ref = dest_model._table
2837 cr.execute('CREATE TABLE "%s" ("%s" INTEGER NOT NULL, "%s" INTEGER NOT NULL, UNIQUE("%s","%s"))' % (m2m_tbl, col1, col2, col1, col2))
2838 # create foreign key references with ondelete=cascade, unless the targets are SQL views
2839 cr.execute("SELECT relkind FROM pg_class WHERE relkind IN ('v') AND relname=%s", (ref,))
2840 if not cr.fetchall():
2841 self._m2o_add_foreign_key_unchecked(m2m_tbl, col2, dest_model, 'cascade')
2842 cr.execute("SELECT relkind FROM pg_class WHERE relkind IN ('v') AND relname=%s", (self._table,))
2843 if not cr.fetchall():
2844 self._m2o_add_foreign_key_unchecked(m2m_tbl, col1, self, 'cascade')
2846 cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (m2m_tbl, col1, m2m_tbl, col1))
2847 cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (m2m_tbl, col2, m2m_tbl, col2))
2848 cr.execute("COMMENT ON TABLE \"%s\" IS 'RELATION BETWEEN %s AND %s'" % (m2m_tbl, self._table, ref))
2850 _schema.debug("Create table '%s': m2m relation between '%s' and '%s'", m2m_tbl, self._table, ref)
2853 def _add_sql_constraints(self, cr):
2856 Modify this model's database table constraints so they match the one in
2860 def unify_cons_text(txt):
2861 return txt.lower().replace(', ',',').replace(' (','(')
2863 for (key, con, _) in self._sql_constraints:
2864 conname = '%s_%s' % (self._table, key)
2866 # using 1 to get result if no imc but one pgc
2867 cr.execute("""SELECT definition, 1
2868 FROM ir_model_constraint imc
2869 RIGHT JOIN pg_constraint pgc
2870 ON (pgc.conname = imc.name)
2871 WHERE pgc.conname=%s
2873 existing_constraints = cr.dictfetchone()
2877 'query': 'ALTER TABLE "%s" DROP CONSTRAINT "%s"' % (self._table, conname, ),
2878 'msg_ok': "Table '%s': dropped constraint '%s'. Reason: its definition changed from '%%s' to '%s'" % (
2879 self._table, conname, con),
2880 'msg_err': "Table '%s': unable to drop \'%s\' constraint !" % (self._table, con),
2885 'query': 'ALTER TABLE "%s" ADD CONSTRAINT "%s" %s' % (self._table, conname, con,),
2886 'msg_ok': "Table '%s': added constraint '%s' with definition=%s" % (self._table, conname, con),
2887 'msg_err': "Table '%s': unable to add \'%s\' constraint !\n If you want to have it, you should update the records and execute manually:\n%%s" % (
2893 if not existing_constraints:
2894 # constraint does not exists:
2895 sql_actions['add']['execute'] = True
2896 sql_actions['add']['msg_err'] = sql_actions['add']['msg_err'] % (sql_actions['add']['query'], )
2897 elif unify_cons_text(con) != existing_constraints['definition']:
2898 # constraint exists but its definition has changed:
2899 sql_actions['drop']['execute'] = True
2900 sql_actions['drop']['msg_ok'] = sql_actions['drop']['msg_ok'] % (existing_constraints['definition'] or '', )
2901 sql_actions['add']['execute'] = True
2902 sql_actions['add']['msg_err'] = sql_actions['add']['msg_err'] % (sql_actions['add']['query'], )
2904 # we need to add the constraint:
2905 self._save_constraint(cr, conname, 'u', unify_cons_text(con))
2906 sql_actions = [item for item in sql_actions.values()]
2907 sql_actions.sort(key=lambda x: x['order'])
2908 for sql_action in [action for action in sql_actions if action['execute']]:
2910 cr.execute(sql_action['query'])
2912 _schema.debug(sql_action['msg_ok'])
2914 _schema.warning(sql_action['msg_err'])
2918 def _execute_sql(self, cr):
2919 """ Execute the SQL code from the _sql attribute (if any)."""
2920 if hasattr(self, "_sql"):
2921 for line in self._sql.split(';'):
2922 line2 = line.replace('\n', '').strip()
2928 # Update objects that uses this one to update their _inherits fields
2932 def _inherits_reload_src(cls):
2933 """ Recompute the _inherit_fields mapping on each _inherits'd child model."""
2934 for model in cls.pool.values():
2935 if cls._name in model._inherits:
2936 model._inherits_reload()
2939 def _inherits_reload(cls):
2940 """ Recompute the _inherit_fields mapping.
2942 This will also call itself on each inherits'd child model.
2946 for table in cls._inherits:
2947 other = cls.pool[table]
2948 for col in other._columns.keys():
2949 res[col] = (table, cls._inherits[table], other._columns[col], table)
2950 for col in other._inherit_fields.keys():
2951 res[col] = (table, cls._inherits[table], other._inherit_fields[col][2], other._inherit_fields[col][3])
2952 cls._inherit_fields = res
2953 cls._all_columns = cls._get_column_infos()
2955 # interface columns with new-style fields
2956 for attr, column in cls._columns.items():
2957 if attr not in cls._fields:
2958 cls._add_field(attr, column.to_field())
2960 # interface inherited fields with new-style fields (note that the
2961 # reverse order is for being consistent with _all_columns above)
2962 for parent_model, parent_field in reversed(cls._inherits.items()):
2963 for attr, field in cls.pool[parent_model]._fields.iteritems():
2964 if attr not in cls._fields:
2965 cls._add_field(attr, field.copy(
2967 related=(parent_field, attr),
2971 cls._inherits_reload_src()
2974 def _get_column_infos(cls):
2975 """Returns a dict mapping all fields names (direct fields and
2976 inherited field via _inherits) to a ``column_info`` struct
2977 giving detailed columns """
2979 # do not inverse for loops, since local fields may hide inherited ones!
2980 for k, (parent, m2o, col, original_parent) in cls._inherit_fields.iteritems():
2981 result[k] = fields.column_info(k, col, parent, m2o, original_parent)
2982 for k, col in cls._columns.iteritems():
2983 result[k] = fields.column_info(k, col)
2987 def _inherits_check(cls):
2988 for table, field_name in cls._inherits.items():
2989 if field_name not in cls._columns:
2990 _logger.info('Missing many2one field definition for _inherits reference "%s" in "%s", using default one.', field_name, cls._name)
2991 cls._columns[field_name] = fields.many2one(table, string="Automatically created field to link to parent %s" % table,
2992 required=True, ondelete="cascade")
2993 elif not cls._columns[field_name].required or cls._columns[field_name].ondelete.lower() not in ("cascade", "restrict"):
2994 _logger.warning('Field definition for _inherits reference "%s" in "%s" must be marked as "required" with ondelete="cascade" or "restrict", forcing it to required + cascade.', field_name, cls._name)
2995 cls._columns[field_name].required = True
2996 cls._columns[field_name].ondelete = "cascade"
2998 # reflect fields with delegate=True in dictionary cls._inherits
2999 for field in cls._fields.itervalues():
3000 if field.type == 'many2one' and not field.related and field.delegate:
3001 if not field.required:
3002 _logger.warning("Field %s with delegate=True must be required.", field)
3003 field.required = True
3004 if field.ondelete.lower() not in ('cascade', 'restrict'):
3005 field.ondelete = 'cascade'
3006 cls._inherits[field.comodel_name] = field.name
3009 def _prepare_setup_fields(self):
3010 """ Prepare the setup of fields once the models have been loaded. """
3011 for field in self._fields.itervalues():
3015 def _setup_fields(self, partial=False):
3016 """ Setup the fields (dependency triggers, etc). """
3017 for field in self._fields.itervalues():
3018 if partial and field.manual and \
3019 field.relational and \
3020 (field.comodel_name not in self.pool or \
3021 (field.type == 'one2many' and field.inverse_name not in self.pool[field.comodel_name]._fields)):
3022 # do not set up manual fields that refer to unknown models
3024 field.setup(self.env)
3026 # group fields by compute to determine field.computed_fields
3027 fields_by_compute = defaultdict(list)
3028 for field in self._fields.itervalues():
3030 field.computed_fields = fields_by_compute[field.compute]
3031 field.computed_fields.append(field)
3033 field.computed_fields = []
3035 def fields_get(self, cr, user, allfields=None, context=None, write_access=True):
3036 """ fields_get([fields])
3038 Return the definition of each field.
3040 The returned value is a dictionary (indiced by field name) of
3041 dictionaries. The _inherits'd fields are included. The string, help,
3042 and selection (if present) attributes are translated.
3044 :param cr: database cursor
3045 :param user: current user id
3046 :param allfields: list of fields
3047 :param context: context arguments, like lang, time zone
3048 :return: dictionary of field dictionaries, each one describing a field of the business object
3049 :raise AccessError: * if user has no create/write rights on the requested object
3052 recs = self.browse(cr, user, [], context)
3055 for fname, field in self._fields.iteritems():
3056 if allfields and fname not in allfields:
3058 if field.groups and not recs.user_has_groups(field.groups):
3060 res[fname] = field.get_description(recs.env)
3062 # if user cannot create or modify records, make all fields readonly
3063 has_access = functools.partial(recs.check_access_rights, raise_exception=False)
3064 if not (has_access('write') or has_access('create')):
3065 for description in res.itervalues():
3066 description['readonly'] = True
3067 description['states'] = {}
3071 def get_empty_list_help(self, cr, user, help, context=None):
3072 """ Generic method giving the help message displayed when having
3073 no result to display in a list or kanban view. By default it returns
3074 the help given in parameter that is generally the help message
3075 defined in the action.
3079 def check_field_access_rights(self, cr, user, operation, fields, context=None):
3081 Check the user access rights on the given fields. This raises Access
3082 Denied if the user does not have the rights. Otherwise it returns the
3083 fields (as is if the fields is not falsy, or the readable/writable
3084 fields if fields is falsy).
3086 if user == SUPERUSER_ID:
3087 return fields or list(self._fields)
3090 """ determine whether user has access to field `fname` """
3091 field = self._fields.get(fname)
3092 if field and field.groups:
3093 return self.user_has_groups(cr, user, groups=field.groups, context=context)
3098 fields = filter(valid, self._fields)
3100 invalid_fields = set(filter(lambda name: not valid(name), fields))
3102 _logger.warning('Access Denied by ACLs for operation: %s, uid: %s, model: %s, fields: %s',
3103 operation, user, self._name, ', '.join(invalid_fields))
3105 _('The requested operation cannot be completed due to security restrictions. '
3106 'Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
3107 (self._description, operation))
3111 # add explicit old-style implementation to read()
3113 def read(self, cr, user, ids, fields=None, context=None, load='_classic_read'):
3114 records = self.browse(cr, user, ids, context)
3115 result = BaseModel.read(records, fields, load=load)
3116 return result if isinstance(ids, list) else (bool(result) and result[0])
3118 # new-style implementation of read()
3120 def read(self, fields=None, load='_classic_read'):
3123 Reads the requested fields for the records in `self`, low-level/RPC
3124 method. In Python code, prefer :meth:`~.browse`.
3126 :param fields: list of field names to return (default is all fields)
3127 :return: a list of dictionaries mapping field names to their values,
3128 with one dictionary per record
3129 :raise AccessError: if user has no read rights on some of the given
3132 # check access rights
3133 self.check_access_rights('read')
3134 fields = self.check_field_access_rights('read', fields)
3136 # split fields into stored and computed fields
3137 stored, computed = [], []
3139 if name in self._columns:
3141 elif name in self._fields:
3142 computed.append(name)
3144 _logger.warning("%s.read() with unknown field '%s'", self._name, name)
3146 # fetch stored fields from the database to the cache
3147 self._read_from_database(stored)
3149 # retrieve results from records; this takes values from the cache and
3150 # computes remaining fields
3152 name_fields = [(name, self._fields[name]) for name in (stored + computed)]
3153 use_name_get = (load == '_classic_read')
3156 values = {'id': record.id}
3157 for name, field in name_fields:
3158 values[name] = field.convert_to_read(record[name], use_name_get)
3159 result.append(values)
3160 except MissingError:
3166 def _prefetch_field(self, field):
3167 """ Read from the database in order to fetch `field` (:class:`Field`
3168 instance) for `self` in cache.
3170 # fetch the records of this model without field_name in their cache
3171 records = self._in_cache_without(field)
3173 if len(records) > PREFETCH_MAX:
3174 records = records[:PREFETCH_MAX] | self
3176 # by default, simply fetch field
3177 fnames = {field.name}
3179 if self.env.in_draft:
3180 # we may be doing an onchange, do not prefetch other fields
3182 elif self.env.field_todo(field):
3183 # field must be recomputed, do not prefetch records to recompute
3184 records -= self.env.field_todo(field)
3185 elif not self._context.get('prefetch_fields', True):
3186 # do not prefetch other fields
3188 elif self._columns[field.name]._prefetch:
3189 # here we can optimize: prefetch all classic and many2one fields
3191 for fname, fcolumn in self._columns.iteritems()
3192 if fcolumn._prefetch
3193 if not fcolumn.groups or self.user_has_groups(fcolumn.groups)
3196 # fetch records with read()
3197 assert self in records and field.name in fnames
3200 result = records.read(list(fnames), load='_classic_write')
3204 # check the cache, and update it if necessary
3205 if not self._cache.contains(field):
3206 for values in result:
3207 record = self.browse(values.pop('id'))
3208 record._cache.update(record._convert_to_cache(values, validate=False))
3209 if not self._cache.contains(field):
3210 e = AccessError("No value found for %s.%s" % (self, field.name))
3211 self._cache[field] = FailedValue(e)
3214 def _read_from_database(self, field_names):
3215 """ Read the given fields of the records in `self` from the database,
3216 and store them in cache. Access errors are also stored in cache.
3219 cr, user, context = env.args
3221 # FIXME: The query construction needs to be rewritten using the internal Query
3222 # object, as in search(), to avoid ambiguous column references when
3223 # reading/sorting on a table that is auto_joined to another table with
3224 # common columns (e.g. the magical columns)
3226 # Construct a clause for the security rules.
3227 # 'tables' holds the list of tables necessary for the SELECT, including
3228 # the ir.rule clauses, and contains at least self._table.
3229 rule_clause, rule_params, tables = env['ir.rule'].domain_get(self._name, 'read')
3231 # determine the fields that are stored as columns in self._table
3232 fields_pre = [f for f in field_names if self._columns[f]._classic_write]
3234 # we need fully-qualified column names in case len(tables) > 1
3236 if isinstance(self._columns.get(f), fields.binary) and \
3237 context.get('bin_size_%s' % f, context.get('bin_size')):
3238 # PG 9.2 introduces conflicting pg_size_pretty(numeric) -> need ::cast
3239 return 'pg_size_pretty(length(%s."%s")::bigint) as "%s"' % (self._table, f, f)
3241 return '%s."%s"' % (self._table, f)
3242 qual_names = map(qualify, set(fields_pre + ['id']))
3244 query = """ SELECT %(qual_names)s FROM %(tables)s
3245 WHERE %(table)s.id IN %%s AND (%(extra)s)
3248 'qual_names': ",".join(qual_names),
3249 'tables': ",".join(tables),
3250 'table': self._table,
3251 'extra': " OR ".join(rule_clause) if rule_clause else "TRUE",
3252 'order': self._parent_order or self._order,
3256 for sub_ids in cr.split_for_in_conditions(self.ids):
3257 cr.execute(query, [tuple(sub_ids)] + rule_params)
3258 result.extend(cr.dictfetchall())
3260 ids = [vals['id'] for vals in result]
3263 # translate the fields if necessary
3264 if context.get('lang'):
3265 ir_translation = env['ir.translation']
3266 for f in fields_pre:
3267 if self._columns[f].translate:
3268 #TODO: optimize out of this loop
3269 res_trans = ir_translation._get_ids(
3270 '%s,%s' % (self._name, f), 'model', context['lang'], ids)
3272 vals[f] = res_trans.get(vals['id'], False) or vals[f]
3274 # apply the symbol_get functions of the fields we just read
3275 for f in fields_pre:
3276 symbol_get = self._columns[f]._symbol_get
3279 vals[f] = symbol_get(vals[f])
3281 # store result in cache for POST fields
3283 record = self.browse(vals['id'])
3284 record._cache.update(record._convert_to_cache(vals, validate=False))
3286 # determine the fields that must be processed now
3287 fields_post = [f for f in field_names if not self._columns[f]._classic_write]
3289 # Compute POST fields, grouped by multi
3290 by_multi = defaultdict(list)
3291 for f in fields_post:
3292 by_multi[self._columns[f]._multi].append(f)
3294 for multi, fs in by_multi.iteritems():
3296 res2 = self._columns[fs[0]].get(cr, self._model, ids, fs, user, context=context, values=result)
3297 assert res2 is not None, \
3298 'The function field "%s" on the "%s" model returned None\n' \
3299 '(a dictionary was expected).' % (fs[0], self._name)
3301 # TOCHECK : why got string instend of dict in python2.6
3302 # if isinstance(res2[vals['id']], str): res2[vals['id']] = eval(res2[vals['id']])
3303 multi_fields = res2.get(vals['id'], {})
3306 vals[f] = multi_fields.get(f, [])
3309 res2 = self._columns[f].get(cr, self._model, ids, f, user, context=context, values=result)
3312 vals[f] = res2[vals['id']]
3316 # Warn about deprecated fields now that fields_pre and fields_post are computed
3317 for f in field_names:
3318 column = self._columns[f]
3319 if column.deprecated:
3320 _logger.warning('Field %s.%s is deprecated: %s', self._name, f, column.deprecated)
3322 # store result in cache
3324 record = self.browse(vals.pop('id'))
3325 record._cache.update(record._convert_to_cache(vals, validate=False))
3327 # store failed values in cache for the records that could not be read
3328 fetched = self.browse(ids)
3329 missing = self - fetched
3331 extras = fetched - self
3334 _("Database fetch misses ids ({}) and has extra ids ({}), may be caused by a type incoherence in a previous request").format(
3335 ', '.join(map(repr, missing._ids)),
3336 ', '.join(map(repr, extras._ids)),
3338 # store an access error exception in existing records
3340 _('The requested operation cannot be completed due to security restrictions. Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
3341 (self._name, 'read')
3343 forbidden = missing.exists()
3344 forbidden._cache.update(FailedValue(exc))
3345 # store a missing error exception in non-existing records
3347 _('One of the documents you are trying to access has been deleted, please try again after refreshing.')
3349 (missing - forbidden)._cache.update(FailedValue(exc))
3352 def get_metadata(self):
3354 Returns some metadata about the given records.
3356 :return: list of ownership dictionaries for each requested record
3357 :rtype: list of dictionaries with the following keys:
3360 * create_uid: user who created the record
3361 * create_date: date when the record was created
3362 * write_uid: last user who changed the record
3363 * write_date: date of the last change to the record
3364 * xmlid: XML ID to use to refer to this record (if there is one), in format ``module.name``
3367 if self._log_access:
3368 fields += ['create_uid', 'create_date', 'write_uid', 'write_date']
3369 quoted_table = '"%s"' % self._table
3370 fields_str = ",".join('%s.%s' % (quoted_table, field) for field in fields)
3371 query = '''SELECT %s, __imd.module, __imd.name
3372 FROM %s LEFT JOIN ir_model_data __imd
3373 ON (__imd.model = %%s and __imd.res_id = %s.id)
3374 WHERE %s.id IN %%s''' % (fields_str, quoted_table, quoted_table, quoted_table)
3375 self._cr.execute(query, (self._name, tuple(self.ids)))
3376 res = self._cr.dictfetchall()
3378 uids = set(r[k] for r in res for k in ['write_uid', 'create_uid'] if r.get(k))
3379 names = dict(self.env['res.users'].browse(uids).name_get())
3383 value = r[key] = r[key] or False
3384 if key in ('write_uid', 'create_uid') and value in names:
3385 r[key] = (value, names[value])
3386 r['xmlid'] = ("%(module)s.%(name)s" % r) if r['name'] else False
3387 del r['name'], r['module']
3390 def _check_concurrency(self, cr, ids, context):
3393 if not (context.get(self.CONCURRENCY_CHECK_FIELD) and self._log_access):
3395 check_clause = "(id = %s AND %s < COALESCE(write_date, create_date, (now() at time zone 'UTC'))::timestamp)"
3396 for sub_ids in cr.split_for_in_conditions(ids):
3399 id_ref = "%s,%s" % (self._name, id)
3400 update_date = context[self.CONCURRENCY_CHECK_FIELD].pop(id_ref, None)
3402 ids_to_check.extend([id, update_date])
3403 if not ids_to_check:
3405 cr.execute("SELECT id FROM %s WHERE %s" % (self._table, " OR ".join([check_clause]*(len(ids_to_check)/2))), tuple(ids_to_check))
3408 # mention the first one only to keep the error message readable
3409 raise except_orm('ConcurrencyException', _('A document was modified since you last viewed it (%s:%d)') % (self._description, res[0]))
3411 def _check_record_rules_result_count(self, cr, uid, ids, result_ids, operation, context=None):
3412 """Verify the returned rows after applying record rules matches
3413 the length of `ids`, and raise an appropriate exception if it does not.
3417 ids, result_ids = set(ids), set(result_ids)
3418 missing_ids = ids - result_ids
3420 # Attempt to distinguish record rule restriction vs deleted records,
3421 # to provide a more specific error message - check if the missinf
3422 cr.execute('SELECT id FROM ' + self._table + ' WHERE id IN %s', (tuple(missing_ids),))
3423 forbidden_ids = [x[0] for x in cr.fetchall()]
3425 # the missing ids are (at least partially) hidden by access rules
3426 if uid == SUPERUSER_ID:
3428 _logger.warning('Access Denied by record rules for operation: %s on record ids: %r, uid: %s, model: %s', operation, forbidden_ids, uid, self._name)
3429 raise except_orm(_('Access Denied'),
3430 _('The requested operation cannot be completed due to security restrictions. Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
3431 (self._description, operation))
3433 # If we get here, the missing_ids are not in the database
3434 if operation in ('read','unlink'):
3435 # No need to warn about deleting an already deleted record.
3436 # And no error when reading a record that was deleted, to prevent spurious
3437 # errors for non-transactional search/read sequences coming from clients
3439 _logger.warning('Failed operation on deleted record(s): %s, uid: %s, model: %s', operation, uid, self._name)
3440 raise except_orm(_('Missing document(s)'),
3441 _('One of the documents you are trying to access has been deleted, please try again after refreshing.'))
3444 def check_access_rights(self, cr, uid, operation, raise_exception=True): # no context on purpose.
3445 """Verifies that the operation given by ``operation`` is allowed for the user
3446 according to the access rights."""
3447 return self.pool.get('ir.model.access').check(cr, uid, self._name, operation, raise_exception)
3449 def check_access_rule(self, cr, uid, ids, operation, context=None):
3450 """Verifies that the operation given by ``operation`` is allowed for the user
3451 according to ir.rules.
3453 :param operation: one of ``write``, ``unlink``
3454 :raise except_orm: * if current ir.rules do not permit this operation.
3455 :return: None if the operation is allowed
3457 if uid == SUPERUSER_ID:
3460 if self.is_transient():
3461 # Only one single implicit access rule for transient models: owner only!
3462 # This is ok to hardcode because we assert that TransientModels always
3463 # have log_access enabled so that the create_uid column is always there.
3464 # And even with _inherits, these fields are always present in the local
3465 # table too, so no need for JOINs.
3466 cr.execute("""SELECT distinct create_uid
3468 WHERE id IN %%s""" % self._table, (tuple(ids),))
3469 uids = [x[0] for x in cr.fetchall()]
3470 if len(uids) != 1 or uids[0] != uid:
3471 raise except_orm(_('Access Denied'),
3472 _('For this kind of document, you may only access records you created yourself.\n\n(Document type: %s)') % (self._description,))
3474 where_clause, where_params, tables = self.pool.get('ir.rule').domain_get(cr, uid, self._name, operation, context=context)
3476 where_clause = ' and ' + ' and '.join(where_clause)
3477 for sub_ids in cr.split_for_in_conditions(ids):
3478 cr.execute('SELECT ' + self._table + '.id FROM ' + ','.join(tables) +
3479 ' WHERE ' + self._table + '.id IN %s' + where_clause,
3480 [sub_ids] + where_params)
3481 returned_ids = [x['id'] for x in cr.dictfetchall()]
3482 self._check_record_rules_result_count(cr, uid, sub_ids, returned_ids, operation, context=context)
3484 def create_workflow(self, cr, uid, ids, context=None):
3485 """Create a workflow instance for each given record IDs."""
3486 from openerp import workflow
3488 workflow.trg_create(uid, self._name, res_id, cr)
3489 # self.invalidate_cache(cr, uid, context=context) ?
3492 def delete_workflow(self, cr, uid, ids, context=None):
3493 """Delete the workflow instances bound to the given record IDs."""
3494 from openerp import workflow
3496 workflow.trg_delete(uid, self._name, res_id, cr)
3497 self.invalidate_cache(cr, uid, context=context)
3500 def step_workflow(self, cr, uid, ids, context=None):
3501 """Reevaluate the workflow instances of the given record IDs."""
3502 from openerp import workflow
3504 workflow.trg_write(uid, self._name, res_id, cr)
3505 # self.invalidate_cache(cr, uid, context=context) ?
3508 def signal_workflow(self, cr, uid, ids, signal, context=None):
3509 """Send given workflow signal and return a dict mapping ids to workflow results"""
3510 from openerp import workflow
3513 result[res_id] = workflow.trg_validate(uid, self._name, res_id, signal, cr)
3514 # self.invalidate_cache(cr, uid, context=context) ?
3517 def redirect_workflow(self, cr, uid, old_new_ids, context=None):
3518 """ Rebind the workflow instance bound to the given 'old' record IDs to
3519 the given 'new' IDs. (``old_new_ids`` is a list of pairs ``(old, new)``.
3521 from openerp import workflow
3522 for old_id, new_id in old_new_ids:
3523 workflow.trg_redirect(uid, self._name, old_id, new_id, cr)
3524 self.invalidate_cache(cr, uid, context=context)
3527 def unlink(self, cr, uid, ids, context=None):
3530 Deletes the records of the current set
3532 :raise AccessError: * if user has no unlink rights on the requested object
3533 * if user tries to bypass access rules for unlink on the requested object
3534 :raise UserError: if the record is default property for other records
3539 if isinstance(ids, (int, long)):
3542 result_store = self._store_get_values(cr, uid, ids, self._all_columns.keys(), context)
3544 # for recomputing new-style fields
3545 recs = self.browse(cr, uid, ids, context)
3546 recs.modified(self._fields)
3548 self._check_concurrency(cr, ids, context)
3550 self.check_access_rights(cr, uid, 'unlink')
3552 ir_property = self.pool.get('ir.property')
3554 # Check if the records are used as default properties.
3555 domain = [('res_id', '=', False),
3556 ('value_reference', 'in', ['%s,%s' % (self._name, i) for i in ids]),
3558 if ir_property.search(cr, uid, domain, context=context):
3559 raise except_orm(_('Error'), _('Unable to delete this document because it is used as a default property'))
3561 # Delete the records' properties.
3562 property_ids = ir_property.search(cr, uid, [('res_id', 'in', ['%s,%s' % (self._name, i) for i in ids])], context=context)
3563 ir_property.unlink(cr, uid, property_ids, context=context)
3565 self.delete_workflow(cr, uid, ids, context=context)
3567 self.check_access_rule(cr, uid, ids, 'unlink', context=context)
3568 pool_model_data = self.pool.get('ir.model.data')
3569 ir_values_obj = self.pool.get('ir.values')
3570 ir_attachment_obj = self.pool.get('ir.attachment')
3571 for sub_ids in cr.split_for_in_conditions(ids):
3572 cr.execute('delete from ' + self._table + ' ' \
3573 'where id IN %s', (sub_ids,))
3575 # Removing the ir_model_data reference if the record being deleted is a record created by xml/csv file,
3576 # as these are not connected with real database foreign keys, and would be dangling references.
3577 # Note: following steps performed as admin to avoid access rights restrictions, and with no context
3578 # to avoid possible side-effects during admin calls.
3579 # Step 1. Calling unlink of ir_model_data only for the affected IDS
3580 reference_ids = pool_model_data.search(cr, SUPERUSER_ID, [('res_id','in',list(sub_ids)),('model','=',self._name)])
3581 # Step 2. Marching towards the real deletion of referenced records
3583 pool_model_data.unlink(cr, SUPERUSER_ID, reference_ids)
3585 # For the same reason, removing the record relevant to ir_values
3586 ir_value_ids = ir_values_obj.search(cr, uid,
3587 ['|',('value','in',['%s,%s' % (self._name, sid) for sid in sub_ids]),'&',('res_id','in',list(sub_ids)),('model','=',self._name)],
3590 ir_values_obj.unlink(cr, uid, ir_value_ids, context=context)
3592 # For the same reason, removing the record relevant to ir_attachment
3593 # The search is performed with sql as the search method of ir_attachment is overridden to hide attachments of deleted records
3594 cr.execute('select id from ir_attachment where res_model = %s and res_id in %s', (self._name, sub_ids))
3595 ir_attachment_ids = [ir_attachment[0] for ir_attachment in cr.fetchall()]
3596 if ir_attachment_ids:
3597 ir_attachment_obj.unlink(cr, uid, ir_attachment_ids, context=context)
3599 # invalidate the *whole* cache, since the orm does not handle all
3600 # changes made in the database, like cascading delete!
3601 recs.invalidate_cache()
3603 for order, obj_name, store_ids, fields in result_store:
3604 if obj_name == self._name:
3605 effective_store_ids = set(store_ids) - set(ids)
3607 effective_store_ids = store_ids
3608 if effective_store_ids:
3609 obj = self.pool[obj_name]
3610 cr.execute('select id from '+obj._table+' where id IN %s', (tuple(effective_store_ids),))
3611 rids = map(lambda x: x[0], cr.fetchall())
3613 obj._store_set_values(cr, uid, rids, fields, context)
3615 # recompute new-style fields
3624 def write(self, vals):
3627 Updates all records in the current set with the provided values.
3629 :param dict vals: fields to update and the value to set on them e.g::
3631 {'foo': 1, 'bar': "Qux"}
3633 will set the field ``foo`` to ``1`` and the field ``bar`` to
3634 ``"Qux"`` if those are valid (otherwise it will trigger an error).
3636 :raise AccessError: * if user has no write rights on the requested object
3637 * if user tries to bypass access rules for write on the requested object
3638 :raise ValidateError: if user tries to enter invalid value for a field that is not in selection
3639 :raise UserError: if a loop would be created in a hierarchy of objects a result of the operation (such as setting an object as its own parent)
3641 .. _openerp/models/relationals/format:
3643 .. note:: Relational fields use a special "commands" format to manipulate their values
3645 This format is a list of command triplets executed sequentially,
3646 possible command triplets are:
3648 ``(0, _, values: dict)``
3649 links to a new record created from the provided values
3650 ``(1, id, values: dict)``
3651 updates the already-linked record of id ``id`` with the
3654 unlinks and deletes the linked record of id ``id``
3656 unlinks the linked record of id ``id`` without deleting it
3658 links to an existing record of id ``id``
3660 unlinks all records in the relation, equivalent to using
3661 the command ``3`` on every linked record
3663 replaces the existing list of linked records by the provoded
3664 ones, equivalent to using ``5`` then ``4`` for each id in
3667 (in command triplets, ``_`` values are ignored and can be
3668 anything, generally ``0`` or ``False``)
3670 Any command can be used on :class:`~openerp.fields.Many2many`,
3671 only ``0``, ``1`` and ``2`` can be used on
3672 :class:`~openerp.fields.One2many`.
3677 self._check_concurrency(self._ids)
3678 self.check_access_rights('write')
3680 # No user-driven update of these columns
3681 for field in itertools.chain(MAGIC_COLUMNS, ('parent_left', 'parent_right')):
3682 vals.pop(field, None)
3684 # split up fields into old-style and pure new-style ones
3685 old_vals, new_vals, unknown = {}, {}, []
3686 for key, val in vals.iteritems():
3687 if key in self._columns:
3689 elif key in self._fields:
3695 _logger.warning("%s.write() with unknown fields: %s", self._name, ', '.join(sorted(unknown)))
3697 # write old-style fields with (low-level) method _write
3699 self._write(old_vals)
3701 # put the values of pure new-style fields into cache, and inverse them
3704 record._cache.update(record._convert_to_cache(new_vals, update=True))
3705 for key in new_vals:
3706 self._fields[key].determine_inverse(self)
3710 def _write(self, cr, user, ids, vals, context=None):
3711 # low-level implementation of write()
3716 self.check_field_access_rights(cr, user, 'write', vals.keys())
3717 deleted_related = defaultdict(list)
3718 for field in vals.keys():
3720 if field in self._columns:
3721 fobj = self._columns[field]
3722 elif field in self._inherit_fields:
3723 fobj = self._inherit_fields[field][2]
3726 if fobj._type in ['one2many', 'many2many'] and vals[field]:
3727 for wtuple in vals[field]:
3728 if isinstance(wtuple, (tuple, list)) and wtuple[0] == 2:
3729 deleted_related[fobj._obj].append(wtuple[1])
3734 for group in groups:
3735 module = group.split(".")[0]
3736 grp = group.split(".")[1]
3737 cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name=%s and module=%s and model=%s) and uid=%s", \
3738 (grp, module, 'res.groups', user))
3739 readonly = cr.fetchall()
3740 if readonly[0][0] >= 1:
3747 result = self._store_get_values(cr, user, ids, vals.keys(), context) or []
3749 # for recomputing new-style fields
3750 recs = self.browse(cr, user, ids, context)
3751 modified_fields = list(vals)
3752 if self._log_access:
3753 modified_fields += ['write_date', 'write_uid']
3754 recs.modified(modified_fields)
3756 parents_changed = []
3757 parent_order = self._parent_order or self._order
3758 if self._parent_store and (self._parent_name in vals) and not context.get('defer_parent_store_computation'):
3759 # The parent_left/right computation may take up to
3760 # 5 seconds. No need to recompute the values if the
3761 # parent is the same.
3762 # Note: to respect parent_order, nodes must be processed in
3763 # order, so ``parents_changed`` must be ordered properly.
3764 parent_val = vals[self._parent_name]
3766 query = "SELECT id FROM %s WHERE id IN %%s AND (%s != %%s OR %s IS NULL) ORDER BY %s" % \
3767 (self._table, self._parent_name, self._parent_name, parent_order)
3768 cr.execute(query, (tuple(ids), parent_val))
3770 query = "SELECT id FROM %s WHERE id IN %%s AND (%s IS NOT NULL) ORDER BY %s" % \
3771 (self._table, self._parent_name, parent_order)
3772 cr.execute(query, (tuple(ids),))
3773 parents_changed = map(operator.itemgetter(0), cr.fetchall())
3780 totranslate = context.get('lang', False) and (context['lang'] != 'en_US')
3782 field_column = self._all_columns.get(field) and self._all_columns.get(field).column
3783 if field_column and field_column.deprecated:
3784 _logger.warning('Field %s.%s is deprecated: %s', self._name, field, field_column.deprecated)
3785 if field in self._columns:
3786 if self._columns[field]._classic_write and not (hasattr(self._columns[field], '_fnct_inv')):
3787 if (not totranslate) or not self._columns[field].translate:
3788 upd0.append('"'+field+'"='+self._columns[field]._symbol_set[0])
3789 upd1.append(self._columns[field]._symbol_set[1](vals[field]))
3790 direct.append(field)
3792 upd_todo.append(field)
3794 updend.append(field)
3795 if field in self._columns \
3796 and hasattr(self._columns[field], 'selection') \
3798 self._check_selection_field_value(cr, user, field, vals[field], context=context)
3800 if self._log_access:
3801 upd0.append('write_uid=%s')
3802 upd0.append("write_date=(now() at time zone 'UTC')")
3806 self.check_access_rule(cr, user, ids, 'write', context=context)
3807 for sub_ids in cr.split_for_in_conditions(ids):
3808 cr.execute('update ' + self._table + ' set ' + ','.join(upd0) + ' ' \
3809 'where id IN %s', upd1 + [sub_ids])
3810 if cr.rowcount != len(sub_ids):
3811 raise MissingError(_('One of the records you are trying to modify has already been deleted (Document type: %s).') % self._description)
3816 if self._columns[f].translate:
3817 src_trans = self.pool[self._name].read(cr, user, ids, [f])[0][f]
3820 # Inserting value to DB
3821 context_wo_lang = dict(context, lang=None)
3822 self.write(cr, user, ids, {f: vals[f]}, context=context_wo_lang)
3823 self.pool.get('ir.translation')._set_ids(cr, user, self._name+','+f, 'model', context['lang'], ids, vals[f], src_trans)
3825 # call the 'set' method of fields which are not classic_write
3826 upd_todo.sort(lambda x, y: self._columns[x].priority-self._columns[y].priority)
3828 # default element in context must be removed when call a one2many or many2many
3829 rel_context = context.copy()
3830 for c in context.items():
3831 if c[0].startswith('default_'):
3832 del rel_context[c[0]]
3834 for field in upd_todo:
3836 result += self._columns[field].set(cr, self, id, field, vals[field], user, context=rel_context) or []
3838 unknown_fields = updend[:]
3839 for table in self._inherits:
3840 col = self._inherits[table]
3842 for sub_ids in cr.split_for_in_conditions(ids):
3843 cr.execute('select distinct "'+col+'" from "'+self._table+'" ' \
3844 'where id IN %s', (sub_ids,))
3845 nids.extend([x[0] for x in cr.fetchall()])
3849 if self._inherit_fields[val][0] == table:
3851 unknown_fields.remove(val)
3853 self.pool[table].write(cr, user, nids, v, context)
3857 'No such field(s) in model %s: %s.',
3858 self._name, ', '.join(unknown_fields))
3860 # check Python constraints
3861 recs._validate_fields(vals)
3863 # TODO: use _order to set dest at the right position and not first node of parent
3864 # We can't defer parent_store computation because the stored function
3865 # fields that are computer may refer (directly or indirectly) to
3866 # parent_left/right (via a child_of domain)
3869 self.pool._init_parent[self._name] = True
3871 order = self._parent_order or self._order
3872 parent_val = vals[self._parent_name]
3874 clause, params = '%s=%%s' % (self._parent_name,), (parent_val,)
3876 clause, params = '%s IS NULL' % (self._parent_name,), ()
3878 for id in parents_changed:
3879 cr.execute('SELECT parent_left, parent_right FROM %s WHERE id=%%s' % (self._table,), (id,))
3880 pleft, pright = cr.fetchone()
3881 distance = pright - pleft + 1
3883 # Positions of current siblings, to locate proper insertion point;
3884 # this can _not_ be fetched outside the loop, as it needs to be refreshed
3885 # after each update, in case several nodes are sequentially inserted one
3886 # next to the other (i.e computed incrementally)
3887 cr.execute('SELECT parent_right, id FROM %s WHERE %s ORDER BY %s' % (self._table, clause, parent_order), params)
3888 parents = cr.fetchall()
3890 # Find Position of the element
3892 for (parent_pright, parent_id) in parents:
3895 position = parent_pright and parent_pright + 1 or 1
3897 # It's the first node of the parent
3902 cr.execute('select parent_left from '+self._table+' where id=%s', (parent_val,))
3903 position = cr.fetchone()[0] + 1
3905 if pleft < position <= pright:
3906 raise except_orm(_('UserError'), _('Recursivity Detected.'))
3908 if pleft < position:
3909 cr.execute('update '+self._table+' set parent_left=parent_left+%s where parent_left>=%s', (distance, position))
3910 cr.execute('update '+self._table+' set parent_right=parent_right+%s where parent_right>=%s', (distance, position))
3911 cr.execute('update '+self._table+' set parent_left=parent_left+%s, parent_right=parent_right+%s where parent_left>=%s and parent_left<%s', (position-pleft, position-pleft, pleft, pright))
3913 cr.execute('update '+self._table+' set parent_left=parent_left+%s where parent_left>=%s', (distance, position))
3914 cr.execute('update '+self._table+' set parent_right=parent_right+%s where parent_right>=%s', (distance, position))
3915 cr.execute('update '+self._table+' set parent_left=parent_left-%s, parent_right=parent_right-%s where parent_left>=%s and parent_left<%s', (pleft-position+distance, pleft-position+distance, pleft+distance, pright+distance))
3916 recs.invalidate_cache(['parent_left', 'parent_right'])
3918 result += self._store_get_values(cr, user, ids, vals.keys(), context)
3921 # for recomputing new-style fields
3922 recs.modified(modified_fields)
3925 for order, model_name, ids_to_update, fields_to_recompute in result:
3926 key = (model_name, tuple(fields_to_recompute))
3927 done.setdefault(key, {})
3928 # avoid to do several times the same computation
3930 for id in ids_to_update:
3931 if id not in done[key]:
3932 done[key][id] = True
3933 if id not in deleted_related[model_name]:
3935 self.pool[model_name]._store_set_values(cr, user, todo, fields_to_recompute, context)
3937 # recompute new-style fields
3938 if context.get('recompute', True):
3941 self.step_workflow(cr, user, ids, context=context)
3945 # TODO: Should set perm to user.xxx
3948 @api.returns('self', lambda value: value.id)
3949 def create(self, vals):
3950 """ create(vals) -> record
3952 Creates a new record for the model.
3954 The new record is initialized using the values from ``vals`` and
3955 if necessary those from :meth:`~.default_get`.
3958 values for the model's fields, as a dictionary::
3960 {'field_name': field_value, ...}
3962 see :meth:`~.write` for details
3963 :return: new record created
3964 :raise AccessError: * if user has no create rights on the requested object
3965 * if user tries to bypass access rules for create on the requested object
3966 :raise ValidateError: if user tries to enter invalid value for a field that is not in selection
3967 :raise UserError: if a loop would be created in a hierarchy of objects a result of the operation (such as setting an object as its own parent)
3969 self.check_access_rights('create')
3971 # add missing defaults, and drop fields that may not be set by user
3972 vals = self._add_missing_default_values(vals)
3973 for field in itertools.chain(MAGIC_COLUMNS, ('parent_left', 'parent_right')):
3974 vals.pop(field, None)
3976 # split up fields into old-style and pure new-style ones
3977 old_vals, new_vals, unknown = {}, {}, []
3978 for key, val in vals.iteritems():
3979 if key in self._all_columns:
3981 elif key in self._fields:
3987 _logger.warning("%s.create() with unknown fields: %s", self._name, ', '.join(sorted(unknown)))
3989 # create record with old-style fields
3990 record = self.browse(self._create(old_vals))
3992 # put the values of pure new-style fields into cache, and inverse them
3993 record._cache.update(record._convert_to_cache(new_vals))
3994 for key in new_vals:
3995 self._fields[key].determine_inverse(record)
3999 def _create(self, cr, user, vals, context=None):
4000 # low-level implementation of create()
4004 if self.is_transient():
4005 self._transient_vacuum(cr, user)
4008 for v in self._inherits:
4009 if self._inherits[v] not in vals:
4012 tocreate[v] = {'id': vals[self._inherits[v]]}
4015 # list of column assignments defined as tuples like:
4016 # (column_name, format_string, column_value)
4017 # (column_name, sql_formula)
4018 # Those tuples will be used by the string formatting for the INSERT
4020 ('id', "nextval('%s')" % self._sequence),
4025 for v in vals.keys():
4026 if v in self._inherit_fields and v not in self._columns:
4027 (table, col, col_detail, original_parent) = self._inherit_fields[v]
4028 tocreate[table][v] = vals[v]
4031 if (v not in self._inherit_fields) and (v not in self._columns):
4033 unknown_fields.append(v)
4036 'No such field(s) in model %s: %s.',
4037 self._name, ', '.join(unknown_fields))
4039 for table in tocreate:
4040 if self._inherits[table] in vals:
4041 del vals[self._inherits[table]]
4043 record_id = tocreate[table].pop('id', None)
4045 if record_id is None or not record_id:
4046 record_id = self.pool[table].create(cr, user, tocreate[table], context=context)
4048 self.pool[table].write(cr, user, [record_id], tocreate[table], context=context)
4050 updates.append((self._inherits[table], '%s', record_id))
4052 #Start : Set bool fields to be False if they are not touched(to make search more powerful)
4053 bool_fields = [x for x in self._columns.keys() if self._columns[x]._type=='boolean']
4055 for bool_field in bool_fields:
4056 if bool_field not in vals:
4057 vals[bool_field] = False
4059 for field in vals.keys():
4061 if field in self._columns:
4062 fobj = self._columns[field]
4064 fobj = self._inherit_fields[field][2]
4070 for group in groups:
4071 module = group.split(".")[0]
4072 grp = group.split(".")[1]
4073 cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name='%s' and module='%s' and model='%s') and uid=%s" % \
4074 (grp, module, 'res.groups', user))
4075 readonly = cr.fetchall()
4076 if readonly[0][0] >= 1:
4079 elif readonly[0][0] == 0:
4087 current_field = self._columns[field]
4088 if current_field._classic_write:
4089 updates.append((field, '%s', current_field._symbol_set[1](vals[field])))
4091 #for the function fields that receive a value, we set them directly in the database
4092 #(they may be required), but we also need to trigger the _fct_inv()
4093 if (hasattr(current_field, '_fnct_inv')) and not isinstance(current_field, fields.related):
4094 #TODO: this way to special case the related fields is really creepy but it shouldn't be changed at
4095 #one week of the release candidate. It seems the only good way to handle correctly this is to add an
4096 #attribute to make a field `really readonly´ and thus totally ignored by the create()... otherwise
4097 #if, for example, the related has a default value (for usability) then the fct_inv is called and it
4098 #may raise some access rights error. Changing this is a too big change for now, and is thus postponed
4099 #after the release but, definitively, the behavior shouldn't be different for related and function
4101 upd_todo.append(field)
4103 #TODO: this `if´ statement should be removed because there is no good reason to special case the fields
4104 #related. See the above TODO comment for further explanations.
4105 if not isinstance(current_field, fields.related):
4106 upd_todo.append(field)
4107 if field in self._columns \
4108 and hasattr(current_field, 'selection') \
4110 self._check_selection_field_value(cr, user, field, vals[field], context=context)
4111 if self._log_access:
4112 updates.append(('create_uid', '%s', user))
4113 updates.append(('write_uid', '%s', user))
4114 updates.append(('create_date', "(now() at time zone 'UTC')"))
4115 updates.append(('write_date', "(now() at time zone 'UTC')"))
4117 # the list of tuples used in this formatting corresponds to
4118 # tuple(field_name, format, value)
4119 # In some case, for example (id, create_date, write_date) we does not
4120 # need to read the third value of the tuple, because the real value is
4121 # encoded in the second value (the format).
4123 """INSERT INTO "%s" (%s) VALUES(%s) RETURNING id""" % (
4125 ', '.join('"%s"' % u[0] for u in updates),
4126 ', '.join(u[1] for u in updates)
4128 tuple([u[2] for u in updates if len(u) > 2])
4131 id_new, = cr.fetchone()
4132 recs = self.browse(cr, user, id_new, context)
4133 upd_todo.sort(lambda x, y: self._columns[x].priority-self._columns[y].priority)
4135 if self._parent_store and not context.get('defer_parent_store_computation'):
4137 self.pool._init_parent[self._name] = True
4139 parent = vals.get(self._parent_name, False)
4141 cr.execute('select parent_right from '+self._table+' where '+self._parent_name+'=%s order by '+(self._parent_order or self._order), (parent,))
4143 result_p = cr.fetchall()
4144 for (pleft,) in result_p:
4149 cr.execute('select parent_left from '+self._table+' where id=%s', (parent,))
4150 pleft_old = cr.fetchone()[0]
4153 cr.execute('select max(parent_right) from '+self._table)
4154 pleft = cr.fetchone()[0] or 0
4155 cr.execute('update '+self._table+' set parent_left=parent_left+2 where parent_left>%s', (pleft,))
4156 cr.execute('update '+self._table+' set parent_right=parent_right+2 where parent_right>%s', (pleft,))
4157 cr.execute('update '+self._table+' set parent_left=%s,parent_right=%s where id=%s', (pleft+1, pleft+2, id_new))
4158 recs.invalidate_cache(['parent_left', 'parent_right'])
4160 # default element in context must be remove when call a one2many or many2many
4161 rel_context = context.copy()
4162 for c in context.items():
4163 if c[0].startswith('default_'):
4164 del rel_context[c[0]]
4167 for field in upd_todo:
4168 result += self._columns[field].set(cr, self, id_new, field, vals[field], user, rel_context) or []
4170 # check Python constraints
4171 recs._validate_fields(vals)
4173 # invalidate and mark new-style fields to recompute
4174 modified_fields = list(vals)
4175 if self._log_access:
4176 modified_fields += ['create_uid', 'create_date', 'write_uid', 'write_date']
4177 recs.modified(modified_fields)
4179 if context.get('recompute', True):
4180 result += self._store_get_values(cr, user, [id_new],
4181 list(set(vals.keys() + self._inherits.values())),
4185 for order, model_name, ids, fields2 in result:
4186 if not (model_name, ids, fields2) in done:
4187 self.pool[model_name]._store_set_values(cr, user, ids, fields2, context)
4188 done.append((model_name, ids, fields2))
4189 # recompute new-style fields
4192 if self._log_create and context.get('recompute', True):
4193 message = self._description + \
4195 self.name_get(cr, user, [id_new], context=context)[0][1] + \
4196 "' " + _("created.")
4197 self.log(cr, user, id_new, message, True, context=context)
4199 self.check_access_rule(cr, user, [id_new], 'create', context=context)
4200 self.create_workflow(cr, user, [id_new], context=context)
4203 def _store_get_values(self, cr, uid, ids, fields, context):
4204 """Returns an ordered list of fields.function to call due to
4205 an update operation on ``fields`` of records with ``ids``,
4206 obtained by calling the 'store' triggers of these fields,
4207 as setup by their 'store' attribute.
4209 :return: [(priority, model_name, [record_ids,], [function_fields,])]
4211 if fields is None: fields = []
4212 stored_functions = self.pool._store_function.get(self._name, [])
4214 # use indexed names for the details of the stored_functions:
4215 model_name_, func_field_to_compute_, target_ids_func_, trigger_fields_, priority_ = range(5)
4217 # only keep store triggers that should be triggered for the ``fields``
4219 triggers_to_compute = (
4220 f for f in stored_functions
4221 if not f[trigger_fields_] or set(fields).intersection(f[trigger_fields_])
4225 target_id_results = {}
4226 for store_trigger in triggers_to_compute:
4227 target_func_id_ = id(store_trigger[target_ids_func_])
4228 if target_func_id_ not in target_id_results:
4229 # use admin user for accessing objects having rules defined on store fields
4230 target_id_results[target_func_id_] = [i for i in store_trigger[target_ids_func_](self, cr, SUPERUSER_ID, ids, context) if i]
4231 target_ids = target_id_results[target_func_id_]
4233 # the compound key must consider the priority and model name
4234 key = (store_trigger[priority_], store_trigger[model_name_])
4235 for target_id in target_ids:
4236 to_compute_map.setdefault(key, {}).setdefault(target_id,set()).add(tuple(store_trigger))
4238 # Here to_compute_map looks like:
4239 # { (10, 'model_a') : { target_id1: [ (trigger_1_tuple, trigger_2_tuple) ], ... }
4240 # (20, 'model_a') : { target_id2: [ (trigger_3_tuple, trigger_4_tuple) ], ... }
4241 # (99, 'model_a') : { target_id1: [ (trigger_5_tuple, trigger_6_tuple) ], ... }
4244 # Now we need to generate the batch function calls list
4246 # { (10, 'model_a') : [(10, 'model_a', [record_ids,], [function_fields,])] }
4248 for ((priority,model), id_map) in to_compute_map.iteritems():
4249 trigger_ids_maps = {}
4250 # function_ids_maps =
4251 # { (function_1_tuple, function_2_tuple) : [target_id1, target_id2, ..] }
4252 for target_id, triggers in id_map.iteritems():
4253 trigger_ids_maps.setdefault(tuple(triggers), []).append(target_id)
4254 for triggers, target_ids in trigger_ids_maps.iteritems():
4255 call_map.setdefault((priority,model),[]).append((priority, model, target_ids,
4256 [t[func_field_to_compute_] for t in triggers]))
4259 result = reduce(operator.add, (call_map[k] for k in sorted(call_map)))
4262 def _store_set_values(self, cr, uid, ids, fields, context):
4263 """Calls the fields.function's "implementation function" for all ``fields``, on records with ``ids`` (taking care of
4264 respecting ``multi`` attributes), and stores the resulting values in the database directly."""
4269 if self._log_access:
4270 cr.execute('select id,write_date from '+self._table+' where id IN %s', (tuple(ids),))
4274 field_dict.setdefault(r[0], [])
4275 res_date = time.strptime((r[1])[:19], '%Y-%m-%d %H:%M:%S')
4276 write_date = datetime.datetime.fromtimestamp(time.mktime(res_date))
4277 for i in self.pool._store_function.get(self._name, []):
4279 up_write_date = write_date + datetime.timedelta(hours=i[5])
4280 if datetime.datetime.now() < up_write_date:
4282 field_dict[r[0]].append(i[1])
4288 if self._columns[f]._multi not in keys:
4289 keys.append(self._columns[f]._multi)
4290 todo.setdefault(self._columns[f]._multi, [])
4291 todo[self._columns[f]._multi].append(f)
4295 # use admin user for accessing objects having rules defined on store fields
4296 result = self._columns[val[0]].get(cr, self, ids, val, SUPERUSER_ID, context=context)
4297 for id, value in result.items():
4299 for f in value.keys():
4300 if f in field_dict[id]:
4307 if self._columns[v]._type == 'many2one':
4309 value[v] = value[v][0]
4312 upd0.append('"'+v+'"='+self._columns[v]._symbol_set[0])
4313 upd1.append(self._columns[v]._symbol_set[1](value[v]))
4316 cr.execute('update "' + self._table + '" set ' + \
4317 ','.join(upd0) + ' where id = %s', upd1)
4321 # use admin user for accessing objects having rules defined on store fields
4322 result = self._columns[f].get(cr, self, ids, f, SUPERUSER_ID, context=context)
4323 for r in result.keys():
4325 if r in field_dict.keys():
4326 if f in field_dict[r]:
4328 for id, value in result.items():
4329 if self._columns[f]._type == 'many2one':
4334 cr.execute('update "' + self._table + '" set ' + \
4335 '"'+f+'"='+self._columns[f]._symbol_set[0] + ' where id = %s', (self._columns[f]._symbol_set[1](value), id))
4337 # invalidate and mark new-style fields to recompute
4338 self.browse(cr, uid, ids, context).modified(fields)
4342 # TODO: ameliorer avec NULL
4343 def _where_calc(self, cr, user, domain, active_test=True, context=None):
4344 """Computes the WHERE clause needed to implement an OpenERP domain.
4345 :param domain: the domain to compute
4347 :param active_test: whether the default filtering of records with ``active``
4348 field set to ``False`` should be applied.
4349 :return: the query expressing the given domain as provided in domain
4350 :rtype: osv.query.Query
4355 # if the object has a field named 'active', filter out all inactive
4356 # records unless they were explicitely asked for
4357 if 'active' in self._all_columns and (active_test and context.get('active_test', True)):
4359 # the item[0] trick below works for domain items and '&'/'|'/'!'
4361 if not any(item[0] == 'active' for item in domain):
4362 domain.insert(0, ('active', '=', 1))
4364 domain = [('active', '=', 1)]
4367 e = expression.expression(cr, user, domain, self, context)
4368 tables = e.get_tables()
4369 where_clause, where_params = e.to_sql()
4370 where_clause = where_clause and [where_clause] or []
4372 where_clause, where_params, tables = [], [], ['"%s"' % self._table]
4374 return Query(tables, where_clause, where_params)
4376 def _check_qorder(self, word):
4377 if not regex_order.match(word):
4378 raise except_orm(_('AccessError'), _('Invalid "order" specified. A valid "order" specification is a comma-separated list of valid field names (optionally followed by asc/desc for the direction)'))
4381 def _apply_ir_rules(self, cr, uid, query, mode='read', context=None):
4382 """Add what's missing in ``query`` to implement all appropriate ir.rules
4383 (using the ``model_name``'s rules or the current model's rules if ``model_name`` is None)
4385 :param query: the current query object
4387 if uid == SUPERUSER_ID:
4390 def apply_rule(added_clause, added_params, added_tables, parent_model=None):
4391 """ :param parent_model: name of the parent model, if the added
4392 clause comes from a parent model
4396 # as inherited rules are being applied, we need to add the missing JOIN
4397 # to reach the parent table (if it was not JOINed yet in the query)
4398 parent_alias = self._inherits_join_add(self, parent_model, query)
4399 # inherited rules are applied on the external table -> need to get the alias and replace
4400 parent_table = self.pool[parent_model]._table
4401 added_clause = [clause.replace('"%s"' % parent_table, '"%s"' % parent_alias) for clause in added_clause]
4402 # change references to parent_table to parent_alias, because we now use the alias to refer to the table
4404 for table in added_tables:
4405 # table is just a table name -> switch to the full alias
4406 if table == '"%s"' % parent_table:
4407 new_tables.append('"%s" as "%s"' % (parent_table, parent_alias))
4408 # table is already a full statement -> replace reference to the table to its alias, is correct with the way aliases are generated
4410 new_tables.append(table.replace('"%s"' % parent_table, '"%s"' % parent_alias))
4411 added_tables = new_tables
4412 query.where_clause += added_clause
4413 query.where_clause_params += added_params
4414 for table in added_tables:
4415 if table not in query.tables:
4416 query.tables.append(table)
4420 # apply main rules on the object
4421 rule_obj = self.pool.get('ir.rule')
4422 rule_where_clause, rule_where_clause_params, rule_tables = rule_obj.domain_get(cr, uid, self._name, mode, context=context)
4423 apply_rule(rule_where_clause, rule_where_clause_params, rule_tables)
4425 # apply ir.rules from the parents (through _inherits)
4426 for inherited_model in self._inherits:
4427 rule_where_clause, rule_where_clause_params, rule_tables = rule_obj.domain_get(cr, uid, inherited_model, mode, context=context)
4428 apply_rule(rule_where_clause, rule_where_clause_params, rule_tables,
4429 parent_model=inherited_model)
4431 def _generate_m2o_order_by(self, order_field, query):
4433 Add possibly missing JOIN to ``query`` and generate the ORDER BY clause for m2o fields,
4434 either native m2o fields or function/related fields that are stored, including
4435 intermediate JOINs for inheritance if required.
4437 :return: the qualified field name to use in an ORDER BY clause to sort by ``order_field``
4439 if order_field not in self._columns and order_field in self._inherit_fields:
4440 # also add missing joins for reaching the table containing the m2o field
4441 qualified_field = self._inherits_join_calc(order_field, query)
4442 order_field_column = self._inherit_fields[order_field][2]
4444 qualified_field = '"%s"."%s"' % (self._table, order_field)
4445 order_field_column = self._columns[order_field]
4447 assert order_field_column._type == 'many2one', 'Invalid field passed to _generate_m2o_order_by()'
4448 if not order_field_column._classic_write and not getattr(order_field_column, 'store', False):
4449 _logger.debug("Many2one function/related fields must be stored " \
4450 "to be used as ordering fields! Ignoring sorting for %s.%s",
4451 self._name, order_field)
4454 # figure out the applicable order_by for the m2o
4455 dest_model = self.pool[order_field_column._obj]
4456 m2o_order = dest_model._order
4457 if not regex_order.match(m2o_order):
4458 # _order is complex, can't use it here, so we default to _rec_name
4459 m2o_order = dest_model._rec_name
4461 # extract the field names, to be able to qualify them and add desc/asc
4463 for order_part in m2o_order.split(","):
4464 m2o_order_list.append(order_part.strip().split(" ", 1)[0].strip())
4465 m2o_order = m2o_order_list
4467 # Join the dest m2o table if it's not joined yet. We use [LEFT] OUTER join here
4468 # as we don't want to exclude results that have NULL values for the m2o
4469 src_table, src_field = qualified_field.replace('"', '').split('.', 1)
4470 dst_alias, dst_alias_statement = query.add_join((src_table, dest_model._table, src_field, 'id', src_field), implicit=False, outer=True)
4471 qualify = lambda field: '"%s"."%s"' % (dst_alias, field)
4472 return map(qualify, m2o_order) if isinstance(m2o_order, list) else qualify(m2o_order)
4474 def _generate_order_by(self, order_spec, query):
4476 Attempt to consruct an appropriate ORDER BY clause based on order_spec, which must be
4477 a comma-separated list of valid field names, optionally followed by an ASC or DESC direction.
4479 :raise" except_orm in case order_spec is malformed
4481 order_by_clause = ''
4482 order_spec = order_spec or self._order
4484 order_by_elements = []
4485 self._check_qorder(order_spec)
4486 for order_part in order_spec.split(','):
4487 order_split = order_part.strip().split(' ')
4488 order_field = order_split[0].strip()
4489 order_direction = order_split[1].strip() if len(order_split) == 2 else ''
4492 if order_field == 'id':
4493 order_by_elements.append('"%s"."%s" %s' % (self._table, order_field, order_direction))
4494 elif order_field in self._columns:
4495 order_column = self._columns[order_field]
4496 if order_column._classic_read:
4497 inner_clause = '"%s"."%s"' % (self._table, order_field)
4498 elif order_column._type == 'many2one':
4499 inner_clause = self._generate_m2o_order_by(order_field, query)
4501 continue # ignore non-readable or "non-joinable" fields
4502 elif order_field in self._inherit_fields:
4503 parent_obj = self.pool[self._inherit_fields[order_field][3]]
4504 order_column = parent_obj._columns[order_field]
4505 if order_column._classic_read:
4506 inner_clause = self._inherits_join_calc(order_field, query)
4507 elif order_column._type == 'many2one':
4508 inner_clause = self._generate_m2o_order_by(order_field, query)
4510 continue # ignore non-readable or "non-joinable" fields
4512 raise ValueError( _("Sorting field %s not found on model %s") %( order_field, self._name))
4513 if order_column and order_column._type == 'boolean':
4514 inner_clause = "COALESCE(%s, false)" % inner_clause
4516 if isinstance(inner_clause, list):
4517 for clause in inner_clause:
4518 order_by_elements.append("%s %s" % (clause, order_direction))
4520 order_by_elements.append("%s %s" % (inner_clause, order_direction))
4521 if order_by_elements:
4522 order_by_clause = ",".join(order_by_elements)
4524 return order_by_clause and (' ORDER BY %s ' % order_by_clause) or ''
4526 def _search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False, access_rights_uid=None):
4528 Private implementation of search() method, allowing specifying the uid to use for the access right check.
4529 This is useful for example when filling in the selection list for a drop-down and avoiding access rights errors,
4530 by specifying ``access_rights_uid=1`` to bypass access rights check, but not ir.rules!
4531 This is ok at the security level because this method is private and not callable through XML-RPC.
4533 :param access_rights_uid: optional user ID to use when checking access rights
4534 (not for ir.rules, this is only for ir.model.access)
4538 self.check_access_rights(cr, access_rights_uid or user, 'read')
4540 # For transient models, restrict acces to the current user, except for the super-user
4541 if self.is_transient() and self._log_access and user != SUPERUSER_ID:
4542 args = expression.AND(([('create_uid', '=', user)], args or []))
4544 query = self._where_calc(cr, user, args, context=context)
4545 self._apply_ir_rules(cr, user, query, 'read', context=context)
4546 order_by = self._generate_order_by(order, query)
4547 from_clause, where_clause, where_clause_params = query.get_sql()
4549 where_str = where_clause and (" WHERE %s" % where_clause) or ''
4552 # Ignore order, limit and offset when just counting, they don't make sense and could
4554 query_str = 'SELECT count(1) FROM ' + from_clause + where_str
4555 cr.execute(query_str, where_clause_params)
4559 limit_str = limit and ' limit %d' % limit or ''
4560 offset_str = offset and ' offset %d' % offset or ''
4561 query_str = 'SELECT "%s".id FROM ' % self._table + from_clause + where_str + order_by + limit_str + offset_str
4562 cr.execute(query_str, where_clause_params)
4565 # TDE note: with auto_join, we could have several lines about the same result
4566 # i.e. a lead with several unread messages; we uniquify the result using
4567 # a fast way to do it while preserving order (http://www.peterbe.com/plog/uniqifiers-benchmark)
4568 def _uniquify_list(seq):
4570 return [x for x in seq if x not in seen and not seen.add(x)]
4572 return _uniquify_list([x[0] for x in res])
4574 # returns the different values ever entered for one field
4575 # this is used, for example, in the client when the user hits enter on
4577 def distinct_field_get(self, cr, uid, field, value, args=None, offset=0, limit=None):
4580 if field in self._inherit_fields:
4581 return self.pool[self._inherit_fields[field][0]].distinct_field_get(cr, uid, field, value, args, offset, limit)
4583 return self._columns[field].search(cr, self, args, field, value, offset, limit, uid)
4585 def copy_data(self, cr, uid, id, default=None, context=None):
4587 Copy given record's data with all its fields values
4589 :param cr: database cursor
4590 :param uid: current user id
4591 :param id: id of the record to copy
4592 :param default: field values to override in the original values of the copied record
4593 :type default: dictionary
4594 :param context: context arguments, like lang, time zone
4595 :type context: dictionary
4596 :return: dictionary containing all the field values
4602 # avoid recursion through already copied records in case of circular relationship
4603 seen_map = context.setdefault('__copy_data_seen', {})
4604 if id in seen_map.setdefault(self._name, []):
4606 seen_map[self._name].append(id)
4610 if 'state' not in default:
4611 if 'state' in self._defaults:
4612 if callable(self._defaults['state']):
4613 default['state'] = self._defaults['state'](self, cr, uid, context)
4615 default['state'] = self._defaults['state']
4617 # build a black list of fields that should not be copied
4618 blacklist = set(MAGIC_COLUMNS + ['parent_left', 'parent_right'])
4619 def blacklist_given_fields(obj):
4620 # blacklist the fields that are given by inheritance
4621 for other, field_to_other in obj._inherits.items():
4622 blacklist.add(field_to_other)
4623 if field_to_other in default:
4624 # all the fields of 'other' are given by the record: default[field_to_other],
4625 # except the ones redefined in self
4626 blacklist.update(set(self.pool[other]._all_columns) - set(self._columns))
4628 blacklist_given_fields(self.pool[other])
4629 # blacklist deprecated fields
4630 for name, field in obj._columns.items():
4631 if field.deprecated:
4634 blacklist_given_fields(self)
4637 fields_to_copy = dict((f,fi) for f, fi in self._all_columns.iteritems()
4640 if f not in blacklist)
4642 data = self.read(cr, uid, [id], fields_to_copy.keys(), context=context)
4646 raise IndexError( _("Record #%d of %s not found, cannot copy!") %( id, self._name))
4649 for f, colinfo in fields_to_copy.iteritems():
4650 field = colinfo.column
4651 if field._type == 'many2one':
4652 res[f] = data[f] and data[f][0]
4653 elif field._type == 'one2many':
4654 other = self.pool[field._obj]
4655 # duplicate following the order of the ids because we'll rely on
4656 # it later for copying translations in copy_translation()!
4657 lines = [other.copy_data(cr, uid, line_id, context=context) for line_id in sorted(data[f])]
4658 # the lines are duplicated using the wrong (old) parent, but then
4659 # are reassigned to the correct one thanks to the (0, 0, ...)
4660 res[f] = [(0, 0, line) for line in lines if line]
4661 elif field._type == 'many2many':
4662 res[f] = [(6, 0, data[f])]
4668 def copy_translations(self, cr, uid, old_id, new_id, context=None):
4672 # avoid recursion through already copied records in case of circular relationship
4673 seen_map = context.setdefault('__copy_translations_seen',{})
4674 if old_id in seen_map.setdefault(self._name,[]):
4676 seen_map[self._name].append(old_id)
4678 trans_obj = self.pool.get('ir.translation')
4679 # TODO it seems fields_get can be replaced by _all_columns (no need for translation)
4680 fields = self.fields_get(cr, uid, context=context)
4682 for field_name, field_def in fields.items():
4683 # removing the lang to compare untranslated values
4684 context_wo_lang = dict(context, lang=None)
4685 old_record, new_record = self.browse(cr, uid, [old_id, new_id], context=context_wo_lang)
4686 # we must recursively copy the translations for o2o and o2m
4687 if field_def['type'] == 'one2many':
4688 target_obj = self.pool[field_def['relation']]
4689 # here we rely on the order of the ids to match the translations
4690 # as foreseen in copy_data()
4691 old_children = sorted(r.id for r in old_record[field_name])
4692 new_children = sorted(r.id for r in new_record[field_name])
4693 for (old_child, new_child) in zip(old_children, new_children):
4694 target_obj.copy_translations(cr, uid, old_child, new_child, context=context)
4695 # and for translatable fields we keep them for copy
4696 elif field_def.get('translate'):
4697 if field_name in self._columns:
4698 trans_name = self._name + "," + field_name
4701 elif field_name in self._inherit_fields:
4702 trans_name = self._inherit_fields[field_name][0] + "," + field_name
4703 # get the id of the parent record to set the translation
4704 inherit_field_name = self._inherit_fields[field_name][1]
4705 target_id = new_record[inherit_field_name].id
4706 source_id = old_record[inherit_field_name].id
4710 trans_ids = trans_obj.search(cr, uid, [
4711 ('name', '=', trans_name),
4712 ('res_id', '=', source_id)
4714 user_lang = context.get('lang')
4715 for record in trans_obj.read(cr, uid, trans_ids, context=context):
4717 # remove source to avoid triggering _set_src
4718 del record['source']
4719 record.update({'res_id': target_id})
4720 if user_lang and user_lang == record['lang']:
4721 # 'source' to force the call to _set_src
4722 # 'value' needed if value is changed in copy(), want to see the new_value
4723 record['source'] = old_record[field_name]
4724 record['value'] = new_record[field_name]
4725 trans_obj.create(cr, uid, record, context=context)
4727 @api.returns('self', lambda value: value.id)
4728 def copy(self, cr, uid, id, default=None, context=None):
4729 """ copy(default=None)
4731 Duplicate record with given id updating it with default values
4733 :param dict default: dictionary of field values to override in the
4734 original values of the copied record, e.g: ``{'field_name': overriden_value, ...}``
4735 :returns: new record
4740 context = context.copy()
4741 data = self.copy_data(cr, uid, id, default, context)
4742 new_id = self.create(cr, uid, data, context)
4743 self.copy_translations(cr, uid, id, new_id, context)
4747 @api.returns('self')
4749 """ exists() -> records
4751 Returns the subset of records in `self` that exist, and marks deleted
4752 records as such in cache. It can be used as a test on records::
4757 By convention, new records are returned as existing.
4759 ids = filter(None, self._ids) # ids to check in database
4762 query = """SELECT id FROM "%s" WHERE id IN %%s""" % self._table
4763 self._cr.execute(query, (ids,))
4764 ids = ([r[0] for r in self._cr.fetchall()] + # ids in database
4765 [id for id in self._ids if not id]) # new ids
4766 existing = self.browse(ids)
4767 if len(existing) < len(self):
4768 # mark missing records in cache with a failed value
4769 exc = MissingError(_("Record does not exist or has been deleted."))
4770 (self - existing)._cache.update(FailedValue(exc))
4773 def check_recursion(self, cr, uid, ids, context=None, parent=None):
4774 _logger.warning("You are using deprecated %s.check_recursion(). Please use the '_check_recursion()' instead!" % \
4776 assert parent is None or parent in self._columns or parent in self._inherit_fields,\
4777 "The 'parent' parameter passed to check_recursion() must be None or a valid field name"
4778 return self._check_recursion(cr, uid, ids, context, parent)
4780 def _check_recursion(self, cr, uid, ids, context=None, parent=None):
4782 Verifies that there is no loop in a hierarchical structure of records,
4783 by following the parent relationship using the **parent** field until a loop
4784 is detected or until a top-level record is found.
4786 :param cr: database cursor
4787 :param uid: current user id
4788 :param ids: list of ids of records to check
4789 :param parent: optional parent field name (default: ``self._parent_name = parent_id``)
4790 :return: **True** if the operation can proceed safely, or **False** if an infinite loop is detected.
4793 parent = self._parent_name
4795 # must ignore 'active' flag, ir.rules, etc. => direct SQL query
4796 query = 'SELECT "%s" FROM "%s" WHERE id = %%s' % (parent, self._table)
4799 while current_id is not None:
4800 cr.execute(query, (current_id,))
4801 result = cr.fetchone()
4802 current_id = result[0] if result else None
4803 if current_id == id:
4807 def _check_m2m_recursion(self, cr, uid, ids, field_name):
4809 Verifies that there is no loop in a hierarchical structure of records,
4810 by following the parent relationship using the **parent** field until a loop
4811 is detected or until a top-level record is found.
4813 :param cr: database cursor
4814 :param uid: current user id
4815 :param ids: list of ids of records to check
4816 :param field_name: field to check
4817 :return: **True** if the operation can proceed safely, or **False** if an infinite loop is detected.
4820 field = self._all_columns.get(field_name)
4821 field = field.column if field else None
4822 if not field or field._type != 'many2many' or field._obj != self._name:
4823 # field must be a many2many on itself
4824 raise ValueError('invalid field_name: %r' % (field_name,))
4826 query = 'SELECT distinct "%s" FROM "%s" WHERE "%s" IN %%s' % (field._id2, field._rel, field._id1)
4830 for i in range(0, len(ids_parent), cr.IN_MAX):
4832 sub_ids_parent = ids_parent[i:j]
4833 cr.execute(query, (tuple(sub_ids_parent),))
4834 ids_parent2.extend(filter(None, map(lambda x: x[0], cr.fetchall())))
4835 ids_parent = ids_parent2
4836 for i in ids_parent:
4841 def _get_external_ids(self, cr, uid, ids, *args, **kwargs):
4842 """Retrieve the External ID(s) of any database record.
4844 **Synopsis**: ``_get_xml_ids(cr, uid, ids) -> { 'id': ['module.xml_id'] }``
4846 :return: map of ids to the list of their fully qualified External IDs
4847 in the form ``module.key``, or an empty list when there's no External
4848 ID for a record, e.g.::
4850 { 'id': ['module.ext_id', 'module.ext_id_bis'],
4853 ir_model_data = self.pool.get('ir.model.data')
4854 data_ids = ir_model_data.search(cr, uid, [('model', '=', self._name), ('res_id', 'in', ids)])
4855 data_results = ir_model_data.read(cr, uid, data_ids, ['module', 'name', 'res_id'])
4858 # can't use dict.fromkeys() as the list would be shared!
4860 for record in data_results:
4861 result[record['res_id']].append('%(module)s.%(name)s' % record)
4864 def get_external_id(self, cr, uid, ids, *args, **kwargs):
4865 """Retrieve the External ID of any database record, if there
4866 is one. This method works as a possible implementation
4867 for a function field, to be able to add it to any
4868 model object easily, referencing it as ``Model.get_external_id``.
4870 When multiple External IDs exist for a record, only one
4871 of them is returned (randomly).
4873 :return: map of ids to their fully qualified XML ID,
4874 defaulting to an empty string when there's none
4875 (to be usable as a function field),
4878 { 'id': 'module.ext_id',
4881 results = self._get_xml_ids(cr, uid, ids)
4882 for k, v in results.iteritems():
4889 # backwards compatibility
4890 get_xml_id = get_external_id
4891 _get_xml_ids = _get_external_ids
4893 def print_report(self, cr, uid, ids, name, data, context=None):
4895 Render the report `name` for the given IDs. The report must be defined
4896 for this model, not another.
4898 report = self.pool['ir.actions.report.xml']._lookup_report(cr, name)
4899 assert self._name == report.table
4900 return report.create(cr, uid, ids, data, context)
4904 def is_transient(cls):
4905 """ Return whether the model is transient.
4907 See :class:`TransientModel`.
4910 return cls._transient
4912 def _transient_clean_rows_older_than(self, cr, seconds):
4913 assert self._transient, "Model %s is not transient, it cannot be vacuumed!" % self._name
4914 # Never delete rows used in last 5 minutes
4915 seconds = max(seconds, 300)
4916 query = ("SELECT id FROM " + self._table + " WHERE"
4917 " COALESCE(write_date, create_date, (now() at time zone 'UTC'))::timestamp"
4918 " < ((now() at time zone 'UTC') - interval %s)")
4919 cr.execute(query, ("%s seconds" % seconds,))
4920 ids = [x[0] for x in cr.fetchall()]
4921 self.unlink(cr, SUPERUSER_ID, ids)
4923 def _transient_clean_old_rows(self, cr, max_count):
4924 # Check how many rows we have in the table
4925 cr.execute("SELECT count(*) AS row_count FROM " + self._table)
4927 if res[0][0] <= max_count:
4928 return # max not reached, nothing to do
4929 self._transient_clean_rows_older_than(cr, 300)
4931 def _transient_vacuum(self, cr, uid, force=False):
4932 """Clean the transient records.
4934 This unlinks old records from the transient model tables whenever the
4935 "_transient_max_count" or "_max_age" conditions (if any) are reached.
4936 Actual cleaning will happen only once every "_transient_check_time" calls.
4937 This means this method can be called frequently called (e.g. whenever
4938 a new record is created).
4939 Example with both max_hours and max_count active:
4940 Suppose max_hours = 0.2 (e.g. 12 minutes), max_count = 20, there are 55 rows in the
4941 table, 10 created/changed in the last 5 minutes, an additional 12 created/changed between
4942 5 and 10 minutes ago, the rest created/changed more then 12 minutes ago.
4943 - age based vacuum will leave the 22 rows created/changed in the last 12 minutes
4944 - count based vacuum will wipe out another 12 rows. Not just 2, otherwise each addition
4945 would immediately cause the maximum to be reached again.
4946 - the 10 rows that have been created/changed the last 5 minutes will NOT be deleted
4948 assert self._transient, "Model %s is not transient, it cannot be vacuumed!" % self._name
4949 _transient_check_time = 20 # arbitrary limit on vacuum executions
4950 self._transient_check_count += 1
4951 if not force and (self._transient_check_count < _transient_check_time):
4952 return True # no vacuum cleaning this time
4953 self._transient_check_count = 0
4955 # Age-based expiration
4956 if self._transient_max_hours:
4957 self._transient_clean_rows_older_than(cr, self._transient_max_hours * 60 * 60)
4959 # Count-based expiration
4960 if self._transient_max_count:
4961 self._transient_clean_old_rows(cr, self._transient_max_count)
4965 def resolve_2many_commands(self, cr, uid, field_name, commands, fields=None, context=None):
4966 """ Serializes one2many and many2many commands into record dictionaries
4967 (as if all the records came from the database via a read()). This
4968 method is aimed at onchange methods on one2many and many2many fields.
4970 Because commands might be creation commands, not all record dicts
4971 will contain an ``id`` field. Commands matching an existing record
4972 will have an ``id``.
4974 :param field_name: name of the one2many or many2many field matching the commands
4975 :type field_name: str
4976 :param commands: one2many or many2many commands to execute on ``field_name``
4977 :type commands: list((int|False, int|False, dict|False))
4978 :param fields: list of fields to read from the database, when applicable
4979 :type fields: list(str)
4980 :returns: records in a shape similar to that returned by ``read()``
4981 (except records may be missing the ``id`` field if they don't exist in db)
4984 result = [] # result (list of dict)
4985 record_ids = [] # ids of records to read
4986 updates = {} # {id: dict} of updates on particular records
4988 for command in commands or []:
4989 if not isinstance(command, (list, tuple)):
4990 record_ids.append(command)
4991 elif command[0] == 0:
4992 result.append(command[2])
4993 elif command[0] == 1:
4994 record_ids.append(command[1])
4995 updates.setdefault(command[1], {}).update(command[2])
4996 elif command[0] in (2, 3):
4997 record_ids = [id for id in record_ids if id != command[1]]
4998 elif command[0] == 4:
4999 record_ids.append(command[1])
5000 elif command[0] == 5:
5001 result, record_ids = [], []
5002 elif command[0] == 6:
5003 result, record_ids = [], list(command[2])
5005 # read the records and apply the updates
5006 other_model = self.pool[self._all_columns[field_name].column._obj]
5007 for record in other_model.read(cr, uid, record_ids, fields=fields, context=context):
5008 record.update(updates.get(record['id'], {}))
5009 result.append(record)
5013 # for backward compatibility
5014 resolve_o2m_commands_to_record_dicts = resolve_2many_commands
5016 def search_read(self, cr, uid, domain=None, fields=None, offset=0, limit=None, order=None, context=None):
5018 Performs a ``search()`` followed by a ``read()``.
5020 :param cr: database cursor
5021 :param user: current user id
5022 :param domain: Search domain, see ``args`` parameter in ``search()``. Defaults to an empty domain that will match all records.
5023 :param fields: List of fields to read, see ``fields`` parameter in ``read()``. Defaults to all fields.
5024 :param offset: Number of records to skip, see ``offset`` parameter in ``search()``. Defaults to 0.
5025 :param limit: Maximum number of records to return, see ``limit`` parameter in ``search()``. Defaults to no limit.
5026 :param order: Columns to sort result, see ``order`` parameter in ``search()``. Defaults to no sort.
5027 :param context: context arguments.
5028 :return: List of dictionaries containing the asked fields.
5029 :rtype: List of dictionaries.
5032 record_ids = self.search(cr, uid, domain or [], offset=offset, limit=limit, order=order, context=context)
5036 if fields and fields == ['id']:
5037 # shortcut read if we only want the ids
5038 return [{'id': id} for id in record_ids]
5040 # read() ignores active_test, but it would forward it to any downstream search call
5041 # (e.g. for x2m or function fields), and this is not the desired behavior, the flag
5042 # was presumably only meant for the main search().
5043 # TODO: Move this to read() directly?
5044 read_ctx = dict(context or {})
5045 read_ctx.pop('active_test', None)
5047 result = self.read(cr, uid, record_ids, fields, context=read_ctx)
5048 if len(result) <= 1:
5052 index = dict((r['id'], r) for r in result)
5053 return [index[x] for x in record_ids if x in index]
5055 def _register_hook(self, cr):
5056 """ stuff to do right after the registry is built """
5060 def _patch_method(cls, name, method):
5061 """ Monkey-patch a method for all instances of this model. This replaces
5062 the method called `name` by `method` in the given class.
5063 The original method is then accessible via ``method.origin``, and it
5064 can be restored with :meth:`~._revert_method`.
5069 def do_write(self, values):
5070 # do stuff, and call the original method
5071 return do_write.origin(self, values)
5073 # patch method write of model
5074 model._patch_method('write', do_write)
5076 # this will call do_write
5077 records = model.search([...])
5080 # restore the original method
5081 model._revert_method('write')
5083 origin = getattr(cls, name)
5084 method.origin = origin
5085 # propagate decorators from origin to method, and apply api decorator
5086 wrapped = api.guess(api.propagate(origin, method))
5087 wrapped.origin = origin
5088 setattr(cls, name, wrapped)
5091 def _revert_method(cls, name):
5092 """ Revert the original method called `name` in the given class.
5093 See :meth:`~._patch_method`.
5095 method = getattr(cls, name)
5096 setattr(cls, name, method.origin)
5101 # An instance represents an ordered collection of records in a given
5102 # execution environment. The instance object refers to the environment, and
5103 # the records themselves are represented by their cache dictionary. The 'id'
5104 # of each record is found in its corresponding cache dictionary.
5106 # This design has the following advantages:
5107 # - cache access is direct and thus fast;
5108 # - one can consider records without an 'id' (see new records);
5109 # - the global cache is only an index to "resolve" a record 'id'.
5113 def _browse(cls, env, ids):
5114 """ Create an instance attached to `env`; `ids` is a tuple of record
5117 records = object.__new__(cls)
5120 env.prefetch[cls._name].update(ids)
5124 def browse(self, cr, uid, arg=None, context=None):
5125 ids = _normalize_ids(arg)
5126 #assert all(isinstance(id, IdType) for id in ids), "Browsing invalid ids: %s" % ids
5127 return self._browse(Environment(cr, uid, context or {}), ids)
5130 def browse(self, arg=None):
5131 """ browse([ids]) -> records
5133 Returns a recordset for the ids provided as parameter in the current
5136 Can take no ids, a single id or a sequence of ids.
5138 ids = _normalize_ids(arg)
5139 #assert all(isinstance(id, IdType) for id in ids), "Browsing invalid ids: %s" % ids
5140 return self._browse(self.env, ids)
5143 # Internal properties, for manipulating the instance's implementation
5148 """ List of actual record ids in this recordset (ignores placeholder
5149 ids for records to create)
5151 return filter(None, list(self._ids))
5153 # backward-compatibility with former browse records
5154 _cr = property(lambda self: self.env.cr)
5155 _uid = property(lambda self: self.env.uid)
5156 _context = property(lambda self: self.env.context)
5159 # Conversion methods
5162 def ensure_one(self):
5163 """ Verifies that the current recorset holds a single record. Raises
5164 an exception otherwise.
5168 raise except_orm("ValueError", "Expected singleton: %s" % self)
5170 def with_env(self, env):
5171 """ Returns a new version of this recordset attached to the provided
5174 :type env: :class:`~openerp.api.Environment`
5176 return self._browse(env, self._ids)
5178 def sudo(self, user=SUPERUSER_ID):
5179 """ sudo([user=SUPERUSER])
5181 Returns a new version of this recordset attached to the provided
5184 return self.with_env(self.env(user=user))
5186 def with_context(self, *args, **kwargs):
5187 """ with_context([context][, **overrides]) -> records
5189 Returns a new version of this recordset attached to an extended
5192 The extended context is either the provided ``context`` in which
5193 ``overrides`` are merged or the *current* context in which
5194 ``overrides`` are merged e.g.::
5196 # current context is {'key1': True}
5197 r2 = records.with_context({}, key2=True)
5198 # -> r2._context is {'key2': True}
5199 r2 = records.with_context(key2=True)
5200 # -> r2._context is {'key1': True, 'key2': True}
5202 context = dict(args[0] if args else self._context, **kwargs)
5203 return self.with_env(self.env(context=context))
5205 def _convert_to_cache(self, values, update=False, validate=True):
5206 """ Convert the `values` dictionary into cached values.
5208 :param update: whether the conversion is made for updating `self`;
5209 this is necessary for interpreting the commands of *2many fields
5210 :param validate: whether values must be checked
5212 fields = self._fields
5213 target = self if update else self.browse()
5215 name: fields[name].convert_to_cache(value, target, validate=validate)
5216 for name, value in values.iteritems()
5220 def _convert_to_write(self, values):
5221 """ Convert the `values` dictionary into the format of :meth:`write`. """
5222 fields = self._fields
5224 for name, value in values.iteritems():
5226 value = fields[name].convert_to_write(value)
5227 if not isinstance(value, NewId):
5228 result[name] = value
5232 # Record traversal and update
5235 def _mapped_func(self, func):
5236 """ Apply function `func` on all records in `self`, and return the
5237 result as a list or a recordset (if `func` return recordsets).
5239 vals = [func(rec) for rec in self]
5240 val0 = vals[0] if vals else func(self)
5241 if isinstance(val0, BaseModel):
5242 return reduce(operator.or_, vals, val0)
5245 def mapped(self, func):
5246 """ Apply `func` on all records in `self`, and return the result as a
5247 list or a recordset (if `func` return recordsets). In the latter
5248 case, the order of the returned recordset is arbritrary.
5250 :param func: a function or a dot-separated sequence of field names
5252 if isinstance(func, basestring):
5254 for name in func.split('.'):
5255 recs = recs._mapped_func(operator.itemgetter(name))
5258 return self._mapped_func(func)
5260 def _mapped_cache(self, name_seq):
5261 """ Same as `~.mapped`, but `name_seq` is a dot-separated sequence of
5262 field names, and only cached values are used.
5265 for name in name_seq.split('.'):
5266 field = recs._fields[name]
5267 null = field.null(self.env)
5268 recs = recs.mapped(lambda rec: rec._cache.get(field, null))
5271 def filtered(self, func):
5272 """ Select the records in `self` such that `func(rec)` is true, and
5273 return them as a recordset.
5275 :param func: a function or a dot-separated sequence of field names
5277 if isinstance(func, basestring):
5279 func = lambda rec: filter(None, rec.mapped(name))
5280 return self.browse([rec.id for rec in self if func(rec)])
5282 def sorted(self, key=None):
5283 """ Return the recordset `self` ordered by `key` """
5285 return self.search([('id', 'in', self.ids)])
5287 return self.browse(map(int, sorted(self, key=key)))
5289 def update(self, values):
5290 """ Update record `self[0]` with `values`. """
5291 for name, value in values.iteritems():
5295 # New records - represent records that do not exist in the database yet;
5296 # they are used to compute default values and perform onchanges.
5300 def new(self, values={}):
5301 """ new([values]) -> record
5303 Return a new record instance attached to the current environment and
5304 initialized with the provided ``value``. The record is *not* created
5305 in database, it only exists in memory.
5307 record = self.browse([NewId()])
5308 record._cache.update(record._convert_to_cache(values, update=True))
5310 if record.env.in_onchange:
5311 # The cache update does not set inverse fields, so do it manually.
5312 # This is useful for computing a function field on secondary
5313 # records, if that field depends on the main record.
5315 field = self._fields.get(name)
5317 for invf in field.inverse_fields:
5318 invf._update(record[name], record)
5323 # Dirty flag, to mark records modified (in draft mode)
5328 """ Return whether any record in `self` is dirty. """
5329 dirty = self.env.dirty
5330 return any(record in dirty for record in self)
5333 def _dirty(self, value):
5334 """ Mark the records in `self` as dirty. """
5336 map(self.env.dirty.add, self)
5338 map(self.env.dirty.discard, self)
5344 def __nonzero__(self):
5345 """ Test whether `self` is nonempty. """
5346 return bool(getattr(self, '_ids', True))
5349 """ Return the size of `self`. """
5350 return len(self._ids)
5353 """ Return an iterator over `self`. """
5354 for id in self._ids:
5355 yield self._browse(self.env, (id,))
5357 def __contains__(self, item):
5358 """ Test whether `item` (record or field name) is an element of `self`.
5359 In the first case, the test is fully equivalent to::
5361 any(item == record for record in self)
5363 if isinstance(item, BaseModel) and self._name == item._name:
5364 return len(item) == 1 and item.id in self._ids
5365 elif isinstance(item, basestring):
5366 return item in self._fields
5368 raise except_orm("ValueError", "Mixing apples and oranges: %s in %s" % (item, self))
5370 def __add__(self, other):
5371 """ Return the concatenation of two recordsets. """
5372 if not isinstance(other, BaseModel) or self._name != other._name:
5373 raise except_orm("ValueError", "Mixing apples and oranges: %s + %s" % (self, other))
5374 return self.browse(self._ids + other._ids)
5376 def __sub__(self, other):
5377 """ Return the recordset of all the records in `self` that are not in `other`. """
5378 if not isinstance(other, BaseModel) or self._name != other._name:
5379 raise except_orm("ValueError", "Mixing apples and oranges: %s - %s" % (self, other))
5380 other_ids = set(other._ids)
5381 return self.browse([id for id in self._ids if id not in other_ids])
5383 def __and__(self, other):
5384 """ Return the intersection of two recordsets.
5385 Note that recordset order is not preserved.
5387 if not isinstance(other, BaseModel) or self._name != other._name:
5388 raise except_orm("ValueError", "Mixing apples and oranges: %s & %s" % (self, other))
5389 return self.browse(set(self._ids) & set(other._ids))
5391 def __or__(self, other):
5392 """ Return the union of two recordsets.
5393 Note that recordset order is not preserved.
5395 if not isinstance(other, BaseModel) or self._name != other._name:
5396 raise except_orm("ValueError", "Mixing apples and oranges: %s | %s" % (self, other))
5397 return self.browse(set(self._ids) | set(other._ids))
5399 def __eq__(self, other):
5400 """ Test whether two recordsets are equivalent (up to reordering). """
5401 if not isinstance(other, BaseModel):
5403 _logger.warning("Comparing apples and oranges: %s == %s", self, other)
5405 return self._name == other._name and set(self._ids) == set(other._ids)
5407 def __ne__(self, other):
5408 return not self == other
5410 def __lt__(self, other):
5411 if not isinstance(other, BaseModel) or self._name != other._name:
5412 raise except_orm("ValueError", "Mixing apples and oranges: %s < %s" % (self, other))
5413 return set(self._ids) < set(other._ids)
5415 def __le__(self, other):
5416 if not isinstance(other, BaseModel) or self._name != other._name:
5417 raise except_orm("ValueError", "Mixing apples and oranges: %s <= %s" % (self, other))
5418 return set(self._ids) <= set(other._ids)
5420 def __gt__(self, other):
5421 if not isinstance(other, BaseModel) or self._name != other._name:
5422 raise except_orm("ValueError", "Mixing apples and oranges: %s > %s" % (self, other))
5423 return set(self._ids) > set(other._ids)
5425 def __ge__(self, other):
5426 if not isinstance(other, BaseModel) or self._name != other._name:
5427 raise except_orm("ValueError", "Mixing apples and oranges: %s >= %s" % (self, other))
5428 return set(self._ids) >= set(other._ids)
5434 return "%s%s" % (self._name, getattr(self, '_ids', ""))
5436 def __unicode__(self):
5437 return unicode(str(self))
5442 if hasattr(self, '_ids'):
5443 return hash((self._name, frozenset(self._ids)))
5445 return hash(self._name)
5447 def __getitem__(self, key):
5448 """ If `key` is an integer or a slice, return the corresponding record
5449 selection as an instance (attached to `self.env`).
5450 Otherwise read the field `key` of the first record in `self`.
5454 inst = model.search(dom) # inst is a recordset
5455 r4 = inst[3] # fourth record in inst
5456 rs = inst[10:20] # subset of inst
5457 nm = rs['name'] # name of first record in inst
5459 if isinstance(key, basestring):
5460 # important: one must call the field's getter
5461 return self._fields[key].__get__(self, type(self))
5462 elif isinstance(key, slice):
5463 return self._browse(self.env, self._ids[key])
5465 return self._browse(self.env, (self._ids[key],))
5467 def __setitem__(self, key, value):
5468 """ Assign the field `key` to `value` in record `self`. """
5469 # important: one must call the field's setter
5470 return self._fields[key].__set__(self, value)
5473 # Cache and recomputation management
5478 """ Return the cache of `self`, mapping field names to values. """
5479 return RecordCache(self)
5482 def _in_cache_without(self, field):
5483 """ Make sure `self` is present in cache (for prefetching), and return
5484 the records of model `self` in cache that have no value for `field`
5485 (:class:`Field` instance).
5488 prefetch_ids = env.prefetch[self._name]
5489 prefetch_ids.update(self._ids)
5490 ids = filter(None, prefetch_ids - set(env.cache[field]))
5491 return self.browse(ids)
5495 """ Clear the records cache.
5498 The record cache is automatically invalidated.
5500 self.invalidate_cache()
5503 def invalidate_cache(self, fnames=None, ids=None):
5504 """ Invalidate the record caches after some records have been modified.
5505 If both `fnames` and `ids` are ``None``, the whole cache is cleared.
5507 :param fnames: the list of modified fields, or ``None`` for all fields
5508 :param ids: the list of modified record ids, or ``None`` for all
5512 return self.env.invalidate_all()
5513 fields = self._fields.values()
5515 fields = map(self._fields.__getitem__, fnames)
5517 # invalidate fields and inverse fields, too
5518 spec = [(f, ids) for f in fields] + \
5519 [(invf, None) for f in fields for invf in f.inverse_fields]
5520 self.env.invalidate(spec)
5523 def modified(self, fnames):
5524 """ Notify that fields have been modified on `self`. This invalidates
5525 the cache, and prepares the recomputation of stored function fields
5526 (new-style fields only).
5528 :param fnames: iterable of field names that have been modified on
5531 # each field knows what to invalidate and recompute
5533 for fname in fnames:
5534 spec += self._fields[fname].modified(self)
5538 for env in self.env.all
5539 for field in env.cache
5541 # invalidate non-stored fields.function which are currently cached
5542 spec += [(f, None) for f in self.pool.pure_function_fields
5543 if f in cached_fields]
5545 self.env.invalidate(spec)
5547 def _recompute_check(self, field):
5548 """ If `field` must be recomputed on some record in `self`, return the
5549 corresponding records that must be recomputed.
5551 return self.env.check_todo(field, self)
5553 def _recompute_todo(self, field):
5554 """ Mark `field` to be recomputed. """
5555 self.env.add_todo(field, self)
5557 def _recompute_done(self, field):
5558 """ Mark `field` as recomputed. """
5559 self.env.remove_todo(field, self)
5562 def recompute(self):
5563 """ Recompute stored function fields. The fields and records to
5564 recompute have been determined by method :meth:`modified`.
5566 while self.env.has_todo():
5567 field, recs = self.env.get_todo()
5568 # evaluate the fields to recompute, and save them to database
5569 for rec, rec1 in zip(recs, recs.with_context(recompute=False)):
5571 values = rec._convert_to_write({
5572 f.name: rec[f.name] for f in field.computed_fields
5575 except MissingError:
5577 # mark the computed fields as done
5578 map(recs._recompute_done, field.computed_fields)
5581 # Generic onchange method
5584 def _has_onchange(self, field, other_fields):
5585 """ Return whether `field` should trigger an onchange event in the
5586 presence of `other_fields`.
5588 # test whether self has an onchange method for field, or field is a
5589 # dependency of any field in other_fields
5590 return field.name in self._onchange_methods or \
5591 any(dep in other_fields for dep in field.dependents)
5594 def _onchange_spec(self, view_info=None):
5595 """ Return the onchange spec from a view description; if not given, the
5596 result of ``self.fields_view_get()`` is used.
5600 # for traversing the XML arch and populating result
5601 def process(node, info, prefix):
5602 if node.tag == 'field':
5603 name = node.attrib['name']
5604 names = "%s.%s" % (prefix, name) if prefix else name
5605 if not result.get(names):
5606 result[names] = node.attrib.get('on_change')
5607 # traverse the subviews included in relational fields
5608 for subinfo in info['fields'][name].get('views', {}).itervalues():
5609 process(etree.fromstring(subinfo['arch']), subinfo, names)
5612 process(child, info, prefix)
5614 if view_info is None:
5615 view_info = self.fields_view_get()
5616 process(etree.fromstring(view_info['arch']), view_info, '')
5619 def _onchange_eval(self, field_name, onchange, result):
5620 """ Apply onchange method(s) for field `field_name` with spec `onchange`
5621 on record `self`. Value assignments are applied on `self`, while
5622 domain and warning messages are put in dictionary `result`.
5624 onchange = onchange.strip()
5627 if onchange in ("1", "true"):
5628 for method in self._onchange_methods.get(field_name, ()):
5629 method_res = method(self)
5632 if 'domain' in method_res:
5633 result.setdefault('domain', {}).update(method_res['domain'])
5634 if 'warning' in method_res:
5635 result['warning'] = method_res['warning']
5639 match = onchange_v7.match(onchange)
5641 method, params = match.groups()
5643 # evaluate params -> tuple
5644 global_vars = {'context': self._context, 'uid': self._uid}
5645 if self._context.get('field_parent'):
5646 class RawRecord(object):
5647 def __init__(self, record):
5648 self._record = record
5649 def __getattr__(self, name):
5650 field = self._record._fields[name]
5651 value = self._record[name]
5652 return field.convert_to_onchange(value)
5653 record = self[self._context['field_parent']]
5654 global_vars['parent'] = RawRecord(record)
5656 key: self._fields[key].convert_to_onchange(val)
5657 for key, val in self._cache.iteritems()
5659 params = eval("[%s]" % params, global_vars, field_vars)
5661 # call onchange method
5662 args = (self._cr, self._uid, self._origin.ids) + tuple(params)
5663 method_res = getattr(self._model, method)(*args)
5664 if not isinstance(method_res, dict):
5666 if 'value' in method_res:
5667 method_res['value'].pop('id', None)
5668 self.update(self._convert_to_cache(method_res['value'], validate=False))
5669 if 'domain' in method_res:
5670 result.setdefault('domain', {}).update(method_res['domain'])
5671 if 'warning' in method_res:
5672 result['warning'] = method_res['warning']
5675 def onchange(self, values, field_name, field_onchange):
5676 """ Perform an onchange on the given field.
5678 :param values: dictionary mapping field names to values, giving the
5679 current state of modification
5680 :param field_name: name of the modified field_name
5681 :param field_onchange: dictionary mapping field names to their
5686 if field_name and field_name not in self._fields:
5689 # determine subfields for field.convert_to_write() below
5691 subfields = defaultdict(set)
5692 for dotname in field_onchange:
5694 secondary.append(dotname)
5695 name, subname = dotname.split('.')
5696 subfields[name].add(subname)
5698 # create a new record with values, and attach `self` to it
5699 with env.do_in_onchange():
5700 record = self.new(values)
5701 values = dict(record._cache)
5702 # attach `self` with a different context (for cache consistency)
5703 record._origin = self.with_context(__onchange=True)
5705 # determine which field should be triggered an onchange
5706 todo = set([field_name]) if field_name else set(values)
5709 # dummy assignment: trigger invalidations on the record
5711 value = record[name]
5712 field = self._fields[name]
5713 if not field_name and field.type == 'many2one' and field.delegate and not value:
5714 # do not nullify all fields of parent record for new records
5716 record[name] = value
5718 result = {'value': {}}
5726 with env.do_in_onchange():
5727 # apply field-specific onchange methods
5728 if field_onchange.get(name):
5729 record._onchange_eval(name, field_onchange[name], result)
5731 # force re-evaluation of function fields on secondary records
5732 for field_seq in secondary:
5733 record.mapped(field_seq)
5735 # determine which fields have been modified
5736 for name, oldval in values.iteritems():
5737 field = self._fields[name]
5738 newval = record[name]
5739 if field.type in ('one2many', 'many2many'):
5740 if newval != oldval or newval._dirty:
5741 # put new value in result
5742 result['value'][name] = field.convert_to_write(
5743 newval, record._origin, subfields.get(name),
5747 # keep result: newval may have been dirty before
5750 if newval != oldval:
5751 # put new value in result
5752 result['value'][name] = field.convert_to_write(
5753 newval, record._origin, subfields.get(name),
5757 # clean up result to not return another value
5758 result['value'].pop(name, None)
5760 # At the moment, the client does not support updates on a *2many field
5761 # while this one is modified by the user.
5762 if field_name and self._fields[field_name].type in ('one2many', 'many2many'):
5763 result['value'].pop(field_name, None)
5768 class RecordCache(MutableMapping):
5769 """ Implements a proxy dictionary to read/update the cache of a record.
5770 Upon iteration, it looks like a dictionary mapping field names to
5771 values. However, fields may be used as keys as well.
5773 def __init__(self, records):
5774 self._recs = records
5776 def contains(self, field):
5777 """ Return whether `records[0]` has a value for `field` in cache. """
5778 if isinstance(field, basestring):
5779 field = self._recs._fields[field]
5780 return self._recs.id in self._recs.env.cache[field]
5782 def __contains__(self, field):
5783 """ Return whether `records[0]` has a regular value for `field` in cache. """
5784 if isinstance(field, basestring):
5785 field = self._recs._fields[field]
5786 dummy = SpecialValue(None)
5787 value = self._recs.env.cache[field].get(self._recs.id, dummy)
5788 return not isinstance(value, SpecialValue)
5790 def __getitem__(self, field):
5791 """ Return the cached value of `field` for `records[0]`. """
5792 if isinstance(field, basestring):
5793 field = self._recs._fields[field]
5794 value = self._recs.env.cache[field][self._recs.id]
5795 return value.get() if isinstance(value, SpecialValue) else value
5797 def __setitem__(self, field, value):
5798 """ Assign the cached value of `field` for all records in `records`. """
5799 if isinstance(field, basestring):
5800 field = self._recs._fields[field]
5801 values = dict.fromkeys(self._recs._ids, value)
5802 self._recs.env.cache[field].update(values)
5804 def update(self, *args, **kwargs):
5805 """ Update the cache of all records in `records`. If the argument is a
5806 `SpecialValue`, update all fields (except "magic" columns).
5808 if args and isinstance(args[0], SpecialValue):
5809 values = dict.fromkeys(self._recs._ids, args[0])
5810 for name, field in self._recs._fields.iteritems():
5812 self._recs.env.cache[field].update(values)
5814 return super(RecordCache, self).update(*args, **kwargs)
5816 def __delitem__(self, field):
5817 """ Remove the cached value of `field` for all `records`. """
5818 if isinstance(field, basestring):
5819 field = self._recs._fields[field]
5820 field_cache = self._recs.env.cache[field]
5821 for id in self._recs._ids:
5822 field_cache.pop(id, None)
5825 """ Iterate over the field names with a regular value in cache. """
5826 cache, id = self._recs.env.cache, self._recs.id
5827 dummy = SpecialValue(None)
5828 for name, field in self._recs._fields.iteritems():
5829 if name != 'id' and not isinstance(cache[field].get(id, dummy), SpecialValue):
5833 """ Return the number of fields with a regular value in cache. """
5834 return sum(1 for name in self)
5836 class Model(BaseModel):
5837 """Main super-class for regular database-persisted OpenERP models.
5839 OpenERP models are created by inheriting from this class::
5844 The system will later instantiate the class once per database (on
5845 which the class' module is installed).
5848 _register = False # not visible in ORM registry, meant to be python-inherited only
5849 _transient = False # True in a TransientModel
5851 class TransientModel(BaseModel):
5852 """Model super-class for transient records, meant to be temporarily
5853 persisted, and regularly vaccuum-cleaned.
5855 A TransientModel has a simplified access rights management,
5856 all users can create new records, and may only access the
5857 records they created. The super-user has unrestricted access
5858 to all TransientModel records.
5861 _register = False # not visible in ORM registry, meant to be python-inherited only
5864 class AbstractModel(BaseModel):
5865 """Abstract Model super-class for creating an abstract class meant to be
5866 inherited by regular models (Models or TransientModels) but not meant to
5867 be usable on its own, or persisted.
5869 Technical note: we don't want to make AbstractModel the super-class of
5870 Model or BaseModel because it would not make sense to put the main
5871 definition of persistence methods such as create() in it, and still we
5872 should be able to override them within an AbstractModel.
5874 _auto = False # don't create any database backend for AbstractModels
5875 _register = False # not visible in ORM registry, meant to be python-inherited only
5878 def itemgetter_tuple(items):
5879 """ Fixes itemgetter inconsistency (useful in some cases) of not returning
5880 a tuple if len(items) == 1: always returns an n-tuple where n = len(items)
5885 return lambda gettable: (gettable[items[0]],)
5886 return operator.itemgetter(*items)
5888 def convert_pgerror_23502(model, fields, info, e):
5889 m = re.match(r'^null value in column "(?P<field>\w+)" violates '
5890 r'not-null constraint\n',
5892 field_name = m and m.group('field')
5893 if not m or field_name not in fields:
5894 return {'message': unicode(e)}
5895 message = _(u"Missing required value for the field '%s'.") % field_name
5896 field = fields.get(field_name)
5898 message = _(u"Missing required value for the field '%s' (%s)") % (field['string'], field_name)
5901 'field': field_name,
5904 def convert_pgerror_23505(model, fields, info, e):
5905 m = re.match(r'^duplicate key (?P<field>\w+) violates unique constraint',
5907 field_name = m and m.group('field')
5908 if not m or field_name not in fields:
5909 return {'message': unicode(e)}
5910 message = _(u"The value for the field '%s' already exists.") % field_name
5911 field = fields.get(field_name)
5913 message = _(u"%s This might be '%s' in the current model, or a field "
5914 u"of the same name in an o2m.") % (message, field['string'])
5917 'field': field_name,
5920 PGERROR_TO_OE = defaultdict(
5921 # shape of mapped converters
5922 lambda: (lambda model, fvg, info, pgerror: {'message': unicode(pgerror)}), {
5923 # not_null_violation
5924 '23502': convert_pgerror_23502,
5925 # unique constraint error
5926 '23505': convert_pgerror_23505,
5929 def _normalize_ids(arg, atoms={int, long, str, unicode, NewId}):
5930 """ Normalizes the ids argument for ``browse`` (v7 and v8) to a tuple.
5932 Various implementations were tested on the corpus of all browse() calls
5933 performed during a full crawler run (after having installed all website_*
5934 modules) and this one was the most efficient overall.
5936 A possible bit of correctness was sacrificed by not doing any test on
5937 Iterable and just assuming that any non-atomic type was an iterable of
5942 # much of the corpus is falsy objects (empty list, tuple or set, None)
5946 # `type in set` is significantly faster (because more restrictive) than
5947 # isinstance(arg, set) or issubclass(type, set); and for new-style classes
5948 # obj.__class__ is equivalent to but faster than type(obj). Not relevant
5949 # (and looks much worse) in most cases, but over millions of calls it
5950 # does have a very minor effect.
5951 if arg.__class__ in atoms:
5956 # keep those imports here to avoid dependency cycle errors
5957 from .osv import expression
5958 from .fields import Field, SpecialValue, FailedValue
5960 # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: