# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
"""
    Object Relational Mapping module:
     * Hierarchical structure
     * Constraints consistency and validation
     * Object metadata depends on its status
     * Optimised processing by complex query (multiple actions at once)
     * Default field values
     * Permissions optimisation
     * Persistent object: DB postgresql
     * Data conversion
     * Multi-level caching system
     * Two different inheritance mechanisms
     * Rich set of field types:
          - classical (varchar, integer, boolean, ...)
          - relational (one2many, many2one, many2many)
          - functional
"""
import copy
import datetime
import itertools
import logging
import pickle
import re
from collections import defaultdict, MutableMapping
from inspect import getmembers

import dateutil.relativedelta
import psycopg2
from lxml import etree

from . import SUPERUSER_ID
from . import api
from . import tools
from .api import Environment
from .exceptions import except_orm, AccessError, MissingError
from .fields import Field
from .osv import fields
from .osv.query import Query
from .tools import lazy_property
from .tools.config import config
from .tools.misc import CountingStream, DEFAULT_SERVER_DATETIME_FORMAT, DEFAULT_SERVER_DATE_FORMAT
from .tools.safe_eval import safe_eval as eval
from .tools.translate import _
_logger = logging.getLogger(__name__)
_schema = logging.getLogger(__name__ + '.schema')

regex_order = re.compile('^( *([a-z0-9:_]+|"[a-z0-9:_]+")( *desc| *asc)?( *, *|))+$', re.I)
regex_object_name = re.compile(r'^[a-z0-9_.]+$')
onchange_v7 = re.compile(r"^(\w+)\((.*)\)$")
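# For illustration, regex_order accepts ORDER BY-like specs such as
# 'name desc, id' or '"sequence" asc', while something like 'name; DROP'
# is rejected. onchange_v7 parses legacy onchange specs such as
# "onchange_partner(partner_id)" into the method name and its argument string.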
AUTOINIT_RECALCULATE_STORED_FIELDS = 1000
def check_object_name(name):
    """ Check if the given name is a valid openerp object name.

    The _name attribute in osv and osv_memory object is subject to
    some restrictions. This function returns True if the given name
    is allowed, and False otherwise.

    TODO: this is an approximation. The goal in this approximation
    is to disallow uppercase characters (in some places, we quote
    table/column names and in other not, which leads to this kind
    of errors:

        psycopg2.ProgrammingError: relation "xxx" does not exist).

    The same restriction should apply to both osv and osv_memory
    objects for consistency.

    """
    if regex_object_name.match(name) is None:
        return False
    return True
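# For example:
#
#     check_object_name('res.partner')   # True
#     check_object_name('account_move')  # True
#     check_object_name('ResPartner')    # False: uppercase is rejected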
def raise_on_invalid_object_name(name):
    if not check_object_name(name):
        msg = "The _name attribute %s is not valid." % name
        raise except_orm('ValueError', msg)
POSTGRES_CONFDELTYPES = {
    'RESTRICT': 'r',
    'NO ACTION': 'a',
    'CASCADE': 'c',
    'SET NULL': 'n',
    'SET DEFAULT': 'd',
}
def intersect(la, lb):
    return filter(lambda x: x in lb, la)
124 """ Test whether functions `f` and `g` are identical or have the same name """
125 return f == g or getattr(f, '__name__', 0) == getattr(g, '__name__', 1)
def fix_import_export_id_paths(fieldname):
    """
    Fixes the id fields in import and exports, and splits field paths
    on '/'.

    :param str fieldname: name of the field to import/export
    :return: split field name
    :rtype: list of str
    """
    fixed_db_id = re.sub(r'([^/])\.id', r'\1/.id', fieldname)
    fixed_external_id = re.sub(r'([^/]):id', r'\1/id', fixed_db_id)
    return fixed_external_id.split('/')
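# For example, both external-id and database-id notations are normalized
# into path components:
#
#     fix_import_export_id_paths('partner_id.id')           # ['partner_id', '.id']
#     fix_import_export_id_paths('line_ids/product_id:id')  # ['line_ids', 'product_id', 'id']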
def pg_varchar(size=0):
    """ Returns the VARCHAR declaration for the provided size:

    * If no size (or an empty or negative size) is provided, return an
      'infinite' VARCHAR
    * Otherwise return a VARCHAR(n)

    :param int size: varchar size, optional
    :rtype: str
    """
    if size:
        if not isinstance(size, int):
            raise TypeError("VARCHAR parameter should be an int, got %s"
                            % type(size))
        if size > 0:
            return 'VARCHAR(%d)' % size
    return 'VARCHAR'
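# For example:
#
#     pg_varchar()    # 'VARCHAR'      (unlimited size)
#     pg_varchar(16)  # 'VARCHAR(16)'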
FIELDS_TO_PGTYPES = {
    fields.boolean: 'bool',
    fields.integer: 'int4',
    fields.text: 'text',
    fields.html: 'text',
    fields.date: 'date',
    fields.datetime: 'timestamp',
    fields.binary: 'bytea',
    fields.many2one: 'int4',
    fields.serialized: 'text',
}
def get_pg_type(f, type_override=None):
    """
    :param fields._column f: field to get a Postgres type for
    :param type type_override: use the provided type for dispatching instead of the field's own type
    :returns: (postgres_identification_type, postgres_type_specification)
    :rtype: (str, str)
    """
    field_type = type_override or type(f)

    if field_type in FIELDS_TO_PGTYPES:
        pg_type = (FIELDS_TO_PGTYPES[field_type], FIELDS_TO_PGTYPES[field_type])
    elif issubclass(field_type, fields.float):
        if f.digits:
            pg_type = ('numeric', 'NUMERIC')
        else:
            pg_type = ('float8', 'DOUBLE PRECISION')
    elif issubclass(field_type, (fields.char, fields.reference)):
        pg_type = ('varchar', pg_varchar(f.size))
    elif issubclass(field_type, fields.selection):
        if (isinstance(f.selection, list) and isinstance(f.selection[0][0], int))\
                or getattr(f, 'size', None) == -1:
            pg_type = ('int4', 'INTEGER')
        else:
            pg_type = ('varchar', pg_varchar(getattr(f, 'size', None)))
    elif issubclass(field_type, fields.function):
        if f._type == 'selection':
            pg_type = ('varchar', pg_varchar())
        else:
            pg_type = get_pg_type(f, getattr(fields, f._type))
    else:
        _logger.warning('%s type not supported!', field_type)
        pg_type = None

    return pg_type
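# For instance, a size-limited char column maps to a sized varchar:
#
#     get_pg_type(fields.char(size=16))  # ('varchar', 'VARCHAR(16)')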
class MetaModel(api.Meta):
    """ Metaclass for the models.

    This class is used as the metaclass for the class :class:`BaseModel` to
    discover the models defined in a module (without instantiating them).
    If the automatic discovery is not needed, it is possible to set the model's
    ``_register`` attribute to False.

    """

    module_to_models = {}

    def __init__(self, name, bases, attrs):
        if not self._register:
            self._register = True
            super(MetaModel, self).__init__(name, bases, attrs)
            return

        if not hasattr(self, '_module'):
            # The (OpenERP) module name can be in the `openerp.addons` namespace
            # or not. For instance, module `sale` can be imported as
            # `openerp.addons.sale` (the right way) or `sale` (for backward
            # compatibility).
            module_parts = self.__module__.split('.')
            if len(module_parts) > 2 and module_parts[:2] == ['openerp', 'addons']:
                module_name = self.__module__.split('.')[2]
            else:
                module_name = self.__module__.split('.')[0]
            self._module = module_name

        # Remember which models to instantiate for this module.
        if not self._custom:
            self.module_to_models.setdefault(self._module, []).append(self)
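        # E.g. a model class defined in `openerp.addons.sale.sale` gets
        # _module = 'sale', while one imported as plain `sale.sale` (backward
        # compatibility) also resolves to 'sale' via the first path component.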
242 """ Pseudo-ids for new records. """
243 def __nonzero__(self):
246 IdType = (int, long, basestring, NewId)
# special columns automatically created by the ORM
LOG_ACCESS_COLUMNS = ['create_uid', 'create_date', 'write_uid', 'write_date']
MAGIC_COLUMNS = ['id'] + LOG_ACCESS_COLUMNS
class BaseModel(object):
    """ Base class for OpenERP models.

    OpenERP models are created by inheriting from this class' subclasses:

    * :class:`Model` for regular database-persisted models

    * :class:`TransientModel` for temporary data, stored in the database but
      automatically vacuumed every so often

    * :class:`AbstractModel` for abstract super classes meant to be shared by
      multiple inheriting models

    The system automatically instantiates every model once per database. Those
    instances represent the available models on each database, and depend on
    which modules are installed on that database. The actual class of each
    instance is built from the Python classes that create and inherit from the
    corresponding model.

    Every model instance is a "recordset", i.e., an ordered collection of
    records of the model. Recordsets are returned by methods like
    :meth:`~.browse`, :meth:`~.search`, or field accesses. Records have no
    explicit representation: a record is represented as a recordset of one
    record.

    To create a class that should not be instantiated, the _register class
    attribute may be set to False.
    """
    __metaclass__ = MetaModel
    _auto = True # create database backend
    _register = False # Set to false if the model shouldn't be automatically discovered.
    _name = None
    _columns = {}
    _constraints = []
    _custom = False
    _defaults = {}
    _rec_name = None
    _parent_name = 'parent_id'
    _parent_store = False
    _parent_order = False
    _date_name = 'date'
    _order = 'id'
    _sequence = None
    _description = None

    # dict of {field:method}, with method returning the (name_get of records, {id: fold})
    # to include in the _read_group, if grouped on this field
    _group_by_full = {}

    # Transience
    _transient = False # True in a TransientModel

    # structure:
    #  { 'parent_model': 'm2o_field', ... }
    _inherits = {}

    # Mapping from inherits'd field name to triple (m, r, f, n) where m is the
    # model from which it is inherits'd, r is the (local) field towards m, f
    # is the _column object itself, and n is the original (i.e. top-most)
    # parent model.
    # Example:
    #  { 'field_name': ('parent_model', 'm2o_field_to_reach_parent',
    #                    field_column_obj, original_parent_model), ... }
    _inherit_fields = {}

    # Mapping field name/column_info object
    # This is similar to _inherit_fields but:
    # 1. includes self fields,
    # 2. uses column_info instead of a triple.
    _all_columns = {}

    _table = None
    _sql_constraints = []

    # model dependencies, for models backed up by sql views:
    # {model_name: field_names, ...}
    _depends = {}

    CONCURRENCY_CHECK_FIELD = '__last_update'
    def log(self, cr, uid, id, message, secondary=False, context=None):
        return _logger.warning("log() is deprecated. Please use OpenChatter notification system instead of the res.log mechanism.")

    def view_init(self, cr, uid, fields_list, context=None):
        """Override this method to do specific things when a view on the object is opened."""
        pass
    def _field_create(self, cr, context=None):
        """ Create entries in ir_model_fields for all the model's fields.

        If necessary, also create an entry in ir_model, and if called from the
        modules loading scheme (by receiving 'module' in the context), also
        create entries in ir_model_data (for the model and the fields).

        - create an entry in ir_model (if there is not already one),
        - create an entry in ir_model_data (if there is not already one, and if
          'module' is in the context),
        - update ir_model_fields with the fields found in _columns
          (TODO there is some redundancy as _columns is updated from
          ir_model_fields in __init__).

        """
        if context is None:
            context = {}
        cr.execute("SELECT id FROM ir_model WHERE model=%s", (self._name,))
        if not cr.rowcount:
            cr.execute('SELECT nextval(%s)', ('ir_model_id_seq',))
            model_id = cr.fetchone()[0]
            cr.execute("INSERT INTO ir_model (id,model, name, info,state) VALUES (%s, %s, %s, %s, %s)", (model_id, self._name, self._description, self.__doc__, 'base'))
        else:
            model_id = cr.fetchone()[0]
        if 'module' in context:
            name_id = 'model_' + self._name.replace('.', '_')
            cr.execute('select * from ir_model_data where name=%s and module=%s', (name_id, context['module']))
            if not cr.rowcount:
                cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, (now() at time zone 'UTC'), (now() at time zone 'UTC'), %s, %s, %s)", \
                    (name_id, context['module'], 'ir.model', model_id)
                )

        cr.execute("SELECT * FROM ir_model_fields WHERE model=%s", (self._name,))
        cols = {}
        for rec in cr.dictfetchall():
            cols[rec['name']] = rec

        ir_model_fields_obj = self.pool.get('ir.model.fields')

        # sparse fields should be created at the end, as they depend on their serialized fields already existing
        model_fields = sorted(self._columns.items(), key=lambda x: 1 if x[1]._type == 'sparse' else 0)
        for (k, f) in model_fields:
            vals = {
                'model_id': model_id,
                'model': self._name,
                'name': k,
                'field_description': f.string,
                'ttype': f._type,
                'relation': f._obj or '',
                'select_level': tools.ustr(int(f.select)),
                'readonly': (f.readonly and 1) or 0,
                'required': (f.required and 1) or 0,
                'selectable': (f.selectable and 1) or 0,
                'translate': (f.translate and 1) or 0,
                'relation_field': f._fields_id if isinstance(f, fields.one2many) else '',
                'serialization_field_id': None,
            }
            if getattr(f, 'serialization_field', None):
                # resolve link to serialization_field if specified by name
                serialization_field_id = ir_model_fields_obj.search(cr, SUPERUSER_ID, [('model', '=', vals['model']), ('name', '=', f.serialization_field)])
                if not serialization_field_id:
                    raise except_orm(_('Error'), _("Serialization field `%s` not found for sparse field `%s`!") % (f.serialization_field, k))
                vals['serialization_field_id'] = serialization_field_id[0]

            # When it's a custom field, it does not contain f.select
            if context.get('field_state', 'base') == 'manual':
                if context.get('field_name', '') == k:
                    vals['select_level'] = context.get('select', '0')
                # setting value to let the problem NOT occur next time
                elif k in cols:
                    vals['select_level'] = cols[k]['select_level']

            if k not in cols:
                cr.execute('select nextval(%s)', ('ir_model_fields_id_seq',))
                id = cr.fetchone()[0]
                vals['id'] = id
                cr.execute("""INSERT INTO ir_model_fields (
                    id, model_id, model, name, field_description, ttype,
                    relation, state, select_level, relation_field, translate, serialization_field_id
                ) VALUES (
                    %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s
                )""", (
                    id, vals['model_id'], vals['model'], vals['name'], vals['field_description'], vals['ttype'],
                    vals['relation'], 'base',
                    vals['select_level'], vals['relation_field'], bool(vals['translate']), vals['serialization_field_id']
                ))
                if 'module' in context:
                    name1 = 'field_' + self._table + '_' + k
                    cr.execute("select name from ir_model_data where name=%s", (name1,))
                    if cr.rowcount:
                        name1 = name1 + "_" + str(id)
                    cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, (now() at time zone 'UTC'), (now() at time zone 'UTC'), %s, %s, %s)", \
                        (name1, context['module'], 'ir.model.fields', id)
                    )
            else:
                for key, val in vals.items():
                    if cols[k][key] != vals[key]:
                        cr.execute('update ir_model_fields set field_description=%s where model=%s and name=%s', (vals['field_description'], vals['model'], vals['name']))
                        cr.execute("""UPDATE ir_model_fields SET
                            model_id=%s, field_description=%s, ttype=%s, relation=%s,
                            select_level=%s, readonly=%s, required=%s, selectable=%s, relation_field=%s, translate=%s, serialization_field_id=%s
                        WHERE
                            model=%s AND name=%s""", (
                                vals['model_id'], vals['field_description'], vals['ttype'],
                                vals['relation'],
                                vals['select_level'], bool(vals['readonly']), bool(vals['required']), bool(vals['selectable']), vals['relation_field'], bool(vals['translate']), vals['serialization_field_id'], vals['model'], vals['name']
                            ))
                        break
        self.invalidate_cache(cr, SUPERUSER_ID)
    @classmethod
    def _add_field(cls, name, field):
        """ Add the given `field` under the given `name` in the class """
        field.set_class_name(cls, name)

        # add field in _fields (for reflection)
        cls._fields[name] = field

        # add field as an attribute, unless another kind of value already exists
        if isinstance(getattr(cls, name, field), Field):
            setattr(cls, name, field)
        else:
            _logger.warning("In model %r, member %r is not a field", cls._name, name)

        if field.store:
            cls._columns[name] = field.to_column()
        else:
            # remove potential column that may be overridden by field
            cls._columns.pop(name, None)
    @classmethod
    def _add_magic_fields(cls):
        """ Introduce magic fields on the current class

        * id is a "normal" field (with a specific getter)
        * create_uid, create_date, write_uid and write_date have become
          "normal" fields
        * $CONCURRENCY_CHECK_FIELD is a computed field with its computing
          method defined dynamically. Uses ``str(datetime.datetime.utcnow())``
          to get the same structure as the previous
          ``(now() at time zone 'UTC')::timestamp``::

              # select (now() at time zone 'UTC')::timestamp;
                        timezone
              ----------------------------
               2013-06-18 08:30:37.292809

              >>> str(datetime.datetime.utcnow())
              '2013-06-18 08:31:32.821177'
        """
        def add(name, field):
            """ add `field` with the given `name` if it does not exist yet """
            if name not in cls._columns and name not in cls._fields:
                cls._add_field(name, field)

        # cyclic import
        from . import fields

        # this field 'id' must override any other column or field
        cls._add_field('id', fields.Id(automatic=True))

        add('display_name', fields.Char(string='Name',
            compute='_compute_display_name', inverse='_inverse_display_name',
            search='_search_display_name', automatic=True))

        if cls._log_access:
            add('create_uid', fields.Many2one('res.users', string='Created by', automatic=True))
            add('create_date', fields.Datetime(string='Created on', automatic=True))
            add('write_uid', fields.Many2one('res.users', string='Last Updated by', automatic=True))
            add('write_date', fields.Datetime(string='Last Updated on', automatic=True))
            last_modified_name = 'compute_concurrency_field_with_access'
        else:
            last_modified_name = 'compute_concurrency_field'

        # this field must override any other column or field
        cls._add_field(cls.CONCURRENCY_CHECK_FIELD, fields.Datetime(
            string='Last Modified on', compute=last_modified_name, automatic=True))

    @api.one
    def compute_concurrency_field(self):
        self[self.CONCURRENCY_CHECK_FIELD] = \
            datetime.datetime.utcnow().strftime(DEFAULT_SERVER_DATETIME_FORMAT)

    @api.one
    @api.depends('create_date', 'write_date')
    def compute_concurrency_field_with_access(self):
        self[self.CONCURRENCY_CHECK_FIELD] = \
            self.write_date or self.create_date or \
            datetime.datetime.utcnow().strftime(DEFAULT_SERVER_DATETIME_FORMAT)
    #
    # Goal: try to apply inheritance at the instantiation level and
    #       put objects in the pool var
    #
    @classmethod
    def _build_model(cls, pool, cr):
        """ Instantiate a given model.

        This class method instantiates the class of some model (i.e. a class
        deriving from osv or osv_memory). The class might be the class passed
        in argument or, if it inherits from another class, a class constructed
        by combining the two classes.

        """
        # IMPORTANT: the registry contains an instance for each model. The class
        # of each model carries inferred metadata that is shared among the
        # model's instances for this registry, but not among registries. Hence
        # we cannot use that "registry class" for combining model classes by
        # inheritance, since it confuses the metadata inference process.

        # Keep links to non-inherited constraints in cls; this is useful for
        # instance when exporting translations
        cls._local_constraints = cls.__dict__.get('_constraints', [])
        cls._local_sql_constraints = cls.__dict__.get('_sql_constraints', [])

        # determine inherited models
        parents = getattr(cls, '_inherit', [])
        parents = [parents] if isinstance(parents, basestring) else (parents or [])

        # determine the model's name
        name = cls._name or (len(parents) == 1 and parents[0]) or cls.__name__

        # determine the module that introduced the model
        original_module = pool[name]._original_module if name in parents else cls._module

        # build the class hierarchy for the model
        for parent in parents:
            if parent not in pool:
                raise TypeError('The model "%s" specifies an unexisting parent class "%s"\n'
                                'You may need to add a dependency on the parent class\' module.' % (name, parent))
            parent_model = pool[parent]

            # do not use the class of parent_model, since that class contains
            # inferred metadata; use its ancestor instead
            parent_class = type(parent_model).__base__

            # don't inherit custom fields
            columns = dict((key, val)
                           for key, val in parent_class._columns.iteritems()
                           if not val.manual)
            columns.update(cls._columns)

            defaults = dict(parent_class._defaults)
            defaults.update(cls._defaults)

            inherits = dict(parent_class._inherits)
            inherits.update(cls._inherits)

            depends = dict(parent_class._depends)
            for m, fs in cls._depends.iteritems():
                depends.setdefault(m, []).extend(fs)

            old_constraints = parent_class._constraints
            new_constraints = cls._constraints
            # filter out from old_constraints the ones overridden by a
            # constraint with the same function name in new_constraints
            constraints = new_constraints + [oldc
                for oldc in old_constraints
                if not any(newc[2] == oldc[2] and same_name(newc[0], oldc[0])
                           for newc in new_constraints)
            ]

            sql_constraints = cls._sql_constraints + \
                              parent_class._sql_constraints

            attrs = {
                '_name': name,
                '_register': False,
                '_columns': columns,
                '_defaults': defaults,
                '_inherits': inherits,
                '_depends': depends,
                '_constraints': constraints,
                '_sql_constraints': sql_constraints,
            }
            cls = type(name, (cls, parent_class), attrs)

        # introduce the "registry class" of the model;
        # duplicate some attributes so that the ORM can modify them
        attrs = {
            '_register': False,
            '_columns': dict(cls._columns),
            '_defaults': dict(cls._defaults),
            '_inherits': dict(cls._inherits),
            '_depends': dict(cls._depends),
            '_constraints': list(cls._constraints),
            '_sql_constraints': list(cls._sql_constraints),
            '_original_module': original_module,
        }
        cls = type(cls._name, (cls,), attrs)

        # float fields are registry-dependent (digit attribute); duplicate them
        for key, col in cls._columns.items():
            if col._type == 'float':
                cls._columns[key] = copy.copy(col)

        # instantiate the model, and initialize it
        model = object.__new__(cls)
        model.__init__(pool, cr)
        return model
    @classmethod
    def _init_function_fields(cls, pool, cr):
        # initialize the list of non-stored function fields for this model
        pool._pure_function_fields[cls._name] = []

        # process store of low-level function fields
        for fname, column in cls._columns.iteritems():
            if hasattr(column, 'digits_change'):
                column.digits_change(cr)
            # filter out existing store about this field
            pool._store_function[cls._name] = [
                stored
                for stored in pool._store_function.get(cls._name, [])
                if (stored[0], stored[1]) != (cls._name, fname)
            ]
            if not isinstance(column, fields.function):
                continue
            if not column.store:
                # register it on the pool for invalidation
                pool._pure_function_fields[cls._name].append(fname)
                continue
            # process store parameter
            store = column.store
            if store is True:
                get_ids = lambda self, cr, uid, ids, c={}: ids
                store = {cls._name: (get_ids, None, column.priority, None)}
            for model, spec in store.iteritems():
                if len(spec) == 4:
                    (fnct, fields2, order, length) = spec
                elif len(spec) == 3:
                    (fnct, fields2, order) = spec
                    length = None
                else:
                    raise except_orm('Error',
                        ('Invalid function definition %s in object %s !\nYou must use the definition: store={object:(fnct, fields, priority, time length)}.' % (fname, cls._name)))
                pool._store_function.setdefault(model, [])
                t = (cls._name, fname, fnct, tuple(fields2) if fields2 else None, order, length)
                if t not in pool._store_function[model]:
                    pool._store_function[model].append(t)
                    pool._store_function[model].sort(key=lambda x: x[4])
    @classmethod
    def _init_manual_fields(cls, pool, cr):
        # Check whether the query is already done
        if pool.fields_by_model is not None:
            manual_fields = pool.fields_by_model.get(cls._name, [])
        else:
            cr.execute('SELECT * FROM ir_model_fields WHERE model=%s AND state=%s', (cls._name, 'manual'))
            manual_fields = cr.dictfetchall()

        for field in manual_fields:
            if field['name'] in cls._columns:
                continue
            attrs = {
                'string': field['field_description'],
                'required': bool(field['required']),
                'readonly': bool(field['readonly']),
                'domain': eval(field['domain']) if field['domain'] else None,
                'size': field['size'] or None,
                'ondelete': field['on_delete'],
                'translate': (field['translate']),
                'manual': True,
                #'select': int(field['select_level'])
            }
            if field['serialization_field_id']:
                cr.execute('SELECT name FROM ir_model_fields WHERE id=%s', (field['serialization_field_id'],))
                attrs.update({'serialization_field': cr.fetchone()[0], 'type': field['ttype']})
                if field['ttype'] in ['many2one', 'one2many', 'many2many']:
                    attrs.update({'relation': field['relation']})
                cls._columns[field['name']] = fields.sparse(**attrs)
            elif field['ttype'] == 'selection':
                cls._columns[field['name']] = fields.selection(eval(field['selection']), **attrs)
            elif field['ttype'] == 'reference':
                cls._columns[field['name']] = fields.reference(selection=eval(field['selection']), **attrs)
            elif field['ttype'] == 'many2one':
                cls._columns[field['name']] = fields.many2one(field['relation'], **attrs)
            elif field['ttype'] == 'one2many':
                cls._columns[field['name']] = fields.one2many(field['relation'], field['relation_field'], **attrs)
            elif field['ttype'] == 'many2many':
                _rel1 = field['relation'].replace('.', '_')
                _rel2 = field['model'].replace('.', '_')
                _rel_name = 'x_%s_%s_%s_rel' % (_rel1, _rel2, field['name'])
                cls._columns[field['name']] = fields.many2many(field['relation'], _rel_name, 'id1', 'id2', **attrs)
            else:
                cls._columns[field['name']] = getattr(fields, field['ttype'])(**attrs)
    @classmethod
    def _init_constraints_onchanges(cls):
        # store sql constraint error messages
        for (key, _, msg) in cls._sql_constraints:
            cls.pool._sql_error[cls._table + '_' + key] = msg

        # collect constraint and onchange methods
        cls._constraint_methods = []
        cls._onchange_methods = defaultdict(list)
        for attr, func in getmembers(cls, callable):
            if hasattr(func, '_constrains'):
                if not all(name in cls._fields for name in func._constrains):
                    _logger.warning("@constrains%r parameters must be field names", func._constrains)
                cls._constraint_methods.append(func)
            if hasattr(func, '_onchange'):
                if not all(name in cls._fields for name in func._onchange):
                    _logger.warning("@onchange%r parameters must be field names", func._onchange)
                for name in func._onchange:
                    cls._onchange_methods[name].append(func)
    def __new__(cls):
        # In the past, this method was registering the model class in the server.
        # This job is now done entirely by the metaclass MetaModel.
        #
        # Do not create an instance here. Model instances are created by method
        # _build_model().
        return None
    def __init__(self, pool, cr):
        """ Initialize a model and make it part of the given registry.

        - copy the stored fields' functions in the registry,
        - retrieve custom fields and add them in the model,
        - ensure there is a many2one for each _inherits'd parent,
        - update the children's _columns,
        - give a chance to each field to initialize itself.

        """
        cls = type(self)

        # link the class to the registry, and update the registry
        cls.pool = pool
        cls._model = self              # backward compatibility
        pool.add(cls._name, self)

        # determine description, table, sequence and log_access
        if not cls._description:
            cls._description = cls._name
        if not cls._table:
            cls._table = cls._name.replace('.', '_')
        if not cls._sequence:
            cls._sequence = cls._table + '_id_seq'
        if not hasattr(cls, '_log_access'):
            # If _log_access is not specified, it is the same value as _auto.
            cls._log_access = cls._auto

        # Transience
        if cls.is_transient():
            cls._transient_check_count = 0
            cls._transient_max_count = config.get('osv_memory_count_limit')
            cls._transient_max_hours = config.get('osv_memory_age_limit')
            assert cls._log_access, \
                "TransientModels must have log_access turned on, " \
                "in order to implement their access rights policy"

        # retrieve new-style fields and duplicate them (to avoid clashes with
        # inheritance between different models)
        cls._fields = {}
        for attr, field in getmembers(cls, Field.__instancecheck__):
            if not field._origin:
                cls._add_field(attr, field.copy())

        # introduce magic fields
        cls._add_magic_fields()

        # register stuff about low-level function fields and custom fields
        cls._init_function_fields(pool, cr)
        cls._init_manual_fields(pool, cr)

        # process _inherits
        cls._inherits_check()
        cls._inherits_reload()

        # register constraints and onchange methods
        cls._init_constraints_onchanges()

        # check defaults
        for k in cls._defaults:
            assert k in cls._fields, \
                "Model %s has a default for nonexistent field %s" % (cls._name, k)

        # restart columns
        for column in cls._columns.itervalues():
            column.restart()

        # validate rec_name
        if cls._rec_name:
            assert cls._rec_name in cls._fields, \
                "Invalid rec_name %s for model %s" % (cls._rec_name, cls._name)
        elif 'name' in cls._fields:
            cls._rec_name = 'name'

        # prepare ormcache, which must be shared by all instances of the model
        cls._ormcache = {}
    def __export_xml_id(self):
        """ Return a valid xml_id for the record `self`. """
        ir_model_data = self.sudo().env['ir.model.data']
        data = ir_model_data.search([('model', '=', self._name), ('res_id', '=', self.id)])
        if data:
            return '%s.%s' % (data.module, data.name)
        else:
            postfix = 0
            name = '%s_%s' % (self._table, self.id)
            while ir_model_data.search([('module', '=', '__export__'), ('name', '=', name)]):
                postfix += 1
                name = '%s_%s_%s' % (self._table, self.id, postfix)
            ir_model_data.create({
                'model': self._name,
                'res_id': self.id,
                'module': '__export__',
                'name': name,
            })
            return '__export__.' + name
    def __export_rows(self, fields):
        """ Export fields of the records in `self`.

        :param fields: list of lists of fields to traverse
        :return: list of lists of corresponding values
        """
        lines = []
        for record in self:
            # main line of record, initially empty
            current = [''] * len(fields)
            lines.append(current)

            # list of primary fields followed by secondary field(s)
            primary_done = []

            # process column by column
            for i, path in enumerate(fields):
                if not path:
                    continue

                name = path[0]
                if name in primary_done:
                    continue

                if name == '.id':
                    current[i] = str(record.id)
                elif name == 'id':
                    current[i] = record.__export_xml_id()
                else:
                    field = record._fields[name]
                    value = record[name]

                    # this part could be simpler, but it has to be done this way
                    # in order to reproduce the former behavior
                    if not isinstance(value, BaseModel):
                        current[i] = field.convert_to_export(value, self.env)
                    else:
                        primary_done.append(name)

                        # This is a special case, its strange behavior is intended!
                        if field.type == 'many2many' and len(path) > 1 and path[1] == 'id':
                            xml_ids = [r.__export_xml_id() for r in value]
                            current[i] = ','.join(xml_ids) or False
                            continue

                        # recursively export the fields that follow name
                        fields2 = [(p[1:] if p and p[0] == name else []) for p in fields]
                        lines2 = value.__export_rows(fields2)
                        if lines2:
                            # merge first line with record's main line
                            for j, val in enumerate(lines2[0]):
                                if val:
                                    current[j] = val
                            # check value of current field
                            if not current[i]:
                                # assign xml_ids, and forget about remaining lines
                                xml_ids = [item[1] for item in value.name_get()]
                                current[i] = ','.join(xml_ids)
                            else:
                                # append the other lines at the end
                                lines += lines2[1:]
                        else:
                            current[i] = False

        return lines
    @api.multi
    def export_data(self, fields_to_export, raw_data=False):
        """ Export fields for selected objects

        :param fields_to_export: list of fields
        :param raw_data: True to return value in native Python type
        :rtype: dictionary with a *datas* matrix

        This method is used when exporting data via client menu
        """
        fields_to_export = map(fix_import_export_id_paths, fields_to_export)
        if raw_data:
            self = self.with_context(export_raw_data=True)
        return {'datas': self.__export_rows(fields_to_export)}
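    # Illustrative use (field names depend on the target model):
    #
    #     partners.export_data(['name', 'country_id/id'])
    #     # => {'datas': [[u'Agrolait', u'base.be'], ...]}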
    def import_data(self, cr, uid, fields, datas, mode='init', current_module='', noupdate=False, context=None, filename=None):
        """
        .. deprecated:: 7.0
            Use :meth:`~load` instead

        Import given data in given module

        This method is used when importing data via client menu.

        Example of fields to import for a sale.order::

            .id,                         (=database_id)
            partner_id,                  (=name_search)
            order_line/.id,              (=database_id)
            order_line/name,
            order_line/product_id/id,    (=xml id)
            order_line/price_unit,
            order_line/product_uom_qty,
            order_line/product_uom/id    (=xml_id)

        This method returns a 4-tuple with the following structure::

            (return_code, errored_resource, error_message, unused)

        * The first item is a return code, it is ``-1`` in case of
          import error, or the last imported row number in case of success
        * The second item contains the record data dict that failed to import
          in case of error, otherwise it's 0
        * The third item contains an error message string in case of error,
          otherwise it's 0
        * The last item is currently unused, with no specific semantics

        :param fields: list of fields to import
        :param datas: data to import
        :param mode: 'init' or 'update' for record creation
        :param current_module: module name
        :param noupdate: flag for record creation
        :param filename: optional file to store partial import state for recovery
        :returns: 4-tuple in the form (return_code, errored_resource, error_message, unused)
        :rtype: (int, dict or 0, str or 0, str or 0)
        """
        context = dict(context) if context is not None else {}
        context['_import_current_module'] = current_module

        fields = map(fix_import_export_id_paths, fields)
        ir_model_data_obj = self.pool.get('ir.model.data')

        def log(m):
            if m['type'] == 'error':
                raise Exception(m['message'])

        position = 0
        if config.get('import_partial') and filename:
            with open(config.get('import_partial'), 'rb') as partial_import_file:
                data = pickle.load(partial_import_file)
                position = data.get(filename, 0)

        try:
            for res_id, xml_id, res, info in self._convert_records(cr, uid,
                            self._extract_records(cr, uid, fields, datas,
                                                  context=context, log=log),
                            context=context, log=log):
                ir_model_data_obj._update(cr, uid, self._name,
                     current_module, res, mode=mode, xml_id=xml_id,
                     noupdate=noupdate, res_id=res_id, context=context)
                position = info.get('rows', {}).get('to', 0) + 1
                if config.get('import_partial') and filename and (not (position % 100)):
                    with open(config.get('import_partial'), 'rb') as partial_import:
                        data = pickle.load(partial_import)
                    data[filename] = position
                    with open(config.get('import_partial'), 'wb') as partial_import:
                        pickle.dump(data, partial_import)
                    if context.get('defer_parent_store_computation'):
                        self._parent_store_compute(cr)
                    cr.commit()
        except Exception, e:
            cr.rollback()
            return -1, {}, 'Line %d : %s' % (position + 1, tools.ustr(e)), ''

        if context.get('defer_parent_store_computation'):
            self._parent_store_compute(cr)
        return position, 0, 0, 0
    def load(self, cr, uid, fields, data, context=None):
        """
        Attempts to load the data matrix, and returns a list of ids (or
        ``False`` if there was an error and no id could be generated) and a
        list of messages.

        The ids are those of the records created and saved (in database), in
        the same order they were extracted from the file. They can be passed
        directly to :meth:`~read`.

        :param fields: list of fields to import, at the same index as the corresponding data
        :type fields: list(str)
        :param data: row-major matrix of data to import
        :type data: list(list(str))
        :param dict context:
        :returns: {ids: list(int)|False, messages: [Message]}
        """
        cr.execute('SAVEPOINT model_load')
        messages = []

        fields = map(fix_import_export_id_paths, fields)
        ModelData = self.pool['ir.model.data']
        ModelData.clear_caches()

        fg = self.fields_get(cr, uid, context=context)

        mode = 'init'
        current_module = ''
        noupdate = False

        ids = []
        for id, xid, record, info in self._convert_records(cr, uid,
                self._extract_records(cr, uid, fields, data,
                                      context=context, log=messages.append),
                context=context, log=messages.append):
            try:
                cr.execute('SAVEPOINT model_load_save')
            except psycopg2.InternalError, e:
                # broken transaction, exit and hope the source error was
                # already logged
                if not any(message['type'] == 'error' for message in messages):
                    messages.append(dict(info, type='error', message=
                        u"Unknown database error: '%s'" % e))
                break
            try:
                ids.append(ModelData._update(cr, uid, self._name,
                     current_module, record, mode=mode, xml_id=xid,
                     noupdate=noupdate, res_id=id, context=context))
                cr.execute('RELEASE SAVEPOINT model_load_save')
            except psycopg2.Warning, e:
                messages.append(dict(info, type='warning', message=str(e)))
                cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
            except psycopg2.Error, e:
                messages.append(dict(
                    info, type='error',
                    **PGERROR_TO_OE[e.pgcode](self, fg, info, e)))
                # Failed to write, log to messages, rollback savepoint (to
                # avoid broken transaction) and keep going
                cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
        if any(message['type'] == 'error' for message in messages):
            cr.execute('ROLLBACK TO SAVEPOINT model_load')
            ids = False
        return {'ids': ids, 'messages': messages}
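    # Illustrative call, mirroring a CSV import (the external ids are
    # hypothetical):
    #
    #     model.load(cr, uid,
    #                ['id', 'name', 'line_ids/product_id/id'],
    #                [['my_module.order_1', 'SO001', 'my_module.product_a'],
    #                 ['', '', 'my_module.product_b']],
    #                context=context)
    #     # => {'ids': [42], 'messages': []} on success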
    def _extract_records(self, cr, uid, fields_, data,
                         context=None, log=lambda a: None):
        """ Generates record dicts from the data sequence.

        The result is a generator of dicts mapping field names to raw
        (unconverted, unvalidated) values.

        For relational fields, if sub-fields were provided the value will be
        a list of sub-records

        The following sub-fields may be set on the record (by key):
        * None is the name_get for the record (to use with name_create/name_search)
        * "id" is the External ID for the record
        * ".id" is the Database ID for the record
        """
        columns = dict((k, v.column) for k, v in self._all_columns.iteritems())
        # Fake columns to avoid special cases in extractor
        columns[None] = fields.char('rec_name')
        columns['id'] = fields.char('External ID')
        columns['.id'] = fields.integer('Database ID')

        # m2o fields can't be on multiple lines so exclude them from the
        # is_relational field rows filter, but special-case it later on to
        # be handled with relational fields (as it can have subfields)
        is_relational = lambda field: columns[field]._type in ('one2many', 'many2many', 'many2one')
        get_o2m_values = itemgetter_tuple(
            [index for index, field in enumerate(fields_)
             if columns[field[0]]._type == 'one2many'])
        get_nono2m_values = itemgetter_tuple(
            [index for index, field in enumerate(fields_)
             if columns[field[0]]._type != 'one2many'])
        # Checks if the provided row has any non-empty non-relational field
        def only_o2m_values(row, f=get_nono2m_values, g=get_o2m_values):
            return any(g(row)) and not any(f(row))

        index = 0
        while True:
            if index >= len(data): return

            row = data[index]
            # copy non-relational fields to record dict
            record = dict((field[0], value)
                          for field, value in itertools.izip(fields_, row)
                          if not is_relational(field[0]))

            # Get all following rows which have relational values attached to
            # the current record (no non-relational values)
            record_span = itertools.takewhile(
                only_o2m_values, itertools.islice(data, index + 1, None))
            # stitch record row back on for relational fields
            record_span = list(itertools.chain([row], record_span))
            for relfield in set(
                    field[0] for field in fields_
                    if is_relational(field[0])):
                column = columns[relfield]
                # FIXME: how to not use _obj without relying on fields_get?
                Model = self.pool[column._obj]

                # get only cells for this sub-field, should be strictly
                # non-empty, field path [None] is for name_get column
                indices, subfields = zip(*((index, field[1:] or [None])
                                           for index, field in enumerate(fields_)
                                           if field[0] == relfield))

                # return all rows which have at least one value for the
                # subfields of relfield
                relfield_data = filter(any, map(itemgetter_tuple(indices), record_span))
                record[relfield] = [subrecord
                    for subrecord, _subinfo in Model._extract_records(
                        cr, uid, subfields, relfield_data,
                        context=context, log=log)]

            yield record, {'rows': {
                'from': index,
                'to': index + len(record_span) - 1
            }}
            index += len(record_span)
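    # To illustrate the row spans: with fields_ = [['name'], ['line_ids', 'x']]
    # and data = [['A', '1'], ['', '2']], the second row carries only o2m
    # values, so both rows fold into one record
    # {'name': 'A', 'line_ids': [{'x': '1'}, {'x': '2'}]} spanning rows 0..1.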
    def _convert_records(self, cr, uid, records,
                         context=None, log=lambda a: None):
        """ Converts records from the source iterable (recursive dicts of
        strings) into forms which can be written to the database (via
        self.create or (ir.model.data)._update)

        :returns: a list of triplets of (id, xid, record)
        :rtype: list((int|None, str|None, dict))
        """
        if context is None: context = {}
        Converter = self.pool['ir.fields.converter']
        columns = dict((k, v.column) for k, v in self._all_columns.iteritems())
        Translation = self.pool['ir.translation']
        field_names = dict(
            (f, (Translation._get_source(cr, uid, self._name + ',' + f, 'field',
                                         context.get('lang'))
                 or column.string))
            for f, column in columns.iteritems())

        convert = Converter.for_model(cr, uid, self, context=context)

        def _log(base, field, exception):
            type = 'warning' if isinstance(exception, Warning) else 'error'
            # logs the logical (not human-readable) field name for automated
            # processing of response, but injects human readable in message
            record = dict(base, type=type, field=field,
                          message=unicode(exception.args[0]) % base)
            if len(exception.args) > 1 and exception.args[1]:
                record.update(exception.args[1])
            log(record)

        stream = CountingStream(records)
        for record, extras in stream:
            dbid = False
            xid = False
            # name_get/name_create
            if None in record: pass
            # xid
            if 'id' in record:
                xid = record['id']
            # dbid
            if '.id' in record:
                try:
                    dbid = int(record['.id'])
                except ValueError:
                    # in case of overridden id column
                    dbid = record['.id']
                if not self.search(cr, uid, [('id', '=', dbid)], context=context):
                    log(dict(extras,
                        type='error',
                        record=stream.index,
                        field='.id',
                        message=_(u"Unknown database identifier '%s'") % dbid))
                    dbid = False

            converted = convert(record, lambda field, err: \
                _log(dict(extras, record=stream.index, field=field_names[field]), field, err))

            yield dbid, xid, converted, dict(extras, record=stream.index)
    @api.multi
    def _validate_fields(self, field_names):
        field_names = set(field_names)

        # old-style constraint methods
        trans = self.env['ir.translation']
        cr, uid, context = self.env.args
        ids = self.ids
        errors = []
        for fun, msg, names in self._constraints:
            try:
                # validation must be context-independent; call `fun` without context
                valid = not (set(names) & field_names) or fun(self._model, cr, uid, ids)
                extra_error = None
            except Exception, e:
                _logger.debug('Exception while validating constraint', exc_info=True)
                valid = False
                extra_error = tools.ustr(e)
            if not valid:
                if callable(msg):
                    res_msg = msg(self._model, cr, uid, ids, context=context)
                    if isinstance(res_msg, tuple):
                        template, params = res_msg
                        res_msg = template % params
                else:
                    res_msg = trans._get_source(self._name, 'constraint', self.env.lang, msg)
                if extra_error:
                    res_msg += "\n\n%s\n%s" % (_('Error details:'), extra_error)
                errors.append(
                    _("Field(s) `%s` failed against a constraint: %s") %
                    (', '.join(names), res_msg)
                )
        if errors:
            raise except_orm('ValidateError', '\n'.join(errors))

        # new-style constraint methods
        for check in self._constraint_methods:
            if set(check._constrains) & field_names:
                check(self)
    def default_get(self, cr, uid, fields_list, context=None):
        """ Return default values for the fields in `fields_list`. Default
        values are determined by the context, user defaults, and the model
        itself.

        :param fields_list: a list of field names
        :return: a dictionary mapping each field name to its corresponding
            default value; the keys of the dictionary are the fields in
            `fields_list` that have a default value different from ``False``.

        This method should not be overridden. In order to change the
        mechanism for determining default values, you should override method
        :meth:`add_default_value` instead.
        """
        # trigger view init hook
        self.view_init(cr, uid, fields_list, context)

        # use a new record to determine default values
        record = self.new(cr, uid, {}, context=context)
        for name in fields_list:
            if name in self._fields:
                record[name]            # force evaluation of defaults

        # retrieve defaults from record's cache
        return self._convert_to_write(record._cache)
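    # E.g. default_get(cr, uid, ['name', 'active']) could return
    # {'active': True} when 'name' has no default; False defaults are omitted.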
    def add_default_value(self, field):
        """ Set the default value of `field` to the new record `self`.
        The value must be assigned to `self`.
        """
        assert not self.id, "Expected new record: %s" % self
        cr, uid, context = self.env.args
        name = field.name

        # 1. look up context
        key = 'default_' + name
        if key in context:
            self[name] = context[key]
            return

        # 2. look up ir_values
        #    Note: performance is good, because get_defaults_dict is cached!
        ir_values_dict = self.env['ir.values'].get_defaults_dict(self._name)
        if name in ir_values_dict:
            self[name] = ir_values_dict[name]
            return

        # 3. look up property fields
        #    TODO: get rid of this one
        column = self._columns.get(name)
        if isinstance(column, fields.property):
            self[name] = self.env['ir.property'].get(name, self._name)
            return

        # 4. look up _defaults
        if name in self._defaults:
            value = self._defaults[name]
            if callable(value):
                value = value(self._model, cr, uid, context)
            self[name] = value
            return

        # 5. delegate to field
        field.determine_default(self)
    def fields_get_keys(self, cr, user, context=None):
        res = self._columns.keys()
        # TODO I believe this loop can be replaced by
        # res.extend(self._inherit_fields.keys())
        for parent in self._inherits:
            res.extend(self.pool[parent].fields_get_keys(cr, user, context))
        return res
    def _rec_name_fallback(self, cr, uid, context=None):
        rec_name = self._rec_name
        if rec_name not in self._columns:
            rec_name = self._columns.keys()[0] if len(self._columns.keys()) > 0 else "id"
        return rec_name

    # Overload this method if you need a window title that depends on the context
    def view_header_get(self, cr, user, view_id=None, view_type='form', context=None):
        return False
    def user_has_groups(self, cr, uid, groups, context=None):
        """Return True if the user is at least a member of one of the groups
        in `groups`. Typically used to resolve the `groups` attribute
        in view and model definitions.

        :param str groups: comma-separated list of fully-qualified group
            external IDs, e.g.: ``base.group_user,base.group_system``
        :return: True if the current user is a member of one of the
            given groups
        """
        return any(self.pool['res.users'].has_group(cr, uid, group_ext_id)
                   for group_ext_id in groups.split(','))
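    # E.g. user_has_groups(cr, uid, 'base.group_user,base.group_system')
    # is True as soon as the user belongs to either group.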
    def _get_default_form_view(self, cr, user, context=None):
        """ Generates a default single-line form view using all fields
        of the current model except the m2m and o2m ones.

        :param cr: database cursor
        :param int user: user id
        :param dict context: connection context
        :returns: a form view as an lxml document
        :rtype: etree._Element
        """
        view = etree.Element('form', string=self._description)
        group = etree.SubElement(view, 'group', col="4")
        for fname, field in self._fields.iteritems():
            if field.automatic or field.type in ('one2many', 'many2many'):
                continue

            etree.SubElement(group, 'field', name=fname)
            if field.type == 'text':
                etree.SubElement(group, 'newline')
        return view
    def _get_default_search_view(self, cr, user, context=None):
        """ Generates a single-field search view, based on _rec_name.

        :param cr: database cursor
        :param int user: user id
        :param dict context: connection context
        :returns: a search view as an lxml document
        :rtype: etree._Element
        """
        view = etree.Element('search', string=self._description)
        etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))
        return view
    def _get_default_tree_view(self, cr, user, context=None):
        """ Generates a single-field tree view, based on _rec_name.

        :param cr: database cursor
        :param int user: user id
        :param dict context: connection context
        :returns: a tree view as an lxml document
        :rtype: etree._Element
        """
        view = etree.Element('tree', string=self._description)
        etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))
        return view
    def _get_default_calendar_view(self, cr, user, context=None):
        """ Generates a default calendar view by trying to infer
        calendar fields from a number of pre-set attribute names

        :param cr: database cursor
        :param int user: user id
        :param dict context: connection context
        :returns: a calendar view
        :rtype: etree._Element
        """
        def set_first_of(seq, in_, to):
            """Sets the first value of `seq` also found in `in_` to
            the `to` attribute of the view being closed over.

            Returns whether it found a suitable value (and set it on
            the attribute) or not
            """
            for item in seq:
                if item in in_:
                    view.set(to, item)
                    return True
            return False

        view = etree.Element('calendar', string=self._description)
        etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))

        if self._date_name not in self._columns:
            date_found = False
            for dt in ['date', 'date_start', 'x_date', 'x_date_start']:
                if dt in self._columns:
                    self._date_name = dt
                    date_found = True
                    break

            if not date_found:
                raise except_orm(_('Invalid Object Architecture!'), _("Insufficient fields for Calendar View!"))
        view.set('date_start', self._date_name)

        set_first_of(["user_id", "partner_id", "x_user_id", "x_partner_id"],
                     self._columns, 'color')

        if not set_first_of(["date_stop", "date_end", "x_date_stop", "x_date_end"],
                            self._columns, 'date_stop'):
            if not set_first_of(["date_delay", "planned_hours", "x_date_delay", "x_planned_hours"],
                                self._columns, 'date_delay'):
                raise except_orm(
                    _('Invalid Object Architecture!'),
                    _("Insufficient fields to generate a Calendar View for %s, missing a date_stop or a date_delay" % self._name))

        return view
    def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
        """
        Get the detailed composition of the requested view like fields, model, view architecture

        :param view_id: id of the view or None
        :param view_type: type of the view to return if view_id is None ('form', 'tree', ...)
        :param toolbar: True to include contextual actions
        :param submenu: deprecated
        :return: dictionary describing the composition of the requested view (including inherited views and extensions)
        :raise AttributeError:
            * if the inherited view has unknown position to work with other than 'before', 'after', 'inside', 'replace'
            * if some tag other than 'position' is found in parent view
        :raise Invalid ArchitectureError: if there is a view type other than form, tree, calendar, search etc. defined on the structure
        """
        if context is None:
            context = {}
        View = self.pool['ir.ui.view']

        result = {
            'model': self._name,
            'field_parent': False,
        }

        # try to find a view_id if none provided
        if not view_id:
            # <view_type>_view_ref in context can be used to override the default view
            view_ref_key = view_type + '_view_ref'
            view_ref = context.get(view_ref_key)
            if view_ref:
                if '.' in view_ref:
                    module, view_ref = view_ref.split('.', 1)
                    cr.execute("SELECT res_id FROM ir_model_data WHERE model='ir.ui.view' AND module=%s AND name=%s", (module, view_ref))
                    view_ref_res = cr.fetchone()
                    if view_ref_res:
                        view_id = view_ref_res[0]
                else:
                    _logger.warning('%r requires a fully-qualified external id (got: %r for model %s). '
                        'Please use the complete `module.view_id` form instead.', view_ref_key, view_ref,
                        self._name)

            if not view_id:
                # otherwise try to find the lowest priority matching ir.ui.view
                view_id = View.default_view(cr, uid, self._name, view_type, context=context)

        # context for post-processing might be overridden
        ctx = context
        if view_id:
            # read the view with inherited views applied
            root_view = View.read_combined(cr, uid, view_id, fields=['id', 'name', 'field_parent', 'type', 'model', 'arch'], context=context)
            result['arch'] = root_view['arch']
            result['name'] = root_view['name']
            result['type'] = root_view['type']
            result['view_id'] = root_view['id']
            result['field_parent'] = root_view['field_parent']
            # override context for postprocessing
            if root_view.get('model') != self._name:
                ctx = dict(context, base_model_name=root_view.get('model'))
        else:
            # fallback on default views methods if no ir.ui.view could be found
            try:
                get_func = getattr(self, '_get_default_%s_view' % view_type)
                arch_etree = get_func(cr, uid, context)
                result['arch'] = etree.tostring(arch_etree, encoding='utf-8')
                result['type'] = view_type
                result['name'] = 'default'
            except AttributeError:
                raise except_orm(_('Invalid Architecture!'), _("No default view of type '%s' could be found!") % view_type)

        # Apply post processing, groups and modifiers etc...
        xarch, xfields = View.postprocess_and_fields(cr, uid, self._name, etree.fromstring(result['arch']), view_id, context=ctx)
        result['arch'] = xarch
        result['fields'] = xfields

        # Add related action information if asked
        if toolbar:
            toclean = ('report_sxw_content', 'report_rml_content', 'report_sxw', 'report_rml', 'report_sxw_content_data', 'report_rml_content_data')
            def clean(x):
                x = x[2]
                for key in toclean:
                    x.pop(key, None)
                return x
            ir_values_obj = self.pool.get('ir.values')
            resprint = ir_values_obj.get(cr, uid, 'action', 'client_print_multi', [(self._name, False)], False, context)
            resaction = ir_values_obj.get(cr, uid, 'action', 'client_action_multi', [(self._name, False)], False, context)
            resrelate = ir_values_obj.get(cr, uid, 'action', 'client_action_relate', [(self._name, False)], False, context)
            resaction = [clean(action) for action in resaction if view_type == 'tree' or not action[2].get('multi')]
            resprint = [clean(print_) for print_ in resprint if view_type == 'tree' or not print_[2].get('multi')]
            # When multi="True" is set, the action is displayed only in the More menu of the list view
            resrelate = [clean(action) for action in resrelate
                         if (action[2].get('multi') and view_type == 'tree') or (not action[2].get('multi') and view_type == 'form')]

            for x in itertools.chain(resprint, resaction, resrelate):
                x['string'] = x['name']

            result['toolbar'] = {
                'print': resprint,
                'action': resaction,
                'relate': resrelate,
            }
        return result
    def get_formview_id(self, cr, uid, id, context=None):
        """ Return a view id to open the document with. This method is meant to be
        overridden in addons that want to give specific view ids, for example.

        :param int id: id of the document to open
        """
        return False

    def get_formview_action(self, cr, uid, id, context=None):
        """ Return an action to open the document. This method is meant to be
        overridden in addons that want to give specific view ids, for example.

        :param int id: id of the document to open
        """
        view_id = self.get_formview_id(cr, uid, id, context=context)
        return {
            'type': 'ir.actions.act_window',
            'res_model': self._name,
            'view_type': 'form',
            'view_mode': 'form',
            'views': [(view_id, 'form')],
            'target': 'current',
            'res_id': id,
        }
    def _view_look_dom_arch(self, cr, uid, node, view_id, context=None):
        return self.pool['ir.ui.view'].postprocess_and_fields(
            cr, uid, self._name, node, view_id, context=context)

    def search_count(self, cr, user, args, context=None):
        res = self.search(cr, user, args, context=context, count=True)
        if isinstance(res, list):
            return len(res)
        return res
    @api.returns('self')
    def search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False):
        """
        Search for records based on a search domain.

        :param cr: database cursor
        :param user: current user id
        :param args: list of tuples specifying the search domain [('field_name', 'operator', value), ...]. Pass an empty list to match all records.
        :param offset: optional number of results to skip in the returned values (default: 0)
        :param limit: optional max number of records to return (default: **None**)
        :param order: optional columns to sort by (default: ``self._order``, i.e. ``id``)
        :param context: optional context arguments, like lang, time zone
        :type context: dictionary
        :param count: optional (default: **False**), if **True**, returns only the number of records matching the criteria, not their ids
        :return: id or list of ids of records matching the criteria
        :rtype: integer or list of integers
        :raise AccessError: * if user tries to bypass access rules for read on the requested object.

        **Expressing a search domain (args)**

        Each tuple in the search domain needs to have 3 elements, in the form: **('field_name', 'operator', value)**, where:

            * **field_name** must be a valid name of field of the object model, possibly following many-to-one relationships using dot-notation, e.g 'street' or 'partner_id.country' are valid values.
            * **operator** must be a string with a valid comparison operator from this list: ``=, !=, >, >=, <, <=, like, ilike, in, not in, child_of, parent_left, parent_right``
              The semantics of most of these operators are obvious.
              The ``child_of`` operator will look for records that are children or grand-children of a given record,
              according to the semantics of this model (i.e. following the relationship field named by
              ``self._parent_name``, by default ``parent_id``).
            * **value** must be a valid value to compare with the values of **field_name**, depending on its type.

        Domain criteria can be combined using 3 logical operators that can be added between tuples: '**&**' (logical AND, default), '**|**' (logical OR), '**!**' (logical NOT).
        These are **prefix** operators and the arity of the '**&**' and '**|**' operator is 2, while the arity of the '**!**' is just 1.
        Be very careful about this when you combine them the first time.

        Here is an example of searching for Partners named *ABC* from Belgium or Germany whose language is not English::

            [('name','=','ABC'),'!',('language.code','=','en_US'),'|',('country_id.code','=','be'),('country_id.code','=','de')]

        The '&' is omitted as it is the default, and of course we could have used '!=' for the language, but what this domain really represents is::

            (name is 'ABC' AND (language is NOT English) AND (country is Belgium OR Germany))

        """
        return self._search(cr, user, args, offset=offset, limit=limit, order=order, context=context, count=count)
1646 # display_name, name_get, name_create, name_search
1649 @api.depends(lambda self: (self._rec_name,) if self._rec_name else ())
1650 def _compute_display_name(self):
1651 name = self._rec_name
1652 if name in self._fields:
1653 convert = self._fields[name].convert_to_display_name
1655 record.display_name = convert(record[name])
1658 record.display_name = "%s,%s" % (record._name, record.id)
    def _inverse_display_name(self):
        name = self._rec_name
        if name in self._fields and not self._fields[name].relational:
            for record in self:
                record[name] = record.display_name
        else:
            _logger.warning("Cannot inverse field display_name on %s", self._name)
    def _search_display_name(self, operator, value):
        name = self._rec_name
        if name in self._fields:
            return [(name, operator, value)]
        else:
            _logger.warning("Cannot search field display_name on %s", self._name)
            return [(0, '=', 1)]
1678 """ Return a textual representation for the records in `self`.
1679 By default this is the value of field ``display_name``.
1682 :return: list of pairs ``(id, text_repr)`` for all records
1687 result.append((record.id, record.display_name))
1688 except MissingError:
    @api.model
    def name_create(self, name):
1694 """ Create a new record by calling :meth:`~.create` with only one value
1695 provided: the display name of the new record.
1697 The new record will be initialized with any default values
1698 applicable to this model, or provided through the context. The usual
1699 behavior of :meth:`~.create` applies.
1701 :param name: display name of the record to create
        :return: the :meth:`~.name_get` pair value of the created record
        """
        # Shortcut the inverse function of 'display_name' with self._rec_name.
        # This is useful when self._rec_name is a required field: in that case,
        # create() creates a record without the field, and inversing
        # display_name afterwards would fail.
        field_name = self._rec_name if self._rec_name else 'display_name'
1710 record = self.create({field_name: name})
1711 return (record.id, record.display_name)
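
    # Illustrative only (hypothetical model and value): name_create() is what
    # backs "quick create" on relational widgets, e.g.:
    #
    #   rec_id, label = self.pool['res.country'].name_create(cr, uid, 'Belgium')
    #   # rec_id is the new record's id, label its display_name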
    @api.model
    def name_search(self, name='', args=None, operator='ilike', limit=100):
1715 """ Search for records that have a display name matching the given
1716 `name` pattern when compared with the given `operator`, while also
1717 matching the optional search domain (`args`).
        This is used for example to provide suggestions based on a partial
        value for a relational field. It can sometimes be seen as the inverse
        function of :meth:`~.name_get`, but it is not guaranteed to be one.
1723 This method is equivalent to calling :meth:`~.search` with a search
1724 domain based on `display_name` and then :meth:`~.name_get` on the
1725 result of the search.
1727 :param name: the name pattern to match
1728 :param list args: optional search domain (see :meth:`~.search` for
1729 syntax), specifying further restrictions
1730 :param str operator: domain operator for matching `name`, such as
1731 ``'like'`` or ``'='``.
1732 :param int limit: optional max number of records to return
        :return: list of pairs ``(id, text_repr)`` for all matching records.
        """
        args = list(args or [])
1737 if not (name == '' and operator == 'ilike'):
1738 args += [('display_name', operator, name)]
1739 return self.search(args, limit=limit).name_get()
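
    # Sketch of the documented equivalence (values illustrative):
    #
    #   model.name_search(name='AB', operator='ilike', limit=8)
    # behaves like:
    #   model.search([('display_name', 'ilike', 'AB')], limit=8).name_get()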
1741 def _name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100, name_get_uid=None):
1742 # private implementation of name_search, allows passing a dedicated user
1743 # for the name_get part to solve some access rights issues
1744 args = list(args or [])
1745 # optimize out the default criterion of ``ilike ''`` that matches everything
1746 if not (name == '' and operator == 'ilike'):
1747 args += [('display_name', operator, name)]
1748 access_rights_uid = name_get_uid or user
1749 ids = self._search(cr, user, args, limit=limit, context=context, access_rights_uid=access_rights_uid)
        res = self.name_get(cr, access_rights_uid, ids, context)
        return res
    def read_string(self, cr, uid, id, langs, fields=None, context=None):
        res = {}
        res2 = {}
        self.pool.get('ir.translation').check_access_rights(cr, uid, 'read')
        if not fields:
            fields = self._columns.keys() + self._inherit_fields.keys()
        #FIXME: collect all calls to _get_source into one SQL call.
        for lang in langs:
            res[lang] = {'code': lang}
            for f in fields:
                if f in self._columns:
                    res_trans = self.pool.get('ir.translation')._get_source(cr, uid, self._name+','+f, 'field', lang)
                    if res_trans:
                        res[lang][f] = res_trans
                    else:
                        res[lang][f] = self._columns[f].string
        for table in self._inherits:
            cols = intersect(self._inherit_fields.keys(), fields)
            res2 = self.pool[table].read_string(cr, uid, id, langs, cols, context)
        for lang in res2:
            if lang in res:
                res[lang]['code'] = lang
            for f in res2[lang]:
                res[lang][f] = res2[lang][f]
        return res
    def write_string(self, cr, uid, id, langs, vals, context=None):
        self.pool.get('ir.translation').check_access_rights(cr, uid, 'write')
        #FIXME: try to only call the translation in one SQL
        for lang in langs:
            for field in vals:
                if field in self._columns:
                    src = self._columns[field].string
                    self.pool.get('ir.translation')._set_ids(cr, uid, self._name+','+field, 'field', lang, [0], vals[field], src)
        for table in self._inherits:
            cols = intersect(self._inherit_fields.keys(), vals)
            if cols:
                self.pool[table].write_string(cr, uid, id, langs, vals, context)
        return True
1793 def _add_missing_default_values(self, cr, uid, values, context=None):
        # avoid overriding inherited values when parent is set
        avoid_tables = []
1796 for tables, parent_field in self._inherits.items():
1797 if parent_field in values:
1798 avoid_tables.append(tables)
1800 # compute missing fields
1801 missing_defaults = set()
1802 for field in self._columns.keys():
1803 if not field in values:
1804 missing_defaults.add(field)
1805 for field in self._inherit_fields.keys():
1806 if (field not in values) and (self._inherit_fields[field][0] not in avoid_tables):
1807 missing_defaults.add(field)
1808 # discard magic fields
1809 missing_defaults -= set(MAGIC_COLUMNS)
1811 if missing_defaults:
1812 # override defaults with the provided values, never allow the other way around
            defaults = self.default_get(cr, uid, list(missing_defaults), context)
            for dv in defaults:
                if ((dv in self._columns and self._columns[dv]._type == 'many2many') \
1816 or (dv in self._inherit_fields and self._inherit_fields[dv][2]._type == 'many2many')) \
1817 and defaults[dv] and isinstance(defaults[dv][0], (int, long)):
1818 defaults[dv] = [(6, 0, defaults[dv])]
1819 if (dv in self._columns and self._columns[dv]._type == 'one2many' \
1820 or (dv in self._inherit_fields and self._inherit_fields[dv][2]._type == 'one2many')) \
1821 and isinstance(defaults[dv], (list, tuple)) and defaults[dv] and isinstance(defaults[dv][0], dict):
1822 defaults[dv] = [(0, 0, x) for x in defaults[dv]]
            defaults.update(values)
            values = defaults
        return values
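
    # Example of the conversions above (hypothetical defaults): a default of
    # [1, 2] for a many2many is wrapped into the command list [(6, 0, [1, 2])],
    # and a default of [{'name': 'x'}] for a one2many becomes [(0, 0, {'name': 'x'})].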
1827 def clear_caches(self):
1828 """ Clear the caches
1830 This clears the caches associated to methods decorated with
        ``tools.ormcache`` or ``tools.ormcache_multi``.
        """
        try:
            self._ormcache.clear()
            self.pool._any_cache_cleared = True
        except AttributeError:
            pass
1840 def _read_group_fill_results(self, cr, uid, domain, groupby, remaining_groupbys, aggregated_fields,
1841 read_group_result, read_group_order=None, context=None):
1842 """Helper method for filling in empty groups for all possible values of
1843 the field being grouped by"""
1845 # self._group_by_full should map groupable fields to a method that returns
1846 # a list of all aggregated values that we want to display for this field,
1847 # in the form of a m2o-like pair (key,label).
1848 # This is useful to implement kanban views for instance, where all columns
1849 # should be displayed even if they don't contain any record.
1851 # Grab the list of all groups that should be displayed, including all present groups
1852 present_group_ids = [x[groupby][0] for x in read_group_result if x[groupby]]
1853 all_groups,folded = self._group_by_full[groupby](self, cr, uid, present_group_ids, domain,
1854 read_group_order=read_group_order,
                                                    access_rights_uid=openerp.SUPERUSER_ID,
                                                    context=context)
1858 result_template = dict.fromkeys(aggregated_fields, False)
1859 result_template[groupby + '_count'] = 0
1860 if remaining_groupbys:
1861 result_template['__context'] = {'group_by': remaining_groupbys}
1863 # Merge the left_side (current results as dicts) with the right_side (all
1864 # possible values as m2o pairs). Both lists are supposed to be using the
        # same ordering, and can be merged in one pass.
        result = []
        known_values = {}
1868 def append_left(left_side):
1869 grouped_value = left_side[groupby] and left_side[groupby][0]
1870 if not grouped_value in known_values:
1871 result.append(left_side)
                known_values[grouped_value] = left_side
            else:
1874 count_attr = groupby + '_count'
1875 known_values[grouped_value].update({count_attr: left_side[count_attr]})
1876 def append_right(right_side):
1877 grouped_value = right_side[0]
1878 if not grouped_value in known_values:
1879 line = dict(result_template)
1880 line[groupby] = right_side
                line['__domain'] = [(groupby, '=', grouped_value)] + domain
                result.append(line)
1883 known_values[grouped_value] = line
1884 while read_group_result or all_groups:
1885 left_side = read_group_result[0] if read_group_result else None
1886 right_side = all_groups[0] if all_groups else None
1887 assert left_side is None or left_side[groupby] is False \
1888 or isinstance(left_side[groupby], (tuple,list)), \
1889 'M2O-like pair expected, got %r' % left_side[groupby]
1890 assert right_side is None or isinstance(right_side, (tuple,list)), \
1891 'M2O-like pair expected, got %r' % right_side
1892 if left_side is None:
1893 append_right(all_groups.pop(0))
1894 elif right_side is None:
1895 append_left(read_group_result.pop(0))
1896 elif left_side[groupby] == right_side:
1897 append_left(read_group_result.pop(0))
1898 all_groups.pop(0) # discard right_side
1899 elif not left_side[groupby] or not left_side[groupby][0]:
1900 # left side == "Undefined" entry, not present on right_side
                append_left(read_group_result.pop(0))
            else:
                append_right(all_groups.pop(0))

        if folded:
            for r in result:
                r['__fold'] = folded.get(r[groupby] and r[groupby][0], False)
        return result
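
    # Merge sketch (values illustrative): with aggregated rows for groups
    # (1, 'New') and (3, 'Done') on the left, and all_groups
    # [(1, 'New'), (2, 'Open'), (3, 'Done')] on the right, the merged result
    # keeps the two aggregated rows and inserts an empty template row
    # (count 0) for (2, 'Open'), so e.g. a kanban view shows all its columns.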
1910 def _read_group_prepare(self, orderby, aggregated_fields, annotated_groupbys, query):
        """
        Prepares the GROUP BY and ORDER BY terms for the read_group method. Adds the missing JOIN clause
        to the query if order should be computed against m2o field.
1913 to the query if order should be computed against m2o field.
1914 :param orderby: the orderby definition in the form "%(field)s %(order)s"
1915 :param aggregated_fields: list of aggregated fields in the query
        :param annotated_groupbys: list of dictionaries returned by _read_group_process_groupby
                These dictionaries contain the qualified name of each groupby
                (fully qualified SQL name for the corresponding field),
                and the (non-raw) field name.
1920 :param osv.Query query: the query under construction
        :return: (groupby_terms, orderby_terms)
        """
        orderby_terms = []
        groupby_terms = [gb['qualified_field'] for gb in annotated_groupbys]
        groupby_fields = [gb['groupby'] for gb in annotated_groupbys]
        if not orderby:
            return groupby_terms, orderby_terms
1929 self._check_qorder(orderby)
1930 for order_part in orderby.split(','):
1931 order_split = order_part.split()
1932 order_field = order_split[0]
1933 if order_field in groupby_fields:
1935 if self._all_columns[order_field.split(':')[0]].column._type == 'many2one':
                    order_clause = self._generate_order_by(order_part, query).replace('ORDER BY ', '')
                    if order_clause:
                        orderby_terms.append(order_clause)
                        groupby_terms += [order_term.split()[0] for order_term in order_clause.split(',')]
                else:
                    order = '"%s" %s' % (order_field, '' if len(order_split) == 1 else order_split[1])
1942 orderby_terms.append(order)
1943 elif order_field in aggregated_fields:
                orderby_terms.append(order_part)
            else:
                # Cannot order by a field that will not appear in the results (needs to be grouped or aggregated)
1947 _logger.warn('%s: read_group order by `%s` ignored, cannot sort on empty columns (not grouped/aggregated)',
1948 self._name, order_part)
1949 return groupby_terms, orderby_terms
1951 def _read_group_process_groupby(self, gb, query, context):
        """
        Helper method to collect important information about groupbys: raw
        field name, type, time information, qualified name, ...
        """
        split = gb.split(':')
1957 field_type = self._all_columns[split[0]].column._type
1958 gb_function = split[1] if len(split) == 2 else None
1959 temporal = field_type in ('date', 'datetime')
1960 tz_convert = field_type == 'datetime' and context.get('tz') in pytz.all_timezones
        qualified_field = self._inherits_join_calc(split[0], query)
        if temporal:
            display_formats = {
                'day': 'dd MMM YYYY',
                'week': "'W'w YYYY",
                'month': 'MMMM YYYY',
                'quarter': 'QQQ YYYY',
                'year': 'YYYY',
            }
            time_intervals = {
                'day': dateutil.relativedelta.relativedelta(days=1),
                'week': datetime.timedelta(days=7),
                'month': dateutil.relativedelta.relativedelta(months=1),
                'quarter': dateutil.relativedelta.relativedelta(months=3),
                'year': dateutil.relativedelta.relativedelta(years=1),
            }
            if tz_convert:
                qualified_field = "timezone('%s', timezone('UTC',%s))" % (context.get('tz', 'UTC'), qualified_field)
            qualified_field = "date_trunc('%s', %s)" % (gb_function or 'month', qualified_field)
        if field_type == 'boolean':
            qualified_field = "coalesce(%s,false)" % qualified_field
        return {
            'field': split[0],
            'groupby': gb,
            'type': field_type,
            'display_format': display_formats[gb_function or 'month'] if temporal else None,
            'interval': time_intervals[gb_function or 'month'] if temporal else None,
            'tz_convert': tz_convert,
            'qualified_field': qualified_field,
        }
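
    # Illustrative result for gb = 'date_field:week' (names hypothetical):
    #   {'field': 'date_field', 'groupby': 'date_field:week', 'type': 'datetime',
    #    'display_format': "'W'w YYYY", 'interval': datetime.timedelta(days=7),
    #    'tz_convert': True, 'qualified_field': "date_trunc('week', ...)"}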
1992 def _read_group_prepare_data(self, key, value, groupby_dict, context):
        """
        Helper method to sanitize the data received by read_group. The None
1995 values are converted to False, and the date/datetime are formatted,
        and corrected according to the timezones.
        """
1998 value = False if value is None else value
1999 gb = groupby_dict.get(key)
2000 if gb and gb['type'] in ('date', 'datetime') and value:
2001 if isinstance(value, basestring):
2002 dt_format = DEFAULT_SERVER_DATETIME_FORMAT if gb['type'] == 'datetime' else DEFAULT_SERVER_DATE_FORMAT
2003 value = datetime.datetime.strptime(value, dt_format)
2004 if gb['tz_convert']:
                value = pytz.timezone(context['tz']).localize(value)
        return value
2008 def _read_group_get_domain(self, groupby, value):
        """
        Helper method to construct the domain corresponding to a groupby and
        a given value. This is mostly relevant for date/datetime.
        """
        if groupby['type'] in ('date', 'datetime') and value:
2014 dt_format = DEFAULT_SERVER_DATETIME_FORMAT if groupby['type'] == 'datetime' else DEFAULT_SERVER_DATE_FORMAT
2015 domain_dt_begin = value
2016 domain_dt_end = value + groupby['interval']
2017 if groupby['tz_convert']:
2018 domain_dt_begin = domain_dt_begin.astimezone(pytz.utc)
2019 domain_dt_end = domain_dt_end.astimezone(pytz.utc)
2020 return [(groupby['field'], '>=', domain_dt_begin.strftime(dt_format)),
2021 (groupby['field'], '<', domain_dt_end.strftime(dt_format))]
        if groupby['type'] == 'many2one' and value:
            value = value[0]
        return [(groupby['field'], '=', value)]
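
    # Illustrative: for a 'month' groupby on a date field 'date_field' with
    # value datetime(2014, 3, 1), this yields
    #   [('date_field', '>=', '2014-03-01'), ('date_field', '<', '2014-04-01')]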
2026 def _read_group_format_result(self, data, annotated_groupbys, groupby, groupby_dict, domain, context):
        """
        Helper method to format the data contained in the dictionary data by
        adding the domain corresponding to its values, the groupbys in the
        context and by properly formatting the date/datetime values.
        """
        domain_group = [dom for gb in annotated_groupbys for dom in self._read_group_get_domain(gb, data[gb['groupby']])]
2033 for k,v in data.iteritems():
2034 gb = groupby_dict.get(k)
2035 if gb and gb['type'] in ('date', 'datetime') and v:
2036 data[k] = babel.dates.format_date(v, format=gb['display_format'], locale=context.get('lang', 'en_US'))
2038 data['__domain'] = domain_group + domain
2039 if len(groupby) - len(annotated_groupbys) >= 1:
            data['__context'] = {'group_by': groupby[len(annotated_groupbys):]}
        del data['id']
        return data
2044 def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False, lazy=True):
        """
        Get the list of records in list view grouped by the given ``groupby`` fields
2048 :param cr: database cursor
2049 :param uid: current user id
2050 :param domain: list specifying search criteria [['field_name', 'operator', 'value'], ...]
2051 :param list fields: list of fields present in the list view specified on the object
2052 :param list groupby: list of groupby descriptions by which the records will be grouped.
2053 A groupby description is either a field (then it will be grouped by that field)
2054 or a string 'field:groupby_function'. Right now, the only functions supported
2055 are 'day', 'week', 'month', 'quarter' or 'year', and they only make sense for
2056 date/datetime fields.
2057 :param int offset: optional number of records to skip
2058 :param int limit: optional max number of records to return
2059 :param dict context: context arguments, like lang, time zone.
2060 :param list orderby: optional ``order by`` specification, for
2061 overriding the natural sort ordering of the
2062 groups, see also :py:meth:`~osv.osv.osv.search`
2063 (supported only for many2one fields currently)
        :param bool lazy: if true, the results are only grouped by the first groupby and the
                remaining groupbys are put in the __context key.  If false, all the groupbys are
                done in one call.
2067 :return: list of dictionaries(one dictionary for each record) containing:
2069 * the values of fields grouped by the fields in ``groupby`` argument
2070 * __domain: list of tuples specifying the search criteria
2071 * __context: dictionary with argument like ``groupby``
        :rtype: [{'field_name_1': value, ...}, ...]
2073 :raise AccessError: * if user has no read rights on the requested object
                             * if user tries to bypass access rules for read on the requested object
        """
        if context is None:
            context = {}
2078 self.check_access_rights(cr, uid, 'read')
2079 query = self._where_calc(cr, uid, domain, context=context)
2080 fields = fields or self._columns.keys()
2082 groupby = [groupby] if isinstance(groupby, basestring) else groupby
2083 groupby_list = groupby[:1] if lazy else groupby
2084 annotated_groupbys = [self._read_group_process_groupby(gb, query, context)
2085 for gb in groupby_list]
2086 groupby_fields = [g['field'] for g in annotated_groupbys]
2087 order = orderby or ','.join([g for g in groupby_list])
2088 groupby_dict = {gb['groupby']: gb for gb in annotated_groupbys}
2090 self._apply_ir_rules(cr, uid, query, 'read', context=context)
2091 for gb in groupby_fields:
2092 assert gb in fields, "Fields in 'groupby' must appear in the list of fields to read (perhaps it's missing in the list view?)"
2093 groupby_def = self._columns.get(gb) or (self._inherit_fields.get(gb) and self._inherit_fields.get(gb)[2])
2094 assert groupby_def and groupby_def._classic_write, "Fields in 'groupby' must be regular database-persisted fields (no function or related fields), or function fields with store=True"
2095 if not (gb in self._all_columns):
2096 # Don't allow arbitrary values, as this would be a SQL injection vector!
2097 raise except_orm(_('Invalid group_by'),
2098 _('Invalid group_by specification: "%s".\nA group_by specification must be a list of valid fields.')%(gb,))
        aggregated_fields = [
            f for f in fields
            if f not in ('id', 'sequence')
2103 if f not in groupby_fields
2104 if self._all_columns[f].column._type in ('integer', 'float')
2105 if getattr(self._all_columns[f].column, '_classic_write')]
2107 field_formatter = lambda f: (self._all_columns[f].column.group_operator or 'sum', self._inherits_join_calc(f, query), f)
2108 select_terms = ["%s(%s) AS %s" % field_formatter(f) for f in aggregated_fields]
2110 for gb in annotated_groupbys:
2111 select_terms.append('%s as "%s" ' % (gb['qualified_field'], gb['groupby']))
2113 groupby_terms, orderby_terms = self._read_group_prepare(order, aggregated_fields, annotated_groupbys, query)
2114 from_clause, where_clause, where_clause_params = query.get_sql()
2115 if lazy and (len(groupby_fields) >= 2 or not context.get('group_by_no_leaf')):
            count_field = groupby_fields[0] if len(groupby_fields) >= 1 else '_'
        else:
            count_field = '_'
2120 prefix_terms = lambda prefix, terms: (prefix + " " + ",".join(terms)) if terms else ''
2121 prefix_term = lambda prefix, term: ('%s %s' % (prefix, term)) if term else ''
        query = """
            SELECT min(%(table)s.id) AS id, count(%(table)s.id) AS %(count_field)s_count %(extra_fields)s
            FROM %(from)s
            %(where)s
            %(groupby)s
            %(orderby)s
            %(limit)s
            %(offset)s
        """ % {
2132 'table': self._table,
2133 'count_field': count_field,
2134 'extra_fields': prefix_terms(',', select_terms),
2135 'from': from_clause,
2136 'where': prefix_term('WHERE', where_clause),
2137 'groupby': prefix_terms('GROUP BY', groupby_terms),
2138 'orderby': prefix_terms('ORDER BY', orderby_terms),
2139 'limit': prefix_term('LIMIT', int(limit) if limit else None),
            'offset': prefix_term('OFFSET', int(offset) if limit else None),
        }
2142 cr.execute(query, where_clause_params)
2143 fetched_data = cr.dictfetchall()
        if not groupby_fields:
            return fetched_data
        many2onefields = [gb['field'] for gb in annotated_groupbys if gb['type'] == 'many2one']
        if many2onefields:
            data_ids = [r['id'] for r in fetched_data]
2151 many2onefields = list(set(many2onefields))
2152 data_dict = {d['id']: d for d in self.read(cr, uid, data_ids, many2onefields, context=context)}
2153 for d in fetched_data:
2154 d.update(data_dict[d['id']])
2156 data = map(lambda r: {k: self._read_group_prepare_data(k,v, groupby_dict, context) for k,v in r.iteritems()}, fetched_data)
2157 result = [self._read_group_format_result(d, annotated_groupbys, groupby, groupby_dict, domain, context) for d in data]
2158 if lazy and groupby_fields[0] in self._group_by_full:
2159 # Right now, read_group only fill results in lazy mode (by default).
2160 # If you need to have the empty groups in 'eager' mode, then the
            # method _read_group_fill_results needs to be completely reimplemented
            # in a sane way
2163 result = self._read_group_fill_results(cr, uid, domain, groupby_fields[0], groupby[len(annotated_groupbys):],
                                                   aggregated_fields, result, read_group_order=order,
                                                   context=context)
        return result
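
    # Illustrative only (hypothetical model and fields): grouping sale orders
    # by month of date_order and summing amount_total:
    #
    #   groups = self.pool['sale.order'].read_group(cr, uid, [],
    #       fields=['amount_total'], groupby=['date_order:month'])
    #
    # Each returned dict carries the grouped value, the aggregate, a '__domain'
    # to drill down into the group, and '__context' with remaining groupbys.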
2168 def _inherits_join_add(self, current_model, parent_model_name, query):
        """
        Add missing table SELECT and JOIN clause to ``query`` for reaching the parent table (no duplicates)
2171 :param current_model: current model object
2172 :param parent_model_name: name of the parent model for which the clauses should be added
        :param query: query object on which the JOIN should be added
        """
        inherits_field = current_model._inherits[parent_model_name]
2176 parent_model = self.pool[parent_model_name]
        parent_alias, parent_alias_statement = query.add_join((current_model._table, parent_model._table, inherits_field, 'id', inherits_field), implicit=True)
        return parent_alias
2180 def _inherits_join_calc(self, field, query):
        """
        Adds missing table select and join clause(s) to ``query`` for reaching
2183 the field coming from an '_inherits' parent table (no duplicates).
2185 :param field: name of inherited field to reach
2186 :param query: query object on which the JOIN should be added
        :return: qualified name of field, to be used in SELECT clause
        """
        current_table = self
2190 parent_alias = '"%s"' % current_table._table
2191 while field in current_table._inherit_fields and not field in current_table._columns:
2192 parent_model_name = current_table._inherit_fields[field][0]
2193 parent_table = self.pool[parent_model_name]
2194 parent_alias = self._inherits_join_add(current_table, parent_model_name, query)
2195 current_table = parent_table
2196 return '%s."%s"' % (parent_alias, field)
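
    # Sketch (hypothetical models): with _inherits = {'res.partner': 'partner_id'}
    # and an inherited field 'name', this adds the res_partner JOIN to the query
    # and returns something like '"res_partner"."name"' for the SELECT clause.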
    def _parent_store_compute(self, cr):
        if not self._parent_store:
            return
        _logger.info('Computing parent left and right for table %s...', self._table)
        def browse_rec(root, pos=0):
            # TODO: set order to parent_order
            where = self._parent_name+'='+str(root)
            if not root:
                where = self._parent_name+' IS NULL'
            if self._parent_order:
                where += ' order by '+self._parent_order
            cr.execute('SELECT id FROM '+self._table+' WHERE '+where)
            pos2 = pos + 1
            for id in cr.fetchall():
                pos2 = browse_rec(id[0], pos2)
            cr.execute('update '+self._table+' set parent_left=%s, parent_right=%s where id=%s', (pos, pos2, root))
            return pos2 + 1
        query = 'SELECT id FROM '+self._table+' WHERE '+self._parent_name+' IS NULL'
        if self._parent_order:
            query += ' order by ' + self._parent_order
        pos = 0
        cr.execute(query)
        for (root,) in cr.fetchall():
            pos = browse_rec(root, pos)
        self.invalidate_cache(cr, SUPERUSER_ID, ['parent_left', 'parent_right'])
        return True
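
    # Illustrative nested-set numbering: a root A with children B and C ends up
    # as A(parent_left=0, parent_right=5), B(1, 2), C(3, 4). Any descendant D
    # of A then satisfies A.parent_left < D.parent_left and
    # D.parent_right < A.parent_right, which is what 'child_of' relies on.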
2225 def _update_store(self, cr, f, k):
2226 _logger.info("storing computed values of fields.function '%s'", k)
2227 ss = self._columns[k]._symbol_set
2228 update_query = 'UPDATE "%s" SET "%s"=%s WHERE id=%%s' % (self._table, k, ss[0])
2229 cr.execute('select id from '+self._table)
        ids_lst = map(lambda x: x[0], cr.fetchall())
        while ids_lst:
            iids = ids_lst[:AUTOINIT_RECALCULATE_STORED_FIELDS]
            ids_lst = ids_lst[AUTOINIT_RECALCULATE_STORED_FIELDS:]
            res = f.get(cr, self, iids, k, SUPERUSER_ID, {})
            for key, val in res.items():
                if f._multi:
                    val = val[k]
                # if val is a many2one, just write the ID
                if type(val) == tuple:
                    val = val[0]
                if val is not False:
                    cr.execute(update_query, (ss[1](val), key))
2244 def _check_selection_field_value(self, cr, uid, field, value, context=None):
2245 """Raise except_orm if value is not among the valid values for the selection field"""
        if self._columns[field]._type == 'reference':
            val_model, val_id_str = value.split(',', 1)
            val_id = False
            try:
                val_id = long(val_id_str)
            except ValueError:
                pass
            if not val_id:
                raise except_orm(_('ValidateError'),
                                 _('Invalid value for reference field "%s.%s" (last part must be a non-zero integer): "%s"') % (self._table, field, value))
            val = val_model
        else:
            val = value
        if isinstance(self._columns[field].selection, (tuple, list)):
            if val in dict(self._columns[field].selection):
                return
        elif val in dict(self._columns[field].selection(self, cr, uid, context=context)):
            return
        raise except_orm(_('ValidateError'),
                         _('The value "%s" for the field "%s.%s" is not in the selection') % (value, self._name, field))
2267 def _check_removed_columns(self, cr, log=False):
2268 # iterate on the database columns to drop the NOT NULL constraints
2269 # of fields which were required but have been removed (or will be added by another module)
2270 columns = [c for c in self._columns if not (isinstance(self._columns[c], fields.function) and not self._columns[c].store)]
2271 columns += MAGIC_COLUMNS
2272 cr.execute("SELECT a.attname, a.attnotnull"
2273 " FROM pg_class c, pg_attribute a"
2274 " WHERE c.relname=%s"
2275 " AND c.oid=a.attrelid"
2276 " AND a.attisdropped=%s"
2277 " AND pg_catalog.format_type(a.atttypid, a.atttypmod) NOT IN ('cid', 'tid', 'oid', 'xid')"
2278 " AND a.attname NOT IN %s", (self._table, False, tuple(columns))),
        for column in cr.dictfetchall():
            if log:
                _logger.debug("column %s is in the table %s but not in the corresponding object %s",
2283 column['attname'], self._table, self._name)
2284 if column['attnotnull']:
2285 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, column['attname']))
2286 _schema.debug("Table '%s': column '%s': dropped NOT NULL constraint",
2287 self._table, column['attname'])
2289 def _save_constraint(self, cr, constraint_name, type):
        """
        Record the creation of a constraint for this model, to make it possible
2292 to delete it later when the module is uninstalled. Type can be either
2293 'f' or 'u' depending on the constraint being a foreign key or not.
2295 if not self._module:
            # no need to save constraints for custom models as they're not part
            # of any module
            return
        assert type in ('f', 'u')
        cr.execute("""
2301 SELECT 1 FROM ir_model_constraint, ir_module_module
2302 WHERE ir_model_constraint.module=ir_module_module.id
2303 AND ir_model_constraint.name=%s
2304 AND ir_module_module.name=%s
2305 """, (constraint_name, self._module))
2308 INSERT INTO ir_model_constraint
2309 (name, date_init, date_update, module, model, type)
2310 VALUES (%s, now() AT TIME ZONE 'UTC', now() AT TIME ZONE 'UTC',
2311 (SELECT id FROM ir_module_module WHERE name=%s),
2312 (SELECT id FROM ir_model WHERE model=%s), %s)""",
2313 (constraint_name, self._module, self._name, type))
2315 def _save_relation_table(self, cr, relation_table):
        """
        Record the creation of a many2many for this model, to make it possible
        to delete it later when the module is uninstalled.
        """
        cr.execute("""
            SELECT 1 FROM ir_model_relation, ir_module_module
2322 WHERE ir_model_relation.module=ir_module_module.id
2323 AND ir_model_relation.name=%s
2324 AND ir_module_module.name=%s
2325 """, (relation_table, self._module))
2327 cr.execute("""INSERT INTO ir_model_relation (name, date_init, date_update, module, model)
2328 VALUES (%s, now() AT TIME ZONE 'UTC', now() AT TIME ZONE 'UTC',
2329 (SELECT id FROM ir_module_module WHERE name=%s),
2330 (SELECT id FROM ir_model WHERE model=%s))""",
2331 (relation_table, self._module, self._name))
2332 self.invalidate_cache(cr, SUPERUSER_ID)
2334 # checked version: for direct m2o starting from `self`
2335 def _m2o_add_foreign_key_checked(self, source_field, dest_model, ondelete):
2336 assert self.is_transient() or not dest_model.is_transient(), \
2337 'Many2One relationships from non-transient Model to TransientModel are forbidden'
2338 if self.is_transient() and not dest_model.is_transient():
2339 # TransientModel relationships to regular Models are annoying
2340 # usually because they could block deletion due to the FKs.
2341 # So unless stated otherwise we default them to ondelete=cascade.
2342 ondelete = ondelete or 'cascade'
2343 fk_def = (self._table, source_field, dest_model._table, ondelete or 'set null')
2344 self._foreign_keys.add(fk_def)
2345 _schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s", *fk_def)
2347 # unchecked version: for custom cases, such as m2m relationships
2348 def _m2o_add_foreign_key_unchecked(self, source_table, source_field, dest_model, ondelete):
2349 fk_def = (source_table, source_field, dest_model._table, ondelete or 'set null')
2350 self._foreign_keys.add(fk_def)
2351 _schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s", *fk_def)
2353 def _drop_constraint(self, cr, source_table, constraint_name):
2354 cr.execute("ALTER TABLE %s DROP CONSTRAINT %s" % (source_table,constraint_name))
2356 def _m2o_fix_foreign_key(self, cr, source_table, source_field, dest_model, ondelete):
2357 # Find FK constraint(s) currently established for the m2o field,
2358 # and see whether they are stale or not
2359 cr.execute("""SELECT confdeltype as ondelete_rule, conname as constraint_name,
2360 cl2.relname as foreign_table
2361 FROM pg_constraint as con, pg_class as cl1, pg_class as cl2,
2362 pg_attribute as att1, pg_attribute as att2
2363 WHERE con.conrelid = cl1.oid
2364 AND cl1.relname = %s
2365 AND con.confrelid = cl2.oid
2366 AND array_lower(con.conkey, 1) = 1
2367 AND con.conkey[1] = att1.attnum
2368 AND att1.attrelid = cl1.oid
2369 AND att1.attname = %s
2370 AND array_lower(con.confkey, 1) = 1
2371 AND con.confkey[1] = att2.attnum
2372 AND att2.attrelid = cl2.oid
2373 AND att2.attname = %s
2374 AND con.contype = 'f'""", (source_table, source_field, 'id'))
        constraints = cr.dictfetchall()
        if constraints:
            if len(constraints) == 1:
                # Is it the right constraint?
                cons, = constraints
                if cons['ondelete_rule'] != POSTGRES_CONFDELTYPES.get((ondelete or 'set null').upper(), 'a')\
                        or cons['foreign_table'] != dest_model._table:
                    # Wrong FK: drop it and recreate
                    _schema.debug("Table '%s': dropping obsolete FK constraint: '%s'",
                                  source_table, cons['constraint_name'])
                    self._drop_constraint(cr, source_table, cons['constraint_name'])
                else:
                    # it's all good, nothing to do!
                    return
            else:
2390 # Multiple FKs found for the same field, drop them all, and re-create
2391 for cons in constraints:
2392 _schema.debug("Table '%s': dropping duplicate FK constraints: '%s'",
2393 source_table, cons['constraint_name'])
2394 self._drop_constraint(cr, source_table, cons['constraint_name'])
2396 # (re-)create the FK
2397 self._m2o_add_foreign_key_checked(source_field, dest_model, ondelete)
2400 def _set_default_value_on_column(self, cr, column_name, context=None):
2401 # ideally should use add_default_value but fails
2402 # due to ir.values not being ready
2404 # get old-style default
2405 default = self._defaults.get(column_name)
2406 if callable(default):
2407 default = default(self, cr, SUPERUSER_ID, context)
        # get new-style default if no old-style one
        if default is None:
            record = self.new(cr, SUPERUSER_ID, context=context)
2412 field = self._fields[column_name]
2413 field.determine_default(record)
2414 defaults = dict(record._cache)
2415 if column_name in defaults:
2416 default = field.convert_to_write(defaults[column_name])
2418 if default is not None:
2419 _logger.debug("Table '%s': setting default value of new column %s",
2420 self._table, column_name)
2421 ss = self._columns[column_name]._symbol_set
2422 query = 'UPDATE "%s" SET "%s"=%s WHERE "%s" is NULL' % (
2423 self._table, column_name, ss[0], column_name)
2424 cr.execute(query, (ss[1](default),))
2425 # this is a disgrace
    def _auto_init(self, cr, context=None):
        """

        Call _field_create and, unless _auto is False:
2433 - create the corresponding table in database for the model,
2434 - possibly add the parent columns in database,
2435 - possibly add the columns 'create_uid', 'create_date', 'write_uid',
2436 'write_date' in database if _log_access is True (the default),
2437 - report on database columns no more existing in _columns,
2438 - remove no more existing not null constraints,
2439 - alter existing database columns to match _columns,
2440 - create database tables to match _columns,
2441 - add database indices to match _columns,
        - save in self._foreign_keys a list of foreign keys to create (see
          _auto_end).

        """
2446 self._foreign_keys = set()
        raise_on_invalid_object_name(self._name)
        if context is None:
            context = {}
        store_compute = False
        stored_fields = []              # new-style stored fields with compute
        todo_end = []
        update_custom_fields = context.get('update_custom_fields', False)
        self._field_create(cr, context=context)
        create = not self._table_exist(cr)
        if self._auto:

            if create:
                self._create_table(cr)
2462 if self._parent_store:
2463 if not self._parent_columns_exist(cr):
2464 self._create_parent_columns(cr)
2465 store_compute = True
2467 self._check_removed_columns(cr, log=False)
2469 # iterate on the "object columns"
2470 column_data = self._select_column_data(cr)
2472 for k, f in self._columns.iteritems():
                if k == 'id': # FIXME: maybe id should be a regular column?
                    continue
                # Don't update custom (also called manual) fields
                if f.manual and not update_custom_fields:
                    continue
2479 if isinstance(f, fields.one2many):
2480 self._o2m_raise_on_missing_reference(cr, f)
2482 elif isinstance(f, fields.many2many):
                    self._m2m_raise_or_create_relation(cr, f)

                else:
                    res = column_data.get(k)
2488 # The field is not found as-is in database, try if it
2489 # exists with an old name.
2490 if not res and hasattr(f, 'oldname'):
                    res = column_data.get(f.oldname)
                    if res:
                        cr.execute('ALTER TABLE "%s" RENAME "%s" TO "%s"' % (self._table, f.oldname, k))
                        res['attname'] = k
                        column_data[k] = res
2496 _schema.debug("Table '%s': renamed column '%s' to '%s'",
2497 self._table, f.oldname, k)
2499 # The field already exists in database. Possibly
                # change its type, rename it, drop it or change its
                # constraints.
                if res:
                    f_pg_type = res['typname']
2504 f_pg_size = res['size']
2505 f_pg_notnull = res['attnotnull']
2506 if isinstance(f, fields.function) and not f.store and\
2507 not getattr(f, 'nodrop', False):
2508 _logger.info('column %s (%s) converted to a function, removed from table %s',
2509 k, f.string, self._table)
2510 cr.execute('ALTER TABLE "%s" DROP COLUMN "%s" CASCADE' % (self._table, k))
                        _schema.debug("Table '%s': dropped column '%s' with cascade",
                                      self._table, k)
                        f_obj_type = None
                    else:
                        f_obj_type = get_pg_type(f) and get_pg_type(f)[0]

                    if f_obj_type:
                        ok = False
                        casts = [
                            ('text', 'char', pg_varchar(f.size), '::%s' % pg_varchar(f.size)),
2522 ('varchar', 'text', 'TEXT', ''),
2523 ('int4', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
2524 ('date', 'datetime', 'TIMESTAMP', '::TIMESTAMP'),
2525 ('timestamp', 'date', 'date', '::date'),
2526 ('numeric', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
2527 ('float8', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
                        if f_pg_type == 'varchar' and f._type == 'char' and f_pg_size and (f.size is None or f_pg_size < f.size):
                            try:
                                with cr.savepoint():
2532 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" TYPE %s' % (self._table, k, pg_varchar(f.size)))
2533 except psycopg2.NotSupportedError:
2534 # In place alter table cannot be done because a view is depending of this field.
2535 # Do a manual copy. This will drop the view (that will be recreated later)
2536 cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO temp_change_size' % (self._table, k))
2537 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, pg_varchar(f.size)))
2538 cr.execute('UPDATE "%s" SET "%s"=temp_change_size::%s' % (self._table, k, pg_varchar(f.size)))
2539 cr.execute('ALTER TABLE "%s" DROP COLUMN temp_change_size CASCADE' % (self._table,))
2541 _schema.debug("Table '%s': column '%s' (type varchar) changed size from %s to %s",
2542 self._table, k, f_pg_size or 'unlimited', f.size or 'unlimited')
                        for c in casts:
                            if (f_pg_type == c[0]) and (f._type == c[1]):
                                if f_pg_type != f_obj_type:
                                    ok = True
                                    cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO __temp_type_cast' % (self._table, k))
2548 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, c[2]))
2549 cr.execute(('UPDATE "%s" SET "%s"= __temp_type_cast'+c[3]) % (self._table, k))
                                    cr.execute('ALTER TABLE "%s" DROP COLUMN __temp_type_cast CASCADE' % (self._table,))
                                    cr.commit()
                                    _schema.debug("Table '%s': column '%s' changed type from %s to %s",
                                                  self._table, k, c[0], c[1])
                                break
                        if f_pg_type != f_obj_type:
                            if not ok:
                                i = 0
                                while True:
                                    newname = k + '_moved' + str(i)
2561 cr.execute("SELECT count(1) FROM pg_class c,pg_attribute a " \
2562 "WHERE c.relname=%s " \
2563 "AND a.attname=%s " \
2564 "AND c.oid=a.attrelid ", (self._table, newname))
                                    if not cr.fetchone()[0]:
                                        break
                                    i += 1
                                if f_pg_notnull:
                                    cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, k))
2570 cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO "%s"' % (self._table, k, newname))
2571 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, get_pg_type(f)[1]))
2572 cr.execute("COMMENT ON COLUMN %s.\"%s\" IS %%s" % (self._table, k), (f.string,))
2573 _schema.debug("Table '%s': column '%s' has changed type (DB=%s, def=%s), data moved to column %s !",
2574 self._table, k, f_pg_type, f._type, newname)
2576 # if the field is required and hasn't got a NOT NULL constraint
2577 if f.required and f_pg_notnull == 0:
2578 self._set_default_value_on_column(cr, k, context=context)
                        # add the NOT NULL constraint
                        try:
                            cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k), log_exceptions=False)
                            cr.commit()
                            _schema.debug("Table '%s': column '%s': added NOT NULL constraint",
                                          self._table, k)
                        except Exception:
2586 msg = "Table '%s': unable to set a NOT NULL constraint on column '%s' !\n"\
2587 "If you want to have it, you should update the records and execute manually:\n"\
2588 "ALTER TABLE %s ALTER COLUMN %s SET NOT NULL"
2589 _schema.warning(msg, self._table, k, self._table, k)
2591 elif not f.required and f_pg_notnull == 1:
2592 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, k))
                        _schema.debug("Table '%s': column '%s': dropped NOT NULL constraint",
                                      self._table, k)
2597 indexname = '%s_%s_index' % (self._table, k)
2598 cr.execute("SELECT indexname FROM pg_indexes WHERE indexname = %s and tablename = %s", (indexname, self._table))
2599 res2 = cr.dictfetchall()
2600 if not res2 and f.select:
2601 cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (self._table, k, self._table, k))
2603 if f._type == 'text':
2604 # FIXME: for fields.text columns we should try creating GIN indexes instead (seems most suitable for an ERP context)
                            msg = "Table '%s': Adding (b-tree) index for %s column '%s'. "\
                                "This is probably useless (does not work for fulltext search) and prevents INSERTs of long texts"\
                                " because there is a length limit for indexable btree values!\n"\
                                "Use a search view instead if you simply want to make the field searchable."
2609 _schema.warning(msg, self._table, f._type, k)
2610 if res2 and not f.select:
2611 cr.execute('DROP INDEX "%s_%s_index"' % (self._table, k))
2613 msg = "Table '%s': dropping index for column '%s' of type '%s' as it is not required anymore"
2614 _schema.debug(msg, self._table, k, f._type)
2616 if isinstance(f, fields.many2one) or (isinstance(f, fields.function) and f._type == 'many2one' and f.store):
2617 dest_model = self.pool[f._obj]
2618 if dest_model._table != 'ir_actions':
2619 self._m2o_fix_foreign_key(cr, self._table, k, dest_model, f.ondelete)
                # The field doesn't exist in database. Create it if necessary.
                else:
                    if not isinstance(f, fields.function) or f.store:
2624 # add the missing field
2625 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, get_pg_type(f)[1]))
2626 cr.execute("COMMENT ON COLUMN %s.\"%s\" IS %%s" % (self._table, k), (f.string,))
2627 _schema.debug("Table '%s': added column '%s' with definition=%s",
2628 self._table, k, get_pg_type(f)[1])
                        # initialize it
                        if not create:
                            self._set_default_value_on_column(cr, k, context=context)
2634 # remember the functions to call for the stored fields
                if isinstance(f, fields.function):
                    order = 10
                    if f.store is not True: # i.e. if f.store is a dict
2638 order = f.store[f.store.keys()[0]][2]
2639 todo_end.append((order, self._update_store, (f, k)))
2641 # remember new-style stored fields with compute method
2642 if k in self._fields and self._fields[k].depends:
2643 stored_fields.append(self._fields[k])
2645 # and add constraints if needed
2646 if isinstance(f, fields.many2one) or (isinstance(f, fields.function) and f._type == 'many2one' and f.store):
2647 if f._obj not in self.pool:
2648 raise except_orm('Programming Error', 'There is no reference available for %s' % (f._obj,))
2649 dest_model = self.pool[f._obj]
2650 ref = dest_model._table
2651 # ir_actions is inherited so foreign key doesn't work on it
2652 if ref != 'ir_actions':
                            self._m2o_add_foreign_key_checked(k, dest_model, f.ondelete)
                    if f.select:
                        cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (self._table, k, self._table, k))
                    if f.required:
                        try:
                            cr.commit()
                            cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k))
                            _schema.debug("Table '%s': column '%s': added a NOT NULL constraint",
                                          self._table, k)
                        except Exception:
                            msg = "WARNING: unable to set column %s of table %s not null !\n"\
2664 "Try to re-run: openerp-server --update=module\n"\
2665 "If it doesn't work, update records and execute manually:\n"\
2666 "ALTER TABLE %s ALTER COLUMN %s SET NOT NULL"
2667 _logger.warning(msg, k, self._table, self._table, k, exc_info=True)
        else:
            cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (self._table,))
2672 create = not bool(cr.fetchone())
        cr.commit()     # start a new transaction

        if self._auto:
            self._add_sql_constraints(cr)

        if create:
            self._execute_sql(cr)

        if store_compute:
            self._parent_store_compute(cr)
            cr.commit()
        if stored_fields:
            # trigger computation of new-style stored fields with a compute
2689 _logger.info("Storing computed values of %s fields %s",
2690 self._name, ', '.join(sorted(f.name for f in stored_fields)))
2691 recs = self.browse(cr, SUPERUSER_ID, [], {'active_test': False})
2692 recs = recs.search([])
            if recs:
                map(recs._recompute_todo, stored_fields)
                func = lambda: recs.recompute()
                todo_end.append((1000, func, ()))

        return todo_end
2701 def _auto_end(self, cr, context=None):
2702 """ Create the foreign keys recorded by _auto_init. """
2703 for t, k, r, d in self._foreign_keys:
2704 cr.execute('ALTER TABLE "%s" ADD FOREIGN KEY ("%s") REFERENCES "%s" ON DELETE %s' % (t, k, r, d))
            self._save_constraint(cr, "%s_%s_fkey" % (t, k), 'f')
        cr.commit()
        del self._foreign_keys
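
    # Example DDL emitted above (hypothetical tables): for a recorded key
    # ('sale_order', 'partner_id', 'res_partner', 'set null') this executes:
    #   ALTER TABLE "sale_order" ADD FOREIGN KEY ("partner_id")
    #       REFERENCES "res_partner" ON DELETE set null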
2710 def _table_exist(self, cr):
        cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (self._table,))
        return cr.rowcount
2715 def _create_table(self, cr):
2716 cr.execute('CREATE TABLE "%s" (id SERIAL NOT NULL, PRIMARY KEY(id))' % (self._table,))
2717 cr.execute(("COMMENT ON TABLE \"%s\" IS %%s" % self._table), (self._description,))
2718 _schema.debug("Table '%s': created", self._table)
2721 def _parent_columns_exist(self, cr):
2722 cr.execute("""SELECT c.relname
2723 FROM pg_class c, pg_attribute a
2724 WHERE c.relname=%s AND a.attname=%s AND c.oid=a.attrelid
2725 """, (self._table, 'parent_left'))
2729 def _create_parent_columns(self, cr):
2730 cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_left" INTEGER' % (self._table,))
2731 cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_right" INTEGER' % (self._table,))
2732 if 'parent_left' not in self._columns:
            _logger.error('create a column parent_left on object %s: fields.integer(\'Left Parent\', select=1)',
                          self._table)
2735 _schema.debug("Table '%s': added column '%s' with definition=%s",
2736 self._table, 'parent_left', 'INTEGER')
2737 elif not self._columns['parent_left'].select:
            _logger.error('parent_left column on object %s must be indexed! Add select=1 to the field definition.',
                          self._table)
2740 if 'parent_right' not in self._columns:
            _logger.error('create a column parent_right on object %s: fields.integer(\'Right Parent\', select=1)',
                          self._table)
2743 _schema.debug("Table '%s': added column '%s' with definition=%s",
2744 self._table, 'parent_right', 'INTEGER')
2745 elif not self._columns['parent_right'].select:
            _logger.error('parent_right column on object %s must be indexed! Add select=1 to the field definition.',
                          self._table)
2748 if self._columns[self._parent_name].ondelete not in ('cascade', 'restrict'):
2749 _logger.error("The column %s on object %s must be set as ondelete='cascade' or 'restrict'",
2750 self._parent_name, self._name)
2755 def _select_column_data(self, cr):
2756 # attlen is the number of bytes necessary to represent the type when
2757 # the type has a fixed size. If the type has a varying size attlen is
2758 # -1 and atttypmod is the size limit + 4, or -1 if there is no limit.
2759 cr.execute("SELECT c.relname,a.attname,a.attlen,a.atttypmod,a.attnotnull,a.atthasdef,t.typname,CASE WHEN a.attlen=-1 THEN (CASE WHEN a.atttypmod=-1 THEN 0 ELSE a.atttypmod-4 END) ELSE a.attlen END as size " \
2760 "FROM pg_class c,pg_attribute a,pg_type t " \
2761 "WHERE c.relname=%s " \
2762 "AND c.oid=a.attrelid " \
2763 "AND a.atttypid=t.oid", (self._table,))
2764 return dict(map(lambda x: (x['attname'], x),cr.dictfetchall()))
2767 def _o2m_raise_on_missing_reference(self, cr, f):
2768 # TODO this check should be a method on fields.one2many.
2769 if f._obj in self.pool:
2770 other = self.pool[f._obj]
2771 # TODO the condition could use fields_get_keys().
2772 if f._fields_id not in other._columns.keys():
2773 if f._fields_id not in other._inherit_fields.keys():
2774 raise except_orm('Programming Error', "There is no reference field '%s' found for '%s'" % (f._fields_id, f._obj,))
2776 def _m2m_raise_or_create_relation(self, cr, f):
2777 m2m_tbl, col1, col2 = f._sql_names(self)
2778 self._save_relation_table(cr, m2m_tbl)
2779 cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (m2m_tbl,))
2780 if not cr.dictfetchall():
2781 if f._obj not in self.pool:
2782 raise except_orm('Programming Error', 'Many2Many destination model does not exist: `%s`' % (f._obj,))
2783 dest_model = self.pool[f._obj]
2784 ref = dest_model._table
2785 cr.execute('CREATE TABLE "%s" ("%s" INTEGER NOT NULL, "%s" INTEGER NOT NULL, UNIQUE("%s","%s"))' % (m2m_tbl, col1, col2, col1, col2))
2786 # create foreign key references with ondelete=cascade, unless the targets are SQL views
2787 cr.execute("SELECT relkind FROM pg_class WHERE relkind IN ('v') AND relname=%s", (ref,))
2788 if not cr.fetchall():
2789 self._m2o_add_foreign_key_unchecked(m2m_tbl, col2, dest_model, 'cascade')
2790 cr.execute("SELECT relkind FROM pg_class WHERE relkind IN ('v') AND relname=%s", (self._table,))
2791 if not cr.fetchall():
2792 self._m2o_add_foreign_key_unchecked(m2m_tbl, col1, self, 'cascade')
2794 cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (m2m_tbl, col1, m2m_tbl, col1))
2795 cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (m2m_tbl, col2, m2m_tbl, col2))
2796 cr.execute("COMMENT ON TABLE \"%s\" IS 'RELATION BETWEEN %s AND %s'" % (m2m_tbl, self._table, ref))
2798 _schema.debug("Create table '%s': m2m relation between '%s' and '%s'", m2m_tbl, self._table, ref)
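
    # Illustrative schema (hypothetical models): a many2many between
    # 'project.task' and 'res.users' with relation table 'project_user_rel'
    # yields CREATE TABLE "project_user_rel" with two INTEGER NOT NULL columns,
    # a UNIQUE pair constraint, cascading FKs to both tables (unless the
    # target is a SQL view) and one index per column.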
2801 def _add_sql_constraints(self, cr):
        """
        Modify this model's database table constraints so they match the ones
        declared in _sql_constraints.

        """
2808 def unify_cons_text(txt):
2809 return txt.lower().replace(', ',',').replace(' (','(')
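        # e.g. unify_cons_text('UNIQUE (name, company_id)') -> 'unique(name,company_id)'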
2811 for (key, con, _) in self._sql_constraints:
2812 conname = '%s_%s' % (self._table, key)
2814 self._save_constraint(cr, conname, 'u')
2815 cr.execute("SELECT conname, pg_catalog.pg_get_constraintdef(oid, true) as condef FROM pg_constraint where conname=%s", (conname,))
            existing_constraints = cr.dictfetchall()
            sql_actions = {
                'drop': {
                    'execute': False,
                    'query': 'ALTER TABLE "%s" DROP CONSTRAINT "%s"' % (self._table, conname, ),
2821 'msg_ok': "Table '%s': dropped constraint '%s'. Reason: its definition changed from '%%s' to '%s'" % (
2822 self._table, conname, con),
                    'msg_err': "Table '%s': unable to drop '%s' constraint !" % (self._table, con),
                    'order': 1,
                },
                'add': {
                    'execute': False,
                    'query': 'ALTER TABLE "%s" ADD CONSTRAINT "%s" %s' % (self._table, conname, con,),
2829 'msg_ok': "Table '%s': added constraint '%s' with definition=%s" % (self._table, conname, con),
                    'msg_err': "Table '%s': unable to add '%s' constraint !\n If you want to have it, you should update the records and execute manually:\n%%s" % (
                        self._table, con),
                    'order': 2,
                },
            }

            if not existing_constraints:
2837 # constraint does not exists:
2838 sql_actions['add']['execute'] = True
2839 sql_actions['add']['msg_err'] = sql_actions['add']['msg_err'] % (sql_actions['add']['query'], )
2840 elif unify_cons_text(con) not in [unify_cons_text(item['condef']) for item in existing_constraints]:
2841 # constraint exists but its definition has changed:
2842 sql_actions['drop']['execute'] = True
2843 sql_actions['drop']['msg_ok'] = sql_actions['drop']['msg_ok'] % (existing_constraints[0]['condef'].lower(), )
2844 sql_actions['add']['execute'] = True
2845 sql_actions['add']['msg_err'] = sql_actions['add']['msg_err'] % (sql_actions['add']['query'], )
2847 # we need to add the constraint:
2848 sql_actions = [item for item in sql_actions.values()]
2849 sql_actions.sort(key=lambda x: x['order'])
            for sql_action in [action for action in sql_actions if action['execute']]:
                try:
                    cr.execute(sql_action['query'])
                    cr.commit()
                    _schema.debug(sql_action['msg_ok'])
                except:
                    _schema.warning(sql_action['msg_err'])
                    cr.rollback()
2860 def _execute_sql(self, cr):
2861 """ Execute the SQL code from the _sql attribute (if any)."""
2862 if hasattr(self, "_sql"):
2863 for line in self._sql.split(';'):
                line2 = line.replace('\n', '').strip()
                if line2:
                    cr.execute(line2)
                    cr.commit()
    # Update objects that use this one to update their _inherits fields
    #

    @classmethod
    def _inherits_reload_src(cls):
2875 """ Recompute the _inherit_fields mapping on each _inherits'd child model."""
2876 for model in cls.pool.values():
2877 if cls._name in model._inherits:
2878 model._inherits_reload()
    @classmethod
    def _inherits_reload(cls):
2882 """ Recompute the _inherit_fields mapping.
        This will also call itself on each inherits'd child model.
        """
        res = {}
        for table in cls._inherits:
2889 other = cls.pool[table]
2890 for col in other._columns.keys():
2891 res[col] = (table, cls._inherits[table], other._columns[col], table)
2892 for col in other._inherit_fields.keys():
2893 res[col] = (table, cls._inherits[table], other._inherit_fields[col][2], other._inherit_fields[col][3])
2894 cls._inherit_fields = res
2895 cls._all_columns = cls._get_column_infos()
2897 # interface columns with new-style fields
2898 for attr, column in cls._columns.items():
2899 if attr not in cls._fields:
2900 cls._add_field(attr, column.to_field())
2902 # interface inherited fields with new-style fields (note that the
2903 # reverse order is for being consistent with _all_columns above)
2904 for parent_model, parent_field in reversed(cls._inherits.items()):
2905 for attr, field in cls.pool[parent_model]._fields.iteritems():
2906 if attr not in cls._fields:
2907 new_field = field.copy(related=(parent_field, attr), _origin=field)
2908 cls._add_field(attr, new_field)
2910 cls._inherits_reload_src()
    @classmethod
    def _get_column_infos(cls):
2914 """Returns a dict mapping all fields names (direct fields and
2915 inherited field via _inherits) to a ``column_info`` struct
2916 giving detailed columns """
        result = {}
        # do not inverse the for loops, since local fields may hide inherited ones!
2919 for k, (parent, m2o, col, original_parent) in cls._inherit_fields.iteritems():
2920 result[k] = fields.column_info(k, col, parent, m2o, original_parent)
2921 for k, col in cls._columns.iteritems():
            result[k] = fields.column_info(k, col)
        return result

    @classmethod
    def _inherits_check(cls):
2927 for table, field_name in cls._inherits.items():
2928 if field_name not in cls._columns:
2929 _logger.info('Missing many2one field definition for _inherits reference "%s" in "%s", using default one.', field_name, cls._name)
2930 cls._columns[field_name] = fields.many2one(table, string="Automatically created field to link to parent %s" % table,
2931 required=True, ondelete="cascade")
2932 elif not cls._columns[field_name].required or cls._columns[field_name].ondelete.lower() not in ("cascade", "restrict"):
2933 _logger.warning('Field definition for _inherits reference "%s" in "%s" must be marked as "required" with ondelete="cascade" or "restrict", forcing it to required + cascade.', field_name, cls._name)
2934 cls._columns[field_name].required = True
2935 cls._columns[field_name].ondelete = "cascade"
2937 # reflect fields with delegate=True in dictionary cls._inherits
2938 for field in cls._fields.itervalues():
2939 if field.type == 'many2one' and not field.related and field.delegate:
2940 if not field.required:
2941 _logger.warning("Field %s with delegate=True must be required.", field)
2942 field.required = True
2943 if field.ondelete.lower() not in ('cascade', 'restrict'):
2944 field.ondelete = 'cascade'
2945 cls._inherits[field.comodel_name] = field.name
    @api.model
    def _prepare_setup_fields(self):
2949 """ Prepare the setup of fields once the models have been loaded. """
        for field in self._fields.itervalues():
            field.reset()

    @api.model
    def _setup_fields(self):
2955 """ Setup the fields (dependency triggers, etc). """
2956 for field in self._fields.itervalues():
2957 field.setup(self.env)
2959 # group fields by compute to determine field.computed_fields
2960 fields_by_compute = defaultdict(list)
        for field in self._fields.itervalues():
            if field.compute:
                field.computed_fields = fields_by_compute[field.compute]
                field.computed_fields.append(field)
            else:
                field.computed_fields = []
2968 def fields_get(self, cr, user, allfields=None, context=None, write_access=True):
2969 """ Return the definition of each field.
        The returned value is a dictionary (indexed by field name) of
        dictionaries. The _inherits'd fields are included. The string, help,
        and selection (if present) attributes are translated.
2975 :param cr: database cursor
2976 :param user: current user id
2977 :param allfields: list of fields
2978 :param context: context arguments, like lang, time zone
2979 :return: dictionary of field dictionaries, each one describing a field of the business object
        :raise AccessError: * if user has no create/write rights on the requested object
        """
        recs = self.browse(cr, user, [], context)

        res = {}
        for fname, field in self._fields.iteritems():
            if allfields and fname not in allfields:
                continue
            if field.groups and not recs.user_has_groups(field.groups):
                continue
            res[fname] = field.get_description(recs.env)
2993 # if user cannot create or modify records, make all fields readonly
2994 has_access = functools.partial(recs.check_access_rights, raise_exception=False)
2995 if not (has_access('write') or has_access('create')):
2996 for description in res.itervalues():
2997 description['readonly'] = True
                description['states'] = {}

        return res
3002 def get_empty_list_help(self, cr, user, help, context=None):
3003 """ Generic method giving the help message displayed when having
3004 no result to display in a list or kanban view. By default it returns
3005 the help given in parameter that is generally the help message
        defined in the action.
        """
        return help
3010 def check_field_access_rights(self, cr, user, operation, fields, context=None):
        Check the user access rights on the given fields. This raises Access
        Denied if the user does not have the rights. Otherwise it returns the
        fields (as-is if `fields` is not falsy, or the readable/writable
        fields if `fields` is falsy).
        """
3017 if user == SUPERUSER_ID:
            return fields or list(self._fields)

        def valid(fname):
            """ determine whether user has access to field `fname` """
            field = self._fields.get(fname)
            if field and field.groups:
                return self.user_has_groups(cr, user, groups=field.groups, context=context)
            else:
                return True

        if not fields:
            fields = filter(valid, self._fields)
        else:
            invalid_fields = set(filter(lambda name: not valid(name), fields))
            if invalid_fields:
                _logger.warning('Access Denied by ACLs for operation: %s, uid: %s, model: %s, fields: %s',
                    operation, user, self._name, ', '.join(invalid_fields))
                raise AccessError(
                    _('The requested operation cannot be completed due to security restrictions. '
                      'Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
                    (self._description, operation))

        return fields
    # new-style implementation of read(); old-style is defined below
    @api.v8
    def read(self, fields=None, load='_classic_read'):
3045 """ Read the given fields for the records in `self`.
        :param fields: optional list of field names to return (default is
                       all fields)
        :param load: deprecated, this argument is ignored
3050 :return: a list of dictionaries mapping field names to their values,
3051 with one dictionary per record
3052 :raise AccessError: if user has no read rights on some of the given fields
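An illustrative sketch (the record set and field names are assumptions)::

    data = records.read(['name', 'partner_id'])
    # -> [{'id': 1, 'name': '...', 'partner_id': (3, 'Partner')}, ...]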
3055 # check access rights
3056 self.check_access_rights('read')
3057 fields = self.check_field_access_rights('read', fields)
3059 # split fields into stored and computed fields
3060 stored, computed = [], []
3062 if name in self._columns:
3064 elif name in self._fields:
3065 computed.append(name)
3067 _logger.warning("%s.read() with unknown field '%s'", self._name, name)
3069 # fetch stored fields from the database to the cache
3070 self._read_from_database(stored)
3072 # retrieve results from records; this takes values from the cache and
3073 # computes remaining fields
3075 name_fields = [(name, self._fields[name]) for name in (stored + computed)]
3076 use_name_get = (load == '_classic_read')
3079 values = {'id': record.id}
3080 for name, field in name_fields:
3081 values[name] = field.convert_to_read(record[name], use_name_get)
3082 result.append(values)
3083 except MissingError:
3088 # add explicit old-style implementation to read()
3090 def read(self, cr, user, ids, fields=None, context=None, load='_classic_read'):
3091 records = self.browse(cr, user, ids, context)
3092 result = BaseModel.read(records, fields, load=load)
3093 return result if isinstance(ids, list) else (bool(result) and result[0])
3096 def _prefetch_field(self, field):
3097 """ Read from the database in order to fetch `field` (:class:`Field`
3098 instance) for `self` in cache.
3100 # fetch the records of this model without field_name in their cache
3101 records = self._in_cache_without(field)
3103 # by default, simply fetch field
3104 fnames = set((field.name,))
3107 # columns may be missing from database, do not prefetch other fields
3109 elif self.env.in_draft:
3110 # we may be doing an onchange, do not prefetch other fields
3112 elif field in self.env.todo:
3113 # field must be recomputed, do not prefetch records to recompute
3114 records -= self.env.todo[field]
3115 elif self._columns[field.name]._prefetch:
3116 # here we can optimize: prefetch all classic and many2one fields
3118 for fname, fcolumn in self._columns.iteritems()
3119 if fcolumn._prefetch)
3121 # fetch records with read()
3122 assert self in records and field.name in fnames
3124 result = records.read(list(fnames), load='_classic_write')
3125 except AccessError as e:
3126 # update cache with the exception
3127 records._cache[field] = FailedValue(e)
3130 # check the cache, and update it if necessary
3131 if field not in self._cache:
3132 for values in result:
3133 record = self.browse(values.pop('id'))
3134 record._cache.update(record._convert_to_cache(values))
3135 if field not in self._cache:
3136 e = AccessError("No value found for %s.%s" % (self, field.name))
3137 self._cache[field] = FailedValue(e)
3140 def _read_from_database(self, field_names):
3141 """ Read the given fields of the records in `self` from the database,
3142 and store them in cache. Access errors are also stored in cache.
3145 cr, user, context = env.args
3147 # Construct a clause for the security rules.
3148 # 'tables' holds the list of tables necessary for the SELECT, including
3149 # the ir.rule clauses, and contains at least self._table.
3150 rule_clause, rule_params, tables = env['ir.rule'].domain_get(self._name, 'read')
3152 # determine the fields that are stored as columns in self._table
3153 fields_pre = [f for f in field_names if self._columns[f]._classic_write]
3155 # we need fully-qualified column names in case len(tables) > 1
3157 if isinstance(self._columns.get(f), fields.binary) and \
3158 context.get('bin_size_%s' % f, context.get('bin_size')):
3159 # PG 9.2 introduces a conflicting pg_size_pretty(numeric) -> an explicit ::bigint cast is needed
3160 return 'pg_size_pretty(length(%s."%s")::bigint) as "%s"' % (self._table, f, f)
3162 return '%s."%s"' % (self._table, f)
3163 qual_names = map(qualify, set(fields_pre + ['id']))
3165 query = """ SELECT %(qual_names)s FROM %(tables)s
3166 WHERE %(table)s.id IN %%s AND (%(extra)s)
3169 'qual_names': ",".join(qual_names),
3170 'tables': ",".join(tables),
3171 'table': self._table,
3172 'extra': " OR ".join(rule_clause) if rule_clause else "TRUE",
3173 'order': self._parent_order or self._order,
3177 for sub_ids in cr.split_for_in_conditions(self.ids):
3178 cr.execute(query, [tuple(sub_ids)] + rule_params)
3179 result.extend(cr.dictfetchall())
3181 ids = [vals['id'] for vals in result]
3184 # translate the fields if necessary
3185 if context.get('lang'):
3186 ir_translation = env['ir.translation']
3187 for f in fields_pre:
3188 if self._columns[f].translate:
3189 #TODO: optimize out of this loop
3190 res_trans = ir_translation._get_ids(
3191 '%s,%s' % (self._name, f), 'model', context['lang'], ids)
3193 vals[f] = res_trans.get(vals['id'], False) or vals[f]
3195 # apply the symbol_get functions of the fields we just read
3196 for f in fields_pre:
3197 symbol_get = self._columns[f]._symbol_get
3200 vals[f] = symbol_get(vals[f])
3202 # store result in cache for POST fields
3204 record = self.browse(vals['id'])
3205 record._cache.update(record._convert_to_cache(vals))
3207 # determine the fields that must be processed now
3208 fields_post = [f for f in field_names if not self._columns[f]._classic_write]
3210 # Compute POST fields, grouped by multi
3211 by_multi = defaultdict(list)
3212 for f in fields_post:
3213 by_multi[self._columns[f]._multi].append(f)
3215 for multi, fs in by_multi.iteritems():
3217 res2 = self._columns[fs[0]].get(cr, self._model, ids, fs, user, context=context, values=result)
3218 assert res2 is not None, \
3219 'The function field "%s" on the "%s" model returned None\n' \
3220 '(a dictionary was expected).' % (fs[0], self._name)
3222 # TOCHECK: why do we get a string instead of a dict in python2.6?
3223 # if isinstance(res2[vals['id']], str): res2[vals['id']] = eval(res2[vals['id']])
3224 multi_fields = res2.get(vals['id'], {})
3227 vals[f] = multi_fields.get(f, [])
3230 res2 = self._columns[f].get(cr, self._model, ids, f, user, context=context, values=result)
3233 vals[f] = res2[vals['id']]
3237 # Warn about deprecated fields now that fields_pre and fields_post are computed
3238 for f in field_names:
3239 column = self._columns[f]
3240 if column.deprecated:
3241 _logger.warning('Field %s.%s is deprecated: %s', self._name, f, column.deprecated)
3243 # store result in cache
3245 record = self.browse(vals.pop('id'))
3246 record._cache.update(record._convert_to_cache(vals))
3248 # store failed values in cache for the records that could not be read
3249 fetched = self.browse(ids)
3250 missing = self - fetched
3252 extras = fetched - self
3255 _("Database fetch misses ids ({}) and has extra ids ({}), may be caused by a type incoherence in a previous request").format(
3256 ', '.join(map(repr, missing._ids)),
3257 ', '.join(map(repr, extras._ids)),
3259 # store an access error exception in existing records
3261 _('The requested operation cannot be completed due to security restrictions. Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
3262 (self._name, 'read')
3264 forbidden = missing.exists()
3265 forbidden._cache.update(FailedValue(exc))
3266 # store a missing error exception in non-existing records
3268 _('One of the documents you are trying to access has been deleted, please try again after refreshing.')
3270 (missing - forbidden)._cache.update(FailedValue(exc))
3273 def get_metadata(self):
3275 Returns some metadata about the given records.
3277 :return: list of ownership dictionaries for each requested record
3278 :rtype: list of dictionaries with the following keys:
3281 * create_uid: user who created the record
3282 * create_date: date when the record was created
3283 * write_uid: last user who changed the record
3284 * write_date: date of the last change to the record
3285 * xmlid: XML ID to use to refer to this record (if there is one), in format ``module.name``
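An entry of the returned list may look like (illustrative values only)::

    {'id': 7,
     'create_uid': (1, 'Administrator'), 'create_date': '2014-01-01 10:00:00',
     'write_uid': (1, 'Administrator'), 'write_date': '2014-01-02 09:30:00',
     'xmlid': 'base.partner_root'}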
3288 if self._log_access:
3289 fields += ['create_uid', 'create_date', 'write_uid', 'write_date']
3290 quoted_table = '"%s"' % self._table
3291 fields_str = ",".join('%s.%s' % (quoted_table, field) for field in fields)
3292 query = '''SELECT %s, __imd.module, __imd.name
3293 FROM %s LEFT JOIN ir_model_data __imd
3294 ON (__imd.model = %%s and __imd.res_id = %s.id)
3295 WHERE %s.id IN %%s''' % (fields_str, quoted_table, quoted_table, quoted_table)
3296 self._cr.execute(query, (self._name, tuple(self.ids)))
3297 res = self._cr.dictfetchall()
3299 uids = set(r[k] for r in res for k in ['write_uid', 'create_uid'] if r.get(k))
3300 names = dict(self.env['res.users'].browse(uids).name_get())
3304 value = r[key] = r[key] or False
3305 if key in ('write_uid', 'create_uid') and value in names:
3306 r[key] = (value, names[value])
3307 r['xmlid'] = ("%(module)s.%(name)s" % r) if r['name'] else False
3308 del r['name'], r['module']
3311 def _check_concurrency(self, cr, ids, context):
3314 if not (context.get(self.CONCURRENCY_CHECK_FIELD) and self._log_access):
3316 check_clause = "(id = %s AND %s < COALESCE(write_date, create_date, (now() at time zone 'UTC'))::timestamp)"
3317 for sub_ids in cr.split_for_in_conditions(ids):
3320 id_ref = "%s,%s" % (self._name, id)
3321 update_date = context[self.CONCURRENCY_CHECK_FIELD].pop(id_ref, None)
3323 ids_to_check.extend([id, update_date])
3324 if not ids_to_check:
3326 cr.execute("SELECT id FROM %s WHERE %s" % (self._table, " OR ".join([check_clause]*(len(ids_to_check)/2))), tuple(ids_to_check))
3329 # mention the first one only to keep the error message readable
3330 raise except_orm('ConcurrencyException', _('A document was modified since you last viewed it (%s:%d)') % (self._description, res[0]))
3332 def _check_record_rules_result_count(self, cr, uid, ids, result_ids, operation, context=None):
3333 """Verify the returned rows after applying record rules matches
3334 the length of `ids`, and raise an appropriate exception if it does not.
3338 ids, result_ids = set(ids), set(result_ids)
3339 missing_ids = ids - result_ids
3341 # Attempt to distinguish record rule restriction vs deleted records,
3342 # to provide a more specific error message - check whether the missing ids still exist in the database
3343 cr.execute('SELECT id FROM ' + self._table + ' WHERE id IN %s', (tuple(missing_ids),))
3344 forbidden_ids = [x[0] for x in cr.fetchall()]
3346 # the missing ids are (at least partially) hidden by access rules
3347 if uid == SUPERUSER_ID:
3349 _logger.warning('Access Denied by record rules for operation: %s on record ids: %r, uid: %s, model: %s', operation, forbidden_ids, uid, self._name)
3350 raise except_orm(_('Access Denied'),
3351 _('The requested operation cannot be completed due to security restrictions. Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
3352 (self._description, operation))
3354 # If we get here, the missing_ids are not in the database
3355 if operation in ('read','unlink'):
3356 # No need to warn about deleting an already deleted record.
3357 # And no error when reading a record that was deleted, to prevent spurious
3358 # errors for non-transactional search/read sequences coming from clients
3360 _logger.warning('Failed operation on deleted record(s): %s, uid: %s, model: %s', operation, uid, self._name)
3361 raise except_orm(_('Missing document(s)'),
3362 _('One of the documents you are trying to access has been deleted, please try again after refreshing.'))
3365 def check_access_rights(self, cr, uid, operation, raise_exception=True): # no context on purpose.
3366 """Verifies that the operation given by ``operation`` is allowed for the user
3367 according to the access rights."""
3368 return self.pool.get('ir.model.access').check(cr, uid, self._name, operation, raise_exception)
3370 def check_access_rule(self, cr, uid, ids, operation, context=None):
3371 """Verifies that the operation given by ``operation`` is allowed for the user
3372 according to ir.rules.
3374 :param operation: one of ``write``, ``unlink``
3375 :raise except_orm: * if current ir.rules do not permit this operation.
3376 :return: None if the operation is allowed
3378 if uid == SUPERUSER_ID:
3381 if self.is_transient():
3382 # Only one single implicit access rule for transient models: owner only!
3383 # This is ok to hardcode because we assert that TransientModels always
3384 # have log_access enabled so that the create_uid column is always there.
3385 # And even with _inherits, these fields are always present in the local
3386 # table too, so no need for JOINs.
3387 cr.execute("""SELECT distinct create_uid
3389 WHERE id IN %%s""" % self._table, (tuple(ids),))
3390 uids = [x[0] for x in cr.fetchall()]
3391 if len(uids) != 1 or uids[0] != uid:
3392 raise except_orm(_('Access Denied'),
3393 _('For this kind of document, you may only access records you created yourself.\n\n(Document type: %s)') % (self._description,))
3395 where_clause, where_params, tables = self.pool.get('ir.rule').domain_get(cr, uid, self._name, operation, context=context)
3397 where_clause = ' and ' + ' and '.join(where_clause)
3398 for sub_ids in cr.split_for_in_conditions(ids):
3399 cr.execute('SELECT ' + self._table + '.id FROM ' + ','.join(tables) +
3400 ' WHERE ' + self._table + '.id IN %s' + where_clause,
3401 [sub_ids] + where_params)
3402 returned_ids = [x['id'] for x in cr.dictfetchall()]
3403 self._check_record_rules_result_count(cr, uid, sub_ids, returned_ids, operation, context=context)
3405 def create_workflow(self, cr, uid, ids, context=None):
3406 """Create a workflow instance for each given record IDs."""
3407 from openerp import workflow
3409 workflow.trg_create(uid, self._name, res_id, cr)
3410 # self.invalidate_cache(cr, uid, context=context) ?
3413 def delete_workflow(self, cr, uid, ids, context=None):
3414 """Delete the workflow instances bound to the given record IDs."""
3415 from openerp import workflow
3417 workflow.trg_delete(uid, self._name, res_id, cr)
3418 self.invalidate_cache(cr, uid, context=context)
3421 def step_workflow(self, cr, uid, ids, context=None):
3422 """Reevaluate the workflow instances of the given record IDs."""
3423 from openerp import workflow
3425 workflow.trg_write(uid, self._name, res_id, cr)
3426 # self.invalidate_cache(cr, uid, context=context) ?
3429 def signal_workflow(self, cr, uid, ids, signal, context=None):
3430 """Send given workflow signal and return a dict mapping ids to workflow results"""
3431 from openerp import workflow
3434 result[res_id] = workflow.trg_validate(uid, self._name, res_id, signal, cr)
3435 # self.invalidate_cache(cr, uid, context=context) ?
3438 def redirect_workflow(self, cr, uid, old_new_ids, context=None):
3439 """ Rebind the workflow instance bound to the given 'old' record IDs to
3440 the given 'new' IDs (``old_new_ids`` is a list of pairs ``(old, new)``).
3442 from openerp import workflow
3443 for old_id, new_id in old_new_ids:
3444 workflow.trg_redirect(uid, self._name, old_id, new_id, cr)
3445 self.invalidate_cache(cr, uid, context=context)
3448 def unlink(self, cr, uid, ids, context=None):
3450 Delete records with given ids
3452 :param cr: database cursor
3453 :param uid: current user id
3454 :param ids: id or list of ids
3455 :param context: (optional) context arguments, like lang, time zone
3457 :raise AccessError: * if user has no unlink rights on the requested object
3458 * if user tries to bypass access rules for unlink on the requested object
3459 :raise UserError: if the record is a default property for other records
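An illustrative sketch (the ids are assumptions)::

    model.unlink(cr, uid, [7, 8], context=context)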
3464 if isinstance(ids, (int, long)):
3467 result_store = self._store_get_values(cr, uid, ids, self._all_columns.keys(), context)
3469 # for recomputing new-style fields
3470 recs = self.browse(cr, uid, ids, context)
3471 recs.modified(self._fields)
3473 self._check_concurrency(cr, ids, context)
3475 self.check_access_rights(cr, uid, 'unlink')
3477 ir_property = self.pool.get('ir.property')
3479 # Check if the records are used as default properties.
3480 domain = [('res_id', '=', False),
3481 ('value_reference', 'in', ['%s,%s' % (self._name, i) for i in ids]),
3483 if ir_property.search(cr, uid, domain, context=context):
3484 raise except_orm(_('Error'), _('Unable to delete this document because it is used as a default property'))
3486 # Delete the records' properties.
3487 property_ids = ir_property.search(cr, uid, [('res_id', 'in', ['%s,%s' % (self._name, i) for i in ids])], context=context)
3488 ir_property.unlink(cr, uid, property_ids, context=context)
3490 self.delete_workflow(cr, uid, ids, context=context)
3492 self.check_access_rule(cr, uid, ids, 'unlink', context=context)
3493 pool_model_data = self.pool.get('ir.model.data')
3494 ir_values_obj = self.pool.get('ir.values')
3495 for sub_ids in cr.split_for_in_conditions(ids):
3496 cr.execute('delete from ' + self._table + ' ' \
3497 'where id IN %s', (sub_ids,))
3499 # Remove the ir_model_data reference if the record being deleted was created by an xml/csv file,
3500 # as these are not connected with real database foreign keys, and would be dangling references.
3501 # Note: following steps performed as admin to avoid access rights restrictions, and with no context
3502 # to avoid possible side-effects during admin calls.
3503 # Step 1. Calling unlink of ir_model_data only for the affected IDs
3504 reference_ids = pool_model_data.search(cr, SUPERUSER_ID, [('res_id','in',list(sub_ids)),('model','=',self._name)])
3505 # Step 2. Marching towards the real deletion of referenced records
3507 pool_model_data.unlink(cr, SUPERUSER_ID, reference_ids)
3509 # For the same reason, removing the record relevant to ir_values
3510 ir_value_ids = ir_values_obj.search(cr, uid,
3511 ['|',('value','in',['%s,%s' % (self._name, sid) for sid in sub_ids]),'&',('res_id','in',list(sub_ids)),('model','=',self._name)],
3514 ir_values_obj.unlink(cr, uid, ir_value_ids, context=context)
3516 # invalidate the *whole* cache, since the orm does not handle all
3517 # changes made in the database, like cascading delete!
3518 recs.invalidate_cache()
3520 for order, obj_name, store_ids, fields in result_store:
3521 if obj_name == self._name:
3522 effective_store_ids = set(store_ids) - set(ids)
3524 effective_store_ids = store_ids
3525 if effective_store_ids:
3526 obj = self.pool[obj_name]
3527 cr.execute('select id from '+obj._table+' where id IN %s', (tuple(effective_store_ids),))
3528 rids = map(lambda x: x[0], cr.fetchall())
3530 obj._store_set_values(cr, uid, rids, fields, context)
3532 # recompute new-style fields
3541 def write(self, vals):
3543 Update records in `self` with the given field values.
3545 :param vals: field values to update, e.g {'field_name': new_field_value, ...}
3546 :type vals: dictionary
3548 :raise AccessError: * if user has no write rights on the requested object
3549 * if user tries to bypass access rules for write on the requested object
3550 :raise ValidateError: if user tries to enter an invalid value for a field that is not in the selection
3551 :raise UserError: if a loop would be created in a hierarchy of objects as a result of the operation (such as setting an object as its own parent)
3553 **Note**: The type of field values to pass in ``vals`` for relationship fields is specific:
3555 + For a many2many field, a list of tuples is expected.
3556 Here is the list of tuples that are accepted, with the corresponding semantics ::
3558 (0, 0, { values }) link to a new record that needs to be created with the given values dictionary
3559 (1, ID, { values }) update the linked record with id = ID (write *values* on it)
3560 (2, ID) remove and delete the linked record with id = ID (calls unlink on ID, that will delete the object completely, and the link to it as well)
3561 (3, ID) cut the link to the linked record with id = ID (delete the relationship between the two objects but does not delete the target object itself)
3562 (4, ID) link to existing record with id = ID (adds a relationship)
3563 (5) unlink all (like using (3,ID) for all linked records)
3564 (6, 0, [IDs]) replace the list of linked IDs (like using (5) then (4,ID) for each ID in the list of IDs)
3567 [(6, 0, [8, 5, 6, 4])] sets the many2many to ids [8, 5, 6, 4]
3569 + For a one2many field, a list of tuples is expected.
3570 Here is the list of tuples that are accepted, with the corresponding semantics ::
3572 (0, 0, { values }) link to a new record that needs to be created with the given values dictionary
3573 (1, ID, { values }) update the linked record with id = ID (write *values* on it)
3574 (2, ID) remove and delete the linked record with id = ID (calls unlink on ID, that will delete the object completely, and the link to it as well)
3577 [(0, 0, {'field_name':field_value_record1, ...}), (0, 0, {'field_name':field_value_record2, ...})]
3579 + For a many2one field, simply use the ID of target record, which must already exist, or ``False`` to remove the link.
3580 + For a reference field, use a string with the model name, a comma, and the target object id (example: ``'product.product, 5'``)
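As an illustrative sketch combining the commands above (the model and
field names are assumptions, not part of this module)::

    order.write({
        'partner_id': 42,                      # many2one: plain id
        'line_ids': [(0, 0, {'name': 'new'}),  # create and link a new record
                     (1, 7, {'qty': 2}),       # update linked record 7
                     (3, 8)],                  # cut the link to record 8
    })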
3586 cr, uid, context = self.env.args
3587 self._check_concurrency(self._ids)
3588 self.check_access_rights('write')
3590 # No user-driven update of these columns
3591 for field in itertools.chain(MAGIC_COLUMNS, ('parent_left', 'parent_right')):
3592 vals.pop(field, None)
3594 # split up fields into old-style and pure new-style ones
3595 old_vals, new_vals, unknown = {}, {}, []
3596 for key, val in vals.iteritems():
3597 if key in self._columns:
3599 elif key in self._fields:
3605 _logger.warning("%s.write() with unknown fields: %s", self._name, ', '.join(sorted(unknown)))
3607 # write old-style fields with (low-level) method _write
3609 self._write(old_vals)
3611 # put the values of pure new-style fields into cache, and inverse them
3613 self._cache.update(self._convert_to_cache(new_vals))
3614 for key in new_vals:
3615 self._fields[key].determine_inverse(self)
3619 def _write(self, cr, user, ids, vals, context=None):
3620 # low-level implementation of write()
3625 self.check_field_access_rights(cr, user, 'write', vals.keys())
3626 for field in vals.keys():
3628 if field in self._columns:
3629 fobj = self._columns[field]
3630 elif field in self._inherit_fields:
3631 fobj = self._inherit_fields[field][2]
3638 for group in groups:
3639 module = group.split(".")[0]
3640 grp = group.split(".")[1]
3641 cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name=%s and module=%s and model=%s) and uid=%s", \
3642 (grp, module, 'res.groups', user))
3643 readonly = cr.fetchall()
3644 if readonly[0][0] >= 1:
3651 result = self._store_get_values(cr, user, ids, vals.keys(), context) or []
3653 # for recomputing new-style fields
3654 recs = self.browse(cr, user, ids, context)
3655 modified_fields = list(vals)
3656 if self._log_access:
3657 modified_fields += ['write_date', 'write_uid']
3658 recs.modified(modified_fields)
3660 parents_changed = []
3661 parent_order = self._parent_order or self._order
3662 if self._parent_store and (self._parent_name in vals) and not context.get('defer_parent_store_computation'):
3663 # The parent_left/right computation may take up to
3664 # 5 seconds. No need to recompute the values if the
3665 # parent is the same.
3666 # Note: to respect parent_order, nodes must be processed in
3667 # order, so ``parents_changed`` must be ordered properly.
3668 parent_val = vals[self._parent_name]
3670 query = "SELECT id FROM %s WHERE id IN %%s AND (%s != %%s OR %s IS NULL) ORDER BY %s" % \
3671 (self._table, self._parent_name, self._parent_name, parent_order)
3672 cr.execute(query, (tuple(ids), parent_val))
3674 query = "SELECT id FROM %s WHERE id IN %%s AND (%s IS NOT NULL) ORDER BY %s" % \
3675 (self._table, self._parent_name, parent_order)
3676 cr.execute(query, (tuple(ids),))
3677 parents_changed = map(operator.itemgetter(0), cr.fetchall())
3684 totranslate = context.get('lang', False) and (context['lang'] != 'en_US')
3686 field_column = self._all_columns.get(field) and self._all_columns.get(field).column
3687 if field_column and field_column.deprecated:
3688 _logger.warning('Field %s.%s is deprecated: %s', self._name, field, field_column.deprecated)
3689 if field in self._columns:
3690 if self._columns[field]._classic_write and not (hasattr(self._columns[field], '_fnct_inv')):
3691 if (not totranslate) or not self._columns[field].translate:
3692 upd0.append('"'+field+'"='+self._columns[field]._symbol_set[0])
3693 upd1.append(self._columns[field]._symbol_set[1](vals[field]))
3694 direct.append(field)
3696 upd_todo.append(field)
3698 updend.append(field)
3699 if field in self._columns \
3700 and hasattr(self._columns[field], 'selection') \
3702 self._check_selection_field_value(cr, user, field, vals[field], context=context)
3704 if self._log_access:
3705 upd0.append('write_uid=%s')
3706 upd0.append("write_date=(now() at time zone 'UTC')")
3710 self.check_access_rule(cr, user, ids, 'write', context=context)
3711 for sub_ids in cr.split_for_in_conditions(ids):
3712 cr.execute('update ' + self._table + ' set ' + ','.join(upd0) + ' ' \
3713 'where id IN %s', upd1 + [sub_ids])
3714 if cr.rowcount != len(sub_ids):
3715 raise MissingError(_('One of the records you are trying to modify has already been deleted (Document type: %s).') % self._description)
3720 if self._columns[f].translate:
3721 src_trans = self.pool[self._name].read(cr, user, ids, [f])[0][f]
3724 # Insert the value into the DB
3725 context_wo_lang = dict(context, lang=None)
3726 self.write(cr, user, ids, {f: vals[f]}, context=context_wo_lang)
3727 self.pool.get('ir.translation')._set_ids(cr, user, self._name+','+f, 'model', context['lang'], ids, vals[f], src_trans)
3729 # call the 'set' method of fields which are not classic_write
3730 upd_todo.sort(lambda x, y: self._columns[x].priority-self._columns[y].priority)
3732 # default elements in context must be removed when calling a one2many or many2many
3733 rel_context = context.copy()
3734 for c in context.items():
3735 if c[0].startswith('default_'):
3736 del rel_context[c[0]]
3738 for field in upd_todo:
3740 result += self._columns[field].set(cr, self, id, field, vals[field], user, context=rel_context) or []
3742 unknown_fields = updend[:]
3743 for table in self._inherits:
3744 col = self._inherits[table]
3746 for sub_ids in cr.split_for_in_conditions(ids):
3747 cr.execute('select distinct "'+col+'" from "'+self._table+'" ' \
3748 'where id IN %s', (sub_ids,))
3749 nids.extend([x[0] for x in cr.fetchall()])
3753 if self._inherit_fields[val][0] == table:
3755 unknown_fields.remove(val)
3757 self.pool[table].write(cr, user, nids, v, context)
3761 'No such field(s) in model %s: %s.',
3762 self._name, ', '.join(unknown_fields))
3764 # check Python constraints
3765 recs._validate_fields(vals)
3767 # TODO: use _order to set dest at the right position and not first node of parent
3768 # We can't defer parent_store computation because the stored function
3769 # fields that are computed may refer (directly or indirectly) to
3770 # parent_left/right (via a child_of domain)
3773 self.pool._init_parent[self._name] = True
3775 order = self._parent_order or self._order
3776 parent_val = vals[self._parent_name]
3778 clause, params = '%s=%%s' % (self._parent_name,), (parent_val,)
3780 clause, params = '%s IS NULL' % (self._parent_name,), ()
3782 for id in parents_changed:
3783 cr.execute('SELECT parent_left, parent_right FROM %s WHERE id=%%s' % (self._table,), (id,))
3784 pleft, pright = cr.fetchone()
3785 distance = pright - pleft + 1
3787 # Positions of current siblings, to locate proper insertion point;
3788 # this can _not_ be fetched outside the loop, as it needs to be refreshed
3789 # after each update, in case several nodes are sequentially inserted one
3790 # next to the other (i.e. computed incrementally)
3791 cr.execute('SELECT parent_right, id FROM %s WHERE %s ORDER BY %s' % (self._table, clause, parent_order), params)
3792 parents = cr.fetchall()
3794 # Find the position of the element
3796 for (parent_pright, parent_id) in parents:
3799 position = parent_pright and parent_pright + 1 or 1
3801 # It's the first node of the parent
3806 cr.execute('select parent_left from '+self._table+' where id=%s', (parent_val,))
3807 position = cr.fetchone()[0] + 1
3809 if pleft < position <= pright:
3810 raise except_orm(_('UserError'), _('Recursivity Detected.'))
3812 if pleft < position:
3813 cr.execute('update '+self._table+' set parent_left=parent_left+%s where parent_left>=%s', (distance, position))
3814 cr.execute('update '+self._table+' set parent_right=parent_right+%s where parent_right>=%s', (distance, position))
3815 cr.execute('update '+self._table+' set parent_left=parent_left+%s, parent_right=parent_right+%s where parent_left>=%s and parent_left<%s', (position-pleft, position-pleft, pleft, pright))
3817 cr.execute('update '+self._table+' set parent_left=parent_left+%s where parent_left>=%s', (distance, position))
3818 cr.execute('update '+self._table+' set parent_right=parent_right+%s where parent_right>=%s', (distance, position))
3819 cr.execute('update '+self._table+' set parent_left=parent_left-%s, parent_right=parent_right-%s where parent_left>=%s and parent_left<%s', (pleft-position+distance, pleft-position+distance, pleft+distance, pright+distance))
3820 recs.invalidate_cache(['parent_left', 'parent_right'])
3822 result += self._store_get_values(cr, user, ids, vals.keys(), context)
3825 # for recomputing new-style fields
3826 recs.modified(modified_fields)
3829 for order, model_name, ids_to_update, fields_to_recompute in result:
3830 key = (model_name, tuple(fields_to_recompute))
3831 done.setdefault(key, {})
3832 # avoid doing the same computation several times
3834 for id in ids_to_update:
3835 if id not in done[key]:
3836 done[key][id] = True
3838 self.pool[model_name]._store_set_values(cr, user, todo, fields_to_recompute, context)
3840 # recompute new-style fields
3841 if context.get('recompute', True):
3844 self.step_workflow(cr, user, ids, context=context)
3848 # TODO: Should set perm to user.xxx
3851 @api.returns('self', lambda value: value.id)
3852 def create(self, vals):
3853 """ Create a new record for the model.
3855 The values for the new record are initialized using the dictionary
3856 `vals`, and if necessary the result of :meth:`default_get`.
3858 :param vals: field values like ``{'field_name': field_value, ...}``,
3859 see :meth:`write` for details about the values format
3860 :return: new record created
3861 :raise AccessError: * if user has no create rights on the requested object
3862 * if user tries to bypass access rules for create on the requested object
3863 :raise ValidateError: if user tries to enter an invalid value for a field that is not in the selection
3864 :raise UserError: if a loop would be created in a hierarchy of objects as a result of the operation (such as setting an object as its own parent)
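An illustrative sketch (the model, field names and values are
assumptions)::

    record = model.create({'name': 'Example',
                           'line_ids': [(0, 0, {'name': 'first line'})]})
    # `record` is the newly created record (new-style API)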
3866 self.check_access_rights('create')
3868 # add missing defaults, and drop fields that may not be set by user
3869 vals = self._add_missing_default_values(vals)
3870 for field in itertools.chain(MAGIC_COLUMNS, ('parent_left', 'parent_right')):
3871 vals.pop(field, None)
3873 # split up fields into old-style and pure new-style ones
3874 old_vals, new_vals, unknown = {}, {}, []
3875 for key, val in vals.iteritems():
3876 if key in self._all_columns:
3878 elif key in self._fields:
3884 _logger.warning("%s.create() with unknown fields: %s", self._name, ', '.join(sorted(unknown)))
3886 # create record with old-style fields
3887 record = self.browse(self._create(old_vals))
3889 # put the values of pure new-style fields into cache, and inverse them
3890 record._cache.update(record._convert_to_cache(new_vals))
3891 for key in new_vals:
3892 self._fields[key].determine_inverse(record)
3896 def _create(self, cr, user, vals, context=None):
3897 # low-level implementation of create()
3901 if self.is_transient():
3902 self._transient_vacuum(cr, user)
3905 for v in self._inherits:
3906 if self._inherits[v] not in vals:
3909 tocreate[v] = {'id': vals[self._inherits[v]]}
3912 # list of column assignments defined as tuples like:
3913 # (column_name, format_string, column_value)
3914 # (column_name, sql_formula)
3915 # Those tuples will be used by the string formatting for the INSERT
3917 ('id', "nextval('%s')" % self._sequence),
3922 for v in vals.keys():
3923 if v in self._inherit_fields and v not in self._columns:
3924 (table, col, col_detail, original_parent) = self._inherit_fields[v]
3925 tocreate[table][v] = vals[v]
3928 if (v not in self._inherit_fields) and (v not in self._columns):
3930 unknown_fields.append(v)
3933 'No such field(s) in model %s: %s.',
3934 self._name, ', '.join(unknown_fields))
3936 for table in tocreate:
3937 if self._inherits[table] in vals:
3938 del vals[self._inherits[table]]
3940 record_id = tocreate[table].pop('id', None)
3942 if isinstance(record_id, dict):
3943 # Shit happens: this possibly comes from a new record
3944 tocreate[table] = dict(record_id, **tocreate[table])
3947 # When linking/creating parent records, force a context without the 'no_store_function' key that
3948 # defers stored function computation, as these won't be computed in batch at the end of create().
3949 parent_context = dict(context)
3950 parent_context.pop('no_store_function', None)
3952 if not record_id:
3953 record_id = self.pool[table].create(cr, user, tocreate[table], context=parent_context)
3955 self.pool[table].write(cr, user, [record_id], tocreate[table], context=parent_context)
3957 updates.append((self._inherits[table], '%s', record_id))
3959 # Set boolean fields to False if they are not touched (to make search more powerful)
3960 bool_fields = [x for x in self._columns.keys() if self._columns[x]._type=='boolean']
3962 for bool_field in bool_fields:
3963 if bool_field not in vals:
3964 vals[bool_field] = False
3966 for field in vals.keys():
3968 if field in self._columns:
3969 fobj = self._columns[field]
3971 fobj = self._inherit_fields[field][2]
3977 for group in groups:
3978 module = group.split(".")[0]
3979 grp = group.split(".")[1]
3980 cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name=%s and module=%s and model=%s) and uid=%s", \
3981 (grp, module, 'res.groups', user))
3982 readonly = cr.fetchall()
3983 if readonly[0][0] >= 1:
3986 elif readonly[0][0] == 0:
3994 current_field = self._columns[field]
3995 if current_field._classic_write:
3996 updates.append((field, '%s', current_field._symbol_set[1](vals[field])))
3998 #for the function fields that receive a value, we set them directly in the database
3999 #(they may be required), but we also need to trigger the _fnct_inv()
4000 if (hasattr(current_field, '_fnct_inv')) and not isinstance(current_field, fields.related):
4001 #TODO: this way to special-case the related fields is really creepy but it shouldn't be changed
4002 #one week before the release candidate. It seems the only good way to handle this correctly is to add an
4003 #attribute to make a field `really readonly` and thus totally ignored by create()... otherwise
4004 #if, for example, the related has a default value (for usability) then the fct_inv is called and it
4005 #may raise an access rights error. Changing this is too big a change for now, and is thus postponed
4006 #until after the release but, definitely, the behavior shouldn't be different for related and function
4008 upd_todo.append(field)
4010 #TODO: this `if` statement should be removed because there is no good reason to special-case the
4011 #related fields. See the above TODO comment for further explanations.
4012 if not isinstance(current_field, fields.related):
4013 upd_todo.append(field)
4014 if field in self._columns \
4015 and hasattr(current_field, 'selection') \
4017 self._check_selection_field_value(cr, user, field, vals[field], context=context)
4018 if self._log_access:
4019 updates.append(('create_uid', '%s', user))
4020 updates.append(('write_uid', '%s', user))
4021 updates.append(('create_date', "(now() at time zone 'UTC')"))
4022 updates.append(('write_date', "(now() at time zone 'UTC')"))
4024 # the list of tuples used in this formatting corresponds to
4025 # tuple(field_name, format, value)
4026 # In some cases, for example (id, create_date, write_date), we do not
4027 # need to read the third value of the tuple, because the real value is
4028 # encoded in the second value (the format).
4030 """INSERT INTO "%s" (%s) VALUES(%s) RETURNING id""" % (
4032 ', '.join('"%s"' % u[0] for u in updates),
4033 ', '.join(u[1] for u in updates)
4035 tuple([u[2] for u in updates if len(u) > 2])
4038 id_new, = cr.fetchone()
4039 recs = self.browse(cr, user, id_new, context)
4040 upd_todo.sort(lambda x, y: self._columns[x].priority-self._columns[y].priority)
4042 if self._parent_store and not context.get('defer_parent_store_computation'):
4044 self.pool._init_parent[self._name] = True
4046 parent = vals.get(self._parent_name, False)
4048 cr.execute('select parent_right from '+self._table+' where '+self._parent_name+'=%s order by '+(self._parent_order or self._order), (parent,))
4050 result_p = cr.fetchall()
4051 for (pleft,) in result_p:
4056 cr.execute('select parent_left from '+self._table+' where id=%s', (parent,))
4057 pleft_old = cr.fetchone()[0]
4060 cr.execute('select max(parent_right) from '+self._table)
4061 pleft = cr.fetchone()[0] or 0
4062 cr.execute('update '+self._table+' set parent_left=parent_left+2 where parent_left>%s', (pleft,))
4063 cr.execute('update '+self._table+' set parent_right=parent_right+2 where parent_right>%s', (pleft,))
4064 cr.execute('update '+self._table+' set parent_left=%s,parent_right=%s where id=%s', (pleft+1, pleft+2, id_new))
4065 recs.invalidate_cache(['parent_left', 'parent_right'])
4067 # default elements in context must be removed when calling a one2many or many2many
4068 rel_context = context.copy()
4069 for c in context.items():
4070 if c[0].startswith('default_'):
4071 del rel_context[c[0]]
4074 for field in upd_todo:
4075 result += self._columns[field].set(cr, self, id_new, field, vals[field], user, rel_context) or []
4077 # check Python constraints
4078 recs._validate_fields(vals)
4080 if not context.get('no_store_function', False):
4081 result += self._store_get_values(cr, user, [id_new],
4082 list(set(vals.keys() + self._inherits.values())),
4086 for order, model_name, ids, fields2 in result:
4087 if not (model_name, ids, fields2) in done:
4088 self.pool[model_name]._store_set_values(cr, user, ids, fields2, context)
4089 done.append((model_name, ids, fields2))
4091 # recompute new-style fields
4092 modified_fields = list(vals)
4093 if self._log_access:
4094 modified_fields += ['create_uid', 'create_date', 'write_uid', 'write_date']
4095 recs.modified(modified_fields)
4098 if self._log_create and not (context and context.get('no_store_function', False)):
4099 message = self._description + \
4101 self.name_get(cr, user, [id_new], context=context)[0][1] + \
4102 "' " + _("created.")
4103 self.log(cr, user, id_new, message, True, context=context)
4105 self.check_access_rule(cr, user, [id_new], 'create', context=context)
4106 self.create_workflow(cr, user, [id_new], context=context)
4109 def _store_get_values(self, cr, uid, ids, fields, context):
4110 """Returns an ordered list of fields.function to call due to
4111 an update operation on ``fields`` of records with ``ids``,
4112 obtained by calling the 'store' triggers of these fields,
4113 as set up by their 'store' attribute.
4115 :return: [(priority, model_name, [record_ids,], [function_fields,])]
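The returned list may look like (illustrative model and field names,
not from this module)::

    [(10, 'account.move', [1, 2], ['amount_total']),
     (20, 'account.move.line', [5, 6], ['balance'])]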
4117 if fields is None: fields = []
4118 stored_functions = self.pool._store_function.get(self._name, [])
4120 # use indexed names for the details of the stored_functions:
4121 model_name_, func_field_to_compute_, target_ids_func_, trigger_fields_, priority_ = range(5)
4123 # only keep store triggers that should be triggered for the ``fields``
4125 triggers_to_compute = (
4126 f for f in stored_functions
4127 if not f[trigger_fields_] or set(fields).intersection(f[trigger_fields_])
4131 target_id_results = {}
4132 for store_trigger in triggers_to_compute:
4133 target_func_id_ = id(store_trigger[target_ids_func_])
4134 if target_func_id_ not in target_id_results:
4135 # use admin user for accessing objects having rules defined on store fields
4136 target_id_results[target_func_id_] = [i for i in store_trigger[target_ids_func_](self, cr, SUPERUSER_ID, ids, context) if i]
4137 target_ids = target_id_results[target_func_id_]
4139 # the compound key must consider the priority and model name
4140 key = (store_trigger[priority_], store_trigger[model_name_])
4141 for target_id in target_ids:
4142 to_compute_map.setdefault(key, {}).setdefault(target_id,set()).add(tuple(store_trigger))
4144 # Here to_compute_map looks like:
4145 # { (10, 'model_a') : { target_id1: [ (trigger_1_tuple, trigger_2_tuple) ], ... }
4146 # (20, 'model_a') : { target_id2: [ (trigger_3_tuple, trigger_4_tuple) ], ... }
4147 # (99, 'model_a') : { target_id1: [ (trigger_5_tuple, trigger_6_tuple) ], ... }
4150 # Now we need to generate the batch function calls list
4152 # { (10, 'model_a') : [(10, 'model_a', [record_ids,], [function_fields,])] }
4154 for ((priority,model), id_map) in to_compute_map.iteritems():
4155 trigger_ids_maps = {}
4156 # trigger_ids_maps =
4157 # { (trigger_1_tuple, trigger_2_tuple) : [target_id1, target_id2, ..] }
4158 for target_id, triggers in id_map.iteritems():
4159 trigger_ids_maps.setdefault(tuple(triggers), []).append(target_id)
4160 for triggers, target_ids in trigger_ids_maps.iteritems():
4161 call_map.setdefault((priority,model),[]).append((priority, model, target_ids,
4162 [t[func_field_to_compute_] for t in triggers]))
4165 result = reduce(operator.add, (call_map[k] for k in sorted(call_map)))
4168 def _store_set_values(self, cr, uid, ids, fields, context):
4169 """Calls the fields.function's "implementation function" for all ``fields``, on records with ``ids`` (taking care of
4170 respecting ``multi`` attributes), and stores the resulting values in the database directly."""
4175 if self._log_access:
4176 cr.execute('select id,write_date from '+self._table+' where id IN %s', (tuple(ids),))
4180 field_dict.setdefault(r[0], [])
4181 res_date = time.strptime((r[1])[:19], '%Y-%m-%d %H:%M:%S')
4182 write_date = datetime.datetime.fromtimestamp(time.mktime(res_date))
4183 for i in self.pool._store_function.get(self._name, []):
4185 up_write_date = write_date + datetime.timedelta(hours=i[5])
4186 if datetime.datetime.now() < up_write_date:
4188 field_dict[r[0]].append(i[1])
4194 if self._columns[f]._multi not in keys:
4195 keys.append(self._columns[f]._multi)
4196 todo.setdefault(self._columns[f]._multi, [])
4197 todo[self._columns[f]._multi].append(f)
4201 # use admin user for accessing objects having rules defined on store fields
4202 result = self._columns[val[0]].get(cr, self, ids, val, SUPERUSER_ID, context=context)
4203 for id, value in result.items():
4205 for f in value.keys():
4206 if f in field_dict[id]:
4213 if self._columns[v]._type == 'many2one':
4215 value[v] = value[v][0]
4218 upd0.append('"'+v+'"='+self._columns[v]._symbol_set[0])
4219 upd1.append(self._columns[v]._symbol_set[1](value[v]))
4222 cr.execute('update "' + self._table + '" set ' + \
4223 ','.join(upd0) + ' where id = %s', upd1)
4227 # use admin user for accessing objects having rules defined on store fields
4228 result = self._columns[f].get(cr, self, ids, f, SUPERUSER_ID, context=context)
4229 for r in result.keys():
4231 if r in field_dict.keys():
4232 if f in field_dict[r]:
4234 for id, value in result.items():
4235 if self._columns[f]._type == 'many2one':
4240 cr.execute('update "' + self._table + '" set ' + \
4241 '"'+f+'"='+self._columns[f]._symbol_set[0] + ' where id = %s', (self._columns[f]._symbol_set[1](value), id))
4243 # invalidate the cache for the modified fields
4244 self.browse(cr, uid, ids, context).modified(fields)
4248 # TODO: improve handling of NULL
4249 def _where_calc(self, cr, user, domain, active_test=True, context=None):
4250 """Computes the WHERE clause needed to implement an OpenERP domain.
4251 :param domain: the domain to compute
4253 :param active_test: whether the default filtering of records with ``active``
4254 field set to ``False`` should be applied.
4255 :return: the query expressing the given domain as provided in domain
4256 :rtype: osv.query.Query
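An illustrative sketch (the domain is an assumption)::

    query = model._where_calc(cr, uid, [('name', 'ilike', 'foo')], context=context)
    from_clause, where_clause, params = query.get_sql()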
4261 # if the object has a field named 'active', filter out all inactive
4262 # records unless they were explicitly asked for
4263 if 'active' in self._all_columns and (active_test and context.get('active_test', True)):
4265 # the item[0] trick below works for domain items and '&'/'|'/'!'
4267 if not any(item[0] == 'active' for item in domain):
4268 domain.insert(0, ('active', '=', 1))
4270 domain = [('active', '=', 1)]
4273 e = expression.expression(cr, user, domain, self, context)
4274 tables = e.get_tables()
4275 where_clause, where_params = e.to_sql()
4276 where_clause = where_clause and [where_clause] or []
4278 where_clause, where_params, tables = [], [], ['"%s"' % self._table]
4280 return Query(tables, where_clause, where_params)
4282 def _check_qorder(self, word):
4283 if not regex_order.match(word):
4284 raise except_orm(_('AccessError'), _('Invalid "order" specified. A valid "order" specification is a comma-separated list of valid field names (optionally followed by asc/desc for the direction)'))
4287 def _apply_ir_rules(self, cr, uid, query, mode='read', context=None):
4288 """Add what's missing in ``query`` to implement all appropriate ir.rules
4289 (using the ``model_name``'s rules or the current model's rules if ``model_name`` is None)
4291 :param query: the current query object
4293 if uid == SUPERUSER_ID:
4296 def apply_rule(added_clause, added_params, added_tables, parent_model=None):
4297 """ :param parent_model: name of the parent model, if the added
4298 clause comes from a parent model
4302 # as inherited rules are being applied, we need to add the missing JOIN
4303 # to reach the parent table (if it was not JOINed yet in the query)
4304 parent_alias = self._inherits_join_add(self, parent_model, query)
4305 # inherited rules are applied on the external table -> need to get the alias and replace
4306 parent_table = self.pool[parent_model]._table
4307 added_clause = [clause.replace('"%s"' % parent_table, '"%s"' % parent_alias) for clause in added_clause]
4308 # change references to parent_table to parent_alias, because we now use the alias to refer to the table
4310 for table in added_tables:
4311 # table is just a table name -> switch to the full alias
4312 if table == '"%s"' % parent_table:
4313 new_tables.append('"%s" as "%s"' % (parent_table, parent_alias))
4314 # table is already a full statement -> replace references to the table with its alias; this is correct given the way aliases are generated
4316 new_tables.append(table.replace('"%s"' % parent_table, '"%s"' % parent_alias))
4317 added_tables = new_tables
4318 query.where_clause += added_clause
4319 query.where_clause_params += added_params
4320 for table in added_tables:
4321 if table not in query.tables:
4322 query.tables.append(table)
4326 # apply main rules on the object
4327 rule_obj = self.pool.get('ir.rule')
4328 rule_where_clause, rule_where_clause_params, rule_tables = rule_obj.domain_get(cr, uid, self._name, mode, context=context)
4329 apply_rule(rule_where_clause, rule_where_clause_params, rule_tables)
4331 # apply ir.rules from the parents (through _inherits)
4332 for inherited_model in self._inherits:
4333 rule_where_clause, rule_where_clause_params, rule_tables = rule_obj.domain_get(cr, uid, inherited_model, mode, context=context)
4334 apply_rule(rule_where_clause, rule_where_clause_params, rule_tables,
4335 parent_model=inherited_model)
4337 def _generate_m2o_order_by(self, order_field, query):
4339 Add possibly missing JOIN to ``query`` and generate the ORDER BY clause for m2o fields,
4340 either native m2o fields or function/related fields that are stored, including
4341 intermediate JOINs for inheritance if required.
4343 :return: the qualified field name to use in an ORDER BY clause to sort by ``order_field``
4345 if order_field not in self._columns and order_field in self._inherit_fields:
4346 # also add missing joins for reaching the table containing the m2o field
4347 qualified_field = self._inherits_join_calc(order_field, query)
4348 order_field_column = self._inherit_fields[order_field][2]
4350 qualified_field = '"%s"."%s"' % (self._table, order_field)
4351 order_field_column = self._columns[order_field]
4353 assert order_field_column._type == 'many2one', 'Invalid field passed to _generate_m2o_order_by()'
4354 if not order_field_column._classic_write and not getattr(order_field_column, 'store', False):
4355 _logger.debug("Many2one function/related fields must be stored " \
4356 "to be used as ordering fields! Ignoring sorting for %s.%s",
4357 self._name, order_field)
4360 # figure out the applicable order_by for the m2o
4361 dest_model = self.pool[order_field_column._obj]
4362 m2o_order = dest_model._order
4363 if not regex_order.match(m2o_order):
4364 # _order is complex, can't use it here, so we default to _rec_name
4365 m2o_order = dest_model._rec_name
4367 # extract the field names, to be able to qualify them and add desc/asc
4369 for order_part in m2o_order.split(","):
4370 m2o_order_list.append(order_part.strip().split(" ", 1)[0].strip())
4371 m2o_order = m2o_order_list
4373 # Join the dest m2o table if it's not joined yet. We use [LEFT] OUTER join here
4374 # as we don't want to exclude results that have NULL values for the m2o
4375 src_table, src_field = qualified_field.replace('"', '').split('.', 1)
4376 dst_alias, dst_alias_statement = query.add_join((src_table, dest_model._table, src_field, 'id', src_field), implicit=False, outer=True)
4377 qualify = lambda field: '"%s"."%s"' % (dst_alias, field)
4378 return map(qualify, m2o_order) if isinstance(m2o_order, list) else qualify(m2o_order)
4380 def _generate_order_by(self, order_spec, query):
4382 Attempt to construct an appropriate ORDER BY clause based on order_spec, which must be
4383 a comma-separated list of valid field names, optionally followed by an ASC or DESC direction.
4385 :raise" except_orm in case order_spec is malformed
4387 order_by_clause = ''
4388 order_spec = order_spec or self._order
4390 order_by_elements = []
4391 self._check_qorder(order_spec)
4392 for order_part in order_spec.split(','):
4393 order_split = order_part.strip().split(' ')
4394 order_field = order_split[0].strip()
4395 order_direction = order_split[1].strip() if len(order_split) == 2 else ''
4397 if order_field == 'id':
4398 order_by_elements.append('"%s"."%s" %s' % (self._table, order_field, order_direction))
4399 elif order_field in self._columns:
4400 order_column = self._columns[order_field]
4401 if order_column._classic_read:
4402 inner_clause = '"%s"."%s"' % (self._table, order_field)
4403 elif order_column._type == 'many2one':
4404 inner_clause = self._generate_m2o_order_by(order_field, query)
4406 continue # ignore non-readable or "non-joinable" fields
4407 elif order_field in self._inherit_fields:
4408 parent_obj = self.pool[self._inherit_fields[order_field][3]]
4409 order_column = parent_obj._columns[order_field]
4410 if order_column._classic_read:
4411 inner_clause = self._inherits_join_calc(order_field, query)
4412 elif order_column._type == 'many2one':
4413 inner_clause = self._generate_m2o_order_by(order_field, query)
4415 continue # ignore non-readable or "non-joinable" fields
4417 raise ValueError(_("Sorting field %s not found on model %s") % (order_field, self._name))
4419 if isinstance(inner_clause, list):
4420 for clause in inner_clause:
4421 order_by_elements.append("%s %s" % (clause, order_direction))
4423 order_by_elements.append("%s %s" % (inner_clause, order_direction))
4424 if order_by_elements:
4425 order_by_clause = ",".join(order_by_elements)
4427 return order_by_clause and (' ORDER BY %s ' % order_by_clause) or ''
4429 def _search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False, access_rights_uid=None):
4431 Private implementation of the search() method, allowing the uid used for the access-rights check to be specified.
4432 This is useful for example when filling in the selection list for a drop-down and avoiding access rights errors,
4433 by specifying ``access_rights_uid=1`` to bypass access rights check, but not ir.rules!
4434 This is ok at the security level because this method is private and not callable through XML-RPC.
4436 :param access_rights_uid: optional user ID to use when checking access rights
4437 (not for ir.rules, this is only for ir.model.access)
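An illustrative sketch (the domain and limit are assumptions)::

    ids = model._search(cr, uid, [('active', '=', True)], limit=10,
                        access_rights_uid=SUPERUSER_ID)
    # record rules (ir.rules) still apply even though the access-rights
    # check is done as the superuser here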
4441 self.check_access_rights(cr, access_rights_uid or user, 'read')
4443 # For transient models, restrict access to the current user, except for the super-user
4444 if self.is_transient() and self._log_access and user != SUPERUSER_ID:
4445 args = expression.AND(([('create_uid', '=', user)], args or []))
4447 query = self._where_calc(cr, user, args, context=context)
4448 self._apply_ir_rules(cr, user, query, 'read', context=context)
4449 order_by = self._generate_order_by(order, query)
4450 from_clause, where_clause, where_clause_params = query.get_sql()
4452 limit_str = limit and ' limit %d' % limit or ''
4453 offset_str = offset and ' offset %d' % offset or ''
4454 where_str = where_clause and (" WHERE %s" % where_clause) or ''
4455 query_str = 'SELECT "%s".id FROM ' % self._table + from_clause + where_str + order_by + limit_str + offset_str
4458 # /!\ the main query must be executed as a subquery, otherwise
4459 # offset and limit apply to the result of count()!
4460 cr.execute('SELECT count(*) FROM (%s) AS count' % query_str, where_clause_params)
4464 cr.execute(query_str, where_clause_params)
4467 # TDE note: with auto_join, we could have several lines about the same result
4468 # i.e. a lead with several unread messages; we uniquify the result using
4469 # a fast way to do it while preserving order (http://www.peterbe.com/plog/uniqifiers-benchmark)
4470 def _uniquify_list(seq):
4472 return [x for x in seq if x not in seen and not seen.add(x)]
4474 return _uniquify_list([x[0] for x in res])
4476 # returns the different values ever entered for one field
4477 # this is used, for example, in the client when the user hits enter on a char field
4479 def distinct_field_get(self, cr, uid, field, value, args=None, offset=0, limit=None):
4482 if field in self._inherit_fields:
4483 return self.pool[self._inherit_fields[field][0]].distinct_field_get(cr, uid, field, value, args, offset, limit)
4485 return self._columns[field].search(cr, self, args, field, value, offset, limit, uid)
4487 def copy_data(self, cr, uid, id, default=None, context=None):
4489 Copy given record's data with all its fields values
4491 :param cr: database cursor
4492 :param uid: current user id
4493 :param id: id of the record to copy
4494 :param default: field values to override in the original values of the copied record
4495 :type default: dictionary
4496 :param context: context arguments, like lang, time zone
4497 :type context: dictionary
4498 :return: dictionary containing all the field values
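An illustrative sketch (the id and default values are assumptions)::

    values = model.copy_data(cr, uid, 7, default={'name': 'Copy'}, context=context)
    new_id = model.create(cr, uid, values, context=context)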
4504 # avoid recursion through already copied records in case of circular relationship
4505 seen_map = context.setdefault('__copy_data_seen', {})
4506 if id in seen_map.setdefault(self._name, []):
4508 seen_map[self._name].append(id)
4512 if 'state' not in default:
4513 if 'state' in self._defaults:
4514 if callable(self._defaults['state']):
4515 default['state'] = self._defaults['state'](self, cr, uid, context)
4517 default['state'] = self._defaults['state']
4519 # build a black list of fields that should not be copied
4520 blacklist = set(MAGIC_COLUMNS + ['parent_left', 'parent_right'])
4521 def blacklist_given_fields(obj):
4522 # blacklist the fields that are given by inheritance
4523 for other, field_to_other in obj._inherits.items():
4524 blacklist.add(field_to_other)
4525 if field_to_other in default:
4526 # all the fields of 'other' are given by the record: default[field_to_other],
4527 # except the ones redefined in self
4528 blacklist.update(set(self.pool[other]._all_columns) - set(self._columns))
4530 blacklist_given_fields(self.pool[other])
4531 # blacklist deprecated fields
4532 for name, field in obj._columns.items():
4533 if field.deprecated:
4536 blacklist_given_fields(self)
4539 fields_to_copy = dict((f,fi) for f, fi in self._all_columns.iteritems()
4542 if f not in blacklist)
4544 data = self.read(cr, uid, [id], fields_to_copy.keys(), context=context)
4548 raise IndexError(_("Record #%d of %s not found, cannot copy!") % (id, self._name))
4551 for f, colinfo in fields_to_copy.iteritems():
4552 field = colinfo.column
4553 if field._type == 'many2one':
4554 res[f] = data[f] and data[f][0]
4555 elif field._type == 'one2many':
4556 other = self.pool[field._obj]
4557 # duplicate following the order of the ids because we'll rely on
4558 # it later for copying translations in copy_translations()!
4559 lines = [other.copy_data(cr, uid, line_id, context=context) for line_id in sorted(data[f])]
4560 # the lines are duplicated using the wrong (old) parent, but then
4561 # are reassigned to the correct one thanks to the (0, 0, ...)
4562 res[f] = [(0, 0, line) for line in lines if line]
4563 elif field._type == 'many2many':
4564 res[f] = [(6, 0, data[f])]
4565 else:
4566 res[f] = data[f]
4568 return res
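# Illustrative sketch (field names are hypothetical): the x2many values
# built above use the standard command-tuple format, so for a record with a
# one2many 'line_ids' and a many2many 'tag_ids', copy_data() would return
# something like:
#
#   {'name': 'SO001',
#    'line_ids': [(0, 0, {'qty': 1.0}), (0, 0, {'qty': 2.0})],
#    'tag_ids': [(6, 0, [42, 43])]}
#
# where (0, 0, vals) creates a new line from scratch and (6, 0, ids)
# replaces the set of linked records with the given ids.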
4570 def copy_translations(self, cr, uid, old_id, new_id, context=None):
4571 if context is None:
4572 context = {}
4574 # avoid recursion through already copied records in case of circular relationship
4575 seen_map = context.setdefault('__copy_translations_seen', {})
4576 if old_id in seen_map.setdefault(self._name, []):
4577 return
4578 seen_map[self._name].append(old_id)
4580 trans_obj = self.pool.get('ir.translation')
4581 # TODO it seems fields_get can be replaced by _all_columns (no need for translation)
4582 fields = self.fields_get(cr, uid, context=context)
4584 for field_name, field_def in fields.items():
4585 # removing the lang to compare untranslated values
4586 context_wo_lang = dict(context, lang=None)
4587 old_record, new_record = self.browse(cr, uid, [old_id, new_id], context=context_wo_lang)
4588 # we must recursively copy the translations for o2o and o2m
4589 if field_def['type'] == 'one2many':
4590 target_obj = self.pool[field_def['relation']]
4591 # here we rely on the order of the ids to match the translations
4592 # as foreseen in copy_data()
4593 old_children = sorted(r.id for r in old_record[field_name])
4594 new_children = sorted(r.id for r in new_record[field_name])
4595 for (old_child, new_child) in zip(old_children, new_children):
4596 target_obj.copy_translations(cr, uid, old_child, new_child, context=context)
4597 # and for translatable fields we keep them for copy
4598 elif field_def.get('translate'):
4599 if field_name in self._columns:
4600 trans_name = self._name + "," + field_name
4601 target_id = new_id
4602 source_id = old_id
4603 elif field_name in self._inherit_fields:
4604 trans_name = self._inherit_fields[field_name][0] + "," + field_name
4605 # get the id of the parent record to set the translation
4606 inherit_field_name = self._inherit_fields[field_name][1]
4607 target_id = new_record[inherit_field_name].id
4608 source_id = old_record[inherit_field_name].id
4609 else:
4610 continue
4612 trans_ids = trans_obj.search(cr, uid, [
4613 ('name', '=', trans_name),
4614 ('res_id', '=', source_id)
4615 ])
4616 user_lang = context.get('lang')
4617 for record in trans_obj.read(cr, uid, trans_ids, context=context):
4618 del record['id']
4619 # remove source to avoid triggering _set_src
4620 del record['source']
4621 record.update({'res_id': target_id})
4622 if user_lang and user_lang == record['lang']:
4623 # 'source' to force the call to _set_src
4624 # 'value' needed if value is changed in copy(), want to see the new_value
4625 record['source'] = old_record[field_name]
4626 record['value'] = new_record[field_name]
4627 trans_obj.create(cr, uid, record, context=context)
4629 @api.returns('self', lambda value: value.id)
4630 def copy(self, cr, uid, id, default=None, context=None):
4632 Duplicate the record with the given id, updating it with default values
4634 :param cr: database cursor
4635 :param uid: current user id
4636 :param id: id of the record to copy
4637 :param default: dictionary of field values to override in the original values of the copied record, e.g: ``{'field_name': overridden_value, ...}``
4638 :type default: dictionary
4639 :param context: context arguments, like lang, time zone
4640 :type context: dictionary
4641 :return: id of the newly created record
4644 if context is None:
4645 context = {}
4646 context = context.copy()
4647 data = self.copy_data(cr, uid, id, default, context)
4648 new_id = self.create(cr, uid, data, context)
4649 self.copy_translations(cr, uid, id, new_id, context)
4650 return new_id
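# Usage sketch (assumes a 'res.partner' record with id 1 exists):
#
#   partner_obj = self.pool['res.partner']
#   new_id = partner_obj.copy(cr, uid, 1,
#                             default={'name': 'Copy of partner'},
#                             context=context)
#
# The duplicate takes the original field values, overridden by `default`,
# and copy_translations() then carries the translations over to it.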
4653 @api.returns('self')
4654 def exists(self):
4655 """ Return the subset of records in `self` that exist, and mark deleted
4656 records as such in cache. It can be used as a test on records::
4658 if record.exists():
4659 ...
4661 By convention, new records are returned as existing.
4663 ids = filter(None, self._ids) # ids to check in database
4664 if not ids:
4665 return self
4666 query = """SELECT id FROM "%s" WHERE id IN %%s""" % self._table
4667 self._cr.execute(query, (tuple(ids),))
4668 ids = ([r[0] for r in self._cr.fetchall()] + # ids in database
4669 [id for id in self._ids if not id]) # new ids
4670 existing = self.browse(ids)
4671 if len(existing) < len(self):
4672 # mark missing records in cache with a failed value
4673 exc = MissingError(_("Record does not exist or has been deleted."))
4674 (self - existing)._cache.update(FailedValue(exc))
4675 return existing
4677 def check_recursion(self, cr, uid, ids, context=None, parent=None):
4678 _logger.warning("You are using deprecated %s.check_recursion(). Please use _check_recursion() instead!" % \
4679 self._name)
4680 assert parent is None or parent in self._columns or parent in self._inherit_fields,\
4681 "The 'parent' parameter passed to check_recursion() must be None or a valid field name"
4682 return self._check_recursion(cr, uid, ids, context, parent)
4684 def _check_recursion(self, cr, uid, ids, context=None, parent=None):
4686 Verifies that there is no loop in a hierarchical structure of records,
4687 by following the parent relationship using the **parent** field until a loop
4688 is detected or until a top-level record is found.
4690 :param cr: database cursor
4691 :param uid: current user id
4692 :param ids: list of ids of records to check
4693 :param parent: optional parent field name (default: ``self._parent_name = parent_id``)
4694 :return: **True** if the operation can proceed safely, or **False** if an infinite loop is detected.
4696 if not parent:
4697 parent = self._parent_name
4699 # must ignore 'active' flag, ir.rules, etc. => direct SQL query
4700 query = 'SELECT "%s" FROM "%s" WHERE id = %%s' % (parent, self._table)
4701 for id in ids:
4702 current_id = id
4703 while current_id is not None:
4704 cr.execute(query, (current_id,))
4705 result = cr.fetchone()
4706 current_id = result[0] if result else None
4707 if current_id == id:
4708 return False
4709 return True
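# Typical usage sketch: models with a parent_id hierarchy plug this check
# into their constraints (model and message are hypothetical):
#
#   _constraints = [
#       (osv.osv._check_recursion,
#        'Error! You cannot create recursive categories.',
#        ['parent_id']),
#   ]
#
# The check returns False as soon as following parent_id from one of the
# checked records leads back to that same record.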
4711 def _check_m2m_recursion(self, cr, uid, ids, field_name):
4713 Verifies that there is no loop in a hierarchical structure of records,
4714 by following the many2many relationship given by **field_name** until a loop
4715 is detected or until no more parent records are found.
4717 :param cr: database cursor
4718 :param uid: current user id
4719 :param ids: list of ids of records to check
4720 :param field_name: field to check
4721 :return: **True** if the operation can proceed safely, or **False** if an infinite loop is detected.
4724 field = self._all_columns.get(field_name)
4725 field = field.column if field else None
4726 if not field or field._type != 'many2many' or field._obj != self._name:
4727 # field must be a many2many on itself
4728 raise ValueError('invalid field_name: %r' % (field_name,))
4730 query = 'SELECT distinct "%s" FROM "%s" WHERE "%s" IN %%s' % (field._id2, field._rel, field._id1)
4731 ids_parent = ids[:]
4732 while ids_parent:
4733 ids_parent2 = []
4734 for i in range(0, len(ids_parent), cr.IN_MAX):
4735 j = min(i + cr.IN_MAX, len(ids_parent))
4736 sub_ids_parent = ids_parent[i:j]
4737 cr.execute(query, (tuple(sub_ids_parent),))
4738 ids_parent2.extend(filter(None, map(lambda x: x[0], cr.fetchall())))
4739 ids_parent = ids_parent2
4740 for i in ids_parent:
4741 if i in ids:
4742 return False
4743 return True
4745 def _get_external_ids(self, cr, uid, ids, *args, **kwargs):
4746 """Retrieve the External ID(s) of any database record.
4748 **Synopsis**: ``_get_xml_ids(cr, uid, ids) -> { 'id': ['module.xml_id'] }``
4750 :return: map of ids to the list of their fully qualified External IDs
4751 in the form ``module.key``, or an empty list when there's no External
4752 ID for a record, e.g.::
4754 { 'id': ['module.ext_id', 'module.ext_id_bis'],
4755 'id2': [] }
4757 ir_model_data = self.pool.get('ir.model.data')
4758 data_ids = ir_model_data.search(cr, uid, [('model', '=', self._name), ('res_id', 'in', ids)])
4759 data_results = ir_model_data.read(cr, uid, data_ids, ['module', 'name', 'res_id'])
4762 # can't use dict.fromkeys() as the list would be shared!
4763 result = dict([(id, []) for id in ids])
4764 for record in data_results:
4765 result[record['res_id']].append('%(module)s.%(name)s' % record)
4766 return result
4768 def get_external_id(self, cr, uid, ids, *args, **kwargs):
4769 """Retrieve the External ID of any database record, if there
4770 is one. This method works as a possible implementation
4771 for a function field, to be able to add it to any
4772 model object easily, referencing it as ``Model.get_external_id``.
4774 When multiple External IDs exist for a record, only one
4775 of them is returned (randomly).
4777 :return: map of ids to their fully qualified XML ID,
4778 defaulting to an empty string when there's none
4779 (to be usable as a function field),
4782 { 'id': 'module.ext_id',
4783 'id2': '' }
4785 results = self._get_xml_ids(cr, uid, ids)
4786 for k, v in results.iteritems():
4787 if v:
4788 results[k] = v[0]
4789 else:
4790 results[k] = ''
4791 return results
4793 # backwards compatibility
4794 get_xml_id = get_external_id
4795 _get_xml_ids = _get_external_ids
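# Usage sketch (record ids and External IDs are hypothetical):
#
#   model.get_external_id(cr, uid, [7])
#   # -> {7: 'module.ext_id'} (or {7: ''} when no External ID exists)
#   model._get_external_ids(cr, uid, [7])
#   # -> {7: ['module.ext_id', 'module.ext_id_bis']} (or {7: []})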
4797 def print_report(self, cr, uid, ids, name, data, context=None):
4799 Render the report `name` for the given IDs. The report must be defined
4800 for this model, not another.
4802 report = self.pool['ir.actions.report.xml']._lookup_report(cr, name)
4803 assert self._name == report.table
4804 return report.create(cr, uid, ids, data, context)
4807 @classmethod
4808 def is_transient(cls):
4809 """ Return whether the model is transient.
4811 See :class:`TransientModel`.
4814 return cls._transient
4816 def _transient_clean_rows_older_than(self, cr, seconds):
4817 assert self._transient, "Model %s is not transient, it cannot be vacuumed!" % self._name
4818 # Never delete rows used in last 5 minutes
4819 seconds = max(seconds, 300)
4820 query = ("SELECT id FROM " + self._table + " WHERE"
4821 " COALESCE(write_date, create_date, (now() at time zone 'UTC'))::timestamp"
4822 " < ((now() at time zone 'UTC') - interval %s)")
4823 cr.execute(query, ("%s seconds" % seconds,))
4824 ids = [x[0] for x in cr.fetchall()]
4825 self.unlink(cr, SUPERUSER_ID, ids)
4827 def _transient_clean_old_rows(self, cr, max_count):
4828 # Check how many rows we have in the table
4829 cr.execute("SELECT count(*) AS row_count FROM " + self._table)
4830 res = cr.fetchall()
4831 if res[0][0] <= max_count:
4832 return # max not reached, nothing to do
4833 self._transient_clean_rows_older_than(cr, 300)
4835 def _transient_vacuum(self, cr, uid, force=False):
4836 """Clean the transient records.
4838 This unlinks old records from the transient model tables whenever the
4839 "_transient_max_count" or "_max_age" conditions (if any) are reached.
4840 Actual cleaning will happen only once every "_transient_check_time" calls.
4841 This means this method can be called frequently (e.g. whenever
4842 a new record is created).
4843 Example with both max_hours and max_count active:
4844 Suppose max_hours = 0.2 (e.g. 12 minutes), max_count = 20, there are 55 rows in the
4845 table, 10 created/changed in the last 5 minutes, an additional 12 created/changed between
4846 5 and 10 minutes ago, the rest created/changed more than 12 minutes ago.
4847 - age based vacuum will leave the 22 rows created/changed in the last 12 minutes
4848 - count based vacuum will wipe out another 12 rows. Not just 2, otherwise each addition
4849 would immediately cause the maximum to be reached again.
4850 - the 10 rows that have been created/changed in the last 5 minutes will NOT be deleted
4852 assert self._transient, "Model %s is not transient, it cannot be vacuumed!" % self._name
4853 _transient_check_time = 20 # arbitrary limit on vacuum executions
4854 self._transient_check_count += 1
4855 if not force and (self._transient_check_count < _transient_check_time):
4856 return True # no vacuum cleaning this time
4857 self._transient_check_count = 0
4859 # Age-based expiration
4860 if self._transient_max_hours:
4861 self._transient_clean_rows_older_than(cr, self._transient_max_hours * 60 * 60)
4863 # Count-based expiration
4864 if self._transient_max_count:
4865 self._transient_clean_old_rows(cr, self._transient_max_count)
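# Configuration sketch: a concrete wizard tunes the vacuum policy through
# class attributes (model shown is hypothetical):
#
#   class my_wizard(TransientModel):
#       _name = 'my.wizard'
#       _transient_max_count = 200    # count-based expiration threshold
#       _transient_max_hours = 1.0    # age-based expiration threshold
#
# _transient_vacuum() then actually cleans at most once every
# _transient_check_time (20) calls, unless force=True.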
4869 def resolve_2many_commands(self, cr, uid, field_name, commands, fields=None, context=None):
4870 """ Serializes one2many and many2many commands into record dictionaries
4871 (as if all the records came from the database via a read()). This
4872 method is aimed at onchange methods on one2many and many2many fields.
4874 Because commands might be creation commands, not all record dicts
4875 will contain an ``id`` field. Commands matching an existing record
4876 will have an ``id``.
4878 :param field_name: name of the one2many or many2many field matching the commands
4879 :type field_name: str
4880 :param commands: one2many or many2many commands to execute on ``field_name``
4881 :type commands: list((int|False, int|False, dict|False))
4882 :param fields: list of fields to read from the database, when applicable
4883 :type fields: list(str)
4884 :returns: records in a shape similar to that returned by ``read()``
4885 (except records may be missing the ``id`` field if they don't exist in db)
4888 result = [] # result (list of dict)
4889 record_ids = [] # ids of records to read
4890 updates = {} # {id: dict} of updates on particular records
4892 for command in commands or []:
4893 if not isinstance(command, (list, tuple)):
4894 record_ids.append(command)
4895 elif command[0] == 0:
4896 result.append(command[2])
4897 elif command[0] == 1:
4898 record_ids.append(command[1])
4899 updates.setdefault(command[1], {}).update(command[2])
4900 elif command[0] in (2, 3):
4901 record_ids = [id for id in record_ids if id != command[1]]
4902 elif command[0] == 4:
4903 record_ids.append(command[1])
4904 elif command[0] == 5:
4905 result, record_ids = [], []
4906 elif command[0] == 6:
4907 result, record_ids = [], list(command[2])
4909 # read the records and apply the updates
4910 other_model = self.pool[self._all_columns[field_name].column._obj]
4911 for record in other_model.read(cr, uid, record_ids, fields=fields, context=context):
4912 record.update(updates.get(record['id'], {}))
4913 result.append(record)
4915 return result
4917 # for backward compatibility
4918 resolve_o2m_commands_to_record_dicts = resolve_2many_commands
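# Behaviour sketch (field name and values are hypothetical): mixed commands
# are flattened into uniform record dicts, creations first, reads after:
#
#   commands = [(0, 0, {'name': 'new line'}),   # create -> dict kept, no id
#               (1, 42, {'name': 'renamed'}),   # update -> read 42, patched
#               (4, 43, False)]                 # link   -> read 43 as-is
#   model.resolve_2many_commands(cr, uid, 'line_ids', commands,
#                                fields=['name'], context=context)
#   # -> [{'name': 'new line'},
#   #     {'id': 42, 'name': 'renamed'},
#   #     {'id': 43, 'name': <db value>}]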
4920 def search_read(self, cr, uid, domain=None, fields=None, offset=0, limit=None, order=None, context=None):
4922 Performs a ``search()`` followed by a ``read()``.
4924 :param cr: database cursor
4925 :param uid: current user id
4926 :param domain: Search domain, see ``args`` parameter in ``search()``. Defaults to an empty domain that will match all records.
4927 :param fields: List of fields to read, see ``fields`` parameter in ``read()``. Defaults to all fields.
4928 :param offset: Number of records to skip, see ``offset`` parameter in ``search()``. Defaults to 0.
4929 :param limit: Maximum number of records to return, see ``limit`` parameter in ``search()``. Defaults to no limit.
4930 :param order: Columns to sort result, see ``order`` parameter in ``search()``. Defaults to no sort.
4931 :param context: context arguments.
4932 :return: List of dictionaries containing the asked fields.
4933 :rtype: List of dictionaries.
4936 record_ids = self.search(cr, uid, domain or [], offset=offset, limit=limit, order=order, context=context)
4937 if not record_ids:
4938 return []
4940 if fields and fields == ['id']:
4941 # shortcut read if we only want the ids
4942 return [{'id': id} for id in record_ids]
4944 # read() ignores active_test, but it would forward it to any downstream search call
4945 # (e.g. for x2m or function fields), and this is not the desired behavior; the flag
4946 # was presumably only meant for the main search().
4947 # TODO: Move this to read() directly?
4948 read_ctx = dict(context or {})
4949 read_ctx.pop('active_test', None)
4951 result = self.read(cr, uid, record_ids, fields, context=read_ctx)
4952 if len(result) <= 1:
4953 return result
4955 # reorder read
4956 index = dict((r['id'], r) for r in result)
4957 return [index[x] for x in record_ids if x in index]
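# Usage sketch (model, domain and fields are hypothetical):
#
#   results = model.search_read(cr, uid,
#                               domain=[('active', '=', True)],
#                               fields=['name', 'parent_id'],
#                               limit=5, order='name',
#                               context=context)
#   # -> [{'id': 3, 'name': 'Agrolait', 'parent_id': False}, ...]
#
# The re-indexing step above exists because read() may return rows in any
# order; the final list follows the order produced by search().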
4959 def _register_hook(self, cr):
4960 """ stuff to do right after the registry is built """
4963 def _patch_method(self, name, method):
4964 """ Monkey-patch a method for all instances of this model. This replaces
4965 the method called `name` by `method` in `self`'s class.
4966 The original method is then accessible via ``method.origin``, and it
4967 can be restored with :meth:`~._revert_method`.
4969 Example::
4972 def do_write(self, values):
4973 # do stuff, and call the original method
4974 return do_write.origin(self, values)
4976 # patch method write of model
4977 model._patch_method('write', do_write)
4979 # this will call do_write
4980 records = model.search([...])
4983 # restore the original method
4984 model._revert_method('write')
4986 cls = type(self)
4987 origin = getattr(cls, name)
4988 method.origin = origin
4989 # propagate decorators from origin to method, and apply api decorator
4990 wrapped = api.guess(api.propagate(origin, method))
4991 wrapped.origin = origin
4992 setattr(cls, name, wrapped)
4994 def _revert_method(self, name):
4995 """ Revert the original method of `self` called `name`.
4996 See :meth:`~._patch_method`.
4998 cls = type(self)
4999 method = getattr(cls, name)
5000 setattr(cls, name, method.origin)
5005 # An instance represents an ordered collection of records in a given
5006 # execution environment. The instance object refers to the environment, and
5007 # the records themselves are represented by their cache dictionary. The 'id'
5008 # of each record is found in its corresponding cache dictionary.
5010 # This design has the following advantages:
5011 # - cache access is direct and thus fast;
5012 # - one can consider records without an 'id' (see new records);
5013 # - the global cache is only an index to "resolve" a record 'id'.
5016 @classmethod
5017 def _browse(cls, env, ids):
5018 """ Create an instance attached to `env`; `ids` is a tuple of record
5019 ids.
5021 records = object.__new__(cls)
5022 records.env = env
5023 records._ids = ids
5024 env.prefetch[cls._name].update(ids)
5025 return records
5027 @api.v8
5028 def browse(self, arg=None):
5029 """ Return an instance corresponding to `arg` and attached to
5030 `self.env`; `arg` is either a record id, or a collection of record ids.
5032 ids = _normalize_ids(arg)
5033 #assert all(isinstance(id, IdType) for id in ids), "Browsing invalid ids: %s" % ids
5034 return self._browse(self.env, ids)
5036 @api.v7
5037 def browse(self, cr, uid, arg=None, context=None):
5038 ids = _normalize_ids(arg)
5039 #assert all(isinstance(id, IdType) for id in ids), "Browsing invalid ids: %s" % ids
5040 return self._browse(Environment(cr, uid, context or {}), ids)
5043 # Internal properties, for manipulating the instance's implementation
5048 """ Return the list of non-false record ids of this instance. """
5049 return filter(None, list(self._ids))
5051 # backward-compatibility with former browse records
5052 _cr = property(lambda self: self.env.cr)
5053 _uid = property(lambda self: self.env.uid)
5054 _context = property(lambda self: self.env.context)
5057 # Conversion methods
5060 def ensure_one(self):
5061 """ Return `self` if it is a singleton instance, otherwise raise an
5066 raise except_orm("ValueError", "Expected singleton: %s" % self)
5068 def with_env(self, env):
5069 """ Return an instance equivalent to `self` attached to `env`.
5071 return self._browse(env, self._ids)
5073 def sudo(self, user=SUPERUSER_ID):
5074 """ Return an instance equivalent to `self` attached to an environment
5075 based on `self.env` with the given `user`.
5077 return self.with_env(self.env(user=user))
5079 def with_context(self, *args, **kwargs):
5080 """ Return an instance equivalent to `self` attached to an environment
5081 based on `self.env` with another context. The context is given by
5082 `self._context` or the positional argument if given, and modified by
5083 the `kwargs` keyword arguments.
5085 context = dict(args[0] if args else self._context, **kwargs)
5086 return self.with_env(self.env(context=context))
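# Usage sketch for the environment helpers above (new-API recordsets, ids
# are hypothetical):
#
#   records = env['res.partner'].browse([1, 2])
#   as_admin = records.sudo()                       # run as SUPERUSER_ID
#   in_french = records.with_context(lang='fr_FR')  # patched context
#
# Each helper returns a new recordset bound to a derived environment; the
# original recordset and its environment are left untouched.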
5088 def _convert_to_cache(self, values):
5089 """ Convert the `values` dictionary into cached values. """
5090 fields = self._fields
5091 return {
5092 name: fields[name].convert_to_cache(value, self.env)
5093 for name, value in values.iteritems()
5094 if name in fields
5095 }
5097 def _convert_to_write(self, values):
5098 """ Convert the `values` dictionary into the format of :meth:`write`. """
5099 fields = self._fields
5100 return dict(
5101 (name, fields[name].convert_to_write(value))
5102 for name, value in values.iteritems()
5103 if name in self._fields
5104 )
5107 # Record traversal and update
5110 def _mapped_func(self, func):
5111 """ Apply function `func` on all records in `self`, and return the
5112 result as a list or a recordset (if `func` return recordsets).
5114 vals = [func(rec) for rec in self]
5115 val0 = vals[0] if vals else func(self)
5116 if isinstance(val0, BaseModel):
5117 return reduce(operator.or_, vals, val0)
5118 return vals
5120 def mapped(self, func):
5121 """ Apply `func` on all records in `self`, and return the result as a
5122 list or a recordset (if `func` return recordsets). In the latter
5123 case, the order of the returned recordset is arbritrary.
5125 :param func: a function or a dot-separated sequence of field names
5127 if isinstance(func, basestring):
5128 recs = self
5129 for name in func.split('.'):
5130 recs = recs._mapped_func(operator.itemgetter(name))
5131 return recs
5132 else:
5133 return self._mapped_func(func)
5135 def _mapped_cache(self, name_seq):
5136 """ Same as `~.mapped`, but `name_seq` is a dot-separated sequence of
5137 field names, and only cached values are used.
5139 recs = self
5140 for name in name_seq.split('.'):
5141 field = recs._fields[name]
5142 null = field.null(self.env)
5143 recs = recs.mapped(lambda rec: rec._cache.get(field, null))
5144 return recs
5146 def filtered(self, func):
5147 """ Select the records in `self` such that `func(rec)` is true, and
5148 return them as a recordset.
5150 :param func: a function or a dot-separated sequence of field names
5152 if isinstance(func, basestring):
5153 name = func
5154 func = lambda rec: filter(None, rec.mapped(name))
5155 return self.browse([rec.id for rec in self if func(rec)])
5157 def sorted(self, key=None):
5158 """ Return the recordset `self` ordered by `key` """
5160 return self.search([('id', 'in', self.ids)])
5162 return self.browse(map(int, sorted(self, key=key)))
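# Usage sketch chaining the traversal helpers above (field names are
# hypothetical):
#
#   partners.mapped('name')                    # list of values
#   partners.mapped('parent_id')               # recordset of parents
#   partners.mapped('bank_ids.acc_number')     # dot-path traversal
#   partners.filtered('customer')              # records with a truthy field
#   partners.filtered(lambda r: r.credit > 0)  # arbitrary predicate
#   partners.sorted(key=lambda r: r.name)      # in-memory sort
#   partners.sorted()                          # defer to search() ordering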
5164 def update(self, values):
5165 """ Update record `self[0]` with `values`. """
5166 for name, value in values.iteritems():
5167 self[name] = value
5170 # New records - represent records that do not exist in the database yet;
5171 # they are used to compute default values and perform onchanges.
5175 def new(self, values={}):
5176 """ Return a new record instance attached to `self.env`, and
5177 initialized with the `values` dictionary. Such a record does not
5178 exist in the database.
5180 record = self.browse([NewId()])
5181 record._cache.update(self._convert_to_cache(values))
5183 if record.env.in_onchange:
5184 # The cache update does not set inverse fields, so do it manually.
5185 # This is useful for computing a function field on secondary
5186 # records, if that field depends on the main record.
5187 for name in values:
5188 field = self._fields.get(name)
5189 if field and field.inverse_field:
5190 field.inverse_field._update(record[name], record)
5192 return record
5195 # Dirty flag, to mark records modified (in draft mode)
5200 """ Return whether any record in `self` is dirty. """
5201 dirty = self.env.dirty
5202 return any(record in dirty for record in self)
5204 @_dirty.setter
5205 def _dirty(self, value):
5206 """ Mark the records in `self` as dirty. """
5207 if value:
5208 map(self.env.dirty.add, self)
5209 else:
5210 map(self.env.dirty.discard, self)
5216 def __nonzero__(self):
5217 """ Test whether `self` is nonempty. """
5218 return bool(getattr(self, '_ids', True))
5221 """ Return the size of `self`. """
5222 return len(self._ids)
5225 """ Return an iterator over `self`. """
5226 for id in self._ids:
5227 yield self._browse(self.env, (id,))
5229 def __contains__(self, item):
5230 """ Test whether `item` is a subset of `self` or a field name. """
5231 if isinstance(item, BaseModel):
5232 if self._name == item._name:
5233 return set(item._ids) <= set(self._ids)
5234 raise except_orm("ValueError", "Mixing apples and oranges: %s in %s" % (item, self))
5235 if isinstance(item, basestring):
5236 return item in self._fields
5237 return item in self.ids
5239 def __add__(self, other):
5240 """ Return the concatenation of two recordsets. """
5241 if not isinstance(other, BaseModel) or self._name != other._name:
5242 raise except_orm("ValueError", "Mixing apples and oranges: %s + %s" % (self, other))
5243 return self.browse(self._ids + other._ids)
5245 def __sub__(self, other):
5246 """ Return the recordset of all the records in `self` that are not in `other`. """
5247 if not isinstance(other, BaseModel) or self._name != other._name:
5248 raise except_orm("ValueError", "Mixing apples and oranges: %s - %s" % (self, other))
5249 other_ids = set(other._ids)
5250 return self.browse([id for id in self._ids if id not in other_ids])
5252 def __and__(self, other):
5253 """ Return the intersection of two recordsets.
5254 Note that recordset order is not preserved.
5256 if not isinstance(other, BaseModel) or self._name != other._name:
5257 raise except_orm("ValueError", "Mixing apples and oranges: %s & %s" % (self, other))
5258 return self.browse(set(self._ids) & set(other._ids))
5260 def __or__(self, other):
5261 """ Return the union of two recordsets.
5262 Note that recordset order is not preserved.
5264 if not isinstance(other, BaseModel) or self._name != other._name:
5265 raise except_orm("ValueError", "Mixing apples and oranges: %s | %s" % (self, other))
5266 return self.browse(set(self._ids) | set(other._ids))
5268 def __eq__(self, other):
5269 """ Test whether two recordsets are equivalent (up to reordering). """
5270 if not isinstance(other, BaseModel):
5271 if other:
5272 _logger.warning("Comparing apples and oranges: %s == %s", self, other)
5273 return False
5274 return self._name == other._name and set(self._ids) == set(other._ids)
5276 def __ne__(self, other):
5277 return not self == other
5279 def __lt__(self, other):
5280 if not isinstance(other, BaseModel) or self._name != other._name:
5281 raise except_orm("ValueError", "Mixing apples and oranges: %s < %s" % (self, other))
5282 return set(self._ids) < set(other._ids)
5284 def __le__(self, other):
5285 if not isinstance(other, BaseModel) or self._name != other._name:
5286 raise except_orm("ValueError", "Mixing apples and oranges: %s <= %s" % (self, other))
5287 return set(self._ids) <= set(other._ids)
5289 def __gt__(self, other):
5290 if not isinstance(other, BaseModel) or self._name != other._name:
5291 raise except_orm("ValueError", "Mixing apples and oranges: %s > %s" % (self, other))
5292 return set(self._ids) > set(other._ids)
5294 def __ge__(self, other):
5295 if not isinstance(other, BaseModel) or self._name != other._name:
5296 raise except_orm("ValueError", "Mixing apples and oranges: %s >= %s" % (self, other))
5297 return set(self._ids) >= set(other._ids)
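# Usage sketch for the operators above (`a` and `b` are recordsets of the
# same model):
#
#   a + b     # concatenation, order and duplicates preserved
#   a - b     # records of `a` not in `b`, order of `a` preserved
#   a & b     # intersection (order not preserved)
#   a | b     # union (order not preserved)
#   a < b     # strict subset test; <=, >, >= work alike
#   a == b    # same model and same record ids, ignoring order
#
# Mixing recordsets of different models raises the "Mixing apples and
# oranges" ValueError seen above.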
5303 return "%s%s" % (self._name, getattr(self, '_ids', ""))
5305 def __unicode__(self):
5306 return unicode(str(self))
5310 def __hash__(self):
5311 if hasattr(self, '_ids'):
5312 return hash((self._name, frozenset(self._ids)))
5313 else:
5314 return hash(self._name)
5316 def __getitem__(self, key):
5317 """ If `key` is an integer or a slice, return the corresponding record
5318 selection as an instance (attached to `self.env`).
5319 Otherwise read the field `key` of the first record in `self`.
5321 Examples::
5323 inst = model.search(dom) # inst is a recordset
5324 r4 = inst[3] # fourth record in inst
5325 rs = inst[10:20] # subset of inst
5326 nm = rs['name'] # name of first record in inst
5328 if isinstance(key, basestring):
5329 # important: one must call the field's getter
5330 return self._fields[key].__get__(self, type(self))
5331 elif isinstance(key, slice):
5332 return self._browse(self.env, self._ids[key])
5333 else:
5334 return self._browse(self.env, (self._ids[key],))
5336 def __setitem__(self, key, value):
5337 """ Assign the field `key` to `value` in record `self`. """
5338 # important: one must call the field's setter
5339 return self._fields[key].__set__(self, value)
5342 # Cache and recomputation management
5347 """ Return the cache of `self`, mapping field names to values. """
5348 return RecordCache(self)
5351 def _in_cache_without(self, field):
5352 """ Make sure `self` is present in cache (for prefetching), and return
5353 the records of model `self` in cache that have no value for `field`
5354 (:class:`Field` instance).
5356 env = self.env
5357 prefetch_ids = env.prefetch[self._name]
5358 prefetch_ids.update(self._ids)
5359 ids = filter(None, prefetch_ids - set(env.cache[field]))
5360 return self.browse(ids)
5364 """ Clear the records cache.
5367 The record cache is automatically invalidated.
5369 self.invalidate_cache()
5372 def invalidate_cache(self, fnames=None, ids=None):
5373 """ Invalidate the record caches after some records have been modified.
5374 If both `fnames` and `ids` are ``None``, the whole cache is cleared.
5376 :param fnames: the list of modified fields, or ``None`` for all fields
5377 :param ids: the list of modified record ids, or ``None`` for all
5379 if fnames is None:
5380 if ids is None:
5381 return self.env.invalidate_all()
5382 fields = self._fields.values()
5383 else:
5384 fields = map(self._fields.__getitem__, fnames)
5386 # invalidate fields and inverse fields, too
5387 spec = [(f, ids) for f in fields] + \
5388 [(f.inverse_field, None) for f in fields if f.inverse_field]
5389 self.env.invalidate(spec)
5392 def modified(self, fnames):
5393 """ Notify that fields have been modified on `self`. This invalidates
5394 the cache, and prepares the recomputation of stored function fields
5395 (new-style fields only).
5397 :param fnames: iterable of field names that have been modified on
5398 the records in `self`
5400 # each field knows what to invalidate and recompute
5401 spec = []
5402 for fname in fnames:
5403 spec += self._fields[fname].modified(self)
5405 cached_fields = {
5406 field
5407 for env in self.env.all
5408 for field in env.cache
5409 }
5410 # invalidate non-stored fields.function which are currently cached
5411 spec += [(f, None) for f in self.pool.pure_function_fields
5412 if f in cached_fields]
5414 self.env.invalidate(spec)
5416 def _recompute_check(self, field):
5417 """ If `field` must be recomputed on some record in `self`, return the
5418 corresponding records that must be recomputed.
5420 for env in [self.env] + list(iter(self.env.all)):
5421 if env.todo.get(field) and env.todo[field] & self:
5422 return env.todo[field]
5424 def _recompute_todo(self, field):
5425 """ Mark `field` to be recomputed. """
5426 todo = self.env.todo
5427 todo[field] = (todo.get(field) or self.browse()) | self
5429 def _recompute_done(self, field):
5430 """ Mark `field` as being recomputed. """
5431 todo = self.env.todo
5432 if field in todo:
5433 recs = todo.pop(field) - self
5434 if recs:
5435 todo[field] = recs
5438 def recompute(self):
5439 """ Recompute stored function fields. The fields and records to
5440 recompute have been determined by method :meth:`modified`.
5442 for env in list(iter(self.env.all)):
5443 while env.todo:
5444 field, recs = next(env.todo.iteritems())
5445 # evaluate the fields to recompute, and save them to database
5446 for rec, rec1 in zip(recs, recs.with_context(recompute=False)):
5447 try:
5448 values = rec._convert_to_write({
5449 f.name: rec[f.name] for f in field.computed_fields
5450 })
5451 rec1._write(values)
5452 except MissingError:
5453 pass
5454 # mark the computed fields as done
5455 map(recs._recompute_done, field.computed_fields)
5458 # Generic onchange method
5461 def _has_onchange(self, field, other_fields):
5462 """ Return whether `field` should trigger an onchange event in the
5463 presence of `other_fields`.
5465 # test whether self has an onchange method for field, or field is a
5466 # dependency of any field in other_fields
5467 return field.name in self._onchange_methods or \
5468 any(dep in other_fields for dep in field.dependents)
5471 def _onchange_spec(self, view_info=None):
5472 """ Return the onchange spec from a view description; if not given, the
5473 result of ``self.fields_view_get()`` is used.
5475 result = {}
5477 # for traversing the XML arch and populating result
5478 def process(node, info, prefix):
5479 if node.tag == 'field':
5480 name = node.attrib['name']
5481 names = "%s.%s" % (prefix, name) if prefix else name
5482 if not result.get(names):
5483 result[names] = node.attrib.get('on_change')
5484 # traverse the subviews included in relational fields
5485 for subinfo in info['fields'][name].get('views', {}).itervalues():
5486 process(etree.fromstring(subinfo['arch']), subinfo, names)
5487 else:
5488 for child in node:
5489 process(child, info, prefix)
5491 if view_info is None:
5492 view_info = self.fields_view_get()
5493 process(etree.fromstring(view_info['arch']), view_info, '')
5494 return result
5496 def _onchange_eval(self, field_name, onchange, result):
5497 """ Apply onchange method(s) for field `field_name` with spec `onchange`
5498 on record `self`. Value assignments are applied on `self`, while
5499 domain and warning messages are put in dictionary `result`.
5501 onchange = onchange.strip()
5504 if onchange in ("1", "true"):
5505 for method in self._onchange_methods.get(field_name, ()):
5506 method_res = method(self)
5507 if not method_res:
5508 continue
5509 if 'domain' in method_res:
5510 result.setdefault('domain', {}).update(method_res['domain'])
5511 if 'warning' in method_res:
5512 result['warning'] = method_res['warning']
5513 return
5515 # onchange V7
5516 match = onchange_v7.match(onchange)
5517 if match:
5518 method, params = match.groups()
5520 # evaluate params -> tuple
5521 global_vars = {'context': self._context, 'uid': self._uid}
5522 if self._context.get('field_parent'):
5523 class RawRecord(object):
5524 def __init__(self, record):
5525 self._record = record
5526 def __getattr__(self, name):
5527 field = self._record._fields[name]
5528 value = self._record[name]
5529 return field.convert_to_onchange(value)
5530 record = self[self._context['field_parent']]
5531 global_vars['parent'] = RawRecord(record)
5532 field_vars = {
5533 key: self._fields[key].convert_to_onchange(val)
5534 for key, val in self._cache.iteritems()
5535 }
5536 params = eval("[%s]" % params, global_vars, field_vars)
5538 # call onchange method
5539 args = (self._cr, self._uid, self._origin.ids) + tuple(params)
5540 method_res = getattr(self._model, method)(*args)
5541 if not isinstance(method_res, dict):
5542 return
5543 if 'value' in method_res:
5544 method_res['value'].pop('id', None)
5545 self.update(self._convert_to_cache(method_res['value']))
5546 if 'domain' in method_res:
5547 result.setdefault('domain', {}).update(method_res['domain'])
5548 if 'warning' in method_res:
5549 result['warning'] = method_res['warning']
5552 def onchange(self, values, field_name, field_onchange):
5553 """ Perform an onchange on the given field.
5555 :param values: dictionary mapping field names to values, giving the
5556 current state of modification
5557 :param field_name: name of the modified field
5558 :param field_onchange: dictionary mapping field names to their
5559 on_change attribute
5561 env = self.env
5563 if field_name and field_name not in self._fields:
5564 return {}
5566 # determine subfields for field.convert_to_write() below
5567 secondary = []
5568 subfields = defaultdict(set)
5569 for dotname in field_onchange:
5570 if '.' in dotname:
5571 secondary.append(dotname)
5572 name, subname = dotname.split('.')
5573 subfields[name].add(subname)
5575 # create a new record with values, and attach `self` to it
5576 with env.do_in_onchange():
5577 record = self.new(values)
5578 values = dict(record._cache)
5579 # attach `self` with a different context (for cache consistency)
5580 record._origin = self.with_context(__onchange=True)
5582 # determine which field should be triggered an onchange
5583 todo = set([field_name]) if field_name else set(values)
5586 # dummy assignment: trigger invalidations on the record
5588 record[name] = record[name]
5590 result = {'value': {}}
5598 with env.do_in_onchange():
5599 # apply field-specific onchange methods
5600 if field_onchange.get(name):
5601 record._onchange_eval(name, field_onchange[name], result)
5603 # force re-evaluation of function fields on secondary records
5604 for field_seq in secondary:
5605 record.mapped(field_seq)
5607 # determine which fields have been modified
5608 for name, oldval in values.iteritems():
5609 newval = record[name]
5610 if newval != oldval or getattr(newval, '_dirty', False):
5611 field = self._fields[name]
5612 result['value'][name] = field.convert_to_write(
5613 newval, record._origin, subfields[name],
5614 )
5617 # At the moment, the client does not support updates on a *2many field
5618 # while that same field is being modified by the user.
5619 if field_name and self._fields[field_name].type in ('one2many', 'many2many'):
5620 result['value'].pop(field_name, None)
5622 return result
5625 class RecordCache(MutableMapping):
5626 """ Implements a proxy dictionary to read/update the cache of a record.
5627 Upon iteration, it looks like a dictionary mapping field names to
5628 values. However, fields may be used as keys as well.
5630 def __init__(self, records):
5631 self._recs = records
5633 def __contains__(self, field):
5634 """ Return whether `records[0]` has a value for `field` in cache. """
5635 if isinstance(field, basestring):
5636 field = self._recs._fields[field]
5637 return self._recs.id in self._recs.env.cache[field]
5639 def __getitem__(self, field):
5640 """ Return the cached value of `field` for `records[0]`. """
5641 if isinstance(field, basestring):
5642 field = self._recs._fields[field]
5643 value = self._recs.env.cache[field][self._recs.id]
5644 return value.get() if isinstance(value, SpecialValue) else value
5646 def __setitem__(self, field, value):
5647 """ Assign the cached value of `field` for all records in `records`. """
5648 if isinstance(field, basestring):
5649 field = self._recs._fields[field]
5650 values = dict.fromkeys(self._recs._ids, value)
5651 self._recs.env.cache[field].update(values)
5653 def update(self, *args, **kwargs):
5654 """ Update the cache of all records in `records`. If the argument is a
5655 `SpecialValue`, update all fields (except "magic" columns).
5657 if args and isinstance(args[0], SpecialValue):
5658 values = dict.fromkeys(self._recs._ids, args[0])
5659 for name, field in self._recs._fields.iteritems():
5660 if name != 'id':
5661 self._recs.env.cache[field].update(values)
5662 else:
5663 return super(RecordCache, self).update(*args, **kwargs)
5665 def __delitem__(self, field):
5666 """ Remove the cached value of `field` for all `records`. """
5667 if isinstance(field, basestring):
5668 field = self._recs._fields[field]
5669 field_cache = self._recs.env.cache[field]
5670 for id in self._recs._ids:
5671 field_cache.pop(id, None)
5674 """ Iterate over the field names with a regular value in cache. """
5675 cache, id = self._recs.env.cache, self._recs.id
5676 dummy = SpecialValue(None)
5677 for name, field in self._recs._fields.iteritems():
5678 if name not in MAGIC_COLUMNS and \
5679 not isinstance(cache[field].get(id, dummy), SpecialValue):
5680 yield name
5683 """ Return the number of fields with a regular value in cache. """
5684 return sum(1 for name in self)
5686 class Model(BaseModel):
5687 """Main super-class for regular database-persisted OpenERP models.
5689 OpenERP models are created by inheriting from this class::
5691 class user(Model):
5692 ...
5694 The system will later instantiate the class once per database (on
5695 which the class' module is installed).
5697 _auto = True
5698 _register = False # not visible in ORM registry, meant to be python-inherited only
5699 _transient = False # True in a TransientModel
5701 class TransientModel(BaseModel):
5702 """Model super-class for transient records, meant to be temporarily
5703 persisted, and regularly vacuum-cleaned.
5705 A TransientModel has a simplified access rights management:
5706 all users can create new records, and may only access the
5707 records they created. The super-user has unrestricted access
5708 to all TransientModel records.
5711 _register = False # not visible in ORM registry, meant to be python-inherited only
5712 _transient = True
5714 class AbstractModel(BaseModel):
5715 """Abstract Model super-class for creating an abstract class meant to be
5716 inherited by regular models (Models or TransientModels) but not meant to
5717 be usable on its own, or persisted.
5719 Technical note: we don't want to make AbstractModel the super-class of
5720 Model or BaseModel because it would not make sense to put the main
5721 definition of persistence methods such as create() in it, and still we
5722 should be able to override them within an AbstractModel.
5724 _auto = False # don't create any database backend for AbstractModels
5725 _register = False # not visible in ORM registry, meant to be python-inherited only
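# Definition sketch: concrete modules subclass one of the three classes
# above (model names are illustrative):
#
#   class res_partner_grade(Model):        # regular, database-persisted
#       _name = 'res.partner.grade'
#
#   class my_wizard(TransientModel):       # temporary rows, vacuum-cleaned
#       _name = 'my.wizard'
#
#   class base_mixin(AbstractModel):       # inheritable only, no table
#       _name = 'base.mixin'
#
# The registry instantiates each class once per database on which its
# module is installed.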
5728 def itemgetter_tuple(items):
5729 """ Fixes itemgetter inconsistency (useful in some cases) of not returning
5730 a tuple if len(items) == 1: always returns an n-tuple where n = len(items)
5735 return lambda gettable: (gettable[items[0]],)
5736 return operator.itemgetter(*items)
5738 def convert_pgerror_23502(model, fields, info, e):
5739 m = re.match(r'^null value in column "(?P<field>\w+)" violates '
5740 r'not-null constraint\n',
5741 str(e))
5742 field_name = m and m.group('field')
5743 if not m or field_name not in fields:
5744 return {'message': unicode(e)}
5745 message = _(u"Missing required value for the field '%s'.") % field_name
5746 field = fields.get(field_name)
5748 message = _(u"Missing required value for the field '%s' (%s)") % (field['string'], field_name)
5749 return {
5750 'message': message,
5751 'field': field_name,
5752 }
5754 def convert_pgerror_23505(model, fields, info, e):
5755 m = re.match(r'^duplicate key (?P<field>\w+) violates unique constraint',
5756 str(e))
5757 field_name = m and m.group('field')
5758 if not m or field_name not in fields:
5759 return {'message': unicode(e)}
5760 message = _(u"The value for the field '%s' already exists.") % field_name
5761 field = fields.get(field_name)
5763 message = _(u"%s This might be '%s' in the current model, or a field "
5764 u"of the same name in an o2m.") % (message, field['string'])
5765 return {
5766 'message': message,
5767 'field': field_name,
5768 }
5770 PGERROR_TO_OE = defaultdict(
5771 # shape of mapped converters
5772 lambda: (lambda model, fvg, info, pgerror: {'message': unicode(pgerror)}), {
5773 # not_null_violation
5774 '23502': convert_pgerror_23502,
5775 # unique constraint error
5776 '23505': convert_pgerror_23505,
5777 })
5779 def _normalize_ids(arg, atoms={int, long, str, unicode, NewId}):
5780 """ Normalizes the ids argument for ``browse`` (v7 and v8) to a tuple.
5782 Various implementations were tested on the corpus of all browse() calls
5783 performed during a full crawler run (after having installed all website_*
5784 modules) and this one was the most efficient overall.
5786 A possible bit of correctness was sacrificed by not doing any test on
5787 Iterable and just assuming that any non-atomic type was an iterable of
5788 ids.
5792 # much of the corpus is falsy objects (empty list, tuple or set, None)
5793 if not arg:
5794 return ()
5796 # `type in set` is significantly faster (because more restrictive) than
5797 # isinstance(arg, set) or issubclass(type, set); and for new-style classes
5798 # obj.__class__ is equivalent to but faster than type(obj). Not relevant
5799 # (and looks much worse) in most cases, but over millions of calls it
5800 # does have a very minor effect.
5801 if arg.__class__ in atoms:
5802 return (arg,)
5804 return tuple(arg)
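# Behaviour sketch:
#
#   _normalize_ids(None)       # -> ()     (falsy -> empty tuple)
#   _normalize_ids(42)         # -> (42,)  (atom  -> 1-tuple)
#   _normalize_ids('an_id')    # -> ('an_id',)
#   _normalize_ids([1, 2, 3])  # -> (1, 2, 3)
#
# Anything that is neither falsy nor an atom is assumed to be an iterable
# of ids and is converted with tuple().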
5806 # keep those imports here to avoid dependency cycle errors
5807 from .osv import expression
5808 from .fields import Field, SpecialValue, FailedValue
5810 # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: