# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

"""
    Object Relational Mapping module:
     * Hierarchical structure
     * Constraints consistency and validation
     * Object metadata depends on its status
     * Optimised processing by complex query (multiple actions at once)
     * Default field values
     * Permissions optimisation
     * Persistent object: PostgreSQL database
     * Multi-level caching system
     * Two different inheritance mechanisms
     * Rich set of field types:
          - classical (varchar, integer, boolean, ...)
          - relational (one2many, many2one, many2many)
"""

import copy
import datetime
import itertools
import logging
import pickle
import re
from collections import defaultdict, MutableMapping
from inspect import getmembers

import dateutil.relativedelta
import psycopg2
from lxml import etree

from . import SUPERUSER_ID
from . import api
from . import tools
from .api import Environment
from .exceptions import except_orm, AccessError, MissingError, ValidationError
from .fields import Field
from .osv import fields
from .osv.query import Query
from .tools import lazy_property, ormcache
from .tools.config import config
from .tools.misc import CountingStream, DEFAULT_SERVER_DATETIME_FORMAT, DEFAULT_SERVER_DATE_FORMAT
from .tools.safe_eval import safe_eval as eval
from .tools.translate import _

_logger = logging.getLogger(__name__)
_schema = logging.getLogger(__name__ + '.schema')

regex_order = re.compile('^( *([a-z0-9:_]+|"[a-z0-9:_]+")( *desc| *asc)?( *, *|))+$', re.I)
regex_object_name = re.compile(r'^[a-z0-9_.]+$')
onchange_v7 = re.compile(r"^(\w+)\((.*)\)$")
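
# For example, given the regular expressions above:
#
#   >>> bool(regex_order.match('name desc, id'))
#   True
#   >>> onchange_v7.match('onchange_partner_id(partner_id)').groups()
#   ('onchange_partner_id', 'partner_id')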

AUTOINIT_RECALCULATE_STORED_FIELDS = 1000

def check_object_name(name):
    """ Check if the given name is a valid openerp object name.

        The _name attribute in osv and osv_memory objects is subject to
        some restrictions. This function returns True or False depending
        on whether the given name is allowed or not.

        TODO: this is an approximation. The goal in this approximation
        is to disallow uppercase characters (in some places, we quote
        table/column names and in others not, which leads to this kind
        of error:

            psycopg2.ProgrammingError: relation "xxx" does not exist).

        The same restriction should apply to both osv and osv_memory
        objects for consistency.

    """
    if regex_object_name.match(name) is None:
        return False
    return True
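
# For example, the regex above accepts only lowercase names, digits,
# underscores and dots:
#
#   >>> check_object_name('res.partner')
#   True
#   >>> check_object_name('Res.Partner')
#   False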

def raise_on_invalid_object_name(name):
    if not check_object_name(name):
        msg = "The _name attribute %s is not valid." % name
        _logger.error(msg)
        raise except_orm('ValueError', msg)

POSTGRES_CONFDELTYPES = {
    'RESTRICT': 'r',
    'NO ACTION': 'a',
    'CASCADE': 'c',
    'SET NULL': 'n',
    'SET DEFAULT': 'd',
}

def intersect(la, lb):
    return filter(lambda x: x in lb, la)
124 """ Test whether functions `f` and `g` are identical or have the same name """
125 return f == g or getattr(f, '__name__', 0) == getattr(g, '__name__', 1)

def fix_import_export_id_paths(fieldname):
    """
    Fixes the id fields in import and exports, and splits field paths
    on '/'.

    :param str fieldname: name of the field to import/export
    :return: split field name
    :rtype: list of str
    """
    fixed_db_id = re.sub(r'([^/])\.id', r'\1/.id', fieldname)
    fixed_external_id = re.sub(r'([^/]):id', r'\1/id', fixed_db_id)
    return fixed_external_id.split('/')
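
# For example, '.id' and ':id' markers are normalised into path components
# before splitting:
#
#   >>> fix_import_export_id_paths('order_line/product_id:id')
#   ['order_line', 'product_id', 'id']
#   >>> fix_import_export_id_paths('order_line.id')
#   ['order_line', '.id']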

def pg_varchar(size=0):
    """ Returns the VARCHAR declaration for the provided size:

    * If no size (or an empty or negative size) is provided, return an
      'infinite' VARCHAR
    * Otherwise return a VARCHAR(n)

    :param int size: varchar size, optional
    :rtype: str
    """
    if size:
        if not isinstance(size, int):
            raise TypeError("VARCHAR parameter should be an int, got %s"
                            % type(size))
        if size > 0:
            return 'VARCHAR(%d)' % size
    return 'varchar'
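
# For example:
#
#   >>> pg_varchar(16)
#   'VARCHAR(16)'
#   >>> pg_varchar()
#   'varchar'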

FIELDS_TO_PGTYPES = {
    fields.boolean: 'bool',
    fields.integer: 'int4',
    fields.text: 'text',
    fields.html: 'text',
    fields.date: 'date',
    fields.datetime: 'timestamp',
    fields.binary: 'bytea',
    fields.many2one: 'int4',
    fields.serialized: 'text',
}

def get_pg_type(f, type_override=None):
    """
    :param fields._column f: field to get a Postgres type for
    :param type type_override: use the provided type for dispatching instead of the field's own type
    :returns: (postgres_identification_type, postgres_type_specification)
    :rtype: (str, str)
    """
    field_type = type_override or type(f)

    if field_type in FIELDS_TO_PGTYPES:
        pg_type = (FIELDS_TO_PGTYPES[field_type], FIELDS_TO_PGTYPES[field_type])
    elif issubclass(field_type, fields.float):
        if f.digits:
            pg_type = ('numeric', 'NUMERIC')
        else:
            pg_type = ('float8', 'DOUBLE PRECISION')
    elif issubclass(field_type, (fields.char, fields.reference)):
        pg_type = ('varchar', pg_varchar(f.size))
    elif issubclass(field_type, fields.selection):
        if (isinstance(f.selection, list) and isinstance(f.selection[0][0], int))\
                or getattr(f, 'size', None) == -1:
            pg_type = ('int4', 'INTEGER')
        else:
            pg_type = ('varchar', pg_varchar(getattr(f, 'size', None)))
    elif issubclass(field_type, fields.function):
        if f._type == 'selection':
            pg_type = ('varchar', pg_varchar())
        else:
            pg_type = get_pg_type(f, getattr(fields, f._type))
    else:
        _logger.warning('%s type not supported!', field_type)
        pg_type = None

    return pg_type
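
# Illustrative sketch (assuming a hypothetical old-style char column of
# size 16):
#
#   >>> get_pg_type(fields.char(size=16))
#   ('varchar', 'VARCHAR(16)')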

class MetaModel(api.Meta):
    """ Metaclass for the models.

    This class is used as the metaclass for the class :class:`BaseModel` to
    discover the models defined in a module (without instantiating them).
    If the automatic discovery is not needed, it is possible to set the
    model's ``_register`` attribute to False.

    """

    module_to_models = {}

    def __init__(self, name, bases, attrs):
        if not self._register:
            self._register = True
            super(MetaModel, self).__init__(name, bases, attrs)
            return

        if not hasattr(self, '_module'):
            # The (OpenERP) module name can be in the `openerp.addons` namespace
            # or not. For instance, module `sale` can be imported as
            # `openerp.addons.sale` (the right way) or `sale` (for backward
            # compatibility).
            module_parts = self.__module__.split('.')
            if len(module_parts) > 2 and module_parts[:2] == ['openerp', 'addons']:
                module_name = self.__module__.split('.')[2]
            else:
                module_name = self.__module__.split('.')[0]
            self._module = module_name

        # Remember which models to instantiate for this module.
        if not self._custom:
            self.module_to_models.setdefault(self._module, []).append(self)
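
# Illustrative sketch (hypothetical model, not in the original source): the
# metaclass registers model classes per addon module.
#
#   class SaleOrder(Model):          # defined in openerp.addons.sale
#       _name = 'sale.order'
#
#   # => SaleOrder._module == 'sale'
#   # => SaleOrder listed in MetaModel.module_to_models['sale']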
242 """ Pseudo-ids for new records. """
243 def __nonzero__(self):
246 IdType = (int, long, basestring, NewId)
249 # special columns automatically created by the ORM
250 LOG_ACCESS_COLUMNS = ['create_uid', 'create_date', 'write_uid', 'write_date']
251 MAGIC_COLUMNS = ['id'] + LOG_ACCESS_COLUMNS

class BaseModel(object):
    """ Base class for OpenERP models.

    OpenERP models are created by inheriting from this class' subclasses:

    * :class:`Model` for regular database-persisted models

    * :class:`TransientModel` for temporary data, stored in the database but
      automatically vacuumed every so often

    * :class:`AbstractModel` for abstract super classes meant to be shared by
      multiple inheriting models

    The system automatically instantiates every model once per database. Those
    instances represent the available models on each database, and depend on
    which modules are installed on that database. The actual class of each
    instance is built from the Python classes that create and inherit from the
    corresponding model.

    Every model instance is a "recordset", i.e., an ordered collection of
    records of the model. Recordsets are returned by methods like
    :meth:`~.browse`, :meth:`~.search`, or field accesses. Records have no
    explicit representation: a record is represented as a recordset of one
    record.

    To create a class that should not be instantiated, the _register class
    attribute may be set to False.
    """
    __metaclass__ = MetaModel
    _auto = True # create database backend
    _register = False # Set to false if the model shouldn't be automatically discovered.
    _name = None
    _columns = {}
    _constraints = []
    _custom = False
    _defaults = {}
    _rec_name = None
    _parent_name = 'parent_id'
    _parent_store = False
    _parent_order = False
    _date_name = 'date'
    _order = 'id'
    _sequence = None
    _description = None
    _needaction = False
    _translate = True # set to False to disable translations export for this model

    # dict of {field:method}, with method returning the (name_get of records, {id: fold})
    # to include in the _read_group, if grouped on this field
    _group_by_full = {}

    _transient = False # True in a TransientModel

    # structure:
    #  { 'parent_model': 'm2o_field', ... }
    _inherits = {}

    # Mapping from inherits'd field name to triple (m, r, f, n) where m is the
    # model from which it is inherits'd, r is the (local) field towards m, f
    # is the _column object itself, and n is the original (i.e. top-most)
    # parent model.
    # Example:
    #  { 'field_name': ('parent_model', 'm2o_field_to_reach_parent',
    #                   field_column_obj, original_parent_model), ... }
    _inherit_fields = {}

    # Mapping field name/column_info object
    # This is similar to _inherit_fields but:
    # 1. includes self fields,
    # 2. uses column_info instead of a triple.
    _all_columns = {}

    _table = None
    _sql_constraints = []

    # model dependencies, for models backed up by sql views:
    # {model_name: field_names, ...}
    _depends = {}

    CONCURRENCY_CHECK_FIELD = '__last_update'

    def log(self, cr, uid, id, message, secondary=False, context=None):
        return _logger.warning("log() is deprecated. Please use the OpenChatter notification system instead of the res.log mechanism.")

    def view_init(self, cr, uid, fields_list, context=None):
        """Override this method to do specific things when a view on the object is opened."""
        pass

    def _field_create(self, cr, context=None):
        """ Create entries in ir_model_fields for all the model's fields.

        If necessary, also create an entry in ir_model, and if called from the
        modules loading scheme (by receiving 'module' in the context), also
        create entries in ir_model_data (for the model and the fields).

        - create an entry in ir_model (if there is not already one),
        - create an entry in ir_model_data (if there is not already one, and if
          'module' is in the context),
        - update ir_model_fields with the fields found in _columns
          (TODO there is some redundancy as _columns is updated from
          ir_model_fields in __init__).

        """
        if context is None:
            context = {}
        cr.execute("SELECT id FROM ir_model WHERE model=%s", (self._name,))
        if not cr.rowcount:
            cr.execute('SELECT nextval(%s)', ('ir_model_id_seq',))
            model_id = cr.fetchone()[0]
            cr.execute("INSERT INTO ir_model (id,model, name, info,state) VALUES (%s, %s, %s, %s, %s)", (model_id, self._name, self._description, self.__doc__, 'base'))
        else:
            model_id = cr.fetchone()[0]
        if 'module' in context:
            name_id = 'model_'+self._name.replace('.', '_')
            cr.execute('select * from ir_model_data where name=%s and module=%s', (name_id, context['module']))
            if not cr.rowcount:
                cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, (now() at time zone 'UTC'), (now() at time zone 'UTC'), %s, %s, %s)", \
                    (name_id, context['module'], 'ir.model', model_id)
                )

        cr.execute("SELECT * FROM ir_model_fields WHERE model=%s", (self._name,))
        cols = {}
        for rec in cr.dictfetchall():
            cols[rec['name']] = rec

        ir_model_fields_obj = self.pool.get('ir.model.fields')

        # sparse fields should be created at the end, as they depend on their
        # serialized field already existing
        model_fields = sorted(self._columns.items(), key=lambda x: 1 if x[1]._type == 'sparse' else 0)
        for (k, f) in model_fields:
            vals = {
                'model_id': model_id,
                'model': self._name,
                'name': k,
                'field_description': f.string,
                'ttype': f._type,
                'relation': f._obj or '',
                'select_level': tools.ustr(int(f.select)),
                'readonly': (f.readonly and 1) or 0,
                'required': (f.required and 1) or 0,
                'selectable': (f.selectable and 1) or 0,
                'translate': (f.translate and 1) or 0,
                'relation_field': f._fields_id if isinstance(f, fields.one2many) else '',
                'serialization_field_id': None,
            }
            if getattr(f, 'serialization_field', None):
                # resolve link to serialization_field if specified by name
                serialization_field_id = ir_model_fields_obj.search(cr, SUPERUSER_ID, [('model','=',vals['model']), ('name', '=', f.serialization_field)])
                if not serialization_field_id:
                    raise except_orm(_('Error'), _("Serialization field `%s` not found for sparse field `%s`!") % (f.serialization_field, k))
                vals['serialization_field_id'] = serialization_field_id[0]

            # When it's a custom field, it does not contain f.select
            if context.get('field_state', 'base') == 'manual':
                if context.get('field_name', '') == k:
                    vals['select_level'] = context.get('select', '0')
                # setting the value so that the problem does NOT occur next time
                elif k in cols:
                    vals['select_level'] = cols[k]['select_level']

            if k not in cols:
                cr.execute('select nextval(%s)', ('ir_model_fields_id_seq',))
                id = cr.fetchone()[0]
                cr.execute("""INSERT INTO ir_model_fields (
                    id, model_id, model, name, field_description, ttype,
                    relation, state, select_level, relation_field, translate, serialization_field_id
                ) VALUES (
                    %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s
                )""", (
                    id, vals['model_id'], vals['model'], vals['name'], vals['field_description'], vals['ttype'],
                    vals['relation'], 'base',
                    vals['select_level'], vals['relation_field'], bool(vals['translate']), vals['serialization_field_id']
                ))
                if 'module' in context:
                    name1 = 'field_' + self._table + '_' + k
                    cr.execute("select name from ir_model_data where name=%s", (name1,))
                    if cr.fetchone():
                        name1 = name1 + "_" + str(id)
                    cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, (now() at time zone 'UTC'), (now() at time zone 'UTC'), %s, %s, %s)", \
                        (name1, context['module'], 'ir.model.fields', id)
                    )
            else:
                for key, val in vals.items():
                    if cols[k][key] != vals[key]:
                        cr.execute('update ir_model_fields set field_description=%s where model=%s and name=%s', (vals['field_description'], vals['model'], vals['name']))
                        cr.execute("""UPDATE ir_model_fields SET
                            model_id=%s, field_description=%s, ttype=%s, relation=%s,
                            select_level=%s, readonly=%s, required=%s, selectable=%s, relation_field=%s, translate=%s, serialization_field_id=%s
                        WHERE
                            model=%s AND name=%s""", (
                                vals['model_id'], vals['field_description'], vals['ttype'],
                                vals['relation'],
                                vals['select_level'], bool(vals['readonly']), bool(vals['required']), bool(vals['selectable']), vals['relation_field'], bool(vals['translate']), vals['serialization_field_id'], vals['model'], vals['name']
                            ))
                        break
        self.invalidate_cache(cr, SUPERUSER_ID)

    @classmethod
    def _add_field(cls, name, field):
        """ Add the given `field` under the given `name` in the class """
        field.set_class_name(cls, name)

        # add field in _fields (for reflection)
        cls._fields[name] = field

        # add field as an attribute, unless another kind of value already exists
        if isinstance(getattr(cls, name, field), Field):
            setattr(cls, name, field)
        else:
            _logger.warning("In model %r, member %r is not a field", cls._name, name)

        if field.store:
            cls._columns[name] = field.to_column()
        else:
            # remove potential column that may be overridden by field
            cls._columns.pop(name, None)

    @classmethod
    def _add_magic_fields(cls):
        """ Introduce magic fields on the current class

        * id is a "normal" field (with a specific getter)
        * create_uid, create_date, write_uid and write_date have become
          "normal" fields
        * $CONCURRENCY_CHECK_FIELD is a computed field with its computing
          method defined dynamically. Uses ``str(datetime.datetime.utcnow())``
          to get the same structure as the previous
          ``(now() at time zone 'UTC')::timestamp``::

              # select (now() at time zone 'UTC')::timestamp;
                        timezone
              ----------------------------
               2013-06-18 08:30:37.292809

              >>> str(datetime.datetime.utcnow())
              '2013-06-18 08:31:32.821177'
        """
        def add(name, field):
            """ add `field` with the given `name` if it does not exist yet """
            if name not in cls._columns and name not in cls._fields:
                cls._add_field(name, field)

        # cyclic import
        from . import fields

        # this field 'id' must override any other column or field
        cls._add_field('id', fields.Id(automatic=True))

        add('display_name', fields.Char(string='Display Name', automatic=True,
            compute='_compute_display_name'))

        if cls._log_access:
            add('create_uid', fields.Many2one('res.users', string='Created by', automatic=True))
            add('create_date', fields.Datetime(string='Created on', automatic=True))
            add('write_uid', fields.Many2one('res.users', string='Last Updated by', automatic=True))
            add('write_date', fields.Datetime(string='Last Updated on', automatic=True))
            last_modified_name = 'compute_concurrency_field_with_access'
        else:
            last_modified_name = 'compute_concurrency_field'

        # this field must override any other column or field
        cls._add_field(cls.CONCURRENCY_CHECK_FIELD, fields.Datetime(
            string='Last Modified on', compute=last_modified_name, automatic=True))

    @api.one
    def compute_concurrency_field(self):
        self[self.CONCURRENCY_CHECK_FIELD] = \
            datetime.datetime.utcnow().strftime(DEFAULT_SERVER_DATETIME_FORMAT)

    @api.one
    @api.depends('create_date', 'write_date')
    def compute_concurrency_field_with_access(self):
        self[self.CONCURRENCY_CHECK_FIELD] = \
            self.write_date or self.create_date or \
            datetime.datetime.utcnow().strftime(DEFAULT_SERVER_DATETIME_FORMAT)

    #
    # Goal: try to apply inheritance at the instantiation level and
    #       put objects in the pool var
    #
    @classmethod
    def _build_model(cls, pool, cr):
        """ Instantiate a given model.

        This class method instantiates the class of some model (i.e. a class
        deriving from osv or osv_memory). The class might be the class passed
        in argument or, if it inherits from another class, a class constructed
        by combining the two classes.

        """

        # IMPORTANT: the registry contains an instance for each model. The class
        # of each model carries inferred metadata that is shared among the
        # model's instances for this registry, but not among registries. Hence
        # we cannot use that "registry class" for combining model classes by
        # inheritance, since it confuses the metadata inference process.

        # Keep links to non-inherited constraints in cls; this is useful for
        # instance when exporting translations
        cls._local_constraints = cls.__dict__.get('_constraints', [])
        cls._local_sql_constraints = cls.__dict__.get('_sql_constraints', [])

        # determine inherited models
        parents = getattr(cls, '_inherit', [])
        parents = [parents] if isinstance(parents, basestring) else (parents or [])

        # determine the model's name
        name = cls._name or (len(parents) == 1 and parents[0]) or cls.__name__

        # determine the module that introduced the model
        original_module = pool[name]._original_module if name in parents else cls._module

        # build the class hierarchy for the model
        for parent in parents:
            if parent not in pool:
                raise TypeError('The model "%s" specifies a nonexistent parent class "%s"\n'
                    'You may need to add a dependency on the parent class\' module.' % (name, parent))
            parent_model = pool[parent]

            # do not use the class of parent_model, since that class contains
            # inferred metadata; use its ancestor instead
            parent_class = type(parent_model).__base__

            # don't inherit custom fields
            columns = dict((key, val)
                for key, val in parent_class._columns.iteritems()
                if not val.manual
            )
            columns.update(cls._columns)

            defaults = dict(parent_class._defaults)
            defaults.update(cls._defaults)

            inherits = dict(parent_class._inherits)
            inherits.update(cls._inherits)

            depends = dict(parent_class._depends)
            for m, fs in cls._depends.iteritems():
                depends[m] = depends.get(m, []) + fs

            old_constraints = parent_class._constraints
            new_constraints = cls._constraints
            # filter out from old_constraints the ones overridden by a
            # constraint with the same function name in new_constraints
            constraints = new_constraints + [oldc
                for oldc in old_constraints
                if not any(newc[2] == oldc[2] and same_name(newc[0], oldc[0])
                           for newc in new_constraints)
            ]

            sql_constraints = cls._sql_constraints + \
                              parent_class._sql_constraints

            attrs = {
                '_name': name,
                '_register': False,
                '_columns': columns,
                '_defaults': defaults,
                '_inherits': inherits,
                '_depends': depends,
                '_constraints': constraints,
                '_sql_constraints': sql_constraints,
            }
            cls = type(name, (cls, parent_class), attrs)

        # introduce the "registry class" of the model;
        # duplicate some attributes so that the ORM can modify them
        attrs = {
            '_register': False,
            '_columns': dict(cls._columns),
            '_defaults': dict(cls._defaults),
            '_inherits': dict(cls._inherits),
            '_depends': dict(cls._depends),
            '_constraints': list(cls._constraints),
            '_sql_constraints': list(cls._sql_constraints),
            '_original_module': original_module,
        }
        cls = type(cls._name, (cls,), attrs)

        # float fields are registry-dependent (digits attribute); duplicate
        # them to avoid sharing between registries
        for key, col in cls._columns.items():
            if col._type == 'float':
                cls._columns[key] = copy.copy(col)

        # instantiate the model, and initialize it
        model = object.__new__(cls)
        model.__init__(pool, cr)
        return model

    @classmethod
    def _init_function_fields(cls, pool, cr):
        # initialize the list of non-stored function fields for this model
        pool._pure_function_fields[cls._name] = []

        # process store of low-level function fields
        for fname, column in cls._columns.iteritems():
            if hasattr(column, 'digits_change'):
                column.digits_change(cr)
            # filter out existing store about this field
            pool._store_function[cls._name] = [
                stored
                for stored in pool._store_function.get(cls._name, [])
                if (stored[0], stored[1]) != (cls._name, fname)
            ]
            if not isinstance(column, fields.function):
                continue
            if not column.store:
                # register it on the pool for invalidation
                pool._pure_function_fields[cls._name].append(fname)
                continue
            # process store parameter
            store = column.store
            if store is True:
                get_ids = lambda self, cr, uid, ids, c={}: ids
                store = {cls._name: (get_ids, None, column.priority, None)}
            for model, spec in store.iteritems():
                if len(spec) == 4:
                    (fnct, fields2, order, length) = spec
                elif len(spec) == 3:
                    (fnct, fields2, order) = spec
                    length = None
                else:
                    raise except_orm('Error',
                        ('Invalid function definition %s in object %s !\nYou must use the definition: store={object:(fnct, fields, priority, time length)}.' % (fname, cls._name)))
                pool._store_function.setdefault(model, [])
                t = (cls._name, fname, fnct, tuple(fields2) if fields2 else None, order, length)
                if t not in pool._store_function[model]:
                    pool._store_function[model].append(t)
                    pool._store_function[model].sort(key=lambda x: x[4])

    @classmethod
    def _init_manual_fields(cls, pool, cr):
        # Check whether the query is already done
        if pool.fields_by_model is not None:
            manual_fields = pool.fields_by_model.get(cls._name, [])
        else:
            cr.execute('SELECT * FROM ir_model_fields WHERE model=%s AND state=%s', (cls._name, 'manual'))
            manual_fields = cr.dictfetchall()

        for field in manual_fields:
            if field['name'] in cls._columns:
                continue
            attrs = {
                'string': field['field_description'],
                'required': bool(field['required']),
                'readonly': bool(field['readonly']),
                'domain': eval(field['domain']) if field['domain'] else None,
                'size': field['size'] or None,
                'ondelete': field['on_delete'],
                'translate': (field['translate']),
                'manual': True,
                #'select': int(field['select_level'])
            }
            if field['serialization_field_id']:
                cr.execute('SELECT name FROM ir_model_fields WHERE id=%s', (field['serialization_field_id'],))
                attrs.update({'serialization_field': cr.fetchone()[0], 'type': field['ttype']})
                if field['ttype'] in ['many2one', 'one2many', 'many2many']:
                    attrs.update({'relation': field['relation']})
                cls._columns[field['name']] = fields.sparse(**attrs)
            elif field['ttype'] == 'selection':
                cls._columns[field['name']] = fields.selection(eval(field['selection']), **attrs)
            elif field['ttype'] == 'reference':
                cls._columns[field['name']] = fields.reference(selection=eval(field['selection']), **attrs)
            elif field['ttype'] == 'many2one':
                cls._columns[field['name']] = fields.many2one(field['relation'], **attrs)
            elif field['ttype'] == 'one2many':
                cls._columns[field['name']] = fields.one2many(field['relation'], field['relation_field'], **attrs)
            elif field['ttype'] == 'many2many':
                _rel1 = field['relation'].replace('.', '_')
                _rel2 = field['model'].replace('.', '_')
                _rel_name = 'x_%s_%s_%s_rel' % (_rel1, _rel2, field['name'])
                cls._columns[field['name']] = fields.many2many(field['relation'], _rel_name, 'id1', 'id2', **attrs)
            else:
                cls._columns[field['name']] = getattr(fields, field['ttype'])(**attrs)

    @classmethod
    def _init_constraints_onchanges(cls):
        # store sql constraint error messages
        for (key, _, msg) in cls._sql_constraints:
            cls.pool._sql_error[cls._table + '_' + key] = msg

        # collect constraint and onchange methods
        cls._constraint_methods = []
        cls._onchange_methods = defaultdict(list)
        for attr, func in getmembers(cls, callable):
            if hasattr(func, '_constrains'):
                if not all(name in cls._fields for name in func._constrains):
                    _logger.warning("@constrains%r parameters must be field names", func._constrains)
                cls._constraint_methods.append(func)
            if hasattr(func, '_onchange'):
                if not all(name in cls._fields for name in func._onchange):
                    _logger.warning("@onchange%r parameters must be field names", func._onchange)
                for name in func._onchange:
                    cls._onchange_methods[name].append(func)

    def __new__(cls):
        # In the past, this method was registering the model class in the server.
        # This job is now done entirely by the metaclass MetaModel.
        #
        # Do not create an instance here. Model instances are created by method
        # _build_model().
        return None

    def __init__(self, pool, cr):
        """ Initialize a model and make it part of the given registry.

        - copy the stored fields' functions in the registry,
        - retrieve custom fields and add them in the model,
        - ensure there is a many2one for each _inherits'd parent,
        - update the children's _columns,
        - give a chance to each field to initialize itself.

        """
        cls = type(self)

        # link the class to the registry, and update the registry
        cls.pool = pool
        cls._model = self              # backward compatibility
        pool.add(cls._name, self)

        # determine description, table, sequence and log_access
        if not cls._description:
            cls._description = cls._name
        if not cls._table:
            cls._table = cls._name.replace('.', '_')
        if not cls._sequence:
            cls._sequence = cls._table + '_id_seq'
        if not hasattr(cls, '_log_access'):
            # If _log_access is not specified, it is the same value as _auto.
            cls._log_access = cls._auto

        # Transience
        if cls.is_transient():
            cls._transient_check_count = 0
            cls._transient_max_count = config.get('osv_memory_count_limit')
            cls._transient_max_hours = config.get('osv_memory_age_limit')
            assert cls._log_access, \
                "TransientModels must have log_access turned on, " \
                "in order to implement their access rights policy"

        # retrieve new-style fields and duplicate them (to avoid clashes with
        # inheritance between different models)
        cls._fields = {}
        for attr, field in getmembers(cls, Field.__instancecheck__):
            if not field._origin:
                cls._add_field(attr, field.copy())

        # introduce magic fields
        cls._add_magic_fields()

        # register stuff about low-level function fields and custom fields
        cls._init_function_fields(pool, cr)
        cls._init_manual_fields(pool, cr)

        # process _inherits
        cls._inherits_check()
        cls._inherits_reload()

        # register constraints and onchange methods
        cls._init_constraints_onchanges()

        # check defaults
        for k in cls._defaults:
            assert k in cls._fields, \
                "Model %s has a default for nonexistent field %s" % (cls._name, k)

        # restart columns
        for column in cls._columns.itervalues():
            column.restart()

        # validate rec_name
        if cls._rec_name:
            assert cls._rec_name in cls._fields, \
                "Invalid rec_name %s for model %s" % (cls._rec_name, cls._name)
        elif 'name' in cls._fields:
            cls._rec_name = 'name'

        # prepare ormcache, which must be shared by all instances of the model
        cls._ormcache = {}

    @api.model
    def _is_an_ordinary_table(self):
        self.env.cr.execute("""\
            SELECT  1
            FROM    pg_class
            WHERE   relname = %s
            AND     relkind = %s""", [self._table, 'r'])
        return bool(self.env.cr.fetchone())

    def __export_xml_id(self):
        """ Return a valid xml_id for the record `self`. """
        if not self._is_an_ordinary_table():
            raise Exception(
                "You can not export the column ID of model %s, because the "
                "table %s is not an ordinary table."
                % (self._name, self._table))
        ir_model_data = self.sudo().env['ir.model.data']
        data = ir_model_data.search([('model', '=', self._name), ('res_id', '=', self.id)])
        if data:
            if data[0].module:
                return '%s.%s' % (data[0].module, data[0].name)
            else:
                return data[0].name
        else:
            postfix = 0
            name = '%s_%s' % (self._table, self.id)
            while ir_model_data.search([('module', '=', '__export__'), ('name', '=', name)]):
                postfix += 1
                name = '%s_%s_%s' % (self._table, self.id, postfix)
            ir_model_data.create({
                'model': self._name,
                'res_id': self.id,
                'module': '__export__',
                'name': name,
            })
            return '__export__.' + name

    def __export_rows(self, fields):
        """ Export fields of the records in `self`.

            :param fields: list of lists of fields to traverse
            :return: list of lists of corresponding values
        """
        lines = []
        for record in self:
            # main line of record, initially empty
            current = [''] * len(fields)
            lines.append(current)

            # list of primary fields followed by secondary field(s)
            primary_done = []

            # process column by column
            for i, path in enumerate(fields):
                if not path:
                    continue

                name = path[0]
                if name in primary_done:
                    continue

                if name == '.id':
                    current[i] = str(record.id)
                elif name == 'id':
                    current[i] = record.__export_xml_id()
                else:
                    field = record._fields[name]
                    value = record[name]

                    # this part could be simpler, but it has to be done this way
                    # in order to reproduce the former behavior
                    if not isinstance(value, BaseModel):
                        current[i] = field.convert_to_export(value, self.env)
                    else:
                        primary_done.append(name)

                        # This is a special case, its strange behavior is intended!
                        if field.type == 'many2many' and len(path) > 1 and path[1] == 'id':
                            xml_ids = [r.__export_xml_id() for r in value]
                            current[i] = ','.join(xml_ids) or False
                        else:
                            # recursively export the fields that follow name
                            fields2 = [(p[1:] if p and p[0] == name else []) for p in fields]
                            lines2 = value.__export_rows(fields2)
                            if lines2:
                                # merge first line with record's main line
                                for j, val in enumerate(lines2[0]):
                                    if val:
                                        current[j] = val
                                # check value of current field
                                if not current[i]:
                                    # assign xml_ids, and forget about remaining lines
                                    xml_ids = [item[1] for item in value.name_get()]
                                    current[i] = ','.join(xml_ids)
                                else:
                                    # append the other lines at the end
                                    lines += lines2[1:]
                            else:
                                current[i] = False

        return lines

    def export_data(self, fields_to_export, raw_data=False):
        """ Export fields for selected objects

            :param fields_to_export: list of fields
            :param raw_data: True to return values in native Python types
            :rtype: dictionary with a *datas* matrix

            This method is used when exporting data via client menu
        """
        fields_to_export = map(fix_import_export_id_paths, fields_to_export)
        if raw_data:
            self = self.with_context(export_raw_data=True)
        return {'datas': self.__export_rows(fields_to_export)}
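
    # Illustrative usage sketch (hypothetical model and fields):
    #
    #   partners = env['res.partner'].search([])
    #   result = partners.export_data(['id', 'name'])
    #   # result['datas'] is a row-major list of rows, one per record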

    def import_data(self, cr, uid, fields, datas, mode='init', current_module='', noupdate=False, context=None, filename=None):
        """
        .. deprecated:: 7.0
            Use :meth:`~load` instead

        Import given data in given module

        This method is used when importing data via client menu.

        Example of fields to import for a sale.order::

            .id,                         (=database_id)
            partner_id,                  (=name_search)
            order_line/.id,              (=database_id)
            order_line/name,
            order_line/product_id/id,    (=xml id)
            order_line/price_unit,
            order_line/product_uom_qty,
            order_line/product_uom/id    (=xml_id)

        This method returns a 4-tuple with the following structure::

            (return_code, errored_resource, error_message, unused)

        * The first item is a return code, it is ``-1`` in case of
          import error, or the last imported row number in case of success
        * The second item contains the record data dict that failed to import
          in case of error, otherwise it's 0
        * The third item contains an error message string in case of error,
          otherwise it's 0
        * The last item is currently unused, with no specific semantics

        :param fields: list of fields to import
        :param datas: data to import
        :param mode: 'init' or 'update' for record creation
        :param current_module: module name
        :param noupdate: flag for record creation
        :param filename: optional file to store partial import state for recovery
        :returns: 4-tuple in the form (return_code, errored_resource, error_message, unused)
        :rtype: (int, dict or 0, str or 0, str or 0)
        """
        context = dict(context) if context is not None else {}
        context['_import_current_module'] = current_module

        fields = map(fix_import_export_id_paths, fields)
        ir_model_data_obj = self.pool.get('ir.model.data')

        def log(m):
            if m['type'] == 'error':
                raise Exception(m['message'])

        position = 0
        if config.get('import_partial') and filename:
            with open(config.get('import_partial'), 'rb') as partial_import_file:
                data = pickle.load(partial_import_file)
                position = data.get(filename, 0)

        try:
            for res_id, xml_id, res, info in self._convert_records(cr, uid,
                            self._extract_records(cr, uid, fields, datas,
                                                  context=context, log=log),
                            context=context, log=log):
                ir_model_data_obj._update(cr, uid, self._name,
                     current_module, res, mode=mode, xml_id=xml_id,
                     noupdate=noupdate, res_id=res_id, context=context)
                position = info.get('rows', {}).get('to', 0) + 1
                if config.get('import_partial') and filename and (not (position % 100)):
                    with open(config.get('import_partial'), 'rb') as partial_import:
                        data = pickle.load(partial_import)
                    data[filename] = position
                    with open(config.get('import_partial'), 'wb') as partial_import:
                        pickle.dump(data, partial_import)
                    if context.get('defer_parent_store_computation'):
                        self._parent_store_compute(cr)
                    cr.commit()
        except Exception, e:
            cr.rollback()
            return -1, {}, 'Line %d : %s' % (position + 1, tools.ustr(e)), ''

        if context.get('defer_parent_store_computation'):
            self._parent_store_compute(cr)
        return position, 0, 0, 0

    def load(self, cr, uid, fields, data, context=None):
        """
        Attempts to load the data matrix, and returns a list of ids (or
        ``False`` if there was an error and no id could be generated) and a
        list of messages.

        The ids are those of the records created and saved (in database), in
        the same order they were extracted from the file. They can be passed
        directly to :meth:`~read`

        :param fields: list of fields to import, at the same index as the corresponding data
        :type fields: list(str)
        :param data: row-major matrix of data to import
        :type data: list(list(str))
        :param dict context:
        :returns: {ids: list(int)|False, messages: [Message]}
        """
        cr.execute('SAVEPOINT model_load')
        messages = []

        fields = map(fix_import_export_id_paths, fields)
        ModelData = self.pool['ir.model.data']
        ModelData.clear_caches()
        fg = self.fields_get(cr, uid, context=context)

        mode = 'init'
        current_module = ''
        noupdate = False

        ids = []
        for id, xid, record, info in self._convert_records(cr, uid,
                self._extract_records(cr, uid, fields, data,
                                      context=context, log=messages.append),
                context=context, log=messages.append):
            try:
                cr.execute('SAVEPOINT model_load_save')
            except psycopg2.InternalError, e:
                # broken transaction, exit and hope the source error was
                # already logged
                if not any(message['type'] == 'error' for message in messages):
                    messages.append(dict(info, type='error', message=
                        u"Unknown database error: '%s'" % e))
                break
            try:
                ids.append(ModelData._update(cr, uid, self._name,
                     current_module, record, mode=mode, xml_id=xid,
                     noupdate=noupdate, res_id=id, context=context))
                cr.execute('RELEASE SAVEPOINT model_load_save')
            except psycopg2.Warning, e:
                messages.append(dict(info, type='warning', message=str(e)))
                cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
            except psycopg2.Error, e:
                messages.append(dict(
                    info, type='error',
                    **PGERROR_TO_OE[e.pgcode](self, fg, info, e)))
                # Failed to write, log to messages, rollback savepoint (to
                # avoid broken transaction) and keep going
                cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
            except Exception, e:
                message = (_('Unknown error during import:') +
                           ' %s: %s' % (type(e), unicode(e)))
                moreinfo = _('Resolve other errors first')
                messages.append(dict(info, type='error',
                                     message=message, moreinfo=moreinfo))
                # Failed for some reason, perhaps due to invalid data supplied,
                # rollback savepoint and keep going
                cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
        if any(message['type'] == 'error' for message in messages):
            cr.execute('ROLLBACK TO SAVEPOINT model_load')
            ids = False
        return {'ids': ids, 'messages': messages}
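
    # Illustrative usage sketch (hypothetical model, fields and rows):
    #
    #   result = model.load(cr, uid,
    #       ['id', 'name'],
    #       [['__export__.res_partner_1', 'Agrolait']])
    #   # on success:  {'ids': [...], 'messages': []}
    #   # on error:    {'ids': False, 'messages': [{'type': 'error', ...}]}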

    def _extract_records(self, cr, uid, fields_, data,
                         context=None, log=lambda a: None):
        """ Generates record dicts from the data sequence.

        The result is a generator of dicts mapping field names to raw
        (unconverted, unvalidated) values.

        For relational fields, if sub-fields were provided the value will be
        a list of sub-records

        The following sub-fields may be set on the record (by key):
        * None is the name_get for the record (to use with name_create/name_search)
        * "id" is the External ID for the record
        * ".id" is the Database ID for the record
        """
        columns = dict((k, v.column) for k, v in self._all_columns.iteritems())
        # Fake columns to avoid special cases in extractor
        columns[None] = fields.char('rec_name')
        columns['id'] = fields.char('External ID')
        columns['.id'] = fields.integer('Database ID')

        # m2o fields can't be on multiple lines so exclude them from the
        # is_relational field rows filter, but special-case them later on to
        # be handled with relational fields (as they can have subfields)
        is_relational = lambda field: columns[field]._type in ('one2many', 'many2many', 'many2one')
        get_o2m_values = itemgetter_tuple(
            [index for index, field in enumerate(fields_)
             if columns[field[0]]._type == 'one2many'])
        get_nono2m_values = itemgetter_tuple(
            [index for index, field in enumerate(fields_)
             if columns[field[0]]._type != 'one2many'])
        # Checks if the provided row has any non-empty non-relational field
        def only_o2m_values(row, f=get_nono2m_values, g=get_o2m_values):
            return any(g(row)) and not any(f(row))

        index = 0
        while True:
            if index >= len(data):
                return

            row = data[index]
            # copy non-relational fields to record dict
            record = dict((field[0], value)
                          for field, value in itertools.izip(fields_, row)
                          if not is_relational(field[0]))

            # Get all following rows which have relational values attached to
            # the current record (no non-relational values)
            record_span = itertools.takewhile(
                only_o2m_values, itertools.islice(data, index + 1, None))
            # stitch record row back on for relational fields
            record_span = list(itertools.chain([row], record_span))
            for relfield in set(
                    field[0] for field in fields_
                    if is_relational(field[0])):
                column = columns[relfield]
                # FIXME: how to not use _obj without relying on fields_get?
                Model = self.pool[column._obj]

                # get only cells for this sub-field, should be strictly
                # non-empty, field path [None] is for name_get column
                indices, subfields = zip(*((index, field[1:] or [None])
                                           for index, field in enumerate(fields_)
                                           if field[0] == relfield))

                # return all rows which have at least one value for the
                # subfields of relfield
                relfield_data = filter(any, map(itemgetter_tuple(indices), record_span))
                record[relfield] = [subrecord
                    for subrecord, _subinfo in Model._extract_records(
                        cr, uid, subfields, relfield_data,
                        context=context, log=log)]

            yield record, {'rows': {
                'from': index,
                'to': index + len(record_span) - 1,
            }}
            index += len(record_span)
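
    # Illustrative sketch (hypothetical fields and rows): with
    # fields_ = [['name'], ['order_line', 'name']] and data
    #
    #     [['SO1', 'Line 1'],
    #      ['',    'Line 2']]
    #
    # a single record is yielded:
    #     {'name': 'SO1', 'order_line': [{'name': 'Line 1'},
    #                                    {'name': 'Line 2'}]}
    # because the second row only carries one2many values.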

    def _convert_records(self, cr, uid, records,
                         context=None, log=lambda a: None):
        """ Converts records from the source iterable (recursive dicts of
        strings) into forms which can be written to the database (via
        self.create or (ir.model.data)._update)

        :returns: a list of triplets of (id, xid, record)
        :rtype: list((int|None, str|None, dict))
        """
        if context is None: context = {}
        Converter = self.pool['ir.fields.converter']
        columns = dict((k, v.column) for k, v in self._all_columns.iteritems())
        Translation = self.pool['ir.translation']
        field_names = dict(
            (f, (Translation._get_source(cr, uid, self._name + ',' + f, 'field',
                                         context.get('lang'))
                 or column.string))
            for f, column in columns.iteritems())

        convert = Converter.for_model(cr, uid, self, context=context)

        def _log(base, field, exception):
            type = 'warning' if isinstance(exception, Warning) else 'error'
            # logs the logical (not human-readable) field name for automated
            # processing of response, but injects human readable in message
            record = dict(base, type=type, field=field,
                          message=unicode(exception.args[0]) % base)
            if len(exception.args) > 1 and exception.args[1]:
                record.update(exception.args[1])
            log(record)

        stream = CountingStream(records)
        for record, extras in stream:
            dbid = False
            xid = False
            # name_get/name_create
            if None in record: pass
            # xid
            if 'id' in record:
                xid = record['id']
            # dbid
            if '.id' in record:
                try:
                    dbid = int(record['.id'])
                except ValueError:
                    # in case of overridden id column
                    dbid = record['.id']
                if not self.search(cr, uid, [('id', '=', dbid)], context=context):
                    log(dict(extras,
                        type='error',
                        record=stream.index,
                        field='.id',
                        message=_(u"Unknown database identifier '%s'") % dbid))
                    dbid = False

            converted = convert(record, lambda field, err: \
                _log(dict(extras, record=stream.index, field=field_names[field]), field, err))

            yield dbid, xid, converted, dict(extras, record=stream.index)

    @api.multi
    def _validate_fields(self, field_names):
        field_names = set(field_names)

        # old-style constraint methods
        trans = self.env['ir.translation']
        cr, uid, context = self.env.args
        ids = self.ids
        errors = []
        for fun, msg, names in self._constraints:
            try:
                # validation must be context-independent; call `fun` without context
                valid = not (set(names) & field_names) or fun(self._model, cr, uid, ids)
                extra_error = None
            except Exception, e:
                _logger.debug('Exception while validating constraint', exc_info=True)
                valid = False
                extra_error = tools.ustr(e)
            if not valid:
                if callable(msg):
                    res_msg = msg(self._model, cr, uid, ids, context=context)
                    if isinstance(res_msg, tuple):
                        template, params = res_msg
                        res_msg = template % params
                else:
                    res_msg = trans._get_source(self._name, 'constraint', self.env.lang, msg)
                if extra_error:
                    res_msg += "\n\n%s\n%s" % (_('Error details:'), extra_error)
                errors.append(
                    _("Field(s) `%s` failed against a constraint: %s") %
                    (', '.join(names), res_msg)
                )
        if errors:
            raise ValidationError('\n'.join(errors))

        # new-style constraint methods
        for check in self._constraint_methods:
            if set(check._constrains) & field_names:
                try:
                    check(self)
                except ValidationError, e:
                    raise
                except Exception, e:
                    raise ValidationError("Error while validating constraint\n\n%s" % tools.ustr(e))

    def default_get(self, cr, uid, fields_list, context=None):
        """ default_get(fields) -> default_values

        Return default values for the fields in `fields_list`. Default
        values are determined by the context, user defaults, and the model
        itself.

        :param fields_list: a list of field names
        :return: a dictionary mapping each field name to its corresponding
            default value; the keys of the dictionary are the fields in
            `fields_list` that have a default value different from ``False``.

        This method should not be overridden. In order to change the
        mechanism for determining default values, you should override method
        :meth:`add_default_value` instead.
        """
        # trigger view init hook
        self.view_init(cr, uid, fields_list, context)

        # use a new record to determine default values; evaluate fields on the
        # new record and put default values in result
        record = self.new(cr, uid, {}, context=context)
        result = {}
        for name in fields_list:
            if name in self._fields:
                value = record[name]
                if name in record._cache:
                    result[name] = value        # it really is a default value

        # convert default values to the expected format
        result = self._convert_to_write(result)
        return result
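
    # Illustrative usage sketch (hypothetical model and fields):
    #
    #   defaults = model.default_get(cr, uid, ['name', 'active'])
    #   # e.g. {'active': True}; fields whose default is False are omitted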

    def add_default_value(self, field):
        """ Set the default value of `field` to the new record `self`.
            The value must be assigned to `self`.
        """
        assert not self.id, "Expected new record: %s" % self
        cr, uid, context = self.env.args
        name = field.name

        # 1. look up context
        key = 'default_' + name
        if key in context:
            self[name] = context[key]
            return

        # 2. look up ir_values
        #    Note: performance is good, because get_defaults_dict is cached!
        ir_values_dict = self.env['ir.values'].get_defaults_dict(self._name)
        if name in ir_values_dict:
            self[name] = ir_values_dict[name]
            return

        # 3. look up property fields
        #    TODO: get rid of this one
        column = self._columns.get(name)
        if isinstance(column, fields.property):
            self[name] = self.env['ir.property'].get(name, self._name)
            return

        # 4. look up _defaults
        if name in self._defaults:
            value = self._defaults[name]
            if callable(value):
                value = value(self._model, cr, uid, context)
            self[name] = value
            return

        # 5. delegate to field
        field.determine_default(self)

    def fields_get_keys(self, cr, user, context=None):
        res = self._columns.keys()
        # TODO I believe this loop can be replaced by
        # res.extend(self._inherit_fields.keys())
        for parent in self._inherits:
            res.extend(self.pool[parent].fields_get_keys(cr, user, context))
        return res

    def _rec_name_fallback(self, cr, uid, context=None):
        rec_name = self._rec_name
        if rec_name not in self._columns:
            rec_name = self._columns.keys()[0] if len(self._columns.keys()) > 0 else "id"
        return rec_name

    #
    # Override this method if you need a window title that depends on the context
    #
    def view_header_get(self, cr, user, view_id=None, view_type='form', context=None):
        return False

    def user_has_groups(self, cr, uid, groups, context=None):
        """Return True if the user is at least a member of one of the groups
           in `groups`. Typically used to resolve the `groups` attribute
           in view and model definitions.

           :param str groups: comma-separated list of fully-qualified group
                              external IDs, e.g.: ``base.group_user,base.group_system``
           :return: True if the current user is a member of one of the
                    given groups
        """
        return any(self.pool['res.users'].has_group(cr, uid, group_ext_id)
                   for group_ext_id in groups.split(','))
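
    # Illustrative usage sketch (hypothetical caller):
    #
    #   if self.user_has_groups(cr, uid, 'base.group_user,base.group_system'):
    #       # the current user belongs to at least one of the two groups
    #       ...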

    def _get_default_form_view(self, cr, user, context=None):
        """ Generates a default single-line form view using all fields
        of the current model except the m2m and o2m ones.

        :param cr: database cursor
        :param int user: user id
        :param dict context: connection context
        :returns: a form view as an lxml document
        :rtype: etree._Element
        """
        view = etree.Element('form', string=self._description)
        group = etree.SubElement(view, 'group', col="4")
        for fname, field in self._fields.iteritems():
            if field.automatic or field.type in ('one2many', 'many2many'):
                continue

            etree.SubElement(group, 'field', name=fname)
            if field.type == 'text':
                etree.SubElement(group, 'newline')
        return view

    def _get_default_search_view(self, cr, user, context=None):
        """ Generates a single-field search view, based on _rec_name.

        :param cr: database cursor
        :param int user: user id
        :param dict context: connection context
        :returns: a search view as an lxml document
        :rtype: etree._Element
        """
        view = etree.Element('search', string=self._description)
        etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))
        return view

    def _get_default_tree_view(self, cr, user, context=None):
        """ Generates a single-field tree view, based on _rec_name.

        :param cr: database cursor
        :param int user: user id
        :param dict context: connection context
        :returns: a tree view as an lxml document
        :rtype: etree._Element
        """
        view = etree.Element('tree', string=self._description)
        etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))
        return view

    def _get_default_calendar_view(self, cr, user, context=None):
        """ Generates a default calendar view by trying to infer
        calendar fields from a number of pre-set attribute names

        :param cr: database cursor
        :param int user: user id
        :param dict context: connection context
        :returns: a calendar view
        :rtype: etree._Element
        """
        def set_first_of(seq, in_, to):
            """Sets the first value of `seq` also found in `in_` to
            the `to` attribute of the view being closed over.

            Returns whether it found a suitable value (and set it on
            the attribute) or not
            """
            for item in seq:
                if item in in_:
                    view.set(to, item)
                    return True
            return False

        view = etree.Element('calendar', string=self._description)
        etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))

        if self._date_name not in self._columns:
            date_found = False
            for dt in ['date', 'date_start', 'x_date', 'x_date_start']:
                if dt in self._columns:
                    self._date_name = dt
                    date_found = True
                    break

            if not date_found:
                raise except_orm(_('Invalid Object Architecture!'), _("Insufficient fields for Calendar View!"))
        view.set('date_start', self._date_name)

        set_first_of(["user_id", "partner_id", "x_user_id", "x_partner_id"],
                     self._columns, 'color')

        if not set_first_of(["date_stop", "date_end", "x_date_stop", "x_date_end"],
                            self._columns, 'date_stop'):
            if not set_first_of(["date_delay", "planned_hours", "x_date_delay", "x_planned_hours"],
                                self._columns, 'date_delay'):
                raise except_orm(
                    _('Invalid Object Architecture!'),
                    _("Insufficient fields to generate a Calendar View for %s, missing a date_stop or a date_delay") % self._name)

        return view

    def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
        """ fields_view_get([view_id | view_type='form'])

        Get the detailed composition of the requested view, like fields, model and view architecture

        :param view_id: id of the view or None
        :param view_type: type of the view to return if view_id is None ('form', 'tree', ...)
        :param toolbar: true to include contextual actions
        :param submenu: deprecated
        :return: dictionary describing the composition of the requested view (including inherited views and extensions)
        :raise AttributeError:
            * if the inherited view has an unknown position to work with, other than 'before', 'after', 'inside', 'replace'
            * if some tag other than 'position' is found in the parent view
        :raise Invalid ArchitectureError: if there is a view type other than form, tree, calendar, search etc. defined on the structure
        """
        if context is None:
            context = {}
        View = self.pool['ir.ui.view']

        result = {
            'model': self._name,
            'field_parent': False,
        }

        # try to find a view_id if none provided
        if not view_id:
            # <view_type>_view_ref in context can be used to override the default view
            view_ref_key = view_type + '_view_ref'
            view_ref = context.get(view_ref_key)
            if view_ref:
                if '.' in view_ref:
                    module, view_ref = view_ref.split('.', 1)
                    cr.execute("SELECT res_id FROM ir_model_data WHERE model='ir.ui.view' AND module=%s AND name=%s", (module, view_ref))
                    view_ref_res = cr.fetchone()
                    if view_ref_res:
                        view_id = view_ref_res[0]
                else:
                    _logger.warning('%r requires a fully-qualified external id (got: %r for model %s). '
                        'Please use the complete `module.view_id` form instead.', view_ref_key, view_ref,
                        self._name)

            if not view_id:
                # otherwise try to find the lowest priority matching ir.ui.view
                view_id = View.default_view(cr, uid, self._name, view_type, context=context)

        # context for post-processing might be overridden
        ctx = context
        if view_id:
            # read the view with inherited views applied
            root_view = View.read_combined(cr, uid, view_id, fields=['id', 'name', 'field_parent', 'type', 'model', 'arch'], context=context)
            result['arch'] = root_view['arch']
            result['name'] = root_view['name']
            result['type'] = root_view['type']
            result['view_id'] = root_view['id']
            result['field_parent'] = root_view['field_parent']
            # override context for postprocessing
            if root_view.get('model') != self._name:
                ctx = dict(context, base_model_name=root_view.get('model'))
        else:
            # fall back on default view methods if no ir.ui.view could be found
            try:
                get_func = getattr(self, '_get_default_%s_view' % view_type)
                arch_etree = get_func(cr, uid, context)
                result['arch'] = etree.tostring(arch_etree, encoding='utf-8')
                result['type'] = view_type
                result['name'] = 'default'
            except AttributeError:
                raise except_orm(_('Invalid Architecture!'), _("No default view of type '%s' could be found !") % view_type)

        # Apply post processing, groups and modifiers etc...
        xarch, xfields = View.postprocess_and_fields(cr, uid, self._name, etree.fromstring(result['arch']), view_id, context=ctx)
        result['arch'] = xarch
        result['fields'] = xfields

        # Add related action information if asked
        if toolbar:
            toclean = ('report_sxw_content', 'report_rml_content', 'report_sxw', 'report_rml', 'report_sxw_content_data', 'report_rml_content_data')
            def clean(x):
                x = x[2]
                for key in toclean:
                    x.pop(key, None)
                return x
            ir_values_obj = self.pool.get('ir.values')
            resprint = ir_values_obj.get(cr, uid, 'action', 'client_print_multi', [(self._name, False)], False, context)
            resaction = ir_values_obj.get(cr, uid, 'action', 'client_action_multi', [(self._name, False)], False, context)
            resrelate = ir_values_obj.get(cr, uid, 'action', 'client_action_relate', [(self._name, False)], False, context)
            resaction = [clean(action) for action in resaction if view_type == 'tree' or not action[2].get('multi')]
            resprint = [clean(print_) for print_ in resprint if view_type == 'tree' or not print_[2].get('multi')]
            # When multi="True" is set, the action is displayed only in the
            # "More" menu of the list view
            resrelate = [clean(action) for action in resrelate
                         if (action[2].get('multi') and view_type == 'tree') or (not action[2].get('multi') and view_type == 'form')]

            for x in itertools.chain(resprint, resaction, resrelate):
                x['string'] = x['name']

            result['toolbar'] = {
                'print': resprint,
                'action': resaction,
                'relate': resrelate,
            }
        return result

    def get_formview_id(self, cr, uid, id, context=None):
        """ Return a view id to open the document with. This method is meant to be
            overridden in addons that want to give specific view ids, for example.

            :param int id: id of the document to open
        """
        return False

    def get_formview_action(self, cr, uid, id, context=None):
        """ Return an action to open the document. This method is meant to be
            overridden in addons that want to give specific view ids, for example.

            :param int id: id of the document to open
        """
        view_id = self.get_formview_id(cr, uid, id, context=context)
        return {
            'type': 'ir.actions.act_window',
            'res_model': self._name,
            'view_type': 'form',
            'view_mode': 'form',
            'views': [(view_id, 'form')],
            'target': 'current',
            'res_id': id,
        }

    def get_access_action(self, cr, uid, id, context=None):
        """ Return an action to open the document. This method is meant to be
            overridden in addons that want to give specific access to the document.
            By default it opens the formview of the document.

            :param int id: id of the document to open
        """
        return self.get_formview_action(cr, uid, id, context=context)

    def _view_look_dom_arch(self, cr, uid, node, view_id, context=None):
        return self.pool['ir.ui.view'].postprocess_and_fields(
            cr, uid, self._name, node, view_id, context=context)

    def search_count(self, cr, user, args, context=None):
        """ search_count(args) -> int

        Returns the number of records in the current model matching :ref:`the
        provided domain <reference/orm/domains>`.
        """
        res = self.search(cr, user, args, context=context, count=True)
        if isinstance(res, list):
            return len(res)
        return res

    @api.returns('self')
    def search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False):
        """ search(args[, offset=0][, limit=None][, order=None][, count=False])

        Searches for records based on the ``args``
        :ref:`search domain <reference/orm/domains>`.

        :param args: :ref:`A search domain <reference/orm/domains>`. Use an empty
                     list to match all records.
        :param int offset: number of results to ignore (default: none)
        :param int limit: maximum number of records to return (default: all)
        :param str order: sort string
        :param bool count: if ``True``, the call should return the number of
                           records matching ``args`` rather than the records
                           themselves.
        :returns: at most ``limit`` records matching the search criteria

        :raise AccessError: if the user tries to bypass access rules for read on the requested object.
        """
        return self._search(cr, user, args, offset=offset, limit=limit, order=order, context=context, count=count)

    #
    # display_name, name_get, name_create, name_search
    #

    @api.depends(lambda self: (self._rec_name,) if self._rec_name else ())
    def _compute_display_name(self):
        for i, got_name in enumerate(self.name_get()):
            self[i].display_name = got_name[1]
1684 """ name_get() -> [(id, name), ...]
1686 Returns a textual representation for the records in ``self``.
1687 By default this is the value of the ``display_name`` field.
1689 :return: list of pairs ``(id, text_repr)`` for each records
1693 name = self._rec_name
1694 if name in self._fields:
1695 convert = self._fields[name].convert_to_display_name
1697 result.append((record.id, convert(record[name])))
1700 result.append((record.id, "%s,%s" % (record._name, record.id)))
1705 def name_create(self, name):
1706 """ name_create(name) -> record
1708 Create a new record by calling :meth:`~.create` with only one value
1709 provided: the display name of the new record.
1711 The new record will be initialized with any default values
1712 applicable to this model, or provided through the context. The usual
1713 behavior of :meth:`~.create` applies.
1715 :param name: display name of the record to create
1717 :return: the :meth:`~.name_get` pair value of the created record
1720 record = self.create({self._rec_name: name})
1721 return record.name_get()[0]
1723 _logger.warning("Cannot execute name_create, no _rec_name defined on %s", self._name)
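# Sketch of a name_create() call (hypothetical model and value); this is
# what "quick create" widgets rely on, and it returns a name_get() pair:
#
#     pair = self.pool['res.partner.category'].name_create(cr, uid, 'Gold')
#     # pair is (new_id, display_name)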
1727 def name_search(self, name='', args=None, operator='ilike', limit=100):
1728 """ name_search(name='', args=None, operator='ilike', limit=100) -> records
1730 Search for records that have a display name matching the given
1731 `name` pattern when compared with the given `operator`, while also
1732 matching the optional search domain (`args`).
1734 This is used for example to provide suggestions based on a partial
1735 value for a relational field. It may sometimes be seen as the inverse
1736 function of :meth:`~.name_get`, but it is not guaranteed to be.
1738 This method is equivalent to calling :meth:`~.search` with a search
1739 domain based on ``display_name`` and then :meth:`~.name_get` on the
1740 result of the search.
1742 :param str name: the name pattern to match
1743 :param list args: optional search domain (see :meth:`~.search` for
1744 syntax), specifying further restrictions
1745 :param str operator: domain operator for matching `name`, such as
1746 ``'like'`` or ``'='``.
1747 :param int limit: optional max number of records to return
1749 :return: list of pairs ``(id, text_repr)`` for all matching records.
1751 return self._name_search(name, args, operator, limit=limit)
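# Sketch of a name_search() call (hypothetical model and domain):
#
#     matches = self.pool['res.partner'].name_search(
#         cr, uid, 'agro', args=[('customer', '=', True)],
#         operator='ilike', limit=10)
#     # matches is a list of (id, display_name) pairs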
1753 def _name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100, name_get_uid=None):
1754 # private implementation of name_search, allows passing a dedicated user
1755 # for the name_get part to solve some access rights issues
1756 args = list(args or [])
1757 # optimize out the default criterion of ``ilike ''`` that matches everything
1758 if not self._rec_name:
1759 _logger.warning("Cannot execute name_search, no _rec_name defined on %s", self._name)
1760 elif not (name == '' and operator == 'ilike'):
1761 args += [(self._rec_name, operator, name)]
1762 access_rights_uid = name_get_uid or user
1763 ids = self._search(cr, user, args, limit=limit, context=context, access_rights_uid=access_rights_uid)
1764 res = self.name_get(cr, access_rights_uid, ids, context)
1767 def read_string(self, cr, uid, id, langs, fields=None, context=None):
1770 self.pool.get('ir.translation').check_access_rights(cr, uid, 'read')
1772 fields = self._columns.keys() + self._inherit_fields.keys()
1773 #FIXME: collect all calls to _get_source into one SQL call.
1775 res[lang] = {'code': lang}
1777 if f in self._columns:
1778 res_trans = self.pool.get('ir.translation')._get_source(cr, uid, self._name+','+f, 'field', lang)
1780 res[lang][f] = res_trans
1782 res[lang][f] = self._columns[f].string
1783 for table in self._inherits:
1784 cols = intersect(self._inherit_fields.keys(), fields)
1785 res2 = self.pool[table].read_string(cr, uid, id, langs, cols, context)
1788 res[lang]['code'] = lang
1789 for f in res2[lang]:
1790 res[lang][f] = res2[lang][f]
1793 def write_string(self, cr, uid, id, langs, vals, context=None):
1794 self.pool.get('ir.translation').check_access_rights(cr, uid, 'write')
1795 #FIXME: try to only call the translation in one SQL
1798 if field in self._columns:
1799 src = self._columns[field].string
1800 self.pool.get('ir.translation')._set_ids(cr, uid, self._name+','+field, 'field', lang, [0], vals[field], src)
1801 for table in self._inherits:
1802 cols = intersect(self._inherit_fields.keys(), vals)
1804 self.pool[table].write_string(cr, uid, id, langs, vals, context)
1807 def _add_missing_default_values(self, cr, uid, values, context=None):
1808 # avoid overriding inherited values when parent is set
1810 for tables, parent_field in self._inherits.items():
1811 if parent_field in values:
1812 avoid_tables.append(tables)
1814 # compute missing fields
1815 missing_defaults = set()
1816 for field in self._columns.keys():
1817 if not field in values:
1818 missing_defaults.add(field)
1819 for field in self._inherit_fields.keys():
1820 if (field not in values) and (self._inherit_fields[field][0] not in avoid_tables):
1821 missing_defaults.add(field)
1822 # discard magic fields
1823 missing_defaults -= set(MAGIC_COLUMNS)
1825 if missing_defaults:
1826 # override defaults with the provided values, never allow the other way around
1827 defaults = self.default_get(cr, uid, list(missing_defaults), context)
1829 if ((dv in self._columns and self._columns[dv]._type == 'many2many') \
1830 or (dv in self._inherit_fields and self._inherit_fields[dv][2]._type == 'many2many')) \
1831 and defaults[dv] and isinstance(defaults[dv][0], (int, long)):
1832 defaults[dv] = [(6, 0, defaults[dv])]
1833 if (dv in self._columns and self._columns[dv]._type == 'one2many' \
1834 or (dv in self._inherit_fields and self._inherit_fields[dv][2]._type == 'one2many')) \
1835 and isinstance(defaults[dv], (list, tuple)) and defaults[dv] and isinstance(defaults[dv][0], dict):
1836 defaults[dv] = [(0, 0, x) for x in defaults[dv]]
1837 defaults.update(values)
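# The two conversions above turn "plain" default values into one2many/
# many2many write commands. Sketch with hypothetical values:
#
#     [7, 8]              ->  [(6, 0, [7, 8])]            # m2m: replace ids
#     [{'name': 'line'}]  ->  [(0, 0, {'name': 'line'})]  # o2m: create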
1841 def clear_caches(self):
1842 """ Clear the caches
1844 This clears the caches associated to methods decorated with
1845 ``tools.ormcache`` or ``tools.ormcache_multi``.
1848 self._ormcache.clear()
1849 self.pool._any_cache_cleared = True
1850 except AttributeError:
1854 def _read_group_fill_results(self, cr, uid, domain, groupby, remaining_groupbys, aggregated_fields,
1855 read_group_result, read_group_order=None, context=None):
1856 """Helper method for filling in empty groups for all possible values of
1857 the field being grouped by"""
1859 # self._group_by_full should map groupable fields to a method that returns
1860 # a list of all aggregated values that we want to display for this field,
1861 # in the form of a m2o-like pair (key,label).
1862 # This is useful to implement kanban views for instance, where all columns
1863 # should be displayed even if they don't contain any record.
1865 # Grab the list of all groups that should be displayed, including all present groups
1866 present_group_ids = [x[groupby][0] for x in read_group_result if x[groupby]]
1867 all_groups,folded = self._group_by_full[groupby](self, cr, uid, present_group_ids, domain,
1868 read_group_order=read_group_order,
1869 access_rights_uid=openerp.SUPERUSER_ID,
1872 result_template = dict.fromkeys(aggregated_fields, False)
1873 result_template[groupby + '_count'] = 0
1874 if remaining_groupbys:
1875 result_template['__context'] = {'group_by': remaining_groupbys}
1877 # Merge the left_side (current results as dicts) with the right_side (all
1878 # possible values as m2o pairs). Both lists are supposed to be using the
1879 # same ordering, and can be merged in one pass.
1882 def append_left(left_side):
1883 grouped_value = left_side[groupby] and left_side[groupby][0]
1884 if not grouped_value in known_values:
1885 result.append(left_side)
1886 known_values[grouped_value] = left_side
1888 count_attr = groupby + '_count'
1889 known_values[grouped_value].update({count_attr: left_side[count_attr]})
1890 def append_right(right_side):
1891 grouped_value = right_side[0]
1892 if not grouped_value in known_values:
1893 line = dict(result_template)
1894 line[groupby] = right_side
1895 line['__domain'] = [(groupby,'=',grouped_value)] + domain
1897 known_values[grouped_value] = line
1898 while read_group_result or all_groups:
1899 left_side = read_group_result[0] if read_group_result else None
1900 right_side = all_groups[0] if all_groups else None
1901 assert left_side is None or left_side[groupby] is False \
1902 or isinstance(left_side[groupby], (tuple,list)), \
1903 'M2O-like pair expected, got %r' % left_side[groupby]
1904 assert right_side is None or isinstance(right_side, (tuple,list)), \
1905 'M2O-like pair expected, got %r' % right_side
1906 if left_side is None:
1907 append_right(all_groups.pop(0))
1908 elif right_side is None:
1909 append_left(read_group_result.pop(0))
1910 elif left_side[groupby] == right_side:
1911 append_left(read_group_result.pop(0))
1912 all_groups.pop(0) # discard right_side
1913 elif not left_side[groupby] or not left_side[groupby][0]:
1914 # left side == "Undefined" entry, not present on right_side
1915 append_left(read_group_result.pop(0))
1917 append_right(all_groups.pop(0))
1921 r['__fold'] = folded.get(r[groupby] and r[groupby][0], False)
1924 def _read_group_prepare(self, orderby, aggregated_fields, annotated_groupbys, query):
1926 Prepares the GROUP BY and ORDER BY terms for the read_group method. Adds the missing JOIN clause
1927 to the query if the order should be computed against a m2o field.
1928 :param orderby: the orderby definition in the form "%(field)s %(order)s"
1929 :param aggregated_fields: list of aggregated fields in the query
1930 :param annotated_groupbys: list of dictionaries returned by _read_group_process_groupby
1931 These dictionaries contain the qualified name of each groupby
1932 (fully qualified SQL name for the corresponding field),
1933 and the (non-raw) field name.
1934 :param osv.Query query: the query under construction
1935 :return: (groupby_terms, orderby_terms)
1938 groupby_terms = [gb['qualified_field'] for gb in annotated_groupbys]
1939 groupby_fields = [gb['groupby'] for gb in annotated_groupbys]
1941 return groupby_terms, orderby_terms
1943 self._check_qorder(orderby)
1944 for order_part in orderby.split(','):
1945 order_split = order_part.split()
1946 order_field = order_split[0]
1947 if order_field in groupby_fields:
1949 if self._all_columns[order_field.split(':')[0]].column._type == 'many2one':
1950 order_clause = self._generate_order_by(order_part, query).replace('ORDER BY ', '')
1952 orderby_terms.append(order_clause)
1953 groupby_terms += [order_term.split()[0] for order_term in order_clause.split(',')]
1955 order = '"%s" %s' % (order_field, '' if len(order_split) == 1 else order_split[1])
1956 orderby_terms.append(order)
1957 elif order_field in aggregated_fields:
1958 orderby_terms.append(order_part)
1960 # Cannot order by a field that will not appear in the results (needs to be grouped or aggregated)
1961 _logger.warn('%s: read_group order by `%s` ignored, cannot sort on empty columns (not grouped/aggregated)',
1962 self._name, order_part)
1963 return groupby_terms, orderby_terms
1965 def _read_group_process_groupby(self, gb, query, context):
1967 Helper method to collect important information about groupbys: raw
1968 field name, type, time information, qualified name, ...
1970 split = gb.split(':')
1971 field_type = self._all_columns[split[0]].column._type
1972 gb_function = split[1] if len(split) == 2 else None
1973 temporal = field_type in ('date', 'datetime')
1974 tz_convert = field_type == 'datetime' and context.get('tz') in pytz.all_timezones
1975 qualified_field = self._inherits_join_calc(split[0], query)
1978 'day': 'dd MMM YYYY',
1979 'week': "'W'w YYYY",
1980 'month': 'MMMM YYYY',
1981 'quarter': 'QQQ YYYY',
1985 'day': dateutil.relativedelta.relativedelta(days=1),
1986 'week': datetime.timedelta(days=7),
1987 'month': dateutil.relativedelta.relativedelta(months=1),
1988 'quarter': dateutil.relativedelta.relativedelta(months=3),
1989 'year': dateutil.relativedelta.relativedelta(years=1)
1992 qualified_field = "timezone('%s', timezone('UTC',%s))" % (context.get('tz', 'UTC'), qualified_field)
1993 qualified_field = "date_trunc('%s', %s)" % (gb_function or 'month', qualified_field)
1994 if field_type == 'boolean':
1995 qualified_field = "coalesce(%s,false)" % qualified_field
2000 'display_format': display_formats[gb_function or 'month'] if temporal else None,
2001 'interval': time_intervals[gb_function or 'month'] if temporal else None,
2002 'tz_convert': tz_convert,
2003 'qualified_field': qualified_field
2006 def _read_group_prepare_data(self, key, value, groupby_dict, context):
2008 Helper method to sanitize the data received by read_group. None
2009 values are converted to False, and date/datetime values are formatted
2010 and corrected according to the timezones.
2012 value = False if value is None else value
2013 gb = groupby_dict.get(key)
2014 if gb and gb['type'] in ('date', 'datetime') and value:
2015 if isinstance(value, basestring):
2016 dt_format = DEFAULT_SERVER_DATETIME_FORMAT if gb['type'] == 'datetime' else DEFAULT_SERVER_DATE_FORMAT
2017 value = datetime.datetime.strptime(value, dt_format)
2018 if gb['tz_convert']:
2019 value = pytz.timezone(context['tz']).localize(value)
2022 def _read_group_get_domain(self, groupby, value):
2024 Helper method to construct the domain corresponding to a groupby and
2025 a given value. This is mostly relevant for date/datetime.
2027 if groupby['type'] in ('date', 'datetime') and value:
2028 dt_format = DEFAULT_SERVER_DATETIME_FORMAT if groupby['type'] == 'datetime' else DEFAULT_SERVER_DATE_FORMAT
2029 domain_dt_begin = value
2030 domain_dt_end = value + groupby['interval']
2031 if groupby['tz_convert']:
2032 domain_dt_begin = domain_dt_begin.astimezone(pytz.utc)
2033 domain_dt_end = domain_dt_end.astimezone(pytz.utc)
2034 return [(groupby['field'], '>=', domain_dt_begin.strftime(dt_format)),
2035 (groupby['field'], '<', domain_dt_end.strftime(dt_format))]
2036 if groupby['type'] == 'many2one' and value:
2038 return [(groupby['field'], '=', value)]
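# Example of the domain built for a monthly groupby on a datetime field
# (illustrative field name and dates, tz_convert=False):
#
#     gb = {'type': 'datetime', 'field': 'date_order', 'tz_convert': False,
#           'interval': dateutil.relativedelta.relativedelta(months=1)}
#     self._read_group_get_domain(gb, datetime.datetime(2014, 1, 1))
#     # -> [('date_order', '>=', '2014-01-01 00:00:00'),
#     #     ('date_order', '<', '2014-02-01 00:00:00')]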
2040 def _read_group_format_result(self, data, annotated_groupbys, groupby, groupby_dict, domain, context):
2042 Helper method to format the data contained in the dictionary ``data`` by
2043 adding the domain corresponding to its values, the groupbys in the
2044 context and by properly formatting the date/datetime values.
2046 domain_group = [dom for gb in annotated_groupbys for dom in self._read_group_get_domain(gb, data[gb['groupby']])]
2047 for k,v in data.iteritems():
2048 gb = groupby_dict.get(k)
2049 if gb and gb['type'] in ('date', 'datetime') and v:
2050 data[k] = babel.dates.format_date(v, format=gb['display_format'], locale=context.get('lang', 'en_US'))
2052 data['__domain'] = domain_group + domain
2053 if len(groupby) - len(annotated_groupbys) >= 1:
2054 data['__context'] = { 'group_by': groupby[len(annotated_groupbys):]}
2058 def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False, lazy=True):
2060 Get the list of records in list view grouped by the given ``groupby`` fields
2062 :param cr: database cursor
2063 :param uid: current user id
2064 :param domain: list specifying search criteria [['field_name', 'operator', 'value'], ...]
2065 :param list fields: list of fields present in the list view specified on the object
2066 :param list groupby: list of groupby descriptions by which the records will be grouped.
2067 A groupby description is either a field (then it will be grouped by that field)
2068 or a string 'field:groupby_function'. Right now, the only functions supported
2069 are 'day', 'week', 'month', 'quarter' or 'year', and they only make sense for
2070 date/datetime fields.
2071 :param int offset: optional number of records to skip
2072 :param int limit: optional max number of records to return
2073 :param dict context: context arguments, like lang, time zone.
2074 :param list orderby: optional ``order by`` specification, for
2075 overriding the natural sort ordering of the
2076 groups, see also :py:meth:`~osv.osv.osv.search`
2077 (supported only for many2one fields currently)
2078 :param bool lazy: if true, the results are only grouped by the first groupby and the
2079 remaining groupbys are put in the __context key. If false, all the groupbys are done in one call.
2081 :return: list of dictionaries (one dictionary for each record) containing:
2083 * the values of fields grouped by the fields in ``groupby`` argument
2084 * __domain: list of tuples specifying the search criteria
2085 * __context: dictionary with argument like ``groupby``
2086 :rtype: [{'field_name_1': value, ...}, ...]
2087 :raise AccessError: * if user has no read rights on the requested object
2088 * if user tries to bypass access rules for read on the requested object
2092 self.check_access_rights(cr, uid, 'read')
2093 query = self._where_calc(cr, uid, domain, context=context)
2094 fields = fields or self._columns.keys()
2096 groupby = [groupby] if isinstance(groupby, basestring) else groupby
2097 groupby_list = groupby[:1] if lazy else groupby
2098 annotated_groupbys = [self._read_group_process_groupby(gb, query, context)
2099 for gb in groupby_list]
2100 groupby_fields = [g['field'] for g in annotated_groupbys]
2101 order = orderby or ','.join([g for g in groupby_list])
2102 groupby_dict = {gb['groupby']: gb for gb in annotated_groupbys}
2104 self._apply_ir_rules(cr, uid, query, 'read', context=context)
2105 for gb in groupby_fields:
2106 assert gb in fields, "Fields in 'groupby' must appear in the list of fields to read (perhaps it's missing in the list view?)"
2107 groupby_def = self._columns.get(gb) or (self._inherit_fields.get(gb) and self._inherit_fields.get(gb)[2])
2108 assert groupby_def and groupby_def._classic_write, "Fields in 'groupby' must be regular database-persisted fields (no function or related fields), or function fields with store=True"
2109 if not (gb in self._all_columns):
2110 # Don't allow arbitrary values, as this would be a SQL injection vector!
2111 raise except_orm(_('Invalid group_by'),
2112 _('Invalid group_by specification: "%s".\nA group_by specification must be a list of valid fields.')%(gb,))
2114 aggregated_fields = [
2116 if f not in ('id', 'sequence')
2117 if f not in groupby_fields
2118 if f in self._all_columns
2119 if self._all_columns[f].column._type in ('integer', 'float')
2120 if getattr(self._all_columns[f].column, '_classic_write')]
2122 field_formatter = lambda f: (self._all_columns[f].column.group_operator or 'sum', self._inherits_join_calc(f, query), f)
2123 select_terms = ["%s(%s) AS %s" % field_formatter(f) for f in aggregated_fields]
2125 for gb in annotated_groupbys:
2126 select_terms.append('%s as "%s" ' % (gb['qualified_field'], gb['groupby']))
2128 groupby_terms, orderby_terms = self._read_group_prepare(order, aggregated_fields, annotated_groupbys, query)
2129 from_clause, where_clause, where_clause_params = query.get_sql()
2130 if lazy and (len(groupby_fields) >= 2 or not context.get('group_by_no_leaf')):
2131 count_field = groupby_fields[0] if len(groupby_fields) >= 1 else '_'
2135 prefix_terms = lambda prefix, terms: (prefix + " " + ",".join(terms)) if terms else ''
2136 prefix_term = lambda prefix, term: ('%s %s' % (prefix, term)) if term else ''
2139 SELECT min(%(table)s.id) AS id, count(%(table)s.id) AS %(count_field)s_count %(extra_fields)s
2147 'table': self._table,
2148 'count_field': count_field,
2149 'extra_fields': prefix_terms(',', select_terms),
2150 'from': from_clause,
2151 'where': prefix_term('WHERE', where_clause),
2152 'groupby': prefix_terms('GROUP BY', groupby_terms),
2153 'orderby': prefix_terms('ORDER BY', orderby_terms),
2154 'limit': prefix_term('LIMIT', int(limit) if limit else None),
2155 'offset': prefix_term('OFFSET', int(offset) if offset else None),  # OFFSET must not depend on LIMIT
2157 cr.execute(query, where_clause_params)
2158 fetched_data = cr.dictfetchall()
2160 if not groupby_fields:
2163 many2onefields = [gb['field'] for gb in annotated_groupbys if gb['type'] == 'many2one']
2165 data_ids = [r['id'] for r in fetched_data]
2166 many2onefields = list(set(many2onefields))
2167 data_dict = {d['id']: d for d in self.read(cr, uid, data_ids, many2onefields, context=context)}
2168 for d in fetched_data:
2169 d.update(data_dict[d['id']])
2171 data = map(lambda r: {k: self._read_group_prepare_data(k,v, groupby_dict, context) for k,v in r.iteritems()}, fetched_data)
2172 result = [self._read_group_format_result(d, annotated_groupbys, groupby, groupby_dict, domain, context) for d in data]
2173 if lazy and groupby_fields[0] in self._group_by_full:
2174 # Right now, read_group only fills results in lazy mode (by default).
2175 # If you need to have the empty groups in 'eager' mode, then the
2176 # method _read_group_fill_results needs to be completely reimplemented
2178 result = self._read_group_fill_results(cr, uid, domain, groupby_fields[0], groupby[len(annotated_groupbys):],
2179 aggregated_fields, result, read_group_order=order,
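# Illustrative read_group() call (hypothetical model and fields): total
# amount per month, lazily grouped; note that the groupby field itself must
# appear in `fields`:
#
#     groups = self.pool['sale.order'].read_group(
#         cr, uid, [('state', '!=', 'cancel')],
#         fields=['date_order', 'amount_total'],
#         groupby=['date_order:month'])
#     # each group dict carries the aggregates, a 'date_order_count', and a
#     # '__domain' that selects the records of that group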
2183 def _inherits_join_add(self, current_model, parent_model_name, query):
2185 Add missing table SELECT and JOIN clause to ``query`` for reaching the parent table (no duplicates)
2186 :param current_model: current model object
2187 :param parent_model_name: name of the parent model for which the clauses should be added
2188 :param query: query object on which the JOIN should be added
2190 inherits_field = current_model._inherits[parent_model_name]
2191 parent_model = self.pool[parent_model_name]
2192 parent_alias, parent_alias_statement = query.add_join((current_model._table, parent_model._table, inherits_field, 'id', inherits_field), implicit=True)
2195 def _inherits_join_calc(self, field, query):
2197 Adds missing table select and join clause(s) to ``query`` for reaching
2198 the field coming from an '_inherits' parent table (no duplicates).
2200 :param field: name of inherited field to reach
2201 :param query: query object on which the JOIN should be added
2202 :return: qualified name of field, to be used in SELECT clause
2204 current_table = self
2205 parent_alias = '"%s"' % current_table._table
2206 while field in current_table._inherit_fields and not field in current_table._columns:
2207 parent_model_name = current_table._inherit_fields[field][0]
2208 parent_table = self.pool[parent_model_name]
2209 parent_alias = self._inherits_join_add(current_table, parent_model_name, query)
2210 current_table = parent_table
2211 return '%s."%s"' % (parent_alias, field)
2213 def _parent_store_compute(self, cr):
2214 if not self._parent_store:
2216 _logger.info('Computing parent left and right for table %s...', self._table)
2217 def browse_rec(root, pos=0):
2219 where = self._parent_name+'='+str(root)
2221 where = self._parent_name+' IS NULL'
2222 if self._parent_order:
2223 where += ' order by '+self._parent_order
2224 cr.execute('SELECT id FROM '+self._table+' WHERE '+where)
2226 for id in cr.fetchall():
2227 pos2 = browse_rec(id[0], pos2)
2228 cr.execute('update '+self._table+' set parent_left=%s, parent_right=%s where id=%s', (pos, pos2, root))
2230 query = 'SELECT id FROM '+self._table+' WHERE '+self._parent_name+' IS NULL'
2231 if self._parent_order:
2232 query += ' order by ' + self._parent_order
2235 for (root,) in cr.fetchall():
2236 pos = browse_rec(root, pos)
2237 self.invalidate_cache(cr, SUPERUSER_ID, ['parent_left', 'parent_right'])
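# The parent_left/parent_right values computed above form a "nested set":
# a record B belongs to the subtree of A iff
# A.parent_left <= B.parent_left < A.parent_right, so 'child_of' domains
# can be answered with one range comparison instead of a recursive query.
# Sketch (hypothetical tree, values shown as (parent_left, parent_right)):
#
#     A(0, 5)
#       B(1, 2)
#       C(3, 4)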
2240 def _update_store(self, cr, f, k):
2241 _logger.info("storing computed values of fields.function '%s'", k)
2242 ss = self._columns[k]._symbol_set
2243 update_query = 'UPDATE "%s" SET "%s"=%s WHERE id=%%s' % (self._table, k, ss[0])
2244 cr.execute('select id from '+self._table)
2245 ids_lst = map(lambda x: x[0], cr.fetchall())
2247 iids = ids_lst[:AUTOINIT_RECALCULATE_STORED_FIELDS]
2248 ids_lst = ids_lst[AUTOINIT_RECALCULATE_STORED_FIELDS:]
2249 res = f.get(cr, self, iids, k, SUPERUSER_ID, {})
2250 for key, val in res.items():
2253 # if val is a many2one, just write the ID
2254 if type(val) == tuple:
2256 if val is not False:
2257 cr.execute(update_query, (ss[1](val), key))
2259 def _check_selection_field_value(self, cr, uid, field, value, context=None):
2260 """Raise except_orm if value is not among the valid values for the selection field"""
2261 if self._columns[field]._type == 'reference':
2262 val_model, val_id_str = value.split(',', 1)
2265 val_id = long(val_id_str)
2269 raise except_orm(_('ValidateError'),
2270 _('Invalid value for reference field "%s.%s" (last part must be a non-zero integer): "%s"') % (self._table, field, value))
2274 if isinstance(self._columns[field].selection, (tuple, list)):
2275 if val in dict(self._columns[field].selection):
2277 elif val in dict(self._columns[field].selection(self, cr, uid, context=context)):
2279 raise except_orm(_('ValidateError'),
2280 _('The value "%s" for the field "%s.%s" is not in the selection') % (value, self._name, field))
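# Values of reference fields are strings of the form 'model,id'. A sketch
# of the check above (field name and values are hypothetical, assuming
# 'res.partner' is among the selection values):
#
#     self._check_selection_field_value(cr, uid, 'resource_ref',
#                                       'res.partner,42')    # passes
#     self._check_selection_field_value(cr, uid, 'resource_ref',
#                                       'res.partner,abc')   # raises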
2282 def _check_removed_columns(self, cr, log=False):
2283 # iterate on the database columns to drop the NOT NULL constraints
2284 # of fields which were required but have been removed (or will be added by another module)
2285 columns = [c for c in self._columns if not (isinstance(self._columns[c], fields.function) and not self._columns[c].store)]
2286 columns += MAGIC_COLUMNS
2287 cr.execute("SELECT a.attname, a.attnotnull"
2288 " FROM pg_class c, pg_attribute a"
2289 " WHERE c.relname=%s"
2290 " AND c.oid=a.attrelid"
2291 " AND a.attisdropped=%s"
2292 " AND pg_catalog.format_type(a.atttypid, a.atttypmod) NOT IN ('cid', 'tid', 'oid', 'xid')"
2293 " AND a.attname NOT IN %s", (self._table, False, tuple(columns))),
2295 for column in cr.dictfetchall():
2297 _logger.debug("column %s is in the table %s but not in the corresponding object %s",
2298 column['attname'], self._table, self._name)
2299 if column['attnotnull']:
2300 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, column['attname']))
2301 _schema.debug("Table '%s': column '%s': dropped NOT NULL constraint",
2302 self._table, column['attname'])
2304 def _save_constraint(self, cr, constraint_name, type):
2306 Record the creation of a constraint for this model, to make it possible
2307 to delete it later when the module is uninstalled. Type can be either
2308 'f' or 'u' depending on whether the constraint is a foreign key or not.
2310 if not self._module:
2311 # no need to save constraints for custom models as they're not part
2314 assert type in ('f', 'u')
2316 SELECT 1 FROM ir_model_constraint, ir_module_module
2317 WHERE ir_model_constraint.module=ir_module_module.id
2318 AND ir_model_constraint.name=%s
2319 AND ir_module_module.name=%s
2320 """, (constraint_name, self._module))
2323 INSERT INTO ir_model_constraint
2324 (name, date_init, date_update, module, model, type)
2325 VALUES (%s, now() AT TIME ZONE 'UTC', now() AT TIME ZONE 'UTC',
2326 (SELECT id FROM ir_module_module WHERE name=%s),
2327 (SELECT id FROM ir_model WHERE model=%s), %s)""",
2328 (constraint_name, self._module, self._name, type))
2330 def _save_relation_table(self, cr, relation_table):
2332 Record the creation of a many2many relation table for this model, to make it possible
2333 to delete it later when the module is uninstalled.
2336 SELECT 1 FROM ir_model_relation, ir_module_module
2337 WHERE ir_model_relation.module=ir_module_module.id
2338 AND ir_model_relation.name=%s
2339 AND ir_module_module.name=%s
2340 """, (relation_table, self._module))
2342 cr.execute("""INSERT INTO ir_model_relation (name, date_init, date_update, module, model)
2343 VALUES (%s, now() AT TIME ZONE 'UTC', now() AT TIME ZONE 'UTC',
2344 (SELECT id FROM ir_module_module WHERE name=%s),
2345 (SELECT id FROM ir_model WHERE model=%s))""",
2346 (relation_table, self._module, self._name))
2347 self.invalidate_cache(cr, SUPERUSER_ID)
2349 # checked version: for direct m2o starting from `self`
2350 def _m2o_add_foreign_key_checked(self, source_field, dest_model, ondelete):
2351 assert self.is_transient() or not dest_model.is_transient(), \
2352 'Many2One relationships from non-transient Model to TransientModel are forbidden'
2353 if self.is_transient() and not dest_model.is_transient():
2354 # TransientModel relationships to regular Models are annoying,
2355 # usually because they could block deletion due to the FKs.
2356 # So unless stated otherwise we default them to ondelete=cascade.
2357 ondelete = ondelete or 'cascade'
2358 fk_def = (self._table, source_field, dest_model._table, ondelete or 'set null')
2359 self._foreign_keys.add(fk_def)
2360 _schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s", *fk_def)
2362 # unchecked version: for custom cases, such as m2m relationships
2363 def _m2o_add_foreign_key_unchecked(self, source_table, source_field, dest_model, ondelete):
2364 fk_def = (source_table, source_field, dest_model._table, ondelete or 'set null')
2365 self._foreign_keys.add(fk_def)
2366 _schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s", *fk_def)
2368 def _drop_constraint(self, cr, source_table, constraint_name):
2369 cr.execute("ALTER TABLE %s DROP CONSTRAINT %s" % (source_table,constraint_name))
2371 def _m2o_fix_foreign_key(self, cr, source_table, source_field, dest_model, ondelete):
2372 # Find FK constraint(s) currently established for the m2o field,
2373 # and see whether they are stale or not
2374 cr.execute("""SELECT confdeltype as ondelete_rule, conname as constraint_name,
2375 cl2.relname as foreign_table
2376 FROM pg_constraint as con, pg_class as cl1, pg_class as cl2,
2377 pg_attribute as att1, pg_attribute as att2
2378 WHERE con.conrelid = cl1.oid
2379 AND cl1.relname = %s
2380 AND con.confrelid = cl2.oid
2381 AND array_lower(con.conkey, 1) = 1
2382 AND con.conkey[1] = att1.attnum
2383 AND att1.attrelid = cl1.oid
2384 AND att1.attname = %s
2385 AND array_lower(con.confkey, 1) = 1
2386 AND con.confkey[1] = att2.attnum
2387 AND att2.attrelid = cl2.oid
2388 AND att2.attname = %s
2389 AND con.contype = 'f'""", (source_table, source_field, 'id'))
2390 constraints = cr.dictfetchall()
2392 if len(constraints) == 1:
2393 # Is it the right constraint?
2395 if cons['ondelete_rule'] != POSTGRES_CONFDELTYPES.get((ondelete or 'set null').upper(), 'a')\
2396 or cons['foreign_table'] != dest_model._table:
2397 # Wrong FK: drop it and recreate
2398 _schema.debug("Table '%s': dropping obsolete FK constraint: '%s'",
2399 source_table, cons['constraint_name'])
2400 self._drop_constraint(cr, source_table, cons['constraint_name'])
2402 # it's all good, nothing to do!
2405 # Multiple FKs found for the same field, drop them all, and re-create
2406 for cons in constraints:
2407 _schema.debug("Table '%s': dropping duplicate FK constraints: '%s'",
2408 source_table, cons['constraint_name'])
2409 self._drop_constraint(cr, source_table, cons['constraint_name'])
2411 # (re-)create the FK
2412 self._m2o_add_foreign_key_checked(source_field, dest_model, ondelete)
2415 def _set_default_value_on_column(self, cr, column_name, context=None):
2416 # ideally this should use add_default_value, but it fails
2417 # due to ir.values not being ready
2419 # get old-style default
2420 default = self._defaults.get(column_name)
2421 if callable(default):
2422 default = default(self, cr, SUPERUSER_ID, context)
2424 # get new-style default if there is no old-style one
2426 record = self.new(cr, SUPERUSER_ID, context=context)
2427 field = self._fields[column_name]
2428 field.determine_default(record)
2429 defaults = dict(record._cache)
2430 if column_name in defaults:
2431 default = field.convert_to_write(defaults[column_name])
2433 column = self._columns[column_name]
2434 ss = column._symbol_set
2435 db_default = ss[1](default)
2436 # Write default if non-NULL, except for booleans for which False means
2437 # the same as NULL - this saves us an expensive query on large tables.
2438 write_default = (db_default is not None if column._type != 'boolean'
2441 _logger.debug("Table '%s': setting default value of new column %s to %r",
2442 self._table, column_name, default)
2443 query = 'UPDATE "%s" SET "%s"=%s WHERE "%s" is NULL' % (
2444 self._table, column_name, ss[0], column_name)
2445 cr.execute(query, (db_default,))
2446 # this is a disgrace
2449 def _auto_init(self, cr, context=None):
2452 Call _field_create and, unless _auto is False:
2454 - create the corresponding table in database for the model,
2455 - possibly add the parent columns in database,
2456 - possibly add the columns 'create_uid', 'create_date', 'write_uid',
2457 'write_date' in database if _log_access is True (the default),
2458 - report on database columns that no longer exist in _columns,
2459 - remove NOT NULL constraints that no longer apply,
2460 - alter existing database columns to match _columns,
2461 - create database tables to match _columns,
2462 - add database indices to match _columns,
2463 - save in self._foreign_keys a list of foreign keys to create (see
2467 self._foreign_keys = set()
2468 raise_on_invalid_object_name(self._name)
2471 store_compute = False
2472 stored_fields = [] # new-style stored fields with compute
2474 update_custom_fields = context.get('update_custom_fields', False)
2475 self._field_create(cr, context=context)
2476 create = not self._table_exist(cr)
2480 self._create_table(cr)
2483 cr.execute('SELECT min(id) FROM "%s"' % (self._table,))
2484 has_rows = cr.fetchone()[0] is not None
2487 if self._parent_store:
2488 if not self._parent_columns_exist(cr):
2489 self._create_parent_columns(cr)
2490 store_compute = True
2492 self._check_removed_columns(cr, log=False)
2494 # iterate on the "object columns"
2495 column_data = self._select_column_data(cr)
2497 for k, f in self._columns.iteritems():
2498 if k == 'id': # FIXME: maybe id should be a regular column?
2500 # Don't update custom (also called manual) fields
2501 if f.manual and not update_custom_fields:
2504 if isinstance(f, fields.one2many):
2505 self._o2m_raise_on_missing_reference(cr, f)
2507 elif isinstance(f, fields.many2many):
2508 self._m2m_raise_or_create_relation(cr, f)
2511 res = column_data.get(k)
2513 # The field is not found as-is in the database; check whether it
2514 # exists under an old name.
2515 if not res and hasattr(f, 'oldname'):
2516 res = column_data.get(f.oldname)
2518 cr.execute('ALTER TABLE "%s" RENAME "%s" TO "%s"' % (self._table, f.oldname, k))
2520 column_data[k] = res
2521 _schema.debug("Table '%s': renamed column '%s' to '%s'",
2522 self._table, f.oldname, k)
2524 # The field already exists in the database. Possibly
2525 # change its type, rename it, drop it or change its constraints.
2528 f_pg_type = res['typname']
2529 f_pg_size = res['size']
2530 f_pg_notnull = res['attnotnull']
2531 if isinstance(f, fields.function) and not f.store and\
2532 not getattr(f, 'nodrop', False):
2533 _logger.info('column %s (%s) converted to a function, removed from table %s',
2534 k, f.string, self._table)
2535 cr.execute('ALTER TABLE "%s" DROP COLUMN "%s" CASCADE' % (self._table, k))
2537 _schema.debug("Table '%s': dropped column '%s' with cascade",
2541 f_obj_type = get_pg_type(f) and get_pg_type(f)[0]
2546 ('text', 'char', pg_varchar(f.size), '::%s' % pg_varchar(f.size)),
2547 ('varchar', 'text', 'TEXT', ''),
2548 ('int4', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
2549 ('date', 'datetime', 'TIMESTAMP', '::TIMESTAMP'),
2550 ('timestamp', 'date', 'date', '::date'),
2551 ('numeric', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
2552 ('float8', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
2554 if f_pg_type == 'varchar' and f._type == 'char' and f_pg_size and (f.size is None or f_pg_size < f.size):
2556 with cr.savepoint():
2557 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" TYPE %s' % (self._table, k, pg_varchar(f.size)))
2558 except psycopg2.NotSupportedError:
2559 # An in-place ALTER TABLE cannot be done because a view depends on this field.
2560 # Do a manual copy. This will drop the view (it will be recreated later).
2561 cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO temp_change_size' % (self._table, k))
2562 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, pg_varchar(f.size)))
2563 cr.execute('UPDATE "%s" SET "%s"=temp_change_size::%s' % (self._table, k, pg_varchar(f.size)))
2564 cr.execute('ALTER TABLE "%s" DROP COLUMN temp_change_size CASCADE' % (self._table,))
2566 _schema.debug("Table '%s': column '%s' (type varchar) changed size from %s to %s",
2567 self._table, k, f_pg_size or 'unlimited', f.size or 'unlimited')
2569 if (f_pg_type==c[0]) and (f._type==c[1]):
2570 if f_pg_type != f_obj_type:
2572 cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO __temp_type_cast' % (self._table, k))
2573 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, c[2]))
2574 cr.execute(('UPDATE "%s" SET "%s"= __temp_type_cast'+c[3]) % (self._table, k))
2575 cr.execute('ALTER TABLE "%s" DROP COLUMN __temp_type_cast CASCADE' % (self._table,))
2577 _schema.debug("Table '%s': column '%s' changed type from %s to %s",
2578 self._table, k, c[0], c[1])
2581 if f_pg_type != f_obj_type:
2585 newname = k + '_moved' + str(i)
2586 cr.execute("SELECT count(1) FROM pg_class c,pg_attribute a " \
2587 "WHERE c.relname=%s " \
2588 "AND a.attname=%s " \
2589 "AND c.oid=a.attrelid ", (self._table, newname))
2590 if not cr.fetchone()[0]:
2594 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, k))
2595 cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO "%s"' % (self._table, k, newname))
2596 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, get_pg_type(f)[1]))
2597 cr.execute("COMMENT ON COLUMN %s.\"%s\" IS %%s" % (self._table, k), (f.string,))
2598 _schema.debug("Table '%s': column '%s' has changed type (DB=%s, def=%s), data moved to column %s !",
2599 self._table, k, f_pg_type, f._type, newname)
2601 # if the field is required and hasn't got a NOT NULL constraint
2602 if f.required and f_pg_notnull == 0:
2604 self._set_default_value_on_column(cr, k, context=context)
2605 # add the NOT NULL constraint
2607 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k), log_exceptions=False)
2609 _schema.debug("Table '%s': column '%s': added NOT NULL constraint",
2612 msg = "Table '%s': unable to set a NOT NULL constraint on column '%s' !\n"\
2613 "If you want to have it, you should update the records and execute manually:\n"\
2614 "ALTER TABLE %s ALTER COLUMN %s SET NOT NULL"
2615 _schema.warning(msg, self._table, k, self._table, k)
2617 elif not f.required and f_pg_notnull == 1:
2618 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, k))
2620 _schema.debug("Table '%s': column '%s': dropped NOT NULL constraint",
2623 indexname = '%s_%s_index' % (self._table, k)
2624 cr.execute("SELECT indexname FROM pg_indexes WHERE indexname = %s and tablename = %s", (indexname, self._table))
2625 res2 = cr.dictfetchall()
2626 if not res2 and f.select:
2627 cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (self._table, k, self._table, k))
2629 if f._type == 'text':
2630 # FIXME: for fields.text columns we should try creating GIN indexes instead (seems most suitable for an ERP context)
2631 msg = "Table '%s': Adding (b-tree) index for %s column '%s'."\
2632 "This is probably useless (does not work for fulltext search) and prevents INSERTs of long texts"\
2633 " because there is a length limit for indexable btree values!\n"\
2634 "Use a search view instead if you simply want to make the field searchable."
2635 _schema.warning(msg, self._table, f._type, k)
2636 if res2 and not f.select:
2637 cr.execute('DROP INDEX "%s_%s_index"' % (self._table, k))
2639 msg = "Table '%s': dropping index for column '%s' of type '%s' as it is not required anymore"
2640 _schema.debug(msg, self._table, k, f._type)
2642 if isinstance(f, fields.many2one) or (isinstance(f, fields.function) and f._type == 'many2one' and f.store):
2643 dest_model = self.pool[f._obj]
2644 if dest_model._auto and dest_model._table != 'ir_actions':
2645 self._m2o_fix_foreign_key(cr, self._table, k, dest_model, f.ondelete)
2647 # The field doesn't exist in database. Create it if necessary.
2649 if not isinstance(f, fields.function) or f.store:
2650 # add the missing field
2651 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, get_pg_type(f)[1]))
2652 cr.execute("COMMENT ON COLUMN %s.\"%s\" IS %%s" % (self._table, k), (f.string,))
2653 _schema.debug("Table '%s': added column '%s' with definition=%s",
2654 self._table, k, get_pg_type(f)[1])
2658 self._set_default_value_on_column(cr, k, context=context)
2660 # remember the functions to call for the stored fields
2661 if isinstance(f, fields.function):
2663 if f.store is not True: # i.e. if f.store is a dict
2664 order = f.store[f.store.keys()[0]][2]
2665 todo_end.append((order, self._update_store, (f, k)))
2667 # remember new-style stored fields with compute method
2668 if k in self._fields and self._fields[k].depends:
2669 stored_fields.append(self._fields[k])
2671 # and add constraints if needed
2672 if isinstance(f, fields.many2one) or (isinstance(f, fields.function) and f._type == 'many2one' and f.store):
2673 if f._obj not in self.pool:
2674 raise except_orm('Programming Error', 'There is no reference available for %s' % (f._obj,))
2675 dest_model = self.pool[f._obj]
2676 ref = dest_model._table
2677 # ir_actions is inherited, so foreign keys don't work on it
2678 if ref != 'ir_actions':
2679 self._m2o_add_foreign_key_checked(k, dest_model, f.ondelete)
2681 cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (self._table, k, self._table, k))
2685 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k))
2686 _schema.debug("Table '%s': column '%s': added a NOT NULL constraint",
2689 msg = "WARNING: unable to set column %s of table %s not null !\n"\
2690 "Try to re-run: openerp-server --update=module\n"\
2691 "If it doesn't work, update records and execute manually:\n"\
2692 "ALTER TABLE %s ALTER COLUMN %s SET NOT NULL"
2693 _logger.warning(msg, k, self._table, self._table, k, exc_info=True)
2697 cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (self._table,))
2698 create = not bool(cr.fetchone())
2700 cr.commit() # start a new transaction
2703 self._add_sql_constraints(cr)
2706 self._execute_sql(cr)
2709 self._parent_store_compute(cr)
2713 # trigger computation of new-style stored fields with a compute
2715 _logger.info("Storing computed values of %s fields %s",
2716 self._name, ', '.join(sorted(f.name for f in stored_fields)))
2717 recs = self.browse(cr, SUPERUSER_ID, [], {'active_test': False})
2718 recs = recs.search([])
2720 map(recs._recompute_todo, stored_fields)
2723 todo_end.append((1000, func, ()))
2727 def _auto_end(self, cr, context=None):
2728 """ Create the foreign keys recorded by _auto_init. """
2729 for t, k, r, d in self._foreign_keys:
2730 cr.execute('ALTER TABLE "%s" ADD FOREIGN KEY ("%s") REFERENCES "%s" ON DELETE %s' % (t, k, r, d))
2731 self._save_constraint(cr, "%s_%s_fkey" % (t, k), 'f')
2733 del self._foreign_keys
2736 def _table_exist(self, cr):
2737 cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (self._table,))
2741 def _create_table(self, cr):
2742 cr.execute('CREATE TABLE "%s" (id SERIAL NOT NULL, PRIMARY KEY(id))' % (self._table,))
2743 cr.execute(("COMMENT ON TABLE \"%s\" IS %%s" % self._table), (self._description,))
2744 _schema.debug("Table '%s': created", self._table)
2747 def _parent_columns_exist(self, cr):
2748 cr.execute("""SELECT c.relname
2749 FROM pg_class c, pg_attribute a
2750 WHERE c.relname=%s AND a.attname=%s AND c.oid=a.attrelid
2751 """, (self._table, 'parent_left'))
2755 def _create_parent_columns(self, cr):
2756 cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_left" INTEGER' % (self._table,))
2757 cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_right" INTEGER' % (self._table,))
2758 if 'parent_left' not in self._columns:
2759 _logger.error('create a column parent_left on object %s: fields.integer(\'Left Parent\', select=1)',
2761 _schema.debug("Table '%s': added column '%s' with definition=%s",
2762 self._table, 'parent_left', 'INTEGER')
2763 elif not self._columns['parent_left'].select:
2764 _logger.error('parent_left column on object %s must be indexed! Add select=1 to the field definition)',
2766 if 'parent_right' not in self._columns:
2767 _logger.error('create a column parent_right on object %s: fields.integer(\'Right Parent\', select=1)',
2769 _schema.debug("Table '%s': added column '%s' with definition=%s",
2770 self._table, 'parent_right', 'INTEGER')
2771 elif not self._columns['parent_right'].select:
2772 _logger.error('parent_right column on object %s must be indexed! Add select=1 to the field definition)',
2774 if self._columns[self._parent_name].ondelete not in ('cascade', 'restrict'):
2775 _logger.error("The column %s on object %s must be set as ondelete='cascade' or 'restrict'",
2776 self._parent_name, self._name)
2781 def _select_column_data(self, cr):
2782 # attlen is the number of bytes necessary to represent the type when
2783 # the type has a fixed size. If the type has a varying size attlen is
2784 # -1 and atttypmod is the size limit + 4, or -1 if there is no limit.
2785 cr.execute("SELECT c.relname,a.attname,a.attlen,a.atttypmod,a.attnotnull,a.atthasdef,t.typname,CASE WHEN a.attlen=-1 THEN (CASE WHEN a.atttypmod=-1 THEN 0 ELSE a.atttypmod-4 END) ELSE a.attlen END as size " \
2786 "FROM pg_class c,pg_attribute a,pg_type t " \
2787 "WHERE c.relname=%s " \
2788 "AND c.oid=a.attrelid " \
2789 "AND a.atttypid=t.oid", (self._table,))
2790 return dict(map(lambda x: (x['attname'], x),cr.dictfetchall()))
2793 def _o2m_raise_on_missing_reference(self, cr, f):
2794 # TODO this check should be a method on fields.one2many.
2795 if f._obj in self.pool:
2796 other = self.pool[f._obj]
2797 # TODO the condition could use fields_get_keys().
2798 if f._fields_id not in other._columns.keys():
2799 if f._fields_id not in other._inherit_fields.keys():
2800 raise except_orm('Programming Error', "There is no reference field '%s' found for '%s'" % (f._fields_id, f._obj,))
2802 def _m2m_raise_or_create_relation(self, cr, f):
2803 m2m_tbl, col1, col2 = f._sql_names(self)
2804 # do not create relations for custom fields as they do not belong to a module
2805 # they will be automatically removed when dropping the corresponding ir.model.field
2806 # table names for custom relations all start with x_, see __init__
2807 if not m2m_tbl.startswith('x_'):
2808 self._save_relation_table(cr, m2m_tbl)
2809 cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (m2m_tbl,))
2810 if not cr.dictfetchall():
2811 if f._obj not in self.pool:
2812 raise except_orm('Programming Error', 'Many2Many destination model does not exist: `%s`' % (f._obj,))
2813 dest_model = self.pool[f._obj]
2814 ref = dest_model._table
2815 cr.execute('CREATE TABLE "%s" ("%s" INTEGER NOT NULL, "%s" INTEGER NOT NULL, UNIQUE("%s","%s"))' % (m2m_tbl, col1, col2, col1, col2))
2816 # create foreign key references with ondelete=cascade, unless the targets are SQL views
2817 cr.execute("SELECT relkind FROM pg_class WHERE relkind IN ('v') AND relname=%s", (ref,))
2818 if not cr.fetchall():
2819 self._m2o_add_foreign_key_unchecked(m2m_tbl, col2, dest_model, 'cascade')
2820 cr.execute("SELECT relkind FROM pg_class WHERE relkind IN ('v') AND relname=%s", (self._table,))
2821 if not cr.fetchall():
2822 self._m2o_add_foreign_key_unchecked(m2m_tbl, col1, self, 'cascade')
2824 cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (m2m_tbl, col1, m2m_tbl, col1))
2825 cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (m2m_tbl, col2, m2m_tbl, col2))
2826 cr.execute("COMMENT ON TABLE \"%s\" IS 'RELATION BETWEEN %s AND %s'" % (m2m_tbl, self._table, ref))
2828 _schema.debug("Create table '%s': m2m relation between '%s' and '%s'", m2m_tbl, self._table, ref)
2831 def _add_sql_constraints(self, cr):
2834 Modify this model's database table constraints so they match the ones in _sql_constraints.
2838 def unify_cons_text(txt):
2839 return txt.lower().replace(', ',',').replace(' (','(')
2841 for (key, con, _) in self._sql_constraints:
2842 conname = '%s_%s' % (self._table, key)
2844 self._save_constraint(cr, conname, 'u')
2845 cr.execute("SELECT conname, pg_catalog.pg_get_constraintdef(oid, true) as condef FROM pg_constraint where conname=%s", (conname,))
2846 existing_constraints = cr.dictfetchall()
2850 'query': 'ALTER TABLE "%s" DROP CONSTRAINT "%s"' % (self._table, conname, ),
2851 'msg_ok': "Table '%s': dropped constraint '%s'. Reason: its definition changed from '%%s' to '%s'" % (
2852 self._table, conname, con),
2853 'msg_err': "Table '%s': unable to drop \'%s\' constraint !" % (self._table, con),
2858 'query': 'ALTER TABLE "%s" ADD CONSTRAINT "%s" %s' % (self._table, conname, con,),
2859 'msg_ok': "Table '%s': added constraint '%s' with definition=%s" % (self._table, conname, con),
2860 'msg_err': "Table '%s': unable to add \'%s\' constraint !\n If you want to have it, you should update the records and execute manually:\n%%s" % (
2866 if not existing_constraints:
2867 # constraint does not exist:
2868 sql_actions['add']['execute'] = True
2869 sql_actions['add']['msg_err'] = sql_actions['add']['msg_err'] % (sql_actions['add']['query'], )
2870 elif unify_cons_text(con) not in [unify_cons_text(item['condef']) for item in existing_constraints]:
2871 # constraint exists but its definition has changed:
2872 sql_actions['drop']['execute'] = True
2873 sql_actions['drop']['msg_ok'] = sql_actions['drop']['msg_ok'] % (existing_constraints[0]['condef'].lower(), )
2874 sql_actions['add']['execute'] = True
2875 sql_actions['add']['msg_err'] = sql_actions['add']['msg_err'] % (sql_actions['add']['query'], )
2877 # we need to add the constraint:
2878 sql_actions = [item for item in sql_actions.values()]
2879 sql_actions.sort(key=lambda x: x['order'])
2880 for sql_action in [action for action in sql_actions if action['execute']]:
2882 cr.execute(sql_action['query'])
2884 _schema.debug(sql_action['msg_ok'])
2886 _schema.warning(sql_action['msg_err'])
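# Models declare table-level constraints as (key, SQL, message) triples in
# _sql_constraints; the loop above keeps the database in sync with them.
# Minimal sketch (hypothetical model):
#
#     class Course(osv.Model):
#         _name = 'x.course'
#         _columns = {'name': fields.char('Name', required=True)}
#         _sql_constraints = [
#             ('name_uniq', 'UNIQUE(name)', 'The course name must be unique!'),
#         ]
#     # resulting constraint name in the database: "x_course_name_uniq"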
2890 def _execute_sql(self, cr):
2891 """ Execute the SQL code from the _sql attribute (if any)."""
2892 if hasattr(self, "_sql"):
2893 for line in self._sql.split(';'):
2894 line2 = line.replace('\n', '').strip()
2900 # Update objects that use this one, to update their _inherits fields
2904 def _inherits_reload_src(cls):
2905 """ Recompute the _inherit_fields mapping on each _inherits'd child model."""
2906 for model in cls.pool.values():
2907 if cls._name in model._inherits:
2908 model._inherits_reload()
2911 def _inherits_reload(cls):
2912 """ Recompute the _inherit_fields mapping.
2914 This will also call itself on each inherits'd child model.
2918 for table in cls._inherits:
2919 other = cls.pool[table]
2920 for col in other._columns.keys():
2921 res[col] = (table, cls._inherits[table], other._columns[col], table)
2922 for col in other._inherit_fields.keys():
2923 res[col] = (table, cls._inherits[table], other._inherit_fields[col][2], other._inherit_fields[col][3])
2924 cls._inherit_fields = res
2925 cls._all_columns = cls._get_column_infos()
2927 # interface columns with new-style fields
2928 for attr, column in cls._columns.items():
2929 if attr not in cls._fields:
2930 cls._add_field(attr, column.to_field())
2932 # interface inherited fields with new-style fields (note that the
2933 # reverse order is for being consistent with _all_columns above)
2934 for parent_model, parent_field in reversed(cls._inherits.items()):
2935 for attr, field in cls.pool[parent_model]._fields.iteritems():
2936 if attr not in cls._fields:
2937 new_field = field.copy(related=(parent_field, attr), _origin=field)
2938 cls._add_field(attr, new_field)
2940 cls._inherits_reload_src()
2943 def _get_column_infos(cls):
2944 """Returns a dict mapping all fields names (direct fields and
2945 inherited field via _inherits) to a ``column_info`` struct
2946 giving detailed columns """
2948 # do not swap the for loops, since local fields may hide inherited ones!
2949 for k, (parent, m2o, col, original_parent) in cls._inherit_fields.iteritems():
2950 result[k] = fields.column_info(k, col, parent, m2o, original_parent)
2951 for k, col in cls._columns.iteritems():
2952 result[k] = fields.column_info(k, col)
2956 def _inherits_check(cls):
2957 for table, field_name in cls._inherits.items():
2958 if field_name not in cls._columns:
2959 _logger.info('Missing many2one field definition for _inherits reference "%s" in "%s", using default one.', field_name, cls._name)
2960 cls._columns[field_name] = fields.many2one(table, string="Automatically created field to link to parent %s" % table,
2961 required=True, ondelete="cascade")
2962 elif not cls._columns[field_name].required or cls._columns[field_name].ondelete.lower() not in ("cascade", "restrict"):
2963 _logger.warning('Field definition for _inherits reference "%s" in "%s" must be marked as "required" with ondelete="cascade" or "restrict", forcing it to required + cascade.', field_name, cls._name)
2964 cls._columns[field_name].required = True
2965 cls._columns[field_name].ondelete = "cascade"
2967 # reflect fields with delegate=True in dictionary cls._inherits
2968 for field in cls._fields.itervalues():
2969 if field.type == 'many2one' and not field.related and field.delegate:
2970 if not field.required:
2971 _logger.warning("Field %s with delegate=True must be required.", field)
2972 field.required = True
2973 if field.ondelete.lower() not in ('cascade', 'restrict'):
2974 field.ondelete = 'cascade'
2975 cls._inherits[field.comodel_name] = field.name
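# _inherits declares delegation inheritance: the child owns a required
# many2one to the parent and transparently exposes the parent's fields,
# which is exactly what the checks above enforce. Minimal sketch
# (hypothetical models):
#
#     class LibraryBook(osv.Model):
#         _name = 'x.library.book'
#         _inherits = {'x.product': 'product_id'}
#         _columns = {
#             'product_id': fields.many2one('x.product', 'Product',
#                                           required=True,
#                                           ondelete='cascade'),
#         }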
2978 def _prepare_setup_fields(self):
2979 """ Prepare the setup of fields once the models have been loaded. """
2980 for field in self._fields.itervalues():
2984 def _setup_fields(self, partial=False):
2985 """ Setup the fields (dependency triggers, etc). """
2986 for field in self._fields.itervalues():
2987 if partial and field.manual and \
2988 field.relational and field.comodel_name not in self.pool:
2989 # do not set up manual fields that refer to unknown models
2991 field.setup(self.env)
2993 # group fields by compute to determine field.computed_fields
2994 fields_by_compute = defaultdict(list)
2995 for field in self._fields.itervalues():
2997 field.computed_fields = fields_by_compute[field.compute]
2998 field.computed_fields.append(field)
3000 field.computed_fields = []
3002 def fields_get(self, cr, user, allfields=None, context=None, write_access=True):
3003 """ fields_get([fields])
3005 Return the definition of each field.
3007 The returned value is a dictionary (indexed by field name) of
3008 dictionaries. The _inherits'd fields are included. The string, help,
3009 and selection (if present) attributes are translated.
3011 :param cr: database cursor
3012 :param user: current user id
3013 :param allfields: list of fields
3014 :param context: context arguments, like lang, time zone
3015 :return: dictionary of field dictionaries, each one describing a field of the business object
3016 :raise AccessError: * if user has no create/write rights on the requested object
3019 recs = self.browse(cr, user, [], context)
3022 for fname, field in self._fields.iteritems():
3023 if allfields and fname not in allfields:
3025 if field.groups and not recs.user_has_groups(field.groups):
3027 res[fname] = field.get_description(recs.env)
3029 # if user cannot create or modify records, make all fields readonly
3030 has_access = functools.partial(recs.check_access_rights, raise_exception=False)
3031 if not (has_access('write') or has_access('create')):
3032 for description in res.itervalues():
3033 description['readonly'] = True
3034 description['states'] = {}
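# Illustrative fields_get() call (hypothetical model and field):
#
#     descr = self.pool['res.partner'].fields_get(cr, uid,
#                                                 allfields=['name'])
#     # descr['name'] is a dict of field attributes, e.g.
#     # {'type': 'char', 'string': 'Name', 'required': True, ...}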
3038 def get_empty_list_help(self, cr, user, help, context=None):
3039 """ Generic method giving the help message displayed when having
3040 no result to display in a list or kanban view. By default it returns
3041 the help given as a parameter, which is generally the help message
3042 defined in the action.
3046 def check_field_access_rights(self, cr, user, operation, fields, context=None):
3048 Check the user access rights on the given fields. This raises Access
3049 Denied if the user does not have the rights. Otherwise it returns the
3050 fields (as-is if ``fields`` is not falsy, or the readable/writable
3051 fields if it is falsy).
3053 if user == SUPERUSER_ID:
3054 return fields or list(self._fields)
3057 """ determine whether user has access to field `fname` """
3058 field = self._fields.get(fname)
3059 if field and field.groups:
3060 return self.user_has_groups(cr, user, groups=field.groups, context=context)
3065 fields = filter(valid, self._fields)
3067 invalid_fields = set(filter(lambda name: not valid(name), fields))
3069 _logger.warning('Access Denied by ACLs for operation: %s, uid: %s, model: %s, fields: %s',
3070 operation, user, self._name, ', '.join(invalid_fields))
3072 _('The requested operation cannot be completed due to security restrictions. '
3073 'Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
3074 (self._description, operation))
3078 # add explicit old-style implementation to read()
3080 def read(self, cr, user, ids, fields=None, context=None, load='_classic_read'):
3081 records = self.browse(cr, user, ids, context)
3082 result = BaseModel.read(records, fields, load=load)
3083 return result if isinstance(ids, list) else (bool(result) and result[0])
3085 # new-style implementation of read()
3087 def read(self, fields=None, load='_classic_read'):
3090 Reads the requested fields for the records in `self`, low-level/RPC
3091 method. In Python code, prefer :meth:`~.browse`.
3093 :param fields: list of field names to return (default is all fields)
3094 :return: a list of dictionaries mapping field names to their values,
3095 with one dictionary per record
3096 :raise AccessError: if user has no read rights on some of the given records
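For instance (an illustrative sketch; the records and values are hypothetical)::

    records.read(['name'])
    # -> [{'id': 1, 'name': 'Agrolait'}, {'id': 2, 'name': 'ASUSTeK'}]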
3099 # check access rights
3100 self.check_access_rights('read')
3101 fields = self.check_field_access_rights('read', fields)
3103 # split fields into stored and computed fields
3104 stored, computed = [], []
3105 for name in fields:
3106 if name in self._columns:
3107 stored.append(name)
3108 elif name in self._fields:
3109 computed.append(name)
3110 else:
3111 _logger.warning("%s.read() with unknown field '%s'", self._name, name)
3113 # fetch stored fields from the database to the cache
3114 self._read_from_database(stored)
3116 # retrieve results from records; this takes values from the cache and
3117 # computes remaining fields
3118 result = []
3119 name_fields = [(name, self._fields[name]) for name in (stored + computed)]
3120 use_name_get = (load == '_classic_read')
3121 for record in self:
3122 try:
3123 values = {'id': record.id}
3124 for name, field in name_fields:
3125 values[name] = field.convert_to_read(record[name], use_name_get)
3126 result.append(values)
3127 except MissingError:
3128 pass
3130 return result
3133 def _prefetch_field(self, field):
3134 """ Read from the database in order to fetch `field` (:class:`Field`
3135 instance) for `self` in cache.
3137 # fetch the records of this model without field_name in their cache
3138 records = self._in_cache_without(field)
3140 # by default, simply fetch field
3141 fnames = {field.name}
3143 if self.env.in_draft:
3144 # we may be doing an onchange, do not prefetch other fields
3145 pass
3146 elif field in self.env.todo:
3147 # field must be recomputed, do not prefetch records to recompute
3148 records -= self.env.todo[field]
3149 elif self._columns[field.name]._prefetch:
3150 # here we can optimize: prefetch all classic and many2one fields
3151 fnames = set(fname
3152 for fname, fcolumn in self._columns.iteritems()
3153 if fcolumn._prefetch)
3155 # fetch records with read()
3156 assert self in records and field.name in fnames
3157 result = []
3158 try:
3159 result = records.read(list(fnames), load='_classic_write')
3160 except AccessError:
3161 pass
3163 # check the cache, and update it if necessary
3164 if not self._cache.contains(field):
3165 for values in result:
3166 record = self.browse(values.pop('id'))
3167 record._cache.update(record._convert_to_cache(values, validate=False))
3168 if not self._cache.contains(field):
3169 e = AccessError("No value found for %s.%s" % (self, field.name))
3170 self._cache[field] = FailedValue(e)
3173 def _read_from_database(self, field_names):
3174 """ Read the given fields of the records in `self` from the database,
3175 and store them in cache. Access errors are also stored in cache.
3178 cr, user, context = env.args
3180 # FIXME: The query construction needs to be rewritten using the internal Query
3181 # object, as in search(), to avoid ambiguous column references when
3182 # reading/sorting on a table that is auto_joined to another table with
3183 # common columns (e.g. the magical columns)
3185 # Construct a clause for the security rules.
3186 # 'tables' holds the list of tables necessary for the SELECT, including
3187 # the ir.rule clauses, and contains at least self._table.
3188 rule_clause, rule_params, tables = env['ir.rule'].domain_get(self._name, 'read')
3190 # determine the fields that are stored as columns in self._table
3191 fields_pre = [f for f in field_names if self._columns[f]._classic_write]
3193 # we need fully-qualified column names in case len(tables) > 1
3194 def qualify(f):
3195 if isinstance(self._columns.get(f), fields.binary) and \
3196 context.get('bin_size_%s' % f, context.get('bin_size')):
3197 # PG 9.2 introduces conflicting pg_size_pretty(numeric) -> need ::cast
3198 return 'pg_size_pretty(length(%s."%s")::bigint) as "%s"' % (self._table, f, f)
3199 else:
3200 return '%s."%s"' % (self._table, f)
3201 qual_names = map(qualify, set(fields_pre + ['id']))
3203 query = """ SELECT %(qual_names)s FROM %(tables)s
3204 WHERE %(table)s.id IN %%s AND (%(extra)s)
3205 ORDER BY %(order)s
3206 """ % {
3207 'qual_names': ",".join(qual_names),
3208 'tables': ",".join(tables),
3209 'table': self._table,
3210 'extra': " OR ".join(rule_clause) if rule_clause else "TRUE",
3211 'order': self._parent_order or self._order,
3212 }
3214 result = []
3215 for sub_ids in cr.split_for_in_conditions(self.ids):
3216 cr.execute(query, [tuple(sub_ids)] + rule_params)
3217 result.extend(cr.dictfetchall())
3219 ids = [vals['id'] for vals in result]
3222 # translate the fields if necessary
3223 if context.get('lang'):
3224 ir_translation = env['ir.translation']
3225 for f in fields_pre:
3226 if self._columns[f].translate:
3227 #TODO: optimize out of this loop
3228 res_trans = ir_translation._get_ids(
3229 '%s,%s' % (self._name, f), 'model', context['lang'], ids)
3230 for vals in result:
3231 vals[f] = res_trans.get(vals['id'], False) or vals[f]
3233 # apply the symbol_get functions of the fields we just read
3234 for f in fields_pre:
3235 symbol_get = self._columns[f]._symbol_get
3236 if symbol_get:
3237 for vals in result:
3238 vals[f] = symbol_get(vals[f])
3240 # store result in cache for POST fields
3241 for vals in result:
3242 record = self.browse(vals['id'])
3243 record._cache.update(record._convert_to_cache(vals, validate=False))
3245 # determine the fields that must be processed now
3246 fields_post = [f for f in field_names if not self._columns[f]._classic_write]
3248 # Compute POST fields, grouped by multi
3249 by_multi = defaultdict(list)
3250 for f in fields_post:
3251 by_multi[self._columns[f]._multi].append(f)
3253 for multi, fs in by_multi.iteritems():
3254 if multi:
3255 res2 = self._columns[fs[0]].get(cr, self._model, ids, fs, user, context=context, values=result)
3256 assert res2 is not None, \
3257 'The function field "%s" on the "%s" model returned None\n' \
3258 '(a dictionary was expected).' % (fs[0], self._name)
3259 for vals in result:
3260 # TOCHECK: why we get a string instead of a dict in python 2.6
3261 # if isinstance(res2[vals['id']], str): res2[vals['id']] = eval(res2[vals['id']])
3262 multi_fields = res2.get(vals['id'], {})
3265 vals[f] = multi_fields.get(f, [])
3268 res2 = self._columns[f].get(cr, self._model, ids, f, user, context=context, values=result)
3271 vals[f] = res2[vals['id']]
3275 # Warn about deprecated fields now that fields_pre and fields_post are computed
3276 for f in field_names:
3277 column = self._columns[f]
3278 if column.deprecated:
3279 _logger.warning('Field %s.%s is deprecated: %s', self._name, f, column.deprecated)
3281 # store result in cache
3282 for vals in result:
3283 record = self.browse(vals.pop('id'))
3284 record._cache.update(record._convert_to_cache(vals, validate=False))
3286 # store failed values in cache for the records that could not be read
3287 fetched = self.browse(ids)
3288 missing = self - fetched
3290 extras = fetched - self
3291 if extras:
3292 raise AccessError(
3293 _("Database fetch misses ids ({}) and has extra ids ({}), may be caused by a type incoherence in a previous request").format(
3294 ', '.join(map(repr, missing._ids)),
3295 ', '.join(map(repr, extras._ids)),
3296 ))
3297 # store an access error exception in existing records
3298 exc = AccessError(
3299 _('The requested operation cannot be completed due to security restrictions. Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
3300 (self._name, 'read')
3302 forbidden = missing.exists()
3303 forbidden._cache.update(FailedValue(exc))
3304 # store a missing error exception in non-existing records
3305 exc = MissingError(
3306 _('One of the documents you are trying to access has been deleted, please try again after refreshing.')
3308 (missing - forbidden)._cache.update(FailedValue(exc))
3311 def get_metadata(self):
3313 Returns some metadata about the given records.
3315 :return: list of ownership dictionaries for each requested record
3316 :rtype: list of dictionaries with the following keys:
3319 * create_uid: user who created the record
3320 * create_date: date when the record was created
3321 * write_uid: last user who changed the record
3322 * write_date: date of the last change to the record
3323 * xmlid: XML ID to use to refer to this record (if there is one), in format ``module.name``
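For example, a single record might yield (all values are illustrative)::

    [{'id': 42,
      'create_uid': (1, 'Administrator'),
      'create_date': '2014-06-04 10:00:00',
      'write_uid': (1, 'Administrator'),
      'write_date': '2014-06-04 10:05:00',
      'xmlid': 'base.main_company'}]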
3324 """
3325 fields = ['id']
3326 if self._log_access:
3327 fields += ['create_uid', 'create_date', 'write_uid', 'write_date']
3328 quoted_table = '"%s"' % self._table
3329 fields_str = ",".join('%s.%s' % (quoted_table, field) for field in fields)
3330 query = '''SELECT %s, __imd.module, __imd.name
3331 FROM %s LEFT JOIN ir_model_data __imd
3332 ON (__imd.model = %%s and __imd.res_id = %s.id)
3333 WHERE %s.id IN %%s''' % (fields_str, quoted_table, quoted_table, quoted_table)
3334 self._cr.execute(query, (self._name, tuple(self.ids)))
3335 res = self._cr.dictfetchall()
3337 uids = set(r[k] for r in res for k in ['write_uid', 'create_uid'] if r.get(k))
3338 names = dict(self.env['res.users'].browse(uids).name_get())
3340 for r in res:
3341 for key in r:
3342 value = r[key] = r[key] or False
3343 if key in ('write_uid', 'create_uid') and value in names:
3344 r[key] = (value, names[value])
3345 r['xmlid'] = ("%(module)s.%(name)s" % r) if r['name'] else False
3346 del r['name'], r['module']
3349 def _check_concurrency(self, cr, ids, context):
3352 if not (context.get(self.CONCURRENCY_CHECK_FIELD) and self._log_access):
3353 return
3354 check_clause = "(id = %s AND %s < COALESCE(write_date, create_date, (now() at time zone 'UTC'))::timestamp)"
3355 for sub_ids in cr.split_for_in_conditions(ids):
3356 ids_to_check = []
3357 for id in sub_ids:
3358 id_ref = "%s,%s" % (self._name, id)
3359 update_date = context[self.CONCURRENCY_CHECK_FIELD].pop(id_ref, None)
3360 if update_date:
3361 ids_to_check.extend([id, update_date])
3362 if not ids_to_check:
3363 continue
3364 cr.execute("SELECT id FROM %s WHERE %s" % (self._table, " OR ".join([check_clause]*(len(ids_to_check)/2))), tuple(ids_to_check))
3365 res = cr.fetchone()
3366 if res:
3367 # mention the first one only to keep the error message readable
3368 raise except_orm('ConcurrencyException', _('A document was modified since you last viewed it (%s:%d)') % (self._description, res[0]))
3370 def _check_record_rules_result_count(self, cr, uid, ids, result_ids, operation, context=None):
3371 """Verify the returned rows after applying record rules matches
3372 the length of `ids`, and raise an appropriate exception if it does not.
3376 ids, result_ids = set(ids), set(result_ids)
3377 missing_ids = ids - result_ids
3378 if missing_ids:
3379 # Attempt to distinguish record rule restriction vs deleted records,
3380 # to provide a more specific error message - check whether the missing ids are still present in the database
3381 cr.execute('SELECT id FROM ' + self._table + ' WHERE id IN %s', (tuple(missing_ids),))
3382 forbidden_ids = [x[0] for x in cr.fetchall()]
3383 if forbidden_ids:
3384 # the missing ids are (at least partially) hidden by access rules
3385 if uid == SUPERUSER_ID:
3386 return
3387 _logger.warning('Access Denied by record rules for operation: %s on record ids: %r, uid: %s, model: %s', operation, forbidden_ids, uid, self._name)
3388 raise except_orm(_('Access Denied'),
3389 _('The requested operation cannot be completed due to security restrictions. Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
3390 (self._description, operation))
3392 # If we get here, the missing_ids are not in the database
3393 if operation in ('read','unlink'):
3394 # No need to warn about deleting an already deleted record.
3395 # And no error when reading a record that was deleted, to prevent spurious
3396 # errors for non-transactional search/read sequences coming from clients
3397 return
3398 _logger.warning('Failed operation on deleted record(s): %s, uid: %s, model: %s', operation, uid, self._name)
3399 raise except_orm(_('Missing document(s)'),
3400 _('One of the documents you are trying to access has been deleted, please try again after refreshing.'))
3403 def check_access_rights(self, cr, uid, operation, raise_exception=True): # no context on purpose.
3404 """Verifies that the operation given by ``operation`` is allowed for the user
3405 according to the access rights."""
3406 return self.pool.get('ir.model.access').check(cr, uid, self._name, operation, raise_exception)
3408 def check_access_rule(self, cr, uid, ids, operation, context=None):
3409 """Verifies that the operation given by ``operation`` is allowed for the user
3410 according to ir.rules.
3412 :param operation: one of ``write``, ``unlink``
3413 :raise except_orm: * if current ir.rules do not permit this operation.
3414 :return: None if the operation is allowed
3416 if uid == SUPERUSER_ID:
3417 return
3419 if self.is_transient():
3420 # Only one single implicit access rule for transient models: owner only!
3421 # This is ok to hardcode because we assert that TransientModels always
3422 # have log_access enabled so that the create_uid column is always there.
3423 # And even with _inherits, these fields are always present in the local
3424 # table too, so no need for JOINs.
3425 cr.execute("""SELECT distinct create_uid
3427 WHERE id IN %%s""" % self._table, (tuple(ids),))
3428 uids = [x[0] for x in cr.fetchall()]
3429 if len(uids) != 1 or uids[0] != uid:
3430 raise except_orm(_('Access Denied'),
3431 _('For this kind of document, you may only access records you created yourself.\n\n(Document type: %s)') % (self._description,))
3433 where_clause, where_params, tables = self.pool.get('ir.rule').domain_get(cr, uid, self._name, operation, context=context)
3434 if where_clause:
3435 where_clause = ' and ' + ' and '.join(where_clause)
3436 for sub_ids in cr.split_for_in_conditions(ids):
3437 cr.execute('SELECT ' + self._table + '.id FROM ' + ','.join(tables) +
3438 ' WHERE ' + self._table + '.id IN %s' + where_clause,
3439 [sub_ids] + where_params)
3440 returned_ids = [x['id'] for x in cr.dictfetchall()]
3441 self._check_record_rules_result_count(cr, uid, sub_ids, returned_ids, operation, context=context)
3443 def create_workflow(self, cr, uid, ids, context=None):
3444 """Create a workflow instance for each given record IDs."""
3445 from openerp import workflow
3446 for res_id in ids:
3447 workflow.trg_create(uid, self._name, res_id, cr)
3448 # self.invalidate_cache(cr, uid, context=context) ?
3451 def delete_workflow(self, cr, uid, ids, context=None):
3452 """Delete the workflow instances bound to the given record IDs."""
3453 from openerp import workflow
3454 for res_id in ids:
3455 workflow.trg_delete(uid, self._name, res_id, cr)
3456 self.invalidate_cache(cr, uid, context=context)
3459 def step_workflow(self, cr, uid, ids, context=None):
3460 """Reevaluate the workflow instances of the given record IDs."""
3461 from openerp import workflow
3462 for res_id in ids:
3463 workflow.trg_write(uid, self._name, res_id, cr)
3464 # self.invalidate_cache(cr, uid, context=context) ?
3467 def signal_workflow(self, cr, uid, ids, signal, context=None):
3468 """Send given workflow signal and return a dict mapping ids to workflow results"""
3469 from openerp import workflow
3470 result = {}
3471 for res_id in ids:
3472 result[res_id] = workflow.trg_validate(uid, self._name, res_id, signal, cr)
3473 # self.invalidate_cache(cr, uid, context=context) ?
3474 return result
3476 def redirect_workflow(self, cr, uid, old_new_ids, context=None):
3477 """ Rebind the workflow instance bound to the given 'old' record IDs to
3478 the given 'new' IDs (``old_new_ids`` is a list of pairs ``(old, new)``).
3480 from openerp import workflow
3481 for old_id, new_id in old_new_ids:
3482 workflow.trg_redirect(uid, self._name, old_id, new_id, cr)
3483 self.invalidate_cache(cr, uid, context=context)
3486 def unlink(self, cr, uid, ids, context=None):
3489 Deletes the records of the current set
3491 :raise AccessError: * if user has no unlink rights on the requested object
3492 * if user tries to bypass access rules for unlink on the requested object
3493 :raise UserError: if the record is a default property for other records
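A minimal usage sketch (the ids are illustrative)::

    model.unlink(cr, uid, [7, 8], context=context)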
3498 if isinstance(ids, (int, long)):
3499 ids = [ids]
3501 result_store = self._store_get_values(cr, uid, ids, self._all_columns.keys(), context)
3503 # for recomputing new-style fields
3504 recs = self.browse(cr, uid, ids, context)
3505 recs.modified(self._fields)
3507 self._check_concurrency(cr, ids, context)
3509 self.check_access_rights(cr, uid, 'unlink')
3511 ir_property = self.pool.get('ir.property')
3513 # Check if the records are used as default properties.
3514 domain = [('res_id', '=', False),
3515 ('value_reference', 'in', ['%s,%s' % (self._name, i) for i in ids]),
3516 ]
3517 if ir_property.search(cr, uid, domain, context=context):
3518 raise except_orm(_('Error'), _('Unable to delete this document because it is used as a default property'))
3520 # Delete the records' properties.
3521 property_ids = ir_property.search(cr, uid, [('res_id', 'in', ['%s,%s' % (self._name, i) for i in ids])], context=context)
3522 ir_property.unlink(cr, uid, property_ids, context=context)
3524 self.delete_workflow(cr, uid, ids, context=context)
3526 self.check_access_rule(cr, uid, ids, 'unlink', context=context)
3527 pool_model_data = self.pool.get('ir.model.data')
3528 ir_values_obj = self.pool.get('ir.values')
3529 for sub_ids in cr.split_for_in_conditions(ids):
3530 cr.execute('delete from ' + self._table + ' ' \
3531 'where id IN %s', (sub_ids,))
3533 # Removing the ir_model_data reference if the record being deleted is a record created by xml/csv file,
3534 # as these are not connected with real database foreign keys, and would be dangling references.
3535 # Note: following steps performed as admin to avoid access rights restrictions, and with no context
3536 # to avoid possible side-effects during admin calls.
3537 # Step 1. Calling unlink of ir_model_data only for the affected IDS
3538 reference_ids = pool_model_data.search(cr, SUPERUSER_ID, [('res_id','in',list(sub_ids)),('model','=',self._name)])
3539 # Step 2. Perform the real deletion of the referenced records
3540 if reference_ids:
3541 pool_model_data.unlink(cr, SUPERUSER_ID, reference_ids)
3543 # For the same reason, removing the record relevant to ir_values
3544 ir_value_ids = ir_values_obj.search(cr, uid,
3545 ['|',('value','in',['%s,%s' % (self._name, sid) for sid in sub_ids]),'&',('res_id','in',list(sub_ids)),('model','=',self._name)],
3546 context=context)
3547 if ir_value_ids:
3548 ir_values_obj.unlink(cr, uid, ir_value_ids, context=context)
3550 # invalidate the *whole* cache, since the orm does not handle all
3551 # changes made in the database, like cascading delete!
3552 recs.invalidate_cache()
3554 for order, obj_name, store_ids, fields in result_store:
3555 if obj_name == self._name:
3556 effective_store_ids = set(store_ids) - set(ids)
3557 else:
3558 effective_store_ids = store_ids
3559 if effective_store_ids:
3560 obj = self.pool[obj_name]
3561 cr.execute('select id from '+obj._table+' where id IN %s', (tuple(effective_store_ids),))
3562 rids = map(lambda x: x[0], cr.fetchall())
3563 if rids:
3564 obj._store_set_values(cr, uid, rids, fields, context)
3566 # recompute new-style fields
3575 def write(self, vals):
3578 Updates all records in the current set with the provided values.
3580 :param dict vals: fields to update and the value to set on them e.g::
3582 {'foo': 1, 'bar': "Qux"}
3584 will set the field ``foo`` to ``1`` and the field ``bar`` to
3585 ``"Qux"`` if those are valid (otherwise it will trigger an error).
3587 :raise AccessError: * if user has no write rights on the requested object
3588 * if user tries to bypass access rules for write on the requested object
3589 :raise ValidateError: if user tries to enter an invalid value for a selection field (a value not in the selection list)
3590 :raise UserError: if a loop would be created in a hierarchy of objects as a result of the operation (such as setting an object as its own parent)
3592 .. _openerp/models/relationals/format:
3594 .. note:: Relational fields use a special "commands" format to manipulate their values
3596 This format is a list of command triplets executed sequentially,
3597 possible command triplets are:
3599 ``(0, _, values: dict)``
3600 links to a new record created from the provided values
3601 ``(1, id, values: dict)``
3602 updates the already-linked record of id ``id`` with the
3603 provided ``values``
3604 ``(2, id, _)``
3605 unlinks and deletes the linked record of id ``id``
3606 ``(3, id, _)``
3607 unlinks the linked record of id ``id`` without deleting it
3608 ``(4, id, _)``
3609 links to an existing record of id ``id``
3610 ``(5, _, _)``
3611 unlinks all records in the relation, equivalent to using
3612 the command ``3`` on every linked record
3613 ``(6, _, ids: list)``
3614 replaces the existing list of linked records by the provided
3615 ones, equivalent to using ``5`` then ``4`` for each id in
3616 ``ids``
3618 (in command triplets, ``_`` values are ignored and can be
3619 anything, generally ``0`` or ``False``)
3621 Any command can be used on :class:`~openerp.fields.Many2many`,
3622 only ``0``, ``1`` and ``2`` can be used on
3623 :class:`~openerp.fields.One2many`.
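For instance, assuming a record with a hypothetical one2many field
``child_ids``, the following (illustrative) call creates one line,
updates a second one and unlinks a third::

    record.write({'child_ids': [
        (0, 0, {'name': 'new line'}),       # create a new linked record
        (1, 42, {'name': 'renamed line'}),  # update linked record 42
        (3, 43, False),                     # unlink record 43, keep it in db
    ]})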
3628 self._check_concurrency(self._ids)
3629 self.check_access_rights('write')
3631 # No user-driven update of these columns
3632 for field in itertools.chain(MAGIC_COLUMNS, ('parent_left', 'parent_right')):
3633 vals.pop(field, None)
3635 # split up fields into old-style and pure new-style ones
3636 old_vals, new_vals, unknown = {}, {}, []
3637 for key, val in vals.iteritems():
3638 if key in self._columns:
3639 old_vals[key] = val
3640 elif key in self._fields:
3641 new_vals[key] = val
3642 else:
3643 unknown.append(key)
3645 if unknown:
3646 _logger.warning("%s.write() with unknown fields: %s", self._name, ', '.join(sorted(unknown)))
3648 # write old-style fields with (low-level) method _write
3649 if old_vals:
3650 self._write(old_vals)
3652 # put the values of pure new-style fields into cache, and inverse them
3653 if new_vals:
3654 for record in self:
3655 record._cache.update(record._convert_to_cache(new_vals, update=True))
3656 for key in new_vals:
3657 self._fields[key].determine_inverse(self)
3661 def _write(self, cr, user, ids, vals, context=None):
3662 # low-level implementation of write()
3667 self.check_field_access_rights(cr, user, 'write', vals.keys())
3668 for field in vals.keys():
3670 if field in self._columns:
3671 fobj = self._columns[field]
3672 elif field in self._inherit_fields:
3673 fobj = self._inherit_fields[field][2]
3680 for group in groups:
3681 module = group.split(".")[0]
3682 grp = group.split(".")[1]
3683 cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name=%s and module=%s and model=%s) and uid=%s", \
3684 (grp, module, 'res.groups', user))
3685 readonly = cr.fetchall()
3686 if readonly[0][0] >= 1:
3693 result = self._store_get_values(cr, user, ids, vals.keys(), context) or []
3695 # for recomputing new-style fields
3696 recs = self.browse(cr, user, ids, context)
3697 modified_fields = list(vals)
3698 if self._log_access:
3699 modified_fields += ['write_date', 'write_uid']
3700 recs.modified(modified_fields)
3702 parents_changed = []
3703 parent_order = self._parent_order or self._order
3704 if self._parent_store and (self._parent_name in vals) and not context.get('defer_parent_store_computation'):
3705 # The parent_left/right computation may take up to
3706 # 5 seconds. No need to recompute the values if the
3707 # parent is the same.
3708 # Note: to respect parent_order, nodes must be processed in
3709 # order, so ``parents_changed`` must be ordered properly.
3710 parent_val = vals[self._parent_name]
3711 if parent_val:
3712 query = "SELECT id FROM %s WHERE id IN %%s AND (%s != %%s OR %s IS NULL) ORDER BY %s" % \
3713 (self._table, self._parent_name, self._parent_name, parent_order)
3714 cr.execute(query, (tuple(ids), parent_val))
3715 else:
3716 query = "SELECT id FROM %s WHERE id IN %%s AND (%s IS NOT NULL) ORDER BY %s" % \
3717 (self._table, self._parent_name, parent_order)
3718 cr.execute(query, (tuple(ids),))
3719 parents_changed = map(operator.itemgetter(0), cr.fetchall())
3726 totranslate = context.get('lang', False) and (context['lang'] != 'en_US')
3727 for field in vals:
3728 field_column = self._all_columns.get(field) and self._all_columns.get(field).column
3729 if field_column and field_column.deprecated:
3730 _logger.warning('Field %s.%s is deprecated: %s', self._name, field, field_column.deprecated)
3731 if field in self._columns:
3732 if self._columns[field]._classic_write and not (hasattr(self._columns[field], '_fnct_inv')):
3733 if (not totranslate) or not self._columns[field].translate:
3734 upd0.append('"'+field+'"='+self._columns[field]._symbol_set[0])
3735 upd1.append(self._columns[field]._symbol_set[1](vals[field]))
3736 direct.append(field)
3738 upd_todo.append(field)
3740 updend.append(field)
3741 if field in self._columns \
3742 and hasattr(self._columns[field], 'selection') \
3743 and vals[field]:
3744 self._check_selection_field_value(cr, user, field, vals[field], context=context)
3746 if self._log_access:
3747 upd0.append('write_uid=%s')
3748 upd0.append("write_date=(now() at time zone 'UTC')")
3752 self.check_access_rule(cr, user, ids, 'write', context=context)
3753 for sub_ids in cr.split_for_in_conditions(ids):
3754 cr.execute('update ' + self._table + ' set ' + ','.join(upd0) + ' ' \
3755 'where id IN %s', upd1 + [sub_ids])
3756 if cr.rowcount != len(sub_ids):
3757 raise MissingError(_('One of the records you are trying to modify has already been deleted (Document type: %s).') % self._description)
3762 if self._columns[f].translate:
3763 src_trans = self.pool[self._name].read(cr, user, ids, [f])[0][f]
3766 # Inserting value to DB
3767 context_wo_lang = dict(context, lang=None)
3768 self.write(cr, user, ids, {f: vals[f]}, context=context_wo_lang)
3769 self.pool.get('ir.translation')._set_ids(cr, user, self._name+','+f, 'model', context['lang'], ids, vals[f], src_trans)
3771 # call the 'set' method of fields which are not classic_write
3772 upd_todo.sort(lambda x, y: self._columns[x].priority-self._columns[y].priority)
3774 # default elements in context must be removed when calling a one2many or many2many
3775 rel_context = context.copy()
3776 for c in context.items():
3777 if c[0].startswith('default_'):
3778 del rel_context[c[0]]
3780 for field in upd_todo:
3781 for id in ids:
3782 result += self._columns[field].set(cr, self, id, field, vals[field], user, context=rel_context) or []
3784 unknown_fields = updend[:]
3785 for table in self._inherits:
3786 col = self._inherits[table]
3787 nids = []
3788 for sub_ids in cr.split_for_in_conditions(ids):
3789 cr.execute('select distinct "'+col+'" from "'+self._table+'" ' \
3790 'where id IN %s', (sub_ids,))
3791 nids.extend([x[0] for x in cr.fetchall()])
3793 v = {}
3794 for val in updend:
3795 if self._inherit_fields[val][0] == table:
3796 v[val] = vals[val]
3797 unknown_fields.remove(val)
3798 if v:
3799 self.pool[table].write(cr, user, nids, v, context)
3801 if unknown_fields:
3802 _logger.warning(
3803 'No such field(s) in model %s: %s.',
3804 self._name, ', '.join(unknown_fields))
3806 # check Python constraints
3807 recs._validate_fields(vals)
3809 # TODO: use _order to set dest at the right position and not first node of parent
3810 # We can't defer parent_store computation because the stored function
3811 # fields that are computed may refer (directly or indirectly) to
3812 # parent_left/right (via a child_of domain)
3815 self.pool._init_parent[self._name] = True
3817 order = self._parent_order or self._order
3818 parent_val = vals[self._parent_name]
3820 clause, params = '%s=%%s' % (self._parent_name,), (parent_val,)
3822 clause, params = '%s IS NULL' % (self._parent_name,), ()
3824 for id in parents_changed:
3825 cr.execute('SELECT parent_left, parent_right FROM %s WHERE id=%%s' % (self._table,), (id,))
3826 pleft, pright = cr.fetchone()
3827 distance = pright - pleft + 1
3829 # Positions of current siblings, to locate proper insertion point;
3830 # this can _not_ be fetched outside the loop, as it needs to be refreshed
3831 # after each update, in case several nodes are sequentially inserted one
3832 # next to the other (i.e computed incrementally)
3833 cr.execute('SELECT parent_right, id FROM %s WHERE %s ORDER BY %s' % (self._table, clause, parent_order), params)
3834 parents = cr.fetchall()
3836 # Find Position of the element
3838 for (parent_pright, parent_id) in parents:
3841 position = parent_pright and parent_pright + 1 or 1
3843 # It's the first node of the parent
3848 cr.execute('select parent_left from '+self._table+' where id=%s', (parent_val,))
3849 position = cr.fetchone()[0] + 1
3851 if pleft < position <= pright:
3852 raise except_orm(_('UserError'), _('Recursivity Detected.'))
3854 if pleft < position:
3855 cr.execute('update '+self._table+' set parent_left=parent_left+%s where parent_left>=%s', (distance, position))
3856 cr.execute('update '+self._table+' set parent_right=parent_right+%s where parent_right>=%s', (distance, position))
3857 cr.execute('update '+self._table+' set parent_left=parent_left+%s, parent_right=parent_right+%s where parent_left>=%s and parent_left<%s', (position-pleft, position-pleft, pleft, pright))
3859 cr.execute('update '+self._table+' set parent_left=parent_left+%s where parent_left>=%s', (distance, position))
3860 cr.execute('update '+self._table+' set parent_right=parent_right+%s where parent_right>=%s', (distance, position))
3861 cr.execute('update '+self._table+' set parent_left=parent_left-%s, parent_right=parent_right-%s where parent_left>=%s and parent_left<%s', (pleft-position+distance, pleft-position+distance, pleft+distance, pright+distance))
3862 recs.invalidate_cache(['parent_left', 'parent_right'])
3864 result += self._store_get_values(cr, user, ids, vals.keys(), context)
3867 # for recomputing new-style fields
3868 recs.modified(modified_fields)
3871 for order, model_name, ids_to_update, fields_to_recompute in result:
3872 key = (model_name, tuple(fields_to_recompute))
3873 done.setdefault(key, {})
3874 # avoid to do several times the same computation
3876 for id in ids_to_update:
3877 if id not in done[key]:
3878 done[key][id] = True
3880 self.pool[model_name]._store_set_values(cr, user, todo, fields_to_recompute, context)
3882 # recompute new-style fields
3883 if context.get('recompute', True):
3886 self.step_workflow(cr, user, ids, context=context)
3890 # TODO: Should set perm to user.xxx
3893 @api.returns('self', lambda value: value.id)
3894 def create(self, vals):
3895 """ create(vals) -> record
3897 Creates a new record for the model.
3899 The new record is initialized using the values from ``vals`` and
3900 if necessary those from :meth:`~.default_get`.
3903 values for the model's fields, as a dictionary::
3905 {'field_name': field_value, ...}
3907 see :meth:`~.write` for details
3908 :return: new record created
3909 :raise AccessError: * if user has no create rights on the requested object
3910 * if user tries to bypass access rules for create on the requested object
3911 :raise ValidateError: if user tries to enter an invalid value for a selection field (a value not in the selection list)
3912 :raise UserError: if a loop would be created in a hierarchy of objects as a result of the operation (such as setting an object as its own parent)
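A minimal usage sketch (the field name is hypothetical)::

    record = model.create({'name': 'New record'})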
3914 self.check_access_rights('create')
3916 # add missing defaults, and drop fields that may not be set by user
3917 vals = self._add_missing_default_values(vals)
3918 for field in itertools.chain(MAGIC_COLUMNS, ('parent_left', 'parent_right')):
3919 vals.pop(field, None)
3921 # split up fields into old-style and pure new-style ones
3922 old_vals, new_vals, unknown = {}, {}, []
3923 for key, val in vals.iteritems():
3924 if key in self._all_columns:
3925 old_vals[key] = val
3926 elif key in self._fields:
3927 new_vals[key] = val
3928 else:
3929 unknown.append(key)
3931 if unknown:
3932 _logger.warning("%s.create() with unknown fields: %s", self._name, ', '.join(sorted(unknown)))
3934 # create record with old-style fields
3935 record = self.browse(self._create(old_vals))
3937 # put the values of pure new-style fields into cache, and inverse them
3938 record._cache.update(record._convert_to_cache(new_vals))
3939 for key in new_vals:
3940 self._fields[key].determine_inverse(record)
3944 def _create(self, cr, user, vals, context=None):
3945 # low-level implementation of create()
3949 if self.is_transient():
3950 self._transient_vacuum(cr, user)
3952 tocreate = {}
3953 for v in self._inherits:
3954 if self._inherits[v] not in vals:
3955 tocreate[v] = {}
3956 else:
3957 tocreate[v] = {'id': vals[self._inherits[v]]}
3959 updates = [
3960 # list of column assignments defined as tuples like:
3961 # (column_name, format_string, column_value)
3962 # (column_name, sql_formula)
3963 # Those tuples will be used by the string formatting for the INSERT
3964 # statement below
3965 ('id', "nextval('%s')" % self._sequence),
3970 for v in vals.keys():
3971 if v in self._inherit_fields and v not in self._columns:
3972 (table, col, col_detail, original_parent) = self._inherit_fields[v]
3973 tocreate[table][v] = vals[v]
3976 if (v not in self._inherit_fields) and (v not in self._columns):
3978 unknown_fields.append(v)
3979 if unknown_fields:
3980 _logger.warning(
3981 'No such field(s) in model %s: %s.',
3982 self._name, ', '.join(unknown_fields))
3984 for table in tocreate:
3985 if self._inherits[table] in vals:
3986 del vals[self._inherits[table]]
3988 record_id = tocreate[table].pop('id', None)
3990 if record_id is None or not record_id:
3991 record_id = self.pool[table].create(cr, user, tocreate[table], context=context)
3992 else:
3993 self.pool[table].write(cr, user, [record_id], tocreate[table], context=context)
3995 updates.append((self._inherits[table], '%s', record_id))
3997 # Set boolean fields to False if they are not touched (to make search more powerful)
3998 bool_fields = [x for x in self._columns.keys() if self._columns[x]._type=='boolean']
4000 for bool_field in bool_fields:
4001 if bool_field not in vals:
4002 vals[bool_field] = False
4004 for field in vals.keys():
4006 if field in self._columns:
4007 fobj = self._columns[field]
4008 else:
4009 fobj = self._inherit_fields[field][2]
4015 for group in groups:
4016 module = group.split(".")[0]
4017 grp = group.split(".")[1]
4018 cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name='%s' and module='%s' and model='%s') and uid=%s" % \
4019 (grp, module, 'res.groups', user))
4020 readonly = cr.fetchall()
4021 if readonly[0][0] >= 1:
4024 elif readonly[0][0] == 0:
4032 current_field = self._columns[field]
4033 if current_field._classic_write:
4034 updates.append((field, '%s', current_field._symbol_set[1](vals[field])))
4036 #for the function fields that receive a value, we set them directly in the database
4037 #(they may be required), but we also need to trigger the _fct_inv()
4038 if (hasattr(current_field, '_fnct_inv')) and not isinstance(current_field, fields.related):
4039 #TODO: this way of special-casing the related fields is really hackish, but it shouldn't be changed
4040 #one week before the release candidate. It seems the only good way to handle this correctly is to add an
4041 #attribute to make a field 'really readonly' and thus totally ignored by create()... otherwise
4042 #if, for example, the related field has a default value (for usability) then the fct_inv is called and it
4043 #may raise some access rights error. Changing this is too big a change for now, and is thus postponed
4044 #until after the release but, definitely, the behavior shouldn't be different for related and function
4046 upd_todo.append(field)
4048 #TODO: this 'if' statement should be removed because there is no good reason to special-case the
4049 #related fields. See the above TODO comment for further explanations.
4050 if not isinstance(current_field, fields.related):
4051 upd_todo.append(field)
4052 if field in self._columns \
4053 and hasattr(current_field, 'selection') \
4054 and vals[field]:
4055 self._check_selection_field_value(cr, user, field, vals[field], context=context)
4056 if self._log_access:
4057 updates.append(('create_uid', '%s', user))
4058 updates.append(('write_uid', '%s', user))
4059 updates.append(('create_date', "(now() at time zone 'UTC')"))
4060 updates.append(('write_date', "(now() at time zone 'UTC')"))
4062 # the list of tuples used in this formatting corresponds to
4063 # tuple(field_name, format, value)
4064 # In some cases, for example (id, create_date, write_date), we do not
4065 # need to read the third value of the tuple, because the real value is
4066 # encoded in the second value (the format).
4068 """INSERT INTO "%s" (%s) VALUES(%s) RETURNING id""" % (
4070 ', '.join('"%s"' % u[0] for u in updates),
4071 ', '.join(u[1] for u in updates)
4073 tuple([u[2] for u in updates if len(u) > 2])
4076 id_new, = cr.fetchone()
4077 recs = self.browse(cr, user, id_new, context)
4078 upd_todo.sort(lambda x, y: self._columns[x].priority-self._columns[y].priority)
4080 if self._parent_store and not context.get('defer_parent_store_computation'):
4082 self.pool._init_parent[self._name] = True
4084 parent = vals.get(self._parent_name, False)
4086 cr.execute('select parent_right from '+self._table+' where '+self._parent_name+'=%s order by '+(self._parent_order or self._order), (parent,))
4088 result_p = cr.fetchall()
4089 for (pleft,) in result_p:
4094 cr.execute('select parent_left from '+self._table+' where id=%s', (parent,))
4095 pleft_old = cr.fetchone()[0]
4098 cr.execute('select max(parent_right) from '+self._table)
4099 pleft = cr.fetchone()[0] or 0
4100 cr.execute('update '+self._table+' set parent_left=parent_left+2 where parent_left>%s', (pleft,))
4101 cr.execute('update '+self._table+' set parent_right=parent_right+2 where parent_right>%s', (pleft,))
4102 cr.execute('update '+self._table+' set parent_left=%s,parent_right=%s where id=%s', (pleft+1, pleft+2, id_new))
4103 recs.invalidate_cache(['parent_left', 'parent_right'])
4105 # default elements in context must be removed when calling a one2many or many2many
4106 rel_context = context.copy()
4107 for c in context.items():
4108 if c[0].startswith('default_'):
4109 del rel_context[c[0]]
4112 for field in upd_todo:
4113 result += self._columns[field].set(cr, self, id_new, field, vals[field], user, rel_context) or []
4115 # check Python constraints
4116 recs._validate_fields(vals)
4118 # invalidate and mark new-style fields to recompute
4119 modified_fields = list(vals)
4120 if self._log_access:
4121 modified_fields += ['create_uid', 'create_date', 'write_uid', 'write_date']
4122 recs.modified(modified_fields)
4124 if context.get('recompute', True):
4125 result += self._store_get_values(cr, user, [id_new],
4126 list(set(vals.keys() + self._inherits.values())),
4130 for order, model_name, ids, fields2 in result:
4131 if not (model_name, ids, fields2) in done:
4132 self.pool[model_name]._store_set_values(cr, user, ids, fields2, context)
4133 done.append((model_name, ids, fields2))
4134 # recompute new-style fields
4137 if self._log_create and context.get('recompute', True):
4138 message = self._description + \
4140 self.name_get(cr, user, [id_new], context=context)[0][1] + \
4141 "' " + _("created.")
4142 self.log(cr, user, id_new, message, True, context=context)
4144 self.check_access_rule(cr, user, [id_new], 'create', context=context)
4145 self.create_workflow(cr, user, [id_new], context=context)
4148 def _store_get_values(self, cr, uid, ids, fields, context):
4149 """Returns an ordered list of fields.function to call due to
4150 an update operation on ``fields`` of records with ``ids``,
4151 obtained by calling the 'store' triggers of these fields,
4152 as setup by their 'store' attribute.
4154 :return: [(priority, model_name, [record_ids,], [function_fields,])]
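For example (an illustrative value; the model and field names are
hypothetical)::

    [(10, 'account.invoice', [1, 2], ['amount_total'])]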
4156 if fields is None: fields = []
4157 stored_functions = self.pool._store_function.get(self._name, [])
4159 # use indexed names for the details of the stored_functions:
4160 model_name_, func_field_to_compute_, target_ids_func_, trigger_fields_, priority_ = range(5)
4162 # only keep store triggers that should be triggered for the ``fields``
4164 triggers_to_compute = (
4165 f for f in stored_functions
4166 if not f[trigger_fields_] or set(fields).intersection(f[trigger_fields_])
4170 target_id_results = {}
4171 for store_trigger in triggers_to_compute:
4172 target_func_id_ = id(store_trigger[target_ids_func_])
4173 if target_func_id_ not in target_id_results:
4174 # use admin user for accessing objects having rules defined on store fields
4175 target_id_results[target_func_id_] = [i for i in store_trigger[target_ids_func_](self, cr, SUPERUSER_ID, ids, context) if i]
4176 target_ids = target_id_results[target_func_id_]
4178 # the compound key must consider the priority and model name
4179 key = (store_trigger[priority_], store_trigger[model_name_])
4180 for target_id in target_ids:
4181 to_compute_map.setdefault(key, {}).setdefault(target_id, set()).add(tuple(store_trigger))
4183 # Here to_compute_map looks like:
4184 # { (10, 'model_a') : { target_id1: [ (trigger_1_tuple, trigger_2_tuple) ], ... }
4185 # (20, 'model_a') : { target_id2: [ (trigger_3_tuple, trigger_4_tuple) ], ... }
4186 # (99, 'model_a') : { target_id1: [ (trigger_5_tuple, trigger_6_tuple) ], ... }
4189 # Now we need to generate the batch function calls list
4191 # { (10, 'model_a') : [(10, 'model_a', [record_ids,], [function_fields,])] }
4193 for (priority, model), id_map in to_compute_map.iteritems():
4194 trigger_ids_maps = {}
4195 # trigger_ids_maps =
4196 # { (trigger_1_tuple, trigger_2_tuple) : [target_id1, target_id2, ..] }
4197 for target_id, triggers in id_map.iteritems():
4198 trigger_ids_maps.setdefault(tuple(triggers), []).append(target_id)
4199 for triggers, target_ids in trigger_ids_maps.iteritems():
4200 call_map.setdefault((priority, model), []).append((priority, model, target_ids,
4201 [t[func_field_to_compute_] for t in triggers]))
4204 result = reduce(operator.add, (call_map[k] for k in sorted(call_map)))
4207 def _store_set_values(self, cr, uid, ids, fields, context):
4208 """Calls the fields.function's "implementation function" for all ``fields``, on records with ``ids`` (taking care of
4209 respecting ``multi`` attributes), and stores the resulting values in the database directly."""
4214 if self._log_access:
4215 cr.execute('select id,write_date from '+self._table+' where id IN %s', (tuple(ids),))
4219 field_dict.setdefault(r[0], [])
4220 res_date = time.strptime((r[1])[:19], '%Y-%m-%d %H:%M:%S')
4221 write_date = datetime.datetime.fromtimestamp(time.mktime(res_date))
4222 for i in self.pool._store_function.get(self._name, []):
4224 up_write_date = write_date + datetime.timedelta(hours=i[5])
4225 if datetime.datetime.now() < up_write_date:
4227 field_dict[r[0]].append(i[1])
4233 if self._columns[f]._multi not in keys:
4234 keys.append(self._columns[f]._multi)
4235 todo.setdefault(self._columns[f]._multi, [])
4236 todo[self._columns[f]._multi].append(f)
4240 # use admin user for accessing objects having rules defined on store fields
4241 result = self._columns[val[0]].get(cr, self, ids, val, SUPERUSER_ID, context=context)
4242 for id, value in result.items():
4244 for f in value.keys():
4245 if f in field_dict[id]:
4252 if self._columns[v]._type == 'many2one':
4254 value[v] = value[v][0]
4257 upd0.append('"'+v+'"='+self._columns[v]._symbol_set[0])
4258 upd1.append(self._columns[v]._symbol_set[1](value[v]))
4261 cr.execute('update "' + self._table + '" set ' + \
4262 ','.join(upd0) + ' where id = %s', upd1)
4266 # use admin user for accessing objects having rules defined on store fields
4267 result = self._columns[f].get(cr, self, ids, f, SUPERUSER_ID, context=context)
4268 for r in result.keys():
4270 if r in field_dict.keys():
4271 if f in field_dict[r]:
4273 for id, value in result.items():
4274 if self._columns[f]._type == 'many2one':
4279 cr.execute('update "' + self._table + '" set ' + \
4280 '"'+f+'"='+self._columns[f]._symbol_set[0] + ' where id = %s', (self._columns[f]._symbol_set[1](value), id))
4282 # invalidate and mark new-style fields to recompute
4283 self.browse(cr, uid, ids, context).modified(fields)
4287 # TODO: improve this with NULL handling
4288 def _where_calc(self, cr, user, domain, active_test=True, context=None):
4289 """Computes the WHERE clause needed to implement an OpenERP domain.
4290 :param domain: the domain to compute
4292 :param active_test: whether the default filtering of records with ``active``
4293 field set to ``False`` should be applied.
4294 :return: the query expressing the given domain as provided in domain
4295 :rtype: osv.query.Query
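For instance (a sketch; the table and field names are illustrative), the
domain ``[('name', '=', 'Foo')]`` on a model stored in table
``res_partner`` yields a Query whose parts look roughly like::

    tables:       ['"res_partner"']
    where_clause: ['("res_partner"."name" = %s)']
    where_params: ['Foo']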
4300 # if the object has a field named 'active', filter out all inactive
4301 # records unless they were explicitly asked for
4302 if 'active' in self._all_columns and (active_test and context.get('active_test', True)):
4304 # the item[0] trick below works for domain items and '&'/'|'/'!'
4306 if not any(item[0] == 'active' for item in domain):
4307 domain.insert(0, ('active', '=', 1))
4309 domain = [('active', '=', 1)]
4312 e = expression.expression(cr, user, domain, self, context)
4313 tables = e.get_tables()
4314 where_clause, where_params = e.to_sql()
4315 where_clause = where_clause and [where_clause] or []
4317 where_clause, where_params, tables = [], [], ['"%s"' % self._table]
4319 return Query(tables, where_clause, where_params)
4321 def _check_qorder(self, word):
4322 if not regex_order.match(word):
4323 raise except_orm(_('AccessError'), _('Invalid "order" specified. A valid "order" specification is a comma-separated list of valid field names (optionally followed by asc/desc for the direction)'))
4326 def _apply_ir_rules(self, cr, uid, query, mode='read', context=None):
4327 """Add what's missing in ``query`` to implement all appropriate ir.rules
4328 (using the ``model_name``'s rules or the current model's rules if ``model_name`` is None)
4330 :param query: the current query object
4332 if uid == SUPERUSER_ID:
4335 def apply_rule(added_clause, added_params, added_tables, parent_model=None):
4336 """ :param parent_model: name of the parent model, if the added
4337 clause comes from a parent model
4338 """
4339 if added_clause:
4340 if parent_model:
4341 # as inherited rules are being applied, we need to add the missing JOIN
4342 # to reach the parent table (if it was not JOINed yet in the query)
4343 parent_alias = self._inherits_join_add(self, parent_model, query)
4344 # inherited rules are applied on the external table -> need to get the alias and replace
4345 parent_table = self.pool[parent_model]._table
4346 added_clause = [clause.replace('"%s"' % parent_table, '"%s"' % parent_alias) for clause in added_clause]
4347 # change references to parent_table to parent_alias, because we now use the alias to refer to the table
4348 new_tables = []
4349 for table in added_tables:
4350 # table is just a table name -> switch to the full alias
4351 if table == '"%s"' % parent_table:
4352 new_tables.append('"%s" as "%s"' % (parent_table, parent_alias))
4353 # table is already a full statement -> replace references to the table with its alias; this is correct given the way aliases are generated
4354 else:
4355 new_tables.append(table.replace('"%s"' % parent_table, '"%s"' % parent_alias))
4356 added_tables = new_tables
4357 query.where_clause += added_clause
4358 query.where_clause_params += added_params
4359 for table in added_tables:
4360 if table not in query.tables:
4361 query.tables.append(table)
4365 # apply main rules on the object
4366 rule_obj = self.pool.get('ir.rule')
4367 rule_where_clause, rule_where_clause_params, rule_tables = rule_obj.domain_get(cr, uid, self._name, mode, context=context)
4368 apply_rule(rule_where_clause, rule_where_clause_params, rule_tables)
4370 # apply ir.rules from the parents (through _inherits)
4371 for inherited_model in self._inherits:
4372 rule_where_clause, rule_where_clause_params, rule_tables = rule_obj.domain_get(cr, uid, inherited_model, mode, context=context)
4373 apply_rule(rule_where_clause, rule_where_clause_params, rule_tables,
4374 parent_model=inherited_model)
4376 def _generate_m2o_order_by(self, order_field, query):
4378 Add possibly missing JOIN to ``query`` and generate the ORDER BY clause for m2o fields,
4379 either native m2o fields or function/related fields that are stored, including
4380 intermediate JOINs for inheritance if required.
4382 :return: the qualified field name to use in an ORDER BY clause to sort by ``order_field``
4384 if order_field not in self._columns and order_field in self._inherit_fields:
4385 # also add missing joins for reaching the table containing the m2o field
4386 qualified_field = self._inherits_join_calc(order_field, query)
4387 order_field_column = self._inherit_fields[order_field][2]
4389 qualified_field = '"%s"."%s"' % (self._table, order_field)
4390 order_field_column = self._columns[order_field]
4392 assert order_field_column._type == 'many2one', 'Invalid field passed to _generate_m2o_order_by()'
4393 if not order_field_column._classic_write and not getattr(order_field_column, 'store', False):
4394 _logger.debug("Many2one function/related fields must be stored " \
4395 "to be used as ordering fields! Ignoring sorting for %s.%s",
4396 self._name, order_field)
4399 # figure out the applicable order_by for the m2o
4400 dest_model = self.pool[order_field_column._obj]
4401 m2o_order = dest_model._order
4402 if not regex_order.match(m2o_order):
4403 # _order is complex, can't use it here, so we default to _rec_name
4404 m2o_order = dest_model._rec_name
4405 else:
4406 # extract the field names, to be able to qualify them and add desc/asc
4407 m2o_order_list = []
4408 for order_part in m2o_order.split(","):
4409 m2o_order_list.append(order_part.strip().split(" ", 1)[0].strip())
4410 m2o_order = m2o_order_list
4412 # Join the dest m2o table if it's not joined yet. We use [LEFT] OUTER join here
4413 # as we don't want to exclude results that have NULL values for the m2o
4414 src_table, src_field = qualified_field.replace('"', '').split('.', 1)
4415 dst_alias, dst_alias_statement = query.add_join((src_table, dest_model._table, src_field, 'id', src_field), implicit=False, outer=True)
4416 qualify = lambda field: '"%s"."%s"' % (dst_alias, field)
4417 return map(qualify, m2o_order) if isinstance(m2o_order, list) else qualify(m2o_order)
4419 def _generate_order_by(self, order_spec, query):
4421 Attempt to construct an appropriate ORDER BY clause based on order_spec, which must be
4422 a comma-separated list of valid field names, optionally followed by an ASC or DESC direction.
4424 :raise" except_orm in case order_spec is malformed
4426 order_by_clause = ''
4427 order_spec = order_spec or self._order
4428 if order_spec:
4429 order_by_elements = []
4430 self._check_qorder(order_spec)
4431 for order_part in order_spec.split(','):
4432 order_split = order_part.strip().split(' ')
4433 order_field = order_split[0].strip()
4434 order_direction = order_split[1].strip() if len(order_split) == 2 else ''
4435 order_column = None
4436 inner_clause = None
4437 if order_field == 'id':
4438 order_by_elements.append('"%s"."%s" %s' % (self._table, order_field, order_direction))
4439 elif order_field in self._columns:
4440 order_column = self._columns[order_field]
4441 if order_column._classic_read:
4442 inner_clause = '"%s"."%s"' % (self._table, order_field)
4443 elif order_column._type == 'many2one':
4444 inner_clause = self._generate_m2o_order_by(order_field, query)
4445 else:
4446 continue # ignore non-readable or "non-joinable" fields
4447 elif order_field in self._inherit_fields:
4448 parent_obj = self.pool[self._inherit_fields[order_field][3]]
4449 order_column = parent_obj._columns[order_field]
4450 if order_column._classic_read:
4451 inner_clause = self._inherits_join_calc(order_field, query)
4452 elif order_column._type == 'many2one':
4453 inner_clause = self._generate_m2o_order_by(order_field, query)
4454 else:
4455 continue # ignore non-readable or "non-joinable" fields
4456 else:
4457 raise ValueError(_("Sorting field %s not found on model %s") % (order_field, self._name))
4458 if order_column and order_column._type == 'boolean':
4459 inner_clause = "COALESCE(%s, false)" % inner_clause
4461 if isinstance(inner_clause, list):
4462 for clause in inner_clause:
4463 order_by_elements.append("%s %s" % (clause, order_direction))
4464 else:
4465 order_by_elements.append("%s %s" % (inner_clause, order_direction))
4466 if order_by_elements:
4467 order_by_clause = ",".join(order_by_elements)
4469 return order_by_clause and (' ORDER BY %s ' % order_by_clause) or ''
4471 def _search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False, access_rights_uid=None):
4473 Private implementation of the search() method, allowing the caller to specify the uid to use for the access rights check.
4474 This is useful for example when filling in the selection list for a drop-down and avoiding access rights errors,
4475 by specifying ``access_rights_uid=1`` to bypass access rights check, but not ir.rules!
4476 This is ok at the security level because this method is private and not callable through XML-RPC.
4478 :param access_rights_uid: optional user ID to use when checking access rights
4479 (not for ir.rules, this is only for ir.model.access)
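A usage sketch (the domain and limit are illustrative)::

    ids = model._search(cr, uid, [('active', '=', True)], limit=80,
                        access_rights_uid=SUPERUSER_ID)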
4483 self.check_access_rights(cr, access_rights_uid or user, 'read')
4485 # For transient models, restrict access to the current user, except for the super-user
4486 if self.is_transient() and self._log_access and user != SUPERUSER_ID:
4487 args = expression.AND(([('create_uid', '=', user)], args or []))
4489 query = self._where_calc(cr, user, args, context=context)
4490 self._apply_ir_rules(cr, user, query, 'read', context=context)
4491 order_by = self._generate_order_by(order, query)
4492 from_clause, where_clause, where_clause_params = query.get_sql()
4494 where_str = where_clause and (" WHERE %s" % where_clause) or ''
4497 # Ignore order, limit and offset when just counting, they don't make sense and could
4498 # hurt performance
4499 query_str = 'SELECT count(1) FROM ' + from_clause + where_str
4500 cr.execute(query_str, where_clause_params)
4501 res = cr.fetchone()
4502 return res[0]
4504 limit_str = limit and ' limit %d' % limit or ''
4505 offset_str = offset and ' offset %d' % offset or ''
4506 query_str = 'SELECT "%s".id FROM ' % self._table + from_clause + where_str + order_by + limit_str + offset_str
4507 cr.execute(query_str, where_clause_params)
4508 res = cr.fetchall()
4510 # TDE note: with auto_join, we could have several lines about the same result
4511 # i.e. a lead with several unread messages; we uniquify the result using
4512 # a fast way to do it while preserving order (http://www.peterbe.com/plog/uniqifiers-benchmark)
4513 def _uniquify_list(seq):
4514 seen = set()
4515 return [x for x in seq if x not in seen and not seen.add(x)]
4517 return _uniquify_list([x[0] for x in res])
4519 # returns the different values ever entered for one field
4520 # this is used, for example, in the client when the user hits enter on
4521 # a char field
4522 def distinct_field_get(self, cr, uid, field, value, args=None, offset=0, limit=None):
4525 if field in self._inherit_fields:
4526 return self.pool[self._inherit_fields[field][0]].distinct_field_get(cr, uid, field, value, args, offset, limit)
4527 else:
4528 return self._columns[field].search(cr, self, args, field, value, offset, limit, uid)
4530 def copy_data(self, cr, uid, id, default=None, context=None):
4532 Copy the given record's data with all its field values
4534 :param cr: database cursor
4535 :param uid: current user id
4536 :param id: id of the record to copy
4537 :param default: field values to override in the original values of the copied record
4538 :type default: dictionary
4539 :param context: context arguments, like lang, time zone
4540 :type context: dictionary
4541 :return: dictionary containing all the field values
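For example (a sketch; the field names and returned values are
illustrative)::

    data = model.copy_data(cr, uid, 7, default={'name': 'Foo (copy)'})
    # -> {'name': 'Foo (copy)', 'line_ids': [(0, 0, {...})], ...}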
4547 # avoid recursion through already copied records in case of circular relationship
4548 seen_map = context.setdefault('__copy_data_seen', {})
4549 if id in seen_map.setdefault(self._name, []):
4550 return
4551 seen_map[self._name].append(id)
4555 if 'state' not in default:
4556 if 'state' in self._defaults:
4557 if callable(self._defaults['state']):
4558 default['state'] = self._defaults['state'](self, cr, uid, context)
4560 default['state'] = self._defaults['state']
4562 # build a black list of fields that should not be copied
4563 blacklist = set(MAGIC_COLUMNS + ['parent_left', 'parent_right'])
4564 def blacklist_given_fields(obj):
4565 # blacklist the fields that are given by inheritance
4566 for other, field_to_other in obj._inherits.items():
4567 blacklist.add(field_to_other)
4568 if field_to_other in default:
4569 # all the fields of 'other' are given by the record: default[field_to_other],
4570 # except the ones redefined in self
4571 blacklist.update(set(self.pool[other]._all_columns) - set(self._columns))
4572 else:
4573 blacklist_given_fields(self.pool[other])
4574 # blacklist deprecated fields
4575 for name, field in obj._columns.items():
4576 if field.deprecated:
4577 blacklist.add(name)
4579 blacklist_given_fields(self)
4582 fields_to_copy = dict((f,fi) for f, fi in self._all_columns.iteritems()
4583 if fi.column.copy
4584 if f not in default
4585 if f not in blacklist)
4587 data = self.read(cr, uid, [id], fields_to_copy.keys(), context=context)
4588 if data:
4589 data = data[0]
4590 else:
4591 raise IndexError(_("Record #%d of %s not found, cannot copy!") % (id, self._name))
4593 res = dict(default)
4594 for f, colinfo in fields_to_copy.iteritems():
4595 field = colinfo.column
4596 if field._type == 'many2one':
4597 res[f] = data[f] and data[f][0]
4598 elif field._type == 'one2many':
4599 other = self.pool[field._obj]
4600 # duplicate following the order of the ids because we'll rely on
4601 # it later for copying translations in copy_translation()!
4602 lines = [other.copy_data(cr, uid, line_id, context=context) for line_id in sorted(data[f])]
4603 # the lines are duplicated using the wrong (old) parent, but then
4604 # are reassigned to the correct one thanks to the (0, 0, ...)
4605 res[f] = [(0, 0, line) for line in lines if line]
4606 elif field._type == 'many2many':
4607 res[f] = [(6, 0, data[f])]
4608 else:
4609 res[f] = data[f]
4611 return res
4613 def copy_translations(self, cr, uid, old_id, new_id, context=None):
4617 # avoid recursion through already copied records in case of circular relationship
4618 seen_map = context.setdefault('__copy_translations_seen',{})
4619 if old_id in seen_map.setdefault(self._name, []):
4620 return
4621 seen_map[self._name].append(old_id)
4623 trans_obj = self.pool.get('ir.translation')
4624 # TODO it seems fields_get can be replaced by _all_columns (no need for translation)
4625 fields = self.fields_get(cr, uid, context=context)
4627 for field_name, field_def in fields.items():
4628 # removing the lang to compare untranslated values
4629 context_wo_lang = dict(context, lang=None)
4630 old_record, new_record = self.browse(cr, uid, [old_id, new_id], context=context_wo_lang)
4631 # we must recursively copy the translations for o2o and o2m
4632 if field_def['type'] == 'one2many':
4633 target_obj = self.pool[field_def['relation']]
4634 # here we rely on the order of the ids to match the translations
4635 # as foreseen in copy_data()
4636 old_children = sorted(r.id for r in old_record[field_name])
4637 new_children = sorted(r.id for r in new_record[field_name])
4638 for (old_child, new_child) in zip(old_children, new_children):
4639 target_obj.copy_translations(cr, uid, old_child, new_child, context=context)
4640 # and for translatable fields we keep them for copy
4641 elif field_def.get('translate'):
4642 if field_name in self._columns:
                trans_name = self._name + "," + field_name
                target_id = new_id
                source_id = old_id
4646 elif field_name in self._inherit_fields:
4647 trans_name = self._inherit_fields[field_name][0] + "," + field_name
4648 # get the id of the parent record to set the translation
4649 inherit_field_name = self._inherit_fields[field_name][1]
4650 target_id = new_record[inherit_field_name].id
                    source_id = old_record[inherit_field_name].id
                else:
                    continue

                trans_ids = trans_obj.search(cr, uid, [
                    ('name', '=', trans_name),
                    ('res_id', '=', source_id)
                ])
4659 user_lang = context.get('lang')
                for record in trans_obj.read(cr, uid, trans_ids, context=context):
                    del record['id']
4662 # remove source to avoid triggering _set_src
4663 del record['source']
4664 record.update({'res_id': target_id})
4665 if user_lang and user_lang == record['lang']:
4666 # 'source' to force the call to _set_src
4667 # 'value' needed if value is changed in copy(), want to see the new_value
4668 record['source'] = old_record[field_name]
4669 record['value'] = new_record[field_name]
4670 trans_obj.create(cr, uid, record, context=context)
4672 @api.returns('self', lambda value: value.id)
4673 def copy(self, cr, uid, id, default=None, context=None):
4674 """ copy(default=None)
        Duplicate the record with the given id, updating it with the given default values.

        :param dict default: dictionary of field values to override in the
            original values of the copied record, e.g.: ``{'field_name': overridden_value, ...}``
4680 :returns: new record
        if context is None:
            context = {}
        context = context.copy()
        data = self.copy_data(cr, uid, id, default, context)
        new_id = self.create(cr, uid, data, context)
        self.copy_translations(cr, uid, id, new_id, context)
        return new_id
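    # Usage sketch (hypothetical record id and field): duplicate record 42,
    # overriding its name on the copy; translations follow automatically via
    # copy_translations() above.
    #
    #     new_id = model.copy(cr, uid, 42, default={'name': 'Copy of original'})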
    @api.returns('self')
    def exists(self):
4694 """ exists() -> records
4696 Returns the subset of records in `self` that exist, and marks deleted
        records as such in cache. It can be used as a test on records::

            if record.exists():
                ...

        By convention, new records are returned as existing.
        ids = filter(None, self._ids)           # ids to check in database
        if not ids:
            return self
        query = """SELECT id FROM "%s" WHERE id IN %%s""" % self._table
        self._cr.execute(query, (tuple(ids),))
4709 ids = ([r[0] for r in self._cr.fetchall()] + # ids in database
4710 [id for id in self._ids if not id]) # new ids
4711 existing = self.browse(ids)
4712 if len(existing) < len(self):
4713 # mark missing records in cache with a failed value
4714 exc = MissingError(_("Record does not exist or has been deleted."))
            (self - existing)._cache.update(FailedValue(exc))
        return existing
4718 def check_recursion(self, cr, uid, ids, context=None, parent=None):
        _logger.warning("You are using deprecated %s.check_recursion(); please use '_check_recursion()' instead!" %
                        self._name)
4721 assert parent is None or parent in self._columns or parent in self._inherit_fields,\
4722 "The 'parent' parameter passed to check_recursion() must be None or a valid field name"
4723 return self._check_recursion(cr, uid, ids, context, parent)
4725 def _check_recursion(self, cr, uid, ids, context=None, parent=None):
4727 Verifies that there is no loop in a hierarchical structure of records,
4728 by following the parent relationship using the **parent** field until a loop
4729 is detected or until a top-level record is found.
4731 :param cr: database cursor
4732 :param uid: current user id
4733 :param ids: list of ids of records to check
4734 :param parent: optional parent field name (default: ``self._parent_name = parent_id``)
4735 :return: **True** if the operation can proceed safely, or **False** if an infinite loop is detected.
        if not parent:
            parent = self._parent_name
4740 # must ignore 'active' flag, ir.rules, etc. => direct SQL query
        query = 'SELECT "%s" FROM "%s" WHERE id = %%s' % (parent, self._table)
        for id in ids:
            current_id = id
            while current_id is not None:
                cr.execute(query, (current_id,))
                result = cr.fetchone()
                current_id = result[0] if result else None
                if current_id == id:
                    return False
        return True
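    # Sketch of the typical use (hypothetical model with a 'parent_id'
    # many2one on itself): declare _check_recursion as an old-style
    # constraint so cyclic parent chains are rejected on create/write.
    #
    #     _constraints = [
    #         (BaseModel._check_recursion,
    #          'Error! You cannot create a recursive hierarchy.',
    #          ['parent_id']),
    #     ]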
4752 def _check_m2m_recursion(self, cr, uid, ids, field_name):
        Verifies that there is no loop in a hierarchical structure of records,
        by following the relation described by the many2many **field_name**
        until a loop is detected or until a top-level record is found.
4758 :param cr: database cursor
4759 :param uid: current user id
4760 :param ids: list of ids of records to check
4761 :param field_name: field to check
4762 :return: **True** if the operation can proceed safely, or **False** if an infinite loop is detected.
4765 field = self._all_columns.get(field_name)
4766 field = field.column if field else None
4767 if not field or field._type != 'many2many' or field._obj != self._name:
4768 # field must be a many2many on itself
4769 raise ValueError('invalid field_name: %r' % (field_name,))
        query = 'SELECT distinct "%s" FROM "%s" WHERE "%s" IN %%s' % (field._id2, field._rel, field._id1)
        ids_parent = ids[:]
        while ids_parent:
            ids_parent2 = []
            for i in range(0, len(ids_parent), cr.IN_MAX):
                j = i + cr.IN_MAX
                sub_ids_parent = ids_parent[i:j]
                cr.execute(query, (tuple(sub_ids_parent),))
                ids_parent2.extend(filter(None, map(lambda x: x[0], cr.fetchall())))
            ids_parent = ids_parent2
            for i in ids_parent:
                if i in ids:
                    return False
        return True
4786 def _get_external_ids(self, cr, uid, ids, *args, **kwargs):
4787 """Retrieve the External ID(s) of any database record.
        **Synopsis**: ``_get_external_ids(cr, uid, ids) -> { 'id': ['module.xml_id'] }``
4791 :return: map of ids to the list of their fully qualified External IDs
4792 in the form ``module.key``, or an empty list when there's no External
4793 ID for a record, e.g.::
               { 'id': ['module.ext_id', 'module.ext_id_bis'],
                 'id2': [] }
4798 ir_model_data = self.pool.get('ir.model.data')
4799 data_ids = ir_model_data.search(cr, uid, [('model', '=', self._name), ('res_id', 'in', ids)])
4800 data_results = ir_model_data.read(cr, uid, data_ids, ['module', 'name', 'res_id'])
        result = {}
        for id in ids:
            # can't use dict.fromkeys() as the list would be shared!
            result[id] = []
        for record in data_results:
            result[record['res_id']].append('%(module)s.%(name)s' % record)
        return result
4809 def get_external_id(self, cr, uid, ids, *args, **kwargs):
4810 """Retrieve the External ID of any database record, if there
        is one. This method works as a possible implementation of a function
        field, so that it can easily be added to any model, referencing it as
        ``Model.get_external_id``.
4815 When multiple External IDs exist for a record, only one
4816 of them is returned (randomly).
4818 :return: map of ids to their fully qualified XML ID,
4819 defaulting to an empty string when there's none
4820 (to be usable as a function field),
                  { 'id': 'module.ext_id',
                    'id2': '' }
4826 results = self._get_xml_ids(cr, uid, ids)
        for k, v in results.iteritems():
            if results[k]:
                results[k] = v[0]
            else:
                results[k] = ''
        return results
4834 # backwards compatibility
4835 get_xml_id = get_external_id
4836 _get_xml_ids = _get_external_ids
4838 def print_report(self, cr, uid, ids, name, data, context=None):
4840 Render the report `name` for the given IDs. The report must be defined
4841 for this model, not another.
4843 report = self.pool['ir.actions.report.xml']._lookup_report(cr, name)
4844 assert self._name == report.table
4845 return report.create(cr, uid, ids, data, context)
    @classmethod
    def is_transient(cls):
4850 """ Return whether the model is transient.
4852 See :class:`TransientModel`.
4855 return cls._transient
4857 def _transient_clean_rows_older_than(self, cr, seconds):
4858 assert self._transient, "Model %s is not transient, it cannot be vacuumed!" % self._name
4859 # Never delete rows used in last 5 minutes
4860 seconds = max(seconds, 300)
4861 query = ("SELECT id FROM " + self._table + " WHERE"
4862 " COALESCE(write_date, create_date, (now() at time zone 'UTC'))::timestamp"
4863 " < ((now() at time zone 'UTC') - interval %s)")
4864 cr.execute(query, ("%s seconds" % seconds,))
4865 ids = [x[0] for x in cr.fetchall()]
4866 self.unlink(cr, SUPERUSER_ID, ids)
4868 def _transient_clean_old_rows(self, cr, max_count):
4869 # Check how many rows we have in the table
        cr.execute("SELECT count(*) AS row_count FROM " + self._table)
        res = cr.fetchall()
        if res[0][0] <= max_count:
4873 return # max not reached, nothing to do
4874 self._transient_clean_rows_older_than(cr, 300)
4876 def _transient_vacuum(self, cr, uid, force=False):
4877 """Clean the transient records.
4879 This unlinks old records from the transient model tables whenever the
4880 "_transient_max_count" or "_max_age" conditions (if any) are reached.
4881 Actual cleaning will happen only once every "_transient_check_time" calls.
4882 This means this method can be called frequently called (e.g. whenever
4883 a new record is created).
4884 Example with both max_hours and max_count active:
4885 Suppose max_hours = 0.2 (e.g. 12 minutes), max_count = 20, there are 55 rows in the
4886 table, 10 created/changed in the last 5 minutes, an additional 12 created/changed between
        5 and 10 minutes ago, the rest created/changed more than 12 minutes ago.
4888 - age based vacuum will leave the 22 rows created/changed in the last 12 minutes
4889 - count based vacuum will wipe out another 12 rows. Not just 2, otherwise each addition
4890 would immediately cause the maximum to be reached again.
4891 - the 10 rows that have been created/changed the last 5 minutes will NOT be deleted
4893 assert self._transient, "Model %s is not transient, it cannot be vacuumed!" % self._name
4894 _transient_check_time = 20 # arbitrary limit on vacuum executions
4895 self._transient_check_count += 1
4896 if not force and (self._transient_check_count < _transient_check_time):
4897 return True # no vacuum cleaning this time
4898 self._transient_check_count = 0
4900 # Age-based expiration
4901 if self._transient_max_hours:
4902 self._transient_clean_rows_older_than(cr, self._transient_max_hours * 60 * 60)
4904 # Count-based expiration
4905 if self._transient_max_count:
            self._transient_clean_old_rows(cr, self._transient_max_count)

        return True
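    # A minimal sketch (hypothetical wizard model) of configuring the vacuum
    # thresholds used above; both attributes are optional and independent.
    #
    #     class example_wizard(TransientModel):
    #         _name = 'example.wizard'
    #         _transient_max_count = 1000   # count-based expiration
    #         _transient_max_hours = 2.0    # age-based expiration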
4910 def resolve_2many_commands(self, cr, uid, field_name, commands, fields=None, context=None):
4911 """ Serializes one2many and many2many commands into record dictionaries
4912 (as if all the records came from the database via a read()). This
4913 method is aimed at onchange methods on one2many and many2many fields.
4915 Because commands might be creation commands, not all record dicts
4916 will contain an ``id`` field. Commands matching an existing record
4917 will have an ``id``.
4919 :param field_name: name of the one2many or many2many field matching the commands
4920 :type field_name: str
4921 :param commands: one2many or many2many commands to execute on ``field_name``
4922 :type commands: list((int|False, int|False, dict|False))
4923 :param fields: list of fields to read from the database, when applicable
4924 :type fields: list(str)
4925 :returns: records in a shape similar to that returned by ``read()``
4926 (except records may be missing the ``id`` field if they don't exist in db)
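        For instance (hypothetical values), the commands
        ``[(0, 0, {'name': 'new'}), (1, 42, {'name': 'renamed'}), (4, 43, False)]``
        resolve to three dicts: the creation values as-is, record 42 as read
        from the database with its update applied, and record 43 as read.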
4929 result = [] # result (list of dict)
4930 record_ids = [] # ids of records to read
4931 updates = {} # {id: dict} of updates on particular records
4933 for command in commands or []:
4934 if not isinstance(command, (list, tuple)):
4935 record_ids.append(command)
4936 elif command[0] == 0:
4937 result.append(command[2])
4938 elif command[0] == 1:
4939 record_ids.append(command[1])
4940 updates.setdefault(command[1], {}).update(command[2])
4941 elif command[0] in (2, 3):
4942 record_ids = [id for id in record_ids if id != command[1]]
4943 elif command[0] == 4:
4944 record_ids.append(command[1])
4945 elif command[0] == 5:
4946 result, record_ids = [], []
4947 elif command[0] == 6:
4948 result, record_ids = [], list(command[2])
4950 # read the records and apply the updates
4951 other_model = self.pool[self._all_columns[field_name].column._obj]
4952 for record in other_model.read(cr, uid, record_ids, fields=fields, context=context):
4953 record.update(updates.get(record['id'], {}))
            result.append(record)
        return result
4958 # for backward compatibility
4959 resolve_o2m_commands_to_record_dicts = resolve_2many_commands
4961 def search_read(self, cr, uid, domain=None, fields=None, offset=0, limit=None, order=None, context=None):
4963 Performs a ``search()`` followed by a ``read()``.
4965 :param cr: database cursor
        :param uid: current user id
4967 :param domain: Search domain, see ``args`` parameter in ``search()``. Defaults to an empty domain that will match all records.
4968 :param fields: List of fields to read, see ``fields`` parameter in ``read()``. Defaults to all fields.
4969 :param offset: Number of records to skip, see ``offset`` parameter in ``search()``. Defaults to 0.
4970 :param limit: Maximum number of records to return, see ``limit`` parameter in ``search()``. Defaults to no limit.
4971 :param order: Columns to sort result, see ``order`` parameter in ``search()``. Defaults to no sort.
4972 :param context: context arguments.
4973 :return: List of dictionaries containing the asked fields.
4974 :rtype: List of dictionaries.
        record_ids = self.search(cr, uid, domain or [], offset=offset, limit=limit, order=order, context=context)
        if not record_ids:
            return []
4981 if fields and fields == ['id']:
4982 # shortcut read if we only want the ids
4983 return [{'id': id} for id in record_ids]
4985 # read() ignores active_test, but it would forward it to any downstream search call
4986 # (e.g. for x2m or function fields), and this is not the desired behavior, the flag
4987 # was presumably only meant for the main search().
4988 # TODO: Move this to read() directly?
4989 read_ctx = dict(context or {})
4990 read_ctx.pop('active_test', None)
4992 result = self.read(cr, uid, record_ids, fields, context=read_ctx)
        if len(result) <= 1:
            return result

        # reorder read
4997 index = dict((r['id'], r) for r in result)
4998 return [index[x] for x in record_ids if x in index]
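    # Usage sketch (hypothetical model and field names):
    #
    #     companies = model.search_read(cr, uid, [('is_company', '=', True)],
    #                                   ['name'], limit=5, order='name')
    #     # -> [{'id': 1, 'name': 'Acme'}, ...] in search order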
5000 def _register_hook(self, cr):
5001 """ stuff to do right after the registry is built """
    @classmethod
    def _patch_method(cls, name, method):
5006 """ Monkey-patch a method for all instances of this model. This replaces
5007 the method called `name` by `method` in the given class.
5008 The original method is then accessible via ``method.origin``, and it
5009 can be restored with :meth:`~._revert_method`.
        Example::

            @api.multi
            def do_write(self, values):
5015 # do stuff, and call the original method
5016 return do_write.origin(self, values)
5018 # patch method write of model
5019 model._patch_method('write', do_write)
5021 # this will call do_write
            records = model.search([...])
            records.write(...)
5025 # restore the original method
5026 model._revert_method('write')
5028 origin = getattr(cls, name)
5029 method.origin = origin
5030 # propagate decorators from origin to method, and apply api decorator
5031 wrapped = api.guess(api.propagate(origin, method))
5032 wrapped.origin = origin
5033 setattr(cls, name, wrapped)
    @classmethod
    def _revert_method(cls, name):
5037 """ Revert the original method called `name` in the given class.
5038 See :meth:`~._patch_method`.
5040 method = getattr(cls, name)
5041 setattr(cls, name, method.origin)
    #
    # Instance creation
    #
    # An instance represents an ordered collection of records in a given
5047 # execution environment. The instance object refers to the environment, and
5048 # the records themselves are represented by their cache dictionary. The 'id'
5049 # of each record is found in its corresponding cache dictionary.
5051 # This design has the following advantages:
5052 # - cache access is direct and thus fast;
5053 # - one can consider records without an 'id' (see new records);
5054 # - the global cache is only an index to "resolve" a record 'id'.
    @classmethod
    def _browse(cls, env, ids):
        """ Create an instance attached to `env`; `ids` is a tuple of record
            ids.
        """
        records = object.__new__(cls)
        records.env = env
        records._ids = ids
        env.prefetch[cls._name].update(ids)
        return records
    @api.v7
    def browse(self, cr, uid, arg=None, context=None):
5070 ids = _normalize_ids(arg)
5071 #assert all(isinstance(id, IdType) for id in ids), "Browsing invalid ids: %s" % ids
5072 return self._browse(Environment(cr, uid, context or {}), ids)
    @api.v8
    def browse(self, arg=None):
5076 """ browse([ids]) -> records
        Returns a recordset for the ids provided as parameter in the current
        environment.
5081 Can take no ids, a single id or a sequence of ids.
5083 ids = _normalize_ids(arg)
5084 #assert all(isinstance(id, IdType) for id in ids), "Browsing invalid ids: %s" % ids
5085 return self._browse(self.env, ids)
5088 # Internal properties, for manipulating the instance's implementation
5093 """ List of actual record ids in this recordset (ignores placeholder
5094 ids for records to create)
5096 return filter(None, list(self._ids))
5098 # backward-compatibility with former browse records
5099 _cr = property(lambda self: self.env.cr)
5100 _uid = property(lambda self: self.env.uid)
5101 _context = property(lambda self: self.env.context)
5104 # Conversion methods
5107 def ensure_one(self):
5108 """ Verifies that the current recorset holds a single record. Raises
5109 an exception otherwise.
5113 raise except_orm("ValueError", "Expected singleton: %s" % self)
5115 def with_env(self, env):
5116 """ Returns a new version of this recordset attached to the provided
5119 :type env: :class:`~openerp.api.Environment`
5121 return self._browse(env, self._ids)
5123 def sudo(self, user=SUPERUSER_ID):
5124 """ sudo([user=SUPERUSER])
        Returns a new version of this recordset attached to the provided
        user.
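            Example sketch::

                records.sudo()          # run as the superuser
                records.sudo(user_id)   # run as the (hypothetical) user_id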
5129 return self.with_env(self.env(user=user))
5131 def with_context(self, *args, **kwargs):
5132 """ with_context([context][, **overrides]) -> records
5134 Returns a new version of this recordset attached to an extended
5137 The extended context is either the provided ``context`` in which
5138 ``overrides`` are merged or the *current* context in which
5139 ``overrides`` are merged e.g.::
5141 # current context is {'key1': True}
5142 r2 = records.with_context({}, key2=True)
5143 # -> r2._context is {'key2': True}
5144 r2 = records.with_context(key2=True)
5145 # -> r2._context is {'key1': True, 'key2': True}
5147 context = dict(args[0] if args else self._context, **kwargs)
5148 return self.with_env(self.env(context=context))
5150 def _convert_to_cache(self, values, update=False, validate=True):
5151 """ Convert the `values` dictionary into cached values.
5153 :param update: whether the conversion is made for updating `self`;
5154 this is necessary for interpreting the commands of *2many fields
5155 :param validate: whether values must be checked
5157 fields = self._fields
        target = self if update else self.browse()
        return {
            name: fields[name].convert_to_cache(value, target, validate=validate)
            for name, value in values.iteritems()
            if name in fields
        }
5165 def _convert_to_write(self, values):
5166 """ Convert the `values` dictionary into the format of :meth:`write`. """
        fields = self._fields
        result = {}
        for name, value in values.iteritems():
            if name in fields:
                value = fields[name].convert_to_write(value)
                if not isinstance(value, NewId):
                    result[name] = value
        return result
5177 # Record traversal and update
5180 def _mapped_func(self, func):
5181 """ Apply function `func` on all records in `self`, and return the
            result as a list or a recordset (if `func` returns recordsets).
5184 vals = [func(rec) for rec in self]
5185 val0 = vals[0] if vals else func(self)
5186 if isinstance(val0, BaseModel):
            return reduce(operator.or_, vals, val0)
        return vals
5190 def mapped(self, func):
5191 """ Apply `func` on all records in `self`, and return the result as a
            list or a recordset (if `func` returns recordsets). In the latter
            case, the order of the returned recordset is arbitrary.
5195 :param func: a function or a dot-separated sequence of field names
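            Example sketch (``name`` and ``partner_id`` are hypothetical
            fields)::

                records.mapped('name')             # list of names
                records.mapped('partner_id')       # recordset of partners
                records.mapped('partner_id.name')  # list of partner names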
        if isinstance(func, basestring):
            recs = self
            for name in func.split('.'):
                recs = recs._mapped_func(operator.itemgetter(name))
            return recs
        else:
            return self._mapped_func(func)
5205 def _mapped_cache(self, name_seq):
5206 """ Same as `~.mapped`, but `name_seq` is a dot-separated sequence of
5207 field names, and only cached values are used.
        recs = self
        for name in name_seq.split('.'):
5211 field = recs._fields[name]
5212 null = field.null(self.env)
            recs = recs.mapped(lambda rec: rec._cache.get(field, null))
        return recs
5216 def filtered(self, func):
5217 """ Select the records in `self` such that `func(rec)` is true, and
5218 return them as a recordset.
5220 :param func: a function or a dot-separated sequence of field names
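            Example sketch (``active`` is a hypothetical boolean field)::

                records.filtered(lambda rec: rec.active)
                records.filtered('active')   # equivalent shorthand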
        if isinstance(func, basestring):
            name = func
            func = lambda rec: filter(None, rec.mapped(name))
5225 return self.browse([rec.id for rec in self if func(rec)])
5227 def sorted(self, key=None):
5228 """ Return the recordset `self` ordered by `key` """
        if key is None:
            return self.search([('id', 'in', self.ids)])
        else:
            return self.browse(map(int, sorted(self, key=key)))
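    # Example sketch (hypothetical field `name`):
    #     records.sorted()                          # model's default _order
    #     records.sorted(key=lambda rec: rec.name)  # explicit sort key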
5234 def update(self, values):
5235 """ Update record `self[0]` with `values`. """
        for name, value in values.iteritems():
            self[name] = value
5240 # New records - represent records that do not exist in the database yet;
5241 # they are used to compute default values and perform onchanges.
5245 def new(self, values={}):
5246 """ new([values]) -> record
5248 Return a new record instance attached to the current environment and
        initialized with the provided ``values``. The record is *not* created
5250 in database, it only exists in memory.
5252 record = self.browse([NewId()])
5253 record._cache.update(record._convert_to_cache(values, update=True))
5255 if record.env.in_onchange:
5256 # The cache update does not set inverse fields, so do it manually.
5257 # This is useful for computing a function field on secondary
5258 # records, if that field depends on the main record.
            for name in values:
                field = self._fields.get(name)
                if field:
                    for invf in field.inverse_fields:
                        invf._update(record[name], record)

        return record
5268 # Dirty flag, to mark records modified (in draft mode)
5273 """ Return whether any record in `self` is dirty. """
5274 dirty = self.env.dirty
5275 return any(record in dirty for record in self)
    @_dirty.setter
    def _dirty(self, value):
5279 """ Mark the records in `self` as dirty. """
        if value:
            map(self.env.dirty.add, self)
        else:
            map(self.env.dirty.discard, self)
5289 def __nonzero__(self):
5290 """ Test whether `self` is nonempty. """
5291 return bool(getattr(self, '_ids', True))
5294 """ Return the size of `self`. """
5295 return len(self._ids)
5298 """ Return an iterator over `self`. """
5299 for id in self._ids:
5300 yield self._browse(self.env, (id,))
5302 def __contains__(self, item):
5303 """ Test whether `item` is a subset of `self` or a field name. """
5304 if isinstance(item, BaseModel):
5305 if self._name == item._name:
5306 return set(item._ids) <= set(self._ids)
5307 raise except_orm("ValueError", "Mixing apples and oranges: %s in %s" % (item, self))
5308 if isinstance(item, basestring):
5309 return item in self._fields
5310 return item in self.ids
5312 def __add__(self, other):
5313 """ Return the concatenation of two recordsets. """
5314 if not isinstance(other, BaseModel) or self._name != other._name:
5315 raise except_orm("ValueError", "Mixing apples and oranges: %s + %s" % (self, other))
5316 return self.browse(self._ids + other._ids)
5318 def __sub__(self, other):
5319 """ Return the recordset of all the records in `self` that are not in `other`. """
5320 if not isinstance(other, BaseModel) or self._name != other._name:
5321 raise except_orm("ValueError", "Mixing apples and oranges: %s - %s" % (self, other))
5322 other_ids = set(other._ids)
5323 return self.browse([id for id in self._ids if id not in other_ids])
5325 def __and__(self, other):
5326 """ Return the intersection of two recordsets.
5327 Note that recordset order is not preserved.
5329 if not isinstance(other, BaseModel) or self._name != other._name:
5330 raise except_orm("ValueError", "Mixing apples and oranges: %s & %s" % (self, other))
5331 return self.browse(set(self._ids) & set(other._ids))
5333 def __or__(self, other):
5334 """ Return the union of two recordsets.
5335 Note that recordset order is not preserved.
5337 if not isinstance(other, BaseModel) or self._name != other._name:
5338 raise except_orm("ValueError", "Mixing apples and oranges: %s | %s" % (self, other))
5339 return self.browse(set(self._ids) | set(other._ids))
5341 def __eq__(self, other):
5342 """ Test whether two recordsets are equivalent (up to reordering). """
        if not isinstance(other, BaseModel):
            if other:
                _logger.warning("Comparing apples and oranges: %s == %s", self, other)
            return False
        return self._name == other._name and set(self._ids) == set(other._ids)
5349 def __ne__(self, other):
5350 return not self == other
5352 def __lt__(self, other):
5353 if not isinstance(other, BaseModel) or self._name != other._name:
5354 raise except_orm("ValueError", "Mixing apples and oranges: %s < %s" % (self, other))
5355 return set(self._ids) < set(other._ids)
5357 def __le__(self, other):
5358 if not isinstance(other, BaseModel) or self._name != other._name:
5359 raise except_orm("ValueError", "Mixing apples and oranges: %s <= %s" % (self, other))
5360 return set(self._ids) <= set(other._ids)
5362 def __gt__(self, other):
5363 if not isinstance(other, BaseModel) or self._name != other._name:
5364 raise except_orm("ValueError", "Mixing apples and oranges: %s > %s" % (self, other))
5365 return set(self._ids) > set(other._ids)
5367 def __ge__(self, other):
5368 if not isinstance(other, BaseModel) or self._name != other._name:
5369 raise except_orm("ValueError", "Mixing apples and oranges: %s >= %s" % (self, other))
5370 return set(self._ids) >= set(other._ids)
5376 return "%s%s" % (self._name, getattr(self, '_ids', ""))
5378 def __unicode__(self):
5379 return unicode(str(self))
    def __hash__(self):
        if hasattr(self, '_ids'):
            return hash((self._name, frozenset(self._ids)))
        else:
            return hash(self._name)
5389 def __getitem__(self, key):
5390 """ If `key` is an integer or a slice, return the corresponding record
5391 selection as an instance (attached to `self.env`).
5392 Otherwise read the field `key` of the first record in `self`.
5396 inst = model.search(dom) # inst is a recordset
5397 r4 = inst[3] # fourth record in inst
5398 rs = inst[10:20] # subset of inst
5399 nm = rs['name'] # name of first record in inst
5401 if isinstance(key, basestring):
5402 # important: one must call the field's getter
5403 return self._fields[key].__get__(self, type(self))
5404 elif isinstance(key, slice):
5405 return self._browse(self.env, self._ids[key])
        else:
            return self._browse(self.env, (self._ids[key],))
5409 def __setitem__(self, key, value):
5410 """ Assign the field `key` to `value` in record `self`. """
5411 # important: one must call the field's setter
5412 return self._fields[key].__set__(self, value)
5415 # Cache and recomputation management
5420 """ Return the cache of `self`, mapping field names to values. """
5421 return RecordCache(self)
5424 def _in_cache_without(self, field):
5425 """ Make sure `self` is present in cache (for prefetching), and return
5426 the records of model `self` in cache that have no value for `field`
5427 (:class:`Field` instance).
        env = self.env
        prefetch_ids = env.prefetch[self._name]
5431 prefetch_ids.update(self._ids)
5432 ids = filter(None, prefetch_ids - set(env.cache[field]))
5433 return self.browse(ids)
5437 """ Clear the records cache.
5440 The record cache is automatically invalidated.
5442 self.invalidate_cache()
    @api.model
    def invalidate_cache(self, fnames=None, ids=None):
5446 """ Invalidate the record caches after some records have been modified.
5447 If both `fnames` and `ids` are ``None``, the whole cache is cleared.
5449 :param fnames: the list of modified fields, or ``None`` for all fields
5450 :param ids: the list of modified record ids, or ``None`` for all
        if fnames is None:
            if ids is None:
                return self.env.invalidate_all()
            fields = self._fields.values()
        else:
            fields = map(self._fields.__getitem__, fnames)
5459 # invalidate fields and inverse fields, too
5460 spec = [(f, ids) for f in fields] + \
5461 [(invf, None) for f in fields for invf in f.inverse_fields]
5462 self.env.invalidate(spec)
5465 def modified(self, fnames):
5466 """ Notify that fields have been modified on `self`. This invalidates
5467 the cache, and prepares the recomputation of stored function fields
5468 (new-style fields only).
5470 :param fnames: iterable of field names that have been modified on
5473 # each field knows what to invalidate and recompute
        spec = []
        for fname in fnames:
5476 spec += self._fields[fname].modified(self)
        cached_fields = {
            field
            for env in self.env.all
            for field in env.cache
        }
5483 # invalidate non-stored fields.function which are currently cached
5484 spec += [(f, None) for f in self.pool.pure_function_fields
5485 if f in cached_fields]
5487 self.env.invalidate(spec)
5489 def _recompute_check(self, field):
5490 """ If `field` must be recomputed on some record in `self`, return the
5491 corresponding records that must be recomputed.
5493 for env in [self.env] + list(iter(self.env.all)):
5494 if env.todo.get(field) and env.todo[field] & self:
5495 return env.todo[field]
5497 def _recompute_todo(self, field):
5498 """ Mark `field` to be recomputed. """
5499 todo = self.env.todo
5500 todo[field] = (todo.get(field) or self.browse()) | self
5502 def _recompute_done(self, field):
5503 """ Mark `field` as being recomputed. """
5504 todo = self.env.todo
        if field in todo:
            recs = todo.pop(field) - self
            if recs:
                todo[field] = recs
5511 def recompute(self):
5512 """ Recompute stored function fields. The fields and records to
5513 recompute have been determined by method :meth:`modified`.
        for env in list(iter(self.env.all)):
            while env.todo:
                field, recs = next(env.todo.iteritems())
                # evaluate the fields to recompute, and save them to database
                for rec, rec1 in zip(recs, recs.with_context(recompute=False)):
                    try:
                        values = rec._convert_to_write({
                            f.name: rec[f.name] for f in field.computed_fields
                        })
                        rec1._write(values)
                    except MissingError:
                        pass
                # mark the computed fields as done
                map(recs._recompute_done, field.computed_fields)
5531 # Generic onchange method
    @api.model
    def _has_onchange(self, field, other_fields):
5535 """ Return whether `field` should trigger an onchange event in the
5536 presence of `other_fields`.
5538 # test whether self has an onchange method for field, or field is a
5539 # dependency of any field in other_fields
5540 return field.name in self._onchange_methods or \
5541 any(dep in other_fields for dep in field.dependents)
    @api.model
    def _onchange_spec(self, view_info=None):
5545 """ Return the onchange spec from a view description; if not given, the
5546 result of ``self.fields_view_get()`` is used.
        result = {}

        # for traversing the XML arch and populating result
5551 def process(node, info, prefix):
5552 if node.tag == 'field':
5553 name = node.attrib['name']
5554 names = "%s.%s" % (prefix, name) if prefix else name
5555 if not result.get(names):
5556 result[names] = node.attrib.get('on_change')
5557 # traverse the subviews included in relational fields
5558 for subinfo in info['fields'][name].get('views', {}).itervalues():
                    process(etree.fromstring(subinfo['arch']), subinfo, names)
            else:
                for child in node:
                    process(child, info, prefix)
5564 if view_info is None:
5565 view_info = self.fields_view_get()
        process(etree.fromstring(view_info['arch']), view_info, '')
        return result
5569 def _onchange_eval(self, field_name, onchange, result):
5570 """ Apply onchange method(s) for field `field_name` with spec `onchange`
5571 on record `self`. Value assignments are applied on `self`, while
5572 domain and warning messages are put in dictionary `result`.
        onchange = onchange.strip()

        # onchange V8
        if onchange in ("1", "true"):
            for method in self._onchange_methods.get(field_name, ()):
                method_res = method(self)
                if not method_res:
                    continue
5582 if 'domain' in method_res:
5583 result.setdefault('domain', {}).update(method_res['domain'])
5584 if 'warning' in method_res:
                    result['warning'] = method_res['warning']
            return

        # onchange V7
        match = onchange_v7.match(onchange)
        if match:
            method, params = match.groups()
5593 # evaluate params -> tuple
5594 global_vars = {'context': self._context, 'uid': self._uid}
5595 if self._context.get('field_parent'):
5596 class RawRecord(object):
5597 def __init__(self, record):
5598 self._record = record
5599 def __getattr__(self, name):
5600 field = self._record._fields[name]
5601 value = self._record[name]
5602 return field.convert_to_onchange(value)
5603 record = self[self._context['field_parent']]
                global_vars['parent'] = RawRecord(record)
            field_vars = {
                key: self._fields[key].convert_to_onchange(val)
                for key, val in self._cache.iteritems()
            }
            params = eval("[%s]" % params, global_vars, field_vars)
5611 # call onchange method
5612 args = (self._cr, self._uid, self._origin.ids) + tuple(params)
5613 method_res = getattr(self._model, method)(*args)
            if not isinstance(method_res, dict):
                return
5616 if 'value' in method_res:
5617 method_res['value'].pop('id', None)
5618 self.update(self._convert_to_cache(method_res['value'], validate=False))
5619 if 'domain' in method_res:
5620 result.setdefault('domain', {}).update(method_res['domain'])
5621 if 'warning' in method_res:
5622 result['warning'] = method_res['warning']
    @api.multi
    def onchange(self, values, field_name, field_onchange):
5626 """ Perform an onchange on the given field.
5628 :param values: dictionary mapping field names to values, giving the
5629 current state of modification
        :param field_name: name of the modified field
        :param field_onchange: dictionary mapping field names to their
            on_change attribute
        """
        env = self.env

        if field_name and field_name not in self._fields:
            return {}
5639 # determine subfields for field.convert_to_write() below
        secondary = []
        subfields = defaultdict(set)
5642 for dotname in field_onchange:
            if '.' in dotname:
                secondary.append(dotname)
5645 name, subname = dotname.split('.')
5646 subfields[name].add(subname)
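        # e.g. a (hypothetical) spec key 'line_ids.product_id' yields
        # secondary = ['line_ids.product_id'] and subfields = {'line_ids': {'product_id'}}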
5648 # create a new record with values, and attach `self` to it
5649 with env.do_in_onchange():
5650 record = self.new(values)
5651 values = dict(record._cache)
5652 # attach `self` with a different context (for cache consistency)
5653 record._origin = self.with_context(__onchange=True)
5655 # determine which field should be triggered an onchange
        todo = set([field_name]) if field_name else set(values)
        done = set()

        # dummy assignment: trigger invalidations on the record
        for name in todo:
            value = record[name]
5662 field = self._fields[name]
5663 if not field_name and field.type == 'many2one' and field.delegate and not value:
                # do not nullify all fields of parent record for new records
                continue
            record[name] = value
        result = {'value': {}}

        while todo:
            name = todo.pop()
            if name in done:
                continue
            done.add(name)

            with env.do_in_onchange():
5677 # apply field-specific onchange methods
5678 if field_onchange.get(name):
5679 record._onchange_eval(name, field_onchange[name], result)
5681 # force re-evaluation of function fields on secondary records
5682 for field_seq in secondary:
5683 record.mapped(field_seq)
5685 # determine which fields have been modified
5686 for name, oldval in values.iteritems():
5687 newval = record[name]
5688 if newval != oldval or getattr(newval, '_dirty', False):
5689 field = self._fields[name]
5690 result['value'][name] = field.convert_to_write(
                        newval, record._origin, subfields.get(name),
                    )
                    todo.add(name)
5695 # At the moment, the client does not support updates on a *2many field
5696 # while this one is modified by the user.
5697 if field_name and self._fields[field_name].type in ('one2many', 'many2many'):
            result['value'].pop(field_name, None)

        return result
5703 class RecordCache(MutableMapping):
5704 """ Implements a proxy dictionary to read/update the cache of a record.
5705 Upon iteration, it looks like a dictionary mapping field names to
5706 values. However, fields may be used as keys as well.
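        For instance, ``record._cache['name']`` returns the cached value of
        field ``name``, and ``'name' in record._cache`` tests whether a
        regular (non-special) value is cached for it.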
5708 def __init__(self, records):
5709 self._recs = records
5711 def contains(self, field):
5712 """ Return whether `records[0]` has a value for `field` in cache. """
5713 if isinstance(field, basestring):
5714 field = self._recs._fields[field]
5715 return self._recs.id in self._recs.env.cache[field]
5717 def __contains__(self, field):
5718 """ Return whether `records[0]` has a regular value for `field` in cache. """
5719 if isinstance(field, basestring):
5720 field = self._recs._fields[field]
5721 dummy = SpecialValue(None)
5722 value = self._recs.env.cache[field].get(self._recs.id, dummy)
5723 return not isinstance(value, SpecialValue)
5725 def __getitem__(self, field):
5726 """ Return the cached value of `field` for `records[0]`. """
5727 if isinstance(field, basestring):
5728 field = self._recs._fields[field]
5729 value = self._recs.env.cache[field][self._recs.id]
5730 return value.get() if isinstance(value, SpecialValue) else value
5732 def __setitem__(self, field, value):
5733 """ Assign the cached value of `field` for all records in `records`. """
5734 if isinstance(field, basestring):
5735 field = self._recs._fields[field]
5736 values = dict.fromkeys(self._recs._ids, value)
5737 self._recs.env.cache[field].update(values)
5739 def update(self, *args, **kwargs):
5740 """ Update the cache of all records in `records`. If the argument is a
5741 `SpecialValue`, update all fields (except "magic" columns).
5743 if args and isinstance(args[0], SpecialValue):
5744 values = dict.fromkeys(self._recs._ids, args[0])
            for name, field in self._recs._fields.iteritems():
                if name != 'id':
                    self._recs.env.cache[field].update(values)
        else:
            return super(RecordCache, self).update(*args, **kwargs)
5751 def __delitem__(self, field):
5752 """ Remove the cached value of `field` for all `records`. """
5753 if isinstance(field, basestring):
5754 field = self._recs._fields[field]
5755 field_cache = self._recs.env.cache[field]
5756 for id in self._recs._ids:
5757 field_cache.pop(id, None)
5760 """ Iterate over the field names with a regular value in cache. """
5761 cache, id = self._recs.env.cache, self._recs.id
5762 dummy = SpecialValue(None)
5763 for name, field in self._recs._fields.iteritems():
            if name != 'id' and not isinstance(cache[field].get(id, dummy), SpecialValue):
                yield name

    def __len__(self):
        """ Return the number of fields with a regular value in cache. """
5769 return sum(1 for name in self)
5771 class Model(BaseModel):
5772 """Main super-class for regular database-persisted OpenERP models.
    OpenERP models are created by inheriting from this class::

        class user(Model):
            ...

    The system will later instantiate the class once per database (on
    which the class' module is installed).
    """
    _auto = True
    _register = False # not visible in ORM registry, meant to be python-inherited only
5784 _transient = False # True in a TransientModel
5786 class TransientModel(BaseModel):
5787 """Model super-class for transient records, meant to be temporarily
    persisted, and regularly vacuum-cleaned.
5790 A TransientModel has a simplified access rights management,
5791 all users can create new records, and may only access the
5792 records they created. The super-user has unrestricted access
5793 to all TransientModel records.
    """
    _auto = True
    _register = False # not visible in ORM registry, meant to be python-inherited only
    _transient = True
5799 class AbstractModel(BaseModel):
5800 """Abstract Model super-class for creating an abstract class meant to be
5801 inherited by regular models (Models or TransientModels) but not meant to
5802 be usable on its own, or persisted.
5804 Technical note: we don't want to make AbstractModel the super-class of
5805 Model or BaseModel because it would not make sense to put the main
5806 definition of persistence methods such as create() in it, and still we
5807 should be able to override them within an AbstractModel.
5809 _auto = False # don't create any database backend for AbstractModels
5810 _register = False # not visible in ORM registry, meant to be python-inherited only
5813 def itemgetter_tuple(items):
5814 """ Fixes itemgetter inconsistency (useful in some cases) of not returning
5815 a tuple if len(items) == 1: always returns an n-tuple where n = len(items)
5820 return lambda gettable: (gettable[items[0]],)
5821 return operator.itemgetter(*items)
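# For example: itemgetter_tuple(['a'])({'a': 1}) == (1,), whereas
# operator.itemgetter('a')({'a': 1}) == 1.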
5823 def convert_pgerror_23502(model, fields, info, e):
    m = re.match(r'^null value in column "(?P<field>\w+)" violates '
                 r'not-null constraint\n',
                 str(e))
    field_name = m and m.group('field')
5828 if not m or field_name not in fields:
5829 return {'message': unicode(e)}
5830 message = _(u"Missing required value for the field '%s'.") % field_name
    field = fields.get(field_name)
    if field:
        message = _(u"Missing required value for the field '%s' (%s)") % (field['string'], field_name)
    return {
        'message': message,
        'field': field_name,
    }
5839 def convert_pgerror_23505(model, fields, info, e):
    m = re.match(r'^duplicate key (?P<field>\w+) violates unique constraint',
                 str(e))
    field_name = m and m.group('field')
5843 if not m or field_name not in fields:
5844 return {'message': unicode(e)}
5845 message = _(u"The value for the field '%s' already exists.") % field_name
    field = fields.get(field_name)
    if field:
        message = _(u"%s This might be '%s' in the current model, or a field "
                    u"of the same name in an o2m.") % (message, field['string'])
    return {
        'message': message,
        'field': field_name,
    }
5855 PGERROR_TO_OE = defaultdict(
5856 # shape of mapped converters
5857 lambda: (lambda model, fvg, info, pgerror: {'message': unicode(pgerror)}), {
5858 # not_null_violation
5859 '23502': convert_pgerror_23502,
5860 # unique constraint error
    # unique constraint error
    '23505': convert_pgerror_23505,
})
5864 def _normalize_ids(arg, atoms={int, long, str, unicode, NewId}):
5865 """ Normalizes the ids argument for ``browse`` (v7 and v8) to a tuple.
5867 Various implementations were tested on the corpus of all browse() calls
5868 performed during a full crawler run (after having installed all website_*
5869 modules) and this one was the most efficient overall.
5871 A possible bit of correctness was sacrificed by not doing any test on
    Iterable and just assuming that any non-atomic type was an iterable of
    ids.
    """
    # much of the corpus is falsy objects (empty list, tuple or set, None)
    if not arg:
        return ()
5881 # `type in set` is significantly faster (because more restrictive) than
5882 # isinstance(arg, set) or issubclass(type, set); and for new-style classes
5883 # obj.__class__ is equivalent to but faster than type(obj). Not relevant
5884 # (and looks much worse) in most cases, but over millions of calls it
5885 # does have a very minor effect.
    if arg.__class__ in atoms:
        return arg,
    return tuple(arg)
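# For example: _normalize_ids(None) == (), _normalize_ids(7) == (7,),
# and _normalize_ids([1, 2]) == (1, 2).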
5891 # keep those imports here to avoid dependency cycle errors
5892 from .osv import expression
5893 from .fields import Field, SpecialValue, FailedValue
5895 # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: