1 # -*- coding: utf-8 -*-
2 ##############################################################################
4 # OpenERP, Open Source Management Solution
5 # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
7 # This program is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU Affero General Public License as
9 # published by the Free Software Foundation, either version 3 of the
10 # License, or (at your option) any later version.
12 # This program is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU Affero General Public License for more details.
17 # You should have received a copy of the GNU Affero General Public License
18 # along with this program. If not, see <http://www.gnu.org/licenses/>.
20 ##############################################################################
24 Object Relational Mapping module:
25 * Hierarchical structure
26 * Constraints consistency and validation
27 * Object metadata depends on its status
28 * Optimised processing by complex query (multiple actions at once)
29 * Default field values
30 * Permissions optimisation
31 * Persistent object: PostgreSQL DB
33 * Multi-level caching system
34 * Two different inheritance mechanisms
35 * Rich set of field types:
36 - classical (varchar, integer, boolean, ...)
37 - relational (one2many, many2one, many2many)
52 from collections import defaultdict, MutableMapping
53 from inspect import getmembers
56 import dateutil.relativedelta
58 from lxml import etree
61 from . import SUPERUSER_ID
64 from .api import Environment
65 from .exceptions import except_orm, AccessError, MissingError, ValidationError
66 from .osv import fields
67 from .osv.query import Query
68 from .tools import lazy_property, ormcache
69 from .tools.config import config
70 from .tools.misc import CountingStream, DEFAULT_SERVER_DATETIME_FORMAT, DEFAULT_SERVER_DATE_FORMAT
71 from .tools.safe_eval import safe_eval as eval
72 from .tools.translate import _
74 _logger = logging.getLogger(__name__)
75 _schema = logging.getLogger(__name__ + '.schema')
77 regex_order = re.compile('^( *([a-z0-9:_]+|"[a-z0-9:_]+")( *desc| *asc)?( *, *|))+$', re.I)
78 regex_object_name = re.compile(r'^[a-z0-9_.]+$')
79 onchange_v7 = re.compile(r"^(\w+)\((.*)\)$")
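# Illustrative sketch (not part of the original module): hypothetical values
# showing what the three patterns above are meant to accept.
#   >>> bool(regex_order.match('name desc, id'))
#   True
#   >>> bool(regex_object_name.match('res.partner'))
#   True
#   >>> onchange_v7.match('onchange_partner_id(partner_id, price)').groups()
#   ('onchange_partner_id', 'partner_id, price')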
81 AUTOINIT_RECALCULATE_STORED_FIELDS = 1000
84 def check_object_name(name):
85 """ Check if the given name is a valid openerp object name.
87 The _name attribute in osv and osv_memory objects is subject to
88 some restrictions. This function returns True or False depending on
89 whether the given name is allowed.
91 TODO: this is an approximation. The goal of this approximation
92 is to disallow uppercase characters (in some places we quote
93 table/column names and in others not, which leads to errors such as
96 psycopg2.ProgrammingError: relation "xxx" does not exist).
98 The same restriction should apply to both osv and osv_memory
99 objects for consistency.
102 if regex_object_name.match(name) is None:
106 def raise_on_invalid_object_name(name):
107 if not check_object_name(name):
108 msg = "The _name attribute %s is not valid." % name
110 raise except_orm('ValueError', msg)
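# Illustrative sketch (not part of the original module), assuming the elided
# body of check_object_name() returns the boolean result of the regex match:
#   >>> check_object_name('res.partner')
#   True
#   >>> check_object_name('Res.Partner')   # uppercase is rejected
#   False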
112 POSTGRES_CONFDELTYPES = {
120 def intersect(la, lb):
121 return filter(lambda x: x in lb, la)
124 """ Test whether functions `f` and `g` are identical or have the same name """
125 return f == g or getattr(f, '__name__', 0) == getattr(g, '__name__', 1)
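# Illustrative sketch (not part of the original module): intersect() preserves
# the order of `la`, and same_name() also matches two distinct functions that
# merely share a __name__ (e.g. an overridden constraint method).
#   >>> intersect(['a', 'b', 'c'], ['c', 'a'])
#   ['a', 'c']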
127 def fix_import_export_id_paths(fieldname):
129 Fixes the id fields in import and exports, and splits field paths
132 :param str fieldname: name of the field to import/export
133 :return: split field name
136 fixed_db_id = re.sub(r'([^/])\.id', r'\1/.id', fieldname)
137 fixed_external_id = re.sub(r'([^/]):id', r'\1/id', fixed_db_id)
138 return fixed_external_id.split('/')
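# Illustrative sketch (not part of the original module): typical import/export
# column headers and the field paths they are split into.
#   >>> fix_import_export_id_paths('order_line/product_id:id')
#   ['order_line', 'product_id', 'id']
#   >>> fix_import_export_id_paths('partner_id.id')
#   ['partner_id', '.id']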
140 def pg_varchar(size=0):
141 """ Returns the VARCHAR declaration for the provided size:
143 * If no size (or an empty or negative size is provided) return an
145 * Otherwise return a VARCHAR(n)
147 :type int size: varchar size, optional
151 if not isinstance(size, int):
152 raise TypeError("VARCHAR parameter should be an int, got %s"
155 return 'VARCHAR(%d)' % size
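# Illustrative sketch (not part of the original module), assuming the elided
# branch returns an unbounded VARCHAR when no positive size is given:
#   >>> pg_varchar(16)
#   'VARCHAR(16)'
#   >>> pg_varchar()
#   'VARCHAR'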
158 FIELDS_TO_PGTYPES = {
159 fields.boolean: 'bool',
160 fields.integer: 'int4',
164 fields.datetime: 'timestamp',
165 fields.binary: 'bytea',
166 fields.many2one: 'int4',
167 fields.serialized: 'text',
170 def get_pg_type(f, type_override=None):
172 :param fields._column f: field to get a Postgres type for
173 :param type type_override: use the provided type for dispatching instead of the field's own type
174 :returns: (postgres_identification_type, postgres_type_specification)
177 field_type = type_override or type(f)
179 if field_type in FIELDS_TO_PGTYPES:
180 pg_type = (FIELDS_TO_PGTYPES[field_type], FIELDS_TO_PGTYPES[field_type])
181 elif issubclass(field_type, fields.float):
183 pg_type = ('numeric', 'NUMERIC')
185 pg_type = ('float8', 'DOUBLE PRECISION')
186 elif issubclass(field_type, (fields.char, fields.reference)):
187 pg_type = ('varchar', pg_varchar(f.size))
188 elif issubclass(field_type, fields.selection):
189 if (isinstance(f.selection, list) and isinstance(f.selection[0][0], int))\
190 or getattr(f, 'size', None) == -1:
191 pg_type = ('int4', 'INTEGER')
193 pg_type = ('varchar', pg_varchar(getattr(f, 'size', None)))
194 elif issubclass(field_type, fields.function):
195 if f._type == 'selection':
196 pg_type = ('varchar', pg_varchar())
198 pg_type = get_pg_type(f, getattr(fields, f._type))
200 _logger.warning('%s type not supported!', field_type)
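# Illustrative sketch (not part of the original module, hypothetical columns):
#   >>> get_pg_type(fields.boolean())
#   ('bool', 'bool')
#   >>> get_pg_type(fields.char(size=16))
#   ('varchar', 'VARCHAR(16)')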
206 class MetaModel(api.Meta):
207 """ Metaclass for the models.
209 This class is used as the metaclass for the class :class:`BaseModel` to
210 discover the models defined in a module (without instantiating them).
211 If the automatic discovery is not needed, it is possible to set the model's
212 ``_register`` attribute to False.
216 module_to_models = {}
218 def __init__(self, name, bases, attrs):
219 if not self._register:
220 self._register = True
221 super(MetaModel, self).__init__(name, bases, attrs)
224 if not hasattr(self, '_module'):
225 # The (OpenERP) module name can be in the `openerp.addons` namespace
226 # or not. For instance, module `sale` can be imported as
227 # `openerp.addons.sale` (the right way) or `sale` (for backward compatibility).
229 module_parts = self.__module__.split('.')
230 if len(module_parts) > 2 and module_parts[:2] == ['openerp', 'addons']:
231 module_name = self.__module__.split('.')[2]
233 module_name = self.__module__.split('.')[0]
234 self._module = module_name
236 # Remember which models to instantiate for this module.
238 self.module_to_models.setdefault(self._module, []).append(self)
240 # transform columns into new-style fields (enables field inheritance)
241 for name, column in self._columns.iteritems():
242 if not hasattr(self, name):
243 setattr(self, name, column.to_field())
247 """ Pseudo-ids for new records. """
248 def __nonzero__(self):
251 IdType = (int, long, basestring, NewId)
254 # maximum number of prefetched records
257 # special columns automatically created by the ORM
258 LOG_ACCESS_COLUMNS = ['create_uid', 'create_date', 'write_uid', 'write_date']
259 MAGIC_COLUMNS = ['id'] + LOG_ACCESS_COLUMNS
261 class BaseModel(object):
262 """ Base class for OpenERP models.
264 OpenERP models are created by inheriting from this class' subclasses:
266 * :class:`Model` for regular database-persisted models
268 * :class:`TransientModel` for temporary data, stored in the database but
269 automatically vacuumed every so often
271 * :class:`AbstractModel` for abstract super classes meant to be shared by
272 multiple inheriting models
274 The system automatically instantiates every model once per database. Those
275 instances represent the available models on each database, and depend on
276 which modules are installed on that database. The actual class of each
277 instance is built from the Python classes that create and inherit from the corresponding model.
280 Every model instance is a "recordset", i.e., an ordered collection of
281 records of the model. Recordsets are returned by methods like
282 :meth:`~.browse`, :meth:`~.search`, or field accesses. Records have no
283 explicit representation: a record is represented as a recordset of one
286 To create a class that should not be instantiated, the _register class
287 attribute may be set to False.
289 __metaclass__ = MetaModel
290 _auto = True # create database backend
291 _register = False # Set to false if the model shouldn't be automatically discovered.
298 _parent_name = 'parent_id'
299 _parent_store = False
300 _parent_order = False
306 _translate = True # set to False to disable translations export for this model
308 # dict of {field:method}, with method returning the (name_get of records, {id: fold})
309 # to include in the _read_group, if grouped on this field
313 _transient = False # True in a TransientModel
316 # { 'parent_model': 'm2o_field', ... }
319 # Mapping from inherits'd field name to 4-tuple (m, r, f, n) where m is the
320 # model from which it is inherits'd, r is the (local) field towards m, f
321 # is the _column object itself, and n is the original (i.e. top-most)
324 # { 'field_name': ('parent_model', 'm2o_field_to_reach_parent',
325 # field_column_obj, original_parent_model), ... }
328 # Mapping field name/column_info object
329 # This is similar to _inherit_fields but:
330 # 1. includes self fields,
331 # 2. uses column_info instead of a triple.
336 _sql_constraints = []
338 # model dependencies, for models backed up by sql views:
339 # {model_name: field_names, ...}
342 CONCURRENCY_CHECK_FIELD = '__last_update'
344 def log(self, cr, uid, id, message, secondary=False, context=None):
345 return _logger.warning("log() is deprecated. Please use the OpenChatter notification system instead of the res.log mechanism.")
347 def view_init(self, cr, uid, fields_list, context=None):
348 """Override this method to do specific things when a view on the object is opened."""
351 def _field_create(self, cr, context=None):
352 """ Create entries in ir_model_fields for all the model's fields.
354 If necessary, also create an entry in ir_model, and if called from the
355 modules loading scheme (by receiving 'module' in the context), also
356 create entries in ir_model_data (for the model and the fields).
358 - create an entry in ir_model (if there is not already one),
359 - create an entry in ir_model_data (if there is not already one, and if
360 'module' is in the context),
361 - update ir_model_fields with the fields found in _columns
362 (TODO there is some redundancy as _columns is updated from
363 ir_model_fields in __init__).
368 cr.execute("SELECT id FROM ir_model WHERE model=%s", (self._name,))
370 cr.execute('SELECT nextval(%s)', ('ir_model_id_seq',))
371 model_id = cr.fetchone()[0]
372 cr.execute("INSERT INTO ir_model (id,model, name, info,state) VALUES (%s, %s, %s, %s, %s)", (model_id, self._name, self._description, self.__doc__, 'base'))
374 model_id = cr.fetchone()[0]
375 if 'module' in context:
376 name_id = 'model_'+self._name.replace('.', '_')
377 cr.execute('select * from ir_model_data where name=%s and module=%s', (name_id, context['module']))
379 cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, (now() at time zone 'UTC'), (now() at time zone 'UTC'), %s, %s, %s)", \
380 (name_id, context['module'], 'ir.model', model_id)
383 cr.execute("SELECT * FROM ir_model_fields WHERE model=%s", (self._name,))
385 for rec in cr.dictfetchall():
386 cols[rec['name']] = rec
388 ir_model_fields_obj = self.pool.get('ir.model.fields')
390 # sparse field should be created at the end, as it depends on its serialized field already existing
391 model_fields = sorted(self._columns.items(), key=lambda x: 1 if x[1]._type == 'sparse' else 0)
392 for (k, f) in model_fields:
394 'model_id': model_id,
397 'field_description': f.string,
399 'relation': f._obj or '',
400 'select_level': tools.ustr(int(f.select)),
401 'readonly': (f.readonly and 1) or 0,
402 'required': (f.required and 1) or 0,
403 'selectable': (f.selectable and 1) or 0,
404 'translate': (f.translate and 1) or 0,
405 'relation_field': f._fields_id if isinstance(f, fields.one2many) else '',
406 'serialization_field_id': None,
408 if getattr(f, 'serialization_field', None):
409 # resolve link to serialization_field if specified by name
410 serialization_field_id = ir_model_fields_obj.search(cr, SUPERUSER_ID, [('model','=',vals['model']), ('name', '=', f.serialization_field)])
411 if not serialization_field_id:
412 raise except_orm(_('Error'), _("Serialization field `%s` not found for sparse field `%s`!") % (f.serialization_field, k))
413 vals['serialization_field_id'] = serialization_field_id[0]
415 # When it's a custom field, it does not contain f.select
416 if context.get('field_state', 'base') == 'manual':
417 if context.get('field_name', '') == k:
418 vals['select_level'] = context.get('select', '0')
419 # set the value so the problem does NOT occur next time
421 vals['select_level'] = cols[k]['select_level']
424 cr.execute('select nextval(%s)', ('ir_model_fields_id_seq',))
425 id = cr.fetchone()[0]
427 cr.execute("""INSERT INTO ir_model_fields (
428 id, model_id, model, name, field_description, ttype,
429 relation,state,select_level,relation_field, translate, serialization_field_id
431 %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s
433 id, vals['model_id'], vals['model'], vals['name'], vals['field_description'], vals['ttype'],
434 vals['relation'], 'base',
435 vals['select_level'], vals['relation_field'], bool(vals['translate']), vals['serialization_field_id']
437 if 'module' in context:
438 name1 = 'field_' + self._table + '_' + k
439 cr.execute("select name from ir_model_data where name=%s", (name1,))
441 name1 = name1 + "_" + str(id)
442 cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, (now() at time zone 'UTC'), (now() at time zone 'UTC'), %s, %s, %s)", \
443 (name1, context['module'], 'ir.model.fields', id)
446 for key, val in vals.items():
447 if cols[k][key] != vals[key]:
448 cr.execute('update ir_model_fields set field_description=%s where model=%s and name=%s', (vals['field_description'], vals['model'], vals['name']))
449 cr.execute("""UPDATE ir_model_fields SET
450 model_id=%s, field_description=%s, ttype=%s, relation=%s,
451 select_level=%s, readonly=%s ,required=%s, selectable=%s, relation_field=%s, translate=%s, serialization_field_id=%s
453 model=%s AND name=%s""", (
454 vals['model_id'], vals['field_description'], vals['ttype'],
456 vals['select_level'], bool(vals['readonly']), bool(vals['required']), bool(vals['selectable']), vals['relation_field'], bool(vals['translate']), vals['serialization_field_id'], vals['model'], vals['name']
459 self.invalidate_cache(cr, SUPERUSER_ID)
462 def _add_field(cls, name, field):
463 """ Add the given `field` under the given `name` in the class """
464 field.set_class_name(cls, name)
466 # add field in _fields (for reflection)
467 cls._fields[name] = field
469 # add field as an attribute, unless another kind of value already exists
470 if isinstance(getattr(cls, name, field), Field):
471 setattr(cls, name, field)
473 _logger.warning("In model %r, member %r is not a field", cls._name, name)
476 cls._columns[name] = field.to_column()
478 # remove potential column that may be overridden by field
479 cls._columns.pop(name, None)
482 def _add_magic_fields(cls):
483 """ Introduce magic fields on the current class
485 * id is a "normal" field (with a specific getter)
486 * create_uid, create_date, write_uid and write_date have become
488 * $CONCURRENCY_CHECK_FIELD is a computed field with its computing
489 method defined dynamically. Uses ``str(datetime.datetime.utcnow())``
490 to get the same structure as the previous
491 ``(now() at time zone 'UTC')::timestamp``::
493 # select (now() at time zone 'UTC')::timestamp;
495 ----------------------------
496 2013-06-18 08:30:37.292809
498 >>> str(datetime.datetime.utcnow())
499 '2013-06-18 08:31:32.821177'
501 def add(name, field):
502 """ add `field` with the given `name` if it does not exist yet """
503 if name not in cls._columns and name not in cls._fields:
504 cls._add_field(name, field)
509 # this field 'id' must override any other column or field
510 cls._add_field('id', fields.Id(automatic=True))
512 add('display_name', fields.Char(string='Display Name', automatic=True,
513 compute='_compute_display_name'))
516 add('create_uid', fields.Many2one('res.users', string='Created by', automatic=True))
517 add('create_date', fields.Datetime(string='Created on', automatic=True))
518 add('write_uid', fields.Many2one('res.users', string='Last Updated by', automatic=True))
519 add('write_date', fields.Datetime(string='Last Updated on', automatic=True))
520 last_modified_name = 'compute_concurrency_field_with_access'
522 last_modified_name = 'compute_concurrency_field'
524 # this field must override any other column or field
525 cls._add_field(cls.CONCURRENCY_CHECK_FIELD, fields.Datetime(
526 string='Last Modified on', compute=last_modified_name, automatic=True))
529 def compute_concurrency_field(self):
530 self[self.CONCURRENCY_CHECK_FIELD] = \
531 datetime.datetime.utcnow().strftime(DEFAULT_SERVER_DATETIME_FORMAT)
534 @api.depends('create_date', 'write_date')
535 def compute_concurrency_field_with_access(self):
536 self[self.CONCURRENCY_CHECK_FIELD] = \
537 self.write_date or self.create_date or \
538 datetime.datetime.utcnow().strftime(DEFAULT_SERVER_DATETIME_FORMAT)
541 # Goal: try to apply inheritance at the instantiation level and
542 # put objects in the pool var
545 def _build_model(cls, pool, cr):
546 """ Instanciate a given model.
548 This class method instanciates the class of some model (i.e. a class
549 deriving from osv or osv_memory). The class might be the class passed
550 in argument or, if it inherits from another class, a class constructed
551 by combining the two classes.
555 # IMPORTANT: the registry contains an instance for each model. The class
556 # of each model carries inferred metadata that is shared among the
557 # model's instances for this registry, but not among registries. Hence
558 # we cannot use that "registry class" for combining model classes by
559 # inheritance, since it confuses the metadata inference process.
561 # Keep links to non-inherited constraints in cls; this is useful for
562 # instance when exporting translations
563 cls._local_constraints = cls.__dict__.get('_constraints', [])
564 cls._local_sql_constraints = cls.__dict__.get('_sql_constraints', [])
566 # determine inherited models
567 parents = getattr(cls, '_inherit', [])
568 parents = [parents] if isinstance(parents, basestring) else (parents or [])
570 # determine the model's name
571 name = cls._name or (len(parents) == 1 and parents[0]) or cls.__name__
573 # determine the module that introduced the model
574 original_module = pool[name]._original_module if name in parents else cls._module
576 # build the class hierarchy for the model
577 for parent in parents:
578 if parent not in pool:
579 raise TypeError('The model "%s" specifies a nonexistent parent class "%s"\n'
580 'You may need to add a dependency on the parent class\' module.' % (name, parent))
581 parent_model = pool[parent]
583 # do not use the class of parent_model, since that class contains
584 # inferred metadata; use its ancestor instead
585 parent_class = type(parent_model).__base__
587 # don't inherit custom fields
588 columns = dict((key, val)
589 for key, val in parent_class._columns.iteritems()
592 columns.update(cls._columns)
594 defaults = dict(parent_class._defaults)
595 defaults.update(cls._defaults)
597 inherits = dict(parent_class._inherits)
598 inherits.update(cls._inherits)
600 depends = dict(parent_class._depends)
601 for m, fs in cls._depends.iteritems():
602 depends[m] = depends.get(m, []) + fs
604 old_constraints = parent_class._constraints
605 new_constraints = cls._constraints
606 # filter out from old_constraints the ones overridden by a
607 # constraint with the same function name in new_constraints
608 constraints = new_constraints + [oldc
609 for oldc in old_constraints
610 if not any(newc[2] == oldc[2] and same_name(newc[0], oldc[0])
611 for newc in new_constraints)
614 sql_constraints = cls._sql_constraints + \
615 parent_class._sql_constraints
621 '_defaults': defaults,
622 '_inherits': inherits,
624 '_constraints': constraints,
625 '_sql_constraints': sql_constraints,
627 cls = type(name, (cls, parent_class), attrs)
629 # introduce the "registry class" of the model;
630 # duplicate some attributes so that the ORM can modify them
634 '_columns': dict(cls._columns),
635 '_defaults': dict(cls._defaults),
636 '_inherits': dict(cls._inherits),
637 '_depends': dict(cls._depends),
638 '_constraints': list(cls._constraints),
639 '_sql_constraints': list(cls._sql_constraints),
640 '_original_module': original_module,
642 cls = type(cls._name, (cls,), attrs)
644 # float fields are registry-dependent (digit attribute); duplicate them
646 for key, col in cls._columns.items():
647 if col._type == 'float':
648 cls._columns[key] = copy.copy(col)
650 # instantiate the model, and initialize it
651 model = object.__new__(cls)
652 model.__init__(pool, cr)
656 def _init_function_fields(cls, pool, cr):
657 # initialize the list of non-stored function fields for this model
658 pool._pure_function_fields[cls._name] = []
660 # process store of low-level function fields
661 for fname, column in cls._columns.iteritems():
662 if hasattr(column, 'digits_change'):
663 column.digits_change(cr)
664 # filter out existing store about this field
665 pool._store_function[cls._name] = [
667 for stored in pool._store_function.get(cls._name, [])
668 if (stored[0], stored[1]) != (cls._name, fname)
670 if not isinstance(column, fields.function):
673 # register it on the pool for invalidation
674 pool._pure_function_fields[cls._name].append(fname)
676 # process store parameter
679 get_ids = lambda self, cr, uid, ids, c={}: ids
680 store = {cls._name: (get_ids, None, column.priority, None)}
681 for model, spec in store.iteritems():
683 (fnct, fields2, order, length) = spec
685 (fnct, fields2, order) = spec
688 raise except_orm('Error',
689 ('Invalid function definition %s in object %s !\nYou must use the definition: store={object:(fnct, fields, priority, time length)}.' % (fname, cls._name)))
690 pool._store_function.setdefault(model, [])
691 t = (cls._name, fname, fnct, tuple(fields2) if fields2 else None, order, length)
692 if t not in pool._store_function[model]:
693 pool._store_function[model].append(t)
694 pool._store_function[model].sort(key=lambda x: x[4])
697 def _init_manual_fields(cls, pool, cr):
698 # Check whether the query is already done
699 if pool.fields_by_model is not None:
700 manual_fields = pool.fields_by_model.get(cls._name, [])
702 cr.execute('SELECT * FROM ir_model_fields WHERE model=%s AND state=%s', (cls._name, 'manual'))
703 manual_fields = cr.dictfetchall()
705 for field in manual_fields:
706 if field['name'] in cls._columns:
709 'string': field['field_description'],
710 'required': bool(field['required']),
711 'readonly': bool(field['readonly']),
712 'domain': eval(field['domain']) if field['domain'] else None,
713 'size': field['size'] or None,
714 'ondelete': field['on_delete'],
715 'translate': (field['translate']),
718 #'select': int(field['select_level'])
720 if field['serialization_field_id']:
721 cr.execute('SELECT name FROM ir_model_fields WHERE id=%s', (field['serialization_field_id'],))
722 attrs.update({'serialization_field': cr.fetchone()[0], 'type': field['ttype']})
723 if field['ttype'] in ['many2one', 'one2many', 'many2many']:
724 attrs.update({'relation': field['relation']})
725 cls._columns[field['name']] = fields.sparse(**attrs)
726 elif field['ttype'] == 'selection':
727 cls._columns[field['name']] = fields.selection(eval(field['selection']), **attrs)
728 elif field['ttype'] == 'reference':
729 cls._columns[field['name']] = fields.reference(selection=eval(field['selection']), **attrs)
730 elif field['ttype'] == 'many2one':
731 cls._columns[field['name']] = fields.many2one(field['relation'], **attrs)
732 elif field['ttype'] == 'one2many':
733 cls._columns[field['name']] = fields.one2many(field['relation'], field['relation_field'], **attrs)
734 elif field['ttype'] == 'many2many':
735 _rel1 = field['relation'].replace('.', '_')
736 _rel2 = field['model'].replace('.', '_')
737 _rel_name = 'x_%s_%s_%s_rel' % (_rel1, _rel2, field['name'])
738 cls._columns[field['name']] = fields.many2many(field['relation'], _rel_name, 'id1', 'id2', **attrs)
740 cls._columns[field['name']] = getattr(fields, field['ttype'])(**attrs)
743 def _init_constraints_onchanges(cls):
744 # store sql constraint error messages
745 for (key, _, msg) in cls._sql_constraints:
746 cls.pool._sql_error[cls._table + '_' + key] = msg
748 # collect constraint and onchange methods
749 cls._constraint_methods = []
750 cls._onchange_methods = defaultdict(list)
751 for attr, func in getmembers(cls, callable):
752 if hasattr(func, '_constrains'):
753 if not all(name in cls._fields for name in func._constrains):
754 _logger.warning("@constrains%r parameters must be field names", func._constrains)
755 cls._constraint_methods.append(func)
756 if hasattr(func, '_onchange'):
757 if not all(name in cls._fields for name in func._onchange):
758 _logger.warning("@onchange%r parameters must be field names", func._onchange)
759 for name in func._onchange:
760 cls._onchange_methods[name].append(func)
763 # In the past, this method was registering the model class in the server.
764 # This job is now done entirely by the metaclass MetaModel.
766 # Do not create an instance here. Model instances are created by method _build_model().
770 def __init__(self, pool, cr):
771 """ Initialize a model and make it part of the given registry.
773 - copy the stored fields' functions in the registry,
774 - retrieve custom fields and add them in the model,
775 - ensure there is a many2one for each _inherits'd parent,
776 - update the children's _columns,
777 - give a chance to each field to initialize itself.
782 # link the class to the registry, and update the registry
784 cls._model = self # backward compatibility
785 pool.add(cls._name, self)
787 # determine description, table, sequence and log_access
788 if not cls._description:
789 cls._description = cls._name
791 cls._table = cls._name.replace('.', '_')
792 if not cls._sequence:
793 cls._sequence = cls._table + '_id_seq'
794 if not hasattr(cls, '_log_access'):
795 # If _log_access is not specified, it is the same value as _auto.
796 cls._log_access = cls._auto
799 if cls.is_transient():
800 cls._transient_check_count = 0
801 cls._transient_max_count = config.get('osv_memory_count_limit')
802 cls._transient_max_hours = config.get('osv_memory_age_limit')
803 assert cls._log_access, \
804 "TransientModels must have log_access turned on, " \
805 "in order to implement their access rights policy"
807 # retrieve new-style fields and duplicate them (to avoid clashes with
808 # inheritance between different models)
810 for attr, field in getmembers(cls, Field.__instancecheck__):
811 if not field._origin:
812 cls._add_field(attr, field.copy())
814 # introduce magic fields
815 cls._add_magic_fields()
817 # register stuff about low-level function fields and custom fields
818 cls._init_function_fields(pool, cr)
819 cls._init_manual_fields(pool, cr)
822 cls._inherits_check()
823 cls._inherits_reload()
825 # register constraints and onchange methods
826 cls._init_constraints_onchanges()
829 for k in cls._defaults:
830 assert k in cls._fields, \
831 "Model %s has a default for nonexiting field %s" % (cls._name, k)
834 for column in cls._columns.itervalues():
839 assert cls._rec_name in cls._fields, \
840 "Invalid rec_name %s for model %s" % (cls._rec_name, cls._name)
841 elif 'name' in cls._fields:
842 cls._rec_name = 'name'
844 # prepare ormcache, which must be shared by all instances of the model
849 def _is_an_ordinary_table(self):
850 self.env.cr.execute("""\
854 AND relkind = %s""", [self._table, 'r'])
855 return bool(self.env.cr.fetchone())
857 def __export_xml_id(self):
858 """ Return a valid xml_id for the record `self`. """
859 if not self._is_an_ordinary_table():
861 "You can not export the column ID of model %s, because the "
862 "table %s is not an ordinary table."
863 % (self._name, self._table))
864 ir_model_data = self.sudo().env['ir.model.data']
865 data = ir_model_data.search([('model', '=', self._name), ('res_id', '=', self.id)])
868 return '%s.%s' % (data[0].module, data[0].name)
873 name = '%s_%s' % (self._table, self.id)
874 while ir_model_data.search([('module', '=', '__export__'), ('name', '=', name)]):
876 name = '%s_%s_%s' % (self._table, self.id, postfix)
877 ir_model_data.create({
880 'module': '__export__',
883 return '__export__.' + name
886 def __export_rows(self, fields):
887 """ Export fields of the records in `self`.
889 :param fields: list of lists of fields to traverse
890 :return: list of lists of corresponding values
894 # main line of record, initially empty
895 current = [''] * len(fields)
896 lines.append(current)
898 # list of primary fields followed by secondary field(s)
901 # process column by column
902 for i, path in enumerate(fields):
907 if name in primary_done:
911 current[i] = str(record.id)
913 current[i] = record.__export_xml_id()
915 field = record._fields[name]
918 # this part could be simpler, but it has to be done this way
919 # in order to reproduce the former behavior
920 if not isinstance(value, BaseModel):
921 current[i] = field.convert_to_export(value, self.env)
923 primary_done.append(name)
925 # This is a special case, its strange behavior is intended!
926 if field.type == 'many2many' and len(path) > 1 and path[1] == 'id':
927 xml_ids = [r.__export_xml_id() for r in value]
928 current[i] = ','.join(xml_ids) or False
931 # recursively export the fields that follow name
932 fields2 = [(p[1:] if p and p[0] == name else []) for p in fields]
933 lines2 = value.__export_rows(fields2)
935 # merge first line with record's main line
936 for j, val in enumerate(lines2[0]):
939 # check value of current field
941 # assign xml_ids, and forget about remaining lines
942 xml_ids = [item[1] for item in value.name_get()]
943 current[i] = ','.join(xml_ids)
945 # append the other lines at the end
953 def export_data(self, fields_to_export, raw_data=False):
954 """ Export fields for selected objects
956 :param fields_to_export: list of fields
957 :param raw_data: True to return values as native Python types
958 :rtype: dictionary with a *datas* matrix
960 This method is used when exporting data via client menu
962 fields_to_export = map(fix_import_export_id_paths, fields_to_export)
964 self = self.with_context(export_raw_data=True)
965 return {'datas': self.__export_rows(fields_to_export)}
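# Illustrative sketch (not part of the original module, hypothetical ids and
# values): exporting the external id and name of a couple of partners.
#   >>> partners = env['res.partner'].browse([1, 2])
#   >>> partners.export_data(['id', 'name'])['datas']
#   [['base.main_partner', 'YourCompany'], ['__export__.res_partner_2', 'Agrolait']]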
967 def import_data(self, cr, uid, fields, datas, mode='init', current_module='', noupdate=False, context=None, filename=None):
970 Use :meth:`~load` instead
972 Import given data in given module
974 This method is used when importing data via client menu.
976 Example of fields to import for a sale.order::
979 partner_id, (=name_search)
980 order_line/.id, (=database_id)
982 order_line/product_id/id, (=xml id)
983 order_line/price_unit,
984 order_line/product_uom_qty,
985 order_line/product_uom/id (=xml_id)
987 This method returns a 4-tuple with the following structure::
989 (return_code, errored_resource, error_message, unused)
991 * The first item is a return code, it is ``-1`` in case of
992 import error, or the last imported row number in case of success
993 * The second item contains the record data dict that failed to import
994 in case of error, otherwise it's 0
995 * The third item contains an error message string in case of error,
997 * The last item is currently unused, with no specific semantics
999 :param fields: list of fields to import
1000 :param datas: data to import
1001 :param mode: 'init' or 'update' for record creation
1002 :param current_module: module name
1003 :param noupdate: flag for record creation
1004 :param filename: optional file to store partial import state for recovery
1005 :returns: 4-tuple in the form (return_code, errored_resource, error_message, unused)
1006 :rtype: (int, dict or 0, str or 0, str or 0)
1008 context = dict(context) if context is not None else {}
1009 context['_import_current_module'] = current_module
1011 fields = map(fix_import_export_id_paths, fields)
1012 ir_model_data_obj = self.pool.get('ir.model.data')
1015 if m['type'] == 'error':
1016 raise Exception(m['message'])
1018 if config.get('import_partial') and filename:
1019 with open(config.get('import_partial'), 'rb') as partial_import_file:
1020 data = pickle.load(partial_import_file)
1021 position = data.get(filename, 0)
1025 for res_id, xml_id, res, info in self._convert_records(cr, uid,
1026 self._extract_records(cr, uid, fields, datas,
1027 context=context, log=log),
1028 context=context, log=log):
1029 ir_model_data_obj._update(cr, uid, self._name,
1030 current_module, res, mode=mode, xml_id=xml_id,
1031 noupdate=noupdate, res_id=res_id, context=context)
1032 position = info.get('rows', {}).get('to', 0) + 1
1033 if config.get('import_partial') and filename and (not (position%100)):
1034 with open(config.get('import_partial'), 'rb') as partial_import:
1035 data = pickle.load(partial_import)
1036 data[filename] = position
1037 with open(config.get('import_partial'), 'wb') as partial_import:
1038 pickle.dump(data, partial_import)
1039 if context.get('defer_parent_store_computation'):
1040 self._parent_store_compute(cr)
1042 except Exception, e:
1044 return -1, {}, 'Line %d : %s' % (position + 1, tools.ustr(e)), ''
1046 if context.get('defer_parent_store_computation'):
1047 self._parent_store_compute(cr)
1048 return position, 0, 0, 0
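# Illustrative sketch (not part of the original module, hypothetical data and
# external ids): rows are matched to `fields` cell by cell; continuation rows
# that only carry one2many values leave the parent columns empty.
#   fields = ['name', 'order_line/product_id/id', 'order_line/product_uom_qty']
#   datas = [['SO042', 'my_module.product_a', '3'],
#            ['', 'my_module.product_b', '1']]
#   self.pool['sale.order'].import_data(cr, uid, fields, datas,
#                                       mode='init', context=context)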
1050 def load(self, cr, uid, fields, data, context=None):
1052 Attempts to load the data matrix, and returns a list of ids (or
1053 ``False`` if there was an error and no id could be generated) and a
1056 The ids are those of the records created and saved (in database), in
1057 the same order they were extracted from the file. They can be passed
1058 directly to :meth:`~read`
1060 :param fields: list of fields to import, at the same index as the corresponding data
1061 :type fields: list(str)
1062 :param data: row-major matrix of data to import
1063 :type data: list(list(str))
1064 :param dict context:
1065 :returns: {ids: list(int)|False, messages: [Message]}
1067 cr.execute('SAVEPOINT model_load')
1070 fields = map(fix_import_export_id_paths, fields)
1071 ModelData = self.pool['ir.model.data'].clear_caches()
1073 fg = self.fields_get(cr, uid, context=context)
1080 for id, xid, record, info in self._convert_records(cr, uid,
1081 self._extract_records(cr, uid, fields, data,
1082 context=context, log=messages.append),
1083 context=context, log=messages.append):
1085 cr.execute('SAVEPOINT model_load_save')
1086 except psycopg2.InternalError, e:
1087 # broken transaction, exit and hope the source error was already logged
1089 if not any(message['type'] == 'error' for message in messages):
1090 messages.append(dict(info, type='error',message=
1091 u"Unknown database error: '%s'" % e))
1094 ids.append(ModelData._update(cr, uid, self._name,
1095 current_module, record, mode=mode, xml_id=xid,
1096 noupdate=noupdate, res_id=id, context=context))
1097 cr.execute('RELEASE SAVEPOINT model_load_save')
1098 except psycopg2.Warning, e:
1099 messages.append(dict(info, type='warning', message=str(e)))
1100 cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
1101 except psycopg2.Error, e:
1102 messages.append(dict(
1104 **PGERROR_TO_OE[e.pgcode](self, fg, info, e)))
1105 # Failed to write, log to messages, rollback savepoint (to
1106 # avoid broken transaction) and keep going
1107 cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
1108 except Exception, e:
1109 message = (_('Unknown error during import:') +
1110 ' %s: %s' % (type(e), unicode(e)))
1111 moreinfo = _('Resolve other errors first')
1112 messages.append(dict(info, type='error',
1115 # Failed for some reason, perhaps due to invalid data supplied,
1116 # rollback savepoint and keep going
1117 cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
1118 if any(message['type'] == 'error' for message in messages):
1119 cr.execute('ROLLBACK TO SAVEPOINT model_load')
1121 return {'ids': ids, 'messages': messages}
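# Illustrative sketch (not part of the original module, hypothetical data):
# `data` is row-major and aligned index by index with `fields`; the returned
# ids follow the row order and `messages` collects per-row warnings/errors.
#   fields = ['name', 'email']
#   data = [['Ada Lovelace', 'ada@example.com'],
#           ['Alan Turing', 'alan@example.com']]
#   result = self.pool['res.partner'].load(cr, uid, fields, data, context=context)
#   # e.g. result == {'ids': [7, 8], 'messages': []}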
1123 def _extract_records(self, cr, uid, fields_, data,
1124 context=None, log=lambda a: None):
1125 """ Generates record dicts from the data sequence.
1127 The result is a generator of dicts mapping field names to raw
1128 (unconverted, unvalidated) values.
1130 For relational fields, if sub-fields were provided the value will be
1131 a list of sub-records
1133 The following sub-fields may be set on the record (by key):
1134 * None is the name_get for the record (to use with name_create/name_search)
1135 * "id" is the External ID for the record
1136 * ".id" is the Database ID for the record
1138 columns = dict((k, v.column) for k, v in self._all_columns.iteritems())
1139 # Fake columns to avoid special cases in extractor
1140 columns[None] = fields.char('rec_name')
1141 columns['id'] = fields.char('External ID')
1142 columns['.id'] = fields.integer('Database ID')
1144 # m2o fields can't be on multiple lines so exclude them from the
1145 # is_relational field rows filter, but special-case it later on to
1146 # be handled with relational fields (as it can have subfields)
1147 is_relational = lambda field: columns[field]._type in ('one2many', 'many2many', 'many2one')
1148 get_o2m_values = itemgetter_tuple(
1149 [index for index, field in enumerate(fields_)
1150 if columns[field[0]]._type == 'one2many'])
1151 get_nono2m_values = itemgetter_tuple(
1152 [index for index, field in enumerate(fields_)
1153 if columns[field[0]]._type != 'one2many'])
1154 # Checks if the provided row has any non-empty non-relational field
1155 def only_o2m_values(row, f=get_nono2m_values, g=get_o2m_values):
1156 return any(g(row)) and not any(f(row))
1160 if index >= len(data): return
1163 # copy non-relational fields to record dict
1164 record = dict((field[0], value)
1165 for field, value in itertools.izip(fields_, row)
1166 if not is_relational(field[0]))
1168 # Get all following rows which have relational values attached to
1169 # the current record (no non-relational values)
1170 record_span = itertools.takewhile(
1171 only_o2m_values, itertools.islice(data, index + 1, None))
1172 # stitch record row back on for relational fields
1173 record_span = list(itertools.chain([row], record_span))
1174 for relfield in set(
1175 field[0] for field in fields_
1176 if is_relational(field[0])):
1177 column = columns[relfield]
1178 # FIXME: how to not use _obj without relying on fields_get?
1179 Model = self.pool[column._obj]
1181 # get only cells for this sub-field, should be strictly
1182 # non-empty, field path [None] is for name_get column
1183 indices, subfields = zip(*((index, field[1:] or [None])
1184 for index, field in enumerate(fields_)
1185 if field[0] == relfield))
1187 # return all rows which have at least one value for the
1188 # subfields of relfield
1189 relfield_data = filter(any, map(itemgetter_tuple(indices), record_span))
1190 record[relfield] = [subrecord
1191 for subrecord, _subinfo in Model._extract_records(
1192 cr, uid, subfields, relfield_data,
1193 context=context, log=log)]
1195 yield record, {'rows': {
1197 'to': index + len(record_span) - 1
1199 index += len(record_span)
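# Illustrative sketch (not part of the original module, hypothetical rows):
# a follow-up row carrying only one2many cells is folded into the record
# started on the previous row.
#   fields_ = [['name'], ['order_line', 'product_uom_qty']]
#   data = [['SO042', '3'],
#           ['', '1']]
#   -> yields ({'name': 'SO042',
#               'order_line': [{'product_uom_qty': '3'},
#                              {'product_uom_qty': '1'}]},
#              {'rows': {'from': 0, 'to': 1}})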
1201 def _convert_records(self, cr, uid, records,
1202 context=None, log=lambda a: None):
1203 """ Converts records from the source iterable (recursive dicts of
1204 strings) into forms which can be written to the database (via
1205 self.create or (ir.model.data)._update)
1207 :returns: a list of triplets of (id, xid, record)
1208 :rtype: list((int|None, str|None, dict))
1210 if context is None: context = {}
1211 Converter = self.pool['ir.fields.converter']
1212 columns = dict((k, v.column) for k, v in self._all_columns.iteritems())
1213 Translation = self.pool['ir.translation']
1215 (f, (Translation._get_source(cr, uid, self._name + ',' + f, 'field',
1216 context.get('lang'))
1218 for f, column in columns.iteritems())
1220 convert = Converter.for_model(cr, uid, self, context=context)
1222 def _log(base, field, exception):
1223 type = 'warning' if isinstance(exception, Warning) else 'error'
1224 # logs the logical (not human-readable) field name for automated
1225 # processing of the response, but injects the human-readable one in the message
1226 record = dict(base, type=type, field=field,
1227 message=unicode(exception.args[0]) % base)
1228 if len(exception.args) > 1 and exception.args[1]:
1229 record.update(exception.args[1])
1232 stream = CountingStream(records)
1233 for record, extras in stream:
1236 # name_get/name_create
1237 if None in record: pass
1244 dbid = int(record['.id'])
1246 # in case of overridden id column
1247 dbid = record['.id']
1248 if not self.search(cr, uid, [('id', '=', dbid)], context=context):
1251 record=stream.index,
1253 message=_(u"Unknown database identifier '%s'") % dbid))
1256 converted = convert(record, lambda field, err:\
1257 _log(dict(extras, record=stream.index, field=field_names[field]), field, err))
1259 yield dbid, xid, converted, dict(extras, record=stream.index)
1262 def _validate_fields(self, field_names):
1263 field_names = set(field_names)
1265 # old-style constraint methods
1266 trans = self.env['ir.translation']
1267 cr, uid, context = self.env.args
1270 for fun, msg, names in self._constraints:
1272 # validation must be context-independent; call `fun` without context
1273 valid = not (set(names) & field_names) or fun(self._model, cr, uid, ids)
1275 except Exception, e:
1276 _logger.debug('Exception while validating constraint', exc_info=True)
1278 extra_error = tools.ustr(e)
1281 res_msg = msg(self._model, cr, uid, ids, context=context)
1282 if isinstance(res_msg, tuple):
1283 template, params = res_msg
1284 res_msg = template % params
1286 res_msg = trans._get_source(self._name, 'constraint', self.env.lang, msg)
1288 res_msg += "\n\n%s\n%s" % (_('Error details:'), extra_error)
1290 _("Field(s) `%s` failed against a constraint: %s") %
1291 (', '.join(names), res_msg)
1294 raise ValidationError('\n'.join(errors))
1296 # new-style constraint methods
1297 for check in self._constraint_methods:
1298 if set(check._constrains) & field_names:
1301 except ValidationError, e:
1303 except Exception, e:
1304 raise ValidationError("Error while validating constraint\n\n%s" % tools.ustr(e))
1306 def default_get(self, cr, uid, fields_list, context=None):
1307 """ default_get(fields) -> default_values
1309 Return default values for the fields in `fields_list`. Default
1310 values are determined by the context, user defaults, and the model
1313 :param fields_list: a list of field names
1314 :return: a dictionary mapping each field name to its corresponding
1315 default value; the keys of the dictionary are the fields in
1316 `fields_list` that have a default value different from ``False``.
1318 This method should not be overridden. In order to change the
1319 mechanism for determining default values, you should override method
1320 :meth:`add_default_value` instead.
1322 # trigger view init hook
1323 self.view_init(cr, uid, fields_list, context)
1325 # use a new record to determine default values; evaluate fields on the
1326 # new record and put default values in result
1327 record = self.new(cr, uid, {}, context=context)
1329 for name in fields_list:
1330 if name in self._fields:
1331 value = record[name]
1332 if name in record._cache:
1333 result[name] = value # it really is a default value
1335 # convert default values to the expected format
1336 result = self._convert_to_write(result)
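# Illustrative sketch (not part of the original module, hypothetical values):
# a `default_<field>` key in the context wins over ir.values and _defaults.
#   self.pool['res.partner'].default_get(cr, uid, ['name', 'lang'],
#                                        context={'default_name': 'New partner'})
#   # e.g. -> {'name': 'New partner', 'lang': 'en_US'}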
1339 def add_default_value(self, field):
1340 """ Set the default value of `field` to the new record `self`.
1341 The value must be assigned to `self`.
1343 assert not self.id, "Expected new record: %s" % self
1344 cr, uid, context = self.env.args
1347 # 1. look up context
1348 key = 'default_' + name
1350 self[name] = context[key]
1353 # 2. look up ir_values
1354 # Note: performance is good, because get_defaults_dict is cached!
1355 ir_values_dict = self.env['ir.values'].get_defaults_dict(self._name)
1356 if name in ir_values_dict:
1357 self[name] = ir_values_dict[name]
1360 # 3. look up property fields
1361 # TODO: get rid of this one
1362 column = self._columns.get(name)
1363 if isinstance(column, fields.property):
1364 self[name] = self.env['ir.property'].get(name, self._name)
1367 # 4. look up _defaults
1368 if name in self._defaults:
1369 value = self._defaults[name]
1371 value = value(self._model, cr, uid, context)
1375 # 5. delegate to field
1376 field.determine_default(self)
1378 def fields_get_keys(self, cr, user, context=None):
1379 res = self._columns.keys()
1380 # TODO I believe this loop can be replaced by
1381 # res.extend(self._inherit_fields.keys())
1382 for parent in self._inherits:
1383 res.extend(self.pool[parent].fields_get_keys(cr, user, context))
1386 def _rec_name_fallback(self, cr, uid, context=None):
1387 rec_name = self._rec_name
1388 if rec_name not in self._columns:
1389 rec_name = self._columns.keys()[0] if len(self._columns.keys()) > 0 else "id"
1393 # Overload this method if you need a window title which depends on the context
1395 def view_header_get(self, cr, user, view_id=None, view_type='form', context=None):
1398 def user_has_groups(self, cr, uid, groups, context=None):
1399 """Return true if the user is at least member of one of the groups
1400 in groups_str. Typically used to resolve `groups` attribute
1401 in view and model definitions.
1403 :param str groups: comma-separated list of fully-qualified group
1404 external IDs, e.g.: ``base.group_user,base.group_system``
1405 :return: True if the current user is a member of one of the given groups
1408 return any(self.pool['res.users'].has_group(cr, uid, group_ext_id)
1409 for group_ext_id in groups.split(','))
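# Illustrative sketch (not part of the original module): gating behaviour on
# group membership, using fully-qualified group external ids.
#   if self.user_has_groups(cr, uid, 'base.group_user,base.group_system'):
#       pass  # current user belongs to at least one of the two groups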
1411 def _get_default_form_view(self, cr, user, context=None):
1412 """ Generates a default single-line form view using all fields
1413 of the current model except the m2m and o2m ones.
1415 :param cr: database cursor
1416 :param int user: user id
1417 :param dict context: connection context
1418 :returns: a form view as an lxml document
1419 :rtype: etree._Element
1421 view = etree.Element('form', string=self._description)
1422 group = etree.SubElement(view, 'group', col="4")
1423 for fname, field in self._fields.iteritems():
1424 if field.automatic or field.type in ('one2many', 'many2many'):
1427 etree.SubElement(group, 'field', name=fname)
1428 if field.type == 'text':
1429 etree.SubElement(group, 'newline')
1432 def _get_default_search_view(self, cr, user, context=None):
1433 """ Generates a single-field search view, based on _rec_name.
1435 :param cr: database cursor
1436 :param int user: user id
1437 :param dict context: connection context
1438 :returns: a tree view as an lxml document
1439 :rtype: etree._Element
1441 view = etree.Element('search', string=self._description)
1442 etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))
1445 def _get_default_tree_view(self, cr, user, context=None):
1446 """ Generates a single-field tree view, based on _rec_name.
1448 :param cr: database cursor
1449 :param int user: user id
1450 :param dict context: connection context
1451 :returns: a tree view as an lxml document
1452 :rtype: etree._Element
1454 view = etree.Element('tree', string=self._description)
1455 etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))
1458 def _get_default_calendar_view(self, cr, user, context=None):
1459 """ Generates a default calendar view by trying to infer
1460 calendar fields from a number of pre-set attribute names
1462 :param cr: database cursor
1463 :param int user: user id
1464 :param dict context: connection context
1465 :returns: a calendar view
1466 :rtype: etree._Element
1468 def set_first_of(seq, in_, to):
1469 """Sets the first value of `seq` also found in `in_` to
1470 the `to` attribute of the view being closed over.
1472 Returns whether it found a suitable value (and set it on
1473 the attribute) or not
1481 view = etree.Element('calendar', string=self._description)
1482 etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))
1484 if self._date_name not in self._columns:
1486 for dt in ['date', 'date_start', 'x_date', 'x_date_start']:
1487 if dt in self._columns:
1488 self._date_name = dt
1493 raise except_orm(_('Invalid Object Architecture!'), _("Insufficient fields for Calendar View!"))
1494 view.set('date_start', self._date_name)
1496 set_first_of(["user_id", "partner_id", "x_user_id", "x_partner_id"],
1497 self._columns, 'color')
1499 if not set_first_of(["date_stop", "date_end", "x_date_stop", "x_date_end"],
1500 self._columns, 'date_stop'):
1501 if not set_first_of(["date_delay", "planned_hours", "x_date_delay", "x_planned_hours"],
1502 self._columns, 'date_delay'):
1504 _('Invalid Object Architecture!'),
1505 _("Insufficient fields to generate a Calendar View for %s, missing a date_stop or a date_delay" % self._name))
1509 def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
1510 """ fields_view_get([view_id | view_type='form'])
1512 Get the detailed composition of the requested view like fields, model, view architecture
1514 :param view_id: id of the view or None
1515 :param view_type: type of the view to return if view_id is None ('form', tree', ...)
1516 :param toolbar: true to include contextual actions
1517 :param submenu: deprecated
1518 :return: dictionary describing the composition of the requested view (including inherited views and extensions)
1519 :raise AttributeError:
1520 * if the inherited view has unknown position to work with other than 'before', 'after', 'inside', 'replace'
1521 * if some tag other than 'position' is found in parent view
1522 :raise Invalid ArchitectureError: if there is a view type other than form, tree, calendar, search, etc. defined in the structure
1526 View = self.pool['ir.ui.view']
1529 'model': self._name,
1530 'field_parent': False,
1533 # try to find a view_id if none provided
1535 # <view_type>_view_ref in context can be used to override the default view
1536 view_ref_key = view_type + '_view_ref'
1537 view_ref = context.get(view_ref_key)
1540 module, view_ref = view_ref.split('.', 1)
1541 cr.execute("SELECT res_id FROM ir_model_data WHERE model='ir.ui.view' AND module=%s AND name=%s", (module, view_ref))
1542 view_ref_res = cr.fetchone()
1544 view_id = view_ref_res[0]
1546 _logger.warning('%r requires a fully-qualified external id (got: %r for model %s). '
1547 'Please use the complete `module.view_id` form instead.', view_ref_key, view_ref,
1551 # otherwise try to find the lowest priority matching ir.ui.view
1552 view_id = View.default_view(cr, uid, self._name, view_type, context=context)
1554 # context for post-processing might be overridden
1557 # read the view with inherited views applied
1558 root_view = View.read_combined(cr, uid, view_id, fields=['id', 'name', 'field_parent', 'type', 'model', 'arch'], context=context)
1559 result['arch'] = root_view['arch']
1560 result['name'] = root_view['name']
1561 result['type'] = root_view['type']
1562 result['view_id'] = root_view['id']
1563 result['field_parent'] = root_view['field_parent']
1564 # override context for postprocessing
1565 if root_view.get('model') != self._name:
1566 ctx = dict(context, base_model_name=root_view.get('model'))
1568 # fallback on default views methods if no ir.ui.view could be found
1570 get_func = getattr(self, '_get_default_%s_view' % view_type)
1571 arch_etree = get_func(cr, uid, context)
1572 result['arch'] = etree.tostring(arch_etree, encoding='utf-8')
1573 result['type'] = view_type
1574 result['name'] = 'default'
1575 except AttributeError:
1576 raise except_orm(_('Invalid Architecture!'), _("No default view of type '%s' could be found !") % view_type)
1578 # Apply post processing, groups and modifiers etc...
1579 xarch, xfields = View.postprocess_and_fields(cr, uid, self._name, etree.fromstring(result['arch']), view_id, context=ctx)
1580 result['arch'] = xarch
1581 result['fields'] = xfields
1583 # Add related action information if asked
1585 toclean = ('report_sxw_content', 'report_rml_content', 'report_sxw', 'report_rml', 'report_sxw_content_data', 'report_rml_content_data')
1591 ir_values_obj = self.pool.get('ir.values')
1592 resprint = ir_values_obj.get(cr, uid, 'action', 'client_print_multi', [(self._name, False)], False, context)
1593 resaction = ir_values_obj.get(cr, uid, 'action', 'client_action_multi', [(self._name, False)], False, context)
1594 resrelate = ir_values_obj.get(cr, uid, 'action', 'client_action_relate', [(self._name, False)], False, context)
1595 resaction = [clean(action) for action in resaction if view_type == 'tree' or not action[2].get('multi')]
1596 resprint = [clean(print_) for print_ in resprint if view_type == 'tree' or not print_[2].get('multi')]
1597 # When multi="True" is set, the action is displayed only in the More menu of the list view
1598 resrelate = [clean(action) for action in resrelate
1599 if (action[2].get('multi') and view_type == 'tree') or (not action[2].get('multi') and view_type == 'form')]
1601 for x in itertools.chain(resprint, resaction, resrelate):
1602 x['string'] = x['name']
1604 result['toolbar'] = {
1606 'action': resaction,
1611 def get_formview_id(self, cr, uid, id, context=None):
1612 """ Return an view id to open the document with. This method is meant to be
1613 overridden in addons that want to give specific view ids for example.
1615 :param int id: id of the document to open
1619 def get_formview_action(self, cr, uid, id, context=None):
1620 """ Return an action to open the document. This method is meant to be
1621 overridden in addons that want to give specific view ids for example.
1623 :param int id: id of the document to open
1625 view_id = self.get_formview_id(cr, uid, id, context=context)
1627 'type': 'ir.actions.act_window',
1628 'res_model': self._name,
1629 'view_type': 'form',
1630 'view_mode': 'form',
1631 'views': [(view_id, 'form')],
1632 'target': 'current',
1636 def get_access_action(self, cr, uid, id, context=None):
1637 """ Return an action to open the document. This method is meant to be
1638 overridden in addons that want to give specific access to the document.
1639 By default it opens the formview of the document.
1641 :param int id: id of the document to open
1643 return self.get_formview_action(cr, uid, id, context=context)
1645 def _view_look_dom_arch(self, cr, uid, node, view_id, context=None):
1646 return self.pool['ir.ui.view'].postprocess_and_fields(
1647 cr, uid, self._name, node, view_id, context=context)
1649 def search_count(self, cr, user, args, context=None):
1650 """ search_count(args) -> int
1652 Returns the number of records in the current model matching :ref:`the
1653 provided domain <reference/orm/domains>`.
1655 res = self.search(cr, user, args, context=context, count=True)
1656 if isinstance(res, list):
1660 @api.returns('self')
1661 def search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False):
1662 """ search(args[, offset=0][, limit=None][, order=None][, count=False])
1664 Searches for records based on the ``args``
1665 :ref:`search domain <reference/orm/domains>`.
1667 :param args: :ref:`A search domain <reference/orm/domains>`. Use an empty
1668 list to match all records.
1669 :param int offset: number of results to ignore (default: none)
1670 :param int limit: maximum number of records to return (default: all)
1671 :param str order: sort string
1672 :param bool count: if ``True``, the call should return the number of
1673 records matching ``args`` rather than the records
1675 :returns: at most ``limit`` records matching the search criteria
1677 :raise AccessError: * if user tries to bypass access rules for read on the requested object.
1679 return self._search(cr, user, args, offset=offset, limit=limit, order=order, context=context, count=count)
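# Illustrative sketch (not part of the original module, hypothetical domain):
#   ids = self.pool['res.partner'].search(cr, uid, [('customer', '=', True)],
#                                         offset=0, limit=5,
#                                         order='create_date desc', context=context)
#   count = self.pool['res.partner'].search_count(cr, uid,
#                                                 [('customer', '=', True)],
#                                                 context=context)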
1682 # display_name, name_get, name_create, name_search
1685 @api.depends(lambda self: (self._rec_name,) if self._rec_name else ())
1686 def _compute_display_name(self):
1687 names = dict(self.name_get())
1689 record.display_name = names.get(record.id, False)
1693 """ name_get() -> [(id, name), ...]
1695 Returns a textual representation for the records in ``self``.
1696 By default this is the value of the ``display_name`` field.
1698 :return: list of pairs ``(id, text_repr)`` for each record
1702 name = self._rec_name
1703 if name in self._fields:
1704 convert = self._fields[name].convert_to_display_name
1706 result.append((record.id, convert(record[name])))
1709 result.append((record.id, "%s,%s" % (record._name, record.id)))
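# Usage sketch (illustrative; the ids and labels below are hypothetical):
#
#     partners.name_get()
#     # -> [(7, 'Agrolait'), (18, 'China Export')]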
1714 def name_create(self, name):
1715 """ name_create(name) -> record
1717 Create a new record by calling :meth:`~.create` with only one value
1718 provided: the display name of the new record.
1720 The new record will be initialized with any default values
1721 applicable to this model, or provided through the context. The usual
1722 behavior of :meth:`~.create` applies.
1724 :param name: display name of the record to create
1726 :return: the :meth:`~.name_get` pair value of the created record
1729 record = self.create({self._rec_name: name})
1730 return record.name_get()[0]
1732 _logger.warning("Cannot execute name_create, no _rec_name defined on %s", self._name)
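# Usage sketch (illustrative; the model, name and returned id are hypothetical):
#
#     partner_obj.name_create('New Partner')
#     # -> (42, 'New Partner'), i.e. the name_get() pair of the newly created record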
1736 def name_search(self, name='', args=None, operator='ilike', limit=100):
1737 """ name_search(name='', args=None, operator='ilike', limit=100) -> records
1739 Search for records that have a display name matching the given
1740 `name` pattern when compared with the given `operator`, while also
1741 matching the optional search domain (`args`).
1743 This is used for example to provide suggestions based on a partial
1744 value for a relational field. It may sometimes be seen as the inverse
1745 function of :meth:`~.name_get`, but it is not guaranteed to be one.
1747 This method is equivalent to calling :meth:`~.search` with a search
1748 domain based on ``display_name`` and then :meth:`~.name_get` on the
1749 result of the search.
1751 :param str name: the name pattern to match
1752 :param list args: optional search domain (see :meth:`~.search` for
1753 syntax), specifying further restrictions
1754 :param str operator: domain operator for matching `name`, such as
1755 ``'like'`` or ``'='``.
1756 :param int limit: optional max number of records to return
1758 :return: list of pairs ``(id, text_repr)`` for all matching records.
1760 return self._name_search(name, args, operator, limit=limit)
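# Usage sketch (illustrative; field names and results are hypothetical):
#
#     self.name_search(name='agro', args=[('customer', '=', True)], operator='ilike', limit=8)
#     # -> [(7, 'Agrolait'), ...] i.e. the name_get() pairs of the matching records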
1762 def _name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100, name_get_uid=None):
1763 # private implementation of name_search, allows passing a dedicated user
1764 # for the name_get part to solve some access rights issues
1765 args = list(args or [])
1766 # optimize out the default criterion of ``ilike ''`` that matches everything
1767 if not self._rec_name:
1768 _logger.warning("Cannot execute name_search, no _rec_name defined on %s", self._name)
1769 elif not (name == '' and operator == 'ilike'):
1770 args += [(self._rec_name, operator, name)]
1771 access_rights_uid = name_get_uid or user
1772 ids = self._search(cr, user, args, limit=limit, context=context, access_rights_uid=access_rights_uid)
1773 res = self.name_get(cr, access_rights_uid, ids, context)
1776 def read_string(self, cr, uid, id, langs, fields=None, context=None):
1779 self.pool.get('ir.translation').check_access_rights(cr, uid, 'read')
1781 fields = self._columns.keys() + self._inherit_fields.keys()
1782 #FIXME: collect all calls to _get_source into one SQL call.
1784 res[lang] = {'code': lang}
1786 if f in self._columns:
1787 res_trans = self.pool.get('ir.translation')._get_source(cr, uid, self._name+','+f, 'field', lang)
1789 res[lang][f] = res_trans
1791 res[lang][f] = self._columns[f].string
1792 for table in self._inherits:
1793 cols = intersect(self._inherit_fields.keys(), fields)
1794 res2 = self.pool[table].read_string(cr, uid, id, langs, cols, context)
1797 res[lang]['code'] = lang
1798 for f in res2[lang]:
1799 res[lang][f] = res2[lang][f]
1802 def write_string(self, cr, uid, id, langs, vals, context=None):
1803 self.pool.get('ir.translation').check_access_rights(cr, uid, 'write')
1804 #FIXME: try to only call the translation in one SQL
1807 if field in self._columns:
1808 src = self._columns[field].string
1809 self.pool.get('ir.translation')._set_ids(cr, uid, self._name+','+field, 'field', lang, [0], vals[field], src)
1810 for table in self._inherits:
1811 cols = intersect(self._inherit_fields.keys(), vals)
1813 self.pool[table].write_string(cr, uid, id, langs, vals, context)
1816 def _add_missing_default_values(self, cr, uid, values, context=None):
1817 # avoid overriding inherited values when parent is set
1819 for tables, parent_field in self._inherits.items():
1820 if parent_field in values:
1821 avoid_tables.append(tables)
1823 # compute missing fields
1824 missing_defaults = set()
1825 for field in self._columns.keys():
1826 if field not in values:
1827 missing_defaults.add(field)
1828 for field in self._inherit_fields.keys():
1829 if (field not in values) and (self._inherit_fields[field][0] not in avoid_tables):
1830 missing_defaults.add(field)
1831 # discard magic fields
1832 missing_defaults -= set(MAGIC_COLUMNS)
1834 if missing_defaults:
1835 # override defaults with the provided values, never allow the other way around
1836 defaults = self.default_get(cr, uid, list(missing_defaults), context)
1838 if ((dv in self._columns and self._columns[dv]._type == 'many2many') \
1839 or (dv in self._inherit_fields and self._inherit_fields[dv][2]._type == 'many2many')) \
1840 and defaults[dv] and isinstance(defaults[dv][0], (int, long)):
1841 defaults[dv] = [(6, 0, defaults[dv])]
1842 if (dv in self._columns and self._columns[dv]._type == 'one2many' \
1843 or (dv in self._inherit_fields and self._inherit_fields[dv][2]._type == 'one2many')) \
1844 and isinstance(defaults[dv], (list, tuple)) and defaults[dv] and isinstance(defaults[dv][0], dict):
1845 defaults[dv] = [(0, 0, x) for x in defaults[dv]]
1846 defaults.update(values)
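# Sketch of the conversion performed above (the field names and values are hypothetical):
#
#     defaults = {'tag_ids': [1, 2, 3]}            # many2many default given as a list of ids
#     # becomes: {'tag_ids': [(6, 0, [1, 2, 3])]}  # "replace with this set of ids" command
#
#     defaults = {'line_ids': [{'name': 'x'}]}     # one2many default given as a list of dicts
#     # becomes: {'line_ids': [(0, 0, {'name': 'x'})]}  # one "create" command per dict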
1850 def clear_caches(self):
1851 """ Clear the caches
1853 This clears the caches associated to methods decorated with
1854 ``tools.ormcache`` or ``tools.ormcache_multi``.
1857 self._ormcache.clear()
1858 self.pool._any_cache_cleared = True
1859 except AttributeError:
1863 def _read_group_fill_results(self, cr, uid, domain, groupby, remaining_groupbys,
1864 aggregated_fields, count_field,
1865 read_group_result, read_group_order=None, context=None):
1866 """Helper method for filling in empty groups for all possible values of
1867 the field being grouped by"""
1869 # self._group_by_full should map groupable fields to a method that returns
1870 # a list of all aggregated values that we want to display for this field,
1871 # in the form of a m2o-like pair (key,label).
1872 # This is useful to implement kanban views for instance, where all columns
1873 # should be displayed even if they don't contain any record.
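# Sketch of a _group_by_full entry (hypothetical model code, shown only to illustrate
# the expected signature and return value):
#
#     def _read_group_stage_ids(self, cr, uid, present_ids, domain, read_group_order=None,
#                               access_rights_uid=None, context=None):
#         stage_obj = self.pool['my.module.stage']    # hypothetical comodel
#         stage_ids = stage_obj.search(cr, access_rights_uid or uid, [], order='sequence')
#         result = stage_obj.name_get(cr, access_rights_uid or uid, stage_ids, context=context)
#         fold = {}                                   # {stage_id: folded?} for kanban columns
#         return result, fold
#
#     _group_by_full = {'stage_id': _read_group_stage_ids}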
1875 # Grab the list of all groups that should be displayed, including all present groups
1876 present_group_ids = [x[groupby][0] for x in read_group_result if x[groupby]]
1877 all_groups,folded = self._group_by_full[groupby](self, cr, uid, present_group_ids, domain,
1878 read_group_order=read_group_order,
1879 access_rights_uid=openerp.SUPERUSER_ID,
1882 result_template = dict.fromkeys(aggregated_fields, False)
1883 result_template[groupby + '_count'] = 0
1884 if remaining_groupbys:
1885 result_template['__context'] = {'group_by': remaining_groupbys}
1887 # Merge the left_side (current results as dicts) with the right_side (all
1888 # possible values as m2o pairs). Both lists are supposed to be using the
1889 # same ordering, and can be merged in one pass.
1892 def append_left(left_side):
1893 grouped_value = left_side[groupby] and left_side[groupby][0]
1894 if grouped_value not in known_values:
1895 result.append(left_side)
1896 known_values[grouped_value] = left_side
1898 known_values[grouped_value].update({count_field: left_side[count_field]})
1899 def append_right(right_side):
1900 grouped_value = right_side[0]
1901 if grouped_value not in known_values:
1902 line = dict(result_template)
1903 line[groupby] = right_side
1904 line['__domain'] = [(groupby,'=',grouped_value)] + domain
1906 known_values[grouped_value] = line
1907 while read_group_result or all_groups:
1908 left_side = read_group_result[0] if read_group_result else None
1909 right_side = all_groups[0] if all_groups else None
1910 assert left_side is None or left_side[groupby] is False \
1911 or isinstance(left_side[groupby], (tuple,list)), \
1912 'M2O-like pair expected, got %r' % left_side[groupby]
1913 assert right_side is None or isinstance(right_side, (tuple,list)), \
1914 'M2O-like pair expected, got %r' % right_side
1915 if left_side is None:
1916 append_right(all_groups.pop(0))
1917 elif right_side is None:
1918 append_left(read_group_result.pop(0))
1919 elif left_side[groupby] == right_side:
1920 append_left(read_group_result.pop(0))
1921 all_groups.pop(0) # discard right_side
1922 elif not left_side[groupby] or not left_side[groupby][0]:
1923 # left side == "Undefined" entry, not present on right_side
1924 append_left(read_group_result.pop(0))
1926 append_right(all_groups.pop(0))
1930 r['__fold'] = folded.get(r[groupby] and r[groupby][0], False)
1933 def _read_group_prepare(self, orderby, aggregated_fields, annotated_groupbys, query):
1935 Prepares the GROUP BY and ORDER BY terms for the read_group method. Adds the missing JOIN clause
1936 to the query if order should be computed against m2o field.
1937 :param orderby: the orderby definition in the form "%(field)s %(order)s"
1938 :param aggregated_fields: list of aggregated fields in the query
1939 :param annotated_groupbys: list of dictionaries returned by _read_group_process_groupby
1940 These dictionaries contain the qualified name of each groupby
1941 (fully qualified SQL name for the corresponding field),
1942 and the (non raw) field name.
1943 :param osv.Query query: the query under construction
1944 :return: (groupby_terms, orderby_terms)
1947 groupby_terms = [gb['qualified_field'] for gb in annotated_groupbys]
1948 groupby_fields = [gb['groupby'] for gb in annotated_groupbys]
1950 return groupby_terms, orderby_terms
1952 self._check_qorder(orderby)
1953 for order_part in orderby.split(','):
1954 order_split = order_part.split()
1955 order_field = order_split[0]
1956 if order_field in groupby_fields:
1958 if self._all_columns[order_field.split(':')[0]].column._type == 'many2one':
1959 order_clause = self._generate_order_by(order_part, query).replace('ORDER BY ', '')
1961 orderby_terms.append(order_clause)
1962 groupby_terms += [order_term.split()[0] for order_term in order_clause.split(',')]
1964 order = '"%s" %s' % (order_field, '' if len(order_split) == 1 else order_split[1])
1965 orderby_terms.append(order)
1966 elif order_field in aggregated_fields:
1967 orderby_terms.append(order_part)
1969 # Cannot order by a field that will not appear in the results (needs to be grouped or aggregated)
1970 _logger.warn('%s: read_group order by `%s` ignored, cannot sort on empty columns (not grouped/aggregated)',
1971 self._name, order_part)
1972 return groupby_terms, orderby_terms
1974 def _read_group_process_groupby(self, gb, query, context):
1976 Helper method to collect important information about groupbys: raw
1977 field name, type, time information, qualified name, ...
1979 split = gb.split(':')
1980 field_type = self._all_columns[split[0]].column._type
1981 gb_function = split[1] if len(split) == 2 else None
1982 temporal = field_type in ('date', 'datetime')
1983 tz_convert = field_type == 'datetime' and context.get('tz') in pytz.all_timezones
1984 qualified_field = self._inherits_join_calc(split[0], query)
1987 'day': 'dd MMM YYYY',
1988 'week': "'W'w YYYY",
1989 'month': 'MMMM YYYY',
1990 'quarter': 'QQQ YYYY',
1994 'day': dateutil.relativedelta.relativedelta(days=1),
1995 'week': datetime.timedelta(days=7),
1996 'month': dateutil.relativedelta.relativedelta(months=1),
1997 'quarter': dateutil.relativedelta.relativedelta(months=3),
1998 'year': dateutil.relativedelta.relativedelta(years=1)
2001 qualified_field = "timezone('%s', timezone('UTC',%s))" % (context.get('tz', 'UTC'), qualified_field)
2002 qualified_field = "date_trunc('%s', %s)" % (gb_function or 'month', qualified_field)
2003 if field_type == 'boolean':
2004 qualified_field = "coalesce(%s,false)" % qualified_field
2009 'display_format': display_formats[gb_function or 'month'] if temporal else None,
2010 'interval': time_intervals[gb_function or 'month'] if temporal else None,
2011 'tz_convert': tz_convert,
2012 'qualified_field': qualified_field
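# Sketch of the resulting annotation (the field name 'date_order' is hypothetical):
#
#     # the groupby spec 'date_order:week' on a datetime column is grouped on
#     #   date_trunc('week', date_order)  (wrapped in timezone() calls when the
#     #   context carries a valid 'tz'), displayed with format "'W'w YYYY" and
#     #   spanning a 7-day interval per group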
2015 def _read_group_prepare_data(self, key, value, groupby_dict, context):
2017 Helper method to sanitize the data received by read_group. None
2018 values are converted to False, and date/datetime values are formatted
2019 and corrected according to the timezones.
2021 value = False if value is None else value
2022 gb = groupby_dict.get(key)
2023 if gb and gb['type'] in ('date', 'datetime') and value:
2024 if isinstance(value, basestring):
2025 dt_format = DEFAULT_SERVER_DATETIME_FORMAT if gb['type'] == 'datetime' else DEFAULT_SERVER_DATE_FORMAT
2026 value = datetime.datetime.strptime(value, dt_format)
2027 if gb['tz_convert']:
2028 value = pytz.timezone(context['tz']).localize(value)
2031 def _read_group_get_domain(self, groupby, value):
2033 Helper method to construct the domain corresponding to a groupby and
2034 a given value. This is mostly relevant for date/datetime.
2036 if groupby['type'] in ('date', 'datetime') and value:
2037 dt_format = DEFAULT_SERVER_DATETIME_FORMAT if groupby['type'] == 'datetime' else DEFAULT_SERVER_DATE_FORMAT
2038 domain_dt_begin = value
2039 domain_dt_end = value + groupby['interval']
2040 if groupby['tz_convert']:
2041 domain_dt_begin = domain_dt_begin.astimezone(pytz.utc)
2042 domain_dt_end = domain_dt_end.astimezone(pytz.utc)
2043 return [(groupby['field'], '>=', domain_dt_begin.strftime(dt_format)),
2044 (groupby['field'], '<', domain_dt_end.strftime(dt_format))]
2045 if groupby['type'] == 'many2one' and value:
2047 return [(groupby['field'], '=', value)]
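# Sketch of the produced domain (the field names and dates are hypothetical):
#
#     # for a 'date_order:month' group whose value is June 2014, the extra domain is
#     #   [('date_order', '>=', '2014-06-01 00:00:00'), ('date_order', '<', '2014-07-01 00:00:00')]
#     # for a many2one group it is simply [('partner_id', '=', <id>)]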
2049 def _read_group_format_result(self, data, annotated_groupbys, groupby, groupby_dict, domain, context):
2051 Helper method to format the data contained in the dictionary data by
2052 adding the domain corresponding to its values, the groupbys in the
2053 context and by properly formatting the date/datetime values.
2055 domain_group = [dom for gb in annotated_groupbys for dom in self._read_group_get_domain(gb, data[gb['groupby']])]
2056 for k,v in data.iteritems():
2057 gb = groupby_dict.get(k)
2058 if gb and gb['type'] in ('date', 'datetime') and v:
2059 data[k] = babel.dates.format_date(v, format=gb['display_format'], locale=context.get('lang', 'en_US'))
2061 data['__domain'] = domain_group + domain
2062 if len(groupby) - len(annotated_groupbys) >= 1:
2063 data['__context'] = { 'group_by': groupby[len(annotated_groupbys):]}
2067 def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False, lazy=True):
2069 Get the list of records in list view grouped by the given ``groupby`` fields
2071 :param cr: database cursor
2072 :param uid: current user id
2073 :param domain: list specifying search criteria [['field_name', 'operator', 'value'], ...]
2074 :param list fields: list of fields present in the list view specified on the object
2075 :param list groupby: list of groupby descriptions by which the records will be grouped.
2076 A groupby description is either a field (then it will be grouped by that field)
2077 or a string 'field:groupby_function'. Right now, the only functions supported
2078 are 'day', 'week', 'month', 'quarter' or 'year', and they only make sense for
2079 date/datetime fields.
2080 :param int offset: optional number of records to skip
2081 :param int limit: optional max number of records to return
2082 :param dict context: context arguments, like lang, time zone.
2083 :param list orderby: optional ``order by`` specification, for
2084 overriding the natural sort ordering of the
2085 groups, see also :py:meth:`~osv.osv.osv.search`
2086 (supported only for many2one fields currently)
2087 :param bool lazy: if true, the results are only grouped by the first groupby and the
2088 remaining groupbys are put in the __context key. If false, all the groupbys are done in one call.
2090 :return: list of dictionaries (one dictionary for each record) containing:
2092 * the values of fields grouped by the fields in ``groupby`` argument
2093 * __domain: list of tuples specifying the search criteria
2094 * __context: dictionary with argument like ``groupby``
2095 :rtype: [{'field_name_1': value, ...}, ...]
2096 :raise AccessError: * if user has no read rights on the requested object
2097 * if user tries to bypass access rules for read on the requested object
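# Usage sketch (illustrative; 'sale.order', its fields and the returned values are
# hypothetical here):
#
#     self.pool['sale.order'].read_group(cr, uid, [('state', '=', 'done')],
#         fields=['partner_id', 'amount_total'], groupby=['partner_id'], context=context)
#     # -> [{'partner_id': (7, 'Agrolait'), 'partner_id_count': 3, 'amount_total': 1250.0,
#     #      '__domain': [('partner_id', '=', 7), ('state', '=', 'done')]}, ...]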
2101 self.check_access_rights(cr, uid, 'read')
2102 query = self._where_calc(cr, uid, domain, context=context)
2103 fields = fields or self._columns.keys()
2105 groupby = [groupby] if isinstance(groupby, basestring) else groupby
2106 groupby_list = groupby[:1] if lazy else groupby
2107 annotated_groupbys = [self._read_group_process_groupby(gb, query, context)
2108 for gb in groupby_list]
2109 groupby_fields = [g['field'] for g in annotated_groupbys]
2110 order = orderby or ','.join([g for g in groupby_list])
2111 groupby_dict = {gb['groupby']: gb for gb in annotated_groupbys}
2113 self._apply_ir_rules(cr, uid, query, 'read', context=context)
2114 for gb in groupby_fields:
2115 assert gb in fields, "Fields in 'groupby' must appear in the list of fields to read (perhaps it's missing in the list view?)"
2116 groupby_def = self._columns.get(gb) or (self._inherit_fields.get(gb) and self._inherit_fields.get(gb)[2])
2117 assert groupby_def and groupby_def._classic_write, "Fields in 'groupby' must be regular database-persisted fields (no function or related fields), or function fields with store=True"
2118 if not (gb in self._all_columns):
2119 # Don't allow arbitrary values, as this would be a SQL injection vector!
2120 raise except_orm(_('Invalid group_by'),
2121 _('Invalid group_by specification: "%s".\nA group_by specification must be a list of valid fields.')%(gb,))
2123 aggregated_fields = [
2125 if f not in ('id', 'sequence')
2126 if f not in groupby_fields
2127 if f in self._all_columns
2128 if self._all_columns[f].column._type in ('integer', 'float')
2129 if getattr(self._all_columns[f].column, '_classic_write')]
2131 field_formatter = lambda f: (self._all_columns[f].column.group_operator or 'sum', self._inherits_join_calc(f, query), f)
2132 select_terms = ["%s(%s) AS %s" % field_formatter(f) for f in aggregated_fields]
2134 for gb in annotated_groupbys:
2135 select_terms.append('%s as "%s" ' % (gb['qualified_field'], gb['groupby']))
2137 groupby_terms, orderby_terms = self._read_group_prepare(order, aggregated_fields, annotated_groupbys, query)
2138 from_clause, where_clause, where_clause_params = query.get_sql()
2139 if lazy and (len(groupby_fields) >= 2 or not context.get('group_by_no_leaf')):
2140 count_field = groupby_fields[0] if len(groupby_fields) >= 1 else '_'
2143 count_field += '_count'
2145 prefix_terms = lambda prefix, terms: (prefix + " " + ",".join(terms)) if terms else ''
2146 prefix_term = lambda prefix, term: ('%s %s' % (prefix, term)) if term else ''
2149 SELECT min(%(table)s.id) AS id, count(%(table)s.id) AS %(count_field)s %(extra_fields)s
2157 'table': self._table,
2158 'count_field': count_field,
2159 'extra_fields': prefix_terms(',', select_terms),
2160 'from': from_clause,
2161 'where': prefix_term('WHERE', where_clause),
2162 'groupby': prefix_terms('GROUP BY', groupby_terms),
2163 'orderby': prefix_terms('ORDER BY', orderby_terms),
2164 'limit': prefix_term('LIMIT', int(limit) if limit else None),
2165 'offset': prefix_term('OFFSET', int(offset) if offset else None),
2167 cr.execute(query, where_clause_params)
2168 fetched_data = cr.dictfetchall()
2170 if not groupby_fields:
2173 many2onefields = [gb['field'] for gb in annotated_groupbys if gb['type'] == 'many2one']
2175 data_ids = [r['id'] for r in fetched_data]
2176 many2onefields = list(set(many2onefields))
2177 data_dict = {d['id']: d for d in self.read(cr, uid, data_ids, many2onefields, context=context)}
2178 for d in fetched_data:
2179 d.update(data_dict[d['id']])
2181 data = map(lambda r: {k: self._read_group_prepare_data(k,v, groupby_dict, context) for k,v in r.iteritems()}, fetched_data)
2182 result = [self._read_group_format_result(d, annotated_groupbys, groupby, groupby_dict, domain, context) for d in data]
2183 if lazy and groupby_fields[0] in self._group_by_full:
2184 # Right now, read_group only fills results in lazy mode (by default).
2185 # If you need to have the empty groups in 'eager' mode, then the
2186 # method _read_group_fill_results needs to be completely reimplemented
2188 result = self._read_group_fill_results(cr, uid, domain, groupby_fields[0], groupby[len(annotated_groupbys):],
2189 aggregated_fields, count_field, result, read_group_order=order,
2193 def _inherits_join_add(self, current_model, parent_model_name, query):
2195 Add missing table SELECT and JOIN clause to ``query`` for reaching the parent table (no duplicates)
2196 :param current_model: current model object
2197 :param parent_model_name: name of the parent model for which the clauses should be added
2198 :param query: query object on which the JOIN should be added
2200 inherits_field = current_model._inherits[parent_model_name]
2201 parent_model = self.pool[parent_model_name]
2202 parent_alias, parent_alias_statement = query.add_join((current_model._table, parent_model._table, inherits_field, 'id', inherits_field), implicit=True)
2205 def _inherits_join_calc(self, field, query):
2207 Adds missing table select and join clause(s) to ``query`` for reaching
2208 the field coming from an '_inherits' parent table (no duplicates).
2210 :param field: name of inherited field to reach
2211 :param query: query object on which the JOIN should be added
2212 :return: qualified name of field, to be used in SELECT clause
2214 current_table = self
2215 parent_alias = '"%s"' % current_table._table
2216 while field in current_table._inherit_fields and field not in current_table._columns:
2217 parent_model_name = current_table._inherit_fields[field][0]
2218 parent_table = self.pool[parent_model_name]
2219 parent_alias = self._inherits_join_add(current_table, parent_model_name, query)
2220 current_table = parent_table
2221 return '%s."%s"' % (parent_alias, field)
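# Sketch (hypothetical model delegating to 'res.partner' through 'partner_id'):
# _inherits_join_calc('name', query) would add the join from this model's table to
# res_partner on the partner_id column, and return something like '"res_partner"."name"'
# (using the join alias) for use in the SELECT clause.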
2223 def _parent_store_compute(self, cr):
2224 if not self._parent_store:
2226 _logger.info('Computing parent left and right for table %s...', self._table)
2227 def browse_rec(root, pos=0):
2229 where = self._parent_name+'='+str(root)
2231 where = self._parent_name+' IS NULL'
2232 if self._parent_order:
2233 where += ' order by '+self._parent_order
2234 cr.execute('SELECT id FROM '+self._table+' WHERE '+where)
2236 for id in cr.fetchall():
2237 pos2 = browse_rec(id[0], pos2)
2238 cr.execute('update '+self._table+' set parent_left=%s, parent_right=%s where id=%s', (pos, pos2, root))
2240 query = 'SELECT id FROM '+self._table+' WHERE '+self._parent_name+' IS NULL'
2241 if self._parent_order:
2242 query += ' order by ' + self._parent_order
2245 for (root,) in cr.fetchall():
2246 pos = browse_rec(root, pos)
2247 self.invalidate_cache(cr, SUPERUSER_ID, ['parent_left', 'parent_right'])
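# Sketch of how the nested-set values are used (simplified, hypothetical table name):
#
#     # once parent_left/parent_right are computed, a record X together with its
#     # descendants can be selected with an interval test instead of a recursive query:
#     #   SELECT id FROM my_table
#     #    WHERE parent_left >= X.parent_left AND parent_left < X.parent_right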
2250 def _update_store(self, cr, f, k):
2251 _logger.info("storing computed values of fields.function '%s'", k)
2252 ss = self._columns[k]._symbol_set
2253 update_query = 'UPDATE "%s" SET "%s"=%s WHERE id=%%s' % (self._table, k, ss[0])
2254 cr.execute('select id from '+self._table)
2255 ids_lst = map(lambda x: x[0], cr.fetchall())
2257 iids = ids_lst[:AUTOINIT_RECALCULATE_STORED_FIELDS]
2258 ids_lst = ids_lst[AUTOINIT_RECALCULATE_STORED_FIELDS:]
2259 res = f.get(cr, self, iids, k, SUPERUSER_ID, {})
2260 for key, val in res.items():
2263 # if val is a many2one, just write the ID
2264 if type(val) == tuple:
2266 if val is not False:
2267 cr.execute(update_query, (ss[1](val), key))
2269 def _check_selection_field_value(self, cr, uid, field, value, context=None):
2270 """Raise except_orm if value is not among the valid values for the selection field"""
2271 if self._columns[field]._type == 'reference':
2272 val_model, val_id_str = value.split(',', 1)
2275 val_id = long(val_id_str)
2279 raise except_orm(_('ValidateError'),
2280 _('Invalid value for reference field "%s.%s" (last part must be a non-zero integer): "%s"') % (self._table, field, value))
2284 if isinstance(self._columns[field].selection, (tuple, list)):
2285 if val in dict(self._columns[field].selection):
2287 elif val in dict(self._columns[field].selection(self, cr, uid, context=context)):
2289 raise except_orm(_('ValidateError'),
2290 _('The value "%s" for the field "%s.%s" is not in the selection') % (value, self._name, field))
2292 def _check_removed_columns(self, cr, log=False):
2293 # iterate on the database columns to drop the NOT NULL constraints
2294 # of fields which were required but have been removed (or will be added by another module)
2295 columns = [c for c in self._columns if not (isinstance(self._columns[c], fields.function) and not self._columns[c].store)]
2296 columns += MAGIC_COLUMNS
2297 cr.execute("SELECT a.attname, a.attnotnull"
2298 " FROM pg_class c, pg_attribute a"
2299 " WHERE c.relname=%s"
2300 " AND c.oid=a.attrelid"
2301 " AND a.attisdropped=%s"
2302 " AND pg_catalog.format_type(a.atttypid, a.atttypmod) NOT IN ('cid', 'tid', 'oid', 'xid')"
2303 " AND a.attname NOT IN %s", (self._table, False, tuple(columns)))
2305 for column in cr.dictfetchall():
2307 _logger.debug("column %s is in the table %s but not in the corresponding object %s",
2308 column['attname'], self._table, self._name)
2309 if column['attnotnull']:
2310 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, column['attname']))
2311 _schema.debug("Table '%s': column '%s': dropped NOT NULL constraint",
2312 self._table, column['attname'])
2314 def _save_constraint(self, cr, constraint_name, type):
2316 Record the creation of a constraint for this model, to make it possible
2317 to delete it later when the module is uninstalled. Type must be either
2318 'f' (foreign key constraint) or 'u' (unique constraint).
2320 if not self._module:
2321 # no need to save constraints for custom models as they're not part
2324 assert type in ('f', 'u')
2326 SELECT 1 FROM ir_model_constraint, ir_module_module
2327 WHERE ir_model_constraint.module=ir_module_module.id
2328 AND ir_model_constraint.name=%s
2329 AND ir_module_module.name=%s
2330 """, (constraint_name, self._module))
2333 INSERT INTO ir_model_constraint
2334 (name, date_init, date_update, module, model, type)
2335 VALUES (%s, now() AT TIME ZONE 'UTC', now() AT TIME ZONE 'UTC',
2336 (SELECT id FROM ir_module_module WHERE name=%s),
2337 (SELECT id FROM ir_model WHERE model=%s), %s)""",
2338 (constraint_name, self._module, self._name, type))
2340 def _save_relation_table(self, cr, relation_table):
2342 Record the creation of a many2many relation table for this model, to make it possible
2343 to delete it later when the module is uninstalled.
2346 SELECT 1 FROM ir_model_relation, ir_module_module
2347 WHERE ir_model_relation.module=ir_module_module.id
2348 AND ir_model_relation.name=%s
2349 AND ir_module_module.name=%s
2350 """, (relation_table, self._module))
2352 cr.execute("""INSERT INTO ir_model_relation (name, date_init, date_update, module, model)
2353 VALUES (%s, now() AT TIME ZONE 'UTC', now() AT TIME ZONE 'UTC',
2354 (SELECT id FROM ir_module_module WHERE name=%s),
2355 (SELECT id FROM ir_model WHERE model=%s))""",
2356 (relation_table, self._module, self._name))
2357 self.invalidate_cache(cr, SUPERUSER_ID)
2359 # checked version: for direct m2o starting from `self`
2360 def _m2o_add_foreign_key_checked(self, source_field, dest_model, ondelete):
2361 assert self.is_transient() or not dest_model.is_transient(), \
2362 'Many2One relationships from non-transient Model to TransientModel are forbidden'
2363 if self.is_transient() and not dest_model.is_transient():
2364 # TransientModel relationships to regular Models are annoying
2365 # usually because they could block deletion due to the FKs.
2366 # So unless stated otherwise we default them to ondelete=cascade.
2367 ondelete = ondelete or 'cascade'
2368 fk_def = (self._table, source_field, dest_model._table, ondelete or 'set null')
2369 self._foreign_keys.add(fk_def)
2370 _schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s", *fk_def)
2372 # unchecked version: for custom cases, such as m2m relationships
2373 def _m2o_add_foreign_key_unchecked(self, source_table, source_field, dest_model, ondelete):
2374 fk_def = (source_table, source_field, dest_model._table, ondelete or 'set null')
2375 self._foreign_keys.add(fk_def)
2376 _schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s", *fk_def)
2378 def _drop_constraint(self, cr, source_table, constraint_name):
2379 cr.execute("ALTER TABLE %s DROP CONSTRAINT %s" % (source_table,constraint_name))
2381 def _m2o_fix_foreign_key(self, cr, source_table, source_field, dest_model, ondelete):
2382 # Find FK constraint(s) currently established for the m2o field,
2383 # and see whether they are stale or not
2384 cr.execute("""SELECT confdeltype as ondelete_rule, conname as constraint_name,
2385 cl2.relname as foreign_table
2386 FROM pg_constraint as con, pg_class as cl1, pg_class as cl2,
2387 pg_attribute as att1, pg_attribute as att2
2388 WHERE con.conrelid = cl1.oid
2389 AND cl1.relname = %s
2390 AND con.confrelid = cl2.oid
2391 AND array_lower(con.conkey, 1) = 1
2392 AND con.conkey[1] = att1.attnum
2393 AND att1.attrelid = cl1.oid
2394 AND att1.attname = %s
2395 AND array_lower(con.confkey, 1) = 1
2396 AND con.confkey[1] = att2.attnum
2397 AND att2.attrelid = cl2.oid
2398 AND att2.attname = %s
2399 AND con.contype = 'f'""", (source_table, source_field, 'id'))
2400 constraints = cr.dictfetchall()
2402 if len(constraints) == 1:
2403 # Is it the right constraint?
2405 if cons['ondelete_rule'] != POSTGRES_CONFDELTYPES.get((ondelete or 'set null').upper(), 'a')\
2406 or cons['foreign_table'] != dest_model._table:
2407 # Wrong FK: drop it and recreate
2408 _schema.debug("Table '%s': dropping obsolete FK constraint: '%s'",
2409 source_table, cons['constraint_name'])
2410 self._drop_constraint(cr, source_table, cons['constraint_name'])
2412 # it's all good, nothing to do!
2415 # Multiple FKs found for the same field, drop them all, and re-create
2416 for cons in constraints:
2417 _schema.debug("Table '%s': dropping duplicate FK constraints: '%s'",
2418 source_table, cons['constraint_name'])
2419 self._drop_constraint(cr, source_table, cons['constraint_name'])
2421 # (re-)create the FK
2422 self._m2o_add_foreign_key_checked(source_field, dest_model, ondelete)
2425 def _set_default_value_on_column(self, cr, column_name, context=None):
2426 # ideally should use add_default_value but fails
2427 # due to ir.values not being ready
2429 # get old-style default
2430 default = self._defaults.get(column_name)
2431 if callable(default):
2432 default = default(self, cr, SUPERUSER_ID, context)
2434 # get new_style default if no old-style
2436 record = self.new(cr, SUPERUSER_ID, context=context)
2437 field = self._fields[column_name]
2438 field.determine_default(record)
2439 defaults = dict(record._cache)
2440 if column_name in defaults:
2441 default = field.convert_to_write(defaults[column_name])
2443 column = self._columns[column_name]
2444 ss = column._symbol_set
2445 db_default = ss[1](default)
2446 # Write default if non-NULL, except for booleans for which False means
2447 # the same as NULL - this saves us an expensive query on large tables.
2448 write_default = (db_default is not None if column._type != 'boolean'
2451 _logger.debug("Table '%s': setting default value of new column %s to %r",
2452 self._table, column_name, default)
2453 query = 'UPDATE "%s" SET "%s"=%s WHERE "%s" is NULL' % (
2454 self._table, column_name, ss[0], column_name)
2455 cr.execute(query, (db_default,))
2456 # this is a disgrace
2459 def _auto_init(self, cr, context=None):
2462 Call _field_create and, unless _auto is False:
2464 - create the corresponding table in database for the model,
2465 - possibly add the parent columns in database,
2466 - possibly add the columns 'create_uid', 'create_date', 'write_uid',
2467 'write_date' in database if _log_access is True (the default),
2468 - report on database columns no more existing in _columns,
2469 - remove no more existing not null constraints,
2470 - alter existing database columns to match _columns,
2471 - create database tables to match _columns,
2472 - add database indices to match _columns,
2473 - save in self._foreign_keys a list of foreign keys to create (see
2477 self._foreign_keys = set()
2478 raise_on_invalid_object_name(self._name)
2481 store_compute = False
2482 stored_fields = [] # new-style stored fields with compute
2484 update_custom_fields = context.get('update_custom_fields', False)
2485 self._field_create(cr, context=context)
2486 create = not self._table_exist(cr)
2490 self._create_table(cr)
2493 cr.execute('SELECT min(id) FROM "%s"' % (self._table,))
2494 has_rows = cr.fetchone()[0] is not None
2497 if self._parent_store:
2498 if not self._parent_columns_exist(cr):
2499 self._create_parent_columns(cr)
2500 store_compute = True
2502 self._check_removed_columns(cr, log=False)
2504 # iterate on the "object columns"
2505 column_data = self._select_column_data(cr)
2507 for k, f in self._columns.iteritems():
2508 if k == 'id': # FIXME: maybe id should be a regular column?
2510 # Don't update custom (also called manual) fields
2511 if f.manual and not update_custom_fields:
2514 if isinstance(f, fields.one2many):
2515 self._o2m_raise_on_missing_reference(cr, f)
2517 elif isinstance(f, fields.many2many):
2518 self._m2m_raise_or_create_relation(cr, f)
2521 res = column_data.get(k)
2523 # The field is not found as-is in database; check whether it
2524 # exists under an old name.
2525 if not res and hasattr(f, 'oldname'):
2526 res = column_data.get(f.oldname)
2528 cr.execute('ALTER TABLE "%s" RENAME "%s" TO "%s"' % (self._table, f.oldname, k))
2530 column_data[k] = res
2531 _schema.debug("Table '%s': renamed column '%s' to '%s'",
2532 self._table, f.oldname, k)
2534 # The field already exists in database. Possibly
2535 # change its type, rename it, drop it or change its
2538 f_pg_type = res['typname']
2539 f_pg_size = res['size']
2540 f_pg_notnull = res['attnotnull']
2541 if isinstance(f, fields.function) and not f.store and\
2542 not getattr(f, 'nodrop', False):
2543 _logger.info('column %s (%s) converted to a function, removed from table %s',
2544 k, f.string, self._table)
2545 cr.execute('ALTER TABLE "%s" DROP COLUMN "%s" CASCADE' % (self._table, k))
2547 _schema.debug("Table '%s': dropped column '%s' with cascade",
2551 f_obj_type = get_pg_type(f) and get_pg_type(f)[0]
2556 ('text', 'char', pg_varchar(f.size), '::%s' % pg_varchar(f.size)),
2557 ('varchar', 'text', 'TEXT', ''),
2558 ('int4', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
2559 ('date', 'datetime', 'TIMESTAMP', '::TIMESTAMP'),
2560 ('timestamp', 'date', 'date', '::date'),
2561 ('numeric', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
2562 ('float8', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
2564 if f_pg_type == 'varchar' and f._type == 'char' and f_pg_size and (f.size is None or f_pg_size < f.size):
2566 with cr.savepoint():
2567 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" TYPE %s' % (self._table, k, pg_varchar(f.size)))
2568 except psycopg2.NotSupportedError:
2569 # In-place ALTER TABLE cannot be done because a view depends on this field.
2570 # Do a manual copy. This will drop the view (it will be recreated later)
2571 cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO temp_change_size' % (self._table, k))
2572 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, pg_varchar(f.size)))
2573 cr.execute('UPDATE "%s" SET "%s"=temp_change_size::%s' % (self._table, k, pg_varchar(f.size)))
2574 cr.execute('ALTER TABLE "%s" DROP COLUMN temp_change_size CASCADE' % (self._table,))
2576 _schema.debug("Table '%s': column '%s' (type varchar) changed size from %s to %s",
2577 self._table, k, f_pg_size or 'unlimited', f.size or 'unlimited')
2579 if (f_pg_type==c[0]) and (f._type==c[1]):
2580 if f_pg_type != f_obj_type:
2582 cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO __temp_type_cast' % (self._table, k))
2583 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, c[2]))
2584 cr.execute(('UPDATE "%s" SET "%s"= __temp_type_cast'+c[3]) % (self._table, k))
2585 cr.execute('ALTER TABLE "%s" DROP COLUMN __temp_type_cast CASCADE' % (self._table,))
2587 _schema.debug("Table '%s': column '%s' changed type from %s to %s",
2588 self._table, k, c[0], c[1])
2591 if f_pg_type != f_obj_type:
2595 newname = k + '_moved' + str(i)
2596 cr.execute("SELECT count(1) FROM pg_class c,pg_attribute a " \
2597 "WHERE c.relname=%s " \
2598 "AND a.attname=%s " \
2599 "AND c.oid=a.attrelid ", (self._table, newname))
2600 if not cr.fetchone()[0]:
2604 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, k))
2605 cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO "%s"' % (self._table, k, newname))
2606 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, get_pg_type(f)[1]))
2607 cr.execute("COMMENT ON COLUMN %s.\"%s\" IS %%s" % (self._table, k), (f.string,))
2608 _schema.debug("Table '%s': column '%s' has changed type (DB=%s, def=%s), data moved to column %s !",
2609 self._table, k, f_pg_type, f._type, newname)
2611 # if the field is required and hasn't got a NOT NULL constraint
2612 if f.required and f_pg_notnull == 0:
2614 self._set_default_value_on_column(cr, k, context=context)
2615 # add the NOT NULL constraint
2617 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k), log_exceptions=False)
2619 _schema.debug("Table '%s': column '%s': added NOT NULL constraint",
2622 msg = "Table '%s': unable to set a NOT NULL constraint on column '%s' !\n"\
2623 "If you want to have it, you should update the records and execute manually:\n"\
2624 "ALTER TABLE %s ALTER COLUMN %s SET NOT NULL"
2625 _schema.warning(msg, self._table, k, self._table, k)
2627 elif not f.required and f_pg_notnull == 1:
2628 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, k))
2630 _schema.debug("Table '%s': column '%s': dropped NOT NULL constraint",
2633 indexname = '%s_%s_index' % (self._table, k)
2634 cr.execute("SELECT indexname FROM pg_indexes WHERE indexname = %s and tablename = %s", (indexname, self._table))
2635 res2 = cr.dictfetchall()
2636 if not res2 and f.select:
2637 cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (self._table, k, self._table, k))
2639 if f._type == 'text':
2640 # FIXME: for fields.text columns we should try creating GIN indexes instead (seems most suitable for an ERP context)
2641 msg = "Table '%s': Adding (b-tree) index for %s column '%s'."\
2642 " This is probably useless (does not work for fulltext search) and prevents INSERTs of long texts"\
2643 " because there is a length limit for indexable btree values!\n"\
2644 "Use a search view instead if you simply want to make the field searchable."
2645 _schema.warning(msg, self._table, f._type, k)
2646 if res2 and not f.select:
2647 cr.execute('DROP INDEX "%s_%s_index"' % (self._table, k))
2649 msg = "Table '%s': dropping index for column '%s' of type '%s' as it is not required anymore"
2650 _schema.debug(msg, self._table, k, f._type)
2652 if isinstance(f, fields.many2one) or (isinstance(f, fields.function) and f._type == 'many2one' and f.store):
2653 dest_model = self.pool[f._obj]
2654 if dest_model._auto and dest_model._table != 'ir_actions':
2655 self._m2o_fix_foreign_key(cr, self._table, k, dest_model, f.ondelete)
2657 # The field doesn't exist in database. Create it if necessary.
2659 if not isinstance(f, fields.function) or f.store:
2660 # add the missing field
2661 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, get_pg_type(f)[1]))
2662 cr.execute("COMMENT ON COLUMN %s.\"%s\" IS %%s" % (self._table, k), (f.string,))
2663 _schema.debug("Table '%s': added column '%s' with definition=%s",
2664 self._table, k, get_pg_type(f)[1])
2668 self._set_default_value_on_column(cr, k, context=context)
2670 # remember the functions to call for the stored fields
2671 if isinstance(f, fields.function):
2673 if f.store is not True: # i.e. if f.store is a dict
2674 order = f.store[f.store.keys()[0]][2]
2675 todo_end.append((order, self._update_store, (f, k)))
2677 # remember new-style stored fields with compute method
2678 if k in self._fields and self._fields[k].depends:
2679 stored_fields.append(self._fields[k])
2681 # and add constraints if needed
2682 if isinstance(f, fields.many2one) or (isinstance(f, fields.function) and f._type == 'many2one' and f.store):
2683 if f._obj not in self.pool:
2684 raise except_orm('Programming Error', 'There is no reference available for %s' % (f._obj,))
2685 dest_model = self.pool[f._obj]
2686 ref = dest_model._table
2687 # ir_actions is inherited so foreign key doesn't work on it
2688 if dest_model._auto and ref != 'ir_actions':
2689 self._m2o_add_foreign_key_checked(k, dest_model, f.ondelete)
2691 cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (self._table, k, self._table, k))
2695 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k))
2696 _schema.debug("Table '%s': column '%s': added a NOT NULL constraint",
2699 msg = "WARNING: unable to set column %s of table %s not null !\n"\
2700 "Try to re-run: openerp-server --update=module\n"\
2701 "If it doesn't work, update records and execute manually:\n"\
2702 "ALTER TABLE %s ALTER COLUMN %s SET NOT NULL"
2703 _logger.warning(msg, k, self._table, self._table, k, exc_info=True)
2707 cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (self._table,))
2708 create = not bool(cr.fetchone())
2710 cr.commit() # start a new transaction
2713 self._add_sql_constraints(cr)
2716 self._execute_sql(cr)
2719 self._parent_store_compute(cr)
2723 # trigger computation of new-style stored fields with a compute
2725 _logger.info("Storing computed values of %s fields %s",
2726 self._name, ', '.join(sorted(f.name for f in stored_fields)))
2727 recs = self.browse(cr, SUPERUSER_ID, [], {'active_test': False})
2728 recs = recs.search([])
2730 map(recs._recompute_todo, stored_fields)
2733 todo_end.append((1000, func, ()))
2737 def _auto_end(self, cr, context=None):
2738 """ Create the foreign keys recorded by _auto_init. """
2739 for t, k, r, d in self._foreign_keys:
2740 cr.execute('ALTER TABLE "%s" ADD FOREIGN KEY ("%s") REFERENCES "%s" ON DELETE %s' % (t, k, r, d))
2741 self._save_constraint(cr, "%s_%s_fkey" % (t, k), 'f')
2743 del self._foreign_keys
2746 def _table_exist(self, cr):
2747 cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (self._table,))
2751 def _create_table(self, cr):
2752 cr.execute('CREATE TABLE "%s" (id SERIAL NOT NULL, PRIMARY KEY(id))' % (self._table,))
2753 cr.execute(("COMMENT ON TABLE \"%s\" IS %%s" % self._table), (self._description,))
2754 _schema.debug("Table '%s': created", self._table)
2757 def _parent_columns_exist(self, cr):
2758 cr.execute("""SELECT c.relname
2759 FROM pg_class c, pg_attribute a
2760 WHERE c.relname=%s AND a.attname=%s AND c.oid=a.attrelid
2761 """, (self._table, 'parent_left'))
2765 def _create_parent_columns(self, cr):
2766 cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_left" INTEGER' % (self._table,))
2767 cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_right" INTEGER' % (self._table,))
2768 if 'parent_left' not in self._columns:
2769 _logger.error('create a column parent_left on object %s: fields.integer(\'Left Parent\', select=1)',
2771 _schema.debug("Table '%s': added column '%s' with definition=%s",
2772 self._table, 'parent_left', 'INTEGER')
2773 elif not self._columns['parent_left'].select:
2774 _logger.error('parent_left column on object %s must be indexed! Add select=1 to the field definition)',
2776 if 'parent_right' not in self._columns:
2777 _logger.error('create a column parent_right on object %s: fields.integer(\'Right Parent\', select=1)',
2779 _schema.debug("Table '%s': added column '%s' with definition=%s",
2780 self._table, 'parent_right', 'INTEGER')
2781 elif not self._columns['parent_right'].select:
2782 _logger.error('parent_right column on object %s must be indexed! Add select=1 to the field definition)',
2784 if self._columns[self._parent_name].ondelete not in ('cascade', 'restrict'):
2785 _logger.error("The column %s on object %s must be set as ondelete='cascade' or 'restrict'",
2786 self._parent_name, self._name)
2791 def _select_column_data(self, cr):
2792 # attlen is the number of bytes necessary to represent the type when
2793 # the type has a fixed size. If the type has a varying size attlen is
2794 # -1 and atttypmod is the size limit + 4, or -1 if there is no limit.
2795 cr.execute("SELECT c.relname,a.attname,a.attlen,a.atttypmod,a.attnotnull,a.atthasdef,t.typname,CASE WHEN a.attlen=-1 THEN (CASE WHEN a.atttypmod=-1 THEN 0 ELSE a.atttypmod-4 END) ELSE a.attlen END as size " \
2796 "FROM pg_class c,pg_attribute a,pg_type t " \
2797 "WHERE c.relname=%s " \
2798 "AND c.oid=a.attrelid " \
2799 "AND a.atttypid=t.oid", (self._table,))
2800 return dict(map(lambda x: (x['attname'], x),cr.dictfetchall()))
2803 def _o2m_raise_on_missing_reference(self, cr, f):
2804 # TODO this check should be a method on fields.one2many.
2805 if f._obj in self.pool:
2806 other = self.pool[f._obj]
2807 # TODO the condition could use fields_get_keys().
2808 if f._fields_id not in other._columns.keys():
2809 if f._fields_id not in other._inherit_fields.keys():
2810 raise except_orm('Programming Error', "There is no reference field '%s' found for '%s'" % (f._fields_id, f._obj,))
2812 def _m2m_raise_or_create_relation(self, cr, f):
2813 m2m_tbl, col1, col2 = f._sql_names(self)
2814 # do not create relations for custom fields as they do not belong to a module
2815 # they will be automatically removed when dropping the corresponding ir.model.field
2816 # table names for custom relations all start with x_, see __init__
2817 if not m2m_tbl.startswith('x_'):
2818 self._save_relation_table(cr, m2m_tbl)
2819 cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (m2m_tbl,))
2820 if not cr.dictfetchall():
2821 if f._obj not in self.pool:
2822 raise except_orm('Programming Error', 'Many2Many destination model does not exist: `%s`' % (f._obj,))
2823 dest_model = self.pool[f._obj]
2824 ref = dest_model._table
2825 cr.execute('CREATE TABLE "%s" ("%s" INTEGER NOT NULL, "%s" INTEGER NOT NULL, UNIQUE("%s","%s"))' % (m2m_tbl, col1, col2, col1, col2))
2826 # create foreign key references with ondelete=cascade, unless the targets are SQL views
2827 cr.execute("SELECT relkind FROM pg_class WHERE relkind IN ('v') AND relname=%s", (ref,))
2828 if not cr.fetchall():
2829 self._m2o_add_foreign_key_unchecked(m2m_tbl, col2, dest_model, 'cascade')
2830 cr.execute("SELECT relkind FROM pg_class WHERE relkind IN ('v') AND relname=%s", (self._table,))
2831 if not cr.fetchall():
2832 self._m2o_add_foreign_key_unchecked(m2m_tbl, col1, self, 'cascade')
2834 cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (m2m_tbl, col1, m2m_tbl, col1))
2835 cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (m2m_tbl, col2, m2m_tbl, col2))
2836 cr.execute("COMMENT ON TABLE \"%s\" IS 'RELATION BETWEEN %s AND %s'" % (m2m_tbl, self._table, ref))
2838 _schema.debug("Create table '%s': m2m relation between '%s' and '%s'", m2m_tbl, self._table, ref)
2841 def _add_sql_constraints(self, cr):
2844 Modify this model's database table constraints so they match the ones in
2848 def unify_cons_text(txt):
2849 return txt.lower().replace(', ',',').replace(' (','(')
2851 for (key, con, _) in self._sql_constraints:
2852 conname = '%s_%s' % (self._table, key)
2854 self._save_constraint(cr, conname, 'u')
2855 cr.execute("SELECT conname, pg_catalog.pg_get_constraintdef(oid, true) as condef FROM pg_constraint where conname=%s", (conname,))
2856 existing_constraints = cr.dictfetchall()
2860 'query': 'ALTER TABLE "%s" DROP CONSTRAINT "%s"' % (self._table, conname, ),
2861 'msg_ok': "Table '%s': dropped constraint '%s'. Reason: its definition changed from '%%s' to '%s'" % (
2862 self._table, conname, con),
2863 'msg_err': "Table '%s': unable to drop \'%s\' constraint !" % (self._table, con),
2868 'query': 'ALTER TABLE "%s" ADD CONSTRAINT "%s" %s' % (self._table, conname, con,),
2869 'msg_ok': "Table '%s': added constraint '%s' with definition=%s" % (self._table, conname, con),
2870 'msg_err': "Table '%s': unable to add \'%s\' constraint !\n If you want to have it, you should update the records and execute manually:\n%%s" % (
2876 if not existing_constraints:
2877 # constraint does not exist:
2878 sql_actions['add']['execute'] = True
2879 sql_actions['add']['msg_err'] = sql_actions['add']['msg_err'] % (sql_actions['add']['query'], )
2880 elif unify_cons_text(con) not in [unify_cons_text(item['condef']) for item in existing_constraints]:
2881 # constraint exists but its definition has changed:
2882 sql_actions['drop']['execute'] = True
2883 sql_actions['drop']['msg_ok'] = sql_actions['drop']['msg_ok'] % (existing_constraints[0]['condef'].lower(), )
2884 sql_actions['add']['execute'] = True
2885 sql_actions['add']['msg_err'] = sql_actions['add']['msg_err'] % (sql_actions['add']['query'], )
2887 # execute the planned SQL actions, in order:
2888 sql_actions = [item for item in sql_actions.values()]
2889 sql_actions.sort(key=lambda x: x['order'])
2890 for sql_action in [action for action in sql_actions if action['execute']]:
2892 cr.execute(sql_action['query'])
2894 _schema.debug(sql_action['msg_ok'])
2896 _schema.warning(sql_action['msg_err'])
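# Sketch of the corresponding model declaration (the constraint itself is hypothetical):
#
#     _sql_constraints = [
#         # (key, SQL constraint definition, error message shown to the user)
#         ('name_uniq', 'unique(name)', 'The name must be unique!'),
#     ]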
2900 def _execute_sql(self, cr):
2901 """ Execute the SQL code from the _sql attribute (if any)."""
2902 if hasattr(self, "_sql"):
2903 for line in self._sql.split(';'):
2904 line2 = line.replace('\n', '').strip()
2910 # Update objects that use this one to update their _inherits fields
2914 def _inherits_reload_src(cls):
2915 """ Recompute the _inherit_fields mapping on each _inherits'd child model."""
2916 for model in cls.pool.values():
2917 if cls._name in model._inherits:
2918 model._inherits_reload()
2921 def _inherits_reload(cls):
2922 """ Recompute the _inherit_fields mapping.
2924 This will also call itself on each inherits'd child model.
2928 for table in cls._inherits:
2929 other = cls.pool[table]
2930 for col in other._columns.keys():
2931 res[col] = (table, cls._inherits[table], other._columns[col], table)
2932 for col in other._inherit_fields.keys():
2933 res[col] = (table, cls._inherits[table], other._inherit_fields[col][2], other._inherit_fields[col][3])
2934 cls._inherit_fields = res
2935 cls._all_columns = cls._get_column_infos()
2937 # interface columns with new-style fields
2938 for attr, column in cls._columns.items():
2939 if attr not in cls._fields:
2940 cls._add_field(attr, column.to_field())
2942 # interface inherited fields with new-style fields (note that the
2943 # reverse order is to stay consistent with _all_columns above)
2944 for parent_model, parent_field in reversed(cls._inherits.items()):
2945 for attr, field in cls.pool[parent_model]._fields.iteritems():
2946 if attr not in cls._fields:
2947 cls._add_field(attr, field.copy(
2948 related=(parent_field, attr),
2953 cls._inherits_reload_src()
2956 def _get_column_infos(cls):
2957 """Returns a dict mapping all field names (direct fields and fields
2958 inherited via _inherits) to a ``column_info`` struct giving
2959 detailed column information """
2961 # do not invert the for loops, since local fields may hide inherited ones!
2962 for k, (parent, m2o, col, original_parent) in cls._inherit_fields.iteritems():
2963 result[k] = fields.column_info(k, col, parent, m2o, original_parent)
2964 for k, col in cls._columns.iteritems():
2965 result[k] = fields.column_info(k, col)
2969 def _inherits_check(cls):
2970 for table, field_name in cls._inherits.items():
2971 if field_name not in cls._columns:
2972 _logger.info('Missing many2one field definition for _inherits reference "%s" in "%s", using default one.', field_name, cls._name)
2973 cls._columns[field_name] = fields.many2one(table, string="Automatically created field to link to parent %s" % table,
2974 required=True, ondelete="cascade")
2975 elif not cls._columns[field_name].required or cls._columns[field_name].ondelete.lower() not in ("cascade", "restrict"):
2976 _logger.warning('Field definition for _inherits reference "%s" in "%s" must be marked as "required" with ondelete="cascade" or "restrict", forcing it to required + cascade.', field_name, cls._name)
2977 cls._columns[field_name].required = True
2978 cls._columns[field_name].ondelete = "cascade"
2980 # reflect fields with delegate=True in dictionary cls._inherits
2981 for field in cls._fields.itervalues():
2982 if field.type == 'many2one' and not field.related and field.delegate:
2983 if not field.required:
2984 _logger.warning("Field %s with delegate=True must be required.", field)
2985 field.required = True
2986 if field.ondelete.lower() not in ('cascade', 'restrict'):
2987 field.ondelete = 'cascade'
2988 cls._inherits[field.comodel_name] = field.name
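# Sketch of an _inherits declaration (hypothetical model):
#
#     _inherits = {'res.partner': 'partner_id'}
#     # 'partner_id' must be a required many2one with ondelete='cascade' (or 'restrict');
#     # every field of res.partner then becomes accessible on the child model through it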
2991 def _prepare_setup_fields(self):
2992 """ Prepare the setup of fields once the models have been loaded. """
2993 for field in self._fields.itervalues():
2997 def _setup_fields(self, partial=False):
2998 """ Setup the fields (dependency triggers, etc). """
2999 for field in self._fields.itervalues():
3000 if partial and field.manual and \
3001 field.relational and \
3002 (field.comodel_name not in self.pool or \
3003 field.inverse_name not in self.pool[field.comodel_name]._fields):
3004 # do not set up manual fields that refer to unknown models
3006 field.setup(self.env)
3008 # group fields by compute to determine field.computed_fields
3009 fields_by_compute = defaultdict(list)
3010 for field in self._fields.itervalues():
3012 field.computed_fields = fields_by_compute[field.compute]
3013 field.computed_fields.append(field)
3015 field.computed_fields = []
3017 def fields_get(self, cr, user, allfields=None, context=None, write_access=True):
3018 """ fields_get([fields])
3020 Return the definition of each field.
3022 The returned value is a dictionary (indexed by field name) of
3023 dictionaries. The _inherits'd fields are included. The string, help,
3024 and selection (if present) attributes are translated.
3026 :param cr: database cursor
3027 :param user: current user id
3028 :param allfields: list of fields
3029 :param context: context arguments, like lang, time zone
3030 :return: dictionary of field dictionaries, each one describing a field of the business object
3031 :raise AccessError: * if user has no create/write rights on the requested object
3034 recs = self.browse(cr, user, [], context)
3037 for fname, field in self._fields.iteritems():
3038 if allfields and fname not in allfields:
3040 if field.groups and not recs.user_has_groups(field.groups):
3042 res[fname] = field.get_description(recs.env)
3044 # if user cannot create or modify records, make all fields readonly
3045 has_access = functools.partial(recs.check_access_rights, raise_exception=False)
3046 if not (has_access('write') or has_access('create')):
3047 for description in res.itervalues():
3048 description['readonly'] = True
3049 description['states'] = {}
3053 def get_empty_list_help(self, cr, user, help, context=None):
3054 """ Generic method giving the help message displayed when there is
3055 no result to display in a list or kanban view. By default it returns
3056 the help given as parameter, which is generally the help message
3057 defined in the action.
3061 def check_field_access_rights(self, cr, user, operation, fields, context=None):
3063 Check the user access rights on the given fields. This raises Access
3064 Denied if the user does not have the rights. Otherwise it returns the
3065 fields (as-is if ``fields`` is not falsy, or the readable/writable
3066 fields if ``fields`` is falsy).
3068 if user == SUPERUSER_ID:
3069 return fields or list(self._fields)
3072 """ determine whether user has access to field `fname` """
3073 field = self._fields.get(fname)
3074 if field and field.groups:
3075 return self.user_has_groups(cr, user, groups=field.groups, context=context)
3080 fields = filter(valid, self._fields)
3082 invalid_fields = set(filter(lambda name: not valid(name), fields))
3084 _logger.warning('Access Denied by ACLs for operation: %s, uid: %s, model: %s, fields: %s',
3085 operation, user, self._name, ', '.join(invalid_fields))
3087 _('The requested operation cannot be completed due to security restrictions. '
3088 'Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
3089 (self._description, operation))
3093 # add explicit old-style implementation to read()
3095 def read(self, cr, user, ids, fields=None, context=None, load='_classic_read'):
3096 records = self.browse(cr, user, ids, context)
3097 result = BaseModel.read(records, fields, load=load)
3098 return result if isinstance(ids, list) else (bool(result) and result[0])
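# Illustration (added note): the same read can be expressed in both APIs,
# assuming a hypothetical 'res.partner'-like model:
#
#   # old-style: explicit cursor, uid and ids
#   vals = model.read(cr, uid, [7, 8], ['name', 'email'], context=context)
#   # new-style: called directly on a recordset
#   vals = env['res.partner'].browse([7, 8]).read(['name', 'email'])
#
# Both return a list of dictionaries, one per record, each including the 'id' key.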
3100 # new-style implementation of read()
3102 def read(self, fields=None, load='_classic_read'):
3105 Reads the requested fields for the records in `self`, low-level/RPC
3106 method. In Python code, prefer :meth:`~.browse`.
3108 :param fields: list of field names to return (default is all fields)
3109 :return: a list of dictionaries mapping field names to their values,
3110 with one dictionary per record
3111 :raise AccessError: if user has no read rights on some of the given
3114 # check access rights
3115 self.check_access_rights('read')
3116 fields = self.check_field_access_rights('read', fields)
3118 # split fields into stored and computed fields
3119 stored, computed = [], []
3121 if name in self._columns:
3123 elif name in self._fields:
3124 computed.append(name)
3126 _logger.warning("%s.read() with unknown field '%s'", self._name, name)
3128 # fetch stored fields from the database to the cache
3129 self._read_from_database(stored)
3131 # retrieve results from records; this takes values from the cache and
3132 # computes remaining fields
3134 name_fields = [(name, self._fields[name]) for name in (stored + computed)]
3135 use_name_get = (load == '_classic_read')
3138 values = {'id': record.id}
3139 for name, field in name_fields:
3140 values[name] = field.convert_to_read(record[name], use_name_get)
3141 result.append(values)
3142 except MissingError:
3148 def _prefetch_field(self, field):
3149 """ Read from the database in order to fetch `field` (:class:`Field`
3150 instance) for `self` in cache.
3152 # fetch the records of this model without field_name in their cache
3153 records = self._in_cache_without(field)
3155 if len(records) > PREFETCH_MAX:
3156 records = records[:PREFETCH_MAX] | self
3158 # by default, simply fetch field
3159 fnames = {field.name}
3161 if self.env.in_draft:
3162 # we may be doing an onchange, do not prefetch other fields
3164 elif self.env.field_todo(field):
3165 # field must be recomputed, do not prefetch records to recompute
3166 records -= self.env.field_todo(field)
3167 elif not self._context.get('prefetch_fields', True):
3168 # do not prefetch other fields
3170 elif self._columns[field.name]._prefetch:
3171 # here we can optimize: prefetch all classic and many2one fields
3173 for fname, fcolumn in self._columns.iteritems()
3174 if fcolumn._prefetch
3175 if not fcolumn.groups or self.user_has_groups(fcolumn.groups)
3178 # fetch records with read()
3179 assert self in records and field.name in fnames
3182 result = records.read(list(fnames), load='_classic_write')
3186 # check the cache, and update it if necessary
3187 if not self._cache.contains(field):
3188 for values in result:
3189 record = self.browse(values.pop('id'))
3190 record._cache.update(record._convert_to_cache(values, validate=False))
3191 if not self._cache.contains(field):
3192 e = AccessError("No value found for %s.%s" % (self, field.name))
3193 self._cache[field] = FailedValue(e)
3196 def _read_from_database(self, field_names):
3197 """ Read the given fields of the records in `self` from the database,
3198 and store them in cache. Access errors are also stored in cache.
3201 cr, user, context = env.args
3203 # FIXME: The query construction needs to be rewritten using the internal Query
3204 # object, as in search(), to avoid ambiguous column references when
3205 # reading/sorting on a table that is auto_joined to another table with
3206 # common columns (e.g. the magical columns)
3208 # Construct a clause for the security rules.
3209 # 'tables' holds the list of tables necessary for the SELECT, including
3210 # the ir.rule clauses, and contains at least self._table.
3211 rule_clause, rule_params, tables = env['ir.rule'].domain_get(self._name, 'read')
3213 # determine the fields that are stored as columns in self._table
3214 fields_pre = [f for f in field_names if self._columns[f]._classic_write]
3216 # we need fully-qualified column names in case len(tables) > 1
3218 if isinstance(self._columns.get(f), fields.binary) and \
3219 context.get('bin_size_%s' % f, context.get('bin_size')):
3220 # PG 9.2 introduces conflicting pg_size_pretty(numeric) -> need ::cast
3221 return 'pg_size_pretty(length(%s."%s")::bigint) as "%s"' % (self._table, f, f)
3223 return '%s."%s"' % (self._table, f)
3224 qual_names = map(qualify, set(fields_pre + ['id']))
3226 query = """ SELECT %(qual_names)s FROM %(tables)s
3227 WHERE %(table)s.id IN %%s AND (%(extra)s)
3230 'qual_names': ",".join(qual_names),
3231 'tables': ",".join(tables),
3232 'table': self._table,
3233 'extra': " OR ".join(rule_clause) if rule_clause else "TRUE",
3234 'order': self._parent_order or self._order,
3238 for sub_ids in cr.split_for_in_conditions(self.ids):
3239 cr.execute(query, [tuple(sub_ids)] + rule_params)
3240 result.extend(cr.dictfetchall())
3242 ids = [vals['id'] for vals in result]
3245 # translate the fields if necessary
3246 if context.get('lang'):
3247 ir_translation = env['ir.translation']
3248 for f in fields_pre:
3249 if self._columns[f].translate:
3250 #TODO: optimize out of this loop
3251 res_trans = ir_translation._get_ids(
3252 '%s,%s' % (self._name, f), 'model', context['lang'], ids)
3254 vals[f] = res_trans.get(vals['id'], False) or vals[f]
3256 # apply the symbol_get functions of the fields we just read
3257 for f in fields_pre:
3258 symbol_get = self._columns[f]._symbol_get
3261 vals[f] = symbol_get(vals[f])
3263 # store result in cache for POST fields
3265 record = self.browse(vals['id'])
3266 record._cache.update(record._convert_to_cache(vals, validate=False))
3268 # determine the fields that must be processed now
3269 fields_post = [f for f in field_names if not self._columns[f]._classic_write]
3271 # Compute POST fields, grouped by multi
3272 by_multi = defaultdict(list)
3273 for f in fields_post:
3274 by_multi[self._columns[f]._multi].append(f)
3276 for multi, fs in by_multi.iteritems():
3278 res2 = self._columns[fs[0]].get(cr, self._model, ids, fs, user, context=context, values=result)
3279 assert res2 is not None, \
3280 'The function field "%s" on the "%s" model returned None\n' \
3281 '(a dictionary was expected).' % (fs[0], self._name)
3283 # TOCHECK: why we get a string instead of a dict in Python 2.6
3284 # if isinstance(res2[vals['id']], str): res2[vals['id']] = eval(res2[vals['id']])
3285 multi_fields = res2.get(vals['id'], {})
3288 vals[f] = multi_fields.get(f, [])
3291 res2 = self._columns[f].get(cr, self._model, ids, f, user, context=context, values=result)
3294 vals[f] = res2[vals['id']]
3298 # Warn about deprecated fields now that fields_pre and fields_post are computed
3299 for f in field_names:
3300 column = self._columns[f]
3301 if column.deprecated:
3302 _logger.warning('Field %s.%s is deprecated: %s', self._name, f, column.deprecated)
3304 # store result in cache
3306 record = self.browse(vals.pop('id'))
3307 record._cache.update(record._convert_to_cache(vals, validate=False))
3309 # store failed values in cache for the records that could not be read
3310 fetched = self.browse(ids)
3311 missing = self - fetched
3313 extras = fetched - self
3316 _("Database fetch misses ids ({}) and has extra ids ({}), may be caused by a type incoherence in a previous request").format(
3317 ', '.join(map(repr, missing._ids)),
3318 ', '.join(map(repr, extras._ids)),
3320 # store an access error exception in existing records
3322 _('The requested operation cannot be completed due to security restrictions. Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
3323 (self._name, 'read')
3325 forbidden = missing.exists()
3326 forbidden._cache.update(FailedValue(exc))
3327 # store a missing error exception in non-existing records
3329 _('One of the documents you are trying to access has been deleted, please try again after refreshing.')
3331 (missing - forbidden)._cache.update(FailedValue(exc))
3334 def get_metadata(self):
3336 Returns some metadata about the given records.
3338 :return: list of ownership dictionaries for each requested record
3339 :rtype: list of dictionaries with the following keys:
3342 * create_uid: user who created the record
3343 * create_date: date when the record was created
3344 * write_uid: last user who changed the record
3345 * write_date: date of the last change to the record
3346 * xmlid: XML ID to use to refer to this record (if there is one), in format ``module.name``
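For illustration only (hypothetical values), one entry of the returned
list could look like::

    {'id': 42,
     'create_uid': (1, 'Administrator'),
     'create_date': '2014-06-01 10:00:00',
     'write_uid': (1, 'Administrator'),
     'write_date': '2014-06-02 12:30:00',
     'xmlid': 'base.main_company'}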
3349 if self._log_access:
3350 fields += ['create_uid', 'create_date', 'write_uid', 'write_date']
3351 quoted_table = '"%s"' % self._table
3352 fields_str = ",".join('%s.%s' % (quoted_table, field) for field in fields)
3353 query = '''SELECT %s, __imd.module, __imd.name
3354 FROM %s LEFT JOIN ir_model_data __imd
3355 ON (__imd.model = %%s and __imd.res_id = %s.id)
3356 WHERE %s.id IN %%s''' % (fields_str, quoted_table, quoted_table, quoted_table)
3357 self._cr.execute(query, (self._name, tuple(self.ids)))
3358 res = self._cr.dictfetchall()
3360 uids = set(r[k] for r in res for k in ['write_uid', 'create_uid'] if r.get(k))
3361 names = dict(self.env['res.users'].browse(uids).name_get())
3365 value = r[key] = r[key] or False
3366 if key in ('write_uid', 'create_uid') and value in names:
3367 r[key] = (value, names[value])
3368 r['xmlid'] = ("%(module)s.%(name)s" % r) if r['name'] else False
3369 del r['name'], r['module']
3372 def _check_concurrency(self, cr, ids, context):
3375 if not (context.get(self.CONCURRENCY_CHECK_FIELD) and self._log_access):
3377 check_clause = "(id = %s AND %s < COALESCE(write_date, create_date, (now() at time zone 'UTC'))::timestamp)"
3378 for sub_ids in cr.split_for_in_conditions(ids):
3381 id_ref = "%s,%s" % (self._name, id)
3382 update_date = context[self.CONCURRENCY_CHECK_FIELD].pop(id_ref, None)
3384 ids_to_check.extend([id, update_date])
3385 if not ids_to_check:
3387 cr.execute("SELECT id FROM %s WHERE %s" % (self._table, " OR ".join([check_clause]*(len(ids_to_check)/2))), tuple(ids_to_check))
3390 # mention the first one only to keep the error message readable
3391 raise except_orm('ConcurrencyException', _('A document was modified since you last viewed it (%s:%d)') % (self._description, res[0]))
3393 def _check_record_rules_result_count(self, cr, uid, ids, result_ids, operation, context=None):
3394 """Verify that the number of rows returned after applying record rules matches
3395 the length of `ids`, and raise an appropriate exception if it does not.
3399 ids, result_ids = set(ids), set(result_ids)
3400 missing_ids = ids - result_ids
3402 # Attempt to distinguish record rule restriction vs deleted records,
3403 # to provide a more specific error message: check whether the missing ids are still present in the database
3404 cr.execute('SELECT id FROM ' + self._table + ' WHERE id IN %s', (tuple(missing_ids),))
3405 forbidden_ids = [x[0] for x in cr.fetchall()]
3407 # the missing ids are (at least partially) hidden by access rules
3408 if uid == SUPERUSER_ID:
3410 _logger.warning('Access Denied by record rules for operation: %s on record ids: %r, uid: %s, model: %s', operation, forbidden_ids, uid, self._name)
3411 raise except_orm(_('Access Denied'),
3412 _('The requested operation cannot be completed due to security restrictions. Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
3413 (self._description, operation))
3415 # If we get here, the missing_ids are not in the database
3416 if operation in ('read','unlink'):
3417 # No need to warn about deleting an already deleted record.
3418 # And no error when reading a record that was deleted, to prevent spurious
3419 # errors for non-transactional search/read sequences coming from clients
3421 _logger.warning('Failed operation on deleted record(s): %s, uid: %s, model: %s', operation, uid, self._name)
3422 raise except_orm(_('Missing document(s)'),
3423 _('One of the documents you are trying to access has been deleted, please try again after refreshing.'))
3426 def check_access_rights(self, cr, uid, operation, raise_exception=True): # no context on purpose.
3427 """Verifies that the operation given by ``operation`` is allowed for the user
3428 according to the access rights."""
3429 return self.pool.get('ir.model.access').check(cr, uid, self._name, operation, raise_exception)
3431 def check_access_rule(self, cr, uid, ids, operation, context=None):
3432 """Verifies that the operation given by ``operation`` is allowed for the user
3433 according to ir.rules.
3435 :param operation: one of ``write``, ``unlink``
3436 :raise except_orm: * if current ir.rules do not permit this operation.
3437 :return: None if the operation is allowed
3439 if uid == SUPERUSER_ID:
3442 if self.is_transient():
3443 # Only one single implicit access rule for transient models: owner only!
3444 # This is ok to hardcode because we assert that TransientModels always
3445 # have log_access enabled so that the create_uid column is always there.
3446 # And even with _inherits, these fields are always present in the local
3447 # table too, so no need for JOINs.
3448 cr.execute("""SELECT distinct create_uid
3450 WHERE id IN %%s""" % self._table, (tuple(ids),))
3451 uids = [x[0] for x in cr.fetchall()]
3452 if len(uids) != 1 or uids[0] != uid:
3453 raise except_orm(_('Access Denied'),
3454 _('For this kind of document, you may only access records you created yourself.\n\n(Document type: %s)') % (self._description,))
3456 where_clause, where_params, tables = self.pool.get('ir.rule').domain_get(cr, uid, self._name, operation, context=context)
3458 where_clause = ' and ' + ' and '.join(where_clause)
3459 for sub_ids in cr.split_for_in_conditions(ids):
3460 cr.execute('SELECT ' + self._table + '.id FROM ' + ','.join(tables) +
3461 ' WHERE ' + self._table + '.id IN %s' + where_clause,
3462 [sub_ids] + where_params)
3463 returned_ids = [x['id'] for x in cr.dictfetchall()]
3464 self._check_record_rules_result_count(cr, uid, sub_ids, returned_ids, operation, context=context)
3466 def create_workflow(self, cr, uid, ids, context=None):
3467 """Create a workflow instance for each given record IDs."""
3468 from openerp import workflow
3470 workflow.trg_create(uid, self._name, res_id, cr)
3471 # self.invalidate_cache(cr, uid, context=context) ?
3474 def delete_workflow(self, cr, uid, ids, context=None):
3475 """Delete the workflow instances bound to the given record IDs."""
3476 from openerp import workflow
3478 workflow.trg_delete(uid, self._name, res_id, cr)
3479 self.invalidate_cache(cr, uid, context=context)
3482 def step_workflow(self, cr, uid, ids, context=None):
3483 """Reevaluate the workflow instances of the given record IDs."""
3484 from openerp import workflow
3486 workflow.trg_write(uid, self._name, res_id, cr)
3487 # self.invalidate_cache(cr, uid, context=context) ?
3490 def signal_workflow(self, cr, uid, ids, signal, context=None):
3491 """Send given workflow signal and return a dict mapping ids to workflow results"""
3492 from openerp import workflow
3495 result[res_id] = workflow.trg_validate(uid, self._name, res_id, signal, cr)
3496 # self.invalidate_cache(cr, uid, context=context) ?
3499 def redirect_workflow(self, cr, uid, old_new_ids, context=None):
3500 """ Rebind the workflow instance bound to the given 'old' record IDs to
3501 the given 'new' IDs (``old_new_ids`` is a list of pairs ``(old, new)``).
3503 from openerp import workflow
3504 for old_id, new_id in old_new_ids:
3505 workflow.trg_redirect(uid, self._name, old_id, new_id, cr)
3506 self.invalidate_cache(cr, uid, context=context)
3509 def unlink(self, cr, uid, ids, context=None):
3512 Deletes the records of the current set
3514 :raise AccessError: * if user has no unlink rights on the requested object
3515 * if user tries to bypass access rules for unlink on the requested object
3516 :raise UserError: if the record is a default property for other records
3521 if isinstance(ids, (int, long)):
3524 result_store = self._store_get_values(cr, uid, ids, self._all_columns.keys(), context)
3526 # for recomputing new-style fields
3527 recs = self.browse(cr, uid, ids, context)
3528 recs.modified(self._fields)
3530 self._check_concurrency(cr, ids, context)
3532 self.check_access_rights(cr, uid, 'unlink')
3534 ir_property = self.pool.get('ir.property')
3536 # Check if the records are used as default properties.
3537 domain = [('res_id', '=', False),
3538 ('value_reference', 'in', ['%s,%s' % (self._name, i) for i in ids]),
3540 if ir_property.search(cr, uid, domain, context=context):
3541 raise except_orm(_('Error'), _('Unable to delete this document because it is used as a default property'))
3543 # Delete the records' properties.
3544 property_ids = ir_property.search(cr, uid, [('res_id', 'in', ['%s,%s' % (self._name, i) for i in ids])], context=context)
3545 ir_property.unlink(cr, uid, property_ids, context=context)
3547 self.delete_workflow(cr, uid, ids, context=context)
3549 self.check_access_rule(cr, uid, ids, 'unlink', context=context)
3550 pool_model_data = self.pool.get('ir.model.data')
3551 ir_values_obj = self.pool.get('ir.values')
3552 ir_attachment_obj = self.pool.get('ir.attachment')
3553 for sub_ids in cr.split_for_in_conditions(ids):
3554 cr.execute('delete from ' + self._table + ' ' \
3555 'where id IN %s', (sub_ids,))
3557 # Removing the ir_model_data reference if the record being deleted is a record created by xml/csv file,
3558 # as these are not connected with real database foreign keys, and would be dangling references.
3559 # Note: following steps performed as admin to avoid access rights restrictions, and with no context
3560 # to avoid possible side-effects during admin calls.
3561 # Step 1. Calling unlink of ir_model_data only for the affected IDS
3562 reference_ids = pool_model_data.search(cr, SUPERUSER_ID, [('res_id','in',list(sub_ids)),('model','=',self._name)])
3563 # Step 2. Marching towards the real deletion of referenced records
3565 pool_model_data.unlink(cr, SUPERUSER_ID, reference_ids)
3567 # For the same reason, removing the record relevant to ir_values
3568 ir_value_ids = ir_values_obj.search(cr, uid,
3569 ['|',('value','in',['%s,%s' % (self._name, sid) for sid in sub_ids]),'&',('res_id','in',list(sub_ids)),('model','=',self._name)],
3572 ir_values_obj.unlink(cr, uid, ir_value_ids, context=context)
3574 # For the same reason, removing the record relevant to ir_attachment
3575 # The search is performed with sql as the search method of ir_attachment is overridden to hide attachments of deleted records
3576 cr.execute('select id from ir_attachment where res_model = %s and res_id in %s', (self._name, sub_ids))
3577 ir_attachment_ids = [ir_attachment[0] for ir_attachment in cr.fetchall()]
3578 if ir_attachment_ids:
3579 ir_attachment_obj.unlink(cr, uid, ir_attachment_ids, context=context)
3581 # invalidate the *whole* cache, since the orm does not handle all
3582 # changes made in the database, like cascading delete!
3583 recs.invalidate_cache()
3585 for order, obj_name, store_ids, fields in result_store:
3586 if obj_name == self._name:
3587 effective_store_ids = set(store_ids) - set(ids)
3589 effective_store_ids = store_ids
3590 if effective_store_ids:
3591 obj = self.pool[obj_name]
3592 cr.execute('select id from '+obj._table+' where id IN %s', (tuple(effective_store_ids),))
3593 rids = map(lambda x: x[0], cr.fetchall())
3595 obj._store_set_values(cr, uid, rids, fields, context)
3597 # recompute new-style fields
3606 def write(self, vals):
3609 Updates all records in the current set with the provided values.
3611 :param dict vals: fields to update and the value to set on them e.g::
3613 {'foo': 1, 'bar': "Qux"}
3615 will set the field ``foo`` to ``1`` and the field ``bar`` to
3616 ``"Qux"`` if those are valid (otherwise it will trigger an error).
3618 :raise AccessError: * if user has no write rights on the requested object
3619 * if user tries to bypass access rules for write on the requested object
3620 :raise ValidateError: if user tries to enter invalid value for a field that is not in selection
3621 :raise UserError: if a loop would be created in a hierarchy of objects as a result of the operation (such as setting an object as its own parent)
3623 .. _openerp/models/relationals/format:
3625 .. note:: Relational fields use a special "commands" format to manipulate their values
3627 This format is a list of command triplets executed sequentially,
3628 possible command triplets are:
3630 ``(0, _, values: dict)``
3631 links to a new record created from the provided values
3632 ``(1, id, values: dict)``
3633 updates the already-linked record of id ``id`` with the
3634 provided values
3635 ``(2, id, _)``
3636 unlinks and deletes the linked record of id ``id``
3637 ``(3, id, _)``
3638 unlinks the linked record of id ``id`` without deleting it
3639 ``(4, id, _)``
3640 links to an existing record of id ``id``
3641 ``(5, _, _)``
3642 unlinks all records in the relation, equivalent to using
3643 the command ``3`` on every linked record
3644 ``(6, _, ids)``
3645 replaces the existing list of linked records by the provided
3646 ones, equivalent to using ``5`` then ``4`` for each id in
3647 ``ids``
3649 (in command triplets, ``_`` values are ignored and can be
3650 anything, generally ``0`` or ``False``)
3652 Any command can be used on :class:`~openerp.fields.Many2many`,
3653 only ``0``, ``1`` and ``2`` can be used on
3654 :class:`~openerp.fields.One2many`.
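For example (hypothetical field names), assuming a one2many field
``line_ids`` and a many2many field ``tag_ids``::

    record.write({
        'line_ids': [
            (0, 0, {'name': 'new line'}),  # create a new line and link it
            (1, 7, {'name': 'renamed'}),   # update the linked line with id 7
            (2, 8, 0),                     # delete the linked line with id 8
        ],
        'tag_ids': [(6, 0, [1, 2, 3])],    # replace all tags by ids 1, 2 and 3
    })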
3659 self._check_concurrency(self._ids)
3660 self.check_access_rights('write')
3662 # No user-driven update of these columns
3663 for field in itertools.chain(MAGIC_COLUMNS, ('parent_left', 'parent_right')):
3664 vals.pop(field, None)
3666 # split up fields into old-style and pure new-style ones
3667 old_vals, new_vals, unknown = {}, {}, []
3668 for key, val in vals.iteritems():
3669 if key in self._columns:
3671 elif key in self._fields:
3677 _logger.warning("%s.write() with unknown fields: %s", self._name, ', '.join(sorted(unknown)))
3679 # write old-style fields with (low-level) method _write
3681 self._write(old_vals)
3683 # put the values of pure new-style fields into cache, and inverse them
3686 record._cache.update(record._convert_to_cache(new_vals, update=True))
3687 for key in new_vals:
3688 self._fields[key].determine_inverse(self)
3692 def _write(self, cr, user, ids, vals, context=None):
3693 # low-level implementation of write()
3698 self.check_field_access_rights(cr, user, 'write', vals.keys())
3699 deleted_related = defaultdict(list)
3700 for field in vals.keys():
3702 if field in self._columns:
3703 fobj = self._columns[field]
3704 elif field in self._inherit_fields:
3705 fobj = self._inherit_fields[field][2]
3708 if fobj._type in ['one2many', 'many2many'] and vals[field]:
3709 for wtuple in vals[field]:
3710 if isinstance(wtuple, (tuple, list)) and wtuple[0] == 2:
3711 deleted_related[fobj._obj].append(wtuple[1])
3716 for group in groups:
3717 module = group.split(".")[0]
3718 grp = group.split(".")[1]
3719 cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name=%s and module=%s and model=%s) and uid=%s", \
3720 (grp, module, 'res.groups', user))
3721 readonly = cr.fetchall()
3722 if readonly[0][0] >= 1:
3729 result = self._store_get_values(cr, user, ids, vals.keys(), context) or []
3731 # for recomputing new-style fields
3732 recs = self.browse(cr, user, ids, context)
3733 modified_fields = list(vals)
3734 if self._log_access:
3735 modified_fields += ['write_date', 'write_uid']
3736 recs.modified(modified_fields)
3738 parents_changed = []
3739 parent_order = self._parent_order or self._order
3740 if self._parent_store and (self._parent_name in vals) and not context.get('defer_parent_store_computation'):
3741 # The parent_left/right computation may take up to
3742 # 5 seconds. No need to recompute the values if the
3743 # parent is the same.
3744 # Note: to respect parent_order, nodes must be processed in
3745 # order, so ``parents_changed`` must be ordered properly.
3746 parent_val = vals[self._parent_name]
3748 query = "SELECT id FROM %s WHERE id IN %%s AND (%s != %%s OR %s IS NULL) ORDER BY %s" % \
3749 (self._table, self._parent_name, self._parent_name, parent_order)
3750 cr.execute(query, (tuple(ids), parent_val))
3752 query = "SELECT id FROM %s WHERE id IN %%s AND (%s IS NOT NULL) ORDER BY %s" % \
3753 (self._table, self._parent_name, parent_order)
3754 cr.execute(query, (tuple(ids),))
3755 parents_changed = map(operator.itemgetter(0), cr.fetchall())
3762 totranslate = context.get('lang', False) and (context['lang'] != 'en_US')
3764 field_column = self._all_columns.get(field) and self._all_columns.get(field).column
3765 if field_column and field_column.deprecated:
3766 _logger.warning('Field %s.%s is deprecated: %s', self._name, field, field_column.deprecated)
3767 if field in self._columns:
3768 if self._columns[field]._classic_write and not (hasattr(self._columns[field], '_fnct_inv')):
3769 if (not totranslate) or not self._columns[field].translate:
3770 upd0.append('"'+field+'"='+self._columns[field]._symbol_set[0])
3771 upd1.append(self._columns[field]._symbol_set[1](vals[field]))
3772 direct.append(field)
3774 upd_todo.append(field)
3776 updend.append(field)
3777 if field in self._columns \
3778 and hasattr(self._columns[field], 'selection') \
3780 self._check_selection_field_value(cr, user, field, vals[field], context=context)
3782 if self._log_access:
3783 upd0.append('write_uid=%s')
3784 upd0.append("write_date=(now() at time zone 'UTC')")
3788 self.check_access_rule(cr, user, ids, 'write', context=context)
3789 for sub_ids in cr.split_for_in_conditions(ids):
3790 cr.execute('update ' + self._table + ' set ' + ','.join(upd0) + ' ' \
3791 'where id IN %s', upd1 + [sub_ids])
3792 if cr.rowcount != len(sub_ids):
3793 raise MissingError(_('One of the records you are trying to modify has already been deleted (Document type: %s).') % self._description)
3798 if self._columns[f].translate:
3799 src_trans = self.pool[self._name].read(cr, user, ids, [f])[0][f]
3802 # Inserting value to DB
3803 context_wo_lang = dict(context, lang=None)
3804 self.write(cr, user, ids, {f: vals[f]}, context=context_wo_lang)
3805 self.pool.get('ir.translation')._set_ids(cr, user, self._name+','+f, 'model', context['lang'], ids, vals[f], src_trans)
3807 # call the 'set' method of fields which are not classic_write
3808 upd_todo.sort(lambda x, y: self._columns[x].priority-self._columns[y].priority)
3810 # default element in context must be removed when calling a one2many or many2many
3811 rel_context = context.copy()
3812 for c in context.items():
3813 if c[0].startswith('default_'):
3814 del rel_context[c[0]]
3816 for field in upd_todo:
3818 result += self._columns[field].set(cr, self, id, field, vals[field], user, context=rel_context) or []
3820 unknown_fields = updend[:]
3821 for table in self._inherits:
3822 col = self._inherits[table]
3824 for sub_ids in cr.split_for_in_conditions(ids):
3825 cr.execute('select distinct "'+col+'" from "'+self._table+'" ' \
3826 'where id IN %s', (sub_ids,))
3827 nids.extend([x[0] for x in cr.fetchall()])
3831 if self._inherit_fields[val][0] == table:
3833 unknown_fields.remove(val)
3835 self.pool[table].write(cr, user, nids, v, context)
3839 'No such field(s) in model %s: %s.',
3840 self._name, ', '.join(unknown_fields))
3842 # check Python constraints
3843 recs._validate_fields(vals)
3845 # TODO: use _order to set dest at the right position and not first node of parent
3846 # We can't defer parent_store computation because the stored function
3847 # fields that are computed may refer (directly or indirectly) to
3848 # parent_left/right (via a child_of domain)
3851 self.pool._init_parent[self._name] = True
3853 order = self._parent_order or self._order
3854 parent_val = vals[self._parent_name]
3856 clause, params = '%s=%%s' % (self._parent_name,), (parent_val,)
3858 clause, params = '%s IS NULL' % (self._parent_name,), ()
3860 for id in parents_changed:
3861 cr.execute('SELECT parent_left, parent_right FROM %s WHERE id=%%s' % (self._table,), (id,))
3862 pleft, pright = cr.fetchone()
3863 distance = pright - pleft + 1
3865 # Positions of current siblings, to locate proper insertion point;
3866 # this can _not_ be fetched outside the loop, as it needs to be refreshed
3867 # after each update, in case several nodes are sequentially inserted one
3868 # next to the other (i.e computed incrementally)
3869 cr.execute('SELECT parent_right, id FROM %s WHERE %s ORDER BY %s' % (self._table, clause, parent_order), params)
3870 parents = cr.fetchall()
3872 # Find Position of the element
3874 for (parent_pright, parent_id) in parents:
3877 position = parent_pright and parent_pright + 1 or 1
3879 # It's the first node of the parent
3884 cr.execute('select parent_left from '+self._table+' where id=%s', (parent_val,))
3885 position = cr.fetchone()[0] + 1
3887 if pleft < position <= pright:
3888 raise except_orm(_('UserError'), _('Recursivity Detected.'))
3890 if pleft < position:
3891 cr.execute('update '+self._table+' set parent_left=parent_left+%s where parent_left>=%s', (distance, position))
3892 cr.execute('update '+self._table+' set parent_right=parent_right+%s where parent_right>=%s', (distance, position))
3893 cr.execute('update '+self._table+' set parent_left=parent_left+%s, parent_right=parent_right+%s where parent_left>=%s and parent_left<%s', (position-pleft, position-pleft, pleft, pright))
3895 cr.execute('update '+self._table+' set parent_left=parent_left+%s where parent_left>=%s', (distance, position))
3896 cr.execute('update '+self._table+' set parent_right=parent_right+%s where parent_right>=%s', (distance, position))
3897 cr.execute('update '+self._table+' set parent_left=parent_left-%s, parent_right=parent_right-%s where parent_left>=%s and parent_left<%s', (pleft-position+distance, pleft-position+distance, pleft+distance, pright+distance))
3898 recs.invalidate_cache(['parent_left', 'parent_right'])
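# Worked illustration (added comment, hypothetical numbers): in the
# pleft < position branch above, moving a node whose interval is
# parent_left=3, parent_right=6 (distance=4) to insertion position 10 first
# opens a gap of 4 at position 10 (first two UPDATEs), then shifts the
# node's own interval by position - pleft = 7 (third UPDATE), leaving it at
# parent_left=10, parent_right=13 with all of its children moved along.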
3900 result += self._store_get_values(cr, user, ids, vals.keys(), context)
3903 # for recomputing new-style fields
3904 recs.modified(modified_fields)
3907 for order, model_name, ids_to_update, fields_to_recompute in result:
3908 key = (model_name, tuple(fields_to_recompute))
3909 done.setdefault(key, {})
3910 # avoid doing the same computation several times
3912 for id in ids_to_update:
3913 if id not in done[key]:
3914 done[key][id] = True
3915 if id not in deleted_related[model_name]:
3917 self.pool[model_name]._store_set_values(cr, user, todo, fields_to_recompute, context)
3919 # recompute new-style fields
3920 if context.get('recompute', True):
3923 self.step_workflow(cr, user, ids, context=context)
3927 # TODO: Should set perm to user.xxx
3930 @api.returns('self', lambda value: value.id)
3931 def create(self, vals):
3932 """ create(vals) -> record
3934 Creates a new record for the model.
3936 The new record is initialized using the values from ``vals`` and
3937 if necessary those from :meth:`~.default_get`.
3940 values for the model's fields, as a dictionary::
3942 {'field_name': field_value, ...}
3944 see :meth:`~.write` for details
3945 :return: new record created
3946 :raise AccessError: * if user has no create rights on the requested object
3947 * if user tries to bypass access rules for create on the requested object
3948 :raise ValidateError: if user tries to enter invalid value for a field that is not in selection
3949 :raise UserError: if a loop would be created in a hierarchy of objects as a result of the operation (such as setting an object as its own parent)
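For example (hypothetical model and values), with ``env`` an environment::

    partner = env['res.partner'].create({
        'name': 'Acme',
        'is_company': True,
    })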
3951 self.check_access_rights('create')
3953 # add missing defaults, and drop fields that may not be set by user
3954 vals = self._add_missing_default_values(vals)
3955 for field in itertools.chain(MAGIC_COLUMNS, ('parent_left', 'parent_right')):
3956 vals.pop(field, None)
3958 # split up fields into old-style and pure new-style ones
3959 old_vals, new_vals, unknown = {}, {}, []
3960 for key, val in vals.iteritems():
3961 if key in self._all_columns:
3963 elif key in self._fields:
3969 _logger.warning("%s.create() with unknown fields: %s", self._name, ', '.join(sorted(unknown)))
3971 # create record with old-style fields
3972 record = self.browse(self._create(old_vals))
3974 # put the values of pure new-style fields into cache, and inverse them
3975 record._cache.update(record._convert_to_cache(new_vals))
3976 for key in new_vals:
3977 self._fields[key].determine_inverse(record)
3981 def _create(self, cr, user, vals, context=None):
3982 # low-level implementation of create()
3986 if self.is_transient():
3987 self._transient_vacuum(cr, user)
3990 for v in self._inherits:
3991 if self._inherits[v] not in vals:
3994 tocreate[v] = {'id': vals[self._inherits[v]]}
3997 # list of column assignments defined as tuples like:
3998 # (column_name, format_string, column_value)
3999 # (column_name, sql_formula)
4000 # Those tuples will be used by the string formatting for the INSERT
4002 ('id', "nextval('%s')" % self._sequence),
4007 for v in vals.keys():
4008 if v in self._inherit_fields and v not in self._columns:
4009 (table, col, col_detail, original_parent) = self._inherit_fields[v]
4010 tocreate[table][v] = vals[v]
4013 if (v not in self._inherit_fields) and (v not in self._columns):
4015 unknown_fields.append(v)
4018 'No such field(s) in model %s: %s.',
4019 self._name, ', '.join(unknown_fields))
4021 for table in tocreate:
4022 if self._inherits[table] in vals:
4023 del vals[self._inherits[table]]
4025 record_id = tocreate[table].pop('id', None)
4027 if record_id is None or not record_id:
4028 record_id = self.pool[table].create(cr, user, tocreate[table], context=context)
4030 self.pool[table].write(cr, user, [record_id], tocreate[table], context=context)
4032 updates.append((self._inherits[table], '%s', record_id))
4034 # Start: set boolean fields to False if they are not touched (to make search more powerful)
4035 bool_fields = [x for x in self._columns.keys() if self._columns[x]._type=='boolean']
4037 for bool_field in bool_fields:
4038 if bool_field not in vals:
4039 vals[bool_field] = False
4041 for field in vals.keys():
4043 if field in self._columns:
4044 fobj = self._columns[field]
4046 fobj = self._inherit_fields[field][2]
4052 for group in groups:
4053 module = group.split(".")[0]
4054 grp = group.split(".")[1]
4055 cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name=%s and module=%s and model=%s) and uid=%s", \
4056 (grp, module, 'res.groups', user))
4057 readonly = cr.fetchall()
4058 if readonly[0][0] >= 1:
4061 elif readonly[0][0] == 0:
4069 current_field = self._columns[field]
4070 if current_field._classic_write:
4071 updates.append((field, '%s', current_field._symbol_set[1](vals[field])))
4073 #for the function fields that receive a value, we set them directly in the database
4074 #(they may be required), but we also need to trigger the _fct_inv()
4075 if (hasattr(current_field, '_fnct_inv')) and not isinstance(current_field, fields.related):
4076 #TODO: this way of special-casing related fields is really creepy, but it shouldn't be changed
4077 #one week before the release candidate. It seems the only good way to handle this correctly is to add an
4078 #attribute to make a field "really readonly" and thus totally ignored by create()... otherwise,
4079 #if, for example, the related field has a default value (for usability), then the fct_inv is called and it
4080 #may raise an access rights error. Changing this is too big a change for now, and is thus postponed
4081 #until after the release but, definitely, the behavior shouldn't be different for related and function
4083 upd_todo.append(field)
4085 #TODO: this "if" statement should be removed because there is no good reason to special-case
4086 #related fields. See the above TODO comment for further explanations.
4087 if not isinstance(current_field, fields.related):
4088 upd_todo.append(field)
4089 if field in self._columns \
4090 and hasattr(current_field, 'selection') \
4092 self._check_selection_field_value(cr, user, field, vals[field], context=context)
4093 if self._log_access:
4094 updates.append(('create_uid', '%s', user))
4095 updates.append(('write_uid', '%s', user))
4096 updates.append(('create_date', "(now() at time zone 'UTC')"))
4097 updates.append(('write_date', "(now() at time zone 'UTC')"))
4099 # the list of tuples used in this formatting corresponds to
4100 # tuple(field_name, format, value)
4101 # In some cases, for example (id, create_date, write_date), we do not
4102 # need to read the third value of the tuple, because the real value is
4103 # encoded in the second value (the format).
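# Illustration (added comment, hypothetical table and column): for a model
# stored in table "res_partner" with a single classic column 'name' and
# _log_access enabled, the statement built below would look like:
#
#   INSERT INTO "res_partner" ("id", "name", "create_uid", "write_uid",
#                              "create_date", "write_date")
#   VALUES(nextval('res_partner_id_seq'), %s, %s, %s,
#          (now() at time zone 'UTC'), (now() at time zone 'UTC')) RETURNING id
#
# executed with the parameter tuple (name_value, user, user), i.e. the third
# element of each update tuple that has one.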
4105 """INSERT INTO "%s" (%s) VALUES(%s) RETURNING id""" % (
4107 ', '.join('"%s"' % u[0] for u in updates),
4108 ', '.join(u[1] for u in updates)
4110 tuple([u[2] for u in updates if len(u) > 2])
4113 id_new, = cr.fetchone()
4114 recs = self.browse(cr, user, id_new, context)
4115 upd_todo.sort(lambda x, y: self._columns[x].priority-self._columns[y].priority)
4117 if self._parent_store and not context.get('defer_parent_store_computation'):
4119 self.pool._init_parent[self._name] = True
4121 parent = vals.get(self._parent_name, False)
4123 cr.execute('select parent_right from '+self._table+' where '+self._parent_name+'=%s order by '+(self._parent_order or self._order), (parent,))
4125 result_p = cr.fetchall()
4126 for (pleft,) in result_p:
4131 cr.execute('select parent_left from '+self._table+' where id=%s', (parent,))
4132 pleft_old = cr.fetchone()[0]
4135 cr.execute('select max(parent_right) from '+self._table)
4136 pleft = cr.fetchone()[0] or 0
4137 cr.execute('update '+self._table+' set parent_left=parent_left+2 where parent_left>%s', (pleft,))
4138 cr.execute('update '+self._table+' set parent_right=parent_right+2 where parent_right>%s', (pleft,))
4139 cr.execute('update '+self._table+' set parent_left=%s,parent_right=%s where id=%s', (pleft+1, pleft+2, id_new))
4140 recs.invalidate_cache(['parent_left', 'parent_right'])
4142 # default element in context must be removed when calling a one2many or many2many
4143 rel_context = context.copy()
4144 for c in context.items():
4145 if c[0].startswith('default_'):
4146 del rel_context[c[0]]
4149 for field in upd_todo:
4150 result += self._columns[field].set(cr, self, id_new, field, vals[field], user, rel_context) or []
4152 # check Python constraints
4153 recs._validate_fields(vals)
4155 # invalidate and mark new-style fields to recompute
4156 modified_fields = list(vals)
4157 if self._log_access:
4158 modified_fields += ['create_uid', 'create_date', 'write_uid', 'write_date']
4159 recs.modified(modified_fields)
4161 if context.get('recompute', True):
4162 result += self._store_get_values(cr, user, [id_new],
4163 list(set(vals.keys() + self._inherits.values())),
4167 for order, model_name, ids, fields2 in result:
4168 if not (model_name, ids, fields2) in done:
4169 self.pool[model_name]._store_set_values(cr, user, ids, fields2, context)
4170 done.append((model_name, ids, fields2))
4171 # recompute new-style fields
4174 if self._log_create and context.get('recompute', True):
4175 message = self._description + \
4177 self.name_get(cr, user, [id_new], context=context)[0][1] + \
4178 "' " + _("created.")
4179 self.log(cr, user, id_new, message, True, context=context)
4181 self.check_access_rule(cr, user, [id_new], 'create', context=context)
4182 self.create_workflow(cr, user, [id_new], context=context)
4185 def _store_get_values(self, cr, uid, ids, fields, context):
4186 """Returns an ordered list of fields.function to call due to
4187 an update operation on ``fields`` of records with ``ids``,
4188 obtained by calling the 'store' triggers of these fields,
4189 as setup by their 'store' attribute.
4191 :return: [(priority, model_name, [record_ids,], [function_fields,])]
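For illustration (hypothetical field and helper names), a function field
declared as::

    'total': fields.function(_compute_total, type='float', store={
        'sale.order.line': (_get_order_ids, ['price_unit', 'qty'], 10),
    })

registers a trigger: writing 'price_unit' or 'qty' on sale.order.line makes
this method schedule a recomputation of 'total', with priority 10, on the
records returned by _get_order_ids.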
4193 if fields is None: fields = []
4194 stored_functions = self.pool._store_function.get(self._name, [])
4196 # use indexed names for the details of the stored_functions:
4197 model_name_, func_field_to_compute_, target_ids_func_, trigger_fields_, priority_ = range(5)
4199 # only keep store triggers that should be triggered for the ``fields``
4201 triggers_to_compute = (
4202 f for f in stored_functions
4203 if not f[trigger_fields_] or set(fields).intersection(f[trigger_fields_])
4207 target_id_results = {}
4208 for store_trigger in triggers_to_compute:
4209 target_func_id_ = id(store_trigger[target_ids_func_])
4210 if target_func_id_ not in target_id_results:
4211 # use admin user for accessing objects having rules defined on store fields
4212 target_id_results[target_func_id_] = [i for i in store_trigger[target_ids_func_](self, cr, SUPERUSER_ID, ids, context) if i]
4213 target_ids = target_id_results[target_func_id_]
4215 # the compound key must consider the priority and model name
4216 key = (store_trigger[priority_], store_trigger[model_name_])
4217 for target_id in target_ids:
4218 to_compute_map.setdefault(key, {}).setdefault(target_id,set()).add(tuple(store_trigger))
4220 # Here to_compute_map looks like:
4221 # { (10, 'model_a') : { target_id1: [ (trigger_1_tuple, trigger_2_tuple) ], ... }
4222 # (20, 'model_a') : { target_id2: [ (trigger_3_tuple, trigger_4_tuple) ], ... }
4223 # (99, 'model_a') : { target_id1: [ (trigger_5_tuple, trigger_6_tuple) ], ... }
4226 # Now we need to generate the batch function calls list
4228 # { (10, 'model_a') : [(10, 'model_a', [record_ids,], [function_fields,])] }
4230 for ((priority,model), id_map) in to_compute_map.iteritems():
4231 trigger_ids_maps = {}
4232 # function_ids_maps =
4233 # { (function_1_tuple, function_2_tuple) : [target_id1, target_id2, ..] }
4234 for target_id, triggers in id_map.iteritems():
4235 trigger_ids_maps.setdefault(tuple(triggers), []).append(target_id)
4236 for triggers, target_ids in trigger_ids_maps.iteritems():
4237 call_map.setdefault((priority,model),[]).append((priority, model, target_ids,
4238 [t[func_field_to_compute_] for t in triggers]))
4241 result = reduce(operator.add, (call_map[k] for k in sorted(call_map)))
4244 def _store_set_values(self, cr, uid, ids, fields, context):
4245 """Calls the fields.function's "implementation function" for all ``fields``, on records with ``ids`` (taking care of
4246 respecting ``multi`` attributes), and stores the resulting values in the database directly."""
4251 if self._log_access:
4252 cr.execute('select id,write_date from '+self._table+' where id IN %s', (tuple(ids),))
4256 field_dict.setdefault(r[0], [])
4257 res_date = time.strptime((r[1])[:19], '%Y-%m-%d %H:%M:%S')
4258 write_date = datetime.datetime.fromtimestamp(time.mktime(res_date))
4259 for i in self.pool._store_function.get(self._name, []):
4261 up_write_date = write_date + datetime.timedelta(hours=i[5])
4262 if datetime.datetime.now() < up_write_date:
4264 field_dict[r[0]].append(i[1])
4270 if self._columns[f]._multi not in keys:
4271 keys.append(self._columns[f]._multi)
4272 todo.setdefault(self._columns[f]._multi, [])
4273 todo[self._columns[f]._multi].append(f)
4277 # use admin user for accessing objects having rules defined on store fields
4278 result = self._columns[val[0]].get(cr, self, ids, val, SUPERUSER_ID, context=context)
4279 for id, value in result.items():
4281 for f in value.keys():
4282 if f in field_dict[id]:
4289 if self._columns[v]._type == 'many2one':
4291 value[v] = value[v][0]
4294 upd0.append('"'+v+'"='+self._columns[v]._symbol_set[0])
4295 upd1.append(self._columns[v]._symbol_set[1](value[v]))
4298 cr.execute('update "' + self._table + '" set ' + \
4299 ','.join(upd0) + ' where id = %s', upd1)
4303 # use admin user for accessing objects having rules defined on store fields
4304 result = self._columns[f].get(cr, self, ids, f, SUPERUSER_ID, context=context)
4305 for r in result.keys():
4307 if r in field_dict.keys():
4308 if f in field_dict[r]:
4310 for id, value in result.items():
4311 if self._columns[f]._type == 'many2one':
4316 cr.execute('update "' + self._table + '" set ' + \
4317 '"'+f+'"='+self._columns[f]._symbol_set[0] + ' where id = %s', (self._columns[f]._symbol_set[1](value), id))
4319 # invalidate and mark new-style fields to recompute
4320 self.browse(cr, uid, ids, context).modified(fields)
4324 # TODO: improve with NULL
4325 def _where_calc(self, cr, user, domain, active_test=True, context=None):
4326 """Computes the WHERE clause needed to implement an OpenERP domain.
4327 :param domain: the domain to compute
4329 :param active_test: whether the default filtering of records with ``active``
4330 field set to ``False`` should be applied.
4331 :return: the query expressing the given domain as provided in domain
4332 :rtype: osv.query.Query
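For instance (hypothetical call), on a model that has an ``active`` column::

    query = model._where_calc(cr, uid, [('name', 'ilike', 'foo')], context=ctx)

computes the query as if the domain were
``[('active', '=', 1), ('name', 'ilike', 'foo')]``, unless the context
carries ``active_test=False`` or the domain already mentions ``active``.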
4337 # if the object has a field named 'active', filter out all inactive
4338 # records unless they were explicitly asked for
4339 if 'active' in self._all_columns and (active_test and context.get('active_test', True)):
4341 # the item[0] trick below works for domain items and '&'/'|'/'!'
4343 if not any(item[0] == 'active' for item in domain):
4344 domain.insert(0, ('active', '=', 1))
4346 domain = [('active', '=', 1)]
4349 e = expression.expression(cr, user, domain, self, context)
4350 tables = e.get_tables()
4351 where_clause, where_params = e.to_sql()
4352 where_clause = where_clause and [where_clause] or []
4354 where_clause, where_params, tables = [], [], ['"%s"' % self._table]
4356 return Query(tables, where_clause, where_params)
4358 def _check_qorder(self, word):
4359 if not regex_order.match(word):
4360 raise except_orm(_('AccessError'), _('Invalid "order" specified. A valid "order" specification is a comma-separated list of valid field names (optionally followed by asc/desc for the direction)'))
4363 def _apply_ir_rules(self, cr, uid, query, mode='read', context=None):
4364 """Add what's missing in ``query`` to implement all appropriate ir.rules
4365 (using the current model's rules, as well as those of its ``_inherits`` parents)
4367 :param query: the current query object
4369 if uid == SUPERUSER_ID:
4372 def apply_rule(added_clause, added_params, added_tables, parent_model=None):
4373 """ :param parent_model: name of the parent model, if the added
4374 clause comes from a parent model
4378 # as inherited rules are being applied, we need to add the missing JOIN
4379 # to reach the parent table (if it was not JOINed yet in the query)
4380 parent_alias = self._inherits_join_add(self, parent_model, query)
4381 # inherited rules are applied on the external table -> need to get the alias and replace
4382 parent_table = self.pool[parent_model]._table
4383 added_clause = [clause.replace('"%s"' % parent_table, '"%s"' % parent_alias) for clause in added_clause]
4384 # change references to parent_table to parent_alias, because we now use the alias to refer to the table
4386 for table in added_tables:
4387 # table is just a table name -> switch to the full alias
4388 if table == '"%s"' % parent_table:
4389 new_tables.append('"%s" as "%s"' % (parent_table, parent_alias))
4390 # table is already a full statement -> replace the reference to the table by its alias (this is correct with the way aliases are generated)
4392 new_tables.append(table.replace('"%s"' % parent_table, '"%s"' % parent_alias))
4393 added_tables = new_tables
4394 query.where_clause += added_clause
4395 query.where_clause_params += added_params
4396 for table in added_tables:
4397 if table not in query.tables:
4398 query.tables.append(table)
4402 # apply main rules on the object
4403 rule_obj = self.pool.get('ir.rule')
4404 rule_where_clause, rule_where_clause_params, rule_tables = rule_obj.domain_get(cr, uid, self._name, mode, context=context)
4405 apply_rule(rule_where_clause, rule_where_clause_params, rule_tables)
4407 # apply ir.rules from the parents (through _inherits)
4408 for inherited_model in self._inherits:
4409 rule_where_clause, rule_where_clause_params, rule_tables = rule_obj.domain_get(cr, uid, inherited_model, mode, context=context)
4410 apply_rule(rule_where_clause, rule_where_clause_params, rule_tables,
4411 parent_model=inherited_model)
4413 def _generate_m2o_order_by(self, order_field, query):
4415 Add possibly missing JOIN to ``query`` and generate the ORDER BY clause for m2o fields,
4416 either native m2o fields or function/related fields that are stored, including
4417 intermediate JOINs for inheritance if required.
4419 :return: the qualified field name to use in an ORDER BY clause to sort by ``order_field``
4421 if order_field not in self._columns and order_field in self._inherit_fields:
4422 # also add missing joins for reaching the table containing the m2o field
4423 qualified_field = self._inherits_join_calc(order_field, query)
4424 order_field_column = self._inherit_fields[order_field][2]
4426 qualified_field = '"%s"."%s"' % (self._table, order_field)
4427 order_field_column = self._columns[order_field]
4429 assert order_field_column._type == 'many2one', 'Invalid field passed to _generate_m2o_order_by()'
4430 if not order_field_column._classic_write and not getattr(order_field_column, 'store', False):
4431 _logger.debug("Many2one function/related fields must be stored " \
4432 "to be used as ordering fields! Ignoring sorting for %s.%s",
4433 self._name, order_field)
4436 # figure out the applicable order_by for the m2o
4437 dest_model = self.pool[order_field_column._obj]
4438 m2o_order = dest_model._order
4439 if not regex_order.match(m2o_order):
4440 # _order is complex, can't use it here, so we default to _rec_name
4441 m2o_order = dest_model._rec_name
4443 # extract the field names, to be able to qualify them and add desc/asc
4445 for order_part in m2o_order.split(","):
4446 m2o_order_list.append(order_part.strip().split(" ", 1)[0].strip())
4447 m2o_order = m2o_order_list
4449 # Join the dest m2o table if it's not joined yet. We use [LEFT] OUTER join here
4450 # as we don't want to exclude results that have NULL values for the m2o
4451 src_table, src_field = qualified_field.replace('"', '').split('.', 1)
4452 dst_alias, dst_alias_statement = query.add_join((src_table, dest_model._table, src_field, 'id', src_field), implicit=False, outer=True)
4453 qualify = lambda field: '"%s"."%s"' % (dst_alias, field)
4454 return map(qualify, m2o_order) if isinstance(m2o_order, list) else qualify(m2o_order)
4456 def _generate_order_by(self, order_spec, query):
4458 Attempt to construct an appropriate ORDER BY clause based on order_spec, which must be
4459 a comma-separated list of valid field names, optionally followed by an ASC or DESC direction.
4461 :raise: except_orm in case order_spec is malformed
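For example, an order_spec of ``"date desc, name"`` (hypothetical field
names) sorts by ``date`` descending, then by ``name`` ascending.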
4463 order_by_clause = ''
4464 order_spec = order_spec or self._order
4466 order_by_elements = []
4467 self._check_qorder(order_spec)
4468 for order_part in order_spec.split(','):
4469 order_split = order_part.strip().split(' ')
4470 order_field = order_split[0].strip()
4471 order_direction = order_split[1].strip() if len(order_split) == 2 else ''
4474 if order_field == 'id':
4475 order_by_elements.append('"%s"."%s" %s' % (self._table, order_field, order_direction))
4476 elif order_field in self._columns:
4477 order_column = self._columns[order_field]
4478 if order_column._classic_read:
4479 inner_clause = '"%s"."%s"' % (self._table, order_field)
4480 elif order_column._type == 'many2one':
4481 inner_clause = self._generate_m2o_order_by(order_field, query)
4483 continue # ignore non-readable or "non-joinable" fields
4484 elif order_field in self._inherit_fields:
4485 parent_obj = self.pool[self._inherit_fields[order_field][3]]
4486 order_column = parent_obj._columns[order_field]
4487 if order_column._classic_read:
4488 inner_clause = self._inherits_join_calc(order_field, query)
4489 elif order_column._type == 'many2one':
4490 inner_clause = self._generate_m2o_order_by(order_field, query)
4492 continue # ignore non-readable or "non-joinable" fields
4494 raise ValueError(_("Sorting field %s not found on model %s") % (order_field, self._name))
4495 if order_column and order_column._type == 'boolean':
4496 inner_clause = "COALESCE(%s, false)" % inner_clause
4498 if isinstance(inner_clause, list):
4499 for clause in inner_clause:
4500 order_by_elements.append("%s %s" % (clause, order_direction))
4502 order_by_elements.append("%s %s" % (inner_clause, order_direction))
4503 if order_by_elements:
4504 order_by_clause = ",".join(order_by_elements)
4506 return order_by_clause and (' ORDER BY %s ' % order_by_clause) or ''
4508 def _search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False, access_rights_uid=None):
4510 Private implementation of search() method, allowing specifying the uid to use for the access right check.
4511 This is useful for example when filling in the selection list for a drop-down and avoiding access rights errors,
4512 by specifying ``access_rights_uid=1`` to bypass access rights check, but not ir.rules!
4513 This is ok at the security level because this method is private and not callable through XML-RPC.
4515 :param access_rights_uid: optional user ID to use when checking access rights
4516 (not for ir.rules, this is only for ir.model.access)
4520 self.check_access_rights(cr, access_rights_uid or user, 'read')
4522 # For transient models, restrict access to the current user, except for the super-user
4523 if self.is_transient() and self._log_access and user != SUPERUSER_ID:
4524 args = expression.AND(([('create_uid', '=', user)], args or []))
4526 query = self._where_calc(cr, user, args, context=context)
4527 self._apply_ir_rules(cr, user, query, 'read', context=context)
4528 order_by = self._generate_order_by(order, query)
4529 from_clause, where_clause, where_clause_params = query.get_sql()
4531 where_str = where_clause and (" WHERE %s" % where_clause) or ''
4534 # Ignore order, limit and offset when just counting; they don't make sense and could hurt performance
4536 query_str = 'SELECT count(1) FROM ' + from_clause + where_str
4537 cr.execute(query_str, where_clause_params)
4541 limit_str = limit and ' limit %d' % limit or ''
4542 offset_str = offset and ' offset %d' % offset or ''
4543 query_str = 'SELECT "%s".id FROM ' % self._table + from_clause + where_str + order_by + limit_str + offset_str
4544 cr.execute(query_str, where_clause_params)
4547 # TDE note: with auto_join, we could have several lines about the same result
4548 # i.e. a lead with several unread messages; we uniquify the result using
4549 # a fast way to do it while preserving order (http://www.peterbe.com/plog/uniqifiers-benchmark)
4550 def _uniquify_list(seq):
4552 return [x for x in seq if x not in seen and not seen.add(x)]
4554 return _uniquify_list([x[0] for x in res])
4556 # returns the different values ever entered for one field
4557 # this is used, for example, in the client when the user hits enter on a char field
4559 def distinct_field_get(self, cr, uid, field, value, args=None, offset=0, limit=None):
4562 if field in self._inherit_fields:
4563 return self.pool[self._inherit_fields[field][0]].distinct_field_get(cr, uid, field, value, args, offset, limit)
4565 return self._columns[field].search(cr, self, args, field, value, offset, limit, uid)
4567 def copy_data(self, cr, uid, id, default=None, context=None):
4569 Copy given record's data with all its field values
4571 :param cr: database cursor
4572 :param uid: current user id
4573 :param id: id of the record to copy
4574 :param default: field values to override in the original values of the copied record
4575 :type default: dictionary
4576 :param context: context arguments, like lang, time zone
4577 :type context: dictionary
4578 :return: dictionary containing all the field values
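For example (hypothetical override)::

    values = self.copy_data(cr, uid, record_id,
                            default={'name': 'Copy of Acme'},
                            context=context)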
4584 # avoid recursion through already copied records in case of circular relationship
4585 seen_map = context.setdefault('__copy_data_seen', {})
4586 if id in seen_map.setdefault(self._name, []):
4588 seen_map[self._name].append(id)
4592 if 'state' not in default:
4593 if 'state' in self._defaults:
4594 if callable(self._defaults['state']):
4595 default['state'] = self._defaults['state'](self, cr, uid, context)
4597 default['state'] = self._defaults['state']
4599 # build a black list of fields that should not be copied
4600 blacklist = set(MAGIC_COLUMNS + ['parent_left', 'parent_right'])
4601 def blacklist_given_fields(obj):
4602 # blacklist the fields that are given by inheritance
4603 for other, field_to_other in obj._inherits.items():
4604 blacklist.add(field_to_other)
4605 if field_to_other in default:
4606 # all the fields of 'other' are given by the record: default[field_to_other],
4607 # except the ones redefined in self
4608 blacklist.update(set(self.pool[other]._all_columns) - set(self._columns))
4610 blacklist_given_fields(self.pool[other])
4611 # blacklist deprecated fields
4612 for name, field in obj._columns.items():
4613 if field.deprecated:
4616 blacklist_given_fields(self)
4619 fields_to_copy = dict((f,fi) for f, fi in self._all_columns.iteritems()
4622 if f not in blacklist)
4624 data = self.read(cr, uid, [id], fields_to_copy.keys(), context=context)
4628 raise IndexError(_("Record #%d of %s not found, cannot copy!") % (id, self._name))
4631 for f, colinfo in fields_to_copy.iteritems():
4632 field = colinfo.column
4633 if field._type == 'many2one':
4634 res[f] = data[f] and data[f][0]
4635 elif field._type == 'one2many':
4636 other = self.pool[field._obj]
4637 # duplicate following the order of the ids because we'll rely on
4638 # it later for copying translations in copy_translation()!
4639 lines = [other.copy_data(cr, uid, line_id, context=context) for line_id in sorted(data[f])]
4640 # the lines are duplicated using the wrong (old) parent, but then
4641 # are reassigned to the correct one thanks to the (0, 0, ...)
4642 res[f] = [(0, 0, line) for line in lines if line]
4643 elif field._type == 'many2many':
4644 res[f] = [(6, 0, data[f])]
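# Illustrative usage sketch for copy_data() (old-style API); 'res.partner' and
# 'partner_id' are assumptions, not part of this module:
#
#     partner_obj = self.pool['res.partner']
#     values = partner_obj.copy_data(cr, uid, partner_id,
#                                    default={'name': 'Copy of partner'},
#                                    context=context)
#     # values is a plain dict: m2o fields hold a bare id, o2m lines are encoded
#     # as (0, 0, {...}) commands and m2m links as a (6, 0, ids) command, so the
#     # dict can be passed directly to create().
#     new_id = partner_obj.create(cr, uid, values, context=context)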
4650 def copy_translations(self, cr, uid, old_id, new_id, context=None):
4654 # avoid recursion through already copied records in case of circular relationship
4655 seen_map = context.setdefault('__copy_translations_seen',{})
4656 if old_id in seen_map.setdefault(self._name,[]):
4658 seen_map[self._name].append(old_id)
4660 trans_obj = self.pool.get('ir.translation')
4661 # TODO it seems fields_get can be replaced by _all_columns (no need for translation)
4662 fields = self.fields_get(cr, uid, context=context)
4664 for field_name, field_def in fields.items():
4665 # removing the lang to compare untranslated values
4666 context_wo_lang = dict(context, lang=None)
4667 old_record, new_record = self.browse(cr, uid, [old_id, new_id], context=context_wo_lang)
4668 # we must recursively copy the translations for o2o and o2m
4669 if field_def['type'] == 'one2many':
4670 target_obj = self.pool[field_def['relation']]
4671 # here we rely on the order of the ids to match the translations
4672 # as foreseen in copy_data()
4673 old_children = sorted(r.id for r in old_record[field_name])
4674 new_children = sorted(r.id for r in new_record[field_name])
4675 for (old_child, new_child) in zip(old_children, new_children):
4676 target_obj.copy_translations(cr, uid, old_child, new_child, context=context)
4677 # and for translatable fields we keep them for copy
4678 elif field_def.get('translate'):
4679 if field_name in self._columns:
4680 trans_name = self._name + "," + field_name
4683 elif field_name in self._inherit_fields:
4684 trans_name = self._inherit_fields[field_name][0] + "," + field_name
4685 # get the id of the parent record to set the translation
4686 inherit_field_name = self._inherit_fields[field_name][1]
4687 target_id = new_record[inherit_field_name].id
4688 source_id = old_record[inherit_field_name].id
4692 trans_ids = trans_obj.search(cr, uid, [
4693 ('name', '=', trans_name),
4694 ('res_id', '=', source_id)
4696 user_lang = context.get('lang')
4697 for record in trans_obj.read(cr, uid, trans_ids, context=context):
4699 # remove source to avoid triggering _set_src
4700 del record['source']
4701 record.update({'res_id': target_id})
4702 if user_lang and user_lang == record['lang']:
4703 # 'source' to force the call to _set_src
4704 # 'value' is needed if the value was changed in copy(), so that the new value is used
4705 record['source'] = old_record[field_name]
4706 record['value'] = new_record[field_name]
4707 trans_obj.create(cr, uid, record, context=context)
4709 @api.returns('self', lambda value: value.id)
4710 def copy(self, cr, uid, id, default=None, context=None):
4711 """ copy(default=None)
4713 Duplicate the record with the given id, updating it with default values
4715 :param dict default: dictionary of field values to override in the
4716 original values of the copied record, e.g.: ``{'field_name': overridden_value, ...}``
4717 :returns: new record
4722 context = context.copy()
4723 data = self.copy_data(cr, uid, id, default, context)
4724 new_id = self.create(cr, uid, data, context)
4725 self.copy_translations(cr, uid, id, new_id, context)
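# Illustrative usage sketch for copy() (old-style API); 'res.partner',
# 'partner_id' and the overridden field value are assumptions:
#
#     new_id = self.pool['res.partner'].copy(cr, uid, partner_id,
#                                            default={'name': 'Copy of partner'},
#                                            context=context)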
4729 @api.returns('self')
4731 """ exists() -> records
4733 Returns the subset of records in `self` that exist, and marks deleted
4734 records as such in cache. It can be used as a test on records::
4739 By convention, new records are returned as existing.
4741 ids = filter(None, self._ids) # ids to check in database
4744 query = """SELECT id FROM "%s" WHERE id IN %%s""" % self._table
4745 self._cr.execute(query, (ids,))
4746 ids = ([r[0] for r in self._cr.fetchall()] + # ids in database
4747 [id for id in self._ids if not id]) # new ids
4748 existing = self.browse(ids)
4749 if len(existing) < len(self):
4750 # mark missing records in cache with a failed value
4751 exc = MissingError(_("Record does not exist or has been deleted."))
4752 (self - existing)._cache.update(FailedValue(exc))
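# Illustrative usage sketch for exists() (new-style API); 'res.partner' and
# 'partner_id' are assumptions:
#
#     record = self.env['res.partner'].browse(partner_id)
#     if not record.exists():
#         raise MissingError(_("This partner no longer exists."))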
4755 def check_recursion(self, cr, uid, ids, context=None, parent=None):
4756 _logger.warning("You are using deprecated %s.check_recursion(). Please use '_check_recursion()' instead!" % \
4758 assert parent is None or parent in self._columns or parent in self._inherit_fields,\
4759 "The 'parent' parameter passed to check_recursion() must be None or a valid field name"
4760 return self._check_recursion(cr, uid, ids, context, parent)
4762 def _check_recursion(self, cr, uid, ids, context=None, parent=None):
4764 Verifies that there is no loop in a hierarchical structure of records,
4765 by following the parent relationship using the **parent** field until a loop
4766 is detected or until a top-level record is found.
4768 :param cr: database cursor
4769 :param uid: current user id
4770 :param ids: list of ids of records to check
4771 :param parent: optional parent field name (default: ``self._parent_name = parent_id``)
4772 :return: **True** if the operation can proceed safely, or **False** if an infinite loop is detected.
4775 parent = self._parent_name
4777 # must ignore 'active' flag, ir.rules, etc. => direct SQL query
4778 query = 'SELECT "%s" FROM "%s" WHERE id = %%s' % (parent, self._table)
4781 while current_id is not None:
4782 cr.execute(query, (current_id,))
4783 result = cr.fetchone()
4784 current_id = result[0] if result else None
4785 if current_id == id:
4789 def _check_m2m_recursion(self, cr, uid, ids, field_name):
4791 Verifies that there is no loop in a hierarchical structure of records,
4792 by following the parent records through the given many2many field until a loop
4793 is detected or until a top-level record is found.
4795 :param cr: database cursor
4796 :param uid: current user id
4797 :param ids: list of ids of records to check
4798 :param field_name: field to check
4799 :return: **True** if the operation can proceed safely, or **False** if an infinite loop is detected.
4802 field = self._all_columns.get(field_name)
4803 field = field.column if field else None
4804 if not field or field._type != 'many2many' or field._obj != self._name:
4805 # field must be a many2many on itself
4806 raise ValueError('invalid field_name: %r' % (field_name,))
4808 query = 'SELECT distinct "%s" FROM "%s" WHERE "%s" IN %%s' % (field._id2, field._rel, field._id1)
4812 for i in range(0, len(ids_parent), cr.IN_MAX):
4813 j = i + cr.IN_MAX
4814 sub_ids_parent = ids_parent[i:j]
4815 cr.execute(query, (tuple(sub_ids_parent),))
4816 ids_parent2.extend(filter(None, map(lambda x: x[0], cr.fetchall())))
4817 ids_parent = ids_parent2
4818 for i in ids_parent:
4823 def _get_external_ids(self, cr, uid, ids, *args, **kwargs):
4824 """Retrieve the External ID(s) of any database record.
4826 **Synopsis**: ``_get_external_ids(cr, uid, ids) -> { 'id': ['module.xml_id'] }``
4828 :return: map of ids to the list of their fully qualified External IDs
4829 in the form ``module.key``, or an empty list when there's no External
4830 ID for a record, e.g.::
4832 { 'id': ['module.ext_id', 'module.ext_id_bis'],
4835 ir_model_data = self.pool.get('ir.model.data')
4836 data_ids = ir_model_data.search(cr, uid, [('model', '=', self._name), ('res_id', 'in', ids)])
4837 data_results = ir_model_data.read(cr, uid, data_ids, ['module', 'name', 'res_id'])
4840 # can't use dict.fromkeys() as the list would be shared!
4842 for record in data_results:
4843 result[record['res_id']].append('%(module)s.%(name)s' % record)
4846 def get_external_id(self, cr, uid, ids, *args, **kwargs):
4847 """Retrieve the External ID of any database record, if there
4848 is one. This method works as a possible implementation
4849 for a function field, to be able to add it to any
4850 model object easily, referencing it as ``Model.get_external_id``.
4852 When multiple External IDs exist for a record, only one
4853 of them is returned (arbitrarily).
4855 :return: map of ids to their fully qualified XML ID,
4856 defaulting to an empty string when there's none
4857 (to be usable as a function field),
4860 { 'id': 'module.ext_id',
4863 results = self._get_xml_ids(cr, uid, ids)
4864 for k, v in results.iteritems():
4871 # backwards compatibility
4872 get_xml_id = get_external_id
4873 _get_xml_ids = _get_external_ids
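# Illustrative usage sketch (old-style API); the ids and the returned External
# IDs are assumptions:
#
#     self.pool['res.partner'].get_external_id(cr, uid, [7, 8])
#     # -> {7: 'some_module.partner_seven', 8: ''}   (empty string: no External ID)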
4875 def print_report(self, cr, uid, ids, name, data, context=None):
4877 Render the report `name` for the given IDs. The report must be defined
4878 for this model, not another.
4880 report = self.pool['ir.actions.report.xml']._lookup_report(cr, name)
4881 assert self._name == report.table
4882 return report.create(cr, uid, ids, data, context)
4886 def is_transient(cls):
4887 """ Return whether the model is transient.
4889 See :class:`TransientModel`.
4892 return cls._transient
4894 def _transient_clean_rows_older_than(self, cr, seconds):
4895 assert self._transient, "Model %s is not transient, it cannot be vacuumed!" % self._name
4896 # Never delete rows used in last 5 minutes
4897 seconds = max(seconds, 300)
4898 query = ("SELECT id FROM " + self._table + " WHERE"
4899 " COALESCE(write_date, create_date, (now() at time zone 'UTC'))::timestamp"
4900 " < ((now() at time zone 'UTC') - interval %s)")
4901 cr.execute(query, ("%s seconds" % seconds,))
4902 ids = [x[0] for x in cr.fetchall()]
4903 self.unlink(cr, SUPERUSER_ID, ids)
4905 def _transient_clean_old_rows(self, cr, max_count):
4906 # Check how many rows we have in the table
4907 cr.execute("SELECT count(*) AS row_count FROM " + self._table)
4908 res = cr.fetchall()
4909 if res[0][0] <= max_count:
4910 return # max not reached, nothing to do
4911 self._transient_clean_rows_older_than(cr, 300)
4913 def _transient_vacuum(self, cr, uid, force=False):
4914 """Clean the transient records.
4916 This unlinks old records from the transient model tables whenever the
4917 "_transient_max_count" or "_max_age" conditions (if any) are reached.
4918 Actual cleaning will happen only once every "_transient_check_time" calls.
4919 This means this method can be called frequently (e.g. whenever
4920 a new record is created).
4921 Example with both max_hours and max_count active:
4922 Suppose max_hours = 0.2 (e.g. 12 minutes), max_count = 20, there are 55 rows in the
4923 table, 10 created/changed in the last 5 minutes, an additional 12 created/changed between
4924 5 and 10 minutes ago, the rest created/changed more than 12 minutes ago.
4925 - age based vacuum will leave the 22 rows created/changed in the last 12 minutes
4926 - count based vacuum will wipe out another 12 rows. Not just 2, otherwise each addition
4927 would immediately cause the maximum to be reached again.
4928 - the 10 rows that have been created/changed the last 5 minutes will NOT be deleted
4930 assert self._transient, "Model %s is not transient, it cannot be vacuumed!" % self._name
4931 _transient_check_time = 20 # arbitrary limit on vacuum executions
4932 self._transient_check_count += 1
4933 if not force and (self._transient_check_count < _transient_check_time):
4934 return True # no vacuum cleaning this time
4935 self._transient_check_count = 0
4937 # Age-based expiration
4938 if self._transient_max_hours:
4939 self._transient_clean_rows_older_than(cr, self._transient_max_hours * 60 * 60)
4941 # Count-based expiration
4942 if self._transient_max_count:
4943 self._transient_clean_old_rows(cr, self._transient_max_count)
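# Illustrative sketch of how a transient model can tune the vacuum policy above
# through class attributes; 'my.wizard' is a hypothetical model:
#
#     from openerp import models
#
#     class my_wizard(models.TransientModel):
#         _name = 'my.wizard'
#         _transient_max_count = 200    # count-based expiration threshold
#         _transient_max_hours = 1.0    # age-based expiration, in hours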
4947 def resolve_2many_commands(self, cr, uid, field_name, commands, fields=None, context=None):
4948 """ Serializes one2many and many2many commands into record dictionaries
4949 (as if all the records came from the database via a read()). This
4950 method is aimed at onchange methods on one2many and many2many fields.
4952 Because commands might be creation commands, not all record dicts
4953 will contain an ``id`` field. Commands matching an existing record
4954 will have an ``id``.
4956 :param field_name: name of the one2many or many2many field matching the commands
4957 :type field_name: str
4958 :param commands: one2many or many2many commands to execute on ``field_name``
4959 :type commands: list((int|False, int|False, dict|False))
4960 :param fields: list of fields to read from the database, when applicable
4961 :type fields: list(str)
4962 :returns: records in a shape similar to that returned by ``read()``
4963 (except records may be missing the ``id`` field if they don't exist in db)
4966 result = [] # result (list of dict)
4967 record_ids = [] # ids of records to read
4968 updates = {} # {id: dict} of updates on particular records
4970 for command in commands or []:
4971 if not isinstance(command, (list, tuple)):
4972 record_ids.append(command)
4973 elif command[0] == 0:
4974 result.append(command[2])
4975 elif command[0] == 1:
4976 record_ids.append(command[1])
4977 updates.setdefault(command[1], {}).update(command[2])
4978 elif command[0] in (2, 3):
4979 record_ids = [id for id in record_ids if id != command[1]]
4980 elif command[0] == 4:
4981 record_ids.append(command[1])
4982 elif command[0] == 5:
4983 result, record_ids = [], []
4984 elif command[0] == 6:
4985 result, record_ids = [], list(command[2])
4987 # read the records and apply the updates
4988 other_model = self.pool[self._all_columns[field_name].column._obj]
4989 for record in other_model.read(cr, uid, record_ids, fields=fields, context=context):
4990 record.update(updates.get(record['id'], {}))
4991 result.append(record)
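# Illustrative sketch; 'line_ids' and the record ids are assumptions:
#
#     commands = [(0, 0, {'name': 'new line'}),   # create -> dict without an id
#                 (1, 7, {'name': 'renamed'}),    # update existing record 7
#                 (4, 8, False)]                  # link existing record 8
#     self.resolve_2many_commands(cr, uid, 'line_ids', commands,
#                                 fields=['name'], context=context)
#     # -> [{'name': 'new line'},
#     #     {'id': 7, 'name': 'renamed'},
#     #     {'id': 8, 'name': <current name of record 8>}]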
4995 # for backward compatibility
4996 resolve_o2m_commands_to_record_dicts = resolve_2many_commands
4998 def search_read(self, cr, uid, domain=None, fields=None, offset=0, limit=None, order=None, context=None):
5000 Performs a ``search()`` followed by a ``read()``.
5002 :param cr: database cursor
5003 :param uid: current user id
5004 :param domain: Search domain, see ``args`` parameter in ``search()``. Defaults to an empty domain that will match all records.
5005 :param fields: List of fields to read, see ``fields`` parameter in ``read()``. Defaults to all fields.
5006 :param offset: Number of records to skip, see ``offset`` parameter in ``search()``. Defaults to 0.
5007 :param limit: Maximum number of records to return, see ``limit`` parameter in ``search()``. Defaults to no limit.
5008 :param order: Columns to sort result, see ``order`` parameter in ``search()``. Defaults to no sort.
5009 :param context: context arguments.
5010 :return: List of dictionaries containing the asked fields.
5011 :rtype: List of dictionaries.
5014 record_ids = self.search(cr, uid, domain or [], offset=offset, limit=limit, order=order, context=context)
5018 if fields and fields == ['id']:
5019 # shortcut read if we only want the ids
5020 return [{'id': id} for id in record_ids]
5022 # read() ignores active_test, but it would forward it to any downstream search call
5023 # (e.g. for x2m or function fields), and this is not the desired behavior, the flag
5024 # was presumably only meant for the main search().
5025 # TODO: Move this to read() directly?
5026 read_ctx = dict(context or {})
5027 read_ctx.pop('active_test', None)
5029 result = self.read(cr, uid, record_ids, fields, context=read_ctx)
5030 if len(result) <= 1:
5034 index = dict((r['id'], r) for r in result)
5035 return [index[x] for x in record_ids if x in index]
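# Illustrative usage sketch (old-style API); the model, domain and returned
# values are assumptions:
#
#     self.pool['res.partner'].search_read(cr, uid,
#         domain=[('is_company', '=', True)],
#         fields=['name'], limit=5, order='name', context=context)
#     # -> [{'id': 10, 'name': 'Agrolait'}, ...]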
5037 def _register_hook(self, cr):
5038 """ stuff to do right after the registry is built """
5042 def _patch_method(cls, name, method):
5043 """ Monkey-patch a method for all instances of this model. This replaces
5044 the method called `name` by `method` in the given class.
5045 The original method is then accessible via ``method.origin``, and it
5046 can be restored with :meth:`~._revert_method`.
5051 def do_write(self, values):
5052 # do stuff, and call the original method
5053 return do_write.origin(self, values)
5055 # patch method write of model
5056 model._patch_method('write', do_write)
5058 # this will call do_write
5059 records = model.search([...])
5062 # restore the original method
5063 model._revert_method('write')
5065 origin = getattr(cls, name)
5066 method.origin = origin
5067 # propagate decorators from origin to method, and apply api decorator
5068 wrapped = api.guess(api.propagate(origin, method))
5069 wrapped.origin = origin
5070 setattr(cls, name, wrapped)
5073 def _revert_method(cls, name):
5074 """ Revert the original method called `name` in the given class.
5075 See :meth:`~._patch_method`.
5077 method = getattr(cls, name)
5078 setattr(cls, name, method.origin)
5083 # An instance represents an ordered collection of records in a given
5084 # execution environment. The instance object refers to the environment, and
5085 # the records themselves are represented by their cache dictionary. The 'id'
5086 # of each record is found in its corresponding cache dictionary.
5088 # This design has the following advantages:
5089 # - cache access is direct and thus fast;
5090 # - one can consider records without an 'id' (see new records);
5091 # - the global cache is only an index to "resolve" a record 'id'.
5095 def _browse(cls, env, ids):
5096 """ Create an instance attached to `env`; `ids` is a tuple of record
5099 records = object.__new__(cls)
5102 env.prefetch[cls._name].update(ids)
5106 def browse(self, cr, uid, arg=None, context=None):
5107 ids = _normalize_ids(arg)
5108 #assert all(isinstance(id, IdType) for id in ids), "Browsing invalid ids: %s" % ids
5109 return self._browse(Environment(cr, uid, context or {}), ids)
5112 def browse(self, arg=None):
5113 """ browse([ids]) -> records
5115 Returns a recordset for the ids provided as parameter in the current environment.
5118 Can take no ids, a single id or a sequence of ids.
5120 ids = _normalize_ids(arg)
5121 #assert all(isinstance(id, IdType) for id in ids), "Browsing invalid ids: %s" % ids
5122 return self._browse(self.env, ids)
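# Illustrative usage sketch (new-style API); the ids are assumptions:
#
#     partners = self.env['res.partner'].browse([1, 2, 3])  # recordset of 3 records
#     partner = self.env['res.partner'].browse(1)           # single-record recordset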
5125 # Internal properties, for manipulating the instance's implementation
5130 """ List of actual record ids in this recordset (ignores placeholder
5131 ids for records to create)
5133 return filter(None, list(self._ids))
5135 # backward-compatibility with former browse records
5136 _cr = property(lambda self: self.env.cr)
5137 _uid = property(lambda self: self.env.uid)
5138 _context = property(lambda self: self.env.context)
5141 # Conversion methods
5144 def ensure_one(self):
5145 """ Verifies that the current recorset holds a single record. Raises
5146 an exception otherwise.
5150 raise except_orm("ValueError", "Expected singleton: %s" % self)
5152 def with_env(self, env):
5153 """ Returns a new version of this recordset attached to the provided
5156 :type env: :class:`~openerp.api.Environment`
5158 return self._browse(env, self._ids)
5160 def sudo(self, user=SUPERUSER_ID):
5161 """ sudo([user=SUPERUSER])
5163 Returns a new version of this recordset attached to the provided user.
5166 return self.with_env(self.env(user=user))
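# Illustrative usage sketch; 'partners' is a hypothetical recordset and
# 'other_uid' a hypothetical user id:
#
#     partners.sudo().write({'active': False})    # run as the super-user
#     partners.sudo(other_uid).read(['name'])     # run as another user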
5168 def with_context(self, *args, **kwargs):
5169 """ with_context([context][, **overrides]) -> records
5171 Returns a new version of this recordset attached to an extended context.
5174 The extended context is either the provided ``context`` in which
5175 ``overrides`` are merged or the *current* context in which
5176 ``overrides`` are merged e.g.::
5178 # current context is {'key1': True}
5179 r2 = records.with_context({}, key2=True)
5180 # -> r2._context is {'key2': True}
5181 r2 = records.with_context(key2=True)
5182 # -> r2._context is {'key1': True, 'key2': True}
5184 context = dict(args[0] if args else self._context, **kwargs)
5185 return self.with_env(self.env(context=context))
5187 def _convert_to_cache(self, values, update=False, validate=True):
5188 """ Convert the `values` dictionary into cached values.
5190 :param update: whether the conversion is made for updating `self`;
5191 this is necessary for interpreting the commands of *2many fields
5192 :param validate: whether values must be checked
5194 fields = self._fields
5195 target = self if update else self.browse()
5197 name: fields[name].convert_to_cache(value, target, validate=validate)
5198 for name, value in values.iteritems()
5202 def _convert_to_write(self, values):
5203 """ Convert the `values` dictionary into the format of :meth:`write`. """
5204 fields = self._fields
5206 for name, value in values.iteritems():
5208 value = fields[name].convert_to_write(value)
5209 if not isinstance(value, NewId):
5210 result[name] = value
5214 # Record traversal and update
5217 def _mapped_func(self, func):
5218 """ Apply function `func` on all records in `self`, and return the
5219 result as a list or a recordset (if `func` returns recordsets).
5221 vals = [func(rec) for rec in self]
5222 val0 = vals[0] if vals else func(self)
5223 if isinstance(val0, BaseModel):
5224 return reduce(operator.or_, vals, val0)
5227 def mapped(self, func):
5228 """ Apply `func` on all records in `self`, and return the result as a
5229 list or a recordset (if `func` returns recordsets). In the latter
5230 case, the order of the returned recordset is arbitrary.
5232 :param func: a function or a dot-separated sequence of field names
5234 if isinstance(func, basestring):
5236 for name in func.split('.'):
5237 recs = recs._mapped_func(operator.itemgetter(name))
5240 return self._mapped_func(func)
5242 def _mapped_cache(self, name_seq):
5243 """ Same as `~.mapped`, but `name_seq` is a dot-separated sequence of
5244 field names, and only cached values are used.
5247 for name in name_seq.split('.'):
5248 field = recs._fields[name]
5249 null = field.null(self.env)
5250 recs = recs.mapped(lambda rec: rec._cache.get(field, null))
5253 def filtered(self, func):
5254 """ Select the records in `self` such that `func(rec)` is true, and
5255 return them as a recordset.
5257 :param func: a function or a dot-separated sequence of field names
5259 if isinstance(func, basestring):
5261 func = lambda rec: filter(None, rec.mapped(name))
5262 return self.browse([rec.id for rec in self if func(rec)])
5264 def sorted(self, key=None):
5265 """ Return the recordset `self` ordered by `key` """
5267 return self.search([('id', 'in', self.ids)])
5269 return self.browse(map(int, sorted(self, key=key)))
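# Illustrative usage sketch; 'partners' is a hypothetical res.partner recordset
# with the standard 'name', 'email' and 'parent_id' fields:
#
#     partners.mapped('name')                 # list of names
#     partners.mapped('parent_id.name')       # names of the (distinct) parents
#     partners.filtered(lambda r: r.email)    # partners having an email set
#     partners.filtered('email')              # same, using a field path
#     partners.sorted(key=lambda r: r.name)   # sorted copy of the recordset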
5271 def update(self, values):
5272 """ Update record `self[0]` with `values`. """
5273 for name, value in values.iteritems():
5277 # New records - represent records that do not exist in the database yet;
5278 # they are used to compute default values and perform onchanges.
5282 def new(self, values={}):
5283 """ new([values]) -> record
5285 Return a new record instance attached to the current environment and
5286 initialized with the provided ``values``. The record is *not* created
5287 in database, it only exists in memory.
5289 record = self.browse([NewId()])
5290 record._cache.update(record._convert_to_cache(values, update=True))
5292 if record.env.in_onchange:
5293 # The cache update does not set inverse fields, so do it manually.
5294 # This is useful for computing a function field on secondary
5295 # records, if that field depends on the main record.
5297 field = self._fields.get(name)
5299 for invf in field.inverse_fields:
5300 invf._update(record[name], record)
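# Illustrative usage sketch (new-style API); 'res.partner' and the values are
# assumptions:
#
#     draft = self.env['res.partner'].new({'name': 'ghost', 'is_company': True})
#     draft.name    # -> 'ghost' (cache only, nothing written to the database)
#     draft.id      # -> a NewId placeholder, not a database id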
5305 # Dirty flag, to mark records modified (in draft mode)
5310 """ Return whether any record in `self` is dirty. """
5311 dirty = self.env.dirty
5312 return any(record in dirty for record in self)
5315 def _dirty(self, value):
5316 """ Mark the records in `self` as dirty. """
5318 map(self.env.dirty.add, self)
5320 map(self.env.dirty.discard, self)
5326 def __nonzero__(self):
5327 """ Test whether `self` is nonempty. """
5328 return bool(getattr(self, '_ids', True))
5331 """ Return the size of `self`. """
5332 return len(self._ids)
5335 """ Return an iterator over `self`. """
5336 for id in self._ids:
5337 yield self._browse(self.env, (id,))
5339 def __contains__(self, item):
5340 """ Test whether `item` (record or field name) is an element of `self`.
5341 In the first case, the test is fully equivalent to::
5343 any(item == record for record in self)
5345 if isinstance(item, BaseModel) and self._name == item._name:
5346 return len(item) == 1 and item.id in self._ids
5347 elif isinstance(item, basestring):
5348 return item in self._fields
5350 raise except_orm("ValueError", "Mixing apples and oranges: %s in %s" % (item, self))
5352 def __add__(self, other):
5353 """ Return the concatenation of two recordsets. """
5354 if not isinstance(other, BaseModel) or self._name != other._name:
5355 raise except_orm("ValueError", "Mixing apples and oranges: %s + %s" % (self, other))
5356 return self.browse(self._ids + other._ids)
5358 def __sub__(self, other):
5359 """ Return the recordset of all the records in `self` that are not in `other`. """
5360 if not isinstance(other, BaseModel) or self._name != other._name:
5361 raise except_orm("ValueError", "Mixing apples and oranges: %s - %s" % (self, other))
5362 other_ids = set(other._ids)
5363 return self.browse([id for id in self._ids if id not in other_ids])
5365 def __and__(self, other):
5366 """ Return the intersection of two recordsets.
5367 Note that recordset order is not preserved.
5369 if not isinstance(other, BaseModel) or self._name != other._name:
5370 raise except_orm("ValueError", "Mixing apples and oranges: %s & %s" % (self, other))
5371 return self.browse(set(self._ids) & set(other._ids))
5373 def __or__(self, other):
5374 """ Return the union of two recordsets.
5375 Note that recordset order is not preserved.
5377 if not isinstance(other, BaseModel) or self._name != other._name:
5378 raise except_orm("ValueError", "Mixing apples and oranges: %s | %s" % (self, other))
5379 return self.browse(set(self._ids) | set(other._ids))
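# Illustrative sketch; 'a' and 'b' are hypothetical recordsets of the same model:
#
#     a + b    # concatenation, order preserved, duplicates kept
#     a - b    # records of a that are not in b
#     a & b    # intersection (order not preserved)
#     a | b    # union (order not preserved)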
5381 def __eq__(self, other):
5382 """ Test whether two recordsets are equivalent (up to reordering). """
5383 if not isinstance(other, BaseModel):
5385 _logger.warning("Comparing apples and oranges: %s == %s", self, other)
5387 return self._name == other._name and set(self._ids) == set(other._ids)
5389 def __ne__(self, other):
5390 return not self == other
5392 def __lt__(self, other):
5393 if not isinstance(other, BaseModel) or self._name != other._name:
5394 raise except_orm("ValueError", "Mixing apples and oranges: %s < %s" % (self, other))
5395 return set(self._ids) < set(other._ids)
5397 def __le__(self, other):
5398 if not isinstance(other, BaseModel) or self._name != other._name:
5399 raise except_orm("ValueError", "Mixing apples and oranges: %s <= %s" % (self, other))
5400 return set(self._ids) <= set(other._ids)
5402 def __gt__(self, other):
5403 if not isinstance(other, BaseModel) or self._name != other._name:
5404 raise except_orm("ValueError", "Mixing apples and oranges: %s > %s" % (self, other))
5405 return set(self._ids) > set(other._ids)
5407 def __ge__(self, other):
5408 if not isinstance(other, BaseModel) or self._name != other._name:
5409 raise except_orm("ValueError", "Mixing apples and oranges: %s >= %s" % (self, other))
5410 return set(self._ids) >= set(other._ids)
5416 return "%s%s" % (self._name, getattr(self, '_ids', ""))
5418 def __unicode__(self):
5419 return unicode(str(self))
5424 if hasattr(self, '_ids'):
5425 return hash((self._name, frozenset(self._ids)))
5427 return hash(self._name)
5429 def __getitem__(self, key):
5430 """ If `key` is an integer or a slice, return the corresponding record
5431 selection as an instance (attached to `self.env`).
5432 Otherwise read the field `key` of the first record in `self`.
5436 inst = model.search(dom) # inst is a recordset
5437 r4 = inst[3] # fourth record in inst
5438 rs = inst[10:20] # subset of inst
5439 nm = rs['name'] # name of first record in inst
5441 if isinstance(key, basestring):
5442 # important: one must call the field's getter
5443 return self._fields[key].__get__(self, type(self))
5444 elif isinstance(key, slice):
5445 return self._browse(self.env, self._ids[key])
5447 return self._browse(self.env, (self._ids[key],))
5449 def __setitem__(self, key, value):
5450 """ Assign the field `key` to `value` in record `self`. """
5451 # important: one must call the field's setter
5452 return self._fields[key].__set__(self, value)
5455 # Cache and recomputation management
5460 """ Return the cache of `self`, mapping field names to values. """
5461 return RecordCache(self)
5464 def _in_cache_without(self, field):
5465 """ Make sure `self` is present in cache (for prefetching), and return
5466 the records of model `self` in cache that have no value for `field`
5467 (:class:`Field` instance).
5470 prefetch_ids = env.prefetch[self._name]
5471 prefetch_ids.update(self._ids)
5472 ids = filter(None, prefetch_ids - set(env.cache[field]))
5473 return self.browse(ids)
5477 """ Clear the records cache.
5480 The record cache is automatically invalidated.
5482 self.invalidate_cache()
5485 def invalidate_cache(self, fnames=None, ids=None):
5486 """ Invalidate the record caches after some records have been modified.
5487 If both `fnames` and `ids` are ``None``, the whole cache is cleared.
5489 :param fnames: the list of modified fields, or ``None`` for all fields
5490 :param ids: the list of modified record ids, or ``None`` for all
5494 return self.env.invalidate_all()
5495 fields = self._fields.values()
5497 fields = map(self._fields.__getitem__, fnames)
5499 # invalidate fields and inverse fields, too
5500 spec = [(f, ids) for f in fields] + \
5501 [(invf, None) for f in fields for invf in f.inverse_fields]
5502 self.env.invalidate(spec)
5505 def modified(self, fnames):
5506 """ Notify that fields have been modified on `self`. This invalidates
5507 the cache, and prepares the recomputation of stored function fields
5508 (new-style fields only).
5510 :param fnames: iterable of field names that have been modified on the records `self`
5513 # each field knows what to invalidate and recompute
5515 for fname in fnames:
5516 spec += self._fields[fname].modified(self)
5520 for env in self.env.all
5521 for field in env.cache
5523 # invalidate non-stored fields.function which are currently cached
5524 spec += [(f, None) for f in self.pool.pure_function_fields
5525 if f in cached_fields]
5527 self.env.invalidate(spec)
5529 def _recompute_check(self, field):
5530 """ If `field` must be recomputed on some record in `self`, return the
5531 corresponding records that must be recomputed.
5533 return self.env.check_todo(field, self)
5535 def _recompute_todo(self, field):
5536 """ Mark `field` to be recomputed. """
5537 self.env.add_todo(field, self)
5539 def _recompute_done(self, field):
5540 """ Mark `field` as recomputed. """
5541 self.env.remove_todo(field, self)
5544 def recompute(self):
5545 """ Recompute stored function fields. The fields and records to
5546 recompute have been determined by method :meth:`modified`.
5548 while self.env.has_todo():
5549 field, recs = self.env.get_todo()
5550 # evaluate the fields to recompute, and save them to database
5551 for rec, rec1 in zip(recs, recs.with_context(recompute=False)):
5553 values = rec._convert_to_write({
5554 f.name: rec[f.name] for f in field.computed_fields
5557 except MissingError:
5559 # mark the computed fields as done
5560 map(recs._recompute_done, field.computed_fields)
5563 # Generic onchange method
5566 def _has_onchange(self, field, other_fields):
5567 """ Return whether `field` should trigger an onchange event in the
5568 presence of `other_fields`.
5570 # test whether self has an onchange method for field, or field is a
5571 # dependency of any field in other_fields
5572 return field.name in self._onchange_methods or \
5573 any(dep in other_fields for dep in field.dependents)
5576 def _onchange_spec(self, view_info=None):
5577 """ Return the onchange spec from a view description; if not given, the
5578 result of ``self.fields_view_get()`` is used.
5582 # for traversing the XML arch and populating result
5583 def process(node, info, prefix):
5584 if node.tag == 'field':
5585 name = node.attrib['name']
5586 names = "%s.%s" % (prefix, name) if prefix else name
5587 if not result.get(names):
5588 result[names] = node.attrib.get('on_change')
5589 # traverse the subviews included in relational fields
5590 for subinfo in info['fields'][name].get('views', {}).itervalues():
5591 process(etree.fromstring(subinfo['arch']), subinfo, names)
5594 process(child, info, prefix)
5596 if view_info is None:
5597 view_info = self.fields_view_get()
5598 process(etree.fromstring(view_info['arch']), view_info, '')
5601 def _onchange_eval(self, field_name, onchange, result):
5602 """ Apply onchange method(s) for field `field_name` with spec `onchange`
5603 on record `self`. Value assignments are applied on `self`, while
5604 domain and warning messages are put in dictionary `result`.
5606 onchange = onchange.strip()
5609 if onchange in ("1", "true"):
5610 for method in self._onchange_methods.get(field_name, ()):
5611 method_res = method(self)
5614 if 'domain' in method_res:
5615 result.setdefault('domain', {}).update(method_res['domain'])
5616 if 'warning' in method_res:
5617 result['warning'] = method_res['warning']
5621 match = onchange_v7.match(onchange)
5623 method, params = match.groups()
5625 # evaluate params -> tuple
5626 global_vars = {'context': self._context, 'uid': self._uid}
5627 if self._context.get('field_parent'):
5628 class RawRecord(object):
5629 def __init__(self, record):
5630 self._record = record
5631 def __getattr__(self, name):
5632 field = self._record._fields[name]
5633 value = self._record[name]
5634 return field.convert_to_onchange(value)
5635 record = self[self._context['field_parent']]
5636 global_vars['parent'] = RawRecord(record)
5638 key: self._fields[key].convert_to_onchange(val)
5639 for key, val in self._cache.iteritems()
5641 params = eval("[%s]" % params, global_vars, field_vars)
5643 # call onchange method
5644 args = (self._cr, self._uid, self._origin.ids) + tuple(params)
5645 method_res = getattr(self._model, method)(*args)
5646 if not isinstance(method_res, dict):
5648 if 'value' in method_res:
5649 method_res['value'].pop('id', None)
5650 self.update(self._convert_to_cache(method_res['value'], validate=False))
5651 if 'domain' in method_res:
5652 result.setdefault('domain', {}).update(method_res['domain'])
5653 if 'warning' in method_res:
5654 result['warning'] = method_res['warning']
5657 def onchange(self, values, field_name, field_onchange):
5658 """ Perform an onchange on the given field.
5660 :param values: dictionary mapping field names to values, giving the
5661 current state of modification
5662 :param field_name: name of the modified field_name
5663 :param field_onchange: dictionary mapping field names to their on_change attribute
5668 if field_name and field_name not in self._fields:
5671 # determine subfields for field.convert_to_write() below
5673 subfields = defaultdict(set)
5674 for dotname in field_onchange:
5676 secondary.append(dotname)
5677 name, subname = dotname.split('.')
5678 subfields[name].add(subname)
5680 # create a new record with values, and attach `self` to it
5681 with env.do_in_onchange():
5682 record = self.new(values)
5683 values = dict(record._cache)
5684 # attach `self` with a different context (for cache consistency)
5685 record._origin = self.with_context(__onchange=True)
5687 # determine which field(s) should trigger an onchange
5688 todo = set([field_name]) if field_name else set(values)
5691 # dummy assignment: trigger invalidations on the record
5693 value = record[name]
5694 field = self._fields[name]
5695 if not field_name and field.type == 'many2one' and field.delegate and not value:
5696 # do not nullify all fields of parent record for new records
5698 record[name] = value
5700 result = {'value': {}}
5708 with env.do_in_onchange():
5709 # apply field-specific onchange methods
5710 if field_onchange.get(name):
5711 record._onchange_eval(name, field_onchange[name], result)
5713 # force re-evaluation of function fields on secondary records
5714 for field_seq in secondary:
5715 record.mapped(field_seq)
5717 # determine which fields have been modified
5718 for name, oldval in values.iteritems():
5719 field = self._fields[name]
5720 newval = record[name]
5721 if field.type in ('one2many', 'many2many'):
5722 if newval != oldval or newval._dirty:
5723 # put new value in result
5724 result['value'][name] = field.convert_to_write(
5725 newval, record._origin, subfields.get(name),
5729 # keep result: newval may have been dirty before
5732 if newval != oldval:
5733 # put new value in result
5734 result['value'][name] = field.convert_to_write(
5735 newval, record._origin, subfields.get(name),
5739 # clean up result to not return another value
5740 result['value'].pop(name, None)
5742 # At the moment, the client does not support updates on a *2many field
5743 # while this one is modified by the user.
5744 if field_name and self._fields[field_name].type in ('one2many', 'many2many'):
5745 result['value'].pop(field_name, None)
5750 class RecordCache(MutableMapping):
5751 """ Implements a proxy dictionary to read/update the cache of a record.
5752 Upon iteration, it looks like a dictionary mapping field names to
5753 values. However, fields may be used as keys as well.
5755 def __init__(self, records):
5756 self._recs = records
5758 def contains(self, field):
5759 """ Return whether `records[0]` has a value for `field` in cache. """
5760 if isinstance(field, basestring):
5761 field = self._recs._fields[field]
5762 return self._recs.id in self._recs.env.cache[field]
5764 def __contains__(self, field):
5765 """ Return whether `records[0]` has a regular value for `field` in cache. """
5766 if isinstance(field, basestring):
5767 field = self._recs._fields[field]
5768 dummy = SpecialValue(None)
5769 value = self._recs.env.cache[field].get(self._recs.id, dummy)
5770 return not isinstance(value, SpecialValue)
5772 def __getitem__(self, field):
5773 """ Return the cached value of `field` for `records[0]`. """
5774 if isinstance(field, basestring):
5775 field = self._recs._fields[field]
5776 value = self._recs.env.cache[field][self._recs.id]
5777 return value.get() if isinstance(value, SpecialValue) else value
5779 def __setitem__(self, field, value):
5780 """ Assign the cached value of `field` for all records in `records`. """
5781 if isinstance(field, basestring):
5782 field = self._recs._fields[field]
5783 values = dict.fromkeys(self._recs._ids, value)
5784 self._recs.env.cache[field].update(values)
5786 def update(self, *args, **kwargs):
5787 """ Update the cache of all records in `records`. If the argument is a
5788 `SpecialValue`, update all fields (except "magic" columns).
5790 if args and isinstance(args[0], SpecialValue):
5791 values = dict.fromkeys(self._recs._ids, args[0])
5792 for name, field in self._recs._fields.iteritems():
5794 self._recs.env.cache[field].update(values)
5796 return super(RecordCache, self).update(*args, **kwargs)
5798 def __delitem__(self, field):
5799 """ Remove the cached value of `field` for all `records`. """
5800 if isinstance(field, basestring):
5801 field = self._recs._fields[field]
5802 field_cache = self._recs.env.cache[field]
5803 for id in self._recs._ids:
5804 field_cache.pop(id, None)
5807 """ Iterate over the field names with a regular value in cache. """
5808 cache, id = self._recs.env.cache, self._recs.id
5809 dummy = SpecialValue(None)
5810 for name, field in self._recs._fields.iteritems():
5811 if name != 'id' and not isinstance(cache[field].get(id, dummy), SpecialValue):
5815 """ Return the number of fields with a regular value in cache. """
5816 return sum(1 for name in self)
5818 class Model(BaseModel):
5819 """Main super-class for regular database-persisted OpenERP models.
5821 OpenERP models are created by inheriting from this class::
5826 The system will later instantiate the class once per database (on
5827 which the class' module is installed).
5830 _register = False # not visible in ORM registry, meant to be python-inherited only
5831 _transient = False # True in a TransientModel
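# Illustrative sketch of the kind of definition referred to above; the model
# name and field are assumptions (addon code would normally go through the
# openerp namespace imports):
#
#     from openerp import models, fields
#
#     class library_book(models.Model):
#         _name = 'library.book'
#         name = fields.Char('Title', required=True)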
5833 class TransientModel(BaseModel):
5834 """Model super-class for transient records, meant to be temporarily
5835 persisted, and regularly vacuum-cleaned.
5837 A TransientModel has simplified access rights management:
5838 all users can create new records, and may only access the
5839 records they created. The super-user has unrestricted access
5840 to all TransientModel records.
5843 _register = False # not visible in ORM registry, meant to be python-inherited only
5846 class AbstractModel(BaseModel):
5847 """Abstract Model super-class for creating an abstract class meant to be
5848 inherited by regular models (Models or TransientModels) but not meant to
5849 be usable on its own, or persisted.
5851 Technical note: we don't want to make AbstractModel the super-class of
5852 Model or BaseModel because it would not make sense to put the main
5853 definition of persistence methods such as create() in it, and still we
5854 should be able to override them within an AbstractModel.
5856 _auto = False # don't create any database backend for AbstractModels
5857 _register = False # not visible in ORM registry, meant to be python-inherited only
5860 def itemgetter_tuple(items):
5861 """ Fixes itemgetter inconsistency (useful in some cases) of not returning
5862 a tuple if len(items) == 1: always returns an n-tuple where n = len(items)
5867 return lambda gettable: (gettable[items[0]],)
5868 return operator.itemgetter(*items)
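# Illustrative sketch of the behaviour described above:
#
#     itemgetter_tuple([])({'a': 1})                  # -> ()
#     itemgetter_tuple(['a'])({'a': 1, 'b': 2})       # -> (1,)
#     itemgetter_tuple(['a', 'b'])({'a': 1, 'b': 2})  # -> (1, 2)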
5870 def convert_pgerror_23502(model, fields, info, e):
5871 m = re.match(r'^null value in column "(?P<field>\w+)" violates '
5872 r'not-null constraint\n',
5874 field_name = m and m.group('field')
5875 if not m or field_name not in fields:
5876 return {'message': unicode(e)}
5877 message = _(u"Missing required value for the field '%s'.") % field_name
5878 field = fields.get(field_name)
5880 message = _(u"Missing required value for the field '%s' (%s)") % (field['string'], field_name)
5883 'field': field_name,
5886 def convert_pgerror_23505(model, fields, info, e):
5887 m = re.match(r'^duplicate key (?P<field>\w+) violates unique constraint',
5889 field_name = m and m.group('field')
5890 if not m or field_name not in fields:
5891 return {'message': unicode(e)}
5892 message = _(u"The value for the field '%s' already exists.") % field_name
5893 field = fields.get(field_name)
5895 message = _(u"%s This might be '%s' in the current model, or a field "
5896 u"of the same name in an o2m.") % (message, field['string'])
5899 'field': field_name,
5902 PGERROR_TO_OE = defaultdict(
5903 # shape of mapped converters
5904 lambda: (lambda model, fvg, info, pgerror: {'message': unicode(pgerror)}), {
5905 # not_null_violation
5906 '23502': convert_pgerror_23502,
5907 # unique constraint error
5908 '23505': convert_pgerror_23505,
5911 def _normalize_ids(arg, atoms={int, long, str, unicode, NewId}):
5912 """ Normalizes the ids argument for ``browse`` (v7 and v8) to a tuple.
5914 Various implementations were tested on the corpus of all browse() calls
5915 performed during a full crawler run (after having installed all website_*
5916 modules) and this one was the most efficient overall.
5918 A possible bit of correctness was sacrificed by not doing any test on
5919 Iterable and just assuming that any non-atomic type was an iterable of ids.
5924 # much of the corpus is falsy objects (empty list, tuple or set, None)
5928 # `type in set` is significantly faster (because more restrictive) than
5929 # isinstance(arg, set) or issubclass(type, set); and for new-style classes
5930 # obj.__class__ is equivalent to but faster than type(obj). Not relevant
5931 # (and looks much worse) in most cases, but over millions of calls it
5932 # does have a very minor effect.
5933 if arg.__class__ in atoms:
5938 # keep those imports here to avoid dependency cycle errors
5939 from .osv import expression
5940 from .fields import Field, SpecialValue, FailedValue
5942 # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: