1 # -*- coding: utf-8 -*-
2 ##############################################################################
4 # OpenERP, Open Source Management Solution
5 # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
7 # This program is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU Affero General Public License as
9 # published by the Free Software Foundation, either version 3 of the
10 # License, or (at your option) any later version.
12 # This program is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU Affero General Public License for more details.
17 # You should have received a copy of the GNU Affero General Public License
18 # along with this program. If not, see <http://www.gnu.org/licenses/>.
20 ##############################################################################
24 Object Relational Mapping module:
25 * Hierarchical structure
26 * Constraints consistency and validation
27 * Object metadata depends on its status
28 * Optimised processing by complex queries (multiple actions at once)
29 * Default field values
30 * Permissions optimisation
31 * Persistent object: PostgreSQL DB
33 * Multi-level caching system
34 * Two different inheritance mechanisms
35 * Rich set of field types:
36 - classical (varchar, integer, boolean, ...)
37 - relational (one2many, many2one, many2many)
52 from collections import defaultdict, MutableMapping
53 from inspect import getmembers
56 import dateutil.relativedelta
58 from lxml import etree
61 from . import SUPERUSER_ID
64 from .api import Environment
65 from .exceptions import except_orm, AccessError, MissingError, ValidationError
66 from .osv import fields
67 from .osv.query import Query
68 from .tools import lazy_property, ormcache
69 from .tools.config import config
70 from .tools.misc import CountingStream, DEFAULT_SERVER_DATETIME_FORMAT, DEFAULT_SERVER_DATE_FORMAT
71 from .tools.safe_eval import safe_eval as eval
72 from .tools.translate import _
74 _logger = logging.getLogger(__name__)
75 _schema = logging.getLogger(__name__ + '.schema')
77 regex_order = re.compile('^( *([a-z0-9:_]+|"[a-z0-9:_]+")( *desc| *asc)?( *, *|))+$', re.I)
78 regex_object_name = re.compile(r'^[a-z0-9_.]+$')
79 onchange_v7 = re.compile(r"^(\w+)\((.*)\)$")
81 AUTOINIT_RECALCULATE_STORED_FIELDS = 1000
84 def check_object_name(name):
85 """ Check if the given name is a valid openerp object name.
87 The _name attribute in osv and osv_memory object is subject to
88 some restrictions. This function returns True or False depending on
89 whether the given name is allowed.
91 TODO: this is an approximation. The goal in this approximation
92 is to disallow uppercase characters (in some places, we quote
93 table/column names and in others not, which leads to this kind
96 psycopg2.ProgrammingError: relation "xxx" does not exist).
98 The same restriction should apply to both osv and osv_memory
99 objects for consistency.
102 if regex_object_name.match(name) is None:
106 def raise_on_invalid_object_name(name):
107 if not check_object_name(name):
108 msg = "The _name attribute %s is not valid." % name
110 raise except_orm('ValueError', msg)
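# Illustrative sketch (not part of the original module): expected behaviour of
# check_object_name(), assuming the regex above is the only check performed.
#   >>> check_object_name('res.partner')    # lowercase letters, digits, '_', '.'
#   True
#   >>> check_object_name('ResPartner')     # uppercase characters are rejected
#   False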
112 POSTGRES_CONFDELTYPES = {
120 def intersect(la, lb):
121 return filter(lambda x: x in lb, la)
124 """ Test whether functions `f` and `g` are identical or have the same name """
125 return f == g or getattr(f, '__name__', 0) == getattr(g, '__name__', 1)
127 def fix_import_export_id_paths(fieldname):
129 Fixes the id fields in import and exports, and splits field paths
132 :param str fieldname: name of the field to import/export
133 :return: split field name
136 fixed_db_id = re.sub(r'([^/])\.id', r'\1/.id', fieldname)
137 fixed_external_id = re.sub(r'([^/]):id', r'\1/id', fixed_db_id)
138 return fixed_external_id.split('/')
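# Illustrative sketch: how fix_import_export_id_paths() splits import/export
# field paths (behaviour follows the two re.sub() calls and the final split above).
#   >>> fix_import_export_id_paths('order_line/product_id:id')
#   ['order_line', 'product_id', 'id']
#   >>> fix_import_export_id_paths('partner_id.id')
#   ['partner_id', '.id']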
140 def pg_varchar(size=0):
141 """ Returns the VARCHAR declaration for the provided size:
143 * If no size (or an empty or negative size is provided) return an
145 * Otherwise return a VARCHAR(n)
147 :param int size: varchar size, optional
151 if not isinstance(size, int):
152 raise TypeError("VARCHAR parameter should be an int, got %s"
155 return 'VARCHAR(%d)' % size
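# Illustrative sketch of pg_varchar(); the unlimited-VARCHAR branch is not shown
# above, so the first result is an assumption based on the docstring.
#   >>> pg_varchar(64)
#   'VARCHAR(64)'
#   >>> pg_varchar()
#   'VARCHAR'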
158 FIELDS_TO_PGTYPES = {
159 fields.boolean: 'bool',
160 fields.integer: 'int4',
164 fields.datetime: 'timestamp',
165 fields.binary: 'bytea',
166 fields.many2one: 'int4',
167 fields.serialized: 'text',
170 def get_pg_type(f, type_override=None):
172 :param fields._column f: field to get a Postgres type for
173 :param type type_override: use the provided type for dispatching instead of the field's own type
174 :returns: (postgres_identification_type, postgres_type_specification)
177 field_type = type_override or type(f)
179 if field_type in FIELDS_TO_PGTYPES:
180 pg_type = (FIELDS_TO_PGTYPES[field_type], FIELDS_TO_PGTYPES[field_type])
181 elif issubclass(field_type, fields.float):
183 pg_type = ('numeric', 'NUMERIC')
185 pg_type = ('float8', 'DOUBLE PRECISION')
186 elif issubclass(field_type, (fields.char, fields.reference)):
187 pg_type = ('varchar', pg_varchar(f.size))
188 elif issubclass(field_type, fields.selection):
189 if (isinstance(f.selection, list) and isinstance(f.selection[0][0], int))\
190 or getattr(f, 'size', None) == -1:
191 pg_type = ('int4', 'INTEGER')
193 pg_type = ('varchar', pg_varchar(getattr(f, 'size', None)))
194 elif issubclass(field_type, fields.function):
195 if f._type == 'selection':
196 pg_type = ('varchar', pg_varchar())
198 pg_type = get_pg_type(f, getattr(fields, f._type))
200 _logger.warning('%s type not supported!', field_type)
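# Illustrative sketch of the dispatch above (pairs follow FIELDS_TO_PGTYPES and
# the float/char branches; the float case depends on whether the field defines
# digits, whose guarding condition is elided here):
#   fields.integer             -> ('int4', 'int4')
#   fields.float (with digits) -> ('numeric', 'NUMERIC')
#   fields.float (no digits)   -> ('float8', 'DOUBLE PRECISION')
#   fields.char(size=64)       -> ('varchar', 'VARCHAR(64)')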
206 class MetaModel(api.Meta):
207 """ Metaclass for the models.
209 This class is used as the metaclass for the class :class:`BaseModel` to
210 discover the models defined in a module (without instantiating them).
211 If the automatic discovery is not needed, it is possible to set the model's
212 ``_register`` attribute to False.
216 module_to_models = {}
218 def __init__(self, name, bases, attrs):
219 if not self._register:
220 self._register = True
221 super(MetaModel, self).__init__(name, bases, attrs)
224 if not hasattr(self, '_module'):
225 # The (OpenERP) module name can be in the `openerp.addons` namespace
226 # or not. For instance, module `sale` can be imported as
227 # `openerp.addons.sale` (the right way) or `sale` (for backward
229 module_parts = self.__module__.split('.')
230 if len(module_parts) > 2 and module_parts[:2] == ['openerp', 'addons']:
231 module_name = self.__module__.split('.')[2]
233 module_name = self.__module__.split('.')[0]
234 self._module = module_name
236 # Remember which models to instantiate for this module.
238 self.module_to_models.setdefault(self._module, []).append(self)
240 # transform columns into new-style fields (enables field inheritance)
241 for name, column in self._columns.iteritems():
242 if not hasattr(self, name):
243 setattr(self, name, column.to_field())
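# Illustrative sketch (hypothetical model): a class defined under the
# openerp.addons namespace is recorded under its addon name, e.g.
#
#   class SaleNote(Model):          # defined in openerp.addons.sale
#       _name = 'sale.note'
#
#   MetaModel.module_to_models['sale']  ->  [..., SaleNote]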
247 """ Pseudo-ids for new records. """
248 def __nonzero__(self):
251 IdType = (int, long, basestring, NewId)
254 # maximum number of prefetched records
257 # special columns automatically created by the ORM
258 LOG_ACCESS_COLUMNS = ['create_uid', 'create_date', 'write_uid', 'write_date']
259 MAGIC_COLUMNS = ['id'] + LOG_ACCESS_COLUMNS
261 class BaseModel(object):
262 """ Base class for OpenERP models.
264 OpenERP models are created by inheriting from this class' subclasses:
266 * :class:`Model` for regular database-persisted models
268 * :class:`TransientModel` for temporary data, stored in the database but
269 automatically vacuumed every so often
271 * :class:`AbstractModel` for abstract super classes meant to be shared by
272 multiple inheriting models
274 The system automatically instantiates every model once per database. Those
275 instances represent the available models on each database, and depend on
276 which modules are installed on that database. The actual class of each
277 instance is built from the Python classes that create and inherit from the
280 Every model instance is a "recordset", i.e., an ordered collection of
281 records of the model. Recordsets are returned by methods like
282 :meth:`~.browse`, :meth:`~.search`, or field accesses. Records have no
283 explicit representation: a record is represented as a recordset of one
286 To create a class that should not be instantiated, the _register class
287 attribute may be set to False.
289 __metaclass__ = MetaModel
290 _auto = True # create database backend
291 _register = False # Set to false if the model shouldn't be automatically discovered.
298 _parent_name = 'parent_id'
299 _parent_store = False
300 _parent_order = False
306 _translate = True # set to False to disable translations export for this model
308 # dict of {field:method}, with method returning the (name_get of records, {id: fold})
309 # to include in the _read_group, if grouped on this field
313 _transient = False # True in a TransientModel
316 # { 'parent_model': 'm2o_field', ... }
319 # Mapping from inherits'd field name to 4-tuple (m, r, f, n) where m is the
320 # model from which it is inherits'd, r is the (local) field towards m, f
321 # is the _column object itself, and n is the original (i.e. top-most)
324 # { 'field_name': ('parent_model', 'm2o_field_to_reach_parent',
325 # field_column_obj, original_parent_model), ... }
328 # Mapping field name/column_info object
329 # This is similar to _inherit_fields but:
330 # 1. includes self fields,
331 # 2. uses column_info instead of a triple.
336 _sql_constraints = []
338 # model dependencies, for models backed by SQL views:
339 # {model_name: field_names, ...}
342 CONCURRENCY_CHECK_FIELD = '__last_update'
344 def log(self, cr, uid, id, message, secondary=False, context=None):
345 return _logger.warning("log() is deprecated. Please use OpenChatter notification system instead of the res.log mechanism.")
347 def view_init(self, cr, uid, fields_list, context=None):
348 """Override this method to do specific things when a view on the object is opened."""
351 def _field_create(self, cr, context=None):
352 """ Create entries in ir_model_fields for all the model's fields.
354 If necessary, also create an entry in ir_model, and if called from the
355 module-loading scheme (by receiving 'module' in the context), also
356 create entries in ir_model_data (for the model and the fields).
358 - create an entry in ir_model (if there is not already one),
359 - create an entry in ir_model_data (if there is not already one, and if
360 'module' is in the context),
361 - update ir_model_fields with the fields found in _columns
362 (TODO there is some redundancy as _columns is updated from
363 ir_model_fields in __init__).
368 cr.execute("SELECT id FROM ir_model WHERE model=%s", (self._name,))
370 cr.execute('SELECT nextval(%s)', ('ir_model_id_seq',))
371 model_id = cr.fetchone()[0]
372 cr.execute("INSERT INTO ir_model (id,model, name, info,state) VALUES (%s, %s, %s, %s, %s)", (model_id, self._name, self._description, self.__doc__, 'base'))
374 model_id = cr.fetchone()[0]
375 if 'module' in context:
376 name_id = 'model_'+self._name.replace('.', '_')
377 cr.execute('select * from ir_model_data where name=%s and module=%s', (name_id, context['module']))
379 cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, (now() at time zone 'UTC'), (now() at time zone 'UTC'), %s, %s, %s)", \
380 (name_id, context['module'], 'ir.model', model_id)
383 cr.execute("SELECT * FROM ir_model_fields WHERE model=%s", (self._name,))
385 for rec in cr.dictfetchall():
386 cols[rec['name']] = rec
388 ir_model_fields_obj = self.pool.get('ir.model.fields')
390 # sparse fields should be created at the end, as they depend on their serialized field already existing
391 model_fields = sorted(self._columns.items(), key=lambda x: 1 if x[1]._type == 'sparse' else 0)
392 for (k, f) in model_fields:
394 'model_id': model_id,
397 'field_description': f.string,
399 'relation': f._obj or '',
400 'select_level': tools.ustr(int(f.select)),
401 'readonly': (f.readonly and 1) or 0,
402 'required': (f.required and 1) or 0,
403 'selectable': (f.selectable and 1) or 0,
404 'translate': (f.translate and 1) or 0,
405 'relation_field': f._fields_id if isinstance(f, fields.one2many) else '',
406 'serialization_field_id': None,
408 if getattr(f, 'serialization_field', None):
409 # resolve link to serialization_field if specified by name
410 serialization_field_id = ir_model_fields_obj.search(cr, SUPERUSER_ID, [('model','=',vals['model']), ('name', '=', f.serialization_field)])
411 if not serialization_field_id:
412 raise except_orm(_('Error'), _("Serialization field `%s` not found for sparse field `%s`!") % (f.serialization_field, k))
413 vals['serialization_field_id'] = serialization_field_id[0]
415 # When it's a custom field, it does not contain f.select
416 if context.get('field_state', 'base') == 'manual':
417 if context.get('field_name', '') == k:
418 vals['select_level'] = context.get('select', '0')
419 # set the value so the problem does not occur again next time
421 vals['select_level'] = cols[k]['select_level']
424 cr.execute('select nextval(%s)', ('ir_model_fields_id_seq',))
425 id = cr.fetchone()[0]
427 cr.execute("""INSERT INTO ir_model_fields (
428 id, model_id, model, name, field_description, ttype,
429 relation,state,select_level,relation_field, translate, serialization_field_id
431 %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s
433 id, vals['model_id'], vals['model'], vals['name'], vals['field_description'], vals['ttype'],
434 vals['relation'], 'base',
435 vals['select_level'], vals['relation_field'], bool(vals['translate']), vals['serialization_field_id']
437 if 'module' in context:
438 name1 = 'field_' + self._table + '_' + k
439 cr.execute("select name from ir_model_data where name=%s", (name1,))
441 name1 = name1 + "_" + str(id)
442 cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, (now() at time zone 'UTC'), (now() at time zone 'UTC'), %s, %s, %s)", \
443 (name1, context['module'], 'ir.model.fields', id)
446 for key, val in vals.items():
447 if cols[k][key] != vals[key]:
448 cr.execute('update ir_model_fields set field_description=%s where model=%s and name=%s', (vals['field_description'], vals['model'], vals['name']))
449 cr.execute("""UPDATE ir_model_fields SET
450 model_id=%s, field_description=%s, ttype=%s, relation=%s,
451 select_level=%s, readonly=%s ,required=%s, selectable=%s, relation_field=%s, translate=%s, serialization_field_id=%s
453 model=%s AND name=%s""", (
454 vals['model_id'], vals['field_description'], vals['ttype'],
456 vals['select_level'], bool(vals['readonly']), bool(vals['required']), bool(vals['selectable']), vals['relation_field'], bool(vals['translate']), vals['serialization_field_id'], vals['model'], vals['name']
459 self.invalidate_cache(cr, SUPERUSER_ID)
462 def _add_field(cls, name, field):
463 """ Add the given `field` under the given `name` in the class """
464 field.set_class_name(cls, name)
466 # add field in _fields (for reflection)
467 cls._fields[name] = field
469 # add field as an attribute, unless another kind of value already exists
470 if isinstance(getattr(cls, name, field), Field):
471 setattr(cls, name, field)
473 _logger.warning("In model %r, member %r is not a field", cls._name, name)
476 cls._columns[name] = field.to_column()
478 # remove potential column that may be overridden by field
479 cls._columns.pop(name, None)
482 def _pop_field(cls, name):
483 """ Remove the field with the given `name` from the model.
484 This method should only be used for manual fields.
486 field = cls._fields.pop(name)
487 cls._columns.pop(name, None)
488 cls._all_columns.pop(name, None)
489 if hasattr(cls, name):
494 def _add_magic_fields(cls):
495 """ Introduce magic fields on the current class
497 * id is a "normal" field (with a specific getter)
498 * create_uid, create_date, write_uid and write_date have become
500 * $CONCURRENCY_CHECK_FIELD is a computed field with its computing
501 method defined dynamically. Uses ``str(datetime.datetime.utcnow())``
502 to get the same structure as the previous
503 ``(now() at time zone 'UTC')::timestamp``::
505 # select (now() at time zone 'UTC')::timestamp;
507 ----------------------------
508 2013-06-18 08:30:37.292809
510 >>> str(datetime.datetime.utcnow())
511 '2013-06-18 08:31:32.821177'
513 def add(name, field):
514 """ add `field` with the given `name` if it does not exist yet """
515 if name not in cls._columns and name not in cls._fields:
516 cls._add_field(name, field)
521 # this field 'id' must override any other column or field
522 cls._add_field('id', fields.Id(automatic=True))
524 add('display_name', fields.Char(string='Display Name', automatic=True,
525 compute='_compute_display_name'))
528 add('create_uid', fields.Many2one('res.users', string='Created by', automatic=True))
529 add('create_date', fields.Datetime(string='Created on', automatic=True))
530 add('write_uid', fields.Many2one('res.users', string='Last Updated by', automatic=True))
531 add('write_date', fields.Datetime(string='Last Updated on', automatic=True))
532 last_modified_name = 'compute_concurrency_field_with_access'
534 last_modified_name = 'compute_concurrency_field'
536 # this field must override any other column or field
537 cls._add_field(cls.CONCURRENCY_CHECK_FIELD, fields.Datetime(
538 string='Last Modified on', compute=last_modified_name, automatic=True))
541 def compute_concurrency_field(self):
542 self[self.CONCURRENCY_CHECK_FIELD] = \
543 datetime.datetime.utcnow().strftime(DEFAULT_SERVER_DATETIME_FORMAT)
546 @api.depends('create_date', 'write_date')
547 def compute_concurrency_field_with_access(self):
548 self[self.CONCURRENCY_CHECK_FIELD] = \
549 self.write_date or self.create_date or \
550 datetime.datetime.utcnow().strftime(DEFAULT_SERVER_DATETIME_FORMAT)
553 # Goal: try to apply inheritance at the instantiation level and
554 # put objects in the pool var
557 def _build_model(cls, pool, cr):
558 """ Instanciate a given model.
560 This class method instanciates the class of some model (i.e. a class
561 deriving from osv or osv_memory). The class might be the class passed
562 in argument or, if it inherits from another class, a class constructed
563 by combining the two classes.
567 # IMPORTANT: the registry contains an instance for each model. The class
568 # of each model carries inferred metadata that is shared among the
569 # model's instances for this registry, but not among registries. Hence
570 # we cannot use that "registry class" for combining model classes by
571 # inheritance, since it confuses the metadata inference process.
573 # Keep links to non-inherited constraints in cls; this is useful for
574 # instance when exporting translations
575 cls._local_constraints = cls.__dict__.get('_constraints', [])
576 cls._local_sql_constraints = cls.__dict__.get('_sql_constraints', [])
578 # determine inherited models
579 parents = getattr(cls, '_inherit', [])
580 parents = [parents] if isinstance(parents, basestring) else (parents or [])
582 # determine the model's name
583 name = cls._name or (len(parents) == 1 and parents[0]) or cls.__name__
585 # determine the module that introduced the model
586 original_module = pool[name]._original_module if name in parents else cls._module
588 # build the class hierarchy for the model
589 for parent in parents:
590 if parent not in pool:
591 raise TypeError('The model "%s" specifies a nonexistent parent class "%s"\n'
592 'You may need to add a dependency on the parent class\' module.' % (name, parent))
593 parent_model = pool[parent]
595 # do not use the class of parent_model, since that class contains
596 # inferred metadata; use its ancestor instead
597 parent_class = type(parent_model).__base__
599 # don't inherit custom fields
600 columns = dict((key, val)
601 for key, val in parent_class._columns.iteritems()
604 columns.update(cls._columns)
606 defaults = dict(parent_class._defaults)
607 defaults.update(cls._defaults)
609 inherits = dict(parent_class._inherits)
610 inherits.update(cls._inherits)
612 depends = dict(parent_class._depends)
613 for m, fs in cls._depends.iteritems():
614 depends[m] = depends.get(m, []) + fs
616 old_constraints = parent_class._constraints
617 new_constraints = cls._constraints
618 # filter out from old_constraints the ones overridden by a
619 # constraint with the same function name in new_constraints
620 constraints = new_constraints + [oldc
621 for oldc in old_constraints
622 if not any(newc[2] == oldc[2] and same_name(newc[0], oldc[0])
623 for newc in new_constraints)
626 sql_constraints = cls._sql_constraints + \
627 parent_class._sql_constraints
633 '_defaults': defaults,
634 '_inherits': inherits,
636 '_constraints': constraints,
637 '_sql_constraints': sql_constraints,
639 cls = type(name, (cls, parent_class), attrs)
641 # introduce the "registry class" of the model;
642 # duplicate some attributes so that the ORM can modify them
646 '_columns': dict(cls._columns),
647 '_defaults': dict(cls._defaults),
648 '_inherits': dict(cls._inherits),
649 '_depends': dict(cls._depends),
650 '_constraints': list(cls._constraints),
651 '_sql_constraints': list(cls._sql_constraints),
652 '_original_module': original_module,
654 cls = type(cls._name, (cls,), attrs)
656 # float fields are registry-dependent (digits attribute); duplicate them
658 for key, col in cls._columns.items():
659 if col._type == 'float':
660 cls._columns[key] = copy.copy(col)
662 # instantiate the model, and initialize it
663 model = object.__new__(cls)
664 model.__init__(pool, cr)
668 def _init_function_fields(cls, pool, cr):
669 # initialize the list of non-stored function fields for this model
670 pool._pure_function_fields[cls._name] = []
672 # process store of low-level function fields
673 for fname, column in cls._columns.iteritems():
674 if hasattr(column, 'digits_change'):
675 column.digits_change(cr)
676 # filter out existing store about this field
677 pool._store_function[cls._name] = [
679 for stored in pool._store_function.get(cls._name, [])
680 if (stored[0], stored[1]) != (cls._name, fname)
682 if not isinstance(column, fields.function):
685 # register it on the pool for invalidation
686 pool._pure_function_fields[cls._name].append(fname)
688 # process store parameter
691 get_ids = lambda self, cr, uid, ids, c={}: ids
692 store = {cls._name: (get_ids, None, column.priority, None)}
693 for model, spec in store.iteritems():
695 (fnct, fields2, order, length) = spec
697 (fnct, fields2, order) = spec
700 raise except_orm('Error',
701 ('Invalid function definition %s in object %s !\nYou must use the definition: store={object:(fnct, fields, priority, time length)}.' % (fname, cls._name)))
702 pool._store_function.setdefault(model, [])
703 t = (cls._name, fname, fnct, tuple(fields2) if fields2 else None, order, length)
704 if t not in pool._store_function[model]:
705 pool._store_function[model].append(t)
706 pool._store_function[model].sort(key=lambda x: x[4])
709 def _init_manual_fields(cls, pool, cr):
710 # Check whether the query is already done
711 if pool.fields_by_model is not None:
712 manual_fields = pool.fields_by_model.get(cls._name, [])
714 cr.execute('SELECT * FROM ir_model_fields WHERE model=%s AND state=%s', (cls._name, 'manual'))
715 manual_fields = cr.dictfetchall()
717 for field in manual_fields:
718 if field['name'] in cls._columns:
721 'string': field['field_description'],
722 'required': bool(field['required']),
723 'readonly': bool(field['readonly']),
724 'domain': eval(field['domain']) if field['domain'] else None,
725 'size': field['size'] or None,
726 'ondelete': field['on_delete'],
727 'translate': (field['translate']),
730 #'select': int(field['select_level'])
732 if field['serialization_field_id']:
733 cr.execute('SELECT name FROM ir_model_fields WHERE id=%s', (field['serialization_field_id'],))
734 attrs.update({'serialization_field': cr.fetchone()[0], 'type': field['ttype']})
735 if field['ttype'] in ['many2one', 'one2many', 'many2many']:
736 attrs.update({'relation': field['relation']})
737 cls._columns[field['name']] = fields.sparse(**attrs)
738 elif field['ttype'] == 'selection':
739 cls._columns[field['name']] = fields.selection(eval(field['selection']), **attrs)
740 elif field['ttype'] == 'reference':
741 cls._columns[field['name']] = fields.reference(selection=eval(field['selection']), **attrs)
742 elif field['ttype'] == 'many2one':
743 cls._columns[field['name']] = fields.many2one(field['relation'], **attrs)
744 elif field['ttype'] == 'one2many':
745 cls._columns[field['name']] = fields.one2many(field['relation'], field['relation_field'], **attrs)
746 elif field['ttype'] == 'many2many':
747 _rel1 = field['relation'].replace('.', '_')
748 _rel2 = field['model'].replace('.', '_')
749 _rel_name = 'x_%s_%s_%s_rel' % (_rel1, _rel2, field['name'])
750 cls._columns[field['name']] = fields.many2many(field['relation'], _rel_name, 'id1', 'id2', **attrs)
752 cls._columns[field['name']] = getattr(fields, field['ttype'])(**attrs)
755 def _init_constraints_onchanges(cls):
756 # store sql constraint error messages
757 for (key, _, msg) in cls._sql_constraints:
758 cls.pool._sql_error[cls._table + '_' + key] = msg
760 # collect constraint and onchange methods
761 cls._constraint_methods = []
762 cls._onchange_methods = defaultdict(list)
763 for attr, func in getmembers(cls, callable):
764 if hasattr(func, '_constrains'):
765 if not all(name in cls._fields for name in func._constrains):
766 _logger.warning("@constrains%r parameters must be field names", func._constrains)
767 cls._constraint_methods.append(func)
768 if hasattr(func, '_onchange'):
769 if not all(name in cls._fields for name in func._onchange):
770 _logger.warning("@onchange%r parameters must be field names", func._onchange)
771 for name in func._onchange:
772 cls._onchange_methods[name].append(func)
775 # In the past, this method was registering the model class in the server.
776 # This job is now done entirely by the metaclass MetaModel.
778 # Do not create an instance here. Model instances are created by method
782 def __init__(self, pool, cr):
783 """ Initialize a model and make it part of the given registry.
785 - copy the stored fields' functions in the registry,
786 - retrieve custom fields and add them in the model,
787 - ensure there is a many2one for each _inherits'd parent,
788 - update the children's _columns,
789 - give a chance to each field to initialize itself.
794 # link the class to the registry, and update the registry
796 cls._model = self # backward compatibility
797 pool.add(cls._name, self)
799 # determine description, table, sequence and log_access
800 if not cls._description:
801 cls._description = cls._name
803 cls._table = cls._name.replace('.', '_')
804 if not cls._sequence:
805 cls._sequence = cls._table + '_id_seq'
806 if not hasattr(cls, '_log_access'):
807 # If _log_access is not specified, it is the same value as _auto.
808 cls._log_access = cls._auto
811 if cls.is_transient():
812 cls._transient_check_count = 0
813 cls._transient_max_count = config.get('osv_memory_count_limit')
814 cls._transient_max_hours = config.get('osv_memory_age_limit')
815 assert cls._log_access, \
816 "TransientModels must have log_access turned on, " \
817 "in order to implement their access rights policy"
819 # retrieve new-style fields and duplicate them (to avoid clashes with
820 # inheritance between different models)
822 for attr, field in getmembers(cls, Field.__instancecheck__):
823 if not field._origin:
824 cls._add_field(attr, field.copy())
826 # introduce magic fields
827 cls._add_magic_fields()
829 # register stuff about low-level function fields and custom fields
830 cls._init_function_fields(pool, cr)
831 cls._init_manual_fields(pool, cr)
834 cls._inherits_check()
835 cls._inherits_reload()
837 # register constraints and onchange methods
838 cls._init_constraints_onchanges()
841 for k in cls._defaults:
842 assert k in cls._fields, \
843 "Model %s has a default for nonexiting field %s" % (cls._name, k)
846 for column in cls._columns.itervalues():
851 assert cls._rec_name in cls._fields, \
852 "Invalid rec_name %s for model %s" % (cls._rec_name, cls._name)
853 elif 'name' in cls._fields:
854 cls._rec_name = 'name'
856 # prepare ormcache, which must be shared by all instances of the model
861 def _is_an_ordinary_table(self):
862 self.env.cr.execute("""\
866 AND relkind = %s""", [self._table, 'r'])
867 return bool(self.env.cr.fetchone())
869 def __export_xml_id(self):
870 """ Return a valid xml_id for the record `self`. """
871 if not self._is_an_ordinary_table():
873 "You can not export the column ID of model %s, because the "
874 "table %s is not an ordinary table."
875 % (self._name, self._table))
876 ir_model_data = self.sudo().env['ir.model.data']
877 data = ir_model_data.search([('model', '=', self._name), ('res_id', '=', self.id)])
880 return '%s.%s' % (data[0].module, data[0].name)
885 name = '%s_%s' % (self._table, self.id)
886 while ir_model_data.search([('module', '=', '__export__'), ('name', '=', name)]):
888 name = '%s_%s_%s' % (self._table, self.id, postfix)
889 ir_model_data.create({
892 'module': '__export__',
895 return '__export__.' + name
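# Illustrative sketch: a record already referenced in ir.model.data keeps its
# existing external id (e.g. 'base.main_company'); otherwise a fresh
# '__export__.<table>_<id>' entry such as '__export__.res_partner_42'
# (hypothetical id) is created and returned.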
898 def __export_rows(self, fields):
899 """ Export fields of the records in `self`.
901 :param fields: list of lists of fields to traverse
902 :return: list of lists of corresponding values
906 # main line of record, initially empty
907 current = [''] * len(fields)
908 lines.append(current)
910 # list of primary fields followed by secondary field(s)
913 # process column by column
914 for i, path in enumerate(fields):
919 if name in primary_done:
923 current[i] = str(record.id)
925 current[i] = record.__export_xml_id()
927 field = record._fields[name]
930 # this part could be simpler, but it has to be done this way
931 # in order to reproduce the former behavior
932 if not isinstance(value, BaseModel):
933 current[i] = field.convert_to_export(value, self.env)
935 primary_done.append(name)
937 # This is a special case, its strange behavior is intended!
938 if field.type == 'many2many' and len(path) > 1 and path[1] == 'id':
939 xml_ids = [r.__export_xml_id() for r in value]
940 current[i] = ','.join(xml_ids) or False
943 # recursively export the fields that follow name
944 fields2 = [(p[1:] if p and p[0] == name else []) for p in fields]
945 lines2 = value.__export_rows(fields2)
947 # merge first line with record's main line
948 for j, val in enumerate(lines2[0]):
951 # check value of current field
953 # assign xml_ids, and forget about remaining lines
954 xml_ids = [item[1] for item in value.name_get()]
955 current[i] = ','.join(xml_ids)
957 # append the other lines at the end
965 def export_data(self, fields_to_export, raw_data=False):
966 """ Export fields for selected objects
968 :param fields_to_export: list of fields
969 :param raw_data: True to return value in native Python type
970 :rtype: dictionary with a *datas* matrix
972 This method is used when exporting data via the client menu.
974 fields_to_export = map(fix_import_export_id_paths, fields_to_export)
976 self = self.with_context(export_raw_data=True)
977 return {'datas': self.__export_rows(fields_to_export)}
979 def import_data(self, cr, uid, fields, datas, mode='init', current_module='', noupdate=False, context=None, filename=None):
982 Use :meth:`~load` instead
984 Import given data in given module
986 This method is used when importing data via the client menu.
988 Example of fields to import for a sale.order::
991 partner_id, (=name_search)
992 order_line/.id, (=database_id)
994 order_line/product_id/id, (=xml id)
995 order_line/price_unit,
996 order_line/product_uom_qty,
997 order_line/product_uom/id (=xml_id)
999 This method returns a 4-tuple with the following structure::
1001 (return_code, errored_resource, error_message, unused)
1003 * The first item is a return code, it is ``-1`` in case of
1004 import error, or the last imported row number in case of success
1005 * The second item contains the record data dict that failed to import
1006 in case of error, otherwise it's 0
1007 * The third item contains an error message string in case of error,
1009 * The last item is currently unused, with no specific semantics
1011 :param fields: list of fields to import
1012 :param datas: data to import
1013 :param mode: 'init' or 'update' for record creation
1014 :param current_module: module name
1015 :param noupdate: flag for record creation
1016 :param filename: optional file to store partial import state for recovery
1017 :returns: 4-tuple in the form (return_code, errored_resource, error_message, unused)
1018 :rtype: (int, dict or 0, str or 0, str or 0)
1020 context = dict(context) if context is not None else {}
1021 context['_import_current_module'] = current_module
1023 fields = map(fix_import_export_id_paths, fields)
1024 ir_model_data_obj = self.pool.get('ir.model.data')
1027 if m['type'] == 'error':
1028 raise Exception(m['message'])
1030 if config.get('import_partial') and filename:
1031 with open(config.get('import_partial'), 'rb') as partial_import_file:
1032 data = pickle.load(partial_import_file)
1033 position = data.get(filename, 0)
1037 for res_id, xml_id, res, info in self._convert_records(cr, uid,
1038 self._extract_records(cr, uid, fields, datas,
1039 context=context, log=log),
1040 context=context, log=log):
1041 ir_model_data_obj._update(cr, uid, self._name,
1042 current_module, res, mode=mode, xml_id=xml_id,
1043 noupdate=noupdate, res_id=res_id, context=context)
1044 position = info.get('rows', {}).get('to', 0) + 1
1045 if config.get('import_partial') and filename and (not (position%100)):
1046 with open(config.get('import_partial'), 'rb') as partial_import:
1047 data = pickle.load(partial_import)
1048 data[filename] = position
1049 with open(config.get('import_partial'), 'wb') as partial_import:
1050 pickle.dump(data, partial_import)
1051 if context.get('defer_parent_store_computation'):
1052 self._parent_store_compute(cr)
1054 except Exception, e:
1056 return -1, {}, 'Line %d : %s' % (position + 1, tools.ustr(e)), ''
1058 if context.get('defer_parent_store_computation'):
1059 self._parent_store_compute(cr)
1060 return position, 0, 0, 0
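# Illustrative usage sketch of import_data() (hypothetical cursor, uid and
# values; the field paths mirror the docstring above):
#   fields = ['name', 'order_line/product_id/id', 'order_line/product_uom_qty']
#   datas  = [['SO042', 'product.product_product_4', '2'],
#             ['',      'product.product_product_5', '1']]
#   sale_order_obj.import_data(cr, uid, fields, datas, current_module='sale')
#   # -> (last_imported_row, 0, 0, 0) on success, (-1, dict, message, '') on error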
1062 def load(self, cr, uid, fields, data, context=None):
1064 Attempts to load the data matrix, and returns a list of ids (or
1065 ``False`` if there was an error and no id could be generated) and a
1068 The ids are those of the records created and saved (in database), in
1069 the same order they were extracted from the file. They can be passed
1070 directly to :meth:`~read`
1072 :param fields: list of fields to import, at the same index as the corresponding data
1073 :type fields: list(str)
1074 :param data: row-major matrix of data to import
1075 :type data: list(list(str))
1076 :param dict context:
1077 :returns: {ids: list(int)|False, messages: [Message]}
1079 cr.execute('SAVEPOINT model_load')
1082 fields = map(fix_import_export_id_paths, fields)
1083 ModelData = self.pool['ir.model.data']
ModelData.clear_caches()
1085 fg = self.fields_get(cr, uid, context=context)
1092 for id, xid, record, info in self._convert_records(cr, uid,
1093 self._extract_records(cr, uid, fields, data,
1094 context=context, log=messages.append),
1095 context=context, log=messages.append):
1097 cr.execute('SAVEPOINT model_load_save')
1098 except psycopg2.InternalError, e:
1099 # broken transaction, exit and hope the source error was
1101 if not any(message['type'] == 'error' for message in messages):
1102 messages.append(dict(info, type='error',message=
1103 u"Unknown database error: '%s'" % e))
1106 ids.append(ModelData._update(cr, uid, self._name,
1107 current_module, record, mode=mode, xml_id=xid,
1108 noupdate=noupdate, res_id=id, context=context))
1109 cr.execute('RELEASE SAVEPOINT model_load_save')
1110 except psycopg2.Warning, e:
1111 messages.append(dict(info, type='warning', message=str(e)))
1112 cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
1113 except psycopg2.Error, e:
1114 messages.append(dict(
1116 **PGERROR_TO_OE[e.pgcode](self, fg, info, e)))
1117 # Failed to write, log to messages, rollback savepoint (to
1118 # avoid broken transaction) and keep going
1119 cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
1120 except Exception, e:
1121 message = (_('Unknown error during import:') +
1122 ' %s: %s' % (type(e), unicode(e)))
1123 moreinfo = _('Resolve other errors first')
1124 messages.append(dict(info, type='error',
1127 # Failed for some reason, perhaps due to invalid data supplied,
1128 # rollback savepoint and keep going
1129 cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
1130 if any(message['type'] == 'error' for message in messages):
1131 cr.execute('ROLLBACK TO SAVEPOINT model_load')
1133 return {'ids': ids, 'messages': messages}
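# Illustrative usage sketch of load() (hypothetical cursor, uid and values;
# the row-major matrix follows the docstring above):
#   result = partner_obj.load(cr, uid,
#       ['name', 'email'],
#       [['Gemini Corp', 'info@example.com'],
#        ['Ada Lovelace', 'ada@example.com']])
#   # result -> {'ids': [<new ids>], 'messages': []} on success,
#   #           {'ids': False, 'messages': [...]} when an error was logged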
1135 def _extract_records(self, cr, uid, fields_, data,
1136 context=None, log=lambda a: None):
1137 """ Generates record dicts from the data sequence.
1139 The result is a generator of dicts mapping field names to raw
1140 (unconverted, unvalidated) values.
1142 For relational fields, if sub-fields were provided the value will be
1143 a list of sub-records
1145 The following sub-fields may be set on the record (by key):
1146 * None is the name_get for the record (to use with name_create/name_search)
1147 * "id" is the External ID for the record
1148 * ".id" is the Database ID for the record
1150 columns = dict((k, v.column) for k, v in self._all_columns.iteritems())
1151 # Fake columns to avoid special cases in extractor
1152 columns[None] = fields.char('rec_name')
1153 columns['id'] = fields.char('External ID')
1154 columns['.id'] = fields.integer('Database ID')
1156 # m2o fields can't be on multiple lines so exclude them from the
1157 # is_relational field rows filter, but special-case it later on to
1158 # be handled with relational fields (as it can have subfields)
1159 is_relational = lambda field: columns[field]._type in ('one2many', 'many2many', 'many2one')
1160 get_o2m_values = itemgetter_tuple(
1161 [index for index, field in enumerate(fields_)
1162 if columns[field[0]]._type == 'one2many'])
1163 get_nono2m_values = itemgetter_tuple(
1164 [index for index, field in enumerate(fields_)
1165 if columns[field[0]]._type != 'one2many'])
1166 # Checks if the provided row has any non-empty non-relational field
1167 def only_o2m_values(row, f=get_nono2m_values, g=get_o2m_values):
1168 return any(g(row)) and not any(f(row))
1172 if index >= len(data): return
1175 # copy non-relational fields to record dict
1176 record = dict((field[0], value)
1177 for field, value in itertools.izip(fields_, row)
1178 if not is_relational(field[0]))
1180 # Get all following rows which have relational values attached to
1181 # the current record (no non-relational values)
1182 record_span = itertools.takewhile(
1183 only_o2m_values, itertools.islice(data, index + 1, None))
1184 # stitch record row back on for relational fields
1185 record_span = list(itertools.chain([row], record_span))
1186 for relfield in set(
1187 field[0] for field in fields_
1188 if is_relational(field[0])):
1189 column = columns[relfield]
1190 # FIXME: how to not use _obj without relying on fields_get?
1191 Model = self.pool[column._obj]
1193 # get only cells for this sub-field, should be strictly
1194 # non-empty, field path [None] is for name_get column
1195 indices, subfields = zip(*((index, field[1:] or [None])
1196 for index, field in enumerate(fields_)
1197 if field[0] == relfield))
1199 # return all rows which have at least one value for the
1200 # subfields of relfield
1201 relfield_data = filter(any, map(itemgetter_tuple(indices), record_span))
1202 record[relfield] = [subrecord
1203 for subrecord, _subinfo in Model._extract_records(
1204 cr, uid, subfields, relfield_data,
1205 context=context, log=log)]
1207 yield record, {'rows': {
1209 'to': index + len(record_span) - 1
1211 index += len(record_span)
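# Illustrative sketch of _extract_records() (hypothetical one2many field
# 'order_line' with a 'product_uom_qty' sub-field): with
#   fields_ = [['name'], ['order_line', 'product_uom_qty']]
#   data    = [['SO042', '2'],
#              ['',      '1']]
# a single record spanning both rows is yielded:
#   ({'name': 'SO042',
#     'order_line': [{'product_uom_qty': '2'}, {'product_uom_qty': '1'}]},
#    {'rows': {..., 'to': 1}})   # the 'from' entry of the dict is elided above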
1213 def _convert_records(self, cr, uid, records,
1214 context=None, log=lambda a: None):
1215 """ Converts records from the source iterable (recursive dicts of
1216 strings) into forms which can be written to the database (via
1217 self.create or (ir.model.data)._update)
1219 :returns: a list of triplets of (id, xid, record)
1220 :rtype: list((int|None, str|None, dict))
1222 if context is None: context = {}
1223 Converter = self.pool['ir.fields.converter']
1224 columns = dict((k, v.column) for k, v in self._all_columns.iteritems())
1225 Translation = self.pool['ir.translation']
1227 (f, (Translation._get_source(cr, uid, self._name + ',' + f, 'field',
1228 context.get('lang'))
1230 for f, column in columns.iteritems())
1232 convert = Converter.for_model(cr, uid, self, context=context)
1234 def _log(base, field, exception):
1235 type = 'warning' if isinstance(exception, Warning) else 'error'
1236 # logs the logical (not human-readable) field name for automated
1237 # processing of responses, but injects the human-readable one in the message
1238 record = dict(base, type=type, field=field,
1239 message=unicode(exception.args[0]) % base)
1240 if len(exception.args) > 1 and exception.args[1]:
1241 record.update(exception.args[1])
1244 stream = CountingStream(records)
1245 for record, extras in stream:
1248 # name_get/name_create
1249 if None in record: pass
1256 dbid = int(record['.id'])
1258 # in case of overridden id column
1259 dbid = record['.id']
1260 if not self.search(cr, uid, [('id', '=', dbid)], context=context):
1263 record=stream.index,
1265 message=_(u"Unknown database identifier '%s'") % dbid))
1268 converted = convert(record, lambda field, err:\
1269 _log(dict(extras, record=stream.index, field=field_names[field]), field, err))
1271 yield dbid, xid, converted, dict(extras, record=stream.index)
1274 def _validate_fields(self, field_names):
1275 field_names = set(field_names)
1277 # old-style constraint methods
1278 trans = self.env['ir.translation']
1279 cr, uid, context = self.env.args
1282 for fun, msg, names in self._constraints:
1284 # validation must be context-independent; call `fun` without context
1285 valid = not (set(names) & field_names) or fun(self._model, cr, uid, ids)
1287 except Exception, e:
1288 _logger.debug('Exception while validating constraint', exc_info=True)
1290 extra_error = tools.ustr(e)
1293 res_msg = msg(self._model, cr, uid, ids, context=context)
1294 if isinstance(res_msg, tuple):
1295 template, params = res_msg
1296 res_msg = template % params
1298 res_msg = trans._get_source(self._name, 'constraint', self.env.lang, msg)
1300 res_msg += "\n\n%s\n%s" % (_('Error details:'), extra_error)
1302 _("Field(s) `%s` failed against a constraint: %s") %
1303 (', '.join(names), res_msg)
1306 raise ValidationError('\n'.join(errors))
1308 # new-style constraint methods
1309 for check in self._constraint_methods:
1310 if set(check._constrains) & field_names:
1313 except ValidationError, e:
1315 except Exception, e:
1316 raise ValidationError("Error while validating constraint\n\n%s" % tools.ustr(e))
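# Illustrative sketch (hypothetical constraint) of an old-style _constraints
# entry as iterated above: a (function, message, field_names) triple.
#   _constraints = [
#       (lambda self, cr, uid, ids: True,        # fun: the check itself
#        'The amount must be positive.',          # msg
#        ['amount']),                             # names
#   ]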
1318 def default_get(self, cr, uid, fields_list, context=None):
1319 """ default_get(fields) -> default_values
1321 Return default values for the fields in `fields_list`. Default
1322 values are determined by the context, user defaults, and the model
1325 :param fields_list: a list of field names
1326 :return: a dictionary mapping each field name to its corresponding
1327 default value; the keys of the dictionary are the fields in
1328 `fields_list` that have a default value different from ``False``.
1330 This method should not be overridden. In order to change the
1331 mechanism for determining default values, you should override method
1332 :meth:`add_default_value` instead.
1334 # trigger view init hook
1335 self.view_init(cr, uid, fields_list, context)
1337 # use a new record to determine default values; evaluate fields on the
1338 # new record and put default values in result
1339 record = self.new(cr, uid, {}, context=context)
1341 for name in fields_list:
1342 if name in self._fields:
1343 value = record[name]
1344 if name in record._cache:
1345 result[name] = value # it really is a default value
1347 # convert default values to the expected format
1348 result = self._convert_to_write(result)
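# Illustrative usage sketch of default_get() (hypothetical model and context):
# a 'default_<field>' context key takes precedence for the matching field.
#   defaults = task_obj.default_get(cr, uid, ['priority', 'user_id'],
#                                   context={'default_priority': '1'})
#   # -> e.g. {'priority': '1', 'user_id': 7}; fields whose default evaluates
#   #    to False are simply left out of the result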
1351 def add_default_value(self, field):
1352 """ Set the default value of `field` to the new record `self`.
1353 The value must be assigned to `self`.
1355 assert not self.id, "Expected new record: %s" % self
1356 cr, uid, context = self.env.args
1359 # 1. look up context
1360 key = 'default_' + name
1362 self[name] = context[key]
1365 # 2. look up ir_values
1366 # Note: performance is good, because get_defaults_dict is cached!
1367 ir_values_dict = self.env['ir.values'].get_defaults_dict(self._name)
1368 if name in ir_values_dict:
1369 self[name] = ir_values_dict[name]
1372 # 3. look up property fields
1373 # TODO: get rid of this one
1374 column = self._columns.get(name)
1375 if isinstance(column, fields.property):
1376 self[name] = self.env['ir.property'].get(name, self._name)
1379 # 4. look up _defaults
1380 if name in self._defaults:
1381 value = self._defaults[name]
1383 value = value(self._model, cr, uid, context)
1387 # 5. delegate to field
1388 field.determine_default(self)
1390 def fields_get_keys(self, cr, user, context=None):
1391 res = self._columns.keys()
1392 # TODO I believe this loop can be replaced by
1393 # res.extend(self._inherit_fields.keys())
1394 for parent in self._inherits:
1395 res.extend(self.pool[parent].fields_get_keys(cr, user, context))
1398 def _rec_name_fallback(self, cr, uid, context=None):
1399 rec_name = self._rec_name
1400 if rec_name not in self._columns:
1401 rec_name = self._columns.keys()[0] if len(self._columns.keys()) > 0 else "id"
1405 # Overload this method if you need a window title which depends on the context
1407 def view_header_get(self, cr, user, view_id=None, view_type='form', context=None):
1410 def user_has_groups(self, cr, uid, groups, context=None):
1411 """Return true if the user is at least member of one of the groups
1412 in groups_str. Typically used to resolve `groups` attribute
1413 in view and model definitions.
1415 :param str groups: comma-separated list of fully-qualified group
1416 external IDs, e.g.: ``base.group_user,base.group_system``
1417 :return: True if the current user is a member of one of the
1420 return any(self.pool['res.users'].has_group(cr, uid, group_ext_id)
1421 for group_ext_id in groups.split(','))
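# Illustrative usage sketch (group external IDs taken from the docstring):
#   if self.user_has_groups(cr, uid, 'base.group_user,base.group_system'):
#       ...  # the current user belongs to at least one of the listed groups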
1423 def _get_default_form_view(self, cr, user, context=None):
1424 """ Generates a default single-line form view using all fields
1425 of the current model except the m2m and o2m ones.
1427 :param cr: database cursor
1428 :param int user: user id
1429 :param dict context: connection context
1430 :returns: a form view as an lxml document
1431 :rtype: etree._Element
1433 view = etree.Element('form', string=self._description)
1434 group = etree.SubElement(view, 'group', col="4")
1435 for fname, field in self._fields.iteritems():
1436 if field.automatic or field.type in ('one2many', 'many2many'):
1439 etree.SubElement(group, 'field', name=fname)
1440 if field.type == 'text':
1441 etree.SubElement(group, 'newline')
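# Illustrative sketch (hypothetical model with a char field 'name' and a text
# field 'notes') of the arch generated above:
#   <form string="...">
#     <group col="4">
#       <field name="name"/>
#       <field name="notes"/>
#       <newline/>
#     </group>
#   </form>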
1444 def _get_default_search_view(self, cr, user, context=None):
1445 """ Generates a single-field search view, based on _rec_name.
1447 :param cr: database cursor
1448 :param int user: user id
1449 :param dict context: connection context
1450 :returns: a tree view as an lxml document
1451 :rtype: etree._Element
1453 view = etree.Element('search', string=self._description)
1454 etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))
1457 def _get_default_tree_view(self, cr, user, context=None):
1458 """ Generates a single-field tree view, based on _rec_name.
1460 :param cr: database cursor
1461 :param int user: user id
1462 :param dict context: connection context
1463 :returns: a tree view as an lxml document
1464 :rtype: etree._Element
1466 view = etree.Element('tree', string=self._description)
1467 etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))
1470 def _get_default_calendar_view(self, cr, user, context=None):
1471 """ Generates a default calendar view by trying to infer
1472 calendar fields from a number of pre-set attribute names
1474 :param cr: database cursor
1475 :param int user: user id
1476 :param dict context: connection context
1477 :returns: a calendar view
1478 :rtype: etree._Element
1480 def set_first_of(seq, in_, to):
1481 """Sets the first value of `seq` also found in `in_` to
1482 the `to` attribute of the view being closed over.
1484 Returns whether it found a suitable value (and set it on
1485 the attribute) or not
1493 view = etree.Element('calendar', string=self._description)
1494 etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))
1496 if self._date_name not in self._columns:
1498 for dt in ['date', 'date_start', 'x_date', 'x_date_start']:
1499 if dt in self._columns:
1500 self._date_name = dt
1505 raise except_orm(_('Invalid Object Architecture!'), _("Insufficient fields for Calendar View!"))
1506 view.set('date_start', self._date_name)
1508 set_first_of(["user_id", "partner_id", "x_user_id", "x_partner_id"],
1509 self._columns, 'color')
1511 if not set_first_of(["date_stop", "date_end", "x_date_stop", "x_date_end"],
1512 self._columns, 'date_stop'):
1513 if not set_first_of(["date_delay", "planned_hours", "x_date_delay", "x_planned_hours"],
1514 self._columns, 'date_delay'):
1516 _('Invalid Object Architecture!'),
1517 _("Insufficient fields to generate a Calendar View for %s, missing a date_stop or a date_delay" % self._name))
1521 def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
1522 """ fields_view_get([view_id | view_type='form'])
1524 Get the detailed composition of the requested view like fields, model, view architecture
1526 :param view_id: id of the view or None
1527 :param view_type: type of the view to return if view_id is None ('form', 'tree', ...)
1528 :param toolbar: true to include contextual actions
1529 :param submenu: deprecated
1530 :return: dictionary describing the composition of the requested view (including inherited views and extensions)
1531 :raise AttributeError:
1532 * if the inherited view has unknown position to work with other than 'before', 'after', 'inside', 'replace'
1533 * if some tag other than 'position' is found in parent view
1534 :raise Invalid ArchitectureError: if there is a view type other than form, tree, calendar, search, etc. defined on the structure
1538 View = self.pool['ir.ui.view']
1541 'model': self._name,
1542 'field_parent': False,
1545 # try to find a view_id if none provided
1547 # <view_type>_view_ref in context can be used to override the default view
1548 view_ref_key = view_type + '_view_ref'
1549 view_ref = context.get(view_ref_key)
1552 module, view_ref = view_ref.split('.', 1)
1553 cr.execute("SELECT res_id FROM ir_model_data WHERE model='ir.ui.view' AND module=%s AND name=%s", (module, view_ref))
1554 view_ref_res = cr.fetchone()
1556 view_id = view_ref_res[0]
1558 _logger.warning('%r requires a fully-qualified external id (got: %r for model %s). '
1559 'Please use the complete `module.view_id` form instead.', view_ref_key, view_ref,
1563 # otherwise try to find the lowest priority matching ir.ui.view
1564 view_id = View.default_view(cr, uid, self._name, view_type, context=context)
1566 # context for post-processing might be overridden
1569 # read the view with inherited views applied
1570 root_view = View.read_combined(cr, uid, view_id, fields=['id', 'name', 'field_parent', 'type', 'model', 'arch'], context=context)
1571 result['arch'] = root_view['arch']
1572 result['name'] = root_view['name']
1573 result['type'] = root_view['type']
1574 result['view_id'] = root_view['id']
1575 result['field_parent'] = root_view['field_parent']
1576 # override context for postprocessing
1577 if root_view.get('model') != self._name:
1578 ctx = dict(context, base_model_name=root_view.get('model'))
1580 # fallback on default views methods if no ir.ui.view could be found
1582 get_func = getattr(self, '_get_default_%s_view' % view_type)
1583 arch_etree = get_func(cr, uid, context)
1584 result['arch'] = etree.tostring(arch_etree, encoding='utf-8')
1585 result['type'] = view_type
1586 result['name'] = 'default'
1587 except AttributeError:
1588 raise except_orm(_('Invalid Architecture!'), _("No default view of type '%s' could be found !") % view_type)
1590 # Apply post processing, groups and modifiers etc...
1591 xarch, xfields = View.postprocess_and_fields(cr, uid, self._name, etree.fromstring(result['arch']), view_id, context=ctx)
1592 result['arch'] = xarch
1593 result['fields'] = xfields
1595 # Add related action information if asked
1597 toclean = ('report_sxw_content', 'report_rml_content', 'report_sxw', 'report_rml', 'report_sxw_content_data', 'report_rml_content_data')
1603 ir_values_obj = self.pool.get('ir.values')
1604 resprint = ir_values_obj.get(cr, uid, 'action', 'client_print_multi', [(self._name, False)], False, context)
1605 resaction = ir_values_obj.get(cr, uid, 'action', 'client_action_multi', [(self._name, False)], False, context)
1606 resrelate = ir_values_obj.get(cr, uid, 'action', 'client_action_relate', [(self._name, False)], False, context)
1607 resaction = [clean(action) for action in resaction if view_type == 'tree' or not action[2].get('multi')]
1608 resprint = [clean(print_) for print_ in resprint if view_type == 'tree' or not print_[2].get('multi')]
1609 # When multi="True" is set, the action is only displayed in the More menu of the list view
1610 resrelate = [clean(action) for action in resrelate
1611 if (action[2].get('multi') and view_type == 'tree') or (not action[2].get('multi') and view_type == 'form')]
1613 for x in itertools.chain(resprint, resaction, resrelate):
1614 x['string'] = x['name']
1616 result['toolbar'] = {
1618 'action': resaction,
1623 def get_formview_id(self, cr, uid, id, context=None):
1624 """ Return an view id to open the document with. This method is meant to be
1625 overridden in addons that want to give specific view ids for example.
1627 :param int id: id of the document to open
1631 def get_formview_action(self, cr, uid, id, context=None):
1632 """ Return an action to open the document. This method is meant to be
1633 overridden in addons that want to give specific view ids for example.
1635 :param int id: id of the document to open
1637 view_id = self.get_formview_id(cr, uid, id, context=context)
1639 'type': 'ir.actions.act_window',
1640 'res_model': self._name,
1641 'view_type': 'form',
1642 'view_mode': 'form',
1643 'views': [(view_id, 'form')],
1644 'target': 'current',
1648 def get_access_action(self, cr, uid, id, context=None):
1649 """ Return an action to open the document. This method is meant to be
1650 overridden in addons that want to give specific access to the document.
1651 By default it opens the formview of the document.
1653 :param int id: id of the document to open
1655 return self.get_formview_action(cr, uid, id, context=context)
1657 def _view_look_dom_arch(self, cr, uid, node, view_id, context=None):
1658 return self.pool['ir.ui.view'].postprocess_and_fields(
1659 cr, uid, self._name, node, view_id, context=context)
1661 def search_count(self, cr, user, args, context=None):
1662 """ search_count(args) -> int
1664 Returns the number of records in the current model matching :ref:`the
1665 provided domain <reference/orm/domains>`.
1667 res = self.search(cr, user, args, context=context, count=True)
1668 if isinstance(res, list):
1672 @api.returns('self')
1673 def search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False):
1674 """ search(args[, offset=0][, limit=None][, order=None][, count=False])
1676 Searches for records based on the ``args``
1677 :ref:`search domain <reference/orm/domains>`.
1679 :param args: :ref:`A search domain <reference/orm/domains>`. Use an empty
1680 list to match all records.
1681 :param int offset: number of results to ignore (default: none)
1682 :param int limit: maximum number of records to return (default: all)
1683 :param str order: sort string
1684 :param bool count: if ``True``, the call should return the number of
1685 records matching ``args`` rather than the records
1687 :returns: at most ``limit`` records matching the search criteria
1689 :raise AccessError: * if the user tries to bypass access rules for read on the requested object.
1691 return self._search(cr, user, args, offset=offset, limit=limit, order=order, context=context, count=count)
1694 # display_name, name_get, name_create, name_search
1697 @api.depends(lambda self: (self._rec_name,) if self._rec_name else ())
1698 def _compute_display_name(self):
1699 names = dict(self.name_get())
1701 record.display_name = names.get(record.id, False)
1705 """ name_get() -> [(id, name), ...]
1707 Returns a textual representation for the records in ``self``.
1708 By default this is the value of the ``display_name`` field.
1710 :return: list of pairs ``(id, text_repr)`` for each record
1714 name = self._rec_name
1715 if name in self._fields:
1716 convert = self._fields[name].convert_to_display_name
1718 result.append((record.id, convert(record[name])))
1721 result.append((record.id, "%s,%s" % (record._name, record.id)))
1726 def name_create(self, name):
1727 """ name_create(name) -> record
1729 Create a new record by calling :meth:`~.create` with only one value
1730 provided: the display name of the new record.
1732 The new record will be initialized with any default values
1733 applicable to this model, or provided through the context. The usual
1734 behavior of :meth:`~.create` applies.
1736 :param name: display name of the record to create
1738 :return: the :meth:`~.name_get` pair value of the created record
1741 record = self.create({self._rec_name: name})
1742 return record.name_get()[0]
1744 _logger.warning("Cannot execute name_create, no _rec_name defined on %s", self._name)
1748 def name_search(self, name='', args=None, operator='ilike', limit=100):
1749 """ name_search(name='', args=None, operator='ilike', limit=100) -> records
1751 Search for records that have a display name matching the given
1752 `name` pattern when compared with the given `operator`, while also
1753 matching the optional search domain (`args`).
1755 This is used for example to provide suggestions based on a partial
1756 value for a relational field. It may sometimes be seen as the inverse
1757 function of :meth:`~.name_get`, but it is not guaranteed to be.
1759 This method is equivalent to calling :meth:`~.search` with a search
1760 domain based on ``display_name`` and then :meth:`~.name_get` on the
1761 result of the search.
1763 :param str name: the name pattern to match
1764 :param list args: optional search domain (see :meth:`~.search` for
1765 syntax), specifying further restrictions
1766 :param str operator: domain operator for matching `name`, such as
1767 ``'like'`` or ``'='``.
1768 :param int limit: optional max number of records to return
1770 :return: list of pairs ``(id, text_repr)`` for all matching records.
1772 return self._name_search(name, args, operator, limit=limit)
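# Illustrative sketch (not part of the original module): name_search() backs the
# autocompletion of many2one widgets. A typical new-API call, with arbitrary
# values and assuming ``env`` is an Environment:
#
#     # at most 10 (id, display_name) pairs for partners whose name contains 'agro'
#     env['res.partner'].name_search(name='agro', operator='ilike', limit=10)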
1774 def _name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100, name_get_uid=None):
1775 # private implementation of name_search, allows passing a dedicated user
1776 # for the name_get part to solve some access rights issues
1777 args = list(args or [])
1778 # optimize out the default criterion of ``ilike ''`` that matches everything
1779 if not self._rec_name:
1780 _logger.warning("Cannot execute name_search, no _rec_name defined on %s", self._name)
1781 elif not (name == '' and operator == 'ilike'):
1782 args += [(self._rec_name, operator, name)]
1783 access_rights_uid = name_get_uid or user
1784 ids = self._search(cr, user, args, limit=limit, context=context, access_rights_uid=access_rights_uid)
1785 res = self.name_get(cr, access_rights_uid, ids, context)
1788 def read_string(self, cr, uid, id, langs, fields=None, context=None):
1791 self.pool.get('ir.translation').check_access_rights(cr, uid, 'read')
1793 fields = self._columns.keys() + self._inherit_fields.keys()
1794 #FIXME: collect all calls to _get_source into one SQL call.
1796 res[lang] = {'code': lang}
1798 if f in self._columns:
1799 res_trans = self.pool.get('ir.translation')._get_source(cr, uid, self._name+','+f, 'field', lang)
1801 res[lang][f] = res_trans
1803 res[lang][f] = self._columns[f].string
1804 for table in self._inherits:
1805 cols = intersect(self._inherit_fields.keys(), fields)
1806 res2 = self.pool[table].read_string(cr, uid, id, langs, cols, context)
1809 res[lang]['code'] = lang
1810 for f in res2[lang]:
1811 res[lang][f] = res2[lang][f]
1814 def write_string(self, cr, uid, id, langs, vals, context=None):
1815 self.pool.get('ir.translation').check_access_rights(cr, uid, 'write')
1816 #FIXME: try to only call the translation in one SQL
1819 if field in self._columns:
1820 src = self._columns[field].string
1821 self.pool.get('ir.translation')._set_ids(cr, uid, self._name+','+field, 'field', lang, [0], vals[field], src)
1822 for table in self._inherits:
1823 cols = intersect(self._inherit_fields.keys(), vals)
1825 self.pool[table].write_string(cr, uid, id, langs, vals, context)
1828 def _add_missing_default_values(self, cr, uid, values, context=None):
1829 # avoid overriding inherited values when parent is set
1831 for tables, parent_field in self._inherits.items():
1832 if parent_field in values:
1833 avoid_tables.append(tables)
1835 # compute missing fields
1836 missing_defaults = set()
1837 for field in self._columns.keys():
1838 if not field in values:
1839 missing_defaults.add(field)
1840 for field in self._inherit_fields.keys():
1841 if (field not in values) and (self._inherit_fields[field][0] not in avoid_tables):
1842 missing_defaults.add(field)
1843 # discard magic fields
1844 missing_defaults -= set(MAGIC_COLUMNS)
1846 if missing_defaults:
1847 # override defaults with the provided values, never allow the other way around
1848 defaults = self.default_get(cr, uid, list(missing_defaults), context)
1850 if ((dv in self._columns and self._columns[dv]._type == 'many2many') \
1851 or (dv in self._inherit_fields and self._inherit_fields[dv][2]._type == 'many2many')) \
1852 and defaults[dv] and isinstance(defaults[dv][0], (int, long)):
1853 defaults[dv] = [(6, 0, defaults[dv])]
1854 if (dv in self._columns and self._columns[dv]._type == 'one2many' \
1855 or (dv in self._inherit_fields and self._inherit_fields[dv][2]._type == 'one2many')) \
1856 and isinstance(defaults[dv], (list, tuple)) and defaults[dv] and isinstance(defaults[dv][0], dict):
1857 defaults[dv] = [(0, 0, x) for x in defaults[dv]]
1858 defaults.update(values)
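# Illustrative note (not part of the original module): the two conversions above
# rewrite plain default values into the one2many/many2many command tuples
# expected by create()/write(), e.g. (hypothetical values):
#
#     [5, 8]               ->  [(6, 0, [5, 8])]              # m2m: replace ids
#     [{'name': 'line'}]   ->  [(0, 0, {'name': 'line'})]    # o2m: create lines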
1862 def clear_caches(self):
1863 """ Clear the caches
1865 This clears the caches associated to methods decorated with
1866 ``tools.ormcache`` or ``tools.ormcache_multi``.
1869 self._ormcache.clear()
1870 self.pool._any_cache_cleared = True
1871 except AttributeError:
1875 def _read_group_fill_results(self, cr, uid, domain, groupby, remaining_groupbys,
1876 aggregated_fields, count_field,
1877 read_group_result, read_group_order=None, context=None):
1878 """Helper method for filling in empty groups for all possible values of
1879 the field being grouped by"""
1881 # self._group_by_full should map groupable fields to a method that returns
1882 # a list of all aggregated values that we want to display for this field,
1883 # in the form of a m2o-like pair (key,label).
1884 # This is useful to implement kanban views for instance, where all columns
1885 # should be displayed even if they don't contain any record.
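# Illustrative sketch (not part of the original module): a model wanting empty
# columns in its kanban view could declare something like the following, where
# 'stage_id' and _read_group_stage_ids are hypothetical:
#
#     _group_by_full = {
#         'stage_id': _read_group_stage_ids,   # -> ([(id, name), ...], {id: folded_bool})
#     }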
1887 # Grab the list of all groups that should be displayed, including all present groups
1888 present_group_ids = [x[groupby][0] for x in read_group_result if x[groupby]]
1889 all_groups,folded = self._group_by_full[groupby](self, cr, uid, present_group_ids, domain,
1890 read_group_order=read_group_order,
1891 access_rights_uid=openerp.SUPERUSER_ID,
1894 result_template = dict.fromkeys(aggregated_fields, False)
1895 result_template[groupby + '_count'] = 0
1896 if remaining_groupbys:
1897 result_template['__context'] = {'group_by': remaining_groupbys}
1899 # Merge the left_side (current results as dicts) with the right_side (all
1900 # possible values as m2o pairs). Both lists are supposed to be using the
1901 # same ordering, and can be merged in one pass.
1904 def append_left(left_side):
1905 grouped_value = left_side[groupby] and left_side[groupby][0]
1906 if not grouped_value in known_values:
1907 result.append(left_side)
1908 known_values[grouped_value] = left_side
1910 known_values[grouped_value].update({count_field: left_side[count_field]})
1911 def append_right(right_side):
1912 grouped_value = right_side[0]
1913 if not grouped_value in known_values:
1914 line = dict(result_template)
1915 line[groupby] = right_side
1916 line['__domain'] = [(groupby,'=',grouped_value)] + domain
1918 known_values[grouped_value] = line
1919 while read_group_result or all_groups:
1920 left_side = read_group_result[0] if read_group_result else None
1921 right_side = all_groups[0] if all_groups else None
1922 assert left_side is None or left_side[groupby] is False \
1923 or isinstance(left_side[groupby], (tuple,list)), \
1924 'M2O-like pair expected, got %r' % left_side[groupby]
1925 assert right_side is None or isinstance(right_side, (tuple,list)), \
1926 'M2O-like pair expected, got %r' % right_side
1927 if left_side is None:
1928 append_right(all_groups.pop(0))
1929 elif right_side is None:
1930 append_left(read_group_result.pop(0))
1931 elif left_side[groupby] == right_side:
1932 append_left(read_group_result.pop(0))
1933 all_groups.pop(0) # discard right_side
1934 elif not left_side[groupby] or not left_side[groupby][0]:
1935 # left side == "Undefined" entry, not present on right_side
1936 append_left(read_group_result.pop(0))
1938 append_right(all_groups.pop(0))
1942 r['__fold'] = folded.get(r[groupby] and r[groupby][0], False)
1945 def _read_group_prepare(self, orderby, aggregated_fields, annotated_groupbys, query):
1947 Prepares the GROUP BY and ORDER BY terms for the read_group method. Adds the missing JOIN clause
1948 to the query if the order should be computed against a m2o field.
1949 :param orderby: the orderby definition in the form "%(field)s %(order)s"
1950 :param aggregated_fields: list of aggregated fields in the query
1951 :param annotated_groupbys: list of dictionaries returned by _read_group_process_groupby
1952 These dictionaries contain the qualified name of each groupby
1953 (fully qualified SQL name for the corresponding field),
1954 and the (non-raw) field name.
1955 :param osv.Query query: the query under construction
1956 :return: (groupby_terms, orderby_terms)
1959 groupby_terms = [gb['qualified_field'] for gb in annotated_groupbys]
1960 groupby_fields = [gb['groupby'] for gb in annotated_groupbys]
1962 return groupby_terms, orderby_terms
1964 self._check_qorder(orderby)
1965 for order_part in orderby.split(','):
1966 order_split = order_part.split()
1967 order_field = order_split[0]
1968 if order_field in groupby_fields:
1970 if self._all_columns[order_field.split(':')[0]].column._type == 'many2one':
1971 order_clause = self._generate_order_by(order_part, query).replace('ORDER BY ', '')
1973 orderby_terms.append(order_clause)
1974 groupby_terms += [order_term.split()[0] for order_term in order_clause.split(',')]
1976 order = '"%s" %s' % (order_field, '' if len(order_split) == 1 else order_split[1])
1977 orderby_terms.append(order)
1978 elif order_field in aggregated_fields:
1979 orderby_terms.append(order_part)
1981 # Cannot order by a field that will not appear in the results (needs to be grouped or aggregated)
1982 _logger.warn('%s: read_group order by `%s` ignored, cannot sort on empty columns (not grouped/aggregated)',
1983 self._name, order_part)
1984 return groupby_terms, orderby_terms
1986 def _read_group_process_groupby(self, gb, query, context):
1988 Helper method to collect important information about groupbys: raw
1989 field name, type, time information, qualified name, ...
1991 split = gb.split(':')
1992 field_type = self._all_columns[split[0]].column._type
1993 gb_function = split[1] if len(split) == 2 else None
1994 temporal = field_type in ('date', 'datetime')
1995 tz_convert = field_type == 'datetime' and context.get('tz') in pytz.all_timezones
1996 qualified_field = self._inherits_join_calc(split[0], query)
1999 'day': 'dd MMM YYYY',
2000 'week': "'W'w YYYY",
2001 'month': 'MMMM YYYY',
2002 'quarter': 'QQQ YYYY',
2006 'day': dateutil.relativedelta.relativedelta(days=1),
2007 'week': datetime.timedelta(days=7),
2008 'month': dateutil.relativedelta.relativedelta(months=1),
2009 'quarter': dateutil.relativedelta.relativedelta(months=3),
2010 'year': dateutil.relativedelta.relativedelta(years=1)
2013 qualified_field = "timezone('%s', timezone('UTC',%s))" % (context.get('tz', 'UTC'), qualified_field)
2014 qualified_field = "date_trunc('%s', %s)" % (gb_function or 'month', qualified_field)
2015 if field_type == 'boolean':
2016 qualified_field = "coalesce(%s,false)" % qualified_field
2021 'display_format': display_formats[gb_function or 'month'] if temporal else None,
2022 'interval': time_intervals[gb_function or 'month'] if temporal else None,
2023 'tz_convert': tz_convert,
2024 'qualified_field': qualified_field
2027 def _read_group_prepare_data(self, key, value, groupby_dict, context):
2029 Helper method to sanitize the data received by read_group. None
2030 values are converted to False, and date/datetime values are formatted
2031 and corrected according to the timezones.
2033 value = False if value is None else value
2034 gb = groupby_dict.get(key)
2035 if gb and gb['type'] in ('date', 'datetime') and value:
2036 if isinstance(value, basestring):
2037 dt_format = DEFAULT_SERVER_DATETIME_FORMAT if gb['type'] == 'datetime' else DEFAULT_SERVER_DATE_FORMAT
2038 value = datetime.datetime.strptime(value, dt_format)
2039 if gb['tz_convert']:
2040 value = pytz.timezone(context['tz']).localize(value)
2043 def _read_group_get_domain(self, groupby, value):
2045 Helper method to construct the domain corresponding to a groupby and
2046 a given value. This is mostly relevant for date/datetime.
2048 if groupby['type'] in ('date', 'datetime') and value:
2049 dt_format = DEFAULT_SERVER_DATETIME_FORMAT if groupby['type'] == 'datetime' else DEFAULT_SERVER_DATE_FORMAT
2050 domain_dt_begin = value
2051 domain_dt_end = value + groupby['interval']
2052 if groupby['tz_convert']:
2053 domain_dt_begin = domain_dt_begin.astimezone(pytz.utc)
2054 domain_dt_end = domain_dt_end.astimezone(pytz.utc)
2055 return [(groupby['field'], '>=', domain_dt_begin.strftime(dt_format)),
2056 (groupby['field'], '<', domain_dt_end.strftime(dt_format))]
2057 if groupby['type'] == 'many2one' and value:
2059 return [(groupby['field'], '=', value)]
2061 def _read_group_format_result(self, data, annotated_groupbys, groupby, groupby_dict, domain, context):
2063 Helper method to format the data contained in the dictionary data by
2064 adding the domain corresponding to its values, the groupbys in the
2065 context and by properly formatting the date/datetime values.
2067 domain_group = [dom for gb in annotated_groupbys for dom in self._read_group_get_domain(gb, data[gb['groupby']])]
2068 for k,v in data.iteritems():
2069 gb = groupby_dict.get(k)
2070 if gb and gb['type'] in ('date', 'datetime') and v:
2071 data[k] = babel.dates.format_date(v, format=gb['display_format'], locale=context.get('lang', 'en_US'))
2073 data['__domain'] = domain_group + domain
2074 if len(groupby) - len(annotated_groupbys) >= 1:
2075 data['__context'] = { 'group_by': groupby[len(annotated_groupbys):]}
2079 def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False, lazy=True):
2081 Get the list of records in list view grouped by the given ``groupby`` fields
2083 :param cr: database cursor
2084 :param uid: current user id
2085 :param domain: list specifying search criteria [['field_name', 'operator', 'value'], ...]
2086 :param list fields: list of fields present in the list view specified on the object
2087 :param list groupby: list of groupby descriptions by which the records will be grouped.
2088 A groupby description is either a field (then it will be grouped by that field)
2089 or a string 'field:groupby_function'. Right now, the only functions supported
2090 are 'day', 'week', 'month', 'quarter' or 'year', and they only make sense for
2091 date/datetime fields.
2092 :param int offset: optional number of records to skip
2093 :param int limit: optional max number of records to return
2094 :param dict context: context arguments, like lang, time zone.
2095 :param list orderby: optional ``order by`` specification, for
2096 overriding the natural sort ordering of the
2097 groups, see also :py:meth:`~osv.osv.osv.search`
2098 (supported only for many2one fields currently)
2099 :param bool lazy: if true, the results are only grouped by the first groupby and the
2100 remaining groupbys are put in the __context key. If false, all the groupbys are
2102 :return: list of dictionaries (one dictionary for each record) containing:
2104 * the values of fields grouped by the fields in ``groupby`` argument
2105 * __domain: list of tuples specifying the search criteria
2106 * __context: dictionary with argument like ``groupby``
2107 :rtype: [{'field_name_1': value, ...}, ...]
2108 :raise AccessError: * if user has no read rights on the requested object
2109 * if user tries to bypass access rules for read on the requested object
2113 self.check_access_rights(cr, uid, 'read')
2114 query = self._where_calc(cr, uid, domain, context=context)
2115 fields = fields or self._columns.keys()
2117 groupby = [groupby] if isinstance(groupby, basestring) else groupby
2118 groupby_list = groupby[:1] if lazy else groupby
2119 annotated_groupbys = [self._read_group_process_groupby(gb, query, context)
2120 for gb in groupby_list]
2121 groupby_fields = [g['field'] for g in annotated_groupbys]
2122 order = orderby or ','.join([g for g in groupby_list])
2123 groupby_dict = {gb['groupby']: gb for gb in annotated_groupbys}
2125 self._apply_ir_rules(cr, uid, query, 'read', context=context)
2126 for gb in groupby_fields:
2127 assert gb in fields, "Fields in 'groupby' must appear in the list of fields to read (perhaps it's missing in the list view?)"
2128 groupby_def = self._columns.get(gb) or (self._inherit_fields.get(gb) and self._inherit_fields.get(gb)[2])
2129 assert groupby_def and groupby_def._classic_write, "Fields in 'groupby' must be regular database-persisted fields (no function or related fields), or function fields with store=True"
2130 if not (gb in self._all_columns):
2131 # Don't allow arbitrary values, as this would be a SQL injection vector!
2132 raise except_orm(_('Invalid group_by'),
2133 _('Invalid group_by specification: "%s".\nA group_by specification must be a list of valid fields.')%(gb,))
2135 aggregated_fields = [
2137 if f not in ('id', 'sequence')
2138 if f not in groupby_fields
2139 if f in self._all_columns
2140 if self._all_columns[f].column._type in ('integer', 'float')
2141 if getattr(self._all_columns[f].column, '_classic_write')]
2143 field_formatter = lambda f: (self._all_columns[f].column.group_operator or 'sum', self._inherits_join_calc(f, query), f)
2144 select_terms = ["%s(%s) AS %s" % field_formatter(f) for f in aggregated_fields]
2146 for gb in annotated_groupbys:
2147 select_terms.append('%s as "%s" ' % (gb['qualified_field'], gb['groupby']))
2149 groupby_terms, orderby_terms = self._read_group_prepare(order, aggregated_fields, annotated_groupbys, query)
2150 from_clause, where_clause, where_clause_params = query.get_sql()
2151 if lazy and (len(groupby_fields) >= 2 or not context.get('group_by_no_leaf')):
2152 count_field = groupby_fields[0] if len(groupby_fields) >= 1 else '_'
2155 count_field += '_count'
2157 prefix_terms = lambda prefix, terms: (prefix + " " + ",".join(terms)) if terms else ''
2158 prefix_term = lambda prefix, term: ('%s %s' % (prefix, term)) if term else ''
2161 SELECT min(%(table)s.id) AS id, count(%(table)s.id) AS %(count_field)s %(extra_fields)s
2169 'table': self._table,
2170 'count_field': count_field,
2171 'extra_fields': prefix_terms(',', select_terms),
2172 'from': from_clause,
2173 'where': prefix_term('WHERE', where_clause),
2174 'groupby': prefix_terms('GROUP BY', groupby_terms),
2175 'orderby': prefix_terms('ORDER BY', orderby_terms),
2176 'limit': prefix_term('LIMIT', int(limit) if limit else None),
2177 'offset': prefix_term('OFFSET', int(offset) if limit else None),
2179 cr.execute(query, where_clause_params)
2180 fetched_data = cr.dictfetchall()
2182 if not groupby_fields:
2185 many2onefields = [gb['field'] for gb in annotated_groupbys if gb['type'] == 'many2one']
2187 data_ids = [r['id'] for r in fetched_data]
2188 many2onefields = list(set(many2onefields))
2189 data_dict = {d['id']: d for d in self.read(cr, uid, data_ids, many2onefields, context=context)}
2190 for d in fetched_data:
2191 d.update(data_dict[d['id']])
2193 data = map(lambda r: {k: self._read_group_prepare_data(k,v, groupby_dict, context) for k,v in r.iteritems()}, fetched_data)
2194 result = [self._read_group_format_result(d, annotated_groupbys, groupby, groupby_dict, domain, context) for d in data]
2195 if lazy and groupby_fields[0] in self._group_by_full:
2196 # Right now, read_group only fills results in lazy mode (by default).
2197 # If you need to have the empty groups in 'eager' mode, then the
2198 # method _read_group_fill_results needs to be completely reimplemented
2200 result = self._read_group_fill_results(cr, uid, domain, groupby_fields[0], groupby[len(annotated_groupbys):],
2201 aggregated_fields, count_field, result, read_group_order=order,
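# Illustrative usage sketch (not part of the original module); the model, fields
# and groupby below are arbitrary examples and assume the sale module is installed:
#
#     groups = registry['sale.order'].read_group(
#         cr, uid, domain=[('state', '!=', 'cancel')],
#         fields=['amount_total'], groupby=['date_order:month'])
#     # each returned group is a dict such as:
#     #   {'date_order:month': 'June 2014', 'date_order_count': 3,
#     #    'amount_total': 1250.0, '__domain': [...], ...}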
2205 def _inherits_join_add(self, current_model, parent_model_name, query):
2207 Add missing table SELECT and JOIN clause to ``query`` for reaching the parent table (no duplicates)
2208 :param current_model: current model object
2209 :param parent_model_name: name of the parent model for which the clauses should be added
2210 :param query: query object on which the JOIN should be added
2212 inherits_field = current_model._inherits[parent_model_name]
2213 parent_model = self.pool[parent_model_name]
2214 parent_alias, parent_alias_statement = query.add_join((current_model._table, parent_model._table, inherits_field, 'id', inherits_field), implicit=True)
2217 def _inherits_join_calc(self, field, query):
2219 Adds missing table select and join clause(s) to ``query`` for reaching
2220 the field coming from an '_inherits' parent table (no duplicates).
2222 :param field: name of inherited field to reach
2223 :param query: query object on which the JOIN should be added
2224 :return: qualified name of field, to be used in SELECT clause
2226 current_table = self
2227 parent_alias = '"%s"' % current_table._table
2228 while field in current_table._inherit_fields and not field in current_table._columns:
2229 parent_model_name = current_table._inherit_fields[field][0]
2230 parent_table = self.pool[parent_model_name]
2231 parent_alias = self._inherits_join_add(current_table, parent_model_name, query)
2232 current_table = parent_table
2233 return '%s."%s"' % (parent_alias, field)
2235 def _parent_store_compute(self, cr):
2236 if not self._parent_store:
2238 _logger.info('Computing parent left and right for table %s...', self._table)
2239 def browse_rec(root, pos=0):
2241 where = self._parent_name+'='+str(root)
2243 where = self._parent_name+' IS NULL'
2244 if self._parent_order:
2245 where += ' order by '+self._parent_order
2246 cr.execute('SELECT id FROM '+self._table+' WHERE '+where)
2248 for id in cr.fetchall():
2249 pos2 = browse_rec(id[0], pos2)
2250 cr.execute('update '+self._table+' set parent_left=%s, parent_right=%s where id=%s', (pos, pos2, root))
2252 query = 'SELECT id FROM '+self._table+' WHERE '+self._parent_name+' IS NULL'
2253 if self._parent_order:
2254 query += ' order by ' + self._parent_order
2257 for (root,) in cr.fetchall():
2258 pos = browse_rec(root, pos)
2259 self.invalidate_cache(cr, SUPERUSER_ID, ['parent_left', 'parent_right'])
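# Illustrative note (not part of the original module): parent_left/parent_right
# implement a nested-set encoding of the hierarchy, so the descendants of a
# record can be fetched with a simple range test instead of a recursive query:
#
#     descendant_ids = self.search(cr, uid, [
#         ('parent_left', '>', record.parent_left),
#         ('parent_left', '<', record.parent_right),
#     ])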
2262 def _update_store(self, cr, f, k):
2263 _logger.info("storing computed values of fields.function '%s'", k)
2264 ss = self._columns[k]._symbol_set
2265 update_query = 'UPDATE "%s" SET "%s"=%s WHERE id=%%s' % (self._table, k, ss[0])
2266 cr.execute('select id from '+self._table)
2267 ids_lst = map(lambda x: x[0], cr.fetchall())
2269 iids = ids_lst[:AUTOINIT_RECALCULATE_STORED_FIELDS]
2270 ids_lst = ids_lst[AUTOINIT_RECALCULATE_STORED_FIELDS:]
2271 res = f.get(cr, self, iids, k, SUPERUSER_ID, {})
2272 for key, val in res.items():
2275 # if val is a many2one, just write the ID
2276 if type(val) == tuple:
2278 if val is not False:
2279 cr.execute(update_query, (ss[1](val), key))
2281 def _check_selection_field_value(self, cr, uid, field, value, context=None):
2282 """Raise except_orm if value is not among the valid values for the selection field"""
2283 if self._columns[field]._type == 'reference':
2284 val_model, val_id_str = value.split(',', 1)
2287 val_id = long(val_id_str)
2291 raise except_orm(_('ValidateError'),
2292 _('Invalid value for reference field "%s.%s" (last part must be a non-zero integer): "%s"') % (self._table, field, value))
2296 if isinstance(self._columns[field].selection, (tuple, list)):
2297 if val in dict(self._columns[field].selection):
2299 elif val in dict(self._columns[field].selection(self, cr, uid, context=context)):
2301 raise except_orm(_('ValidateError'),
2302 _('The value "%s" for the field "%s.%s" is not in the selection') % (value, self._name, field))
2304 def _check_removed_columns(self, cr, log=False):
2305 # iterate on the database columns to drop the NOT NULL constraints
2306 # of fields which were required but have been removed (or will be added by another module)
2307 columns = [c for c in self._columns if not (isinstance(self._columns[c], fields.function) and not self._columns[c].store)]
2308 columns += MAGIC_COLUMNS
2309 cr.execute("SELECT a.attname, a.attnotnull"
2310 " FROM pg_class c, pg_attribute a"
2311 " WHERE c.relname=%s"
2312 " AND c.oid=a.attrelid"
2313 " AND a.attisdropped=%s"
2314 " AND pg_catalog.format_type(a.atttypid, a.atttypmod) NOT IN ('cid', 'tid', 'oid', 'xid')"
2315 " AND a.attname NOT IN %s", (self._table, False, tuple(columns))),
2317 for column in cr.dictfetchall():
2319 _logger.debug("column %s is in the table %s but not in the corresponding object %s",
2320 column['attname'], self._table, self._name)
2321 if column['attnotnull']:
2322 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, column['attname']))
2323 _schema.debug("Table '%s': column '%s': dropped NOT NULL constraint",
2324 self._table, column['attname'])
2326 def _save_constraint(self, cr, constraint_name, type):
2328 Record the creation of a constraint for this model, to make it possible
2329 to delete it later when the module is uninstalled. Type can be either
2330 'f' or 'u', depending on whether the constraint is a foreign key or not.
2332 if not self._module:
2333 # no need to save constraints for custom models as they're not part
2336 assert type in ('f', 'u')
2338 SELECT 1 FROM ir_model_constraint, ir_module_module
2339 WHERE ir_model_constraint.module=ir_module_module.id
2340 AND ir_model_constraint.name=%s
2341 AND ir_module_module.name=%s
2342 """, (constraint_name, self._module))
2345 INSERT INTO ir_model_constraint
2346 (name, date_init, date_update, module, model, type)
2347 VALUES (%s, now() AT TIME ZONE 'UTC', now() AT TIME ZONE 'UTC',
2348 (SELECT id FROM ir_module_module WHERE name=%s),
2349 (SELECT id FROM ir_model WHERE model=%s), %s)""",
2350 (constraint_name, self._module, self._name, type))
2352 def _save_relation_table(self, cr, relation_table):
2354 Record the creation of a many2many for this model, to make it possible
2355 to delete it later when the module is uninstalled.
2358 SELECT 1 FROM ir_model_relation, ir_module_module
2359 WHERE ir_model_relation.module=ir_module_module.id
2360 AND ir_model_relation.name=%s
2361 AND ir_module_module.name=%s
2362 """, (relation_table, self._module))
2364 cr.execute("""INSERT INTO ir_model_relation (name, date_init, date_update, module, model)
2365 VALUES (%s, now() AT TIME ZONE 'UTC', now() AT TIME ZONE 'UTC',
2366 (SELECT id FROM ir_module_module WHERE name=%s),
2367 (SELECT id FROM ir_model WHERE model=%s))""",
2368 (relation_table, self._module, self._name))
2369 self.invalidate_cache(cr, SUPERUSER_ID)
2371 # checked version: for direct m2o starting from `self`
2372 def _m2o_add_foreign_key_checked(self, source_field, dest_model, ondelete):
2373 assert self.is_transient() or not dest_model.is_transient(), \
2374 'Many2One relationships from non-transient Model to TransientModel are forbidden'
2375 if self.is_transient() and not dest_model.is_transient():
2376 # TransientModel relationships to regular Models are annoying
2377 # usually because they could block deletion due to the FKs.
2378 # So unless stated otherwise we default them to ondelete=cascade.
2379 ondelete = ondelete or 'cascade'
2380 fk_def = (self._table, source_field, dest_model._table, ondelete or 'set null')
2381 self._foreign_keys.add(fk_def)
2382 _schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s", *fk_def)
2384 # unchecked version: for custom cases, such as m2m relationships
2385 def _m2o_add_foreign_key_unchecked(self, source_table, source_field, dest_model, ondelete):
2386 fk_def = (source_table, source_field, dest_model._table, ondelete or 'set null')
2387 self._foreign_keys.add(fk_def)
2388 _schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s", *fk_def)
2390 def _drop_constraint(self, cr, source_table, constraint_name):
2391 cr.execute("ALTER TABLE %s DROP CONSTRAINT %s" % (source_table,constraint_name))
2393 def _m2o_fix_foreign_key(self, cr, source_table, source_field, dest_model, ondelete):
2394 # Find FK constraint(s) currently established for the m2o field,
2395 # and see whether they are stale or not
2396 cr.execute("""SELECT confdeltype as ondelete_rule, conname as constraint_name,
2397 cl2.relname as foreign_table
2398 FROM pg_constraint as con, pg_class as cl1, pg_class as cl2,
2399 pg_attribute as att1, pg_attribute as att2
2400 WHERE con.conrelid = cl1.oid
2401 AND cl1.relname = %s
2402 AND con.confrelid = cl2.oid
2403 AND array_lower(con.conkey, 1) = 1
2404 AND con.conkey[1] = att1.attnum
2405 AND att1.attrelid = cl1.oid
2406 AND att1.attname = %s
2407 AND array_lower(con.confkey, 1) = 1
2408 AND con.confkey[1] = att2.attnum
2409 AND att2.attrelid = cl2.oid
2410 AND att2.attname = %s
2411 AND con.contype = 'f'""", (source_table, source_field, 'id'))
2412 constraints = cr.dictfetchall()
2414 if len(constraints) == 1:
2415 # Is it the right constraint?
2417 if cons['ondelete_rule'] != POSTGRES_CONFDELTYPES.get((ondelete or 'set null').upper(), 'a')\
2418 or cons['foreign_table'] != dest_model._table:
2419 # Wrong FK: drop it and recreate
2420 _schema.debug("Table '%s': dropping obsolete FK constraint: '%s'",
2421 source_table, cons['constraint_name'])
2422 self._drop_constraint(cr, source_table, cons['constraint_name'])
2424 # it's all good, nothing to do!
2427 # Multiple FKs found for the same field, drop them all, and re-create
2428 for cons in constraints:
2429 _schema.debug("Table '%s': dropping duplicate FK constraints: '%s'",
2430 source_table, cons['constraint_name'])
2431 self._drop_constraint(cr, source_table, cons['constraint_name'])
2433 # (re-)create the FK
2434 self._m2o_add_foreign_key_checked(source_field, dest_model, ondelete)
2437 def _set_default_value_on_column(self, cr, column_name, context=None):
2438 # ideally should use add_default_value but fails
2439 # due to ir.values not being ready
2441 # get old-style default
2442 default = self._defaults.get(column_name)
2443 if callable(default):
2444 default = default(self, cr, SUPERUSER_ID, context)
2446 # get new_style default if no old-style
2448 record = self.new(cr, SUPERUSER_ID, context=context)
2449 field = self._fields[column_name]
2450 field.determine_default(record)
2451 defaults = dict(record._cache)
2452 if column_name in defaults:
2453 default = field.convert_to_write(defaults[column_name])
2455 column = self._columns[column_name]
2456 ss = column._symbol_set
2457 db_default = ss[1](default)
2458 # Write default if non-NULL, except for booleans for which False means
2459 # the same as NULL - this saves us an expensive query on large tables.
2460 write_default = (db_default is not None if column._type != 'boolean'
2463 _logger.debug("Table '%s': setting default value of new column %s to %r",
2464 self._table, column_name, default)
2465 query = 'UPDATE "%s" SET "%s"=%s WHERE "%s" is NULL' % (
2466 self._table, column_name, ss[0], column_name)
2467 cr.execute(query, (db_default,))
2468 # this is a disgrace
2471 def _auto_init(self, cr, context=None):
2474 Call _field_create and, unless _auto is False:
2476 - create the corresponding table in database for the model,
2477 - possibly add the parent columns in database,
2478 - possibly add the columns 'create_uid', 'create_date', 'write_uid',
2479 'write_date' in database if _log_access is True (the default),
2480 - report on database columns no more existing in _columns,
2481 - remove no more existing not null constraints,
2482 - alter existing database columns to match _columns,
2483 - create database tables to match _columns,
2484 - add database indices to match _columns,
2485 - save in self._foreign_keys a list of foreign keys to create (see
2489 self._foreign_keys = set()
2490 raise_on_invalid_object_name(self._name)
2493 store_compute = False
2494 stored_fields = [] # new-style stored fields with compute
2496 update_custom_fields = context.get('update_custom_fields', False)
2497 self._field_create(cr, context=context)
2498 create = not self._table_exist(cr)
2502 self._create_table(cr)
2505 cr.execute('SELECT min(id) FROM "%s"' % (self._table,))
2506 has_rows = cr.fetchone()[0] is not None
2509 if self._parent_store:
2510 if not self._parent_columns_exist(cr):
2511 self._create_parent_columns(cr)
2512 store_compute = True
2514 self._check_removed_columns(cr, log=False)
2516 # iterate on the "object columns"
2517 column_data = self._select_column_data(cr)
2519 for k, f in self._columns.iteritems():
2520 if k == 'id': # FIXME: maybe id should be a regular column?
2522 # Don't update custom (also called manual) fields
2523 if f.manual and not update_custom_fields:
2526 if isinstance(f, fields.one2many):
2527 self._o2m_raise_on_missing_reference(cr, f)
2529 elif isinstance(f, fields.many2many):
2530 self._m2m_raise_or_create_relation(cr, f)
2533 res = column_data.get(k)
2535 # The field is not found as-is in database; check whether it
2536 # exists under an old name.
2537 if not res and hasattr(f, 'oldname'):
2538 res = column_data.get(f.oldname)
2540 cr.execute('ALTER TABLE "%s" RENAME "%s" TO "%s"' % (self._table, f.oldname, k))
2542 column_data[k] = res
2543 _schema.debug("Table '%s': renamed column '%s' to '%s'",
2544 self._table, f.oldname, k)
2546 # The field already exists in database. Possibly
2547 # change its type, rename it, drop it or change its
2550 f_pg_type = res['typname']
2551 f_pg_size = res['size']
2552 f_pg_notnull = res['attnotnull']
2553 if isinstance(f, fields.function) and not f.store and\
2554 not getattr(f, 'nodrop', False):
2555 _logger.info('column %s (%s) converted to a function, removed from table %s',
2556 k, f.string, self._table)
2557 cr.execute('ALTER TABLE "%s" DROP COLUMN "%s" CASCADE' % (self._table, k))
2559 _schema.debug("Table '%s': dropped column '%s' with cascade",
2563 f_obj_type = get_pg_type(f) and get_pg_type(f)[0]
2568 ('text', 'char', pg_varchar(f.size), '::%s' % pg_varchar(f.size)),
2569 ('varchar', 'text', 'TEXT', ''),
2570 ('int4', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
2571 ('date', 'datetime', 'TIMESTAMP', '::TIMESTAMP'),
2572 ('timestamp', 'date', 'date', '::date'),
2573 ('numeric', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
2574 ('float8', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
2576 if f_pg_type == 'varchar' and f._type == 'char' and f_pg_size and (f.size is None or f_pg_size < f.size):
2578 with cr.savepoint():
2579 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" TYPE %s' % (self._table, k, pg_varchar(f.size)))
2580 except psycopg2.NotSupportedError:
2581 # In-place ALTER TABLE cannot be done because a view depends on this field.
2582 # Do a manual copy. This will drop the view (which will be recreated later).
2583 cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO temp_change_size' % (self._table, k))
2584 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, pg_varchar(f.size)))
2585 cr.execute('UPDATE "%s" SET "%s"=temp_change_size::%s' % (self._table, k, pg_varchar(f.size)))
2586 cr.execute('ALTER TABLE "%s" DROP COLUMN temp_change_size CASCADE' % (self._table,))
2588 _schema.debug("Table '%s': column '%s' (type varchar) changed size from %s to %s",
2589 self._table, k, f_pg_size or 'unlimited', f.size or 'unlimited')
2591 if (f_pg_type==c[0]) and (f._type==c[1]):
2592 if f_pg_type != f_obj_type:
2594 cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO __temp_type_cast' % (self._table, k))
2595 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, c[2]))
2596 cr.execute(('UPDATE "%s" SET "%s"= __temp_type_cast'+c[3]) % (self._table, k))
2597 cr.execute('ALTER TABLE "%s" DROP COLUMN __temp_type_cast CASCADE' % (self._table,))
2599 _schema.debug("Table '%s': column '%s' changed type from %s to %s",
2600 self._table, k, c[0], c[1])
2603 if f_pg_type != f_obj_type:
2607 newname = k + '_moved' + str(i)
2608 cr.execute("SELECT count(1) FROM pg_class c,pg_attribute a " \
2609 "WHERE c.relname=%s " \
2610 "AND a.attname=%s " \
2611 "AND c.oid=a.attrelid ", (self._table, newname))
2612 if not cr.fetchone()[0]:
2616 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, k))
2617 cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO "%s"' % (self._table, k, newname))
2618 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, get_pg_type(f)[1]))
2619 cr.execute("COMMENT ON COLUMN %s.\"%s\" IS %%s" % (self._table, k), (f.string,))
2620 _schema.debug("Table '%s': column '%s' has changed type (DB=%s, def=%s), data moved to column %s !",
2621 self._table, k, f_pg_type, f._type, newname)
2623 # if the field is required and hasn't got a NOT NULL constraint
2624 if f.required and f_pg_notnull == 0:
2626 self._set_default_value_on_column(cr, k, context=context)
2627 # add the NOT NULL constraint
2629 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k), log_exceptions=False)
2631 _schema.debug("Table '%s': column '%s': added NOT NULL constraint",
2634 msg = "Table '%s': unable to set a NOT NULL constraint on column '%s' !\n"\
2635 "If you want to have it, you should update the records and execute manually:\n"\
2636 "ALTER TABLE %s ALTER COLUMN %s SET NOT NULL"
2637 _schema.warning(msg, self._table, k, self._table, k)
2639 elif not f.required and f_pg_notnull == 1:
2640 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, k))
2642 _schema.debug("Table '%s': column '%s': dropped NOT NULL constraint",
2645 indexname = '%s_%s_index' % (self._table, k)
2646 cr.execute("SELECT indexname FROM pg_indexes WHERE indexname = %s and tablename = %s", (indexname, self._table))
2647 res2 = cr.dictfetchall()
2648 if not res2 and f.select:
2649 cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (self._table, k, self._table, k))
2651 if f._type == 'text':
2652 # FIXME: for fields.text columns we should try creating GIN indexes instead (seems most suitable for an ERP context)
2653 msg = "Table '%s': Adding (b-tree) index for %s column '%s'."\
2654 "This is probably useless (does not work for fulltext search) and prevents INSERTs of long texts"\
2655 " because there is a length limit for indexable btree values!\n"\
2656 "Use a search view instead if you simply want to make the field searchable."
2657 _schema.warning(msg, self._table, f._type, k)
2658 if res2 and not f.select:
2659 cr.execute('DROP INDEX "%s_%s_index"' % (self._table, k))
2661 msg = "Table '%s': dropping index for column '%s' of type '%s' as it is not required anymore"
2662 _schema.debug(msg, self._table, k, f._type)
2664 if isinstance(f, fields.many2one) or (isinstance(f, fields.function) and f._type == 'many2one' and f.store):
2665 dest_model = self.pool[f._obj]
2666 if dest_model._auto and dest_model._table != 'ir_actions':
2667 self._m2o_fix_foreign_key(cr, self._table, k, dest_model, f.ondelete)
2669 # The field doesn't exist in database. Create it if necessary.
2671 if not isinstance(f, fields.function) or f.store:
2672 # add the missing field
2673 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, get_pg_type(f)[1]))
2674 cr.execute("COMMENT ON COLUMN %s.\"%s\" IS %%s" % (self._table, k), (f.string,))
2675 _schema.debug("Table '%s': added column '%s' with definition=%s",
2676 self._table, k, get_pg_type(f)[1])
2680 self._set_default_value_on_column(cr, k, context=context)
2682 # remember the functions to call for the stored fields
2683 if isinstance(f, fields.function):
2685 if f.store is not True: # i.e. if f.store is a dict
2686 order = f.store[f.store.keys()[0]][2]
2687 todo_end.append((order, self._update_store, (f, k)))
2689 # remember new-style stored fields with compute method
2690 if k in self._fields and self._fields[k].depends:
2691 stored_fields.append(self._fields[k])
2693 # and add constraints if needed
2694 if isinstance(f, fields.many2one) or (isinstance(f, fields.function) and f._type == 'many2one' and f.store):
2695 if f._obj not in self.pool:
2696 raise except_orm('Programming Error', 'There is no reference available for %s' % (f._obj,))
2697 dest_model = self.pool[f._obj]
2698 ref = dest_model._table
2699 # ir_actions is inherited so foreign key doesn't work on it
2700 if dest_model._auto and ref != 'ir_actions':
2701 self._m2o_add_foreign_key_checked(k, dest_model, f.ondelete)
2703 cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (self._table, k, self._table, k))
2707 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k))
2708 _schema.debug("Table '%s': column '%s': added a NOT NULL constraint",
2711 msg = "WARNING: unable to set column %s of table %s not null !\n"\
2712 "Try to re-run: openerp-server --update=module\n"\
2713 "If it doesn't work, update records and execute manually:\n"\
2714 "ALTER TABLE %s ALTER COLUMN %s SET NOT NULL"
2715 _logger.warning(msg, k, self._table, self._table, k, exc_info=True)
2719 cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (self._table,))
2720 create = not bool(cr.fetchone())
2722 cr.commit() # start a new transaction
2725 self._add_sql_constraints(cr)
2728 self._execute_sql(cr)
2731 self._parent_store_compute(cr)
2735 # trigger computation of new-style stored fields with a compute
2737 _logger.info("Storing computed values of %s fields %s",
2738 self._name, ', '.join(sorted(f.name for f in stored_fields)))
2739 recs = self.browse(cr, SUPERUSER_ID, [], {'active_test': False})
2740 recs = recs.search([])
2742 map(recs._recompute_todo, stored_fields)
2745 todo_end.append((1000, func, ()))
2749 def _auto_end(self, cr, context=None):
2750 """ Create the foreign keys recorded by _auto_init. """
2751 for t, k, r, d in self._foreign_keys:
2752 cr.execute('ALTER TABLE "%s" ADD FOREIGN KEY ("%s") REFERENCES "%s" ON DELETE %s' % (t, k, r, d))
2753 self._save_constraint(cr, "%s_%s_fkey" % (t, k), 'f')
2755 del self._foreign_keys
2758 def _table_exist(self, cr):
2759 cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (self._table,))
2763 def _create_table(self, cr):
2764 cr.execute('CREATE TABLE "%s" (id SERIAL NOT NULL, PRIMARY KEY(id))' % (self._table,))
2765 cr.execute(("COMMENT ON TABLE \"%s\" IS %%s" % self._table), (self._description,))
2766 _schema.debug("Table '%s': created", self._table)
2769 def _parent_columns_exist(self, cr):
2770 cr.execute("""SELECT c.relname
2771 FROM pg_class c, pg_attribute a
2772 WHERE c.relname=%s AND a.attname=%s AND c.oid=a.attrelid
2773 """, (self._table, 'parent_left'))
2777 def _create_parent_columns(self, cr):
2778 cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_left" INTEGER' % (self._table,))
2779 cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_right" INTEGER' % (self._table,))
2780 if 'parent_left' not in self._columns:
2781 _logger.error('create a column parent_left on object %s: fields.integer(\'Left Parent\', select=1)',
2783 _schema.debug("Table '%s': added column '%s' with definition=%s",
2784 self._table, 'parent_left', 'INTEGER')
2785 elif not self._columns['parent_left'].select:
2786 _logger.error('parent_left column on object %s must be indexed! Add select=1 to the field definition.',
2788 if 'parent_right' not in self._columns:
2789 _logger.error('create a column parent_right on object %s: fields.integer(\'Right Parent\', select=1)',
2791 _schema.debug("Table '%s': added column '%s' with definition=%s",
2792 self._table, 'parent_right', 'INTEGER')
2793 elif not self._columns['parent_right'].select:
2794 _logger.error('parent_right column on object %s must be indexed! Add select=1 to the field definition.',
2796 if self._columns[self._parent_name].ondelete not in ('cascade', 'restrict'):
2797 _logger.error("The column %s on object %s must be set as ondelete='cascade' or 'restrict'",
2798 self._parent_name, self._name)
2803 def _select_column_data(self, cr):
2804 # attlen is the number of bytes necessary to represent the type when
2805 # the type has a fixed size. If the type has a varying size attlen is
2806 # -1 and atttypmod is the size limit + 4, or -1 if there is no limit.
2807 cr.execute("SELECT c.relname,a.attname,a.attlen,a.atttypmod,a.attnotnull,a.atthasdef,t.typname,CASE WHEN a.attlen=-1 THEN (CASE WHEN a.atttypmod=-1 THEN 0 ELSE a.atttypmod-4 END) ELSE a.attlen END as size " \
2808 "FROM pg_class c,pg_attribute a,pg_type t " \
2809 "WHERE c.relname=%s " \
2810 "AND c.oid=a.attrelid " \
2811 "AND a.atttypid=t.oid", (self._table,))
2812 return dict(map(lambda x: (x['attname'], x),cr.dictfetchall()))
2815 def _o2m_raise_on_missing_reference(self, cr, f):
2816 # TODO this check should be a method on fields.one2many.
2817 if f._obj in self.pool:
2818 other = self.pool[f._obj]
2819 # TODO the condition could use fields_get_keys().
2820 if f._fields_id not in other._columns.keys():
2821 if f._fields_id not in other._inherit_fields.keys():
2822 raise except_orm('Programming Error', "There is no reference field '%s' found for '%s'" % (f._fields_id, f._obj,))
2824 def _m2m_raise_or_create_relation(self, cr, f):
2825 m2m_tbl, col1, col2 = f._sql_names(self)
2826 # do not create relations for custom fields as they do not belong to a module
2827 # they will be automatically removed when dropping the corresponding ir.model.field
2828 # table names for custom relations all start with x_, see __init__
2829 if not m2m_tbl.startswith('x_'):
2830 self._save_relation_table(cr, m2m_tbl)
2831 cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (m2m_tbl,))
2832 if not cr.dictfetchall():
2833 if f._obj not in self.pool:
2834 raise except_orm('Programming Error', 'Many2Many destination model does not exist: `%s`' % (f._obj,))
2835 dest_model = self.pool[f._obj]
2836 ref = dest_model._table
2837 cr.execute('CREATE TABLE "%s" ("%s" INTEGER NOT NULL, "%s" INTEGER NOT NULL, UNIQUE("%s","%s"))' % (m2m_tbl, col1, col2, col1, col2))
2838 # create foreign key references with ondelete=cascade, unless the targets are SQL views
2839 cr.execute("SELECT relkind FROM pg_class WHERE relkind IN ('v') AND relname=%s", (ref,))
2840 if not cr.fetchall():
2841 self._m2o_add_foreign_key_unchecked(m2m_tbl, col2, dest_model, 'cascade')
2842 cr.execute("SELECT relkind FROM pg_class WHERE relkind IN ('v') AND relname=%s", (self._table,))
2843 if not cr.fetchall():
2844 self._m2o_add_foreign_key_unchecked(m2m_tbl, col1, self, 'cascade')
2846 cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (m2m_tbl, col1, m2m_tbl, col1))
2847 cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (m2m_tbl, col2, m2m_tbl, col2))
2848 cr.execute("COMMENT ON TABLE \"%s\" IS 'RELATION BETWEEN %s AND %s'" % (m2m_tbl, self._table, ref))
2850 _schema.debug("Create table '%s': m2m relation between '%s' and '%s'", m2m_tbl, self._table, ref)
2853 def _add_sql_constraints(self, cr):
2856 Modify this model's database table constraints so they match the one in
2860 def unify_cons_text(txt):
2861 return txt.lower().replace(', ',',').replace(' (','(')
2863 for (key, con, _) in self._sql_constraints:
2864 conname = '%s_%s' % (self._table, key)
2866 self._save_constraint(cr, conname, 'u')
2867 cr.execute("SELECT conname, pg_catalog.pg_get_constraintdef(oid, true) as condef FROM pg_constraint where conname=%s", (conname,))
2868 existing_constraints = cr.dictfetchall()
2872 'query': 'ALTER TABLE "%s" DROP CONSTRAINT "%s"' % (self._table, conname, ),
2873 'msg_ok': "Table '%s': dropped constraint '%s'. Reason: its definition changed from '%%s' to '%s'" % (
2874 self._table, conname, con),
2875 'msg_err': "Table '%s': unable to drop \'%s\' constraint !" % (self._table, con),
2880 'query': 'ALTER TABLE "%s" ADD CONSTRAINT "%s" %s' % (self._table, conname, con,),
2881 'msg_ok': "Table '%s': added constraint '%s' with definition=%s" % (self._table, conname, con),
2882 'msg_err': "Table '%s': unable to add \'%s\' constraint !\n If you want to have it, you should update the records and execute manually:\n%%s" % (
2888 if not existing_constraints:
2889 # constraint does not exist:
2890 sql_actions['add']['execute'] = True
2891 sql_actions['add']['msg_err'] = sql_actions['add']['msg_err'] % (sql_actions['add']['query'], )
2892 elif unify_cons_text(con) not in [unify_cons_text(item['condef']) for item in existing_constraints]:
2893 # constraint exists but its definition has changed:
2894 sql_actions['drop']['execute'] = True
2895 sql_actions['drop']['msg_ok'] = sql_actions['drop']['msg_ok'] % (existing_constraints[0]['condef'].lower(), )
2896 sql_actions['add']['execute'] = True
2897 sql_actions['add']['msg_err'] = sql_actions['add']['msg_err'] % (sql_actions['add']['query'], )
2899 # we need to add the constraint:
2900 sql_actions = [item for item in sql_actions.values()]
2901 sql_actions.sort(key=lambda x: x['order'])
2902 for sql_action in [action for action in sql_actions if action['execute']]:
2904 cr.execute(sql_action['query'])
2906 _schema.debug(sql_action['msg_ok'])
2908 _schema.warning(sql_action['msg_err'])
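# Illustrative sketch (not part of the original module): the constraints handled
# above come from model declarations such as (names and messages are examples):
#
#     _sql_constraints = [
#         ('name_uniq', 'unique(name)', 'The name must be unique!'),
#         ('amount_positive', 'CHECK(amount >= 0)', 'The amount must be positive.'),
#     ]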
2912 def _execute_sql(self, cr):
2913 """ Execute the SQL code from the _sql attribute (if any)."""
2914 if hasattr(self, "_sql"):
2915 for line in self._sql.split(';'):
2916 line2 = line.replace('\n', '').strip()
2922 # Update objects that use this one to update their _inherits fields
2926 def _inherits_reload_src(cls):
2927 """ Recompute the _inherit_fields mapping on each _inherits'd child model."""
2928 for model in cls.pool.values():
2929 if cls._name in model._inherits:
2930 model._inherits_reload()
2933 def _inherits_reload(cls):
2934 """ Recompute the _inherit_fields mapping.
2936 This will also call itself on each inherits'd child model.
2940 for table in cls._inherits:
2941 other = cls.pool[table]
2942 for col in other._columns.keys():
2943 res[col] = (table, cls._inherits[table], other._columns[col], table)
2944 for col in other._inherit_fields.keys():
2945 res[col] = (table, cls._inherits[table], other._inherit_fields[col][2], other._inherit_fields[col][3])
2946 cls._inherit_fields = res
2947 cls._all_columns = cls._get_column_infos()
2949 # interface columns with new-style fields
2950 for attr, column in cls._columns.items():
2951 if attr not in cls._fields:
2952 cls._add_field(attr, column.to_field())
2954 # interface inherited fields with new-style fields (note that the
2955 # reverse order is for consistency with _all_columns above)
2956 for parent_model, parent_field in reversed(cls._inherits.items()):
2957 for attr, field in cls.pool[parent_model]._fields.iteritems():
2958 if attr not in cls._fields:
2959 cls._add_field(attr, field.copy(
2960 related=(parent_field, attr),
2965 cls._inherits_reload_src()
2968 def _get_column_infos(cls):
2969 """Returns a dict mapping all fields names (direct fields and
2970 inherited field via _inherits) to a ``column_info`` struct
2971 giving detailed columns """
2973 # do not invert the for loops, since local fields may hide inherited ones!
2974 for k, (parent, m2o, col, original_parent) in cls._inherit_fields.iteritems():
2975 result[k] = fields.column_info(k, col, parent, m2o, original_parent)
2976 for k, col in cls._columns.iteritems():
2977 result[k] = fields.column_info(k, col)
2981 def _inherits_check(cls):
2982 for table, field_name in cls._inherits.items():
2983 if field_name not in cls._columns:
2984 _logger.info('Missing many2one field definition for _inherits reference "%s" in "%s", using default one.', field_name, cls._name)
2985 cls._columns[field_name] = fields.many2one(table, string="Automatically created field to link to parent %s" % table,
2986 required=True, ondelete="cascade")
2987 elif not cls._columns[field_name].required or cls._columns[field_name].ondelete.lower() not in ("cascade", "restrict"):
2988 _logger.warning('Field definition for _inherits reference "%s" in "%s" must be marked as "required" with ondelete="cascade" or "restrict", forcing it to required + cascade.', field_name, cls._name)
2989 cls._columns[field_name].required = True
2990 cls._columns[field_name].ondelete = "cascade"
2992 # reflect fields with delegate=True in dictionary cls._inherits
2993 for field in cls._fields.itervalues():
2994 if field.type == 'many2one' and not field.related and field.delegate:
2995 if not field.required:
2996 _logger.warning("Field %s with delegate=True must be required.", field)
2997 field.required = True
2998 if field.ondelete.lower() not in ('cascade', 'restrict'):
2999 field.ondelete = 'cascade'
3000 cls._inherits[field.comodel_name] = field.name
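# Illustrative sketch (not part of the original module): delegation inheritance
# can be declared either through _inherits or, with new-style fields, through
# delegate=True; the model below is hypothetical:
#
#     from openerp import models, fields
#
#     class LibraryMember(models.Model):
#         _name = 'library.member'
#         _inherits = {'res.partner': 'partner_id'}
#         partner_id = fields.Many2one('res.partner', required=True,
#                                      ondelete='cascade')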
3003 def _prepare_setup_fields(self):
3004 """ Prepare the setup of fields once the models have been loaded. """
3005 for field in self._fields.itervalues():
3009 def _setup_fields(self, partial=False):
3010 """ Setup the fields (dependency triggers, etc). """
3011 for field in self._fields.itervalues():
3012 if partial and field.manual and \
3013 field.relational and \
3014 (field.comodel_name not in self.pool or \
3015 (field.type == 'one2many' and field.inverse_name not in self.pool[field.comodel_name]._fields)):
3016 # do not set up manual fields that refer to unknown models
3018 field.setup(self.env)
3020 # group fields by compute to determine field.computed_fields
3021 fields_by_compute = defaultdict(list)
3022 for field in self._fields.itervalues():
3024 field.computed_fields = fields_by_compute[field.compute]
3025 field.computed_fields.append(field)
3027 field.computed_fields = []
3029 def fields_get(self, cr, user, allfields=None, context=None, write_access=True):
3030 """ fields_get([fields])
3032 Return the definition of each field.
3034 The returned value is a dictionary (indexed by field name) of
3035 dictionaries. The _inherits'd fields are included. The string, help,
3036 and selection (if present) attributes are translated.
3038 :param cr: database cursor
3039 :param user: current user id
3040 :param allfields: list of fields
3041 :param context: context arguments, like lang, time zone
3042 :return: dictionary of field dictionaries, each one describing a field of the business object
3043 :raise AccessError: * if user has no create/write rights on the requested object
3046 recs = self.browse(cr, user, [], context)
3049 for fname, field in self._fields.iteritems():
3050 if allfields and fname not in allfields:
3052 if field.groups and not recs.user_has_groups(field.groups):
3054 res[fname] = field.get_description(recs.env)
3056 # if user cannot create or modify records, make all fields readonly
3057 has_access = functools.partial(recs.check_access_rights, raise_exception=False)
3058 if not (has_access('write') or has_access('create')):
3059 for description in res.itervalues():
3060 description['readonly'] = True
3061 description['states'] = {}
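# Illustrative note (not part of the original module): the returned mapping looks
# like the following (field names and attribute values are arbitrary examples):
#
#     {'name': {'type': 'char', 'string': 'Name', 'required': True, ...},
#      'parent_id': {'type': 'many2one', 'relation': 'res.partner', ...},
#      ...}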
3065 def get_empty_list_help(self, cr, user, help, context=None):
3066 """ Generic method giving the help message displayed when having
3067 no result to display in a list or kanban view. By default it returns
3068 the help given in parameter that is generally the help message
3069 defined in the action.
3073 def check_field_access_rights(self, cr, user, operation, fields, context=None):
3075 Check the user access rights on the given fields. This raises Access
3076 Denied if the user does not have the rights. Otherwise it returns the
3077 fields (as-is if ``fields`` is not falsy, or the readable/writable
3078 fields if ``fields`` is falsy).
3080 if user == SUPERUSER_ID:
3081 return fields or list(self._fields)
3084 """ determine whether user has access to field `fname` """
3085 field = self._fields.get(fname)
3086 if field and field.groups:
3087 return self.user_has_groups(cr, user, groups=field.groups, context=context)
3092 fields = filter(valid, self._fields)
3094 invalid_fields = set(filter(lambda name: not valid(name), fields))
3096 _logger.warning('Access Denied by ACLs for operation: %s, uid: %s, model: %s, fields: %s',
3097 operation, user, self._name, ', '.join(invalid_fields))
3099 _('The requested operation cannot be completed due to security restrictions. '
3100 'Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
3101 (self._description, operation))
3105 # add explicit old-style implementation to read()
3107 def read(self, cr, user, ids, fields=None, context=None, load='_classic_read'):
3108 records = self.browse(cr, user, ids, context)
3109 result = BaseModel.read(records, fields, load=load)
3110 return result if isinstance(ids, list) else (bool(result) and result[0])
3112 # new-style implementation of read()
3114 def read(self, fields=None, load='_classic_read'):
3117 Reads the requested fields for the records in `self`, low-level/RPC
3118 method. In Python code, prefer :meth:`~.browse`.
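A minimal illustrative call (field names are hypothetical)::

    records.read(['name', 'partner_id'])
    # -> [{'id': 1, 'name': 'Foo', 'partner_id': (7, 'Partner')}, ...]

With the default ``load='_classic_read'``, many2one values are returned
as ``(id, display name)`` pairs.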
3120 :param fields: list of field names to return (default is all fields)
3121 :return: a list of dictionaries mapping field names to their values,
3122 with one dictionary per record
3123 :raise AccessError: if user has no read rights on some of the given
3126 # check access rights
3127 self.check_access_rights('read')
3128 fields = self.check_field_access_rights('read', fields)
3130 # split fields into stored and computed fields
3131 stored, computed = [], []
3133 if name in self._columns:
3135 elif name in self._fields:
3136 computed.append(name)
3138 _logger.warning("%s.read() with unknown field '%s'", self._name, name)
3140 # fetch stored fields from the database to the cache
3141 self._read_from_database(stored)
3143 # retrieve results from records; this takes values from the cache and
3144 # computes remaining fields
3146 name_fields = [(name, self._fields[name]) for name in (stored + computed)]
3147 use_name_get = (load == '_classic_read')
3150 values = {'id': record.id}
3151 for name, field in name_fields:
3152 values[name] = field.convert_to_read(record[name], use_name_get)
3153 result.append(values)
3154 except MissingError:
3160 def _prefetch_field(self, field):
3161 """ Read from the database in order to fetch `field` (:class:`Field`
3162 instance) for `self` in cache.
3164 # fetch the records of this model without field_name in their cache
3165 records = self._in_cache_without(field)
3167 if len(records) > PREFETCH_MAX:
3168 records = records[:PREFETCH_MAX] | self
3170 # by default, simply fetch field
3171 fnames = {field.name}
3173 if self.env.in_draft:
3174 # we may be doing an onchange, do not prefetch other fields
3176 elif self.env.field_todo(field):
3177 # field must be recomputed, do not prefetch records to recompute
3178 records -= self.env.field_todo(field)
3179 elif not self._context.get('prefetch_fields', True):
3180 # do not prefetch other fields
3182 elif self._columns[field.name]._prefetch:
3183 # here we can optimize: prefetch all classic and many2one fields
3185 for fname, fcolumn in self._columns.iteritems()
3186 if fcolumn._prefetch
3187 if not fcolumn.groups or self.user_has_groups(fcolumn.groups)
3190 # fetch records with read()
3191 assert self in records and field.name in fnames
3194 result = records.read(list(fnames), load='_classic_write')
3198 # check the cache, and update it if necessary
3199 if not self._cache.contains(field):
3200 for values in result:
3201 record = self.browse(values.pop('id'))
3202 record._cache.update(record._convert_to_cache(values, validate=False))
3203 if not self._cache.contains(field):
3204 e = AccessError("No value found for %s.%s" % (self, field.name))
3205 self._cache[field] = FailedValue(e)
3208 def _read_from_database(self, field_names):
3209 """ Read the given fields of the records in `self` from the database,
3210 and store them in cache. Access errors are also stored in cache.
3213 cr, user, context = env.args
3215 # FIXME: The query construction needs to be rewritten using the internal Query
3216 # object, as in search(), to avoid ambiguous column references when
3217 # reading/sorting on a table that is auto_joined to another table with
3218 # common columns (e.g. the magical columns)
3220 # Construct a clause for the security rules.
3221 # 'tables' holds the list of tables necessary for the SELECT, including
3222 # the ir.rule clauses, and contains at least self._table.
3223 rule_clause, rule_params, tables = env['ir.rule'].domain_get(self._name, 'read')
3225 # determine the fields that are stored as columns in self._table
3226 fields_pre = [f for f in field_names if self._columns[f]._classic_write]
3228 # we need fully-qualified column names in case len(tables) > 1
3230 if isinstance(self._columns.get(f), fields.binary) and \
3231 context.get('bin_size_%s' % f, context.get('bin_size')):
3232 # PG 9.2 introduces conflicting pg_size_pretty(numeric) -> need ::cast
3233 return 'pg_size_pretty(length(%s."%s")::bigint) as "%s"' % (self._table, f, f)
3235 return '%s."%s"' % (self._table, f)
3236 qual_names = map(qualify, set(fields_pre + ['id']))
3238 query = """ SELECT %(qual_names)s FROM %(tables)s
3239 WHERE %(table)s.id IN %%s AND (%(extra)s)
3242 'qual_names': ",".join(qual_names),
3243 'tables': ",".join(tables),
3244 'table': self._table,
3245 'extra': " OR ".join(rule_clause) if rule_clause else "TRUE",
3246 'order': self._parent_order or self._order,
3250 for sub_ids in cr.split_for_in_conditions(self.ids):
3251 cr.execute(query, [tuple(sub_ids)] + rule_params)
3252 result.extend(cr.dictfetchall())
3254 ids = [vals['id'] for vals in result]
3257 # translate the fields if necessary
3258 if context.get('lang'):
3259 ir_translation = env['ir.translation']
3260 for f in fields_pre:
3261 if self._columns[f].translate:
3262 #TODO: optimize out of this loop
3263 res_trans = ir_translation._get_ids(
3264 '%s,%s' % (self._name, f), 'model', context['lang'], ids)
3266 vals[f] = res_trans.get(vals['id'], False) or vals[f]
3268 # apply the symbol_get functions of the fields we just read
3269 for f in fields_pre:
3270 symbol_get = self._columns[f]._symbol_get
3273 vals[f] = symbol_get(vals[f])
3275 # store result in cache for POST fields
3277 record = self.browse(vals['id'])
3278 record._cache.update(record._convert_to_cache(vals, validate=False))
3280 # determine the fields that must be processed now
3281 fields_post = [f for f in field_names if not self._columns[f]._classic_write]
3283 # Compute POST fields, grouped by multi
3284 by_multi = defaultdict(list)
3285 for f in fields_post:
3286 by_multi[self._columns[f]._multi].append(f)
3288 for multi, fs in by_multi.iteritems():
3290 res2 = self._columns[fs[0]].get(cr, self._model, ids, fs, user, context=context, values=result)
3291 assert res2 is not None, \
3292 'The function field "%s" on the "%s" model returned None\n' \
3293 '(a dictionary was expected).' % (fs[0], self._name)
3295 # TOCHECK: why we got a string instead of a dict in python2.6
3296 # if isinstance(res2[vals['id']], str): res2[vals['id']] = eval(res2[vals['id']])
3297 multi_fields = res2.get(vals['id'], {})
3300 vals[f] = multi_fields.get(f, [])
3303 res2 = self._columns[f].get(cr, self._model, ids, f, user, context=context, values=result)
3306 vals[f] = res2[vals['id']]
3310 # Warn about deprecated fields now that fields_pre and fields_post are computed
3311 for f in field_names:
3312 column = self._columns[f]
3313 if column.deprecated:
3314 _logger.warning('Field %s.%s is deprecated: %s', self._name, f, column.deprecated)
3316 # store result in cache
3318 record = self.browse(vals.pop('id'))
3319 record._cache.update(record._convert_to_cache(vals, validate=False))
3321 # store failed values in cache for the records that could not be read
3322 fetched = self.browse(ids)
3323 missing = self - fetched
3325 extras = fetched - self
3328 _("Database fetch misses ids ({}) and has extra ids ({}), may be caused by a type incoherence in a previous request").format(
3329 ', '.join(map(repr, missing._ids)),
3330 ', '.join(map(repr, extras._ids)),
3332 # store an access error exception in existing records
3334 _('The requested operation cannot be completed due to security restrictions. Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
3335 (self._name, 'read')
3337 forbidden = missing.exists()
3338 forbidden._cache.update(FailedValue(exc))
3339 # store a missing error exception in non-existing records
3341 _('One of the documents you are trying to access has been deleted, please try again after refreshing.')
3343 (missing - forbidden)._cache.update(FailedValue(exc))
3346 def get_metadata(self):
3348 Returns some metadata about the given records.
3350 :return: list of ownership dictionaries for each requested record
3351 :rtype: list of dictionaries with the following keys:
3354 * create_uid: user who created the record
3355 * create_date: date when the record was created
3356 * write_uid: last user who changed the record
3357 * write_date: date of the last change to the record
3358 * xmlid: XML ID to use to refer to this record (if there is one), in format ``module.name``
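A purely illustrative return value (all values are hypothetical)::

    [{'id': 42,
      'create_uid': (1, 'Administrator'),
      'create_date': '2014-06-04 14:00:12',
      'write_uid': (1, 'Administrator'),
      'write_date': '2014-06-05 09:30:00',
      'xmlid': 'base.main_company'}]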
3361 if self._log_access:
3362 fields += ['create_uid', 'create_date', 'write_uid', 'write_date']
3363 quoted_table = '"%s"' % self._table
3364 fields_str = ",".join('%s.%s' % (quoted_table, field) for field in fields)
3365 query = '''SELECT %s, __imd.module, __imd.name
3366 FROM %s LEFT JOIN ir_model_data __imd
3367 ON (__imd.model = %%s and __imd.res_id = %s.id)
3368 WHERE %s.id IN %%s''' % (fields_str, quoted_table, quoted_table, quoted_table)
3369 self._cr.execute(query, (self._name, tuple(self.ids)))
3370 res = self._cr.dictfetchall()
3372 uids = set(r[k] for r in res for k in ['write_uid', 'create_uid'] if r.get(k))
3373 names = dict(self.env['res.users'].browse(uids).name_get())
3377 value = r[key] = r[key] or False
3378 if key in ('write_uid', 'create_uid') and value in names:
3379 r[key] = (value, names[value])
3380 r['xmlid'] = ("%(module)s.%(name)s" % r) if r['name'] else False
3381 del r['name'], r['module']
3384 def _check_concurrency(self, cr, ids, context):
3387 if not (context.get(self.CONCURRENCY_CHECK_FIELD) and self._log_access):
3389 check_clause = "(id = %s AND %s < COALESCE(write_date, create_date, (now() at time zone 'UTC'))::timestamp)"
3390 for sub_ids in cr.split_for_in_conditions(ids):
3393 id_ref = "%s,%s" % (self._name, id)
3394 update_date = context[self.CONCURRENCY_CHECK_FIELD].pop(id_ref, None)
3396 ids_to_check.extend([id, update_date])
3397 if not ids_to_check:
3399 cr.execute("SELECT id FROM %s WHERE %s" % (self._table, " OR ".join([check_clause]*(len(ids_to_check)/2))), tuple(ids_to_check))
3402 # mention the first one only to keep the error message readable
3403 raise except_orm('ConcurrencyException', _('A document was modified since you last viewed it (%s:%d)') % (self._description, res[0]))
3405 def _check_record_rules_result_count(self, cr, uid, ids, result_ids, operation, context=None):
3406 """Verify the returned rows after applying record rules matches
3407 the length of `ids`, and raise an appropriate exception if it does not.
3411 ids, result_ids = set(ids), set(result_ids)
3412 missing_ids = ids - result_ids
3414 # Attempt to distinguish record rule restriction vs deleted records,
3415 # to provide a more specific error message - check whether the missing ids are still present in the database
3416 cr.execute('SELECT id FROM ' + self._table + ' WHERE id IN %s', (tuple(missing_ids),))
3417 forbidden_ids = [x[0] for x in cr.fetchall()]
3419 # the missing ids are (at least partially) hidden by access rules
3420 if uid == SUPERUSER_ID:
3422 _logger.warning('Access Denied by record rules for operation: %s on record ids: %r, uid: %s, model: %s', operation, forbidden_ids, uid, self._name)
3423 raise except_orm(_('Access Denied'),
3424 _('The requested operation cannot be completed due to security restrictions. Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
3425 (self._description, operation))
3427 # If we get here, the missing_ids are not in the database
3428 if operation in ('read','unlink'):
3429 # No need to warn about deleting an already deleted record.
3430 # And no error when reading a record that was deleted, to prevent spurious
3431 # errors for non-transactional search/read sequences coming from clients
3433 _logger.warning('Failed operation on deleted record(s): %s, uid: %s, model: %s', operation, uid, self._name)
3434 raise except_orm(_('Missing document(s)'),
3435 _('One of the documents you are trying to access has been deleted, please try again after refreshing.'))
3438 def check_access_rights(self, cr, uid, operation, raise_exception=True): # no context on purpose.
3439 """Verifies that the operation given by ``operation`` is allowed for the user
3440 according to the access rights."""
3441 return self.pool.get('ir.model.access').check(cr, uid, self._name, operation, raise_exception)
3443 def check_access_rule(self, cr, uid, ids, operation, context=None):
3444 """Verifies that the operation given by ``operation`` is allowed for the user
3445 according to ir.rules.
3447 :param operation: one of ``write``, ``unlink``
3448 :raise except_orm: * if current ir.rules do not permit this operation.
3449 :return: None if the operation is allowed
3451 if uid == SUPERUSER_ID:
3454 if self.is_transient():
3455 # Only one single implicit access rule for transient models: owner only!
3456 # This is ok to hardcode because we assert that TransientModels always
3457 # have log_access enabled so that the create_uid column is always there.
3458 # And even with _inherits, these fields are always present in the local
3459 # table too, so no need for JOINs.
3460 cr.execute("""SELECT distinct create_uid
3462 WHERE id IN %%s""" % self._table, (tuple(ids),))
3463 uids = [x[0] for x in cr.fetchall()]
3464 if len(uids) != 1 or uids[0] != uid:
3465 raise except_orm(_('Access Denied'),
3466 _('For this kind of document, you may only access records you created yourself.\n\n(Document type: %s)') % (self._description,))
3468 where_clause, where_params, tables = self.pool.get('ir.rule').domain_get(cr, uid, self._name, operation, context=context)
3470 where_clause = ' and ' + ' and '.join(where_clause)
3471 for sub_ids in cr.split_for_in_conditions(ids):
3472 cr.execute('SELECT ' + self._table + '.id FROM ' + ','.join(tables) +
3473 ' WHERE ' + self._table + '.id IN %s' + where_clause,
3474 [sub_ids] + where_params)
3475 returned_ids = [x['id'] for x in cr.dictfetchall()]
3476 self._check_record_rules_result_count(cr, uid, sub_ids, returned_ids, operation, context=context)
3478 def create_workflow(self, cr, uid, ids, context=None):
3479 """Create a workflow instance for each given record IDs."""
3480 from openerp import workflow
3482 workflow.trg_create(uid, self._name, res_id, cr)
3483 # self.invalidate_cache(cr, uid, context=context) ?
3486 def delete_workflow(self, cr, uid, ids, context=None):
3487 """Delete the workflow instances bound to the given record IDs."""
3488 from openerp import workflow
3490 workflow.trg_delete(uid, self._name, res_id, cr)
3491 self.invalidate_cache(cr, uid, context=context)
3494 def step_workflow(self, cr, uid, ids, context=None):
3495 """Reevaluate the workflow instances of the given record IDs."""
3496 from openerp import workflow
3498 workflow.trg_write(uid, self._name, res_id, cr)
3499 # self.invalidate_cache(cr, uid, context=context) ?
3502 def signal_workflow(self, cr, uid, ids, signal, context=None):
3503 """Send given workflow signal and return a dict mapping ids to workflow results"""
3504 from openerp import workflow
3507 result[res_id] = workflow.trg_validate(uid, self._name, res_id, signal, cr)
3508 # self.invalidate_cache(cr, uid, context=context) ?
3511 def redirect_workflow(self, cr, uid, old_new_ids, context=None):
3512 """ Rebind the workflow instance bound to the given 'old' record IDs to
3513 the given 'new' IDs (``old_new_ids`` is a list of pairs ``(old, new)``).
3515 from openerp import workflow
3516 for old_id, new_id in old_new_ids:
3517 workflow.trg_redirect(uid, self._name, old_id, new_id, cr)
3518 self.invalidate_cache(cr, uid, context=context)
3521 def unlink(self, cr, uid, ids, context=None):
3524 Deletes the records of the current set
3526 :raise AccessError: * if user has no unlink rights on the requested object
3527 * if user tries to bypass access rules for unlink on the requested object
3528 :raise UserError: if the record is a default property for other records
3533 if isinstance(ids, (int, long)):
3536 result_store = self._store_get_values(cr, uid, ids, self._all_columns.keys(), context)
3538 # for recomputing new-style fields
3539 recs = self.browse(cr, uid, ids, context)
3540 recs.modified(self._fields)
3542 self._check_concurrency(cr, ids, context)
3544 self.check_access_rights(cr, uid, 'unlink')
3546 ir_property = self.pool.get('ir.property')
3548 # Check if the records are used as default properties.
3549 domain = [('res_id', '=', False),
3550 ('value_reference', 'in', ['%s,%s' % (self._name, i) for i in ids]),
3552 if ir_property.search(cr, uid, domain, context=context):
3553 raise except_orm(_('Error'), _('Unable to delete this document because it is used as a default property'))
3555 # Delete the records' properties.
3556 property_ids = ir_property.search(cr, uid, [('res_id', 'in', ['%s,%s' % (self._name, i) for i in ids])], context=context)
3557 ir_property.unlink(cr, uid, property_ids, context=context)
3559 self.delete_workflow(cr, uid, ids, context=context)
3561 self.check_access_rule(cr, uid, ids, 'unlink', context=context)
3562 pool_model_data = self.pool.get('ir.model.data')
3563 ir_values_obj = self.pool.get('ir.values')
3564 ir_attachment_obj = self.pool.get('ir.attachment')
3565 for sub_ids in cr.split_for_in_conditions(ids):
3566 cr.execute('delete from ' + self._table + ' ' \
3567 'where id IN %s', (sub_ids,))
3569 # Remove the ir_model_data reference if the record being deleted was created by an xml/csv file,
3570 # as these references are not enforced by real database foreign keys and would become dangling.
3571 # Note: following steps performed as admin to avoid access rights restrictions, and with no context
3572 # to avoid possible side-effects during admin calls.
3573 # Step 1. Calling unlink of ir_model_data only for the affected IDS
3574 reference_ids = pool_model_data.search(cr, SUPERUSER_ID, [('res_id','in',list(sub_ids)),('model','=',self._name)])
3575 # Step 2. Perform the actual deletion of the ir.model.data records found above
3577 pool_model_data.unlink(cr, SUPERUSER_ID, reference_ids)
3579 # For the same reason, removing the record relevant to ir_values
3580 ir_value_ids = ir_values_obj.search(cr, uid,
3581 ['|',('value','in',['%s,%s' % (self._name, sid) for sid in sub_ids]),'&',('res_id','in',list(sub_ids)),('model','=',self._name)],
3584 ir_values_obj.unlink(cr, uid, ir_value_ids, context=context)
3586 # For the same reason, removing the record relevant to ir_attachment
3587 # The search is performed with sql as the search method of ir_attachment is overridden to hide attachments of deleted records
3588 cr.execute('select id from ir_attachment where res_model = %s and res_id in %s', (self._name, sub_ids))
3589 ir_attachment_ids = [ir_attachment[0] for ir_attachment in cr.fetchall()]
3590 if ir_attachment_ids:
3591 ir_attachment_obj.unlink(cr, uid, ir_attachment_ids, context=context)
3593 # invalidate the *whole* cache, since the orm does not handle all
3594 # changes made in the database, like cascading delete!
3595 recs.invalidate_cache()
3597 for order, obj_name, store_ids, fields in result_store:
3598 if obj_name == self._name:
3599 effective_store_ids = set(store_ids) - set(ids)
3601 effective_store_ids = store_ids
3602 if effective_store_ids:
3603 obj = self.pool[obj_name]
3604 cr.execute('select id from '+obj._table+' where id IN %s', (tuple(effective_store_ids),))
3605 rids = map(lambda x: x[0], cr.fetchall())
3607 obj._store_set_values(cr, uid, rids, fields, context)
3609 # recompute new-style fields
3618 def write(self, vals):
3621 Updates all records in the current set with the provided values.
3623 :param dict vals: fields to update and the value to set on them e.g::
3625 {'foo': 1, 'bar': "Qux"}
3627 will set the field ``foo`` to ``1`` and the field ``bar`` to
3628 ``"Qux"`` if those are valid (otherwise it will trigger an error).
3630 :raise AccessError: * if user has no write rights on the requested object
3631 * if user tries to bypass access rules for write on the requested object
3632 :raise ValidateError: if user tries to enter invalid value for a field that is not in selection
3633 :raise UserError: if a loop would be created in a hierarchy of objects as a result of the operation (such as setting an object as its own parent)
3635 .. _openerp/models/relationals/format:
3637 .. note:: Relational fields use a special "commands" format to manipulate their values
3639 This format is a list of command triplets executed sequentially,
3640 possible command triplets are:
3642 ``(0, _, values: dict)``
3643 links to a new record created from the provided values
3644 ``(1, id, values: dict)``
3645 updates the already-linked record of id ``id`` with the
3648 unlinks and deletes the linked record of id ``id``
3650 unlinks the linked record of id ``id`` without deleting it
3652 links to an existing record of id ``id``
3654 unlinks all records in the relation, equivalent to using
3655 the command ``3`` on every linked record
3657 replaces the existing list of linked records by the provided
3658 ones, equivalent to using ``5`` then ``4`` for each id in
3661 (in command triplets, ``_`` values are ignored and can be
3662 anything, generally ``0`` or ``False``)
3664 Any command can be used on :class:`~openerp.fields.Many2many`,
3665 only ``0``, ``1`` and ``2`` can be used on
3666 :class:`~openerp.fields.One2many`.
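As an illustration only (the ``tag_ids`` field is assumed to be a
many2many and the ids are hypothetical), a call mixing several
commands could be::

    records.write({
        'name': "Qux",
        'tag_ids': [(6, 0, [1, 2, 3]),            # replace the current set with these ids
                    (0, 0, {'name': "New tag"})], # then create and link a new record
    })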
3671 self._check_concurrency(self._ids)
3672 self.check_access_rights('write')
3674 # No user-driven update of these columns
3675 for field in itertools.chain(MAGIC_COLUMNS, ('parent_left', 'parent_right')):
3676 vals.pop(field, None)
3678 # split up fields into old-style and pure new-style ones
3679 old_vals, new_vals, unknown = {}, {}, []
3680 for key, val in vals.iteritems():
3681 if key in self._columns:
3683 elif key in self._fields:
3689 _logger.warning("%s.write() with unknown fields: %s", self._name, ', '.join(sorted(unknown)))
3691 # write old-style fields with (low-level) method _write
3693 self._write(old_vals)
3695 # put the values of pure new-style fields into cache, and inverse them
3698 record._cache.update(record._convert_to_cache(new_vals, update=True))
3699 for key in new_vals:
3700 self._fields[key].determine_inverse(self)
3704 def _write(self, cr, user, ids, vals, context=None):
3705 # low-level implementation of write()
3710 self.check_field_access_rights(cr, user, 'write', vals.keys())
3711 deleted_related = defaultdict(list)
3712 for field in vals.keys():
3714 if field in self._columns:
3715 fobj = self._columns[field]
3716 elif field in self._inherit_fields:
3717 fobj = self._inherit_fields[field][2]
3720 if fobj._type in ['one2many', 'many2many'] and vals[field]:
3721 for wtuple in vals[field]:
3722 if isinstance(wtuple, (tuple, list)) and wtuple[0] == 2:
3723 deleted_related[fobj._obj].append(wtuple[1])
3728 for group in groups:
3729 module = group.split(".")[0]
3730 grp = group.split(".")[1]
3731 cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name=%s and module=%s and model=%s) and uid=%s", \
3732 (grp, module, 'res.groups', user))
3733 readonly = cr.fetchall()
3734 if readonly[0][0] >= 1:
3741 result = self._store_get_values(cr, user, ids, vals.keys(), context) or []
3743 # for recomputing new-style fields
3744 recs = self.browse(cr, user, ids, context)
3745 modified_fields = list(vals)
3746 if self._log_access:
3747 modified_fields += ['write_date', 'write_uid']
3748 recs.modified(modified_fields)
3750 parents_changed = []
3751 parent_order = self._parent_order or self._order
3752 if self._parent_store and (self._parent_name in vals) and not context.get('defer_parent_store_computation'):
3753 # The parent_left/right computation may take up to
3754 # 5 seconds. No need to recompute the values if the
3755 # parent is the same.
3756 # Note: to respect parent_order, nodes must be processed in
3757 # order, so ``parents_changed`` must be ordered properly.
3758 parent_val = vals[self._parent_name]
3760 query = "SELECT id FROM %s WHERE id IN %%s AND (%s != %%s OR %s IS NULL) ORDER BY %s" % \
3761 (self._table, self._parent_name, self._parent_name, parent_order)
3762 cr.execute(query, (tuple(ids), parent_val))
3764 query = "SELECT id FROM %s WHERE id IN %%s AND (%s IS NOT NULL) ORDER BY %s" % \
3765 (self._table, self._parent_name, parent_order)
3766 cr.execute(query, (tuple(ids),))
3767 parents_changed = map(operator.itemgetter(0), cr.fetchall())
3774 totranslate = context.get('lang', False) and (context['lang'] != 'en_US')
3776 field_column = self._all_columns.get(field) and self._all_columns.get(field).column
3777 if field_column and field_column.deprecated:
3778 _logger.warning('Field %s.%s is deprecated: %s', self._name, field, field_column.deprecated)
3779 if field in self._columns:
3780 if self._columns[field]._classic_write and not (hasattr(self._columns[field], '_fnct_inv')):
3781 if (not totranslate) or not self._columns[field].translate:
3782 upd0.append('"'+field+'"='+self._columns[field]._symbol_set[0])
3783 upd1.append(self._columns[field]._symbol_set[1](vals[field]))
3784 direct.append(field)
3786 upd_todo.append(field)
3788 updend.append(field)
3789 if field in self._columns \
3790 and hasattr(self._columns[field], 'selection') \
3792 self._check_selection_field_value(cr, user, field, vals[field], context=context)
3794 if self._log_access:
3795 upd0.append('write_uid=%s')
3796 upd0.append("write_date=(now() at time zone 'UTC')")
3800 self.check_access_rule(cr, user, ids, 'write', context=context)
3801 for sub_ids in cr.split_for_in_conditions(ids):
3802 cr.execute('update ' + self._table + ' set ' + ','.join(upd0) + ' ' \
3803 'where id IN %s', upd1 + [sub_ids])
3804 if cr.rowcount != len(sub_ids):
3805 raise MissingError(_('One of the records you are trying to modify has already been deleted (Document type: %s).') % self._description)
3810 if self._columns[f].translate:
3811 src_trans = self.pool[self._name].read(cr, user, ids, [f])[0][f]
3814 # Inserting value to DB
3815 context_wo_lang = dict(context, lang=None)
3816 self.write(cr, user, ids, {f: vals[f]}, context=context_wo_lang)
3817 self.pool.get('ir.translation')._set_ids(cr, user, self._name+','+f, 'model', context['lang'], ids, vals[f], src_trans)
3819 # call the 'set' method of fields which are not classic_write
3820 upd_todo.sort(lambda x, y: self._columns[x].priority-self._columns[y].priority)
3822 # default element in context must be removed when calling a one2many or many2many
3823 rel_context = context.copy()
3824 for c in context.items():
3825 if c[0].startswith('default_'):
3826 del rel_context[c[0]]
3828 for field in upd_todo:
3830 result += self._columns[field].set(cr, self, id, field, vals[field], user, context=rel_context) or []
3832 unknown_fields = updend[:]
3833 for table in self._inherits:
3834 col = self._inherits[table]
3836 for sub_ids in cr.split_for_in_conditions(ids):
3837 cr.execute('select distinct "'+col+'" from "'+self._table+'" ' \
3838 'where id IN %s', (sub_ids,))
3839 nids.extend([x[0] for x in cr.fetchall()])
3843 if self._inherit_fields[val][0] == table:
3845 unknown_fields.remove(val)
3847 self.pool[table].write(cr, user, nids, v, context)
3851 'No such field(s) in model %s: %s.',
3852 self._name, ', '.join(unknown_fields))
3854 # check Python constraints
3855 recs._validate_fields(vals)
3857 # TODO: use _order to set dest at the right position and not first node of parent
3858 # We can't defer parent_store computation because the stored function
3859 # fields that are computed may refer (directly or indirectly) to
3860 # parent_left/right (via a child_of domain)
3863 self.pool._init_parent[self._name] = True
3865 order = self._parent_order or self._order
3866 parent_val = vals[self._parent_name]
3868 clause, params = '%s=%%s' % (self._parent_name,), (parent_val,)
3870 clause, params = '%s IS NULL' % (self._parent_name,), ()
3872 for id in parents_changed:
3873 cr.execute('SELECT parent_left, parent_right FROM %s WHERE id=%%s' % (self._table,), (id,))
3874 pleft, pright = cr.fetchone()
3875 distance = pright - pleft + 1
3877 # Positions of current siblings, to locate proper insertion point;
3878 # this can _not_ be fetched outside the loop, as it needs to be refreshed
3879 # after each update, in case several nodes are sequentially inserted one
3880 # next to the other (i.e. computed incrementally)
3881 cr.execute('SELECT parent_right, id FROM %s WHERE %s ORDER BY %s' % (self._table, clause, parent_order), params)
3882 parents = cr.fetchall()
3884 # Find Position of the element
3886 for (parent_pright, parent_id) in parents:
3889 position = parent_pright and parent_pright + 1 or 1
3891 # It's the first node of the parent
3896 cr.execute('select parent_left from '+self._table+' where id=%s', (parent_val,))
3897 position = cr.fetchone()[0] + 1
3899 if pleft < position <= pright:
3900 raise except_orm(_('UserError'), _('Recursivity Detected.'))
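# The UPDATE statements below move the node and its whole subtree within
# the nested-set encoding: first open a gap of `distance` at the target
# position by shifting parent_left/parent_right values, then shift the
# subtree's own interval into that gap.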
3902 if pleft < position:
3903 cr.execute('update '+self._table+' set parent_left=parent_left+%s where parent_left>=%s', (distance, position))
3904 cr.execute('update '+self._table+' set parent_right=parent_right+%s where parent_right>=%s', (distance, position))
3905 cr.execute('update '+self._table+' set parent_left=parent_left+%s, parent_right=parent_right+%s where parent_left>=%s and parent_left<%s', (position-pleft, position-pleft, pleft, pright))
3907 cr.execute('update '+self._table+' set parent_left=parent_left+%s where parent_left>=%s', (distance, position))
3908 cr.execute('update '+self._table+' set parent_right=parent_right+%s where parent_right>=%s', (distance, position))
3909 cr.execute('update '+self._table+' set parent_left=parent_left-%s, parent_right=parent_right-%s where parent_left>=%s and parent_left<%s', (pleft-position+distance, pleft-position+distance, pleft+distance, pright+distance))
3910 recs.invalidate_cache(['parent_left', 'parent_right'])
3912 result += self._store_get_values(cr, user, ids, vals.keys(), context)
3915 # for recomputing new-style fields
3916 recs.modified(modified_fields)
3919 for order, model_name, ids_to_update, fields_to_recompute in result:
3920 key = (model_name, tuple(fields_to_recompute))
3921 done.setdefault(key, {})
3922 # avoid doing the same computation several times
3924 for id in ids_to_update:
3925 if id not in done[key]:
3926 done[key][id] = True
3927 if id not in deleted_related[model_name]:
3929 self.pool[model_name]._store_set_values(cr, user, todo, fields_to_recompute, context)
3931 # recompute new-style fields
3932 if context.get('recompute', True):
3935 self.step_workflow(cr, user, ids, context=context)
3939 # TODO: Should set perm to user.xxx
3942 @api.returns('self', lambda value: value.id)
3943 def create(self, vals):
3944 """ create(vals) -> record
3946 Creates a new record for the model.
3948 The new record is initialized using the values from ``vals`` and
3949 if necessary those from :meth:`~.default_get`.
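A minimal illustrative call (model and field names are hypothetical)::

    partner = self.env['res.partner'].create({'name': "New partner"})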
3952 values for the model's fields, as a dictionary::
3954 {'field_name': field_value, ...}
3956 see :meth:`~.write` for details
3957 :return: new record created
3958 :raise AccessError: * if user has no create rights on the requested object
3959 * if user tries to bypass access rules for create on the requested object
3960 :raise ValidateError: if user tries to enter invalid value for a field that is not in selection
3961 :raise UserError: if a loop would be created in a hierarchy of objects as a result of the operation (such as setting an object as its own parent)
3963 self.check_access_rights('create')
3965 # add missing defaults, and drop fields that may not be set by user
3966 vals = self._add_missing_default_values(vals)
3967 for field in itertools.chain(MAGIC_COLUMNS, ('parent_left', 'parent_right')):
3968 vals.pop(field, None)
3970 # split up fields into old-style and pure new-style ones
3971 old_vals, new_vals, unknown = {}, {}, []
3972 for key, val in vals.iteritems():
3973 if key in self._all_columns:
3975 elif key in self._fields:
3981 _logger.warning("%s.create() with unknown fields: %s", self._name, ', '.join(sorted(unknown)))
3983 # create record with old-style fields
3984 record = self.browse(self._create(old_vals))
3986 # put the values of pure new-style fields into cache, and inverse them
3987 record._cache.update(record._convert_to_cache(new_vals))
3988 for key in new_vals:
3989 self._fields[key].determine_inverse(record)
3993 def _create(self, cr, user, vals, context=None):
3994 # low-level implementation of create()
3998 if self.is_transient():
3999 self._transient_vacuum(cr, user)
4002 for v in self._inherits:
4003 if self._inherits[v] not in vals:
4006 tocreate[v] = {'id': vals[self._inherits[v]]}
4009 # list of column assignments defined as tuples like:
4010 # (column_name, format_string, column_value)
4011 # (column_name, sql_formula)
4012 # Those tuples will be used by the string formatting for the INSERT
4014 ('id', "nextval('%s')" % self._sequence),
4019 for v in vals.keys():
4020 if v in self._inherit_fields and v not in self._columns:
4021 (table, col, col_detail, original_parent) = self._inherit_fields[v]
4022 tocreate[table][v] = vals[v]
4025 if (v not in self._inherit_fields) and (v not in self._columns):
4027 unknown_fields.append(v)
4030 'No such field(s) in model %s: %s.',
4031 self._name, ', '.join(unknown_fields))
4033 for table in tocreate:
4034 if self._inherits[table] in vals:
4035 del vals[self._inherits[table]]
4037 record_id = tocreate[table].pop('id', None)
4039 if record_id is None or not record_id:
4040 record_id = self.pool[table].create(cr, user, tocreate[table], context=context)
4042 self.pool[table].write(cr, user, [record_id], tocreate[table], context=context)
4044 updates.append((self._inherits[table], '%s', record_id))
4047 # Set boolean fields to False if they are not provided (to make searches on them more reliable)
4047 bool_fields = [x for x in self._columns.keys() if self._columns[x]._type=='boolean']
4049 for bool_field in bool_fields:
4050 if bool_field not in vals:
4051 vals[bool_field] = False
4053 for field in vals.keys():
4055 if field in self._columns:
4056 fobj = self._columns[field]
4058 fobj = self._inherit_fields[field][2]
4064 for group in groups:
4065 module = group.split(".")[0]
4066 grp = group.split(".")[1]
4067 cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name=%s and module=%s and model=%s) and uid=%s", \
4068 (grp, module, 'res.groups', user))
4069 readonly = cr.fetchall()
4070 if readonly[0][0] >= 1:
4073 elif readonly[0][0] == 0:
4081 current_field = self._columns[field]
4082 if current_field._classic_write:
4083 updates.append((field, '%s', current_field._symbol_set[1](vals[field])))
4086 # for the function fields that receive a value, we set them directly in the database
4087 # (they may be required), but we also need to trigger the _fnct_inv()
4087 if (hasattr(current_field, '_fnct_inv')) and not isinstance(current_field, fields.related):
4088 # TODO: this way of special-casing the related fields is really creepy but it shouldn't be changed at
4089 # one week of the release candidate. It seems the only good way to handle this correctly is to add an
4090 # attribute to make a field `really readonly` and thus totally ignored by create()... otherwise
4091 # if, for example, the related field has a default value (for usability) then the fnct_inv is called and it
4092 # may raise some access rights error. Changing this is too big a change for now, and is thus postponed
4093 # until after the release but, definitely, the behavior shouldn't be different for related and function
4095 upd_todo.append(field)
4097 # TODO: this `if` statement should be removed because there is no good reason to special-case the
4098 # related fields. See the above TODO comment for further explanations.
4099 if not isinstance(current_field, fields.related):
4100 upd_todo.append(field)
4101 if field in self._columns \
4102 and hasattr(current_field, 'selection') \
4104 self._check_selection_field_value(cr, user, field, vals[field], context=context)
4105 if self._log_access:
4106 updates.append(('create_uid', '%s', user))
4107 updates.append(('write_uid', '%s', user))
4108 updates.append(('create_date', "(now() at time zone 'UTC')"))
4109 updates.append(('write_date', "(now() at time zone 'UTC')"))
4111 # the list of tuples used in this formatting corresponds to
4112 # tuple(field_name, format, value)
4113 # In some cases, for example (id, create_date, write_date), we do not
4114 # need to read the third value of the tuple, because the real value is
4115 # encoded in the second value (the format).
4117 """INSERT INTO "%s" (%s) VALUES(%s) RETURNING id""" % (
4119 ', '.join('"%s"' % u[0] for u in updates),
4120 ', '.join(u[1] for u in updates)
4122 tuple([u[2] for u in updates if len(u) > 2])
4125 id_new, = cr.fetchone()
4126 recs = self.browse(cr, user, id_new, context)
4127 upd_todo.sort(lambda x, y: self._columns[x].priority-self._columns[y].priority)
4129 if self._parent_store and not context.get('defer_parent_store_computation'):
4131 self.pool._init_parent[self._name] = True
4133 parent = vals.get(self._parent_name, False)
4135 cr.execute('select parent_right from '+self._table+' where '+self._parent_name+'=%s order by '+(self._parent_order or self._order), (parent,))
4137 result_p = cr.fetchall()
4138 for (pleft,) in result_p:
4143 cr.execute('select parent_left from '+self._table+' where id=%s', (parent,))
4144 pleft_old = cr.fetchone()[0]
4147 cr.execute('select max(parent_right) from '+self._table)
4148 pleft = cr.fetchone()[0] or 0
4149 cr.execute('update '+self._table+' set parent_left=parent_left+2 where parent_left>%s', (pleft,))
4150 cr.execute('update '+self._table+' set parent_right=parent_right+2 where parent_right>%s', (pleft,))
4151 cr.execute('update '+self._table+' set parent_left=%s,parent_right=%s where id=%s', (pleft+1, pleft+2, id_new))
4152 recs.invalidate_cache(['parent_left', 'parent_right'])
4154 # default element in context must be removed when calling a one2many or many2many
4155 rel_context = context.copy()
4156 for c in context.items():
4157 if c[0].startswith('default_'):
4158 del rel_context[c[0]]
4161 for field in upd_todo:
4162 result += self._columns[field].set(cr, self, id_new, field, vals[field], user, rel_context) or []
4164 # check Python constraints
4165 recs._validate_fields(vals)
4167 # invalidate and mark new-style fields to recompute
4168 modified_fields = list(vals)
4169 if self._log_access:
4170 modified_fields += ['create_uid', 'create_date', 'write_uid', 'write_date']
4171 recs.modified(modified_fields)
4173 if context.get('recompute', True):
4174 result += self._store_get_values(cr, user, [id_new],
4175 list(set(vals.keys() + self._inherits.values())),
4179 for order, model_name, ids, fields2 in result:
4180 if not (model_name, ids, fields2) in done:
4181 self.pool[model_name]._store_set_values(cr, user, ids, fields2, context)
4182 done.append((model_name, ids, fields2))
4183 # recompute new-style fields
4186 if self._log_create and context.get('recompute', True):
4187 message = self._description + \
4189 self.name_get(cr, user, [id_new], context=context)[0][1] + \
4190 "' " + _("created.")
4191 self.log(cr, user, id_new, message, True, context=context)
4193 self.check_access_rule(cr, user, [id_new], 'create', context=context)
4194 self.create_workflow(cr, user, [id_new], context=context)
4197 def _store_get_values(self, cr, uid, ids, fields, context):
4198 """Returns an ordered list of fields.function to call due to
4199 an update operation on ``fields`` of records with ``ids``,
4200 obtained by calling the 'store' triggers of these fields,
4201 as set up by their 'store' attribute.
4203 :return: [(priority, model_name, [record_ids,], [function_fields,])]
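For illustration only, a hypothetical result (model and field names
are made up) could be::

    [(10, 'sale.order', [1, 2], ['amount_total']),
     (20, 'sale.order.line', [5, 6], ['price_subtotal'])]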
4205 if fields is None: fields = []
4206 stored_functions = self.pool._store_function.get(self._name, [])
4208 # use indexed names for the details of the stored_functions:
4209 model_name_, func_field_to_compute_, target_ids_func_, trigger_fields_, priority_ = range(5)
4211 # only keep store triggers that should be triggered for the ``fields``
4213 triggers_to_compute = (
4214 f for f in stored_functions
4215 if not f[trigger_fields_] or set(fields).intersection(f[trigger_fields_])
4219 target_id_results = {}
4220 for store_trigger in triggers_to_compute:
4221 target_func_id_ = id(store_trigger[target_ids_func_])
4222 if target_func_id_ not in target_id_results:
4223 # use admin user for accessing objects having rules defined on store fields
4224 target_id_results[target_func_id_] = [i for i in store_trigger[target_ids_func_](self, cr, SUPERUSER_ID, ids, context) if i]
4225 target_ids = target_id_results[target_func_id_]
4227 # the compound key must consider the priority and model name
4228 key = (store_trigger[priority_], store_trigger[model_name_])
4229 for target_id in target_ids:
4230 to_compute_map.setdefault(key, {}).setdefault(target_id,set()).add(tuple(store_trigger))
4232 # Here to_compute_map looks like:
4233 # { (10, 'model_a') : { target_id1: [ (trigger_1_tuple, trigger_2_tuple) ], ... }
4234 # (20, 'model_a') : { target_id2: [ (trigger_3_tuple, trigger_4_tuple) ], ... }
4235 # (99, 'model_a') : { target_id1: [ (trigger_5_tuple, trigger_6_tuple) ], ... }
4238 # Now we need to generate the batch function calls list
4240 # { (10, 'model_a') : [(10, 'model_a', [record_ids,], [function_fields,])] }
4242 for ((priority,model), id_map) in to_compute_map.iteritems():
4243 trigger_ids_maps = {}
4244 # trigger_ids_maps =
4245 # { (trigger_1_tuple, trigger_2_tuple) : [target_id1, target_id2, ..] }
4246 for target_id, triggers in id_map.iteritems():
4247 trigger_ids_maps.setdefault(tuple(triggers), []).append(target_id)
4248 for triggers, target_ids in trigger_ids_maps.iteritems():
4249 call_map.setdefault((priority,model),[]).append((priority, model, target_ids,
4250 [t[func_field_to_compute_] for t in triggers]))
4253 result = reduce(operator.add, (call_map[k] for k in sorted(call_map)))
4256 def _store_set_values(self, cr, uid, ids, fields, context):
4257 """Calls the fields.function's "implementation function" for all ``fields``, on records with ``ids`` (taking care of
4258 respecting ``multi`` attributes), and stores the resulting values in the database directly."""
4263 if self._log_access:
4264 cr.execute('select id,write_date from '+self._table+' where id IN %s', (tuple(ids),))
4268 field_dict.setdefault(r[0], [])
4269 res_date = time.strptime((r[1])[:19], '%Y-%m-%d %H:%M:%S')
4270 write_date = datetime.datetime.fromtimestamp(time.mktime(res_date))
4271 for i in self.pool._store_function.get(self._name, []):
4273 up_write_date = write_date + datetime.timedelta(hours=i[5])
4274 if datetime.datetime.now() < up_write_date:
4276 field_dict[r[0]].append(i[1])
4282 if self._columns[f]._multi not in keys:
4283 keys.append(self._columns[f]._multi)
4284 todo.setdefault(self._columns[f]._multi, [])
4285 todo[self._columns[f]._multi].append(f)
4289 # use admin user for accessing objects having rules defined on store fields
4290 result = self._columns[val[0]].get(cr, self, ids, val, SUPERUSER_ID, context=context)
4291 for id, value in result.items():
4293 for f in value.keys():
4294 if f in field_dict[id]:
4301 if self._columns[v]._type == 'many2one':
4303 value[v] = value[v][0]
4306 upd0.append('"'+v+'"='+self._columns[v]._symbol_set[0])
4307 upd1.append(self._columns[v]._symbol_set[1](value[v]))
4310 cr.execute('update "' + self._table + '" set ' + \
4311 ','.join(upd0) + ' where id = %s', upd1)
4315 # use admin user for accessing objects having rules defined on store fields
4316 result = self._columns[f].get(cr, self, ids, f, SUPERUSER_ID, context=context)
4317 for r in result.keys():
4319 if r in field_dict.keys():
4320 if f in field_dict[r]:
4322 for id, value in result.items():
4323 if self._columns[f]._type == 'many2one':
4328 cr.execute('update "' + self._table + '" set ' + \
4329 '"'+f+'"='+self._columns[f]._symbol_set[0] + ' where id = %s', (self._columns[f]._symbol_set[1](value), id))
4331 # invalidate and mark new-style fields to recompute
4332 self.browse(cr, uid, ids, context).modified(fields)
4336 # TODO: improve handling of NULL values
4337 def _where_calc(self, cr, user, domain, active_test=True, context=None):
4338 """Computes the WHERE clause needed to implement an OpenERP domain.
4339 :param domain: the domain to compute
4341 :param active_test: whether the default filtering of records with ``active``
4342 field set to ``False`` should be applied.
4343 :return: the query expressing the given domain as provided in domain
4344 :rtype: osv.query.Query
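A minimal illustrative use (the domain is hypothetical)::

    query = model._where_calc(cr, uid, [('name', 'ilike', 'foo')])
    from_clause, where_clause, params = query.get_sql()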
4349 # if the object has a field named 'active', filter out all inactive
4350 # records unless they were explicitly asked for
4351 if 'active' in self._all_columns and (active_test and context.get('active_test', True)):
4353 # the item[0] trick below works for domain items and '&'/'|'/'!'
4355 if not any(item[0] == 'active' for item in domain):
4356 domain.insert(0, ('active', '=', 1))
4358 domain = [('active', '=', 1)]
4361 e = expression.expression(cr, user, domain, self, context)
4362 tables = e.get_tables()
4363 where_clause, where_params = e.to_sql()
4364 where_clause = where_clause and [where_clause] or []
4366 where_clause, where_params, tables = [], [], ['"%s"' % self._table]
4368 return Query(tables, where_clause, where_params)
4370 def _check_qorder(self, word):
4371 if not regex_order.match(word):
4372 raise except_orm(_('AccessError'), _('Invalid "order" specified. A valid "order" specification is a comma-separated list of valid field names (optionally followed by asc/desc for the direction)'))
4375 def _apply_ir_rules(self, cr, uid, query, mode='read', context=None):
4376 """Add what's missing in ``query`` to implement all appropriate ir.rules
4377 (using the ``model_name``'s rules or the current model's rules if ``model_name`` is None)
4379 :param query: the current query object
4381 if uid == SUPERUSER_ID:
4384 def apply_rule(added_clause, added_params, added_tables, parent_model=None):
4385 """ :param parent_model: name of the parent model, if the added
4386 clause comes from a parent model
4390 # as inherited rules are being applied, we need to add the missing JOIN
4391 # to reach the parent table (if it was not JOINed yet in the query)
4392 parent_alias = self._inherits_join_add(self, parent_model, query)
4393 # inherited rules are applied on the external table -> need to get the alias and replace
4394 parent_table = self.pool[parent_model]._table
4395 added_clause = [clause.replace('"%s"' % parent_table, '"%s"' % parent_alias) for clause in added_clause]
4396 # change references to parent_table to parent_alias, because we now use the alias to refer to the table
4398 for table in added_tables:
4399 # table is just a table name -> switch to the full alias
4400 if table == '"%s"' % parent_table:
4401 new_tables.append('"%s" as "%s"' % (parent_table, parent_alias))
4402 # table is already a full statement -> replace references to the table with its alias; this is correct given the way aliases are generated
4404 new_tables.append(table.replace('"%s"' % parent_table, '"%s"' % parent_alias))
4405 added_tables = new_tables
4406 query.where_clause += added_clause
4407 query.where_clause_params += added_params
4408 for table in added_tables:
4409 if table not in query.tables:
4410 query.tables.append(table)
4414 # apply main rules on the object
4415 rule_obj = self.pool.get('ir.rule')
4416 rule_where_clause, rule_where_clause_params, rule_tables = rule_obj.domain_get(cr, uid, self._name, mode, context=context)
4417 apply_rule(rule_where_clause, rule_where_clause_params, rule_tables)
4419 # apply ir.rules from the parents (through _inherits)
4420 for inherited_model in self._inherits:
4421 rule_where_clause, rule_where_clause_params, rule_tables = rule_obj.domain_get(cr, uid, inherited_model, mode, context=context)
4422 apply_rule(rule_where_clause, rule_where_clause_params, rule_tables,
4423 parent_model=inherited_model)
4425 def _generate_m2o_order_by(self, order_field, query):
4427 Add possibly missing JOIN to ``query`` and generate the ORDER BY clause for m2o fields,
4428 either native m2o fields or function/related fields that are stored, including
4429 intermediate JOINs for inheritance if required.
4431 :return: the qualified field name to use in an ORDER BY clause to sort by ``order_field``
4433 if order_field not in self._columns and order_field in self._inherit_fields:
4434 # also add missing joins for reaching the table containing the m2o field
4435 qualified_field = self._inherits_join_calc(order_field, query)
4436 order_field_column = self._inherit_fields[order_field][2]
4438 qualified_field = '"%s"."%s"' % (self._table, order_field)
4439 order_field_column = self._columns[order_field]
4441 assert order_field_column._type == 'many2one', 'Invalid field passed to _generate_m2o_order_by()'
4442 if not order_field_column._classic_write and not getattr(order_field_column, 'store', False):
4443 _logger.debug("Many2one function/related fields must be stored " \
4444 "to be used as ordering fields! Ignoring sorting for %s.%s",
4445 self._name, order_field)
4448 # figure out the applicable order_by for the m2o
4449 dest_model = self.pool[order_field_column._obj]
4450 m2o_order = dest_model._order
4451 if not regex_order.match(m2o_order):
4452 # _order is complex, can't use it here, so we default to _rec_name
4453 m2o_order = dest_model._rec_name
4455 # extract the field names, to be able to qualify them and add desc/asc
4457 for order_part in m2o_order.split(","):
4458 m2o_order_list.append(order_part.strip().split(" ", 1)[0].strip())
4459 m2o_order = m2o_order_list
4461 # Join the dest m2o table if it's not joined yet. We use [LEFT] OUTER join here
4462 # as we don't want to exclude results that have NULL values for the m2o
4463 src_table, src_field = qualified_field.replace('"', '').split('.', 1)
4464 dst_alias, dst_alias_statement = query.add_join((src_table, dest_model._table, src_field, 'id', src_field), implicit=False, outer=True)
4465 qualify = lambda field: '"%s"."%s"' % (dst_alias, field)
4466 return map(qualify, m2o_order) if isinstance(m2o_order, list) else qualify(m2o_order)
4468 def _generate_order_by(self, order_spec, query):
4470 Attempt to construct an appropriate ORDER BY clause based on order_spec, which must be
4471 a comma-separated list of valid field names, optionally followed by an ASC or DESC direction.
4473 :raise" except_orm in case order_spec is malformed
4475 order_by_clause = ''
4476 order_spec = order_spec or self._order
4478 order_by_elements = []
4479 self._check_qorder(order_spec)
4480 for order_part in order_spec.split(','):
4481 order_split = order_part.strip().split(' ')
4482 order_field = order_split[0].strip()
4483 order_direction = order_split[1].strip() if len(order_split) == 2 else ''
4486 if order_field == 'id':
4487 order_by_elements.append('"%s"."%s" %s' % (self._table, order_field, order_direction))
4488 elif order_field in self._columns:
4489 order_column = self._columns[order_field]
4490 if order_column._classic_read:
4491 inner_clause = '"%s"."%s"' % (self._table, order_field)
4492 elif order_column._type == 'many2one':
4493 inner_clause = self._generate_m2o_order_by(order_field, query)
4495 continue # ignore non-readable or "non-joinable" fields
4496 elif order_field in self._inherit_fields:
4497 parent_obj = self.pool[self._inherit_fields[order_field][3]]
4498 order_column = parent_obj._columns[order_field]
4499 if order_column._classic_read:
4500 inner_clause = self._inherits_join_calc(order_field, query)
4501 elif order_column._type == 'many2one':
4502 inner_clause = self._generate_m2o_order_by(order_field, query)
4504 continue # ignore non-readable or "non-joinable" fields
4506 raise ValueError( _("Sorting field %s not found on model %s") %( order_field, self._name))
4507 if order_column and order_column._type == 'boolean':
4508 inner_clause = "COALESCE(%s, false)" % inner_clause
4510 if isinstance(inner_clause, list):
4511 for clause in inner_clause:
4512 order_by_elements.append("%s %s" % (clause, order_direction))
4514 order_by_elements.append("%s %s" % (inner_clause, order_direction))
4515 if order_by_elements:
4516 order_by_clause = ",".join(order_by_elements)
4518 return order_by_clause and (' ORDER BY %s ' % order_by_clause) or ''
4520 def _search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False, access_rights_uid=None):
4522 Private implementation of the search() method, which allows specifying the uid to use for the access right check.
4523 This is useful for example when filling in the selection list for a drop-down and avoiding access rights errors,
4524 by specifying ``access_rights_uid=1`` to bypass access rights check, but not ir.rules!
4525 This is ok at the security level because this method is private and not callable through XML-RPC.
4527 :param access_rights_uid: optional user ID to use when checking access rights
4528 (not for ir.rules, this is only for ir.model.access)
4532 self.check_access_rights(cr, access_rights_uid or user, 'read')
4534 # For transient models, restrict access to the current user, except for the super-user
4535 if self.is_transient() and self._log_access and user != SUPERUSER_ID:
4536 args = expression.AND(([('create_uid', '=', user)], args or []))
4538 query = self._where_calc(cr, user, args, context=context)
4539 self._apply_ir_rules(cr, user, query, 'read', context=context)
4540 order_by = self._generate_order_by(order, query)
4541 from_clause, where_clause, where_clause_params = query.get_sql()
4543 where_str = where_clause and (" WHERE %s" % where_clause) or ''
4546 # Ignore order, limit and offset when just counting, they don't make sense and could
4548 query_str = 'SELECT count(1) FROM ' + from_clause + where_str
4549 cr.execute(query_str, where_clause_params)
4553 limit_str = limit and ' limit %d' % limit or ''
4554 offset_str = offset and ' offset %d' % offset or ''
4555 query_str = 'SELECT "%s".id FROM ' % self._table + from_clause + where_str + order_by + limit_str + offset_str
4556 cr.execute(query_str, where_clause_params)
4559 # TDE note: with auto_join, we could have several lines about the same result
4560 # i.e. a lead with several unread messages; we uniquify the result using
4561 # a fast way to do it while preserving order (http://www.peterbe.com/plog/uniqifiers-benchmark)
4562 def _uniquify_list(seq):
4564 return [x for x in seq if x not in seen and not seen.add(x)]
4566 return _uniquify_list([x[0] for x in res])
4568 # returns the different values ever entered for one field
4569 # this is used, for example, in the client when the user hits enter on
4571 def distinct_field_get(self, cr, uid, field, value, args=None, offset=0, limit=None):
4574 if field in self._inherit_fields:
4575 return self.pool[self._inherit_fields[field][0]].distinct_field_get(cr, uid, field, value, args, offset, limit)
4577 return self._columns[field].search(cr, self, args, field, value, offset, limit, uid)
4579 def copy_data(self, cr, uid, id, default=None, context=None):
4581 Copy given record's data with all its fields values
4583 :param cr: database cursor
4584 :param uid: current user id
4585 :param id: id of the record to copy
4586 :param default: field values to override in the original values of the copied record
4587 :type default: dictionary
4588 :param context: context arguments, like lang, time zone
4589 :type context: dictionary
4590 :return: dictionary containing all the field values
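For illustration only, the returned dictionary for a record with a
one2many and a many2many field could look like (field names are
hypothetical)::

    {'name': 'Foo',
     'line_ids': [(0, 0, {'product_id': 1}), (0, 0, {'product_id': 2})],
     'tag_ids': [(6, 0, [3, 4])]}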
4596 # avoid recursion through already copied records in case of circular relationship
4597 seen_map = context.setdefault('__copy_data_seen', {})
4598 if id in seen_map.setdefault(self._name, []):
4600 seen_map[self._name].append(id)
4604 if 'state' not in default:
4605 if 'state' in self._defaults:
4606 if callable(self._defaults['state']):
4607 default['state'] = self._defaults['state'](self, cr, uid, context)
4609 default['state'] = self._defaults['state']
4611 # build a blacklist of fields that should not be copied
4612 blacklist = set(MAGIC_COLUMNS + ['parent_left', 'parent_right'])
4613 def blacklist_given_fields(obj):
4614 # blacklist the fields that are given by inheritance
4615 for other, field_to_other in obj._inherits.items():
4616 blacklist.add(field_to_other)
4617 if field_to_other in default:
4618 # all the fields of 'other' are given by the record: default[field_to_other],
4619 # except the ones redefined in self
4620 blacklist.update(set(self.pool[other]._all_columns) - set(self._columns))
4622 blacklist_given_fields(self.pool[other])
4623 # blacklist deprecated fields
4624 for name, field in obj._columns.items():
4625 if field.deprecated:
4628 blacklist_given_fields(self)
4631 fields_to_copy = dict((f,fi) for f, fi in self._all_columns.iteritems()
4634 if f not in blacklist)
4636 data = self.read(cr, uid, [id], fields_to_copy.keys(), context=context)
4640 raise IndexError( _("Record #%d of %s not found, cannot copy!") %( id, self._name))
4643 for f, colinfo in fields_to_copy.iteritems():
4644 field = colinfo.column
4645 if field._type == 'many2one':
4646 res[f] = data[f] and data[f][0]
4647 elif field._type == 'one2many':
4648 other = self.pool[field._obj]
4649 # duplicate following the order of the ids because we'll rely on
4650 # it later for copying translations in copy_translations()!
4651 lines = [other.copy_data(cr, uid, line_id, context=context) for line_id in sorted(data[f])]
4652 # the lines are duplicated using the wrong (old) parent, but then
4653 # are reassigned to the correct one thanks to the (0, 0, ...)
4654 res[f] = [(0, 0, line) for line in lines if line]
4655 elif field._type == 'many2many':
4656 res[f] = [(6, 0, data[f])]
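# Illustrative sketch, not part of the original source: for a hypothetical
# record with a plain 'name' field, a many2one 'partner_id', a one2many
# 'line_ids' and a many2many 'tag_ids', copy_data() returns roughly:
#
#     {
#         'name': 'Some name',
#         'partner_id': 7,                    # many2one -> bare id
#         'line_ids': [(0, 0, {...}), ...],   # one2many -> create commands
#         'tag_ids': [(6, 0, [1, 2, 3])],     # many2many -> replace command
#     }
#
# i.e. a dictionary ready to be passed to create() by copy().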
4662 def copy_translations(self, cr, uid, old_id, new_id, context=None):
4666 # avoid recursion through already copied records in case of circular relationship
4667 seen_map = context.setdefault('__copy_translations_seen',{})
4668 if old_id in seen_map.setdefault(self._name,[]):
4670 seen_map[self._name].append(old_id)
4672 trans_obj = self.pool.get('ir.translation')
4673 # TODO it seems fields_get can be replaced by _all_columns (no need for translation)
4674 fields = self.fields_get(cr, uid, context=context)
4676 for field_name, field_def in fields.items():
4677 # removing the lang to compare untranslated values
4678 context_wo_lang = dict(context, lang=None)
4679 old_record, new_record = self.browse(cr, uid, [old_id, new_id], context=context_wo_lang)
4680 # we must recursively copy the translations for o2o and o2m
4681 if field_def['type'] == 'one2many':
4682 target_obj = self.pool[field_def['relation']]
4683 # here we rely on the order of the ids to match the translations
4684 # as foreseen in copy_data()
4685 old_children = sorted(r.id for r in old_record[field_name])
4686 new_children = sorted(r.id for r in new_record[field_name])
4687 for (old_child, new_child) in zip(old_children, new_children):
4688 target_obj.copy_translations(cr, uid, old_child, new_child, context=context)
4689 # for translatable fields, the translations themselves must be copied
4690 elif field_def.get('translate'):
4691 if field_name in self._columns:
4692 trans_name = self._name + "," + field_name
4695 elif field_name in self._inherit_fields:
4696 trans_name = self._inherit_fields[field_name][0] + "," + field_name
4697 # get the id of the parent record to set the translation
4698 inherit_field_name = self._inherit_fields[field_name][1]
4699 target_id = new_record[inherit_field_name].id
4700 source_id = old_record[inherit_field_name].id
4704 trans_ids = trans_obj.search(cr, uid, [
4705 ('name', '=', trans_name),
4706 ('res_id', '=', source_id)
4708 user_lang = context.get('lang')
4709 for record in trans_obj.read(cr, uid, trans_ids, context=context):
4711 # remove source to avoid triggering _set_src
4712 del record['source']
4713 record.update({'res_id': target_id})
4714 if user_lang and user_lang == record['lang']:
4715 # 'source' to force the call to _set_src
4716 # 'value' needed if value is changed in copy(), want to see the new_value
4717 record['source'] = old_record[field_name]
4718 record['value'] = new_record[field_name]
4719 trans_obj.create(cr, uid, record, context=context)
4721 @api.returns('self', lambda value: value.id)
4722 def copy(self, cr, uid, id, default=None, context=None):
4723 """ copy(default=None)
4725 Duplicate the record with the given id, updating it with default values
4727 :param dict default: dictionary of field values to override in the
4728 original values of the copied record, e.g.: ``{'field_name': overridden_value, ...}``
4729 :returns: new record
4734 context = context.copy()
4735 data = self.copy_data(cr, uid, id, default, context)
4736 new_id = self.create(cr, uid, data, context)
4737 self.copy_translations(cr, uid, id, new_id, context)
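# Illustrative usage sketch (hypothetical model, id and default values, not
# from the original source):
#
#     partner_obj = self.pool['res.partner']
#     new_id = partner_obj.copy(cr, uid, 42,
#                               default={'name': 'Copy of Foo'},
#                               context=context)
#
# The duplicated record gets its values from copy_data(), is created with
# create(), and its translations are carried over by copy_translations().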
4741 @api.returns('self')
4743 """ exists() -> records
4745 Returns the subset of records in `self` that exist, and marks deleted
4746 records as such in cache. It can be used as a test on records::
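    if record.exists():
        ...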
4751 By convention, new records are returned as existing.
4753 ids = filter(None, self._ids) # ids to check in database
4756 query = """SELECT id FROM "%s" WHERE id IN %%s""" % self._table
4757 self._cr.execute(query, (ids,))
4758 ids = ([r[0] for r in self._cr.fetchall()] + # ids in database
4759 [id for id in self._ids if not id]) # new ids
4760 existing = self.browse(ids)
4761 if len(existing) < len(self):
4762 # mark missing records in cache with a failed value
4763 exc = MissingError(_("Record does not exist or has been deleted."))
4764 (self - existing)._cache.update(FailedValue(exc))
4767 def check_recursion(self, cr, uid, ids, context=None, parent=None):
4768 _logger.warning("You are using deprecated %s.check_recursion(). Please use '_check_recursion()' instead!" % \
4770 assert parent is None or parent in self._columns or parent in self._inherit_fields,\
4771 "The 'parent' parameter passed to check_recursion() must be None or a valid field name"
4772 return self._check_recursion(cr, uid, ids, context, parent)
4774 def _check_recursion(self, cr, uid, ids, context=None, parent=None):
4776 Verifies that there is no loop in a hierarchical structure of records,
4777 by following the parent relationship using the **parent** field until a loop
4778 is detected or until a top-level record is found.
4780 :param cr: database cursor
4781 :param uid: current user id
4782 :param ids: list of ids of records to check
4783 :param parent: optional parent field name (default: ``self._parent_name = parent_id``)
4784 :return: **True** if the operation can proceed safely, or **False** if an infinite loop is detected.
4787 parent = self._parent_name
4789 # must ignore 'active' flag, ir.rules, etc. => direct SQL query
4790 query = 'SELECT "%s" FROM "%s" WHERE id = %%s' % (parent, self._table)
4793 while current_id is not None:
4794 cr.execute(query, (current_id,))
4795 result = cr.fetchone()
4796 current_id = result[0] if result else None
4797 if current_id == id:
4801 def _check_m2m_recursion(self, cr, uid, ids, field_name):
4803 Verifies that there is no loop in a hierarchical structure of records,
4804 by following the relationship described by the many2many **field_name** until a loop
4805 is detected or until a top-level record is found.
4807 :param cr: database cursor
4808 :param uid: current user id
4809 :param ids: list of ids of records to check
4810 :param field_name: field to check
4811 :return: **True** if the operation can proceed safely, or **False** if an infinite loop is detected.
4814 field = self._all_columns.get(field_name)
4815 field = field.column if field else None
4816 if not field or field._type != 'many2many' or field._obj != self._name:
4817 # field must be a many2many on itself
4818 raise ValueError('invalid field_name: %r' % (field_name,))
4820 query = 'SELECT distinct "%s" FROM "%s" WHERE "%s" IN %%s' % (field._id2, field._rel, field._id1)
4824 for i in range(0, len(ids_parent), cr.IN_MAX):
4826 sub_ids_parent = ids_parent[i:j]
4827 cr.execute(query, (tuple(sub_ids_parent),))
4828 ids_parent2.extend(filter(None, map(lambda x: x[0], cr.fetchall())))
4829 ids_parent = ids_parent2
4830 for i in ids_parent:
4835 def _get_external_ids(self, cr, uid, ids, *args, **kwargs):
4836 """Retrieve the External ID(s) of any database record.
4838 **Synopsis**: ``_get_xml_ids(cr, uid, ids) -> { 'id': ['module.xml_id'] }``
4840 :return: map of ids to the list of their fully qualified External IDs
4841 in the form ``module.key``, or an empty list when there's no External
4842 ID for a record, e.g.::
4844 { 'id': ['module.ext_id', 'module.ext_id_bis'],
4847 ir_model_data = self.pool.get('ir.model.data')
4848 data_ids = ir_model_data.search(cr, uid, [('model', '=', self._name), ('res_id', 'in', ids)])
4849 data_results = ir_model_data.read(cr, uid, data_ids, ['module', 'name', 'res_id'])
4852 # can't use dict.fromkeys() as the list would be shared!
4854 for record in data_results:
4855 result[record['res_id']].append('%(module)s.%(name)s' % record)
4858 def get_external_id(self, cr, uid, ids, *args, **kwargs):
4859 """Retrieve the External ID of any database record, if there
4860 is one. This method works as a possible implementation
4861 for a function field, to be able to add it to any
4862 model object easily, referencing it as ``Model.get_external_id``.
4864 When multiple External IDs exist for a record, only one
4865 of them is returned (randomly).
4867 :return: map of ids to their fully qualified XML ID,
4868 defaulting to an empty string when there's none
4869 (to be usable as a function field),
4872 { 'id': 'module.ext_id',
4875 results = self._get_xml_ids(cr, uid, ids)
4876 for k, v in results.iteritems():
4883 # backwards compatibility
4884 get_xml_id = get_external_id
4885 _get_xml_ids = _get_external_ids
4887 def print_report(self, cr, uid, ids, name, data, context=None):
4889 Render the report `name` for the given IDs. The report must be defined
4890 for this model, not another.
4892 report = self.pool['ir.actions.report.xml']._lookup_report(cr, name)
4893 assert self._name == report.table
4894 return report.create(cr, uid, ids, data, context)
4898 def is_transient(cls):
4899 """ Return whether the model is transient.
4901 See :class:`TransientModel`.
4904 return cls._transient
4906 def _transient_clean_rows_older_than(self, cr, seconds):
4907 assert self._transient, "Model %s is not transient, it cannot be vacuumed!" % self._name
4908 # Never delete rows used in last 5 minutes
4909 seconds = max(seconds, 300)
4910 query = ("SELECT id FROM " + self._table + " WHERE"
4911 " COALESCE(write_date, create_date, (now() at time zone 'UTC'))::timestamp"
4912 " < ((now() at time zone 'UTC') - interval %s)")
4913 cr.execute(query, ("%s seconds" % seconds,))
4914 ids = [x[0] for x in cr.fetchall()]
4915 self.unlink(cr, SUPERUSER_ID, ids)
4917 def _transient_clean_old_rows(self, cr, max_count):
4918 # Check how many rows we have in the table
4919 cr.execute("SELECT count(*) AS row_count FROM " + self._table)
4921 if res[0][0] <= max_count:
4922 return # max not reached, nothing to do
4923 self._transient_clean_rows_older_than(cr, 300)
4925 def _transient_vacuum(self, cr, uid, force=False):
4926 """Clean the transient records.
4928 This unlinks old records from the transient model tables whenever the
4929 "_transient_max_count" or "_max_age" conditions (if any) are reached.
4930 Actual cleaning will happen only once every "_transient_check_time" calls.
4931 This means this method can be called frequently (e.g. whenever
4932 a new record is created).
4933 Example with both max_hours and max_count active:
4934 Suppose max_hours = 0.2 (i.e. 12 minutes), max_count = 20, there are 55 rows in the
4935 table, 10 created/changed in the last 5 minutes, an additional 12 created/changed between
4936 5 and 10 minutes ago, the rest created/changed more than 12 minutes ago.
4937 - age based vacuum will leave the 22 rows created/changed in the last 12 minutes
4938 - count based vacuum will wipe out another 12 rows. Not just 2, otherwise each addition
4939 would immediately cause the maximum to be reached again.
4940 - the 10 rows that have been created/changed the last 5 minutes will NOT be deleted
4942 assert self._transient, "Model %s is not transient, it cannot be vacuumed!" % self._name
4943 _transient_check_time = 20 # arbitrary limit on vacuum executions
4944 self._transient_check_count += 1
4945 if not force and (self._transient_check_count < _transient_check_time):
4946 return True # no vacuum cleaning this time
4947 self._transient_check_count = 0
4949 # Age-based expiration
4950 if self._transient_max_hours:
4951 self._transient_clean_rows_older_than(cr, self._transient_max_hours * 60 * 60)
4953 # Count-based expiration
4954 if self._transient_max_count:
4955 self._transient_clean_old_rows(cr, self._transient_max_count)
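# Illustrative sketch (hypothetical wizard model, not from the original
# source): a TransientModel tunes the vacuum thresholds used above through
# class attributes, e.g.:
#
#     class my_wizard(TransientModel):
#         _name = 'my.wizard'
#         _transient_max_count = 200    # count-based expiration
#         _transient_max_hours = 1.0    # age-based expiration, in hours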
4959 def resolve_2many_commands(self, cr, uid, field_name, commands, fields=None, context=None):
4960 """ Serializes one2many and many2many commands into record dictionaries
4961 (as if all the records came from the database via a read()). This
4962 method is aimed at onchange methods on one2many and many2many fields.
4964 Because commands might be creation commands, not all record dicts
4965 will contain an ``id`` field. Commands matching an existing record
4966 will have an ``id``.
4968 :param field_name: name of the one2many or many2many field matching the commands
4969 :type field_name: str
4970 :param commands: one2many or many2many commands to execute on ``field_name``
4971 :type commands: list((int|False, int|False, dict|False))
4972 :param fields: list of fields to read from the database, when applicable
4973 :type fields: list(str)
4974 :returns: records in a shape similar to that returned by ``read()``
4975 (except records may be missing the ``id`` field if they don't exist in db)
4978 result = [] # result (list of dict)
4979 record_ids = [] # ids of records to read
4980 updates = {} # {id: dict} of updates on particular records
4982 for command in commands or []:
4983 if not isinstance(command, (list, tuple)):
4984 record_ids.append(command)
4985 elif command[0] == 0:
4986 result.append(command[2])
4987 elif command[0] == 1:
4988 record_ids.append(command[1])
4989 updates.setdefault(command[1], {}).update(command[2])
4990 elif command[0] in (2, 3):
4991 record_ids = [id for id in record_ids if id != command[1]]
4992 elif command[0] == 4:
4993 record_ids.append(command[1])
4994 elif command[0] == 5:
4995 result, record_ids = [], []
4996 elif command[0] == 6:
4997 result, record_ids = [], list(command[2])
4999 # read the records and apply the updates
5000 other_model = self.pool[self._all_columns[field_name].column._obj]
5001 for record in other_model.read(cr, uid, record_ids, fields=fields, context=context):
5002 record.update(updates.get(record['id'], {}))
5003 result.append(record)
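# Illustrative sketch (hypothetical field and ids, not from the original
# source): for a one2many field 'line_ids',
#
#     commands = [(0, 0, {'name': 'new line'}),   # create
#                 (1, 7, {'name': 'renamed'}),    # update existing id 7
#                 (4, 8, False)]                  # link existing id 8
#     self.resolve_2many_commands(cr, uid, 'line_ids', commands,
#                                 fields=['name'], context=context)
#
# would return roughly:
#
#     [{'name': 'new line'},            # no 'id': not in database yet
#      {'id': 7, 'name': 'renamed'},    # read from db, then updated
#      {'id': 8, 'name': ...}]          # read from db as-is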
5007 # for backward compatibility
5008 resolve_o2m_commands_to_record_dicts = resolve_2many_commands
5010 def search_read(self, cr, uid, domain=None, fields=None, offset=0, limit=None, order=None, context=None):
5012 Performs a ``search()`` followed by a ``read()``.
5014 :param cr: database cursor
5015 :param uid: current user id
5016 :param domain: Search domain, see ``args`` parameter in ``search()``. Defaults to an empty domain that will match all records.
5017 :param fields: List of fields to read, see ``fields`` parameter in ``read()``. Defaults to all fields.
5018 :param offset: Number of records to skip, see ``offset`` parameter in ``search()``. Defaults to 0.
5019 :param limit: Maximum number of records to return, see ``limit`` parameter in ``search()``. Defaults to no limit.
5020 :param order: Columns to sort result, see ``order`` parameter in ``search()``. Defaults to no sort.
5021 :param context: context arguments.
5022 :return: List of dictionaries containing the asked fields.
5023 :rtype: List of dictionaries.
5026 record_ids = self.search(cr, uid, domain or [], offset=offset, limit=limit, order=order, context=context)
5030 if fields and fields == ['id']:
5031 # shortcut read if we only want the ids
5032 return [{'id': id} for id in record_ids]
5034 # read() ignores active_test, but it would forward it to any downstream search call
5035 # (e.g. for x2m or function fields), and this is not the desired behavior: the flag
5036 # was presumably only meant for the main search().
5037 # TODO: Move this to read() directly?
5038 read_ctx = dict(context or {})
5039 read_ctx.pop('active_test', None)
5041 result = self.read(cr, uid, record_ids, fields, context=read_ctx)
5042 if len(result) <= 1:
5046 index = dict((r['id'], r) for r in result)
5047 return [index[x] for x in record_ids if x in index]
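# Illustrative usage sketch (hypothetical model, domain and fields, not
# from the original source):
#
#     partners = self.pool['res.partner'].search_read(
#         cr, uid,
#         domain=[('customer', '=', True)],
#         fields=['name', 'email'],
#         limit=10, order='name',
#         context=context)
#     # -> [{'id': 7, 'name': ..., 'email': ...}, ...] in search order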
5049 def _register_hook(self, cr):
5050 """ stuff to do right after the registry is built """
5054 def _patch_method(cls, name, method):
5055 """ Monkey-patch a method for all instances of this model. This replaces
5056 the method called `name` by `method` in the given class.
5057 The original method is then accessible via ``method.origin``, and it
5058 can be restored with :meth:`~._revert_method`.
5063 def do_write(self, values):
5064 # do stuff, and call the original method
5065 return do_write.origin(self, values)
5067 # patch method write of model
5068 model._patch_method('write', do_write)
5070 # this will call do_write
5071 records = model.search([...])
5074 # restore the original method
5075 model._revert_method('write')
5077 origin = getattr(cls, name)
5078 method.origin = origin
5079 # propagate decorators from origin to method, and apply api decorator
5080 wrapped = api.guess(api.propagate(origin, method))
5081 wrapped.origin = origin
5082 setattr(cls, name, wrapped)
5085 def _revert_method(cls, name):
5086 """ Revert the original method called `name` in the given class.
5087 See :meth:`~._patch_method`.
5089 method = getattr(cls, name)
5090 setattr(cls, name, method.origin)
5095 # An instance represents an ordered collection of records in a given
5096 # execution environment. The instance object refers to the environment, and
5097 # the records themselves are represented by their cache dictionary. The 'id'
5098 # of each record is found in its corresponding cache dictionary.
5100 # This design has the following advantages:
5101 # - cache access is direct and thus fast;
5102 # - one can consider records without an 'id' (see new records);
5103 # - the global cache is only an index to "resolve" a record 'id'.
5107 def _browse(cls, env, ids):
5108 """ Create an instance attached to `env`; `ids` is a tuple of record
5111 records = object.__new__(cls)
5114 env.prefetch[cls._name].update(ids)
5118 def browse(self, cr, uid, arg=None, context=None):
5119 ids = _normalize_ids(arg)
5120 #assert all(isinstance(id, IdType) for id in ids), "Browsing invalid ids: %s" % ids
5121 return self._browse(Environment(cr, uid, context or {}), ids)
5124 def browse(self, arg=None):
5125 """ browse([ids]) -> records
5127 Returns a recordset for the ids provided as parameter in the current
5130 Can take no ids, a single id or a sequence of ids.
5132 ids = _normalize_ids(arg)
5133 #assert all(isinstance(id, IdType) for id in ids), "Browsing invalid ids: %s" % ids
5134 return self._browse(self.env, ids)
5137 # Internal properties, for manipulating the instance's implementation
5142 """ List of actual record ids in this recordset (ignores placeholder
5143 ids for records to create)
5145 return filter(None, list(self._ids))
5147 # backward-compatibility with former browse records
5148 _cr = property(lambda self: self.env.cr)
5149 _uid = property(lambda self: self.env.uid)
5150 _context = property(lambda self: self.env.context)
5153 # Conversion methods
5156 def ensure_one(self):
5157 """ Verifies that the current recorset holds a single record. Raises
5158 an exception otherwise.
5162 raise except_orm("ValueError", "Expected singleton: %s" % self)
5164 def with_env(self, env):
5165 """ Returns a new version of this recordset attached to the provided
5168 :type env: :class:`~openerp.api.Environment`
5170 return self._browse(env, self._ids)
5172 def sudo(self, user=SUPERUSER_ID):
5173 """ sudo([user=SUPERUSER])
5175 Returns a new version of this recordset attached to the provided
5178 return self.with_env(self.env(user=user))
5180 def with_context(self, *args, **kwargs):
5181 """ with_context([context][, **overrides]) -> records
5183 Returns a new version of this recordset attached to an extended
5186 The extended context is either the provided ``context`` with
5187 ``overrides`` merged into it, or the *current* context with
5188 ``overrides`` merged into it, e.g.::
5190 # current context is {'key1': True}
5191 r2 = records.with_context({}, key2=True)
5192 # -> r2._context is {'key2': True}
5193 r2 = records.with_context(key2=True)
5194 # -> r2._context is {'key1': True, 'key2': True}
5196 context = dict(args[0] if args else self._context, **kwargs)
5197 return self.with_env(self.env(context=context))
5199 def _convert_to_cache(self, values, update=False, validate=True):
5200 """ Convert the `values` dictionary into cached values.
5202 :param update: whether the conversion is made for updating `self`;
5203 this is necessary for interpreting the commands of *2many fields
5204 :param validate: whether values must be checked
5206 fields = self._fields
5207 target = self if update else self.browse()
5209 name: fields[name].convert_to_cache(value, target, validate=validate)
5210 for name, value in values.iteritems()
5214 def _convert_to_write(self, values):
5215 """ Convert the `values` dictionary into the format of :meth:`write`. """
5216 fields = self._fields
5218 for name, value in values.iteritems():
5220 value = fields[name].convert_to_write(value)
5221 if not isinstance(value, NewId):
5222 result[name] = value
5226 # Record traversal and update
5229 def _mapped_func(self, func):
5230 """ Apply function `func` on all records in `self`, and return the
5231 result as a list or a recordset (if `func` returns recordsets).
5233 vals = [func(rec) for rec in self]
5234 val0 = vals[0] if vals else func(self)
5235 if isinstance(val0, BaseModel):
5236 return reduce(operator.or_, vals, val0)
5239 def mapped(self, func):
5240 """ Apply `func` on all records in `self`, and return the result as a
5241 list or a recordset (if `func` returns recordsets). In the latter
5242 case, the order of the returned recordset is arbitrary.
5244 :param func: a function or a dot-separated sequence of field names
5246 if isinstance(func, basestring):
5248 for name in func.split('.'):
5249 recs = recs._mapped_func(operator.itemgetter(name))
5252 return self._mapped_func(func)
5254 def _mapped_cache(self, name_seq):
5255 """ Same as `~.mapped`, but `name_seq` is a dot-separated sequence of
5256 field names, and only cached values are used.
5259 for name in name_seq.split('.'):
5260 field = recs._fields[name]
5261 null = field.null(self.env)
5262 recs = recs.mapped(lambda rec: rec._cache.get(field, null))
5265 def filtered(self, func):
5266 """ Select the records in `self` such that `func(rec)` is true, and
5267 return them as a recordset.
5269 :param func: a function or a dot-separated sequence of field names
5271 if isinstance(func, basestring):
5273 func = lambda rec: filter(None, rec.mapped(name))
5274 return self.browse([rec.id for rec in self if func(rec)])
5276 def sorted(self, key=None):
5277 """ Return the recordset `self` ordered by `key` """
5279 return self.search([('id', 'in', self.ids)])
5281 return self.browse(map(int, sorted(self, key=key)))
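# Illustrative sketch (hypothetical model and fields, not from the original
# source): the traversal helpers above are typically chained on recordsets:
#
#     names = orders.mapped('partner_id.name')            # list of strings
#     partners = orders.mapped('partner_id')              # recordset, deduplicated
#     cheap = orders.filtered(lambda o: o.amount_total < 100.0)
#     by_date = orders.sorted(key=lambda o: o.date_order)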
5283 def update(self, values):
5284 """ Update record `self[0]` with `values`. """
5285 for name, value in values.iteritems():
5289 # New records - represent records that do not exist in the database yet;
5290 # they are used to compute default values and perform onchanges.
5294 def new(self, values={}):
5295 """ new([values]) -> record
5297 Return a new record instance attached to the current environment and
5298 initialized with the provided ``values``. The record is *not* created
5299 in database, it only exists in memory.
5301 record = self.browse([NewId()])
5302 record._cache.update(record._convert_to_cache(values, update=True))
5304 if record.env.in_onchange:
5305 # The cache update does not set inverse fields, so do it manually.
5306 # This is useful for computing a function field on secondary
5307 # records, if that field depends on the main record.
5309 field = self._fields.get(name)
5311 for invf in field.inverse_fields:
5312 invf._update(record[name], record)
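# Illustrative sketch (hypothetical model and values, not from the original
# source): new() builds an in-memory record, typically for onchange or
# default computations:
#
#     partner = self.env['res.partner'].new({'name': 'Draft partner'})
#     # partner lives only in the cache: its id is a NewId and nothing is
#     # written to the database unless create() is called with its values.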
5317 # Dirty flag, to mark records modified (in draft mode)
5322 """ Return whether any record in `self` is dirty. """
5323 dirty = self.env.dirty
5324 return any(record in dirty for record in self)
5327 def _dirty(self, value):
5328 """ Mark the records in `self` as dirty. """
5330 map(self.env.dirty.add, self)
5332 map(self.env.dirty.discard, self)
5338 def __nonzero__(self):
5339 """ Test whether `self` is nonempty. """
5340 return bool(getattr(self, '_ids', True))
5343 """ Return the size of `self`. """
5344 return len(self._ids)
5347 """ Return an iterator over `self`. """
5348 for id in self._ids:
5349 yield self._browse(self.env, (id,))
5351 def __contains__(self, item):
5352 """ Test whether `item` (record or field name) is an element of `self`.
5353 In the first case, the test is fully equivalent to::
5355 any(item == record for record in self)
5357 if isinstance(item, BaseModel) and self._name == item._name:
5358 return len(item) == 1 and item.id in self._ids
5359 elif isinstance(item, basestring):
5360 return item in self._fields
5362 raise except_orm("ValueError", "Mixing apples and oranges: %s in %s" % (item, self))
5364 def __add__(self, other):
5365 """ Return the concatenation of two recordsets. """
5366 if not isinstance(other, BaseModel) or self._name != other._name:
5367 raise except_orm("ValueError", "Mixing apples and oranges: %s + %s" % (self, other))
5368 return self.browse(self._ids + other._ids)
5370 def __sub__(self, other):
5371 """ Return the recordset of all the records in `self` that are not in `other`. """
5372 if not isinstance(other, BaseModel) or self._name != other._name:
5373 raise except_orm("ValueError", "Mixing apples and oranges: %s - %s" % (self, other))
5374 other_ids = set(other._ids)
5375 return self.browse([id for id in self._ids if id not in other_ids])
5377 def __and__(self, other):
5378 """ Return the intersection of two recordsets.
5379 Note that recordset order is not preserved.
5381 if not isinstance(other, BaseModel) or self._name != other._name:
5382 raise except_orm("ValueError", "Mixing apples and oranges: %s & %s" % (self, other))
5383 return self.browse(set(self._ids) & set(other._ids))
5385 def __or__(self, other):
5386 """ Return the union of two recordsets.
5387 Note that recordset order is not preserved.
5389 if not isinstance(other, BaseModel) or self._name != other._name:
5390 raise except_orm("ValueError", "Mixing apples and oranges: %s | %s" % (self, other))
5391 return self.browse(set(self._ids) | set(other._ids))
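# Illustrative sketch (hypothetical recordsets, not from the original
# source): recordsets of the same model combine with Python operators:
#
#     both = draft_orders + done_orders        # concatenation, order kept
#     only_draft = draft_orders - done_orders  # difference, left order kept
#     common = draft_orders & done_orders      # intersection, order not kept
#     either = draft_orders | done_orders      # union, order not kept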
5393 def __eq__(self, other):
5394 """ Test whether two recordsets are equivalent (up to reordering). """
5395 if not isinstance(other, BaseModel):
5397 _logger.warning("Comparing apples and oranges: %s == %s", self, other)
5399 return self._name == other._name and set(self._ids) == set(other._ids)
5401 def __ne__(self, other):
5402 return not self == other
5404 def __lt__(self, other):
5405 if not isinstance(other, BaseModel) or self._name != other._name:
5406 raise except_orm("ValueError", "Mixing apples and oranges: %s < %s" % (self, other))
5407 return set(self._ids) < set(other._ids)
5409 def __le__(self, other):
5410 if not isinstance(other, BaseModel) or self._name != other._name:
5411 raise except_orm("ValueError", "Mixing apples and oranges: %s <= %s" % (self, other))
5412 return set(self._ids) <= set(other._ids)
5414 def __gt__(self, other):
5415 if not isinstance(other, BaseModel) or self._name != other._name:
5416 raise except_orm("ValueError", "Mixing apples and oranges: %s > %s" % (self, other))
5417 return set(self._ids) > set(other._ids)
5419 def __ge__(self, other):
5420 if not isinstance(other, BaseModel) or self._name != other._name:
5421 raise except_orm("ValueError", "Mixing apples and oranges: %s >= %s" % (self, other))
5422 return set(self._ids) >= set(other._ids)
5428 return "%s%s" % (self._name, getattr(self, '_ids', ""))
5430 def __unicode__(self):
5431 return unicode(str(self))
5436 if hasattr(self, '_ids'):
5437 return hash((self._name, frozenset(self._ids)))
5439 return hash(self._name)
5441 def __getitem__(self, key):
5442 """ If `key` is an integer or a slice, return the corresponding record
5443 selection as an instance (attached to `self.env`).
5444 Otherwise read the field `key` of the first record in `self`.
5448 inst = model.search(dom) # inst is a recordset
5449 r4 = inst[3] # fourth record in inst
5450 rs = inst[10:20] # subset of inst
5451 nm = rs['name'] # name of first record in inst
5453 if isinstance(key, basestring):
5454 # important: one must call the field's getter
5455 return self._fields[key].__get__(self, type(self))
5456 elif isinstance(key, slice):
5457 return self._browse(self.env, self._ids[key])
5459 return self._browse(self.env, (self._ids[key],))
5461 def __setitem__(self, key, value):
5462 """ Assign the field `key` to `value` in record `self`. """
5463 # important: one must call the field's setter
5464 return self._fields[key].__set__(self, value)
5467 # Cache and recomputation management
5472 """ Return the cache of `self`, mapping field names to values. """
5473 return RecordCache(self)
5476 def _in_cache_without(self, field):
5477 """ Make sure `self` is present in cache (for prefetching), and return
5478 the records of model `self` in cache that have no value for `field`
5479 (:class:`Field` instance).
5482 prefetch_ids = env.prefetch[self._name]
5483 prefetch_ids.update(self._ids)
5484 ids = filter(None, prefetch_ids - set(env.cache[field]))
5485 return self.browse(ids)
5489 """ Clear the records cache.
5492 The record cache is automatically invalidated.
5494 self.invalidate_cache()
5497 def invalidate_cache(self, fnames=None, ids=None):
5498 """ Invalidate the record caches after some records have been modified.
5499 If both `fnames` and `ids` are ``None``, the whole cache is cleared.
5501 :param fnames: the list of modified fields, or ``None`` for all fields
5502 :param ids: the list of modified record ids, or ``None`` for all
5506 return self.env.invalidate_all()
5507 fields = self._fields.values()
5509 fields = map(self._fields.__getitem__, fnames)
5511 # invalidate fields and inverse fields, too
5512 spec = [(f, ids) for f in fields] + \
5513 [(invf, None) for f in fields for invf in f.inverse_fields]
5514 self.env.invalidate(spec)
5517 def modified(self, fnames):
5518 """ Notify that fields have been modified on `self`. This invalidates
5519 the cache, and prepares the recomputation of stored function fields
5520 (new-style fields only).
5522 :param fnames: iterable of field names that have been modified on
5525 # each field knows what to invalidate and recompute
5527 for fname in fnames:
5528 spec += self._fields[fname].modified(self)
5532 for env in self.env.all
5533 for field in env.cache
5535 # invalidate non-stored fields.function which are currently cached
5536 spec += [(f, None) for f in self.pool.pure_function_fields
5537 if f in cached_fields]
5539 self.env.invalidate(spec)
5541 def _recompute_check(self, field):
5542 """ If `field` must be recomputed on some record in `self`, return the
5543 corresponding records that must be recomputed.
5545 return self.env.check_todo(field, self)
5547 def _recompute_todo(self, field):
5548 """ Mark `field` to be recomputed. """
5549 self.env.add_todo(field, self)
5551 def _recompute_done(self, field):
5552 """ Mark `field` as recomputed. """
5553 self.env.remove_todo(field, self)
5556 def recompute(self):
5557 """ Recompute stored function fields. The fields and records to
5558 recompute have been determined by method :meth:`modified`.
5560 while self.env.has_todo():
5561 field, recs = self.env.get_todo()
5562 # evaluate the fields to recompute, and save them to database
5563 for rec, rec1 in zip(recs, recs.with_context(recompute=False)):
5565 values = rec._convert_to_write({
5566 f.name: rec[f.name] for f in field.computed_fields
5569 except MissingError:
5571 # mark the computed fields as done
5572 map(recs._recompute_done, field.computed_fields)
5575 # Generic onchange method
5578 def _has_onchange(self, field, other_fields):
5579 """ Return whether `field` should trigger an onchange event in the
5580 presence of `other_fields`.
5582 # test whether self has an onchange method for field, or field is a
5583 # dependency of any field in other_fields
5584 return field.name in self._onchange_methods or \
5585 any(dep in other_fields for dep in field.dependents)
5588 def _onchange_spec(self, view_info=None):
5589 """ Return the onchange spec from a view description; if not given, the
5590 result of ``self.fields_view_get()`` is used.
5594 # for traversing the XML arch and populating result
5595 def process(node, info, prefix):
5596 if node.tag == 'field':
5597 name = node.attrib['name']
5598 names = "%s.%s" % (prefix, name) if prefix else name
5599 if not result.get(names):
5600 result[names] = node.attrib.get('on_change')
5601 # traverse the subviews included in relational fields
5602 for subinfo in info['fields'][name].get('views', {}).itervalues():
5603 process(etree.fromstring(subinfo['arch']), subinfo, names)
5606 process(child, info, prefix)
5608 if view_info is None:
5609 view_info = self.fields_view_get()
5610 process(etree.fromstring(view_info['arch']), view_info, '')
5613 def _onchange_eval(self, field_name, onchange, result):
5614 """ Apply onchange method(s) for field `field_name` with spec `onchange`
5615 on record `self`. Value assignments are applied on `self`, while
5616 domain and warning messages are put in dictionary `result`.
5618 onchange = onchange.strip()
5621 if onchange in ("1", "true"):
5622 for method in self._onchange_methods.get(field_name, ()):
5623 method_res = method(self)
5626 if 'domain' in method_res:
5627 result.setdefault('domain', {}).update(method_res['domain'])
5628 if 'warning' in method_res:
5629 result['warning'] = method_res['warning']
5633 match = onchange_v7.match(onchange)
5635 method, params = match.groups()
5637 # evaluate params -> tuple
5638 global_vars = {'context': self._context, 'uid': self._uid}
5639 if self._context.get('field_parent'):
5640 class RawRecord(object):
5641 def __init__(self, record):
5642 self._record = record
5643 def __getattr__(self, name):
5644 field = self._record._fields[name]
5645 value = self._record[name]
5646 return field.convert_to_onchange(value)
5647 record = self[self._context['field_parent']]
5648 global_vars['parent'] = RawRecord(record)
5650 key: self._fields[key].convert_to_onchange(val)
5651 for key, val in self._cache.iteritems()
5653 params = eval("[%s]" % params, global_vars, field_vars)
5655 # call onchange method
5656 args = (self._cr, self._uid, self._origin.ids) + tuple(params)
5657 method_res = getattr(self._model, method)(*args)
5658 if not isinstance(method_res, dict):
5660 if 'value' in method_res:
5661 method_res['value'].pop('id', None)
5662 self.update(self._convert_to_cache(method_res['value'], validate=False))
5663 if 'domain' in method_res:
5664 result.setdefault('domain', {}).update(method_res['domain'])
5665 if 'warning' in method_res:
5666 result['warning'] = method_res['warning']
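# Illustrative sketch (hypothetical fields and method, not from the
# original source): the onchange specs handled by _onchange_eval() come
# from the views' on_change attributes, collected by _onchange_spec(), e.g.:
#
#     field_onchange = {
#         'partner_id': '1',                                # new-style: run @api.onchange methods
#         'line_ids': 'onchange_lines(line_ids, context)',  # old v7-style method call
#     }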
5669 def onchange(self, values, field_name, field_onchange):
5670 """ Perform an onchange on the given field.
5672 :param values: dictionary mapping field names to values, giving the
5673 current state of modification
5674 :param field_name: name of the modified field
5675 :param field_onchange: dictionary mapping field names to their
5680 if field_name and field_name not in self._fields:
5683 # determine subfields for field.convert_to_write() below
5685 subfields = defaultdict(set)
5686 for dotname in field_onchange:
5688 secondary.append(dotname)
5689 name, subname = dotname.split('.')
5690 subfields[name].add(subname)
5692 # create a new record with values, and attach `self` to it
5693 with env.do_in_onchange():
5694 record = self.new(values)
5695 values = dict(record._cache)
5696 # attach `self` with a different context (for cache consistency)
5697 record._origin = self.with_context(__onchange=True)
5699 # determine which fields an onchange should be triggered for
5700 todo = set([field_name]) if field_name else set(values)
5703 # dummy assignment: trigger invalidations on the record
5705 value = record[name]
5706 field = self._fields[name]
5707 if not field_name and field.type == 'many2one' and field.delegate and not value:
5708 # do not nullify all fields of parent record for new records
5710 record[name] = value
5712 result = {'value': {}}
5720 with env.do_in_onchange():
5721 # apply field-specific onchange methods
5722 if field_onchange.get(name):
5723 record._onchange_eval(name, field_onchange[name], result)
5725 # force re-evaluation of function fields on secondary records
5726 for field_seq in secondary:
5727 record.mapped(field_seq)
5729 # determine which fields have been modified
5730 for name, oldval in values.iteritems():
5731 field = self._fields[name]
5732 newval = record[name]
5733 if field.type in ('one2many', 'many2many'):
5734 if newval != oldval or newval._dirty:
5735 # put new value in result
5736 result['value'][name] = field.convert_to_write(
5737 newval, record._origin, subfields.get(name),
5741 # keep result: newval may have been dirty before
5744 if newval != oldval:
5745 # put new value in result
5746 result['value'][name] = field.convert_to_write(
5747 newval, record._origin, subfields.get(name),
5751 # clean up result to not return another value
5752 result['value'].pop(name, None)
5754 # At the moment, the client does not support updates on a *2many field
5755 # while this one is modified by the user.
5756 if field_name and self._fields[field_name].type in ('one2many', 'many2many'):
5757 result['value'].pop(field_name, None)
5762 class RecordCache(MutableMapping):
5763 """ Implements a proxy dictionary to read/update the cache of a record.
5764 Upon iteration, it looks like a dictionary mapping field names to
5765 values. However, fields may be used as keys as well.
5767 def __init__(self, records):
5768 self._recs = records
5770 def contains(self, field):
5771 """ Return whether `records[0]` has a value for `field` in cache. """
5772 if isinstance(field, basestring):
5773 field = self._recs._fields[field]
5774 return self._recs.id in self._recs.env.cache[field]
5776 def __contains__(self, field):
5777 """ Return whether `records[0]` has a regular value for `field` in cache. """
5778 if isinstance(field, basestring):
5779 field = self._recs._fields[field]
5780 dummy = SpecialValue(None)
5781 value = self._recs.env.cache[field].get(self._recs.id, dummy)
5782 return not isinstance(value, SpecialValue)
5784 def __getitem__(self, field):
5785 """ Return the cached value of `field` for `records[0]`. """
5786 if isinstance(field, basestring):
5787 field = self._recs._fields[field]
5788 value = self._recs.env.cache[field][self._recs.id]
5789 return value.get() if isinstance(value, SpecialValue) else value
5791 def __setitem__(self, field, value):
5792 """ Assign the cached value of `field` for all records in `records`. """
5793 if isinstance(field, basestring):
5794 field = self._recs._fields[field]
5795 values = dict.fromkeys(self._recs._ids, value)
5796 self._recs.env.cache[field].update(values)
5798 def update(self, *args, **kwargs):
5799 """ Update the cache of all records in `records`. If the argument is a
5800 `SpecialValue`, update all fields (except "magic" columns).
5802 if args and isinstance(args[0], SpecialValue):
5803 values = dict.fromkeys(self._recs._ids, args[0])
5804 for name, field in self._recs._fields.iteritems():
5806 self._recs.env.cache[field].update(values)
5808 return super(RecordCache, self).update(*args, **kwargs)
5810 def __delitem__(self, field):
5811 """ Remove the cached value of `field` for all `records`. """
5812 if isinstance(field, basestring):
5813 field = self._recs._fields[field]
5814 field_cache = self._recs.env.cache[field]
5815 for id in self._recs._ids:
5816 field_cache.pop(id, None)
5819 """ Iterate over the field names with a regular value in cache. """
5820 cache, id = self._recs.env.cache, self._recs.id
5821 dummy = SpecialValue(None)
5822 for name, field in self._recs._fields.iteritems():
5823 if name != 'id' and not isinstance(cache[field].get(id, dummy), SpecialValue):
5827 """ Return the number of fields with a regular value in cache. """
5828 return sum(1 for name in self)
5830 class Model(BaseModel):
5831 """Main super-class for regular database-persisted OpenERP models.
5833 OpenERP models are created by inheriting from this class::
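    class user(Model):
        ...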
5838 The system will later instantiate the class once per database (on
5839 which the class' module is installed).
5842 _register = False # not visible in ORM registry, meant to be python-inherited only
5843 _transient = False # True in a TransientModel
5845 class TransientModel(BaseModel):
5846 """Model super-class for transient records, meant to be temporarily
5847 persisted, and regularly vacuum-cleaned.
5849 A TransientModel has simplified access rights management:
5850 all users can create new records, and may only access the
5851 records they created. The super-user has unrestricted access
5852 to all TransientModel records.
5855 _register = False # not visible in ORM registry, meant to be python-inherited only
5858 class AbstractModel(BaseModel):
5859 """Abstract Model super-class for creating an abstract class meant to be
5860 inherited by regular models (Models or TransientModels) but not meant to
5861 be usable on its own, or persisted.
5863 Technical note: we don't want to make AbstractModel the super-class of
5864 Model or BaseModel because it would not make sense to put the main
5865 definition of persistence methods such as create() in it, and still we
5866 should be able to override them within an AbstractModel.
5868 _auto = False # don't create any database backend for AbstractModels
5869 _register = False # not visible in ORM registry, meant to be python-inherited only
5872 def itemgetter_tuple(items):
5873 """ Fixes itemgetter inconsistency (useful in some cases) of not returning
5874 a tuple if len(items) == 1: always returns an n-tuple where n = len(items)
5879 return lambda gettable: (gettable[items[0]],)
5880 return operator.itemgetter(*items)
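# For instance (illustrative, not from the original source):
#     itemgetter_tuple([1])(row) == (row[1],)
#     itemgetter_tuple([1, 3])(row) == (row[1], row[3])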
5882 def convert_pgerror_23502(model, fields, info, e):
5883 m = re.match(r'^null value in column "(?P<field>\w+)" violates '
5884 r'not-null constraint\n',
5886 field_name = m and m.group('field')
5887 if not m or field_name not in fields:
5888 return {'message': unicode(e)}
5889 message = _(u"Missing required value for the field '%s'.") % field_name
5890 field = fields.get(field_name)
5892 message = _(u"Missing required value for the field '%s' (%s)") % (field['string'], field_name)
5895 'field': field_name,
5898 def convert_pgerror_23505(model, fields, info, e):
5899 m = re.match(r'^duplicate key (?P<field>\w+) violates unique constraint',
5901 field_name = m and m.group('field')
5902 if not m or field_name not in fields:
5903 return {'message': unicode(e)}
5904 message = _(u"The value for the field '%s' already exists.") % field_name
5905 field = fields.get(field_name)
5907 message = _(u"%s This might be '%s' in the current model, or a field "
5908 u"of the same name in an o2m.") % (message, field['string'])
5911 'field': field_name,
5914 PGERROR_TO_OE = defaultdict(
5915 # shape of mapped converters
5916 lambda: (lambda model, fvg, info, pgerror: {'message': unicode(pgerror)}), {
5917 # not_null_violation
5918 '23502': convert_pgerror_23502,
5919 # unique constraint error
5920 '23505': convert_pgerror_23505,
5923 def _normalize_ids(arg, atoms={int, long, str, unicode, NewId}):
5924 """ Normalizes the ids argument for ``browse`` (v7 and v8) to a tuple.
5926 Various implementations were tested on the corpus of all browse() calls
5927 performed during a full crawler run (after having installed all website_*
5928 modules) and this one was the most efficient overall.
5930 A possible bit of correctness was sacrificed by not doing any test on
5931 Iterable and just assuming that any non-atomic type was an iterable of
5936 # much of the corpus is falsy objects (empty list, tuple or set, None)
5940 # `type in set` is significantly faster (because more restrictive) than
5941 # isinstance(arg, set) or issubclass(type, set); and for new-style classes
5942 # obj.__class__ is equivalent to but faster than type(obj). Not relevant
5943 # (and looks much worse) in most cases, but over millions of calls it
5944 # does have a very minor effect.
5945 if arg.__class__ in atoms:
5950 # keep those imports here to avoid dependency cycle errors
5951 from .osv import expression
5952 from .fields import Field, SpecialValue, FailedValue
5954 # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: