1 # -*- coding: utf-8 -*-
2 ##############################################################################
4 # OpenERP, Open Source Management Solution
5 # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
7 # This program is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU Affero General Public License as
9 # published by the Free Software Foundation, either version 3 of the
10 # License, or (at your option) any later version.
12 # This program is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU Affero General Public License for more details.
17 # You should have received a copy of the GNU Affero General Public License
18 # along with this program. If not, see <http://www.gnu.org/licenses/>.
20 ##############################################################################
24 Object Relational Mapping module:
25 * Hierarchical structure
26 * Constraints consistency and validation
27 * Object metadata depends on its status
28 * Optimised processing by complex query (multiple actions at once)
29 * Default field values
30 * Permissions optimisation
    * Persistent object: PostgreSQL database
33 * Multi-level caching system
34 * Two different inheritance mechanisms
35 * Rich set of field types:
36 - classical (varchar, integer, boolean, ...)
37 - relational (one2many, many2one, many2many)
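
    As a rough illustration (the model and fields below are hypothetical and
    not part of this module), a persistent model is typically declared as::

        from openerp import models, fields

        class LibraryBook(models.Model):
            _name = 'library.book'

            name = fields.Char(string='Title', required=True)
            pages = fields.Integer(string='Page Count')
            author_id = fields.Many2one('res.partner', string='Author')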
51 from collections import defaultdict, MutableMapping
52 from inspect import getmembers
55 import dateutil.relativedelta
57 from lxml import etree
60 from . import SUPERUSER_ID
63 from .api import Environment
64 from .exceptions import except_orm, AccessError, MissingError, ValidationError
65 from .osv import fields
66 from .osv.query import Query
67 from .tools import lazy_property, ormcache
68 from .tools.config import config
69 from .tools.misc import CountingStream, DEFAULT_SERVER_DATETIME_FORMAT, DEFAULT_SERVER_DATE_FORMAT
70 from .tools.safe_eval import safe_eval as eval
71 from .tools.translate import _
73 _logger = logging.getLogger(__name__)
74 _schema = logging.getLogger(__name__ + '.schema')
76 regex_order = re.compile('^( *([a-z0-9:_]+|"[a-z0-9:_]+")( *desc| *asc)?( *, *|))+$', re.I)
77 regex_object_name = re.compile(r'^[a-z0-9_.]+$')
78 onchange_v7 = re.compile(r"^(\w+)\((.*)\)$")
80 AUTOINIT_RECALCULATE_STORED_FIELDS = 1000
83 def check_object_name(name):
84 """ Check if the given name is a valid openerp object name.
86 The _name attribute in osv and osv_memory object is subject to
    some restrictions. This function returns True if the given name
    is allowed, and False otherwise.

    TODO: this is an approximation. The goal in this approximation
    is to disallow uppercase characters (in some places, we quote
    table/column names and in others not, which leads to this kind
    of errors:

        psycopg2.ProgrammingError: relation "xxx" does not exist).
97 The same restriction should apply to both osv and osv_memory
98 objects for consistency.
    if regex_object_name.match(name) is None:
        return False
    return True
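
# For illustration, given regex_object_name above:
#
#   check_object_name('res.partner')        -> True
#   check_object_name('account_move_line')  -> True
#   check_object_name('ResPartner')         -> False  (uppercase is rejected)
#   check_object_name('res partner')        -> False  (spaces are rejected)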
105 def raise_on_invalid_object_name(name):
106 if not check_object_name(name):
107 msg = "The _name attribute %s is not valid." % name
109 raise except_orm('ValueError', msg)
111 POSTGRES_CONFDELTYPES = {
119 def intersect(la, lb):
120 return filter(lambda x: x in lb, la)
123 """ Test whether functions `f` and `g` are identical or have the same name """
124 return f == g or getattr(f, '__name__', 0) == getattr(g, '__name__', 1)
126 def fix_import_export_id_paths(fieldname):
    Fixes the id fields in imports and exports, and splits field paths
    on '/'.
131 :param str fieldname: name of the field to import/export
132 :return: split field name
135 fixed_db_id = re.sub(r'([^/])\.id', r'\1/.id', fieldname)
136 fixed_external_id = re.sub(r'([^/]):id', r'\1/id', fixed_db_id)
137 return fixed_external_id.split('/')
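
# For illustration, the two substitutions above yield for example:
#
#   fix_import_export_id_paths('partner_id/name') -> ['partner_id', 'name']
#   fix_import_export_id_paths('partner_id.id')   -> ['partner_id', '.id']
#   fix_import_export_id_paths('partner_id:id')   -> ['partner_id', 'id']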
139 def pg_varchar(size=0):
140 """ Returns the VARCHAR declaration for the provided size:
    * If no size (or an empty or negative size) is provided, return an
      'infinite' VARCHAR
    * Otherwise return a VARCHAR(n)
146 :type int size: varchar size, optional
150 if not isinstance(size, int):
151 raise TypeError("VARCHAR parameter should be an int, got %s"
154 return 'VARCHAR(%d)' % size
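
# For illustration:
#
#   pg_varchar(16)  -> 'VARCHAR(16)'
#   pg_varchar()    -> an unbounded VARCHAR declaration (no explicit length)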
157 FIELDS_TO_PGTYPES = {
158 fields.boolean: 'bool',
159 fields.integer: 'int4',
163 fields.datetime: 'timestamp',
164 fields.binary: 'bytea',
165 fields.many2one: 'int4',
166 fields.serialized: 'text',
169 def get_pg_type(f, type_override=None):
171 :param fields._column f: field to get a Postgres type for
172 :param type type_override: use the provided type for dispatching instead of the field's own type
173 :returns: (postgres_identification_type, postgres_type_specification)
176 field_type = type_override or type(f)
178 if field_type in FIELDS_TO_PGTYPES:
179 pg_type = (FIELDS_TO_PGTYPES[field_type], FIELDS_TO_PGTYPES[field_type])
180 elif issubclass(field_type, fields.float):
182 pg_type = ('numeric', 'NUMERIC')
184 pg_type = ('float8', 'DOUBLE PRECISION')
185 elif issubclass(field_type, (fields.char, fields.reference)):
186 pg_type = ('varchar', pg_varchar(f.size))
187 elif issubclass(field_type, fields.selection):
188 if (isinstance(f.selection, list) and isinstance(f.selection[0][0], int))\
189 or getattr(f, 'size', None) == -1:
190 pg_type = ('int4', 'INTEGER')
192 pg_type = ('varchar', pg_varchar(getattr(f, 'size', None)))
193 elif issubclass(field_type, fields.function):
194 if f._type == 'selection':
195 pg_type = ('varchar', pg_varchar())
197 pg_type = get_pg_type(f, getattr(fields, f._type))
        _logger.warning('%s type not supported!', field_type)
        pg_type = None

    return pg_type
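
# For illustration, get_pg_type() maps column objects to pairs such as:
#
#   get_pg_type(fields.boolean())               -> ('bool', 'bool')
#   get_pg_type(fields.char(size=16))           -> ('varchar', 'VARCHAR(16)')
#   get_pg_type(fields.many2one('res.partner')) -> ('int4', 'int4')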
205 class MetaModel(api.Meta):
206 """ Metaclass for the models.
208 This class is used as the metaclass for the class :class:`BaseModel` to
    discover the models defined in a module (without instantiating them).
210 If the automatic discovery is not needed, it is possible to set the model's
211 ``_register`` attribute to False.
215 module_to_models = {}
217 def __init__(self, name, bases, attrs):
218 if not self._register:
219 self._register = True
220 super(MetaModel, self).__init__(name, bases, attrs)
223 if not hasattr(self, '_module'):
224 # The (OpenERP) module name can be in the `openerp.addons` namespace
225 # or not. For instance, module `sale` can be imported as
            # `openerp.addons.sale` (the right way) or `sale` (for backward
            # compatibility).
228 module_parts = self.__module__.split('.')
229 if len(module_parts) > 2 and module_parts[:2] == ['openerp', 'addons']:
230 module_name = self.__module__.split('.')[2]
232 module_name = self.__module__.split('.')[0]
233 self._module = module_name
        # Remember which models to instantiate for this module.
237 self.module_to_models.setdefault(self._module, []).append(self)
239 # transform columns into new-style fields (enables field inheritance)
240 for name, column in self._columns.iteritems():
241 if name in self.__dict__:
242 _logger.warning("Field %r erasing an existing value", name)
243 setattr(self, name, column.to_field())
247 """ Pseudo-ids for new records. """
248 def __nonzero__(self):
251 IdType = (int, long, basestring, NewId)
254 # maximum number of prefetched records
257 # special columns automatically created by the ORM
258 LOG_ACCESS_COLUMNS = ['create_uid', 'create_date', 'write_uid', 'write_date']
259 MAGIC_COLUMNS = ['id'] + LOG_ACCESS_COLUMNS
261 class BaseModel(object):
262 """ Base class for OpenERP models.
264 OpenERP models are created by inheriting from this class' subclasses:
266 * :class:`Model` for regular database-persisted models
    * :class:`TransientModel` for temporary data, stored in the database but
      automatically vacuumed every so often

    * :class:`AbstractModel` for abstract super classes meant to be shared by
      multiple inheriting models
274 The system automatically instantiates every model once per database. Those
275 instances represent the available models on each database, and depend on
276 which modules are installed on that database. The actual class of each
    instance is built from the Python classes that create and inherit from the
    corresponding model.
280 Every model instance is a "recordset", i.e., an ordered collection of
281 records of the model. Recordsets are returned by methods like
282 :meth:`~.browse`, :meth:`~.search`, or field accesses. Records have no
    explicit representation: a record is represented as a recordset of one
    record.
286 To create a class that should not be instantiated, the _register class
287 attribute may be set to False.
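
    A minimal sketch of typical recordset usage, given an environment ``env``
    (the model and field names below are hypothetical)::

        partners = env['res.partner'].search([('name', 'ilike', 'john')])
        for partner in partners:
            print partner.name      # each `partner` is a recordset of size 1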
289 __metaclass__ = MetaModel
290 _auto = True # create database backend
291 _register = False # Set to false if the model shouldn't be automatically discovered.
298 _parent_name = 'parent_id'
299 _parent_store = False
300 _parent_order = False
306 _translate = True # set to False to disable translations export for this model
308 # dict of {field:method}, with method returning the (name_get of records, {id: fold})
309 # to include in the _read_group, if grouped on this field
313 _transient = False # True in a TransientModel
316 # { 'parent_model': 'm2o_field', ... }
    # Mapping from inherits'd field name to triple (m, r, f, n) where m is the
    # model from which it is inherits'd, r is the (local) field towards m, f
    # is the _column object itself, and n is the original (i.e. top-most)
    # parent model.
    # Example:
    #  { 'field_name': ('parent_model', 'm2o_field_to_reach_parent',
    #                   field_column_obj, original_parent_model), ... }
328 # Mapping field name/column_info object
329 # This is similar to _inherit_fields but:
330 # 1. includes self fields,
331 # 2. uses column_info instead of a triple.
336 _sql_constraints = []
338 # model dependencies, for models backed up by sql views:
339 # {model_name: field_names, ...}
342 CONCURRENCY_CHECK_FIELD = '__last_update'
344 def log(self, cr, uid, id, message, secondary=False, context=None):
345 return _logger.warning("log() is deprecated. Please use OpenChatter notification system instead of the res.log mechanism.")
347 def view_init(self, cr, uid, fields_list, context=None):
348 """Override this method to do specific things when a view on the object is opened."""
351 def _field_create(self, cr, context=None):
352 """ Create entries in ir_model_fields for all the model's fields.
354 If necessary, also create an entry in ir_model, and if called from the
355 modules loading scheme (by receiving 'module' in the context), also
356 create entries in ir_model_data (for the model and the fields).
358 - create an entry in ir_model (if there is not already one),
359 - create an entry in ir_model_data (if there is not already one, and if
360 'module' is in the context),
361 - update ir_model_fields with the fields found in _columns
362 (TODO there is some redundancy as _columns is updated from
363 ir_model_fields in __init__).
368 cr.execute("SELECT id FROM ir_model WHERE model=%s", (self._name,))
370 cr.execute('SELECT nextval(%s)', ('ir_model_id_seq',))
371 model_id = cr.fetchone()[0]
372 cr.execute("INSERT INTO ir_model (id,model, name, info,state) VALUES (%s, %s, %s, %s, %s)", (model_id, self._name, self._description, self.__doc__, 'base'))
374 model_id = cr.fetchone()[0]
375 if 'module' in context:
376 name_id = 'model_'+self._name.replace('.', '_')
377 cr.execute('select * from ir_model_data where name=%s and module=%s', (name_id, context['module']))
379 cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, (now() at time zone 'UTC'), (now() at time zone 'UTC'), %s, %s, %s)", \
380 (name_id, context['module'], 'ir.model', model_id)
383 cr.execute("SELECT * FROM ir_model_fields WHERE model=%s", (self._name,))
385 for rec in cr.dictfetchall():
386 cols[rec['name']] = rec
388 ir_model_fields_obj = self.pool.get('ir.model.fields')
390 # sparse field should be created at the end, as it depends on its serialized field already existing
391 model_fields = sorted(self._columns.items(), key=lambda x: 1 if x[1]._type == 'sparse' else 0)
392 for (k, f) in model_fields:
394 'model_id': model_id,
397 'field_description': f.string,
399 'relation': f._obj or '',
400 'select_level': tools.ustr(int(f.select)),
401 'readonly': (f.readonly and 1) or 0,
402 'required': (f.required and 1) or 0,
403 'selectable': (f.selectable and 1) or 0,
404 'translate': (f.translate and 1) or 0,
405 'relation_field': f._fields_id if isinstance(f, fields.one2many) else '',
406 'serialization_field_id': None,
408 if getattr(f, 'serialization_field', None):
409 # resolve link to serialization_field if specified by name
410 serialization_field_id = ir_model_fields_obj.search(cr, SUPERUSER_ID, [('model','=',vals['model']), ('name', '=', f.serialization_field)])
411 if not serialization_field_id:
412 raise except_orm(_('Error'), _("Serialization field `%s` not found for sparse field `%s`!") % (f.serialization_field, k))
413 vals['serialization_field_id'] = serialization_field_id[0]
            # When it's a custom field, it does not contain f.select
416 if context.get('field_state', 'base') == 'manual':
417 if context.get('field_name', '') == k:
418 vals['select_level'] = context.get('select', '0')
                    # set the value so that the problem does not occur next time
421 vals['select_level'] = cols[k]['select_level']
424 cr.execute('select nextval(%s)', ('ir_model_fields_id_seq',))
425 id = cr.fetchone()[0]
427 cr.execute("""INSERT INTO ir_model_fields (
428 id, model_id, model, name, field_description, ttype,
429 relation,state,select_level,relation_field, translate, serialization_field_id
431 %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s
433 id, vals['model_id'], vals['model'], vals['name'], vals['field_description'], vals['ttype'],
434 vals['relation'], 'base',
435 vals['select_level'], vals['relation_field'], bool(vals['translate']), vals['serialization_field_id']
437 if 'module' in context:
438 name1 = 'field_' + self._table + '_' + k
439 cr.execute("select name from ir_model_data where name=%s", (name1,))
441 name1 = name1 + "_" + str(id)
442 cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, (now() at time zone 'UTC'), (now() at time zone 'UTC'), %s, %s, %s)", \
443 (name1, context['module'], 'ir.model.fields', id)
446 for key, val in vals.items():
447 if cols[k][key] != vals[key]:
448 cr.execute('update ir_model_fields set field_description=%s where model=%s and name=%s', (vals['field_description'], vals['model'], vals['name']))
449 cr.execute("""UPDATE ir_model_fields SET
450 model_id=%s, field_description=%s, ttype=%s, relation=%s,
451 select_level=%s, readonly=%s ,required=%s, selectable=%s, relation_field=%s, translate=%s, serialization_field_id=%s
453 model=%s AND name=%s""", (
454 vals['model_id'], vals['field_description'], vals['ttype'],
456 vals['select_level'], bool(vals['readonly']), bool(vals['required']), bool(vals['selectable']), vals['relation_field'], bool(vals['translate']), vals['serialization_field_id'], vals['model'], vals['name']
459 self.invalidate_cache(cr, SUPERUSER_ID)
462 def _add_field(cls, name, field):
463 """ Add the given `field` under the given `name` in the class """
464 field.set_class_name(cls, name)
466 # add field in _fields (for reflection)
467 cls._fields[name] = field
469 # add field as an attribute, unless another kind of value already exists
470 if isinstance(getattr(cls, name, field), Field):
471 setattr(cls, name, field)
473 _logger.warning("In model %r, member %r is not a field", cls._name, name)
476 cls._columns[name] = field.to_column()
478 # remove potential column that may be overridden by field
479 cls._columns.pop(name, None)
482 def _pop_field(cls, name):
483 """ Remove the field with the given `name` from the model.
484 This method should only be used for manual fields.
486 field = cls._fields.pop(name)
487 cls._columns.pop(name, None)
488 cls._all_columns.pop(name, None)
489 if hasattr(cls, name):
494 def _add_magic_fields(cls):
495 """ Introduce magic fields on the current class
497 * id is a "normal" field (with a specific getter)
        * create_uid, create_date, write_uid and write_date have become
          "normal" fields
500 * $CONCURRENCY_CHECK_FIELD is a computed field with its computing
501 method defined dynamically. Uses ``str(datetime.datetime.utcnow())``
502 to get the same structure as the previous
503 ``(now() at time zone 'UTC')::timestamp``::
505 # select (now() at time zone 'UTC')::timestamp;
507 ----------------------------
508 2013-06-18 08:30:37.292809
510 >>> str(datetime.datetime.utcnow())
511 '2013-06-18 08:31:32.821177'
513 def add(name, field):
514 """ add `field` with the given `name` if it does not exist yet """
515 if name not in cls._columns and name not in cls._fields:
516 cls._add_field(name, field)
521 # this field 'id' must override any other column or field
522 cls._add_field('id', fields.Id(automatic=True))
524 add('display_name', fields.Char(string='Display Name', automatic=True,
525 compute='_compute_display_name'))
528 add('create_uid', fields.Many2one('res.users', string='Created by', automatic=True))
529 add('create_date', fields.Datetime(string='Created on', automatic=True))
530 add('write_uid', fields.Many2one('res.users', string='Last Updated by', automatic=True))
531 add('write_date', fields.Datetime(string='Last Updated on', automatic=True))
            last_modified_name = 'compute_concurrency_field_with_access'
        else:
            last_modified_name = 'compute_concurrency_field'
536 # this field must override any other column or field
537 cls._add_field(cls.CONCURRENCY_CHECK_FIELD, fields.Datetime(
538 string='Last Modified on', compute=last_modified_name, automatic=True))
541 def compute_concurrency_field(self):
542 self[self.CONCURRENCY_CHECK_FIELD] = \
543 datetime.datetime.utcnow().strftime(DEFAULT_SERVER_DATETIME_FORMAT)
546 @api.depends('create_date', 'write_date')
547 def compute_concurrency_field_with_access(self):
548 self[self.CONCURRENCY_CHECK_FIELD] = \
549 self.write_date or self.create_date or \
550 datetime.datetime.utcnow().strftime(DEFAULT_SERVER_DATETIME_FORMAT)
    # Goal: try to apply inheritance at the instantiation level and
554 # put objects in the pool var
557 def _build_model(cls, pool, cr):
558 """ Instanciate a given model.
560 This class method instanciates the class of some model (i.e. a class
561 deriving from osv or osv_memory). The class might be the class passed
562 in argument or, if it inherits from another class, a class constructed
563 by combining the two classes.
567 # IMPORTANT: the registry contains an instance for each model. The class
568 # of each model carries inferred metadata that is shared among the
569 # model's instances for this registry, but not among registries. Hence
570 # we cannot use that "registry class" for combining model classes by
571 # inheritance, since it confuses the metadata inference process.
573 # Keep links to non-inherited constraints in cls; this is useful for
574 # instance when exporting translations
575 cls._local_constraints = cls.__dict__.get('_constraints', [])
576 cls._local_sql_constraints = cls.__dict__.get('_sql_constraints', [])
578 # determine inherited models
579 parents = getattr(cls, '_inherit', [])
580 parents = [parents] if isinstance(parents, basestring) else (parents or [])
582 # determine the model's name
583 name = cls._name or (len(parents) == 1 and parents[0]) or cls.__name__
585 # determine the module that introduced the model
586 original_module = pool[name]._original_module if name in parents else cls._module
588 # build the class hierarchy for the model
589 for parent in parents:
590 if parent not in pool:
                raise TypeError('The model "%s" specifies a non-existent parent class "%s"\n'
                    'You may need to add a dependency on the parent class\' module.' % (name, parent))
593 parent_model = pool[parent]
            # do not use the class of parent_model, since that class contains
596 # inferred metadata; use its ancestor instead
597 parent_class = type(parent_model).__base__
599 # don't inherit custom fields
600 columns = dict((key, val)
601 for key, val in parent_class._columns.iteritems()
604 columns.update(cls._columns)
606 inherits = dict(parent_class._inherits)
607 inherits.update(cls._inherits)
609 depends = dict(parent_class._depends)
610 for m, fs in cls._depends.iteritems():
611 depends[m] = depends.get(m, []) + fs
613 old_constraints = parent_class._constraints
614 new_constraints = cls._constraints
615 # filter out from old_constraints the ones overridden by a
616 # constraint with the same function name in new_constraints
617 constraints = new_constraints + [oldc
618 for oldc in old_constraints
619 if not any(newc[2] == oldc[2] and same_name(newc[0], oldc[0])
620 for newc in new_constraints)
623 sql_constraints = cls._sql_constraints + \
624 parent_class._sql_constraints
630 '_inherits': inherits,
632 '_constraints': constraints,
633 '_sql_constraints': sql_constraints,
635 cls = type(name, (cls, parent_class), attrs)
637 # introduce the "registry class" of the model;
638 # duplicate some attributes so that the ORM can modify them
642 '_columns': dict(cls._columns),
643 '_defaults': {}, # filled by Field._determine_default()
644 '_inherits': dict(cls._inherits),
645 '_depends': dict(cls._depends),
646 '_constraints': list(cls._constraints),
647 '_sql_constraints': list(cls._sql_constraints),
648 '_original_module': original_module,
650 cls = type(cls._name, (cls,), attrs)
652 # instantiate the model, and initialize it
653 model = object.__new__(cls)
654 model.__init__(pool, cr)
658 def _init_function_fields(cls, pool, cr):
659 # initialize the list of non-stored function fields for this model
660 pool._pure_function_fields[cls._name] = []
662 # process store of low-level function fields
663 for fname, column in cls._columns.iteritems():
664 if hasattr(column, 'digits_change'):
665 column.digits_change(cr)
666 # filter out existing store about this field
667 pool._store_function[cls._name] = [
669 for stored in pool._store_function.get(cls._name, [])
670 if (stored[0], stored[1]) != (cls._name, fname)
672 if not isinstance(column, fields.function):
675 # register it on the pool for invalidation
676 pool._pure_function_fields[cls._name].append(fname)
678 # process store parameter
681 get_ids = lambda self, cr, uid, ids, c={}: ids
682 store = {cls._name: (get_ids, None, column.priority, None)}
683 for model, spec in store.iteritems():
685 (fnct, fields2, order, length) = spec
687 (fnct, fields2, order) = spec
690 raise except_orm('Error',
691 ('Invalid function definition %s in object %s !\nYou must use the definition: store={object:(fnct, fields, priority, time length)}.' % (fname, cls._name)))
692 pool._store_function.setdefault(model, [])
693 t = (cls._name, fname, fnct, tuple(fields2) if fields2 else None, order, length)
694 if t not in pool._store_function[model]:
695 pool._store_function[model].append(t)
696 pool._store_function[model].sort(key=lambda x: x[4])
699 def _init_manual_fields(cls, pool, cr):
700 # Check whether the query is already done
701 if pool.fields_by_model is not None:
702 manual_fields = pool.fields_by_model.get(cls._name, [])
704 cr.execute('SELECT * FROM ir_model_fields WHERE model=%s AND state=%s', (cls._name, 'manual'))
705 manual_fields = cr.dictfetchall()
707 for field in manual_fields:
708 if field['name'] in cls._columns:
711 'string': field['field_description'],
712 'required': bool(field['required']),
713 'readonly': bool(field['readonly']),
714 'domain': eval(field['domain']) if field['domain'] else None,
715 'size': field['size'] or None,
716 'ondelete': field['on_delete'],
717 'translate': (field['translate']),
720 #'select': int(field['select_level'])
722 if field['serialization_field_id']:
723 cr.execute('SELECT name FROM ir_model_fields WHERE id=%s', (field['serialization_field_id'],))
724 attrs.update({'serialization_field': cr.fetchone()[0], 'type': field['ttype']})
725 if field['ttype'] in ['many2one', 'one2many', 'many2many']:
726 attrs.update({'relation': field['relation']})
727 cls._columns[field['name']] = fields.sparse(**attrs)
728 elif field['ttype'] == 'selection':
729 cls._columns[field['name']] = fields.selection(eval(field['selection']), **attrs)
730 elif field['ttype'] == 'reference':
731 cls._columns[field['name']] = fields.reference(selection=eval(field['selection']), **attrs)
732 elif field['ttype'] == 'many2one':
733 cls._columns[field['name']] = fields.many2one(field['relation'], **attrs)
734 elif field['ttype'] == 'one2many':
735 cls._columns[field['name']] = fields.one2many(field['relation'], field['relation_field'], **attrs)
736 elif field['ttype'] == 'many2many':
737 _rel1 = field['relation'].replace('.', '_')
738 _rel2 = field['model'].replace('.', '_')
739 _rel_name = 'x_%s_%s_%s_rel' % (_rel1, _rel2, field['name'])
740 cls._columns[field['name']] = fields.many2many(field['relation'], _rel_name, 'id1', 'id2', **attrs)
742 cls._columns[field['name']] = getattr(fields, field['ttype'])(**attrs)
745 def _init_constraints_onchanges(cls):
746 # store sql constraint error messages
747 for (key, _, msg) in cls._sql_constraints:
748 cls.pool._sql_error[cls._table + '_' + key] = msg
750 # collect constraint and onchange methods
751 cls._constraint_methods = []
752 cls._onchange_methods = defaultdict(list)
753 for attr, func in getmembers(cls, callable):
754 if hasattr(func, '_constrains'):
755 if not all(name in cls._fields for name in func._constrains):
756 _logger.warning("@constrains%r parameters must be field names", func._constrains)
757 cls._constraint_methods.append(func)
758 if hasattr(func, '_onchange'):
759 if not all(name in cls._fields for name in func._onchange):
760 _logger.warning("@onchange%r parameters must be field names", func._onchange)
761 for name in func._onchange:
762 cls._onchange_methods[name].append(func)
765 # In the past, this method was registering the model class in the server.
766 # This job is now done entirely by the metaclass MetaModel.
768 # Do not create an instance here. Model instances are created by method
772 def __init__(self, pool, cr):
773 """ Initialize a model and make it part of the given registry.
775 - copy the stored fields' functions in the registry,
776 - retrieve custom fields and add them in the model,
777 - ensure there is a many2one for each _inherits'd parent,
778 - update the children's _columns,
779 - give a chance to each field to initialize itself.
784 # link the class to the registry, and update the registry
786 cls._model = self # backward compatibility
787 pool.add(cls._name, self)
789 # determine description, table, sequence and log_access
790 if not cls._description:
791 cls._description = cls._name
793 cls._table = cls._name.replace('.', '_')
794 if not cls._sequence:
795 cls._sequence = cls._table + '_id_seq'
796 if not hasattr(cls, '_log_access'):
797 # If _log_access is not specified, it is the same value as _auto.
798 cls._log_access = cls._auto
801 if cls.is_transient():
802 cls._transient_check_count = 0
803 cls._transient_max_count = config.get('osv_memory_count_limit')
804 cls._transient_max_hours = config.get('osv_memory_age_limit')
805 assert cls._log_access, \
806 "TransientModels must have log_access turned on, " \
807 "in order to implement their access rights policy"
809 # retrieve new-style fields and duplicate them (to avoid clashes with
810 # inheritance between different models)
812 for attr, field in getmembers(cls, Field.__instancecheck__):
813 if not field.inherited:
814 cls._add_field(attr, field.new())
816 # introduce magic fields
817 cls._add_magic_fields()
819 # register stuff about low-level function fields and custom fields
820 cls._init_function_fields(pool, cr)
821 cls._init_manual_fields(pool, cr)
824 cls._inherits_check()
825 cls._inherits_reload()
827 # register constraints and onchange methods
828 cls._init_constraints_onchanges()
831 for k in cls._defaults:
832 assert k in cls._fields, \
833 "Model %s has a default for nonexiting field %s" % (cls._name, k)
836 for column in cls._columns.itervalues():
841 assert cls._rec_name in cls._fields, \
842 "Invalid rec_name %s for model %s" % (cls._rec_name, cls._name)
843 elif 'name' in cls._fields:
844 cls._rec_name = 'name'
846 # prepare ormcache, which must be shared by all instances of the model
851 def _is_an_ordinary_table(self):
        self.env.cr.execute("""\
            SELECT  1
            FROM    pg_class
            WHERE   relname = %s
            AND     relkind = %s""", [self._table, 'r'])
857 return bool(self.env.cr.fetchone())
859 def __export_xml_id(self):
860 """ Return a valid xml_id for the record `self`. """
861 if not self._is_an_ordinary_table():
863 "You can not export the column ID of model %s, because the "
864 "table %s is not an ordinary table."
865 % (self._name, self._table))
866 ir_model_data = self.sudo().env['ir.model.data']
867 data = ir_model_data.search([('model', '=', self._name), ('res_id', '=', self.id)])
870 return '%s.%s' % (data[0].module, data[0].name)
875 name = '%s_%s' % (self._table, self.id)
876 while ir_model_data.search([('module', '=', '__export__'), ('name', '=', name)]):
878 name = '%s_%s_%s' % (self._table, self.id, postfix)
879 ir_model_data.create({
882 'module': '__export__',
885 return '__export__.' + name
888 def __export_rows(self, fields):
889 """ Export fields of the records in `self`.
891 :param fields: list of lists of fields to traverse
892 :return: list of lists of corresponding values
896 # main line of record, initially empty
897 current = [''] * len(fields)
898 lines.append(current)
900 # list of primary fields followed by secondary field(s)
903 # process column by column
904 for i, path in enumerate(fields):
909 if name in primary_done:
913 current[i] = str(record.id)
915 current[i] = record.__export_xml_id()
917 field = record._fields[name]
920 # this part could be simpler, but it has to be done this way
921 # in order to reproduce the former behavior
922 if not isinstance(value, BaseModel):
923 current[i] = field.convert_to_export(value, self.env)
925 primary_done.append(name)
927 # This is a special case, its strange behavior is intended!
928 if field.type == 'many2many' and len(path) > 1 and path[1] == 'id':
929 xml_ids = [r.__export_xml_id() for r in value]
930 current[i] = ','.join(xml_ids) or False
933 # recursively export the fields that follow name
934 fields2 = [(p[1:] if p and p[0] == name else []) for p in fields]
935 lines2 = value.__export_rows(fields2)
937 # merge first line with record's main line
938 for j, val in enumerate(lines2[0]):
941 # check value of current field
943 # assign xml_ids, and forget about remaining lines
944 xml_ids = [item[1] for item in value.name_get()]
945 current[i] = ','.join(xml_ids)
947 # append the other lines at the end
955 def export_data(self, fields_to_export, raw_data=False):
956 """ Export fields for selected objects
958 :param fields_to_export: list of fields
959 :param raw_data: True to return value in native Python type
960 :rtype: dictionary with a *datas* matrix
962 This method is used when exporting data via client menu
964 fields_to_export = map(fix_import_export_id_paths, fields_to_export)
966 self = self.with_context(export_raw_data=True)
967 return {'datas': self.__export_rows(fields_to_export)}
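
    # Illustrative call of export_data() (the field paths are hypothetical):
    #
    #   records.export_data(['id', 'name', 'order_line/product_id/id'])['datas']
    #
    # roughly returns one row (list of values) per exported record, with extra
    # rows for the additional lines of one2many/many2many sub-records.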
969 def import_data(self, cr, uid, fields, datas, mode='init', current_module='', noupdate=False, context=None, filename=None):
972 Use :meth:`~load` instead
974 Import given data in given module
976 This method is used when importing data via client menu.
978 Example of fields to import for a sale.order::
981 partner_id, (=name_search)
982 order_line/.id, (=database_id)
984 order_line/product_id/id, (=xml id)
985 order_line/price_unit,
986 order_line/product_uom_qty,
987 order_line/product_uom/id (=xml_id)
989 This method returns a 4-tuple with the following structure::
991 (return_code, errored_resource, error_message, unused)
993 * The first item is a return code, it is ``-1`` in case of
994 import error, or the last imported row number in case of success
995 * The second item contains the record data dict that failed to import
996 in case of error, otherwise it's 0
997 * The third item contains an error message string in case of error,
999 * The last item is currently unused, with no specific semantics
1001 :param fields: list of fields to import
1002 :param datas: data to import
1003 :param mode: 'init' or 'update' for record creation
1004 :param current_module: module name
1005 :param noupdate: flag for record creation
1006 :param filename: optional file to store partial import state for recovery
1007 :returns: 4-tuple in the form (return_code, errored_resource, error_message, unused)
1008 :rtype: (int, dict or 0, str or 0, str or 0)
1010 context = dict(context) if context is not None else {}
1011 context['_import_current_module'] = current_module
1013 fields = map(fix_import_export_id_paths, fields)
1014 ir_model_data_obj = self.pool.get('ir.model.data')
1017 if m['type'] == 'error':
1018 raise Exception(m['message'])
1020 if config.get('import_partial') and filename:
1021 with open(config.get('import_partial'), 'rb') as partial_import_file:
1022 data = pickle.load(partial_import_file)
1023 position = data.get(filename, 0)
1027 for res_id, xml_id, res, info in self._convert_records(cr, uid,
1028 self._extract_records(cr, uid, fields, datas,
1029 context=context, log=log),
1030 context=context, log=log):
1031 ir_model_data_obj._update(cr, uid, self._name,
1032 current_module, res, mode=mode, xml_id=xml_id,
1033 noupdate=noupdate, res_id=res_id, context=context)
1034 position = info.get('rows', {}).get('to', 0) + 1
1035 if config.get('import_partial') and filename and (not (position%100)):
1036 with open(config.get('import_partial'), 'rb') as partial_import:
1037 data = pickle.load(partial_import)
1038 data[filename] = position
1039 with open(config.get('import_partial'), 'wb') as partial_import:
1040 pickle.dump(data, partial_import)
1041 if context.get('defer_parent_store_computation'):
1042 self._parent_store_compute(cr)
1044 except Exception, e:
1046 return -1, {}, 'Line %d : %s' % (position + 1, tools.ustr(e)), ''
1048 if context.get('defer_parent_store_computation'):
1049 self._parent_store_compute(cr)
1050 return position, 0, 0, 0
1052 def load(self, cr, uid, fields, data, context=None):
1054 Attempts to load the data matrix, and returns a list of ids (or
        ``False`` if there was an error and no id could be generated) and a
        list of messages.
1058 The ids are those of the records created and saved (in database), in
1059 the same order they were extracted from the file. They can be passed
1060 directly to :meth:`~read`
1062 :param fields: list of fields to import, at the same index as the corresponding data
1063 :type fields: list(str)
1064 :param data: row-major matrix of data to import
1065 :type data: list(list(str))
1066 :param dict context:
1067 :returns: {ids: list(int)|False, messages: [Message]}
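
        A rough sketch of the expected input, using hypothetical column names
        of a ``sale.order``-like model::

            fields = ['name', 'partner_id/id', 'order_line/product_id/id']
            data = [
                ['SO042', 'base.main_partner', 'product.product_product_1'],
                ['',      '',                  'product.product_product_2'],
            ]
            result = self.load(cr, uid, fields, data, context=context)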
1069 cr.execute('SAVEPOINT model_load')
1072 fields = map(fix_import_export_id_paths, fields)
        ModelData = self.pool['ir.model.data']
        ModelData.clear_caches()
1075 fg = self.fields_get(cr, uid, context=context)
1082 for id, xid, record, info in self._convert_records(cr, uid,
1083 self._extract_records(cr, uid, fields, data,
1084 context=context, log=messages.append),
1085 context=context, log=messages.append):
1087 cr.execute('SAVEPOINT model_load_save')
1088 except psycopg2.InternalError, e:
                # broken transaction, exit and hope the source error was
                # already logged
1091 if not any(message['type'] == 'error' for message in messages):
1092 messages.append(dict(info, type='error',message=
1093 u"Unknown database error: '%s'" % e))
1096 ids.append(ModelData._update(cr, uid, self._name,
1097 current_module, record, mode=mode, xml_id=xid,
1098 noupdate=noupdate, res_id=id, context=context))
1099 cr.execute('RELEASE SAVEPOINT model_load_save')
1100 except psycopg2.Warning, e:
1101 messages.append(dict(info, type='warning', message=str(e)))
1102 cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
1103 except psycopg2.Error, e:
1104 messages.append(dict(
1106 **PGERROR_TO_OE[e.pgcode](self, fg, info, e)))
1107 # Failed to write, log to messages, rollback savepoint (to
1108 # avoid broken transaction) and keep going
1109 cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
1110 except Exception, e:
1111 message = (_('Unknown error during import:') +
1112 ' %s: %s' % (type(e), unicode(e)))
1113 moreinfo = _('Resolve other errors first')
1114 messages.append(dict(info, type='error',
1117 # Failed for some reason, perhaps due to invalid data supplied,
1118 # rollback savepoint and keep going
1119 cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
1120 if any(message['type'] == 'error' for message in messages):
1121 cr.execute('ROLLBACK TO SAVEPOINT model_load')
1123 return {'ids': ids, 'messages': messages}
1125 def _extract_records(self, cr, uid, fields_, data,
1126 context=None, log=lambda a: None):
1127 """ Generates record dicts from the data sequence.
1129 The result is a generator of dicts mapping field names to raw
1130 (unconverted, unvalidated) values.
1132 For relational fields, if sub-fields were provided the value will be
1133 a list of sub-records
1135 The following sub-fields may be set on the record (by key):
1136 * None is the name_get for the record (to use with name_create/name_search)
1137 * "id" is the External ID for the record
1138 * ".id" is the Database ID for the record
1140 columns = dict((k, v.column) for k, v in self._all_columns.iteritems())
1141 # Fake columns to avoid special cases in extractor
1142 columns[None] = fields.char('rec_name')
1143 columns['id'] = fields.char('External ID')
1144 columns['.id'] = fields.integer('Database ID')
1146 # m2o fields can't be on multiple lines so exclude them from the
1147 # is_relational field rows filter, but special-case it later on to
1148 # be handled with relational fields (as it can have subfields)
1149 is_relational = lambda field: columns[field]._type in ('one2many', 'many2many', 'many2one')
1150 get_o2m_values = itemgetter_tuple(
1151 [index for index, field in enumerate(fields_)
1152 if columns[field[0]]._type == 'one2many'])
1153 get_nono2m_values = itemgetter_tuple(
1154 [index for index, field in enumerate(fields_)
1155 if columns[field[0]]._type != 'one2many'])
1156 # Checks if the provided row has any non-empty non-relational field
1157 def only_o2m_values(row, f=get_nono2m_values, g=get_o2m_values):
1158 return any(g(row)) and not any(f(row))
1162 if index >= len(data): return
1165 # copy non-relational fields to record dict
1166 record = dict((field[0], value)
1167 for field, value in itertools.izip(fields_, row)
1168 if not is_relational(field[0]))
1170 # Get all following rows which have relational values attached to
1171 # the current record (no non-relational values)
1172 record_span = itertools.takewhile(
1173 only_o2m_values, itertools.islice(data, index + 1, None))
1174 # stitch record row back on for relational fields
1175 record_span = list(itertools.chain([row], record_span))
1176 for relfield in set(
1177 field[0] for field in fields_
1178 if is_relational(field[0])):
1179 column = columns[relfield]
1180 # FIXME: how to not use _obj without relying on fields_get?
1181 Model = self.pool[column._obj]
1183 # get only cells for this sub-field, should be strictly
1184 # non-empty, field path [None] is for name_get column
1185 indices, subfields = zip(*((index, field[1:] or [None])
1186 for index, field in enumerate(fields_)
1187 if field[0] == relfield))
1189 # return all rows which have at least one value for the
1190 # subfields of relfield
1191 relfield_data = filter(any, map(itemgetter_tuple(indices), record_span))
1192 record[relfield] = [subrecord
1193 for subrecord, _subinfo in Model._extract_records(
1194 cr, uid, subfields, relfield_data,
1195 context=context, log=log)]
1197 yield record, {'rows': {
1199 'to': index + len(record_span) - 1
1201 index += len(record_span)
1203 def _convert_records(self, cr, uid, records,
1204 context=None, log=lambda a: None):
1205 """ Converts records from the source iterable (recursive dicts of
1206 strings) into forms which can be written to the database (via
1207 self.create or (ir.model.data)._update)
1209 :returns: a list of triplets of (id, xid, record)
1210 :rtype: list((int|None, str|None, dict))
1212 if context is None: context = {}
1213 Converter = self.pool['ir.fields.converter']
1214 columns = dict((k, v.column) for k, v in self._all_columns.iteritems())
1215 Translation = self.pool['ir.translation']
1217 (f, (Translation._get_source(cr, uid, self._name + ',' + f, 'field',
1218 context.get('lang'))
1220 for f, column in columns.iteritems())
1222 convert = Converter.for_model(cr, uid, self, context=context)
1224 def _log(base, field, exception):
1225 type = 'warning' if isinstance(exception, Warning) else 'error'
1226 # logs the logical (not human-readable) field name for automated
1227 # processing of response, but injects human readable in message
1228 record = dict(base, type=type, field=field,
1229 message=unicode(exception.args[0]) % base)
1230 if len(exception.args) > 1 and exception.args[1]:
1231 record.update(exception.args[1])
1234 stream = CountingStream(records)
1235 for record, extras in stream:
1238 # name_get/name_create
1239 if None in record: pass
1246 dbid = int(record['.id'])
1248 # in case of overridden id column
1249 dbid = record['.id']
1250 if not self.search(cr, uid, [('id', '=', dbid)], context=context):
1253 record=stream.index,
1255 message=_(u"Unknown database identifier '%s'") % dbid))
1258 converted = convert(record, lambda field, err:\
1259 _log(dict(extras, record=stream.index, field=field_names[field]), field, err))
1261 yield dbid, xid, converted, dict(extras, record=stream.index)
1264 def _validate_fields(self, field_names):
1265 field_names = set(field_names)
1267 # old-style constraint methods
1268 trans = self.env['ir.translation']
1269 cr, uid, context = self.env.args
1272 for fun, msg, names in self._constraints:
1274 # validation must be context-independent; call `fun` without context
1275 valid = not (set(names) & field_names) or fun(self._model, cr, uid, ids)
1277 except Exception, e:
1278 _logger.debug('Exception while validating constraint', exc_info=True)
1280 extra_error = tools.ustr(e)
1283 res_msg = msg(self._model, cr, uid, ids, context=context)
1284 if isinstance(res_msg, tuple):
1285 template, params = res_msg
1286 res_msg = template % params
1288 res_msg = trans._get_source(self._name, 'constraint', self.env.lang, msg)
1290 res_msg += "\n\n%s\n%s" % (_('Error details:'), extra_error)
1292 _("Field(s) `%s` failed against a constraint: %s") %
1293 (', '.join(names), res_msg)
1296 raise ValidationError('\n'.join(errors))
1298 # new-style constraint methods
1299 for check in self._constraint_methods:
1300 if set(check._constrains) & field_names:
1303 except ValidationError, e:
1305 except Exception, e:
1306 raise ValidationError("Error while validating constraint\n\n%s" % tools.ustr(e))
1308 def default_get(self, cr, uid, fields_list, context=None):
1309 """ default_get(fields) -> default_values
1311 Return default values for the fields in `fields_list`. Default
1312 values are determined by the context, user defaults, and the model
1315 :param fields_list: a list of field names
1316 :return: a dictionary mapping each field name to its corresponding
1317 default value; the keys of the dictionary are the fields in
1318 `fields_list` that have a default value different from ``False``.
1320 This method should not be overridden. In order to change the
1321 mechanism for determining default values, you should override method
1322 :meth:`add_default_value` instead.
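
        For instance (the field names are hypothetical)::

            defaults = self.default_get(cr, uid, ['name', 'partner_id'], context=context)
            # e.g. {'partner_id': 42}; 'name' is absent if its default is False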
1324 # trigger view init hook
1325 self.view_init(cr, uid, fields_list, context)
1327 # use a new record to determine default values; evaluate fields on the
1328 # new record and put default values in result
1329 record = self.new(cr, uid, {}, context=context)
1331 for name in fields_list:
1332 if name in self._fields:
1333 value = record[name]
1334 if name in record._cache:
1335 result[name] = value # it really is a default value
1337 # convert default values to the expected format
1338 result = self._convert_to_write(result)
1341 def add_default_value(self, field):
1342 """ Set the default value of `field` to the new record `self`.
1343 The value must be assigned to `self`.
1345 assert not self.id, "Expected new record: %s" % self
1346 cr, uid, context = self.env.args
1349 # 1. look up context
1350 key = 'default_' + name
1352 self[name] = context[key]
1355 # 2. look up ir_values
1356 # Note: performance is good, because get_defaults_dict is cached!
1357 ir_values_dict = self.env['ir.values'].get_defaults_dict(self._name)
1358 if name in ir_values_dict:
1359 self[name] = ir_values_dict[name]
1362 # 3. look up property fields
1363 # TODO: get rid of this one
1364 column = self._columns.get(name)
1365 if isinstance(column, fields.property):
1366 self[name] = self.env['ir.property'].get(name, self._name)
1369 # 4. delegate to field
1370 field.determine_default(self)
1372 def fields_get_keys(self, cr, user, context=None):
1373 res = self._columns.keys()
        # TODO I believe this loop can be replaced by
        # res.extend(self._inherit_fields.keys())
1376 for parent in self._inherits:
1377 res.extend(self.pool[parent].fields_get_keys(cr, user, context))
1380 def _rec_name_fallback(self, cr, uid, context=None):
1381 rec_name = self._rec_name
1382 if rec_name not in self._columns:
1383 rec_name = self._columns.keys()[0] if len(self._columns.keys()) > 0 else "id"
1387 # Overload this method if you need a window title which depends on the context
1389 def view_header_get(self, cr, user, view_id=None, view_type='form', context=None):
1392 def user_has_groups(self, cr, uid, groups, context=None):
1393 """Return true if the user is at least member of one of the groups
1394 in groups_str. Typically used to resolve `groups` attribute
1395 in view and model definitions.
1397 :param str groups: comma-separated list of fully-qualified group
1398 external IDs, e.g.: ``base.group_user,base.group_system``
1399 :return: True if the current user is a member of one of the
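
        For instance (using group XML IDs defined in the base module)::

            if self.user_has_groups(cr, uid, 'base.group_user,base.group_system'):
                # the current user belongs to at least one of these groups
                ...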
1402 return any(self.pool['res.users'].has_group(cr, uid, group_ext_id)
1403 for group_ext_id in groups.split(','))
1405 def _get_default_form_view(self, cr, user, context=None):
1406 """ Generates a default single-line form view using all fields
1407 of the current model except the m2m and o2m ones.
1409 :param cr: database cursor
1410 :param int user: user id
1411 :param dict context: connection context
1412 :returns: a form view as an lxml document
1413 :rtype: etree._Element
1415 view = etree.Element('form', string=self._description)
1416 group = etree.SubElement(view, 'group', col="4")
1417 for fname, field in self._fields.iteritems():
1418 if field.automatic or field.type in ('one2many', 'many2many'):
1421 etree.SubElement(group, 'field', name=fname)
1422 if field.type == 'text':
1423 etree.SubElement(group, 'newline')
1426 def _get_default_search_view(self, cr, user, context=None):
1427 """ Generates a single-field search view, based on _rec_name.
1429 :param cr: database cursor
1430 :param int user: user id
1431 :param dict context: connection context
        :returns: a search view as an lxml document
1433 :rtype: etree._Element
1435 view = etree.Element('search', string=self._description)
1436 etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))
1439 def _get_default_tree_view(self, cr, user, context=None):
1440 """ Generates a single-field tree view, based on _rec_name.
1442 :param cr: database cursor
1443 :param int user: user id
1444 :param dict context: connection context
1445 :returns: a tree view as an lxml document
1446 :rtype: etree._Element
1448 view = etree.Element('tree', string=self._description)
1449 etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))
1452 def _get_default_calendar_view(self, cr, user, context=None):
1453 """ Generates a default calendar view by trying to infer
1454 calendar fields from a number of pre-set attribute names
1456 :param cr: database cursor
1457 :param int user: user id
1458 :param dict context: connection context
1459 :returns: a calendar view
1460 :rtype: etree._Element
1462 def set_first_of(seq, in_, to):
1463 """Sets the first value of `seq` also found in `in_` to
1464 the `to` attribute of the view being closed over.
            Returns whether it found a suitable value (and set it on
            the attribute) or not
1475 view = etree.Element('calendar', string=self._description)
1476 etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))
1478 if self._date_name not in self._columns:
1480 for dt in ['date', 'date_start', 'x_date', 'x_date_start']:
1481 if dt in self._columns:
1482 self._date_name = dt
1487 raise except_orm(_('Invalid Object Architecture!'), _("Insufficient fields for Calendar View!"))
1488 view.set('date_start', self._date_name)
1490 set_first_of(["user_id", "partner_id", "x_user_id", "x_partner_id"],
1491 self._columns, 'color')
1493 if not set_first_of(["date_stop", "date_end", "x_date_stop", "x_date_end"],
1494 self._columns, 'date_stop'):
1495 if not set_first_of(["date_delay", "planned_hours", "x_date_delay", "x_planned_hours"],
1496 self._columns, 'date_delay'):
1498 _('Invalid Object Architecture!'),
1499 _("Insufficient fields to generate a Calendar View for %s, missing a date_stop or a date_delay" % self._name))
1503 def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
1504 """ fields_view_get([view_id | view_type='form'])
1506 Get the detailed composition of the requested view like fields, model, view architecture
1508 :param view_id: id of the view or None
        :param view_type: type of the view to return if view_id is None ('form', 'tree', ...)
1510 :param toolbar: true to include contextual actions
1511 :param submenu: deprecated
1512 :return: dictionary describing the composition of the requested view (including inherited views and extensions)
1513 :raise AttributeError:
1514 * if the inherited view has unknown position to work with other than 'before', 'after', 'inside', 'replace'
1515 * if some tag other than 'position' is found in parent view
        :raise Invalid ArchitectureError: if there is a view type other than form, tree, calendar, search, etc. defined on the structure
1520 View = self.pool['ir.ui.view']
1523 'model': self._name,
1524 'field_parent': False,
1527 # try to find a view_id if none provided
            # <view_type>_view_ref in context can be used to override the default view
1530 view_ref_key = view_type + '_view_ref'
1531 view_ref = context.get(view_ref_key)
1534 module, view_ref = view_ref.split('.', 1)
1535 cr.execute("SELECT res_id FROM ir_model_data WHERE model='ir.ui.view' AND module=%s AND name=%s", (module, view_ref))
1536 view_ref_res = cr.fetchone()
1538 view_id = view_ref_res[0]
1540 _logger.warning('%r requires a fully-qualified external id (got: %r for model %s). '
1541 'Please use the complete `module.view_id` form instead.', view_ref_key, view_ref,
1545 # otherwise try to find the lowest priority matching ir.ui.view
1546 view_id = View.default_view(cr, uid, self._name, view_type, context=context)
        # context for post-processing might be overridden
1551 # read the view with inherited views applied
1552 root_view = View.read_combined(cr, uid, view_id, fields=['id', 'name', 'field_parent', 'type', 'model', 'arch'], context=context)
1553 result['arch'] = root_view['arch']
1554 result['name'] = root_view['name']
1555 result['type'] = root_view['type']
1556 result['view_id'] = root_view['id']
1557 result['field_parent'] = root_view['field_parent']
            # override context for postprocessing
1559 if root_view.get('model') != self._name:
1560 ctx = dict(context, base_model_name=root_view.get('model'))
1562 # fallback on default views methods if no ir.ui.view could be found
1564 get_func = getattr(self, '_get_default_%s_view' % view_type)
1565 arch_etree = get_func(cr, uid, context)
1566 result['arch'] = etree.tostring(arch_etree, encoding='utf-8')
1567 result['type'] = view_type
1568 result['name'] = 'default'
1569 except AttributeError:
1570 raise except_orm(_('Invalid Architecture!'), _("No default view of type '%s' could be found !") % view_type)
1572 # Apply post processing, groups and modifiers etc...
1573 xarch, xfields = View.postprocess_and_fields(cr, uid, self._name, etree.fromstring(result['arch']), view_id, context=ctx)
1574 result['arch'] = xarch
1575 result['fields'] = xfields
        # Add related action information if asked
1579 toclean = ('report_sxw_content', 'report_rml_content', 'report_sxw', 'report_rml', 'report_sxw_content_data', 'report_rml_content_data')
1585 ir_values_obj = self.pool.get('ir.values')
1586 resprint = ir_values_obj.get(cr, uid, 'action', 'client_print_multi', [(self._name, False)], False, context)
1587 resaction = ir_values_obj.get(cr, uid, 'action', 'client_action_multi', [(self._name, False)], False, context)
1588 resrelate = ir_values_obj.get(cr, uid, 'action', 'client_action_relate', [(self._name, False)], False, context)
1589 resaction = [clean(action) for action in resaction if view_type == 'tree' or not action[2].get('multi')]
1590 resprint = [clean(print_) for print_ in resprint if view_type == 'tree' or not print_[2].get('multi')]
1591 #When multi="True" set it will display only in More of the list view
1592 resrelate = [clean(action) for action in resrelate
1593 if (action[2].get('multi') and view_type == 'tree') or (not action[2].get('multi') and view_type == 'form')]
1595 for x in itertools.chain(resprint, resaction, resrelate):
1596 x['string'] = x['name']
1598 result['toolbar'] = {
1600 'action': resaction,
1605 def get_formview_id(self, cr, uid, id, context=None):
1606 """ Return an view id to open the document with. This method is meant to be
1607 overridden in addons that want to give specific view ids for example.
1609 :param int id: id of the document to open
1613 def get_formview_action(self, cr, uid, id, context=None):
1614 """ Return an action to open the document. This method is meant to be
1615 overridden in addons that want to give specific view ids for example.
1617 :param int id: id of the document to open
1619 view_id = self.get_formview_id(cr, uid, id, context=context)
1621 'type': 'ir.actions.act_window',
1622 'res_model': self._name,
1623 'view_type': 'form',
1624 'view_mode': 'form',
1625 'views': [(view_id, 'form')],
1626 'target': 'current',
1630 def get_access_action(self, cr, uid, id, context=None):
1631 """ Return an action to open the document. This method is meant to be
1632 overridden in addons that want to give specific access to the document.
1633 By default it opens the formview of the document.
        :param int id: id of the document to open
1637 return self.get_formview_action(cr, uid, id, context=context)
1639 def _view_look_dom_arch(self, cr, uid, node, view_id, context=None):
1640 return self.pool['ir.ui.view'].postprocess_and_fields(
1641 cr, uid, self._name, node, view_id, context=context)
1643 def search_count(self, cr, user, args, context=None):
1644 """ search_count(args) -> int
1646 Returns the number of records in the current model matching :ref:`the
1647 provided domain <reference/orm/domains>`.
1649 res = self.search(cr, user, args, context=context, count=True)
1650 if isinstance(res, list):
1654 @api.returns('self')
1655 def search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False):
1656 """ search(args[, offset=0][, limit=None][, order=None][, count=False])
1658 Searches for records based on the ``args``
1659 :ref:`search domain <reference/orm/domains>`.
1661 :param args: :ref:`A search domain <reference/orm/domains>`. Use an empty
1662 list to match all records.
1663 :param int offset: number of results to ignore (default: none)
1664 :param int limit: maximum number of records to return (default: all)
1665 :param str order: sort string
1666 :param bool count: if ``True``, the call should return the number of
1667 records matching ``args`` rather than the records
1669 :returns: at most ``limit`` records matching the search criteria
1671 :raise AccessError: * if user tries to bypass access rules for read on the requested object.
1673 return self._search(cr, user, args, offset=offset, limit=limit, order=order, context=context, count=count)
1676 # display_name, name_get, name_create, name_search
1679 @api.depends(lambda self: (self._rec_name,) if self._rec_name else ())
1680 def _compute_display_name(self):
1681 names = dict(self.name_get())
1683 record.display_name = names.get(record.id, False)
1687 """ name_get() -> [(id, name), ...]
1689 Returns a textual representation for the records in ``self``.
1690 By default this is the value of the ``display_name`` field.
        :return: list of pairs ``(id, text_repr)`` for each record
1696 name = self._rec_name
1697 if name in self._fields:
1698 convert = self._fields[name].convert_to_display_name
1700 result.append((record.id, convert(record[name])))
1703 result.append((record.id, "%s,%s" % (record._name, record.id)))
1708 def name_create(self, name):
1709 """ name_create(name) -> record
1711 Create a new record by calling :meth:`~.create` with only one value
1712 provided: the display name of the new record.
1714 The new record will be initialized with any default values
1715 applicable to this model, or provided through the context. The usual
1716 behavior of :meth:`~.create` applies.
1718 :param name: display name of the record to create
1720 :return: the :meth:`~.name_get` pair value of the created record
1723 record = self.create({self._rec_name: name})
1724 return record.name_get()[0]
1726 _logger.warning("Cannot execute name_create, no _rec_name defined on %s", self._name)
1730 def name_search(self, name='', args=None, operator='ilike', limit=100):
1731 """ name_search(name='', args=None, operator='ilike', limit=100) -> records
1733 Search for records that have a display name matching the given
1734 `name` pattern when compared with the given `operator`, while also
1735 matching the optional search domain (`args`).
1737 This is used for example to provide suggestions based on a partial
1738 value for a relational field. It can sometimes be seen as the inverse
1739 function of :meth:`~.name_get`, but it is not guaranteed to be one.
1741 This method is equivalent to calling :meth:`~.search` with a search
1742 domain based on ``display_name`` and then :meth:`~.name_get` on the
1743 result of the search.
1745 :param str name: the name pattern to match
1746 :param list args: optional search domain (see :meth:`~.search` for
1747 syntax), specifying further restrictions
1748 :param str operator: domain operator for matching `name`, such as
1749 ``'like'`` or ``'='``.
1750 :param int limit: optional max number of records to return
1752 :return: list of pairs ``(id, text_repr)`` for all matching records.
1754 return self._name_search(name, args, operator, limit=limit)
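# Illustrative sketch only: name_search() is what powers many2one
# autocompletion. A call such as the one below (new API) returns name_get()
# style pairs for records whose display name matches the pattern; the model,
# domain and result values are assumptions.
#
#   partners = self.env['res.partner']
#   pairs = partners.name_search('Agro', args=[('customer', '=', True)],
#                                operator='ilike', limit=8)
#   # -> e.g. [(7, 'Agrolait'), (8, 'Agrolait, Michel Fletcher'), ...]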
1756 def _name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100, name_get_uid=None):
1757 # private implementation of name_search, allows passing a dedicated user
1758 # for the name_get part to solve some access rights issues
1759 args = list(args or [])
1760 # optimize out the default criterion of ``ilike ''`` that matches everything
1761 if not self._rec_name:
1762 _logger.warning("Cannot execute name_search, no _rec_name defined on %s", self._name)
1763 elif not (name == '' and operator == 'ilike'):
1764 args += [(self._rec_name, operator, name)]
1765 access_rights_uid = name_get_uid or user
1766 ids = self._search(cr, user, args, limit=limit, context=context, access_rights_uid=access_rights_uid)
1767 res = self.name_get(cr, access_rights_uid, ids, context)
1770 def read_string(self, cr, uid, id, langs, fields=None, context=None):
1773 self.pool.get('ir.translation').check_access_rights(cr, uid, 'read')
1775 fields = self._columns.keys() + self._inherit_fields.keys()
1776 #FIXME: collect all calls to _get_source into one SQL call.
1778 res[lang] = {'code': lang}
1780 if f in self._columns:
1781 res_trans = self.pool.get('ir.translation')._get_source(cr, uid, self._name+','+f, 'field', lang)
1783 res[lang][f] = res_trans
1785 res[lang][f] = self._columns[f].string
1786 for table in self._inherits:
1787 cols = intersect(self._inherit_fields.keys(), fields)
1788 res2 = self.pool[table].read_string(cr, uid, id, langs, cols, context)
1791 res[lang]['code'] = lang
1792 for f in res2[lang]:
1793 res[lang][f] = res2[lang][f]
1796 def write_string(self, cr, uid, id, langs, vals, context=None):
1797 self.pool.get('ir.translation').check_access_rights(cr, uid, 'write')
1798 #FIXME: try to only call the translation in one SQL
1801 if field in self._columns:
1802 src = self._columns[field].string
1803 self.pool.get('ir.translation')._set_ids(cr, uid, self._name+','+field, 'field', lang, [0], vals[field], src)
1804 for table in self._inherits:
1805 cols = intersect(self._inherit_fields.keys(), vals)
1807 self.pool[table].write_string(cr, uid, id, langs, vals, context)
1810 def _add_missing_default_values(self, cr, uid, values, context=None):
1811 # avoid overriding inherited values when parent is set
1813 for tables, parent_field in self._inherits.items():
1814 if parent_field in values:
1815 avoid_tables.append(tables)
1817 # compute missing fields
1818 missing_defaults = set()
1819 for field in self._columns.keys():
1820 if not field in values:
1821 missing_defaults.add(field)
1822 for field in self._inherit_fields.keys():
1823 if (field not in values) and (self._inherit_fields[field][0] not in avoid_tables):
1824 missing_defaults.add(field)
1825 # discard magic fields
1826 missing_defaults -= set(MAGIC_COLUMNS)
1828 if missing_defaults:
1829 # override defaults with the provided values, never allow the other way around
1830 defaults = self.default_get(cr, uid, list(missing_defaults), context)
1832 if ((dv in self._columns and self._columns[dv]._type == 'many2many') \
1833 or (dv in self._inherit_fields and self._inherit_fields[dv][2]._type == 'many2many')) \
1834 and defaults[dv] and isinstance(defaults[dv][0], (int, long)):
1835 defaults[dv] = [(6, 0, defaults[dv])]
1836 if (dv in self._columns and self._columns[dv]._type == 'one2many' \
1837 or (dv in self._inherit_fields and self._inherit_fields[dv][2]._type == 'one2many')) \
1838 and isinstance(defaults[dv], (list, tuple)) and defaults[dv] and isinstance(defaults[dv][0], dict):
1839 defaults[dv] = [(0, 0, x) for x in defaults[dv]]
1840 defaults.update(values)
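# For reference, a sketch (not original code) of what the conversion above
# does: x2many defaults are rewritten into the usual ORM command tuples, e.g.
#
#   {'category_id': [1, 2, 3]}        ->  {'category_id': [(6, 0, [1, 2, 3])]}
#   {'line_ids': [{'name': 'foo'}]}   ->  {'line_ids': [(0, 0, {'name': 'foo'})]}
#
# so that default_get() results can be passed straight to create(). The field
# names are hypothetical.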
1844 def clear_caches(self):
1845 """ Clear the caches
1847 This clears the caches associated to methods decorated with
1848 ``tools.ormcache`` or ``tools.ormcache_multi``.
1851 self._ormcache.clear()
1852 self.pool._any_cache_cleared = True
1853 except AttributeError:
1857 def _read_group_fill_results(self, cr, uid, domain, groupby, remaining_groupbys,
1858 aggregated_fields, count_field,
1859 read_group_result, read_group_order=None, context=None):
1860 """Helper method for filling in empty groups for all possible values of
1861 the field being grouped by"""
1863 # self._group_by_full should map groupable fields to a method that returns
1864 # a list of all aggregated values that we want to display for this field,
1865 # in the form of a m2o-like pair (key,label).
1866 # This is useful to implement kanban views for instance, where all columns
1867 # should be displayed even if they don't contain any record.
1869 # Grab the list of all groups that should be displayed, including all present groups
1870 present_group_ids = [x[groupby][0] for x in read_group_result if x[groupby]]
1871 all_groups,folded = self._group_by_full[groupby](self, cr, uid, present_group_ids, domain,
1872 read_group_order=read_group_order,
1873 access_rights_uid=openerp.SUPERUSER_ID,
1876 result_template = dict.fromkeys(aggregated_fields, False)
1877 result_template[groupby + '_count'] = 0
1878 if remaining_groupbys:
1879 result_template['__context'] = {'group_by': remaining_groupbys}
1881 # Merge the left_side (current results as dicts) with the right_side (all
1882 # possible values as m2o pairs). Both lists are supposed to be using the
1883 # same ordering, and can be merged in one pass.
1886 def append_left(left_side):
1887 grouped_value = left_side[groupby] and left_side[groupby][0]
1888 if not grouped_value in known_values:
1889 result.append(left_side)
1890 known_values[grouped_value] = left_side
1892 known_values[grouped_value].update({count_field: left_side[count_field]})
1893 def append_right(right_side):
1894 grouped_value = right_side[0]
1895 if not grouped_value in known_values:
1896 line = dict(result_template)
1897 line[groupby] = right_side
1898 line['__domain'] = [(groupby,'=',grouped_value)] + domain
1900 known_values[grouped_value] = line
1901 while read_group_result or all_groups:
1902 left_side = read_group_result[0] if read_group_result else None
1903 right_side = all_groups[0] if all_groups else None
1904 assert left_side is None or left_side[groupby] is False \
1905 or isinstance(left_side[groupby], (tuple,list)), \
1906 'M2O-like pair expected, got %r' % left_side[groupby]
1907 assert right_side is None or isinstance(right_side, (tuple,list)), \
1908 'M2O-like pair expected, got %r' % right_side
1909 if left_side is None:
1910 append_right(all_groups.pop(0))
1911 elif right_side is None:
1912 append_left(read_group_result.pop(0))
1913 elif left_side[groupby] == right_side:
1914 append_left(read_group_result.pop(0))
1915 all_groups.pop(0) # discard right_side
1916 elif not left_side[groupby] or not left_side[groupby][0]:
1917 # left side == "Undefined" entry, not present on right_side
1918 append_left(read_group_result.pop(0))
1920 append_right(all_groups.pop(0))
1924 r['__fold'] = folded.get(r[groupby] and r[groupby][0], False)
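# Hedged sketch (not part of the original file): a model opts into this
# behaviour by mapping a groupable field to a provider method in
# _group_by_full, as kanban views typically do for stage columns. All names
# below are assumptions.
#
#   def _read_group_stage_ids(self, cr, uid, ids, domain, read_group_order=None,
#                             access_rights_uid=None, context=None):
#       stage_obj = self.pool['my.stage']
#       stage_ids = stage_obj.search(cr, access_rights_uid or uid, [],
#                                    order=read_group_order or 'sequence',
#                                    context=context)
#       result = stage_obj.name_get(cr, access_rights_uid or uid, stage_ids, context)
#       fold = {}  # {stage_id: folded?} used for the '__fold' key above
#       return result, fold
#
#   _group_by_full = {'stage_id': _read_group_stage_ids}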
1927 def _read_group_prepare(self, orderby, aggregated_fields, annotated_groupbys, query):
1929 Prepares the GROUP BY and ORDER BY terms for the read_group method. Adds the missing JOIN clause
1930 to the query if the ordering must be computed against a many2one field.
1931 :param orderby: the orderby definition in the form "%(field)s %(order)s"
1932 :param aggregated_fields: list of aggregated fields in the query
1933 :param annotated_groupbys: list of dictionaries returned by _read_group_process_groupby
1934 These dictionaries contain the qualified name of each groupby
1935 (fully qualified SQL name for the corresponding field),
1936 and the (non-raw) field name.
1937 :param osv.Query query: the query under construction
1938 :return: (groupby_terms, orderby_terms)
1941 groupby_terms = [gb['qualified_field'] for gb in annotated_groupbys]
1942 groupby_fields = [gb['groupby'] for gb in annotated_groupbys]
1944 return groupby_terms, orderby_terms
1946 self._check_qorder(orderby)
1947 for order_part in orderby.split(','):
1948 order_split = order_part.split()
1949 order_field = order_split[0]
1950 if order_field in groupby_fields:
1952 if self._all_columns[order_field.split(':')[0]].column._type == 'many2one':
1953 order_clause = self._generate_order_by(order_part, query).replace('ORDER BY ', '')
1955 orderby_terms.append(order_clause)
1956 groupby_terms += [order_term.split()[0] for order_term in order_clause.split(',')]
1958 order = '"%s" %s' % (order_field, '' if len(order_split) == 1 else order_split[1])
1959 orderby_terms.append(order)
1960 elif order_field in aggregated_fields:
1961 orderby_terms.append(order_part)
1963 # Cannot order by a field that will not appear in the results (needs to be grouped or aggregated)
1964 _logger.warn('%s: read_group order by `%s` ignored, cannot sort on empty columns (not grouped/aggregated)',
1965 self._name, order_part)
1966 return groupby_terms, orderby_terms
1968 def _read_group_process_groupby(self, gb, query, context):
1970 Helper method to collect important information about groupbys: raw
1971 field name, type, time information, qualified name, ...
1973 split = gb.split(':')
1974 field_type = self._all_columns[split[0]].column._type
1975 gb_function = split[1] if len(split) == 2 else None
1976 temporal = field_type in ('date', 'datetime')
1977 tz_convert = field_type == 'datetime' and context.get('tz') in pytz.all_timezones
1978 qualified_field = self._inherits_join_calc(split[0], query)
1981 'day': 'dd MMM YYYY',
1982 'week': "'W'w YYYY",
1983 'month': 'MMMM YYYY',
1984 'quarter': 'QQQ YYYY',
1988 'day': dateutil.relativedelta.relativedelta(days=1),
1989 'week': datetime.timedelta(days=7),
1990 'month': dateutil.relativedelta.relativedelta(months=1),
1991 'quarter': dateutil.relativedelta.relativedelta(months=3),
1992 'year': dateutil.relativedelta.relativedelta(years=1)
1995 qualified_field = "timezone('%s', timezone('UTC',%s))" % (context.get('tz', 'UTC'), qualified_field)
1996 qualified_field = "date_trunc('%s', %s)" % (gb_function or 'month', qualified_field)
1997 if field_type == 'boolean':
1998 qualified_field = "coalesce(%s,false)" % qualified_field
2003 'display_format': display_formats[gb_function or 'month'] if temporal else None,
2004 'interval': time_intervals[gb_function or 'month'] if temporal else None,
2005 'tz_convert': tz_convert,
2006 'qualified_field': qualified_field
2009 def _read_group_prepare_data(self, key, value, groupby_dict, context):
2011 Helper method to sanitize the data received by read_group. The None
2012 values are converted to False, and the date/datetime values are formatted
2013 and corrected according to the timezones.
2015 value = False if value is None else value
2016 gb = groupby_dict.get(key)
2017 if gb and gb['type'] in ('date', 'datetime') and value:
2018 if isinstance(value, basestring):
2019 dt_format = DEFAULT_SERVER_DATETIME_FORMAT if gb['type'] == 'datetime' else DEFAULT_SERVER_DATE_FORMAT
2020 value = datetime.datetime.strptime(value, dt_format)
2021 if gb['tz_convert']:
2022 value = pytz.timezone(context['tz']).localize(value)
2025 def _read_group_get_domain(self, groupby, value):
2027 Helper method to construct the domain corresponding to a groupby and
2028 a given value. This is mostly relevant for date/datetime.
2030 if groupby['type'] in ('date', 'datetime') and value:
2031 dt_format = DEFAULT_SERVER_DATETIME_FORMAT if groupby['type'] == 'datetime' else DEFAULT_SERVER_DATE_FORMAT
2032 domain_dt_begin = value
2033 domain_dt_end = value + groupby['interval']
2034 if groupby['tz_convert']:
2035 domain_dt_begin = domain_dt_begin.astimezone(pytz.utc)
2036 domain_dt_end = domain_dt_end.astimezone(pytz.utc)
2037 return [(groupby['field'], '>=', domain_dt_begin.strftime(dt_format)),
2038 (groupby['field'], '<', domain_dt_end.strftime(dt_format))]
2039 if groupby['type'] == 'many2one' and value:
2041 return [(groupby['field'], '=', value)]
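# Example of what this helper produces (field name and dates are illustrative
# only): grouping a date field by month yields a half-open interval domain
# such as
#   [('date_order', '>=', '2014-03-01'), ('date_order', '<', '2014-04-01')]
# while a many2one groupby simply yields [('partner_id', '=', value)].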
2043 def _read_group_format_result(self, data, annotated_groupbys, groupby, groupby_dict, domain, context):
2045 Helper method to format the data contained in the dictionary data by
2046 adding the domain corresponding to its values, the groupbys in the
2047 context and by properly formatting the date/datetime values.
2049 domain_group = [dom for gb in annotated_groupbys for dom in self._read_group_get_domain(gb, data[gb['groupby']])]
2050 for k,v in data.iteritems():
2051 gb = groupby_dict.get(k)
2052 if gb and gb['type'] in ('date', 'datetime') and v:
2053 data[k] = babel.dates.format_date(v, format=gb['display_format'], locale=context.get('lang', 'en_US'))
2055 data['__domain'] = domain_group + domain
2056 if len(groupby) - len(annotated_groupbys) >= 1:
2057 data['__context'] = { 'group_by': groupby[len(annotated_groupbys):]}
2061 def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False, lazy=True):
2063 Get the list of records in list view grouped by the given ``groupby`` fields
2065 :param cr: database cursor
2066 :param uid: current user id
2067 :param domain: list specifying search criteria [['field_name', 'operator', 'value'], ...]
2068 :param list fields: list of fields present in the list view specified on the object
2069 :param list groupby: list of groupby descriptions by which the records will be grouped.
2070 A groupby description is either a field (then it will be grouped by that field)
2071 or a string 'field:groupby_function'. Right now, the only functions supported
2072 are 'day', 'week', 'month', 'quarter' or 'year', and they only make sense for
2073 date/datetime fields.
2074 :param int offset: optional number of records to skip
2075 :param int limit: optional max number of records to return
2076 :param dict context: context arguments, like lang, time zone.
2077 :param list orderby: optional ``order by`` specification, for
2078 overriding the natural sort ordering of the
2079 groups, see also :py:meth:`~osv.osv.osv.search`
2080 (supported only for many2one fields currently)
2081 :param bool lazy: if true, the results are only grouped by the first groupby and the
2082 remaining groupbys are put in the __context key. If false, all the groupbys are done in one call.
2084 :return: list of dictionaries (one dictionary for each record) containing:
2086 * the values of fields grouped by the fields in ``groupby`` argument
2087 * __domain: list of tuples specifying the search criteria
2088 * __context: dictionary with argument like ``groupby``
2089 :rtype: [{'field_name_1': value, ...}, ...]
2090 :raise AccessError: * if user has no read rights on the requested object
2091 * if user tries to bypass access rules for read on the requested object
2095 self.check_access_rights(cr, uid, 'read')
2096 query = self._where_calc(cr, uid, domain, context=context)
2097 fields = fields or self._columns.keys()
2099 groupby = [groupby] if isinstance(groupby, basestring) else groupby
2100 groupby_list = groupby[:1] if lazy else groupby
2101 annotated_groupbys = [self._read_group_process_groupby(gb, query, context)
2102 for gb in groupby_list]
2103 groupby_fields = [g['field'] for g in annotated_groupbys]
2104 order = orderby or ','.join([g for g in groupby_list])
2105 groupby_dict = {gb['groupby']: gb for gb in annotated_groupbys}
2107 self._apply_ir_rules(cr, uid, query, 'read', context=context)
2108 for gb in groupby_fields:
2109 assert gb in fields, "Fields in 'groupby' must appear in the list of fields to read (perhaps it's missing in the list view?)"
2110 groupby_def = self._columns.get(gb) or (self._inherit_fields.get(gb) and self._inherit_fields.get(gb)[2])
2111 assert groupby_def and groupby_def._classic_write, "Fields in 'groupby' must be regular database-persisted fields (no function or related fields), or function fields with store=True"
2112 if not (gb in self._all_columns):
2113 # Don't allow arbitrary values, as this would be a SQL injection vector!
2114 raise except_orm(_('Invalid group_by'),
2115 _('Invalid group_by specification: "%s".\nA group_by specification must be a list of valid fields.')%(gb,))
2117 aggregated_fields = [
2119 if f not in ('id', 'sequence')
2120 if f not in groupby_fields
2121 if f in self._all_columns
2122 if self._all_columns[f].column._type in ('integer', 'float')
2123 if getattr(self._all_columns[f].column, '_classic_write')]
2125 field_formatter = lambda f: (self._all_columns[f].column.group_operator or 'sum', self._inherits_join_calc(f, query), f)
2126 select_terms = ["%s(%s) AS %s" % field_formatter(f) for f in aggregated_fields]
2128 for gb in annotated_groupbys:
2129 select_terms.append('%s as "%s" ' % (gb['qualified_field'], gb['groupby']))
2131 groupby_terms, orderby_terms = self._read_group_prepare(order, aggregated_fields, annotated_groupbys, query)
2132 from_clause, where_clause, where_clause_params = query.get_sql()
2133 if lazy and (len(groupby_fields) >= 2 or not context.get('group_by_no_leaf')):
2134 count_field = groupby_fields[0] if len(groupby_fields) >= 1 else '_'
2137 count_field += '_count'
2139 prefix_terms = lambda prefix, terms: (prefix + " " + ",".join(terms)) if terms else ''
2140 prefix_term = lambda prefix, term: ('%s %s' % (prefix, term)) if term else ''
2143 SELECT min(%(table)s.id) AS id, count(%(table)s.id) AS %(count_field)s %(extra_fields)s
2151 'table': self._table,
2152 'count_field': count_field,
2153 'extra_fields': prefix_terms(',', select_terms),
2154 'from': from_clause,
2155 'where': prefix_term('WHERE', where_clause),
2156 'groupby': prefix_terms('GROUP BY', groupby_terms),
2157 'orderby': prefix_terms('ORDER BY', orderby_terms),
2158 'limit': prefix_term('LIMIT', int(limit) if limit else None),
2159 'offset': prefix_term('OFFSET', int(offset) if offset else None),
2161 cr.execute(query, where_clause_params)
2162 fetched_data = cr.dictfetchall()
2164 if not groupby_fields:
2167 many2onefields = [gb['field'] for gb in annotated_groupbys if gb['type'] == 'many2one']
2169 data_ids = [r['id'] for r in fetched_data]
2170 many2onefields = list(set(many2onefields))
2171 data_dict = {d['id']: d for d in self.read(cr, uid, data_ids, many2onefields, context=context)}
2172 for d in fetched_data:
2173 d.update(data_dict[d['id']])
2175 data = map(lambda r: {k: self._read_group_prepare_data(k,v, groupby_dict, context) for k,v in r.iteritems()}, fetched_data)
2176 result = [self._read_group_format_result(d, annotated_groupbys, groupby, groupby_dict, domain, context) for d in data]
2177 if lazy and groupby_fields[0] in self._group_by_full:
2178 # Right now, read_group only fills results in lazy mode (by default).
2179 # If you need to have the empty groups in 'eager' mode, then the
2180 # method _read_group_fill_results needs to be completely reimplemented
2182 result = self._read_group_fill_results(cr, uid, domain, groupby_fields[0], groupby[len(annotated_groupbys):],
2183 aggregated_fields, count_field, result, read_group_order=order,
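# Hedged usage sketch of read_group() itself (model and field names are
# placeholders, not from this file):
#
#   groups = self.pool['sale.order'].read_group(
#       cr, uid, [('state', '=', 'draft')], ['partner_id', 'amount_total'],
#       ['partner_id'], context=context)
#   # -> e.g. [{'partner_id': (7, 'Agrolait'), 'partner_id_count': 3,
#   #           'amount_total': 1200.0, '__domain': [...]}, ...]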
2187 def _inherits_join_add(self, current_model, parent_model_name, query):
2189 Add missing table SELECT and JOIN clause to ``query`` for reaching the parent table (no duplicates)
2190 :param current_model: current model object
2191 :param parent_model_name: name of the parent model for which the clauses should be added
2192 :param query: query object on which the JOIN should be added
2194 inherits_field = current_model._inherits[parent_model_name]
2195 parent_model = self.pool[parent_model_name]
2196 parent_alias, parent_alias_statement = query.add_join((current_model._table, parent_model._table, inherits_field, 'id', inherits_field), implicit=True)
2199 def _inherits_join_calc(self, field, query):
2201 Adds missing table select and join clause(s) to ``query`` for reaching
2202 the field coming from an '_inherits' parent table (no duplicates).
2204 :param field: name of inherited field to reach
2205 :param query: query object on which the JOIN should be added
2206 :return: qualified name of field, to be used in SELECT clause
2208 current_table = self
2209 parent_alias = '"%s"' % current_table._table
2210 while field in current_table._inherit_fields and not field in current_table._columns:
2211 parent_model_name = current_table._inherit_fields[field][0]
2212 parent_table = self.pool[parent_model_name]
2213 parent_alias = self._inherits_join_add(current_table, parent_model_name, query)
2214 current_table = parent_table
2215 return '%s."%s"' % (parent_alias, field)
2217 def _parent_store_compute(self, cr):
2218 if not self._parent_store:
2220 _logger.info('Computing parent left and right for table %s...', self._table)
2221 def browse_rec(root, pos=0):
2223 where = self._parent_name+'='+str(root)
2225 where = self._parent_name+' IS NULL'
2226 if self._parent_order:
2227 where += ' order by '+self._parent_order
2228 cr.execute('SELECT id FROM '+self._table+' WHERE '+where)
2230 for id in cr.fetchall():
2231 pos2 = browse_rec(id[0], pos2)
2232 cr.execute('update '+self._table+' set parent_left=%s, parent_right=%s where id=%s', (pos, pos2, root))
2234 query = 'SELECT id FROM '+self._table+' WHERE '+self._parent_name+' IS NULL'
2235 if self._parent_order:
2236 query += ' order by ' + self._parent_order
2239 for (root,) in cr.fetchall():
2240 pos = browse_rec(root, pos)
2241 self.invalidate_cache(cr, SUPERUSER_ID, ['parent_left', 'parent_right'])
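# Sketch of how a model typically enables this parent_left/parent_right
# machinery (assumed example, not taken from this file):
#
#   _parent_store = True
#   _parent_name = 'parent_id'      # default value, shown for clarity
#   _parent_order = 'name'
#   _columns = {
#       'parent_id': fields.many2one('my.model', 'Parent', ondelete='cascade'),
#       'parent_left': fields.integer('Left Parent', select=1),
#       'parent_right': fields.integer('Right Parent', select=1),
#   }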
2244 def _update_store(self, cr, f, k):
2245 _logger.info("storing computed values of fields.function '%s'", k)
2246 ss = self._columns[k]._symbol_set
2247 update_query = 'UPDATE "%s" SET "%s"=%s WHERE id=%%s' % (self._table, k, ss[0])
2248 cr.execute('select id from '+self._table)
2249 ids_lst = map(lambda x: x[0], cr.fetchall())
2251 iids = ids_lst[:AUTOINIT_RECALCULATE_STORED_FIELDS]
2252 ids_lst = ids_lst[AUTOINIT_RECALCULATE_STORED_FIELDS:]
2253 res = f.get(cr, self, iids, k, SUPERUSER_ID, {})
2254 for key, val in res.items():
2257 # if val is a many2one, just write the ID
2258 if type(val) == tuple:
2260 if val is not False:
2261 cr.execute(update_query, (ss[1](val), key))
2264 def _check_selection_field_value(self, field, value):
2265 """ Check whether value is among the valid values for the given
2266 selection/reference field, and raise an exception if not.
2268 field = self._fields[field]
2269 field.convert_to_cache(value, self)
2271 def _check_removed_columns(self, cr, log=False):
2272 # iterate on the database columns to drop the NOT NULL constraints
2273 # of fields which were required but have been removed (or will be added by another module)
2274 columns = [c for c in self._columns if not (isinstance(self._columns[c], fields.function) and not self._columns[c].store)]
2275 columns += MAGIC_COLUMNS
2276 cr.execute("SELECT a.attname, a.attnotnull"
2277 " FROM pg_class c, pg_attribute a"
2278 " WHERE c.relname=%s"
2279 " AND c.oid=a.attrelid"
2280 " AND a.attisdropped=%s"
2281 " AND pg_catalog.format_type(a.atttypid, a.atttypmod) NOT IN ('cid', 'tid', 'oid', 'xid')"
2282 " AND a.attname NOT IN %s", (self._table, False, tuple(columns))),
2284 for column in cr.dictfetchall():
2286 _logger.debug("column %s is in the table %s but not in the corresponding object %s",
2287 column['attname'], self._table, self._name)
2288 if column['attnotnull']:
2289 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, column['attname']))
2290 _schema.debug("Table '%s': column '%s': dropped NOT NULL constraint",
2291 self._table, column['attname'])
2293 def _save_constraint(self, cr, constraint_name, type):
2295 Record the creation of a constraint for this model, to make it possible
2296 to delete it later when the module is uninstalled. Type can be either
2297 'f' or 'u', depending on whether the constraint is a foreign key or not.
2299 if not self._module:
2300 # no need to save constraints for custom models as they're not part
2303 assert type in ('f', 'u')
2305 SELECT 1 FROM ir_model_constraint, ir_module_module
2306 WHERE ir_model_constraint.module=ir_module_module.id
2307 AND ir_model_constraint.name=%s
2308 AND ir_module_module.name=%s
2309 """, (constraint_name, self._module))
2312 INSERT INTO ir_model_constraint
2313 (name, date_init, date_update, module, model, type)
2314 VALUES (%s, now() AT TIME ZONE 'UTC', now() AT TIME ZONE 'UTC',
2315 (SELECT id FROM ir_module_module WHERE name=%s),
2316 (SELECT id FROM ir_model WHERE model=%s), %s)""",
2317 (constraint_name, self._module, self._name, type))
2319 def _save_relation_table(self, cr, relation_table):
2321 Record the creation of a many2many relation table for this model, to make it possible
2322 to delete it later when the module is uninstalled.
2325 SELECT 1 FROM ir_model_relation, ir_module_module
2326 WHERE ir_model_relation.module=ir_module_module.id
2327 AND ir_model_relation.name=%s
2328 AND ir_module_module.name=%s
2329 """, (relation_table, self._module))
2331 cr.execute("""INSERT INTO ir_model_relation (name, date_init, date_update, module, model)
2332 VALUES (%s, now() AT TIME ZONE 'UTC', now() AT TIME ZONE 'UTC',
2333 (SELECT id FROM ir_module_module WHERE name=%s),
2334 (SELECT id FROM ir_model WHERE model=%s))""",
2335 (relation_table, self._module, self._name))
2336 self.invalidate_cache(cr, SUPERUSER_ID)
2338 # checked version: for direct m2o starting from `self`
2339 def _m2o_add_foreign_key_checked(self, source_field, dest_model, ondelete):
2340 assert self.is_transient() or not dest_model.is_transient(), \
2341 'Many2One relationships from non-transient Model to TransientModel are forbidden'
2342 if self.is_transient() and not dest_model.is_transient():
2343 # TransientModel relationships to regular Models are annoying
2344 # usually because they could block deletion due to the FKs.
2345 # So unless stated otherwise we default them to ondelete=cascade.
2346 ondelete = ondelete or 'cascade'
2347 fk_def = (self._table, source_field, dest_model._table, ondelete or 'set null')
2348 self._foreign_keys.add(fk_def)
2349 _schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s", *fk_def)
2351 # unchecked version: for custom cases, such as m2m relationships
2352 def _m2o_add_foreign_key_unchecked(self, source_table, source_field, dest_model, ondelete):
2353 fk_def = (source_table, source_field, dest_model._table, ondelete or 'set null')
2354 self._foreign_keys.add(fk_def)
2355 _schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s", *fk_def)
2357 def _drop_constraint(self, cr, source_table, constraint_name):
2358 cr.execute("ALTER TABLE %s DROP CONSTRAINT %s" % (source_table,constraint_name))
2360 def _m2o_fix_foreign_key(self, cr, source_table, source_field, dest_model, ondelete):
2361 # Find FK constraint(s) currently established for the m2o field,
2362 # and see whether they are stale or not
2363 cr.execute("""SELECT confdeltype as ondelete_rule, conname as constraint_name,
2364 cl2.relname as foreign_table
2365 FROM pg_constraint as con, pg_class as cl1, pg_class as cl2,
2366 pg_attribute as att1, pg_attribute as att2
2367 WHERE con.conrelid = cl1.oid
2368 AND cl1.relname = %s
2369 AND con.confrelid = cl2.oid
2370 AND array_lower(con.conkey, 1) = 1
2371 AND con.conkey[1] = att1.attnum
2372 AND att1.attrelid = cl1.oid
2373 AND att1.attname = %s
2374 AND array_lower(con.confkey, 1) = 1
2375 AND con.confkey[1] = att2.attnum
2376 AND att2.attrelid = cl2.oid
2377 AND att2.attname = %s
2378 AND con.contype = 'f'""", (source_table, source_field, 'id'))
2379 constraints = cr.dictfetchall()
2381 if len(constraints) == 1:
2382 # Is it the right constraint?
2384 if cons['ondelete_rule'] != POSTGRES_CONFDELTYPES.get((ondelete or 'set null').upper(), 'a')\
2385 or cons['foreign_table'] != dest_model._table:
2386 # Wrong FK: drop it and recreate
2387 _schema.debug("Table '%s': dropping obsolete FK constraint: '%s'",
2388 source_table, cons['constraint_name'])
2389 self._drop_constraint(cr, source_table, cons['constraint_name'])
2391 # it's all good, nothing to do!
2394 # Multiple FKs found for the same field, drop them all, and re-create
2395 for cons in constraints:
2396 _schema.debug("Table '%s': dropping duplicate FK constraints: '%s'",
2397 source_table, cons['constraint_name'])
2398 self._drop_constraint(cr, source_table, cons['constraint_name'])
2400 # (re-)create the FK
2401 self._m2o_add_foreign_key_checked(source_field, dest_model, ondelete)
2404 def _set_default_value_on_column(self, cr, column_name, context=None):
2405 # ideally, we should use default_get(), but it fails due to ir.values
2409 default = self._defaults.get(column_name)
2410 if callable(default):
2411 default = default(self, cr, SUPERUSER_ID, context)
2413 column = self._columns[column_name]
2414 ss = column._symbol_set
2415 db_default = ss[1](default)
2416 # Write default if non-NULL, except for booleans for which False means
2417 # the same as NULL - this saves us an expensive query on large tables.
2418 write_default = (db_default is not None if column._type != 'boolean'
2421 _logger.debug("Table '%s': setting default value of new column %s to %r",
2422 self._table, column_name, default)
2423 query = 'UPDATE "%s" SET "%s"=%s WHERE "%s" is NULL' % (
2424 self._table, column_name, ss[0], column_name)
2425 cr.execute(query, (db_default,))
2426 # this is a disgrace
2429 def _auto_init(self, cr, context=None):
2432 Call _field_create and, unless _auto is False:
2434 - create the corresponding table in database for the model,
2435 - possibly add the parent columns in database,
2436 - possibly add the columns 'create_uid', 'create_date', 'write_uid',
2437 'write_date' in database if _log_access is True (the default),
2438 - report on database columns that no longer exist in _columns,
2439 - remove NOT NULL constraints that are no longer needed,
2440 - alter existing database columns to match _columns,
2441 - create database tables to match _columns,
2442 - add database indices to match _columns,
2443 - save in self._foreign_keys a list of foreign keys to create (see
2447 self._foreign_keys = set()
2448 raise_on_invalid_object_name(self._name)
2451 store_compute = False
2452 stored_fields = [] # new-style stored fields with compute
2454 update_custom_fields = context.get('update_custom_fields', False)
2455 self._field_create(cr, context=context)
2456 create = not self._table_exist(cr)
2460 self._create_table(cr)
2463 cr.execute('SELECT 1 FROM "%s" LIMIT 1' % self._table)
2464 has_rows = cr.rowcount
2467 if self._parent_store:
2468 if not self._parent_columns_exist(cr):
2469 self._create_parent_columns(cr)
2470 store_compute = True
2472 self._check_removed_columns(cr, log=False)
2474 # iterate on the "object columns"
2475 column_data = self._select_column_data(cr)
2477 for k, f in self._columns.iteritems():
2478 if k == 'id': # FIXME: maybe id should be a regular column?
2480 # Don't update custom (also called manual) fields
2481 if f.manual and not update_custom_fields:
2484 if isinstance(f, fields.one2many):
2485 self._o2m_raise_on_missing_reference(cr, f)
2487 elif isinstance(f, fields.many2many):
2488 self._m2m_raise_or_create_relation(cr, f)
2491 res = column_data.get(k)
2493 # The field is not found as-is in database; check whether it
2494 # exists under an old name.
2495 if not res and hasattr(f, 'oldname'):
2496 res = column_data.get(f.oldname)
2498 cr.execute('ALTER TABLE "%s" RENAME "%s" TO "%s"' % (self._table, f.oldname, k))
2500 column_data[k] = res
2501 _schema.debug("Table '%s': renamed column '%s' to '%s'",
2502 self._table, f.oldname, k)
2504 # The field already exists in database. Possibly
2505 # change its type, rename it, drop it or change its
2508 f_pg_type = res['typname']
2509 f_pg_size = res['size']
2510 f_pg_notnull = res['attnotnull']
2511 if isinstance(f, fields.function) and not f.store and\
2512 not getattr(f, 'nodrop', False):
2513 _logger.info('column %s (%s) converted to a function, removed from table %s',
2514 k, f.string, self._table)
2515 cr.execute('ALTER TABLE "%s" DROP COLUMN "%s" CASCADE' % (self._table, k))
2517 _schema.debug("Table '%s': dropped column '%s' with cascade",
2521 f_obj_type = get_pg_type(f) and get_pg_type(f)[0]
2526 ('text', 'char', pg_varchar(f.size), '::%s' % pg_varchar(f.size)),
2527 ('varchar', 'text', 'TEXT', ''),
2528 ('int4', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
2529 ('date', 'datetime', 'TIMESTAMP', '::TIMESTAMP'),
2530 ('timestamp', 'date', 'date', '::date'),
2531 ('numeric', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
2532 ('float8', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
2534 if f_pg_type == 'varchar' and f._type == 'char' and f_pg_size and (f.size is None or f_pg_size < f.size):
2536 with cr.savepoint():
2537 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" TYPE %s' % (self._table, k, pg_varchar(f.size)))
2538 except psycopg2.NotSupportedError:
2539 # An in-place ALTER TABLE cannot be done because a view depends on this field.
2540 # Do a manual copy. This will drop the view (it will be recreated later).
2541 cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO temp_change_size' % (self._table, k))
2542 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, pg_varchar(f.size)))
2543 cr.execute('UPDATE "%s" SET "%s"=temp_change_size::%s' % (self._table, k, pg_varchar(f.size)))
2544 cr.execute('ALTER TABLE "%s" DROP COLUMN temp_change_size CASCADE' % (self._table,))
2546 _schema.debug("Table '%s': column '%s' (type varchar) changed size from %s to %s",
2547 self._table, k, f_pg_size or 'unlimited', f.size or 'unlimited')
2549 if (f_pg_type==c[0]) and (f._type==c[1]):
2550 if f_pg_type != f_obj_type:
2552 cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO __temp_type_cast' % (self._table, k))
2553 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, c[2]))
2554 cr.execute(('UPDATE "%s" SET "%s"= __temp_type_cast'+c[3]) % (self._table, k))
2555 cr.execute('ALTER TABLE "%s" DROP COLUMN __temp_type_cast CASCADE' % (self._table,))
2557 _schema.debug("Table '%s': column '%s' changed type from %s to %s",
2558 self._table, k, c[0], c[1])
2561 if f_pg_type != f_obj_type:
2565 newname = k + '_moved' + str(i)
2566 cr.execute("SELECT count(1) FROM pg_class c,pg_attribute a " \
2567 "WHERE c.relname=%s " \
2568 "AND a.attname=%s " \
2569 "AND c.oid=a.attrelid ", (self._table, newname))
2570 if not cr.fetchone()[0]:
2574 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, k))
2575 cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO "%s"' % (self._table, k, newname))
2576 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, get_pg_type(f)[1]))
2577 cr.execute("COMMENT ON COLUMN %s.\"%s\" IS %%s" % (self._table, k), (f.string,))
2578 _schema.debug("Table '%s': column '%s' has changed type (DB=%s, def=%s), data moved to column %s !",
2579 self._table, k, f_pg_type, f._type, newname)
2581 # if the field is required and hasn't got a NOT NULL constraint
2582 if f.required and f_pg_notnull == 0:
2584 self._set_default_value_on_column(cr, k, context=context)
2585 # add the NOT NULL constraint
2587 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k), log_exceptions=False)
2589 _schema.debug("Table '%s': column '%s': added NOT NULL constraint",
2592 msg = "Table '%s': unable to set a NOT NULL constraint on column '%s' !\n"\
2593 "If you want to have it, you should update the records and execute manually:\n"\
2594 "ALTER TABLE %s ALTER COLUMN %s SET NOT NULL"
2595 _schema.warning(msg, self._table, k, self._table, k)
2597 elif not f.required and f_pg_notnull == 1:
2598 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, k))
2600 _schema.debug("Table '%s': column '%s': dropped NOT NULL constraint",
2603 indexname = '%s_%s_index' % (self._table, k)
2604 cr.execute("SELECT indexname FROM pg_indexes WHERE indexname = %s and tablename = %s", (indexname, self._table))
2605 res2 = cr.dictfetchall()
2606 if not res2 and f.select:
2607 cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (self._table, k, self._table, k))
2609 if f._type == 'text':
2610 # FIXME: for fields.text columns we should try creating GIN indexes instead (seems most suitable for an ERP context)
2611 msg = "Table '%s': Adding (b-tree) index for %s column '%s'."\
2612 "This is probably useless (does not work for fulltext search) and prevents INSERTs of long texts"\
2613 " because there is a length limit for indexable btree values!\n"\
2614 "Use a search view instead if you simply want to make the field searchable."
2615 _schema.warning(msg, self._table, f._type, k)
2616 if res2 and not f.select:
2617 cr.execute('DROP INDEX "%s_%s_index"' % (self._table, k))
2619 msg = "Table '%s': dropping index for column '%s' of type '%s' as it is not required anymore"
2620 _schema.debug(msg, self._table, k, f._type)
2622 if isinstance(f, fields.many2one) or (isinstance(f, fields.function) and f._type == 'many2one' and f.store):
2623 dest_model = self.pool[f._obj]
2624 if dest_model._auto and dest_model._table != 'ir_actions':
2625 self._m2o_fix_foreign_key(cr, self._table, k, dest_model, f.ondelete)
2627 # The field doesn't exist in database. Create it if necessary.
2629 if not isinstance(f, fields.function) or f.store:
2630 # add the missing field
2631 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, get_pg_type(f)[1]))
2632 cr.execute("COMMENT ON COLUMN %s.\"%s\" IS %%s" % (self._table, k), (f.string,))
2633 _schema.debug("Table '%s': added column '%s' with definition=%s",
2634 self._table, k, get_pg_type(f)[1])
2638 self._set_default_value_on_column(cr, k, context=context)
2640 # remember the functions to call for the stored fields
2641 if isinstance(f, fields.function):
2643 if f.store is not True: # i.e. if f.store is a dict
2644 order = f.store[f.store.keys()[0]][2]
2645 todo_end.append((order, self._update_store, (f, k)))
2647 # remember new-style stored fields with compute method
2648 if k in self._fields and self._fields[k].depends:
2649 stored_fields.append(self._fields[k])
2651 # and add constraints if needed
2652 if isinstance(f, fields.many2one) or (isinstance(f, fields.function) and f._type == 'many2one' and f.store):
2653 if f._obj not in self.pool:
2654 raise except_orm('Programming Error', 'There is no reference available for %s' % (f._obj,))
2655 dest_model = self.pool[f._obj]
2656 ref = dest_model._table
2657 # ir_actions is inherited so foreign key doesn't work on it
2658 if dest_model._auto and ref != 'ir_actions':
2659 self._m2o_add_foreign_key_checked(k, dest_model, f.ondelete)
2661 cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (self._table, k, self._table, k))
2665 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k))
2666 _schema.debug("Table '%s': column '%s': added a NOT NULL constraint",
2669 msg = "WARNING: unable to set column %s of table %s not null !\n"\
2670 "Try to re-run: openerp-server --update=module\n"\
2671 "If it doesn't work, update records and execute manually:\n"\
2672 "ALTER TABLE %s ALTER COLUMN %s SET NOT NULL"
2673 _logger.warning(msg, k, self._table, self._table, k, exc_info=True)
2677 cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (self._table,))
2678 create = not bool(cr.fetchone())
2680 cr.commit() # start a new transaction
2683 self._add_sql_constraints(cr)
2686 self._execute_sql(cr)
2689 self._parent_store_compute(cr)
2693 # trigger computation of new-style stored fields with a compute
2695 _logger.info("Storing computed values of %s fields %s",
2696 self._name, ', '.join(sorted(f.name for f in stored_fields)))
2697 recs = self.browse(cr, SUPERUSER_ID, [], {'active_test': False})
2698 recs = recs.search([])
2700 map(recs._recompute_todo, stored_fields)
2703 todo_end.append((1000, func, ()))
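# Hedged example of the kind of old-style stored function field whose store
# dict is handled above; the names and the trigger mapping are assumptions:
#
#   'total': fields.function(_compute_total, type='float', string='Total',
#       store={
#           'my.order.line': (_get_order_ids_from_lines, ['price_unit', 'qty'], 10),
#       })
#
# where the trigger priority (10, third element) is the value picked up as
# `order` for todo_end above.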
2707 def _auto_end(self, cr, context=None):
2708 """ Create the foreign keys recorded by _auto_init. """
2709 for t, k, r, d in self._foreign_keys:
2710 cr.execute('ALTER TABLE "%s" ADD FOREIGN KEY ("%s") REFERENCES "%s" ON DELETE %s' % (t, k, r, d))
2711 self._save_constraint(cr, "%s_%s_fkey" % (t, k), 'f')
2713 del self._foreign_keys
2716 def _table_exist(self, cr):
2717 cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (self._table,))
2721 def _create_table(self, cr):
2722 cr.execute('CREATE TABLE "%s" (id SERIAL NOT NULL, PRIMARY KEY(id))' % (self._table,))
2723 cr.execute(("COMMENT ON TABLE \"%s\" IS %%s" % self._table), (self._description,))
2724 _schema.debug("Table '%s': created", self._table)
2727 def _parent_columns_exist(self, cr):
2728 cr.execute("""SELECT c.relname
2729 FROM pg_class c, pg_attribute a
2730 WHERE c.relname=%s AND a.attname=%s AND c.oid=a.attrelid
2731 """, (self._table, 'parent_left'))
2735 def _create_parent_columns(self, cr):
2736 cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_left" INTEGER' % (self._table,))
2737 cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_right" INTEGER' % (self._table,))
2738 if 'parent_left' not in self._columns:
2739 _logger.error('create a column parent_left on object %s: fields.integer(\'Left Parent\', select=1)',
2741 _schema.debug("Table '%s': added column '%s' with definition=%s",
2742 self._table, 'parent_left', 'INTEGER')
2743 elif not self._columns['parent_left'].select:
2744 _logger.error('parent_left column on object %s must be indexed! Add select=1 to the field definition)',
2746 if 'parent_right' not in self._columns:
2747 _logger.error('create a column parent_right on object %s: fields.integer(\'Right Parent\', select=1)',
2749 _schema.debug("Table '%s': added column '%s' with definition=%s",
2750 self._table, 'parent_right', 'INTEGER')
2751 elif not self._columns['parent_right'].select:
2752 _logger.error('parent_right column on object %s must be indexed! Add select=1 to the field definition)',
2754 if self._columns[self._parent_name].ondelete not in ('cascade', 'restrict'):
2755 _logger.error("The column %s on object %s must be set as ondelete='cascade' or 'restrict'",
2756 self._parent_name, self._name)
2761 def _select_column_data(self, cr):
2762 # attlen is the number of bytes necessary to represent the type when
2763 # the type has a fixed size. If the type has a varying size attlen is
2764 # -1 and atttypmod is the size limit + 4, or -1 if there is no limit.
2765 cr.execute("SELECT c.relname,a.attname,a.attlen,a.atttypmod,a.attnotnull,a.atthasdef,t.typname,CASE WHEN a.attlen=-1 THEN (CASE WHEN a.atttypmod=-1 THEN 0 ELSE a.atttypmod-4 END) ELSE a.attlen END as size " \
2766 "FROM pg_class c,pg_attribute a,pg_type t " \
2767 "WHERE c.relname=%s " \
2768 "AND c.oid=a.attrelid " \
2769 "AND a.atttypid=t.oid", (self._table,))
2770 return dict(map(lambda x: (x['attname'], x),cr.dictfetchall()))
2773 def _o2m_raise_on_missing_reference(self, cr, f):
2774 # TODO this check should be a method on fields.one2many.
2775 if f._obj in self.pool:
2776 other = self.pool[f._obj]
2777 # TODO the condition could use fields_get_keys().
2778 if f._fields_id not in other._columns.keys():
2779 if f._fields_id not in other._inherit_fields.keys():
2780 raise except_orm('Programming Error', "There is no reference field '%s' found for '%s'" % (f._fields_id, f._obj,))
2782 def _m2m_raise_or_create_relation(self, cr, f):
2783 m2m_tbl, col1, col2 = f._sql_names(self)
2784 # do not create relations for custom fields as they do not belong to a module
2785 # they will be automatically removed when dropping the corresponding ir.model.field
2786 # table names for custom relations all start with x_, see __init__
2787 if not m2m_tbl.startswith('x_'):
2788 self._save_relation_table(cr, m2m_tbl)
2789 cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (m2m_tbl,))
2790 if not cr.dictfetchall():
2791 if f._obj not in self.pool:
2792 raise except_orm('Programming Error', 'Many2Many destination model does not exist: `%s`' % (f._obj,))
2793 dest_model = self.pool[f._obj]
2794 ref = dest_model._table
2795 cr.execute('CREATE TABLE "%s" ("%s" INTEGER NOT NULL, "%s" INTEGER NOT NULL, UNIQUE("%s","%s"))' % (m2m_tbl, col1, col2, col1, col2))
2796 # create foreign key references with ondelete=cascade, unless the targets are SQL views
2797 cr.execute("SELECT relkind FROM pg_class WHERE relkind IN ('v') AND relname=%s", (ref,))
2798 if not cr.fetchall():
2799 self._m2o_add_foreign_key_unchecked(m2m_tbl, col2, dest_model, 'cascade')
2800 cr.execute("SELECT relkind FROM pg_class WHERE relkind IN ('v') AND relname=%s", (self._table,))
2801 if not cr.fetchall():
2802 self._m2o_add_foreign_key_unchecked(m2m_tbl, col1, self, 'cascade')
2804 cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (m2m_tbl, col1, m2m_tbl, col1))
2805 cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (m2m_tbl, col2, m2m_tbl, col2))
2806 cr.execute("COMMENT ON TABLE \"%s\" IS 'RELATION BETWEEN %s AND %s'" % (m2m_tbl, self._table, ref))
2808 _schema.debug("Create table '%s': m2m relation between '%s' and '%s'", m2m_tbl, self._table, ref)
2811 def _add_sql_constraints(self, cr):
2814 Modify this model's database table constraints so they match the ones defined in _sql_constraints.
2818 def unify_cons_text(txt):
2819 return txt.lower().replace(', ',',').replace(' (','(')
2821 for (key, con, _) in self._sql_constraints:
2822 conname = '%s_%s' % (self._table, key)
2824 self._save_constraint(cr, conname, 'u')
2825 cr.execute("SELECT conname, pg_catalog.pg_get_constraintdef(oid, true) as condef FROM pg_constraint where conname=%s", (conname,))
2826 existing_constraints = cr.dictfetchall()
2830 'query': 'ALTER TABLE "%s" DROP CONSTRAINT "%s"' % (self._table, conname, ),
2831 'msg_ok': "Table '%s': dropped constraint '%s'. Reason: its definition changed from '%%s' to '%s'" % (
2832 self._table, conname, con),
2833 'msg_err': "Table '%s': unable to drop \'%s\' constraint !" % (self._table, con),
2838 'query': 'ALTER TABLE "%s" ADD CONSTRAINT "%s" %s' % (self._table, conname, con,),
2839 'msg_ok': "Table '%s': added constraint '%s' with definition=%s" % (self._table, conname, con),
2840 'msg_err': "Table '%s': unable to add \'%s\' constraint !\n If you want to have it, you should update the records and execute manually:\n%%s" % (
2846 if not existing_constraints:
2847 # constraint does not exist:
2848 sql_actions['add']['execute'] = True
2849 sql_actions['add']['msg_err'] = sql_actions['add']['msg_err'] % (sql_actions['add']['query'], )
2850 elif unify_cons_text(con) not in [unify_cons_text(item['condef']) for item in existing_constraints]:
2851 # constraint exists but its definition has changed:
2852 sql_actions['drop']['execute'] = True
2853 sql_actions['drop']['msg_ok'] = sql_actions['drop']['msg_ok'] % (existing_constraints[0]['condef'].lower(), )
2854 sql_actions['add']['execute'] = True
2855 sql_actions['add']['msg_err'] = sql_actions['add']['msg_err'] % (sql_actions['add']['query'], )
2857 # we need to add the constraint:
2858 sql_actions = [item for item in sql_actions.values()]
2859 sql_actions.sort(key=lambda x: x['order'])
2860 for sql_action in [action for action in sql_actions if action['execute']]:
2862 cr.execute(sql_action['query'])
2864 _schema.debug(sql_action['msg_ok'])
2866 _schema.warning(sql_action['msg_err'])
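# A hedged reminder of the declaration this method reflects into the database
# (constraint names and definitions below are examples only):
#
#   _sql_constraints = [
#       ('name_uniq', 'unique(name, company_id)',
#        'The name must be unique per company!'),
#       ('credit_positive', 'CHECK (credit_limit >= 0)',
#        'The credit limit must be positive.'),
#   ]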
2870 def _execute_sql(self, cr):
2871 """ Execute the SQL code from the _sql attribute (if any)."""
2872 if hasattr(self, "_sql"):
2873 for line in self._sql.split(';'):
2874 line2 = line.replace('\n', '').strip()
2880 # Update objects that use this one to update their _inherits fields
2884 def _inherits_reload_src(cls):
2885 """ Recompute the _inherit_fields mapping on each _inherits'd child model."""
2886 for model in cls.pool.values():
2887 if cls._name in model._inherits:
2888 model._inherits_reload()
2891 def _inherits_reload(cls):
2892 """ Recompute the _inherit_fields mapping.
2894 This will also call itself on each inherits'd child model.
2898 for table in cls._inherits:
2899 other = cls.pool[table]
2900 for col in other._columns.keys():
2901 res[col] = (table, cls._inherits[table], other._columns[col], table)
2902 for col in other._inherit_fields.keys():
2903 res[col] = (table, cls._inherits[table], other._inherit_fields[col][2], other._inherit_fields[col][3])
2904 cls._inherit_fields = res
2905 cls._all_columns = cls._get_column_infos()
2907 # interface columns with new-style fields
2908 for attr, column in cls._columns.items():
2909 if attr not in cls._fields:
2910 cls._add_field(attr, column.to_field())
2912 # interface inherited fields with new-style fields (note that the
2913 # reverse order is for being consistent with _all_columns above)
2914 for parent_model, parent_field in reversed(cls._inherits.items()):
2915 for attr, field in cls.pool[parent_model]._fields.iteritems():
2916 if attr not in cls._fields:
2917 cls._add_field(attr, field.new(
2919 related=(parent_field, attr),
2923 cls._inherits_reload_src()
2926 def _get_column_infos(cls):
2927 """Returns a dict mapping all fields names (direct fields and
2928 inherited field via _inherits) to a ``column_info`` struct
2929 giving detailed columns """
2931 # do not invert the two loops, since local fields may hide inherited ones!
2932 for k, (parent, m2o, col, original_parent) in cls._inherit_fields.iteritems():
2933 result[k] = fields.column_info(k, col, parent, m2o, original_parent)
2934 for k, col in cls._columns.iteritems():
2935 result[k] = fields.column_info(k, col)
2939 def _inherits_check(cls):
2940 for table, field_name in cls._inherits.items():
2941 if field_name not in cls._columns:
2942 _logger.info('Missing many2one field definition for _inherits reference "%s" in "%s", using default one.', field_name, cls._name)
2943 cls._columns[field_name] = fields.many2one(table, string="Automatically created field to link to parent %s" % table,
2944 required=True, ondelete="cascade")
2945 elif not cls._columns[field_name].required or cls._columns[field_name].ondelete.lower() not in ("cascade", "restrict"):
2946 _logger.warning('Field definition for _inherits reference "%s" in "%s" must be marked as "required" with ondelete="cascade" or "restrict", forcing it to required + cascade.', field_name, cls._name)
2947 cls._columns[field_name].required = True
2948 cls._columns[field_name].ondelete = "cascade"
2950 # reflect fields with delegate=True in dictionary cls._inherits
2951 for field in cls._fields.itervalues():
2952 if field.type == 'many2one' and not field.related and field.delegate:
2953 if not field.required:
2954 _logger.warning("Field %s with delegate=True must be required.", field)
2955 field.required = True
2956 if field.ondelete.lower() not in ('cascade', 'restrict'):
2957 field.ondelete = 'cascade'
2958 cls._inherits[field.comodel_name] = field.name
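# Illustrative declarations that end up in cls._inherits (assumed examples,
# not taken from this file):
#
#   # old style:
#   _inherits = {'product.template': 'product_tmpl_id'}
#
#   # new style, equivalent (picked up by the loop above):
#   product_tmpl_id = openerp.fields.Many2one('product.template',
#                                             required=True, ondelete='cascade',
#                                             delegate=True)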
2961 def _prepare_setup_fields(self):
2962 """ Prepare the setup of fields once the models have been loaded. """
2963 for field in self._fields.itervalues():
2967 def _setup_fields(self, partial=False):
2968 """ Setup the fields (dependency triggers, etc). """
2969 for field in self._fields.itervalues():
2970 if partial and field.manual and \
2971 field.relational and \
2972 (field.comodel_name not in self.pool or \
2973 (field.type == 'one2many' and field.inverse_name not in self.pool[field.comodel_name]._fields)):
2974 # do not set up manual fields that refer to unknown models
2976 field.setup(self.env)
2978 # group fields by compute to determine field.computed_fields
2979 fields_by_compute = defaultdict(list)
2980 for field in self._fields.itervalues():
2982 field.computed_fields = fields_by_compute[field.compute]
2983 field.computed_fields.append(field)
2985 field.computed_fields = []
2987 def fields_get(self, cr, user, allfields=None, context=None, write_access=True):
2988 """ fields_get([fields])
2990 Return the definition of each field.
2992 The returned value is a dictionary (indexed by field name) of
2993 dictionaries. The _inherits'd fields are included. The string, help,
2994 and selection (if present) attributes are translated.
2996 :param cr: database cursor
2997 :param user: current user id
2998 :param allfields: list of fields
2999 :param context: context arguments, like lang, time zone
3000 :return: dictionary of field dictionaries, each one describing a field of the business object
3001 :raise AccessError: * if user has no create/write rights on the requested object
3004 recs = self.browse(cr, user, [], context)
3007 for fname, field in self._fields.iteritems():
3008 if allfields and fname not in allfields:
3010 if not field.setup_done:
3012 if field.groups and not recs.user_has_groups(field.groups):
3014 res[fname] = field.get_description(recs.env)
3016 # if user cannot create or modify records, make all fields readonly
3017 has_access = functools.partial(recs.check_access_rights, raise_exception=False)
3018 if not (has_access('write') or has_access('create')):
3019 for description in res.itervalues():
3020 description['readonly'] = True
3021 description['states'] = {}
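# Usage sketch (model and field names are placeholders, not from this file):
#
#   defs = self.pool['res.partner'].fields_get(cr, uid, ['name', 'parent_id'],
#                                              context=context)
#   defs['parent_id']['type']      # -> 'many2one'
#   defs['parent_id']['relation']  # -> 'res.partner'
#   defs['name']['string']         # translated field label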
3025 def get_empty_list_help(self, cr, user, help, context=None):
3026 """ Generic method giving the help message displayed when having
3027 no result to display in a list or kanban view. By default it returns
3028 the help given as parameter, which is generally the help message
3029 defined in the action.
3033 def check_field_access_rights(self, cr, user, operation, fields, context=None):
3035 Check the user access rights on the given fields. This raises Access
3036 Denied if the user does not have the rights. Otherwise it returns the
3037 fields (as-is if `fields` is not falsy, or the readable/writable
3038 fields if `fields` is falsy).
3040 if user == SUPERUSER_ID:
3041 return fields or list(self._fields)
3044 """ determine whether user has access to field `fname` """
3045 field = self._fields.get(fname)
3046 if field and field.groups:
3047 return self.user_has_groups(cr, user, groups=field.groups, context=context)
3052 fields = filter(valid, self._fields)
3054 invalid_fields = set(filter(lambda name: not valid(name), fields))
3056 _logger.warning('Access Denied by ACLs for operation: %s, uid: %s, model: %s, fields: %s',
3057 operation, user, self._name, ', '.join(invalid_fields))
3059 _('The requested operation cannot be completed due to security restrictions. '
3060 'Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
3061 (self._description, operation))
3065 # add explicit old-style implementation to read()
3067 def read(self, cr, user, ids, fields=None, context=None, load='_classic_read'):
3068 records = self.browse(cr, user, ids, context)
3069 result = BaseModel.read(records, fields, load=load)
3070 return result if isinstance(ids, list) else (bool(result) and result[0])
3072 # new-style implementation of read()
3074 def read(self, fields=None, load='_classic_read'):
3077 Reads the requested fields for the records in `self`, low-level/RPC
3078 method. In Python code, prefer :meth:`~.browse`.
3080 :param fields: list of field names to return (default is all fields)
3081 :return: a list of dictionaries mapping field names to their values,
3082 with one dictionary per record
3083 :raise AccessError: if user has no read rights on some of the given
3086 # check access rights
3087 self.check_access_rights('read')
3088 fields = self.check_field_access_rights('read', fields)
3090 # split fields into stored and computed fields
3091 stored, computed = [], []
3093 if name in self._columns:
3095 elif name in self._fields:
3096 computed.append(name)
3098 _logger.warning("%s.read() with unknown field '%s'", self._name, name)
3100 # fetch stored fields from the database to the cache
3101 self._read_from_database(stored)
3103 # retrieve results from records; this takes values from the cache and
3104 # computes remaining fields
3106 name_fields = [(name, self._fields[name]) for name in (stored + computed)]
3107 use_name_get = (load == '_classic_read')
3110 values = {'id': record.id}
3111 for name, field in name_fields:
3112 values[name] = field.convert_to_read(record[name], use_name_get)
3113 result.append(values)
3114 except MissingError:
3120 def _prefetch_field(self, field):
3121 """ Read from the database in order to fetch `field` (:class:`Field`
3122 instance) for `self` in cache.
3124 # fetch the records of this model without field_name in their cache
3125 records = self._in_cache_without(field)
3127 if len(records) > PREFETCH_MAX:
3128 records = records[:PREFETCH_MAX] | self
3130 # determine which fields can be prefetched
3131 if not self.env.in_draft and \
3132 self._context.get('prefetch_fields', True) and \
3133 self._columns[field.name]._prefetch:
3134 # prefetch all classic and many2one fields that the user can access
3136 for fname, fcolumn in self._columns.iteritems()
3137 if fcolumn._prefetch
3138 if not fcolumn.groups or self.user_has_groups(fcolumn.groups)
3141 fnames = {field.name}
3143 # important: never prefetch fields to recompute!
3144 get_recs_todo = self.env.field_todo
3145 for fname in list(fnames):
3146 if get_recs_todo(self._fields[fname]):
3147 if fname == field.name:
3148 records -= get_recs_todo(field)
3150 fnames.discard(fname)
3152 # fetch records with read()
3153 assert self in records and field.name in fnames
3156 result = records.read(list(fnames), load='_classic_write')
3160 # check the cache, and update it if necessary
3161 if not self._cache.contains(field):
3162 for values in result:
3163 record = self.browse(values.pop('id'))
3164 record._cache.update(record._convert_to_cache(values, validate=False))
3165 if not self._cache.contains(field):
3166 e = AccessError("No value found for %s.%s" % (self, field.name))
3167 self._cache[field] = FailedValue(e)
3170 def _read_from_database(self, field_names):
3171 """ Read the given fields of the records in `self` from the database,
3172 and store them in cache. Access errors are also stored in cache.
3175 cr, user, context = env.args
3177 # FIXME: The query construction needs to be rewritten using the internal Query
3178 # object, as in search(), to avoid ambiguous column references when
3179 # reading/sorting on a table that is auto_joined to another table with
3180 # common columns (e.g. the magical columns)
3182 # Construct a clause for the security rules.
3183 # 'tables' holds the list of tables necessary for the SELECT, including
3184 # the ir.rule clauses, and contains at least self._table.
3185 rule_clause, rule_params, tables = env['ir.rule'].domain_get(self._name, 'read')
3187 # determine the fields that are stored as columns in self._table
3188 fields_pre = [f for f in field_names if self._columns[f]._classic_write]
3190 # we need fully-qualified column names in case len(tables) > 1
3192 if isinstance(self._columns.get(f), fields.binary) and \
3193 context.get('bin_size_%s' % f, context.get('bin_size')):
3194 # PG 9.2 introduces conflicting pg_size_pretty(numeric) -> need ::cast
3195 return 'pg_size_pretty(length(%s."%s")::bigint) as "%s"' % (self._table, f, f)
3197 return '%s."%s"' % (self._table, f)
3198 qual_names = map(qualify, set(fields_pre + ['id']))
3200 query = """ SELECT %(qual_names)s FROM %(tables)s
3201 WHERE %(table)s.id IN %%s AND (%(extra)s)
3204 'qual_names': ",".join(qual_names),
3205 'tables': ",".join(tables),
3206 'table': self._table,
3207 'extra': " OR ".join(rule_clause) if rule_clause else "TRUE",
3208 'order': self._parent_order or self._order,
3212 for sub_ids in cr.split_for_in_conditions(self.ids):
3213 cr.execute(query, [tuple(sub_ids)] + rule_params)
3214 result.extend(cr.dictfetchall())
3216 ids = [vals['id'] for vals in result]
3219 # translate the fields if necessary
3220 if context.get('lang'):
3221 ir_translation = env['ir.translation']
3222 for f in fields_pre:
3223 if self._columns[f].translate:
3224 #TODO: optimize out of this loop
3225 res_trans = ir_translation._get_ids(
3226 '%s,%s' % (self._name, f), 'model', context['lang'], ids)
3228 vals[f] = res_trans.get(vals['id'], False) or vals[f]
3230 # apply the symbol_get functions of the fields we just read
3231 for f in fields_pre:
3232 symbol_get = self._columns[f]._symbol_get
3235 vals[f] = symbol_get(vals[f])
3237 # store result in cache for POST fields
3239 record = self.browse(vals['id'])
3240 record._cache.update(record._convert_to_cache(vals, validate=False))
3242 # determine the fields that must be processed now
3243 fields_post = [f for f in field_names if not self._columns[f]._classic_write]
3245 # Compute POST fields, grouped by multi
3246 by_multi = defaultdict(list)
3247 for f in fields_post:
3248 by_multi[self._columns[f]._multi].append(f)
3250 for multi, fs in by_multi.iteritems():
3252 res2 = self._columns[fs[0]].get(cr, self._model, ids, fs, user, context=context, values=result)
3253 assert res2 is not None, \
3254 'The function field "%s" on the "%s" model returned None\n' \
3255 '(a dictionary was expected).' % (fs[0], self._name)
3257 # TOCHECK: why do we get a string instead of a dict in python2.6
3258 # if isinstance(res2[vals['id']], str): res2[vals['id']] = eval(res2[vals['id']])
3259 multi_fields = res2.get(vals['id'], {})
3262 vals[f] = multi_fields.get(f, [])
3265 res2 = self._columns[f].get(cr, self._model, ids, f, user, context=context, values=result)
3268 vals[f] = res2[vals['id']]
3272 # Warn about deprecated fields now that fields_pre and fields_post are computed
3273 for f in field_names:
3274 column = self._columns[f]
3275 if column.deprecated:
3276 _logger.warning('Field %s.%s is deprecated: %s', self._name, f, column.deprecated)
3278 # store result in cache
3280 record = self.browse(vals.pop('id'))
3281 record._cache.update(record._convert_to_cache(vals, validate=False))
3283 # store failed values in cache for the records that could not be read
3284 fetched = self.browse(ids)
3285 missing = self - fetched
3287 extras = fetched - self
3290 _("Database fetch misses ids ({}) and has extra ids ({}), may be caused by a type incoherence in a previous request").format(
3291 ', '.join(map(repr, missing._ids)),
3292 ', '.join(map(repr, extras._ids)),
3294 # store an access error exception in existing records
3296 _('The requested operation cannot be completed due to security restrictions. Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
3297 (self._name, 'read')
3299 forbidden = missing.exists()
3300 forbidden._cache.update(FailedValue(exc))
3301 # store a missing error exception in non-existing records
3303 _('One of the documents you are trying to access has been deleted, please try again after refreshing.')
3305 (missing - forbidden)._cache.update(FailedValue(exc))
3308 def get_metadata(self):
3310 Returns some metadata about the given records.
3312 :return: list of ownership dictionaries for each requested record
3313 :rtype: list of dictionaries with the following keys:
3316 * create_uid: user who created the record
3317 * create_date: date when the record was created
3318 * write_uid: last user who changed the record
3319 * write_date: date of the last change to the record
3320 * xmlid: XML ID to use to refer to this record (if there is one), in format ``module.name``
3323 if self._log_access:
3324 fields += ['create_uid', 'create_date', 'write_uid', 'write_date']
3325 quoted_table = '"%s"' % self._table
3326 fields_str = ",".join('%s.%s' % (quoted_table, field) for field in fields)
3327 query = '''SELECT %s, __imd.module, __imd.name
3328 FROM %s LEFT JOIN ir_model_data __imd
3329 ON (__imd.model = %%s and __imd.res_id = %s.id)
3330 WHERE %s.id IN %%s''' % (fields_str, quoted_table, quoted_table, quoted_table)
3331 self._cr.execute(query, (self._name, tuple(self.ids)))
3332 res = self._cr.dictfetchall()
3334 uids = set(r[k] for r in res for k in ['write_uid', 'create_uid'] if r.get(k))
3335 names = dict(self.env['res.users'].browse(uids).name_get())
3339 value = r[key] = r[key] or False
3340 if key in ('write_uid', 'create_uid') and value in names:
3341 r[key] = (value, names[value])
3342 r['xmlid'] = ("%(module)s.%(name)s" % r) if r['name'] else False
3343 del r['name'], r['module']
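# Illustrative shape of the result (values are made up for the example):
#
#     [{'id': 7,
#       'create_uid': (1, 'Administrator'), 'create_date': '2014-01-01 10:00:00',
#       'write_uid': (1, 'Administrator'), 'write_date': '2014-01-02 12:30:00',
#       'xmlid': 'base.partner_root'}]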
3346 def _check_concurrency(self, cr, ids, context):
3349 if not (context.get(self.CONCURRENCY_CHECK_FIELD) and self._log_access):
3351 check_clause = "(id = %s AND %s < COALESCE(write_date, create_date, (now() at time zone 'UTC'))::timestamp)"
3352 for sub_ids in cr.split_for_in_conditions(ids):
3355 id_ref = "%s,%s" % (self._name, id)
3356 update_date = context[self.CONCURRENCY_CHECK_FIELD].pop(id_ref, None)
3358 ids_to_check.extend([id, update_date])
3359 if not ids_to_check:
3361 cr.execute("SELECT id FROM %s WHERE %s" % (self._table, " OR ".join([check_clause]*(len(ids_to_check)/2))), tuple(ids_to_check))
3364 # mention the first one only to keep the error message readable
3365 raise except_orm('ConcurrencyException', _('A document was modified since you last viewed it (%s:%d)') % (self._description, res[0]))
3367 def _check_record_rules_result_count(self, cr, uid, ids, result_ids, operation, context=None):
3368 """Verify the returned rows after applying record rules matches
3369 the length of `ids`, and raise an appropriate exception if it does not.
3373 ids, result_ids = set(ids), set(result_ids)
3374 missing_ids = ids - result_ids
3376 # Attempt to distinguish record rule restriction vs deleted records,
3377 # to provide a more specific error message - check if the missing ids still exist in the database
3378 cr.execute('SELECT id FROM ' + self._table + ' WHERE id IN %s', (tuple(missing_ids),))
3379 forbidden_ids = [x[0] for x in cr.fetchall()]
3381 # the missing ids are (at least partially) hidden by access rules
3382 if uid == SUPERUSER_ID:
3384 _logger.warning('Access Denied by record rules for operation: %s on record ids: %r, uid: %s, model: %s', operation, forbidden_ids, uid, self._name)
3385 raise except_orm(_('Access Denied'),
3386 _('The requested operation cannot be completed due to security restrictions. Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
3387 (self._description, operation))
3389 # If we get here, the missing_ids are not in the database
3390 if operation in ('read','unlink'):
3391 # No need to warn about deleting an already deleted record.
3392 # And no error when reading a record that was deleted, to prevent spurious
3393 # errors for non-transactional search/read sequences coming from clients
3395 _logger.warning('Failed operation on deleted record(s): %s, uid: %s, model: %s', operation, uid, self._name)
3396 raise except_orm(_('Missing document(s)'),
3397 _('One of the documents you are trying to access has been deleted, please try again after refreshing.'))
3400 def check_access_rights(self, cr, uid, operation, raise_exception=True): # no context on purpose.
3401 """Verifies that the operation given by ``operation`` is allowed for the user
3402 according to the access rights."""
3403 return self.pool.get('ir.model.access').check(cr, uid, self._name, operation, raise_exception)
3405 def check_access_rule(self, cr, uid, ids, operation, context=None):
3406 """Verifies that the operation given by ``operation`` is allowed for the user
3407 according to ir.rules.
3409 :param operation: one of ``write``, ``unlink``
3410 :raise except_orm: * if current ir.rules do not permit this operation.
3411 :return: None if the operation is allowed
3413 if uid == SUPERUSER_ID:
3416 if self.is_transient():
3417 # Only one single implicit access rule for transient models: owner only!
3418 # This is ok to hardcode because we assert that TransientModels always
3419 # have log_access enabled so that the create_uid column is always there.
3420 # And even with _inherits, these fields are always present in the local
3421 # table too, so no need for JOINs.
3422 cr.execute("""SELECT distinct create_uid
3424 WHERE id IN %%s""" % self._table, (tuple(ids),))
3425 uids = [x[0] for x in cr.fetchall()]
3426 if len(uids) != 1 or uids[0] != uid:
3427 raise except_orm(_('Access Denied'),
3428 _('For this kind of document, you may only access records you created yourself.\n\n(Document type: %s)') % (self._description,))
3430 where_clause, where_params, tables = self.pool.get('ir.rule').domain_get(cr, uid, self._name, operation, context=context)
3432 where_clause = ' and ' + ' and '.join(where_clause)
3433 for sub_ids in cr.split_for_in_conditions(ids):
3434 cr.execute('SELECT ' + self._table + '.id FROM ' + ','.join(tables) +
3435 ' WHERE ' + self._table + '.id IN %s' + where_clause,
3436 [sub_ids] + where_params)
3437 returned_ids = [x['id'] for x in cr.dictfetchall()]
3438 self._check_record_rules_result_count(cr, uid, sub_ids, returned_ids, operation, context=context)
3440 def create_workflow(self, cr, uid, ids, context=None):
3441 """Create a workflow instance for each given record IDs."""
3442 from openerp import workflow
3444 workflow.trg_create(uid, self._name, res_id, cr)
3445 # self.invalidate_cache(cr, uid, context=context) ?
3448 def delete_workflow(self, cr, uid, ids, context=None):
3449 """Delete the workflow instances bound to the given record IDs."""
3450 from openerp import workflow
3452 workflow.trg_delete(uid, self._name, res_id, cr)
3453 self.invalidate_cache(cr, uid, context=context)
3456 def step_workflow(self, cr, uid, ids, context=None):
3457 """Reevaluate the workflow instances of the given record IDs."""
3458 from openerp import workflow
3460 workflow.trg_write(uid, self._name, res_id, cr)
3461 # self.invalidate_cache(cr, uid, context=context) ?
3464 def signal_workflow(self, cr, uid, ids, signal, context=None):
3465 """Send given workflow signal and return a dict mapping ids to workflow results"""
3466 from openerp import workflow
3469 result[res_id] = workflow.trg_validate(uid, self._name, res_id, signal, cr)
3470 # self.invalidate_cache(cr, uid, context=context) ?
3473 def redirect_workflow(self, cr, uid, old_new_ids, context=None):
3474 """ Rebind the workflow instance bound to the given 'old' record IDs to
3475 the given 'new' IDs. (``old_new_ids`` is a list of pairs ``(old, new)``.
3477 from openerp import workflow
3478 for old_id, new_id in old_new_ids:
3479 workflow.trg_redirect(uid, self._name, old_id, new_id, cr)
3480 self.invalidate_cache(cr, uid, context=context)
3483 def unlink(self, cr, uid, ids, context=None):
3486 Deletes the records of the current set
3488 :raise AccessError: * if user has no unlink rights on the requested object
3489 * if user tries to bypass access rules for unlink on the requested object
3490 :raise UserError: if the record is default property for other records
3495 if isinstance(ids, (int, long)):
3498 result_store = self._store_get_values(cr, uid, ids, self._all_columns.keys(), context)
3500 # for recomputing new-style fields
3501 recs = self.browse(cr, uid, ids, context)
3502 recs.modified(self._fields)
3504 self._check_concurrency(cr, ids, context)
3506 self.check_access_rights(cr, uid, 'unlink')
3508 ir_property = self.pool.get('ir.property')
3510 # Check if the records are used as default properties.
3511 domain = [('res_id', '=', False),
3512 ('value_reference', 'in', ['%s,%s' % (self._name, i) for i in ids]),
3514 if ir_property.search(cr, uid, domain, context=context):
3515 raise except_orm(_('Error'), _('Unable to delete this document because it is used as a default property'))
3517 # Delete the records' properties.
3518 property_ids = ir_property.search(cr, uid, [('res_id', 'in', ['%s,%s' % (self._name, i) for i in ids])], context=context)
3519 ir_property.unlink(cr, uid, property_ids, context=context)
3521 self.delete_workflow(cr, uid, ids, context=context)
3523 self.check_access_rule(cr, uid, ids, 'unlink', context=context)
3524 pool_model_data = self.pool.get('ir.model.data')
3525 ir_values_obj = self.pool.get('ir.values')
3526 ir_attachment_obj = self.pool.get('ir.attachment')
3527 for sub_ids in cr.split_for_in_conditions(ids):
3528 cr.execute('delete from ' + self._table + ' ' \
3529 'where id IN %s', (sub_ids,))
3531 # Removing the ir_model_data reference if the record being deleted is a record created by xml/csv file,
3532 # as these are not connected with real database foreign keys, and would be dangling references.
3533 # Note: following steps performed as admin to avoid access rights restrictions, and with no context
3534 # to avoid possible side-effects during admin calls.
3535 # Step 1. Calling unlink of ir_model_data only for the affected IDS
3536 reference_ids = pool_model_data.search(cr, SUPERUSER_ID, [('res_id','in',list(sub_ids)),('model','=',self._name)])
3537 # Step 2. Performing the actual deletion of the referenced ir.model.data records
3539 pool_model_data.unlink(cr, SUPERUSER_ID, reference_ids)
3541 # For the same reason, removing the record relevant to ir_values
3542 ir_value_ids = ir_values_obj.search(cr, uid,
3543 ['|',('value','in',['%s,%s' % (self._name, sid) for sid in sub_ids]),'&',('res_id','in',list(sub_ids)),('model','=',self._name)],
3546 ir_values_obj.unlink(cr, uid, ir_value_ids, context=context)
3548 # For the same reason, removing the record relevant to ir_attachment
3549 # The search is performed with sql as the search method of ir_attachment is overridden to hide attachments of deleted records
3550 cr.execute('select id from ir_attachment where res_model = %s and res_id in %s', (self._name, sub_ids))
3551 ir_attachment_ids = [ir_attachment[0] for ir_attachment in cr.fetchall()]
3552 if ir_attachment_ids:
3553 ir_attachment_obj.unlink(cr, uid, ir_attachment_ids, context=context)
3555 # invalidate the *whole* cache, since the orm does not handle all
3556 # changes made in the database, like cascading delete!
3557 recs.invalidate_cache()
3559 for order, obj_name, store_ids, fields in result_store:
3560 if obj_name == self._name:
3561 effective_store_ids = set(store_ids) - set(ids)
3563 effective_store_ids = store_ids
3564 if effective_store_ids:
3565 obj = self.pool[obj_name]
3566 cr.execute('select id from '+obj._table+' where id IN %s', (tuple(effective_store_ids),))
3567 rids = map(lambda x: x[0], cr.fetchall())
3569 obj._store_set_values(cr, uid, rids, fields, context)
3571 # recompute new-style fields
3580 def write(self, vals):
3583 Updates all records in the current set with the provided values.
3585 :param dict vals: fields to update and the value to set on them e.g::
3587 {'foo': 1, 'bar': "Qux"}
3589 will set the field ``foo`` to ``1`` and the field ``bar`` to
3590 ``"Qux"`` if those are valid (otherwise it will trigger an error).
3592 :raise AccessError: * if user has no write rights on the requested object
3593 * if user tries to bypass access rules for write on the requested object
3594 :raise ValidateError: if user tries to enter an invalid value for a field that is not in the selection
3595 :raise UserError: if a loop would be created in a hierarchy of objects as a result of the operation (such as setting an object as its own parent)
3597 .. _openerp/models/relationals/format:
3599 .. note:: Relational fields use a special "commands" format to manipulate their values
3601 This format is a list of command triplets executed sequentially; the
3602 possible command triplets are:
3604 ``(0, _, values: dict)``
3605 links to a new record created from the provided values
3606 ``(1, id, values: dict)``
3607 updates the already-linked record of id ``id`` with the
3610 unlinks and deletes the linked record of id ``id``
3612 unlinks the linked record of id ``id`` without deleting it
3614 links to an existing record of id ``id``
3616 unlinks all records in the relation, equivalent to using
3617 the command ``3`` on every linked record
3619 replaces the existing list of linked records by the provided
3620 ones, equivalent to using ``5`` then ``4`` for each id in
3623 (in command triplets, ``_`` values are ignored and can be
3624 anything, generally ``0`` or ``False``)
3626 Any command can be used on :class:`~openerp.fields.Many2many`,
3627 only ``0``, ``1`` and ``2`` can be used on
3628 :class:`~openerp.fields.One2many`.
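An illustrative sketch (hypothetical model and field names) combining
the commands above on a one2many field ``child_ids`` and a many2many
field ``category_id``::

    partner.write({
        'name': 'Agrolait',
        'child_ids': [
            (0, 0, {'name': 'New contact'}),   # create and link a new record
            (1, 42, {'name': 'Renamed'}),      # update the linked record 42
            (2, 43, False),                    # unlink and delete record 43
        ],
        'category_id': [(6, 0, [1, 2, 3])],    # replace the whole relation
    })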
3633 self._check_concurrency(self._ids)
3634 self.check_access_rights('write')
3636 # No user-driven update of these columns
3637 for field in itertools.chain(MAGIC_COLUMNS, ('parent_left', 'parent_right')):
3638 vals.pop(field, None)
3640 # split up fields into old-style and pure new-style ones
3641 old_vals, new_vals, unknown = {}, {}, []
3642 for key, val in vals.iteritems():
3643 if key in self._columns:
3645 elif key in self._fields:
3651 _logger.warning("%s.write() with unknown fields: %s", self._name, ', '.join(sorted(unknown)))
3653 # write old-style fields with (low-level) method _write
3655 self._write(old_vals)
3657 # put the values of pure new-style fields into cache, and inverse them
3660 record._cache.update(record._convert_to_cache(new_vals, update=True))
3661 for key in new_vals:
3662 self._fields[key].determine_inverse(self)
3666 def _write(self, cr, user, ids, vals, context=None):
3667 # low-level implementation of write()
3672 self.check_field_access_rights(cr, user, 'write', vals.keys())
3673 deleted_related = defaultdict(list)
3674 for field in vals.keys():
3676 if field in self._columns:
3677 fobj = self._columns[field]
3678 elif field in self._inherit_fields:
3679 fobj = self._inherit_fields[field][2]
3682 if fobj._type in ['one2many', 'many2many'] and vals[field]:
3683 for wtuple in vals[field]:
3684 if isinstance(wtuple, (tuple, list)) and wtuple[0] == 2:
3685 deleted_related[fobj._obj].append(wtuple[1])
3690 for group in groups:
3691 module = group.split(".")[0]
3692 grp = group.split(".")[1]
3693 cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name=%s and module=%s and model=%s) and uid=%s", \
3694 (grp, module, 'res.groups', user))
3695 readonly = cr.fetchall()
3696 if readonly[0][0] >= 1:
3703 result = self._store_get_values(cr, user, ids, vals.keys(), context) or []
3705 # for recomputing new-style fields
3706 recs = self.browse(cr, user, ids, context)
3707 modified_fields = list(vals)
3708 if self._log_access:
3709 modified_fields += ['write_date', 'write_uid']
3710 recs.modified(modified_fields)
3712 parents_changed = []
3713 parent_order = self._parent_order or self._order
3714 if self._parent_store and (self._parent_name in vals) and not context.get('defer_parent_store_computation'):
3715 # The parent_left/right computation may take up to
3716 # 5 seconds. No need to recompute the values if the
3717 # parent is the same.
3718 # Note: to respect parent_order, nodes must be processed in
3719 # order, so ``parents_changed`` must be ordered properly.
3720 parent_val = vals[self._parent_name]
3722 query = "SELECT id FROM %s WHERE id IN %%s AND (%s != %%s OR %s IS NULL) ORDER BY %s" % \
3723 (self._table, self._parent_name, self._parent_name, parent_order)
3724 cr.execute(query, (tuple(ids), parent_val))
3726 query = "SELECT id FROM %s WHERE id IN %%s AND (%s IS NOT NULL) ORDER BY %s" % \
3727 (self._table, self._parent_name, parent_order)
3728 cr.execute(query, (tuple(ids),))
3729 parents_changed = map(operator.itemgetter(0), cr.fetchall())
3736 totranslate = context.get('lang', False) and (context['lang'] != 'en_US')
3738 field_column = self._all_columns.get(field) and self._all_columns.get(field).column
3739 if field_column and field_column.deprecated:
3740 _logger.warning('Field %s.%s is deprecated: %s', self._name, field, field_column.deprecated)
3741 if field in self._columns:
3742 if self._columns[field]._classic_write and not (hasattr(self._columns[field], '_fnct_inv')):
3743 if (not totranslate) or not self._columns[field].translate:
3744 upd0.append('"'+field+'"='+self._columns[field]._symbol_set[0])
3745 upd1.append(self._columns[field]._symbol_set[1](vals[field]))
3746 direct.append(field)
3748 upd_todo.append(field)
3750 updend.append(field)
3751 if field in self._columns \
3752 and hasattr(self._columns[field], 'selection') \
3754 self._check_selection_field_value(cr, user, field, vals[field], context=context)
3756 if self._log_access:
3757 upd0.append('write_uid=%s')
3758 upd0.append("write_date=(now() at time zone 'UTC')")
3760 direct.append('write_uid')
3761 direct.append('write_date')
3764 self.check_access_rule(cr, user, ids, 'write', context=context)
3765 for sub_ids in cr.split_for_in_conditions(ids):
3766 cr.execute('update ' + self._table + ' set ' + ','.join(upd0) + ' ' \
3767 'where id IN %s', upd1 + [sub_ids])
3768 if cr.rowcount != len(sub_ids):
3769 raise MissingError(_('One of the records you are trying to modify has already been deleted (Document type: %s).') % self._description)
3774 if self._columns[f].translate:
3775 src_trans = self.pool[self._name].read(cr, user, ids, [f])[0][f]
3778 # Inserting value to DB
3779 context_wo_lang = dict(context, lang=None)
3780 self.write(cr, user, ids, {f: vals[f]}, context=context_wo_lang)
3781 self.pool.get('ir.translation')._set_ids(cr, user, self._name+','+f, 'model', context['lang'], ids, vals[f], src_trans)
3783 # invalidate and mark new-style fields to recompute; do this before
3784 # setting other fields, because it can require the value of computed
3785 # fields, e.g., a one2many checking constraints on records
3786 recs.modified(direct)
3788 # call the 'set' method of fields which are not classic_write
3789 upd_todo.sort(lambda x, y: self._columns[x].priority-self._columns[y].priority)
3791 # default elements in context must be removed when calling a one2many or many2many
3792 rel_context = context.copy()
3793 for c in context.items():
3794 if c[0].startswith('default_'):
3795 del rel_context[c[0]]
3797 for field in upd_todo:
3799 result += self._columns[field].set(cr, self, id, field, vals[field], user, context=rel_context) or []
3801 # for recomputing new-style fields
3802 recs.modified(upd_todo)
3804 unknown_fields = updend[:]
3805 for table in self._inherits:
3806 col = self._inherits[table]
3808 for sub_ids in cr.split_for_in_conditions(ids):
3809 cr.execute('select distinct "'+col+'" from "'+self._table+'" ' \
3810 'where id IN %s', (sub_ids,))
3811 nids.extend([x[0] for x in cr.fetchall()])
3815 if self._inherit_fields[val][0] == table:
3817 unknown_fields.remove(val)
3819 self.pool[table].write(cr, user, nids, v, context)
3823 'No such field(s) in model %s: %s.',
3824 self._name, ', '.join(unknown_fields))
3826 # check Python constraints
3827 recs._validate_fields(vals)
3829 # TODO: use _order to set dest at the right position and not first node of parent
3830 # We can't defer parent_store computation because the stored function
3831 # fields that are computed may refer (directly or indirectly) to
3832 # parent_left/right (via a child_of domain)
3835 self.pool._init_parent[self._name] = True
3837 order = self._parent_order or self._order
3838 parent_val = vals[self._parent_name]
3840 clause, params = '%s=%%s' % (self._parent_name,), (parent_val,)
3842 clause, params = '%s IS NULL' % (self._parent_name,), ()
3844 for id in parents_changed:
3845 cr.execute('SELECT parent_left, parent_right FROM %s WHERE id=%%s' % (self._table,), (id,))
3846 pleft, pright = cr.fetchone()
3847 distance = pright - pleft + 1
3849 # Positions of current siblings, to locate proper insertion point;
3850 # this can _not_ be fetched outside the loop, as it needs to be refreshed
3851 # after each update, in case several nodes are sequentially inserted one
3852 # next to the other (i.e computed incrementally)
3853 cr.execute('SELECT parent_right, id FROM %s WHERE %s ORDER BY %s' % (self._table, clause, parent_order), params)
3854 parents = cr.fetchall()
3856 # Find Position of the element
3858 for (parent_pright, parent_id) in parents:
3861 position = parent_pright and parent_pright + 1 or 1
3863 # It's the first node of the parent
3868 cr.execute('select parent_left from '+self._table+' where id=%s', (parent_val,))
3869 position = cr.fetchone()[0] + 1
3871 if pleft < position <= pright:
3872 raise except_orm(_('UserError'), _('Recursivity Detected.'))
3874 if pleft < position:
3875 cr.execute('update '+self._table+' set parent_left=parent_left+%s where parent_left>=%s', (distance, position))
3876 cr.execute('update '+self._table+' set parent_right=parent_right+%s where parent_right>=%s', (distance, position))
3877 cr.execute('update '+self._table+' set parent_left=parent_left+%s, parent_right=parent_right+%s where parent_left>=%s and parent_left<%s', (position-pleft, position-pleft, pleft, pright))
3879 cr.execute('update '+self._table+' set parent_left=parent_left+%s where parent_left>=%s', (distance, position))
3880 cr.execute('update '+self._table+' set parent_right=parent_right+%s where parent_right>=%s', (distance, position))
3881 cr.execute('update '+self._table+' set parent_left=parent_left-%s, parent_right=parent_right-%s where parent_left>=%s and parent_left<%s', (pleft-position+distance, pleft-position+distance, pleft+distance, pright+distance))
3882 recs.invalidate_cache(['parent_left', 'parent_right'])
3884 result += self._store_get_values(cr, user, ids, vals.keys(), context)
3888 for order, model_name, ids_to_update, fields_to_recompute in result:
3889 key = (model_name, tuple(fields_to_recompute))
3890 done.setdefault(key, {})
3891 # avoid to do several times the same computation
3893 for id in ids_to_update:
3894 if id not in done[key]:
3895 done[key][id] = True
3896 if id not in deleted_related[model_name]:
3898 self.pool[model_name]._store_set_values(cr, user, todo, fields_to_recompute, context)
3900 # recompute new-style fields
3901 if context.get('recompute', True):
3904 self.step_workflow(cr, user, ids, context=context)
3908 # TODO: Should set perm to user.xxx
3911 @api.returns('self', lambda value: value.id)
3912 def create(self, vals):
3913 """ create(vals) -> record
3915 Creates a new record for the model.
3917 The new record is initialized using the values from ``vals`` and
3918 if necessary those from :meth:`~.default_get`.
3921 values for the model's fields, as a dictionary::
3923 {'field_name': field_value, ...}
3925 see :meth:`~.write` for details
3926 :return: new record created
3927 :raise AccessError: * if user has no create rights on the requested object
3928 * if user tries to bypass access rules for create on the requested object
3929 :raise ValidateError: if user tries to enter an invalid value for a field that is not in the selection
3930 :raise UserError: if a loop would be created in a hierarchy of objects as a result of the operation (such as setting an object as its own parent)
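A minimal usage sketch (``res.partner`` is only an example model, ``env``
an :class:`~openerp.api.Environment`)::

    partner = env['res.partner'].create({
        'name': 'Agrolait',
        'email': 'info@example.com',
    })
    partner.id  # id of the newly created record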
3932 self.check_access_rights('create')
3934 # add missing defaults, and drop fields that may not be set by user
3935 vals = self._add_missing_default_values(vals)
3936 for field in itertools.chain(MAGIC_COLUMNS, ('parent_left', 'parent_right')):
3937 vals.pop(field, None)
3939 # split up fields into old-style and pure new-style ones
3940 old_vals, new_vals, unknown = {}, {}, []
3941 for key, val in vals.iteritems():
3942 if key in self._all_columns:
3944 elif key in self._fields:
3950 _logger.warning("%s.create() with unknown fields: %s", self._name, ', '.join(sorted(unknown)))
3952 # create record with old-style fields
3953 record = self.browse(self._create(old_vals))
3955 # put the values of pure new-style fields into cache, and inverse them
3956 record._cache.update(record._convert_to_cache(new_vals))
3957 for key in new_vals:
3958 self._fields[key].determine_inverse(record)
3962 def _create(self, cr, user, vals, context=None):
3963 # low-level implementation of create()
3967 if self.is_transient():
3968 self._transient_vacuum(cr, user)
3971 for v in self._inherits:
3972 if self._inherits[v] not in vals:
3975 tocreate[v] = {'id': vals[self._inherits[v]]}
3978 # list of column assignments defined as tuples like:
3979 # (column_name, format_string, column_value)
3980 # (column_name, sql_formula)
3981 # Those tuples will be used by the string formatting for the INSERT
3983 ('id', "nextval('%s')" % self._sequence),
3988 for v in vals.keys():
3989 if v in self._inherit_fields and v not in self._columns:
3990 (table, col, col_detail, original_parent) = self._inherit_fields[v]
3991 tocreate[table][v] = vals[v]
3994 if (v not in self._inherit_fields) and (v not in self._columns):
3996 unknown_fields.append(v)
3999 'No such field(s) in model %s: %s.',
4000 self._name, ', '.join(unknown_fields))
4002 for table in tocreate:
4003 if self._inherits[table] in vals:
4004 del vals[self._inherits[table]]
4006 record_id = tocreate[table].pop('id', None)
4008 if record_id is None or not record_id:
4009 record_id = self.pool[table].create(cr, user, tocreate[table], context=context)
4011 self.pool[table].write(cr, user, [record_id], tocreate[table], context=context)
4013 updates.append((self._inherits[table], '%s', record_id))
4015 # Start: Set bool fields to be False if they are not touched (to make search more powerful)
4016 bool_fields = [x for x in self._columns.keys() if self._columns[x]._type=='boolean']
4018 for bool_field in bool_fields:
4019 if bool_field not in vals:
4020 vals[bool_field] = False
4022 for field in vals.keys():
4024 if field in self._columns:
4025 fobj = self._columns[field]
4027 fobj = self._inherit_fields[field][2]
4033 for group in groups:
4034 module = group.split(".")[0]
4035 grp = group.split(".")[1]
4036 cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name='%s' and module='%s' and model='%s') and uid=%s" % \
4037 (grp, module, 'res.groups', user))
4038 readonly = cr.fetchall()
4039 if readonly[0][0] >= 1:
4042 elif readonly[0][0] == 0:
4050 current_field = self._columns[field]
4051 if current_field._classic_write:
4052 updates.append((field, '%s', current_field._symbol_set[1](vals[field])))
4054 #for the function fields that receive a value, we set them directly in the database
4055 #(they may be required), but we also need to trigger the _fnct_inv()
4056 if (hasattr(current_field, '_fnct_inv')) and not isinstance(current_field, fields.related):
4057 #TODO: this way of special-casing related fields is really creepy but it shouldn't be changed
4058 #one week before the release candidate. It seems the only good way to handle this correctly is to add an
4059 #attribute to make a field 'really readonly' and thus totally ignored by create()... otherwise
4060 #if, for example, the related field has a default value (for usability) then the fnct_inv is called and it
4061 #may raise an access rights error. Changing this is too big a change for now, and is thus postponed
4062 #until after the release but, definitely, the behavior shouldn't be different for related and function
4064 upd_todo.append(field)
4066 #TODO: this `if` statement should be removed because there is no good reason to special-case the related
4067 #fields. See the above TODO comment for further explanations.
4068 if not isinstance(current_field, fields.related):
4069 upd_todo.append(field)
4070 if field in self._columns \
4071 and hasattr(current_field, 'selection') \
4073 self._check_selection_field_value(cr, user, field, vals[field], context=context)
4074 if self._log_access:
4075 updates.append(('create_uid', '%s', user))
4076 updates.append(('write_uid', '%s', user))
4077 updates.append(('create_date', "(now() at time zone 'UTC')"))
4078 updates.append(('write_date', "(now() at time zone 'UTC')"))
4080 # the list of tuples used in this formatting corresponds to
4081 # tuple(field_name, format, value)
4083 # In some cases, for example (id, create_date, write_date), we do not
4083 # need to read the third value of the tuple, because the real value is
4084 # encoded in the second value (the format).
4086 """INSERT INTO "%s" (%s) VALUES(%s) RETURNING id""" % (
4088 ', '.join('"%s"' % u[0] for u in updates),
4089 ', '.join(u[1] for u in updates)
4091 tuple([u[2] for u in updates if len(u) > 2])
4094 id_new, = cr.fetchone()
4095 recs = self.browse(cr, user, id_new, context)
4097 if self._parent_store and not context.get('defer_parent_store_computation'):
4099 self.pool._init_parent[self._name] = True
4101 parent = vals.get(self._parent_name, False)
4103 cr.execute('select parent_right from '+self._table+' where '+self._parent_name+'=%s order by '+(self._parent_order or self._order), (parent,))
4105 result_p = cr.fetchall()
4106 for (pleft,) in result_p:
4111 cr.execute('select parent_left from '+self._table+' where id=%s', (parent,))
4112 pleft_old = cr.fetchone()[0]
4115 cr.execute('select max(parent_right) from '+self._table)
4116 pleft = cr.fetchone()[0] or 0
4117 cr.execute('update '+self._table+' set parent_left=parent_left+2 where parent_left>%s', (pleft,))
4118 cr.execute('update '+self._table+' set parent_right=parent_right+2 where parent_right>%s', (pleft,))
4119 cr.execute('update '+self._table+' set parent_left=%s,parent_right=%s where id=%s', (pleft+1, pleft+2, id_new))
4120 recs.invalidate_cache(['parent_left', 'parent_right'])
4122 # invalidate and mark new-style fields to recompute; do this before
4123 # setting other fields, because it can require the value of computed
4124 # fields, e.g., a one2many checking constraints on records
4125 recs.modified([u[0] for u in updates])
4127 # call the 'set' method of fields which are not classic_write
4128 upd_todo.sort(lambda x, y: self._columns[x].priority-self._columns[y].priority)
4130 # default elements in context must be removed when calling a one2many or many2many
4131 rel_context = context.copy()
4132 for c in context.items():
4133 if c[0].startswith('default_'):
4134 del rel_context[c[0]]
4137 for field in upd_todo:
4138 result += self._columns[field].set(cr, self, id_new, field, vals[field], user, rel_context) or []
4140 # for recomputing new-style fields
4141 recs.modified(upd_todo)
4143 # check Python constraints
4144 recs._validate_fields(vals)
4146 if context.get('recompute', True):
4147 result += self._store_get_values(cr, user, [id_new],
4148 list(set(vals.keys() + self._inherits.values())),
4152 for order, model_name, ids, fields2 in result:
4153 if not (model_name, ids, fields2) in done:
4154 self.pool[model_name]._store_set_values(cr, user, ids, fields2, context)
4155 done.append((model_name, ids, fields2))
4156 # recompute new-style fields
4159 if self._log_create and context.get('recompute', True):
4160 message = self._description + \
4162 self.name_get(cr, user, [id_new], context=context)[0][1] + \
4163 "' " + _("created.")
4164 self.log(cr, user, id_new, message, True, context=context)
4166 self.check_access_rule(cr, user, [id_new], 'create', context=context)
4167 self.create_workflow(cr, user, [id_new], context=context)
4170 def _store_get_values(self, cr, uid, ids, fields, context):
4171 """Returns an ordered list of fields.function to call due to
4172 an update operation on ``fields`` of records with ``ids``,
4173 obtained by calling the 'store' triggers of these fields,
4174 as setup by their 'store' attribute.
4176 :return: [(priority, model_name, [record_ids,], [function_fields,])]
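For illustration only (hypothetical models and fields), the result may
look like::

    [(10, 'account.move', [1, 2, 3], ['amount_total']),
     (20, 'account.move.line', [7], ['balance'])]

i.e. one entry per (priority, model, group of function fields) to
recompute, ordered by priority.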
4178 if fields is None: fields = []
4179 stored_functions = self.pool._store_function.get(self._name, [])
4181 # use indexed names for the details of the stored_functions:
4182 model_name_, func_field_to_compute_, target_ids_func_, trigger_fields_, priority_ = range(5)
4184 # only keep store triggers that should be triggered for the ``fields``
4186 triggers_to_compute = (
4187 f for f in stored_functions
4188 if not f[trigger_fields_] or set(fields).intersection(f[trigger_fields_])
4192 target_id_results = {}
4193 for store_trigger in triggers_to_compute:
4194 target_func_id_ = id(store_trigger[target_ids_func_])
4195 if target_func_id_ not in target_id_results:
4196 # use admin user for accessing objects having rules defined on store fields
4197 target_id_results[target_func_id_] = [i for i in store_trigger[target_ids_func_](self, cr, SUPERUSER_ID, ids, context) if i]
4198 target_ids = target_id_results[target_func_id_]
4200 # the compound key must consider the priority and model name
4201 key = (store_trigger[priority_], store_trigger[model_name_])
4202 for target_id in target_ids:
4203 to_compute_map.setdefault(key, {}).setdefault(target_id,set()).add(tuple(store_trigger))
4205 # Here to_compute_map looks like:
4206 # { (10, 'model_a') : { target_id1: [ (trigger_1_tuple, trigger_2_tuple) ], ... }
4207 # (20, 'model_a') : { target_id2: [ (trigger_3_tuple, trigger_4_tuple) ], ... }
4208 # (99, 'model_a') : { target_id1: [ (trigger_5_tuple, trigger_6_tuple) ], ... }
4211 # Now we need to generate the batch function calls list
4213 # { (10, 'model_a') : [(10, 'model_a', [record_ids,], [function_fields,])] }
4215 for ((priority,model), id_map) in to_compute_map.iteritems():
4216 trigger_ids_maps = {}
4217 # function_ids_maps =
4218 # { (function_1_tuple, function_2_tuple) : [target_id1, target_id2, ..] }
4219 for target_id, triggers in id_map.iteritems():
4220 trigger_ids_maps.setdefault(tuple(triggers), []).append(target_id)
4221 for triggers, target_ids in trigger_ids_maps.iteritems():
4222 call_map.setdefault((priority,model),[]).append((priority, model, target_ids,
4223 [t[func_field_to_compute_] for t in triggers]))
4226 result = reduce(operator.add, (call_map[k] for k in sorted(call_map)))
4229 def _store_set_values(self, cr, uid, ids, fields, context):
4230 """Calls the fields.function's "implementation function" for all ``fields``, on records with ``ids`` (taking care of
4231 respecting ``multi`` attributes), and stores the resulting values in the database directly."""
4236 if self._log_access:
4237 cr.execute('select id,write_date from '+self._table+' where id IN %s', (tuple(ids),))
4241 field_dict.setdefault(r[0], [])
4242 res_date = time.strptime((r[1])[:19], '%Y-%m-%d %H:%M:%S')
4243 write_date = datetime.datetime.fromtimestamp(time.mktime(res_date))
4244 for i in self.pool._store_function.get(self._name, []):
4246 up_write_date = write_date + datetime.timedelta(hours=i[5])
4247 if datetime.datetime.now() < up_write_date:
4249 field_dict[r[0]].append(i[1])
4255 if self._columns[f]._multi not in keys:
4256 keys.append(self._columns[f]._multi)
4257 todo.setdefault(self._columns[f]._multi, [])
4258 todo[self._columns[f]._multi].append(f)
4262 # use admin user for accessing objects having rules defined on store fields
4263 result = self._columns[val[0]].get(cr, self, ids, val, SUPERUSER_ID, context=context)
4264 for id, value in result.items():
4266 for f in value.keys():
4267 if f in field_dict[id]:
4274 if self._columns[v]._type == 'many2one':
4276 value[v] = value[v][0]
4279 upd0.append('"'+v+'"='+self._columns[v]._symbol_set[0])
4280 upd1.append(self._columns[v]._symbol_set[1](value[v]))
4283 cr.execute('update "' + self._table + '" set ' + \
4284 ','.join(upd0) + ' where id = %s', upd1)
4288 # use admin user for accessing objects having rules defined on store fields
4289 result = self._columns[f].get(cr, self, ids, f, SUPERUSER_ID, context=context)
4290 for r in result.keys():
4292 if r in field_dict.keys():
4293 if f in field_dict[r]:
4295 for id, value in result.items():
4296 if self._columns[f]._type == 'many2one':
4301 cr.execute('update "' + self._table + '" set ' + \
4302 '"'+f+'"='+self._columns[f]._symbol_set[0] + ' where id = %s', (self._columns[f]._symbol_set[1](value), id))
4304 # invalidate and mark new-style fields to recompute
4305 self.browse(cr, uid, ids, context).modified(fields)
4309 # TODO: improve handling of NULL
4310 def _where_calc(self, cr, user, domain, active_test=True, context=None):
4311 """Computes the WHERE clause needed to implement an OpenERP domain.
4312 :param domain: the domain to compute
4314 :param active_test: whether the default filtering of records with ``active``
4315 field set to ``False`` should be applied.
4316 :return: the query expressing the given domain as provided in domain
4317 :rtype: osv.query.Query
4322 # if the object has a field named 'active', filter out all inactive
4323 # records unless they were explicitly asked for
4324 if 'active' in self._all_columns and (active_test and context.get('active_test', True)):
4326 # the item[0] trick below works for domain items and '&'/'|'/'!'
4328 if not any(item[0] == 'active' for item in domain):
4329 domain.insert(0, ('active', '=', 1))
4331 domain = [('active', '=', 1)]
4334 e = expression.expression(cr, user, domain, self, context)
4335 tables = e.get_tables()
4336 where_clause, where_params = e.to_sql()
4337 where_clause = where_clause and [where_clause] or []
4339 where_clause, where_params, tables = [], [], ['"%s"' % self._table]
4341 return Query(tables, where_clause, where_params)
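# Illustrative sketch (not part of the original code): a simple domain is
# turned into a Query object whose parts are later assembled into SQL; the
# exact fragments depend on expression.expression and are indicative only:
#
#     query = self._where_calc(cr, uid, [('name', 'ilike', 'agro')], context=context)
#     query.tables               # e.g. ['"res_partner"']
#     query.where_clause         # e.g. ['("res_partner"."name"::text ilike %s)']
#     query.where_clause_params  # e.g. ['%agro%']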
4343 def _check_qorder(self, word):
4344 if not regex_order.match(word):
4345 raise except_orm(_('AccessError'), _('Invalid "order" specified. A valid "order" specification is a comma-separated list of valid field names (optionally followed by asc/desc for the direction)'))
4348 def _apply_ir_rules(self, cr, uid, query, mode='read', context=None):
4349 """Add what's missing in ``query`` to implement all appropriate ir.rules
4350 (using the ``model_name``'s rules or the current model's rules if ``model_name`` is None)
4352 :param query: the current query object
4354 if uid == SUPERUSER_ID:
4357 def apply_rule(added_clause, added_params, added_tables, parent_model=None):
4358 """ :param parent_model: name of the parent model, if the added
4359 clause comes from a parent model
4363 # as inherited rules are being applied, we need to add the missing JOIN
4364 # to reach the parent table (if it was not JOINed yet in the query)
4365 parent_alias = self._inherits_join_add(self, parent_model, query)
4366 # inherited rules are applied on the external table -> need to get the alias and replace
4367 parent_table = self.pool[parent_model]._table
4368 added_clause = [clause.replace('"%s"' % parent_table, '"%s"' % parent_alias) for clause in added_clause]
4369 # change references to parent_table to parent_alias, because we now use the alias to refer to the table
4371 for table in added_tables:
4372 # table is just a table name -> switch to the full alias
4373 if table == '"%s"' % parent_table:
4374 new_tables.append('"%s" as "%s"' % (parent_table, parent_alias))
4375 # table is already a full statement -> replace the reference to the table with its alias, which is correct given the way aliases are generated
4377 new_tables.append(table.replace('"%s"' % parent_table, '"%s"' % parent_alias))
4378 added_tables = new_tables
4379 query.where_clause += added_clause
4380 query.where_clause_params += added_params
4381 for table in added_tables:
4382 if table not in query.tables:
4383 query.tables.append(table)
4387 # apply main rules on the object
4388 rule_obj = self.pool.get('ir.rule')
4389 rule_where_clause, rule_where_clause_params, rule_tables = rule_obj.domain_get(cr, uid, self._name, mode, context=context)
4390 apply_rule(rule_where_clause, rule_where_clause_params, rule_tables)
4392 # apply ir.rules from the parents (through _inherits)
4393 for inherited_model in self._inherits:
4394 rule_where_clause, rule_where_clause_params, rule_tables = rule_obj.domain_get(cr, uid, inherited_model, mode, context=context)
4395 apply_rule(rule_where_clause, rule_where_clause_params, rule_tables,
4396 parent_model=inherited_model)
4398 def _generate_m2o_order_by(self, order_field, query):
4400 Add possibly missing JOIN to ``query`` and generate the ORDER BY clause for m2o fields,
4401 either native m2o fields or function/related fields that are stored, including
4402 intermediate JOINs for inheritance if required.
4404 :return: the qualified field name to use in an ORDER BY clause to sort by ``order_field``
4406 if order_field not in self._columns and order_field in self._inherit_fields:
4407 # also add missing joins for reaching the table containing the m2o field
4408 qualified_field = self._inherits_join_calc(order_field, query)
4409 order_field_column = self._inherit_fields[order_field][2]
4411 qualified_field = '"%s"."%s"' % (self._table, order_field)
4412 order_field_column = self._columns[order_field]
4414 assert order_field_column._type == 'many2one', 'Invalid field passed to _generate_m2o_order_by()'
4415 if not order_field_column._classic_write and not getattr(order_field_column, 'store', False):
4416 _logger.debug("Many2one function/related fields must be stored " \
4417 "to be used as ordering fields! Ignoring sorting for %s.%s",
4418 self._name, order_field)
4421 # figure out the applicable order_by for the m2o
4422 dest_model = self.pool[order_field_column._obj]
4423 m2o_order = dest_model._order
4424 if not regex_order.match(m2o_order):
4425 # _order is complex, can't use it here, so we default to _rec_name
4426 m2o_order = dest_model._rec_name
4428 # extract the field names, to be able to qualify them and add desc/asc
4430 for order_part in m2o_order.split(","):
4431 m2o_order_list.append(order_part.strip().split(" ", 1)[0].strip())
4432 m2o_order = m2o_order_list
4434 # Join the dest m2o table if it's not joined yet. We use [LEFT] OUTER join here
4435 # as we don't want to exclude results that have NULL values for the m2o
4436 src_table, src_field = qualified_field.replace('"', '').split('.', 1)
4437 dst_alias, dst_alias_statement = query.add_join((src_table, dest_model._table, src_field, 'id', src_field), implicit=False, outer=True)
4438 qualify = lambda field: '"%s"."%s"' % (dst_alias, field)
4439 return map(qualify, m2o_order) if isinstance(m2o_order, list) else qualify(m2o_order)
4441 def _generate_order_by(self, order_spec, query):
4443 Attempt to construct an appropriate ORDER BY clause based on order_spec, which must be
4444 a comma-separated list of valid field names, optionally followed by an ASC or DESC direction.
4446 :raise" except_orm in case order_spec is malformed
4448 order_by_clause = ''
4449 order_spec = order_spec or self._order
4451 order_by_elements = []
4452 self._check_qorder(order_spec)
4453 for order_part in order_spec.split(','):
4454 order_split = order_part.strip().split(' ')
4455 order_field = order_split[0].strip()
4456 order_direction = order_split[1].strip() if len(order_split) == 2 else ''
4459 if order_field == 'id':
4460 order_by_elements.append('"%s"."%s" %s' % (self._table, order_field, order_direction))
4461 elif order_field in self._columns:
4462 order_column = self._columns[order_field]
4463 if order_column._classic_read:
4464 inner_clause = '"%s"."%s"' % (self._table, order_field)
4465 elif order_column._type == 'many2one':
4466 inner_clause = self._generate_m2o_order_by(order_field, query)
4468 continue # ignore non-readable or "non-joinable" fields
4469 elif order_field in self._inherit_fields:
4470 parent_obj = self.pool[self._inherit_fields[order_field][3]]
4471 order_column = parent_obj._columns[order_field]
4472 if order_column._classic_read:
4473 inner_clause = self._inherits_join_calc(order_field, query)
4474 elif order_column._type == 'many2one':
4475 inner_clause = self._generate_m2o_order_by(order_field, query)
4477 continue # ignore non-readable or "non-joinable" fields
4479 raise ValueError(_("Sorting field %s not found on model %s") % (order_field, self._name))
4480 if order_column and order_column._type == 'boolean':
4481 inner_clause = "COALESCE(%s, false)" % inner_clause
4483 if isinstance(inner_clause, list):
4484 for clause in inner_clause:
4485 order_by_elements.append("%s %s" % (clause, order_direction))
4487 order_by_elements.append("%s %s" % (inner_clause, order_direction))
4488 if order_by_elements:
4489 order_by_clause = ",".join(order_by_elements)
4491 return order_by_clause and (' ORDER BY %s ' % order_by_clause) or ''
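# Illustrative sketch (assumed table and field names): for a model stored in
# table "res_partner", an order_spec such as "name desc, id" would typically
# yield:
#
#     ' ORDER BY "res_partner"."name" desc,"res_partner"."id" '
#
# while stored many2one fields are ordered through an extra LEFT JOIN added by
# _generate_m2o_order_by().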
4493 def _search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False, access_rights_uid=None):
4495 Private implementation of search() method, allowing specifying the uid to use for the access right check.
4496 This is useful for example when filling in the selection list for a drop-down and avoiding access rights errors,
4497 by specifying ``access_rights_uid=1`` to bypass access rights check, but not ir.rules!
4498 This is ok at the security level because this method is private and not callable through XML-RPC.
4500 :param access_rights_uid: optional user ID to use when checking access rights
4501 (not for ir.rules, this is only for ir.model.access)
4505 self.check_access_rights(cr, access_rights_uid or user, 'read')
4507 # For transient models, restrict access to the current user, except for the super-user
4508 if self.is_transient() and self._log_access and user != SUPERUSER_ID:
4509 args = expression.AND(([('create_uid', '=', user)], args or []))
4511 query = self._where_calc(cr, user, args, context=context)
4512 self._apply_ir_rules(cr, user, query, 'read', context=context)
4513 order_by = self._generate_order_by(order, query)
4514 from_clause, where_clause, where_clause_params = query.get_sql()
4516 where_str = where_clause and (" WHERE %s" % where_clause) or ''
4519 # Ignore order, limit and offset when just counting, they don't make sense and could
4521 query_str = 'SELECT count(1) FROM ' + from_clause + where_str
4522 cr.execute(query_str, where_clause_params)
4526 limit_str = limit and ' limit %d' % limit or ''
4527 offset_str = offset and ' offset %d' % offset or ''
4528 query_str = 'SELECT "%s".id FROM ' % self._table + from_clause + where_str + order_by + limit_str + offset_str
4529 cr.execute(query_str, where_clause_params)
4532 # TDE note: with auto_join, we could have several lines about the same result
4533 # i.e. a lead with several unread messages; we uniquify the result using
4534 # a fast way to do it while preserving order (http://www.peterbe.com/plog/uniqifiers-benchmark)
4535 def _uniquify_list(seq):
seen = set()
4537 return [x for x in seq if x not in seen and not seen.add(x)]
4539 return _uniquify_list([x[0] for x in res])
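# Usage sketch (illustrative): fill a drop-down selection without tripping on
# ACLs while still enforcing record rules, as described in the docstring:
#
#     ids = self._search(cr, user, [('active', '=', True)], limit=80,
#                        access_rights_uid=SUPERUSER_ID, context=context)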
4541 # returns the different values ever entered for one field
4542 # this is used, for example, in the client when the user hits enter on
4544 def distinct_field_get(self, cr, uid, field, value, args=None, offset=0, limit=None):
4547 if field in self._inherit_fields:
4548 return self.pool[self._inherit_fields[field][0]].distinct_field_get(cr, uid, field, value, args, offset, limit)
4550 return self._columns[field].search(cr, self, args, field, value, offset, limit, uid)
4552 def copy_data(self, cr, uid, id, default=None, context=None):
4554 Copy given record's data with all its field values
4556 :param cr: database cursor
4557 :param uid: current user id
4558 :param id: id of the record to copy
4559 :param default: field values to override in the original values of the copied record
4560 :type default: dictionary
4561 :param context: context arguments, like lang, time zone
4562 :type context: dictionary
4563 :return: dictionary containing all the field values
4569 # avoid recursion through already copied records in case of circular relationship
4570 seen_map = context.setdefault('__copy_data_seen', {})
4571 if id in seen_map.setdefault(self._name, []):
4573 seen_map[self._name].append(id)
4577 if 'state' not in default:
4578 if 'state' in self._defaults:
4579 if callable(self._defaults['state']):
4580 default['state'] = self._defaults['state'](self, cr, uid, context)
4582 default['state'] = self._defaults['state']
4584 # build a black list of fields that should not be copied
4585 blacklist = set(MAGIC_COLUMNS + ['parent_left', 'parent_right'])
4586 def blacklist_given_fields(obj):
4587 # blacklist the fields that are given by inheritance
4588 for other, field_to_other in obj._inherits.items():
4589 blacklist.add(field_to_other)
4590 if field_to_other in default:
4591 # all the fields of 'other' are given by the record: default[field_to_other],
4592 # except the ones redefined in self
4593 blacklist.update(set(self.pool[other]._all_columns) - set(self._columns))
4595 blacklist_given_fields(self.pool[other])
4596 # blacklist deprecated fields
4597 for name, field in obj._columns.items():
4598 if field.deprecated:
4601 blacklist_given_fields(self)
4604 fields_to_copy = dict((f,fi) for f, fi in self._all_columns.iteritems()
4607 if f not in blacklist)
4609 data = self.read(cr, uid, [id], fields_to_copy.keys(), context=context)
4613 raise IndexError(_("Record #%d of %s not found, cannot copy!") % (id, self._name))
4616 for f, colinfo in fields_to_copy.iteritems():
4617 field = colinfo.column
4618 if field._type == 'many2one':
4619 res[f] = data[f] and data[f][0]
4620 elif field._type == 'one2many':
4621 other = self.pool[field._obj]
4622 # duplicate following the order of the ids because we'll rely on
4623 # it later for copying translations in copy_translation()!
4624 lines = [other.copy_data(cr, uid, line_id, context=context) for line_id in sorted(data[f])]
4625 # the lines are duplicated using the wrong (old) parent, but then
4626 # are reassigned to the correct one thanks to the (0, 0, ...)
4627 res[f] = [(0, 0, line) for line in lines if line]
4628 elif field._type == 'many2many':
4629 res[f] = [(6, 0, data[f])]
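# Illustrative sketch (hypothetical model/field names, not part of the original
# module): copy_data() re-encodes relational values as command tuples, roughly:
#
#     data = order_obj.copy_data(cr, uid, order_id, context=context)
#     # data could look like:
#     # {'name': 'SO001',
#     #  'partner_id': 7,                            # many2one -> plain id
#     #  'line_ids': [(0, 0, {'product_id': 42})],   # one2many -> create commands
#     #  'tag_ids': [(6, 0, [1, 2, 3])]}             # many2many -> replace command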
4635 def copy_translations(self, cr, uid, old_id, new_id, context=None):
4639 # avoid recursion through already copied records in case of circular relationship
4640 seen_map = context.setdefault('__copy_translations_seen',{})
4641 if old_id in seen_map.setdefault(self._name,[]):
4643 seen_map[self._name].append(old_id)
4645 trans_obj = self.pool.get('ir.translation')
4646 # TODO it seems fields_get can be replaced by _all_columns (no need for translation)
4647 fields = self.fields_get(cr, uid, context=context)
4649 for field_name, field_def in fields.items():
4650 # removing the lang to compare untranslated values
4651 context_wo_lang = dict(context, lang=None)
4652 old_record, new_record = self.browse(cr, uid, [old_id, new_id], context=context_wo_lang)
4653 # we must recursively copy the translations for o2o and o2m
4654 if field_def['type'] == 'one2many':
4655 target_obj = self.pool[field_def['relation']]
4656 # here we rely on the order of the ids to match the translations
4657 # as foreseen in copy_data()
4658 old_children = sorted(r.id for r in old_record[field_name])
4659 new_children = sorted(r.id for r in new_record[field_name])
4660 for (old_child, new_child) in zip(old_children, new_children):
4661 target_obj.copy_translations(cr, uid, old_child, new_child, context=context)
4662 # and for translatable fields we keep them for copy
4663 elif field_def.get('translate'):
4664 if field_name in self._columns:
4665 trans_name = self._name + "," + field_name
4668 elif field_name in self._inherit_fields:
4669 trans_name = self._inherit_fields[field_name][0] + "," + field_name
4670 # get the id of the parent record to set the translation
4671 inherit_field_name = self._inherit_fields[field_name][1]
4672 target_id = new_record[inherit_field_name].id
4673 source_id = old_record[inherit_field_name].id
4677 trans_ids = trans_obj.search(cr, uid, [
4678 ('name', '=', trans_name),
4679 ('res_id', '=', source_id)
4681 user_lang = context.get('lang')
4682 for record in trans_obj.read(cr, uid, trans_ids, context=context):
4684 # remove source to avoid triggering _set_src
4685 del record['source']
4686 record.update({'res_id': target_id})
4687 if user_lang and user_lang == record['lang']:
4688 # 'source' to force the call to _set_src
4689 # 'value' needed if value is changed in copy(), want to see the new_value
4690 record['source'] = old_record[field_name]
4691 record['value'] = new_record[field_name]
4692 trans_obj.create(cr, uid, record, context=context)
4694 @api.returns('self', lambda value: value.id)
4695 def copy(self, cr, uid, id, default=None, context=None):
4696 """ copy(default=None)
4698 Duplicate the record with the given id, updating it with default values
4700 :param dict default: dictionary of field values to override in the
4701 original values of the copied record, e.g.: ``{'field_name': overridden_value, ...}``
4702 :returns: new record
4707 context = context.copy()
4708 data = self.copy_data(cr, uid, id, default, context)
4709 new_id = self.create(cr, uid, data, context)
4710 self.copy_translations(cr, uid, id, new_id, context)
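# Illustrative sketch (hypothetical ids and values): a typical old-API call,
# overriding a field on the duplicate:
#
#     new_id = self.pool['res.partner'].copy(cr, uid, partner_id,
#                                             default={'name': 'Copy of Agrolait'},
#                                             context=context)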
4714 @api.returns('self')
4716 """ exists() -> records
4718 Returns the subset of records in `self` that exist, and marks deleted
4719 records as such in cache. It can be used as a test on records::
4724 By convention, new records are returned as existing.
4726 ids = filter(None, self._ids) # ids to check in database
4729 query = """SELECT id FROM "%s" WHERE id IN %%s""" % self._table
4730 self._cr.execute(query, (ids,))
4731 ids = ([r[0] for r in self._cr.fetchall()] + # ids in database
4732 [id for id in self._ids if not id]) # new ids
4733 existing = self.browse(ids)
4734 if len(existing) < len(self):
4735 # mark missing records in cache with a failed value
4736 exc = MissingError(_("Record does not exist or has been deleted."))
4737 (self - existing)._cache.update(FailedValue(exc))
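# Illustrative sketch (new-style API): exists() is commonly used to guard
# against concurrently deleted records, e.g.
#
#     if record.exists():
#         record.write({'state': 'done'})
#     # or to keep only the records still present in the database:
#     records = records.exists()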
4740 def check_recursion(self, cr, uid, ids, context=None, parent=None):
4741 _logger.warning("You are using the deprecated %s.check_recursion(). Please use '_check_recursion()' instead!" % \
4743 assert parent is None or parent in self._columns or parent in self._inherit_fields,\
4744 "The 'parent' parameter passed to check_recursion() must be None or a valid field name"
4745 return self._check_recursion(cr, uid, ids, context, parent)
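# Illustrative sketch (hypothetical model): _check_recursion() is typically
# wired into _constraints to forbid cycles in a parent_id hierarchy, e.g.
#
#     _constraints = [
#         (osv.osv._check_recursion,
#          'Error! You cannot create recursive categories.',
#          ['parent_id']),
#     ]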
4747 def _check_recursion(self, cr, uid, ids, context=None, parent=None):
4749 Verifies that there is no loop in a hierarchical structure of records,
4750 by following the parent relationship using the **parent** field until a loop
4751 is detected or until a top-level record is found.
4753 :param cr: database cursor
4754 :param uid: current user id
4755 :param ids: list of ids of records to check
4756 :param parent: optional parent field name (default: ``self._parent_name = parent_id``)
4757 :return: **True** if the operation can proceed safely, or **False** if an infinite loop is detected.
4760 parent = self._parent_name
4762 # must ignore 'active' flag, ir.rules, etc. => direct SQL query
4763 query = 'SELECT "%s" FROM "%s" WHERE id = %%s' % (parent, self._table)
4766 while current_id is not None:
4767 cr.execute(query, (current_id,))
4768 result = cr.fetchone()
4769 current_id = result[0] if result else None
4770 if current_id == id:
4774 def _check_m2m_recursion(self, cr, uid, ids, field_name):
4776 Verifies that there is no loop in a hierarchical structure of records,
4777 by following the parent relationship through the many2many **field_name** until a loop
4778 is detected or until a top-level record is found.
4780 :param cr: database cursor
4781 :param uid: current user id
4782 :param ids: list of ids of records to check
4783 :param field_name: field to check
4784 :return: **True** if the operation can proceed safely, or **False** if an infinite loop is detected.
4787 field = self._all_columns.get(field_name)
4788 field = field.column if field else None
4789 if not field or field._type != 'many2many' or field._obj != self._name:
4790 # field must be a many2many on itself
4791 raise ValueError('invalid field_name: %r' % (field_name,))
4793 query = 'SELECT distinct "%s" FROM "%s" WHERE "%s" IN %%s' % (field._id2, field._rel, field._id1)
4797 for i in range(0, len(ids_parent), cr.IN_MAX):
4799 sub_ids_parent = ids_parent[i:j]
4800 cr.execute(query, (tuple(sub_ids_parent),))
4801 ids_parent2.extend(filter(None, map(lambda x: x[0], cr.fetchall())))
4802 ids_parent = ids_parent2
4803 for i in ids_parent:
4808 def _get_external_ids(self, cr, uid, ids, *args, **kwargs):
4809 """Retrieve the External ID(s) of any database record.
4811 **Synopsis**: ``_get_xml_ids(cr, uid, ids) -> { 'id': ['module.xml_id'] }``
4813 :return: map of ids to the list of their fully qualified External IDs
4814 in the form ``module.key``, or an empty list when there's no External
4815 ID for a record, e.g.::
4817 { 'id': ['module.ext_id', 'module.ext_id_bis'],
4820 ir_model_data = self.pool.get('ir.model.data')
4821 data_ids = ir_model_data.search(cr, uid, [('model', '=', self._name), ('res_id', 'in', ids)])
4822 data_results = ir_model_data.read(cr, uid, data_ids, ['module', 'name', 'res_id'])
4825 # can't use dict.fromkeys() as the list would be shared!
4827 for record in data_results:
4828 result[record['res_id']].append('%(module)s.%(name)s' % record)
4831 def get_external_id(self, cr, uid, ids, *args, **kwargs):
4832 """Retrieve the External ID of any database record, if there
4833 is one. This method works as a possible implementation
4834 for a function field, to be able to add it to any
4835 model object easily, referencing it as ``Model.get_external_id``.
4837 When multiple External IDs exist for a record, only one
4838 of them is returned (randomly).
4840 :return: map of ids to their fully qualified XML ID,
4841 defaulting to an empty string when there's none
4842 (to be usable as a function field),
4845 { 'id': 'module.ext_id',
4848 results = self._get_xml_ids(cr, uid, ids)
4849 for k, v in results.iteritems():
4856 # backwards compatibility
4857 get_xml_id = get_external_id
4858 _get_xml_ids = _get_external_ids
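# Illustrative sketch (hypothetical ids/XML IDs): get_external_id() maps each
# id to a single fully qualified XML ID, or '' when none exists, e.g.
#
#     self.pool['res.partner'].get_external_id(cr, uid, [1, 42])
#     # -> {1: 'base.main_partner', 42: ''}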
4860 def print_report(self, cr, uid, ids, name, data, context=None):
4862 Render the report `name` for the given IDs. The report must be defined
4863 for this model, not another.
4865 report = self.pool['ir.actions.report.xml']._lookup_report(cr, name)
4866 assert self._name == report.table
4867 return report.create(cr, uid, ids, data, context)
4871 def is_transient(cls):
4872 """ Return whether the model is transient.
4874 See :class:`TransientModel`.
4877 return cls._transient
4879 def _transient_clean_rows_older_than(self, cr, seconds):
4880 assert self._transient, "Model %s is not transient, it cannot be vacuumed!" % self._name
4881 # Never delete rows used in last 5 minutes
4882 seconds = max(seconds, 300)
4883 query = ("SELECT id FROM " + self._table + " WHERE"
4884 " COALESCE(write_date, create_date, (now() at time zone 'UTC'))::timestamp"
4885 " < ((now() at time zone 'UTC') - interval %s)")
4886 cr.execute(query, ("%s seconds" % seconds,))
4887 ids = [x[0] for x in cr.fetchall()]
4888 self.unlink(cr, SUPERUSER_ID, ids)
4890 def _transient_clean_old_rows(self, cr, max_count):
4891 # Check how many rows we have in the table
4892 cr.execute("SELECT count(*) AS row_count FROM " + self._table)
4894 if res[0][0] <= max_count:
4895 return # max not reached, nothing to do
4896 self._transient_clean_rows_older_than(cr, 300)
4898 def _transient_vacuum(self, cr, uid, force=False):
4899 """Clean the transient records.
4901 This unlinks old records from the transient model tables whenever the
4902 "_transient_max_count" or "_max_age" conditions (if any) are reached.
4903 Actual cleaning will happen only once every "_transient_check_time" calls.
4904 This means this method can be called frequently (e.g. whenever
4905 a new record is created).
4906 Example with both max_hours and max_count active:
4907 Suppose max_hours = 0.2 (i.e. 12 minutes), max_count = 20, there are 55 rows in the
4908 table, 10 created/changed in the last 5 minutes, an additional 12 created/changed between
4909 5 and 10 minutes ago, the rest created/changed more than 12 minutes ago.
4910 - age-based vacuum will leave the 22 rows created/changed in the last 12 minutes
4911 - count-based vacuum will wipe out another 12 rows, not just 2; otherwise each addition
4912 would immediately cause the maximum to be reached again.
4913 - the 10 rows that have been created/changed in the last 5 minutes will NOT be deleted
4915 assert self._transient, "Model %s is not transient, it cannot be vacuumed!" % self._name
4916 _transient_check_time = 20 # arbitrary limit on vacuum executions
4917 self._transient_check_count += 1
4918 if not force and (self._transient_check_count < _transient_check_time):
4919 return True # no vacuum cleaning this time
4920 self._transient_check_count = 0
4922 # Age-based expiration
4923 if self._transient_max_hours:
4924 self._transient_clean_rows_older_than(cr, self._transient_max_hours * 60 * 60)
4926 # Count-based expiration
4927 if self._transient_max_count:
4928 self._transient_clean_old_rows(cr, self._transient_max_count)
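# Illustrative sketch (hypothetical wizard model): the vacuum thresholds are
# plain class attributes on the transient model, e.g.
#
#     class my_wizard(osv.osv_memory):      # models.TransientModel in the new API
#         _name = 'my.wizard'
#         _transient_max_count = 200        # count-based threshold
#         _transient_max_hours = 1.0        # age-based threshold, in hours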
4932 def resolve_2many_commands(self, cr, uid, field_name, commands, fields=None, context=None):
4933 """ Serializes one2many and many2many commands into record dictionaries
4934 (as if all the records came from the database via a read()). This
4935 method is aimed at onchange methods on one2many and many2many fields.
4937 Because commands might be creation commands, not all record dicts
4938 will contain an ``id`` field. Commands matching an existing record
4939 will have an ``id``.
4941 :param field_name: name of the one2many or many2many field matching the commands
4942 :type field_name: str
4943 :param commands: one2many or many2many commands to execute on ``field_name``
4944 :type commands: list((int|False, int|False, dict|False))
4945 :param fields: list of fields to read from the database, when applicable
4946 :type fields: list(str)
4947 :returns: records in a shape similar to that returned by ``read()``
4948 (except records may be missing the ``id`` field if they don't exist in db)
4951 result = [] # result (list of dict)
4952 record_ids = [] # ids of records to read
4953 updates = {} # {id: dict} of updates on particular records
4955 for command in commands or []:
4956 if not isinstance(command, (list, tuple)):
4957 record_ids.append(command)
4958 elif command[0] == 0:
4959 result.append(command[2])
4960 elif command[0] == 1:
4961 record_ids.append(command[1])
4962 updates.setdefault(command[1], {}).update(command[2])
4963 elif command[0] in (2, 3):
4964 record_ids = [id for id in record_ids if id != command[1]]
4965 elif command[0] == 4:
4966 record_ids.append(command[1])
4967 elif command[0] == 5:
4968 result, record_ids = [], []
4969 elif command[0] == 6:
4970 result, record_ids = [], list(command[2])
4972 # read the records and apply the updates
4973 other_model = self.pool[self._all_columns[field_name].column._obj]
4974 for record in other_model.read(cr, uid, record_ids, fields=fields, context=context):
4975 record.update(updates.get(record['id'], {}))
4976 result.append(record)
4980 # for backward compatibility
4981 resolve_o2m_commands_to_record_dicts = resolve_2many_commands
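# Illustrative sketch (hypothetical 'line_ids' field and ids): creation (0),
# update (1) and link (4) commands are serialized into read()-like dicts, roughly:
#
#     commands = [(0, 0, {'name': 'new line'}), (1, 7, {'qty': 2.0}), (4, 8)]
#     self.resolve_2many_commands(cr, uid, 'line_ids', commands,
#                                 fields=['name', 'qty'], context=context)
#     # -> [{'name': 'new line'},                       # created, no 'id'
#     #     {'id': 7, 'name': ..., 'qty': 2.0},         # read, then updated
#     #     {'id': 8, 'name': ..., 'qty': ...}]         # read as-is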
4983 def search_read(self, cr, uid, domain=None, fields=None, offset=0, limit=None, order=None, context=None):
4985 Performs a ``search()`` followed by a ``read()``.
4987 :param cr: database cursor
4988 :param uid: current user id
4989 :param domain: Search domain, see ``args`` parameter in ``search()``. Defaults to an empty domain that will match all records.
4990 :param fields: List of fields to read, see ``fields`` parameter in ``read()``. Defaults to all fields.
4991 :param offset: Number of records to skip, see ``offset`` parameter in ``search()``. Defaults to 0.
4992 :param limit: Maximum number of records to return, see ``limit`` parameter in ``search()``. Defaults to no limit.
4993 :param order: Columns to sort result, see ``order`` parameter in ``search()``. Defaults to no sort.
4994 :param context: context arguments.
4995 :return: List of dictionaries containing the asked fields.
4996 :rtype: List of dictionaries.
4999 record_ids = self.search(cr, uid, domain or [], offset=offset, limit=limit, order=order, context=context)
5003 if fields and fields == ['id']:
5004 # shortcut read if we only want the ids
5005 return [{'id': id} for id in record_ids]
5007 # read() ignores active_test, but it would forward it to any downstream search call
5008 # (e.g. for x2m or function fields), and this is not the desired behavior, the flag
5009 # was presumably only meant for the main search().
5010 # TODO: Move this to read() directly?
5011 read_ctx = dict(context or {})
5012 read_ctx.pop('active_test', None)
5014 result = self.read(cr, uid, record_ids, fields, context=read_ctx)
5015 if len(result) <= 1:
5019 index = dict((r['id'], r) for r in result)
5020 return [index[x] for x in record_ids if x in index]
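# Illustrative sketch (hypothetical domain/fields): search_read() combines the
# two calls and returns the rows in search order, e.g.
#
#     self.pool['res.partner'].search_read(cr, uid,
#         domain=[('customer', '=', True)],
#         fields=['name', 'email'], limit=5, order='name')
#     # -> [{'id': 7, 'name': 'Agrolait', 'email': ...}, ...]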
5022 def _register_hook(self, cr):
5023 """ stuff to do right after the registry is built """
5027 def _patch_method(cls, name, method):
5028 """ Monkey-patch a method for all instances of this model. This replaces
5029 the method called `name` by `method` in the given class.
5030 The original method is then accessible via ``method.origin``, and it
5031 can be restored with :meth:`~._revert_method`.
5036 def do_write(self, values):
5037 # do stuff, and call the original method
5038 return do_write.origin(self, values)
5040 # patch method write of model
5041 model._patch_method('write', do_write)
5043 # this will call do_write
5044 records = model.search([...])
5047 # restore the original method
5048 model._revert_method('write')
5050 origin = getattr(cls, name)
5051 method.origin = origin
5052 # propagate decorators from origin to method, and apply api decorator
5053 wrapped = api.guess(api.propagate(origin, method))
5054 wrapped.origin = origin
5055 setattr(cls, name, wrapped)
5058 def _revert_method(cls, name):
5059 """ Revert the original method called `name` in the given class.
5060 See :meth:`~._patch_method`.
5062 method = getattr(cls, name)
5063 setattr(cls, name, method.origin)
5068 # An instance represents an ordered collection of records in a given
5069 # execution environment. The instance object refers to the environment, and
5070 # the records themselves are represented by their cache dictionary. The 'id'
5071 # of each record is found in its corresponding cache dictionary.
5073 # This design has the following advantages:
5074 # - cache access is direct and thus fast;
5075 # - one can consider records without an 'id' (see new records);
5076 # - the global cache is only an index to "resolve" a record 'id'.
5080 def _browse(cls, env, ids):
5081 """ Create an instance attached to `env`; `ids` is a tuple of record
5084 records = object.__new__(cls)
5087 env.prefetch[cls._name].update(ids)
5091 def browse(self, cr, uid, arg=None, context=None):
5092 ids = _normalize_ids(arg)
5093 #assert all(isinstance(id, IdType) for id in ids), "Browsing invalid ids: %s" % ids
5094 return self._browse(Environment(cr, uid, context or {}), ids)
5097 def browse(self, arg=None):
5098 """ browse([ids]) -> records
5100 Returns a recordset for the ids provided as parameter in the current
5103 Can take no ids, a single id or a sequence of ids.
5105 ids = _normalize_ids(arg)
5106 #assert all(isinstance(id, IdType) for id in ids), "Browsing invalid ids: %s" % ids
5107 return self._browse(self.env, ids)
5110 # Internal properties, for manipulating the instance's implementation
5115 """ List of actual record ids in this recordset (ignores placeholder
5116 ids for records to create)
5118 return filter(None, list(self._ids))
5120 # backward-compatibility with former browse records
5121 _cr = property(lambda self: self.env.cr)
5122 _uid = property(lambda self: self.env.uid)
5123 _context = property(lambda self: self.env.context)
5126 # Conversion methods
5129 def ensure_one(self):
5130 """ Verifies that the current recorset holds a single record. Raises
5131 an exception otherwise.
5135 raise except_orm("ValueError", "Expected singleton: %s" % self)
5137 def with_env(self, env):
5138 """ Returns a new version of this recordset attached to the provided
5141 :type env: :class:`~openerp.api.Environment`
5143 return self._browse(env, self._ids)
5145 def sudo(self, user=SUPERUSER_ID):
5146 """ sudo([user=SUPERUSER])
5148 Returns a new version of this recordset attached to the provided
5151 return self.with_env(self.env(user=user))
5153 def with_context(self, *args, **kwargs):
5154 """ with_context([context][, **overrides]) -> records
5156 Returns a new version of this recordset attached to an extended
5159 The extended context is either the provided ``context`` in which
5160 ``overrides`` are merged or the *current* context in which
5161 ``overrides`` are merged e.g.::
5163 # current context is {'key1': True}
5164 r2 = records.with_context({}, key2=True)
5165 # -> r2._context is {'key2': True}
5166 r2 = records.with_context(key2=True)
5167 # -> r2._context is {'key1': True, 'key2': True}
5169 context = dict(args[0] if args else self._context, **kwargs)
5170 return self.with_env(self.env(context=context))
5172 def _convert_to_cache(self, values, update=False, validate=True):
5173 """ Convert the `values` dictionary into cached values.
5175 :param update: whether the conversion is made for updating `self`;
5176 this is necessary for interpreting the commands of *2many fields
5177 :param validate: whether values must be checked
5179 fields = self._fields
5180 target = self if update else self.browse()
5182 name: fields[name].convert_to_cache(value, target, validate=validate)
5183 for name, value in values.iteritems()
5187 def _convert_to_write(self, values):
5188 """ Convert the `values` dictionary into the format of :meth:`write`. """
5189 fields = self._fields
5191 for name, value in values.iteritems():
5193 value = fields[name].convert_to_write(value)
5194 if not isinstance(value, NewId):
5195 result[name] = value
5199 # Record traversal and update
5202 def _mapped_func(self, func):
5203 """ Apply function `func` on all records in `self`, and return the
5204 result as a list or a recordset (if `func` returns recordsets).
5206 vals = [func(rec) for rec in self]
5207 val0 = vals[0] if vals else func(self)
5208 if isinstance(val0, BaseModel):
5209 return reduce(operator.or_, vals, val0)
5212 def mapped(self, func):
5213 """ Apply `func` on all records in `self`, and return the result as a
5214 list or a recordset (if `func` returns recordsets). In the latter
5215 case, the order of the returned recordset is arbitrary.
5217 :param func: a function or a dot-separated sequence of field names
5219 if isinstance(func, basestring):
5221 for name in func.split('.'):
5222 recs = recs._mapped_func(operator.itemgetter(name))
5225 return self._mapped_func(func)
5227 def _mapped_cache(self, name_seq):
5228 """ Same as `~.mapped`, but `name_seq` is a dot-separated sequence of
5229 field names, and only cached values are used.
5232 for name in name_seq.split('.'):
5233 field = recs._fields[name]
5234 null = field.null(self.env)
5235 recs = recs.mapped(lambda rec: rec._cache.get(field, null))
5238 def filtered(self, func):
5239 """ Select the records in `self` such that `func(rec)` is true, and
5240 return them as a recordset.
5242 :param func: a function or a dot-separated sequence of field names
5244 if isinstance(func, basestring):
5246 func = lambda rec: filter(None, rec.mapped(name))
5247 return self.browse([rec.id for rec in self if func(rec)])
5249 def sorted(self, key=None):
5250 """ Return the recordset `self` ordered by `key` """
5252 return self.search([('id', 'in', self.ids)])
5254 return self.browse(map(int, sorted(self, key=key)))
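# Illustrative sketch (new-style API, hypothetical fields): mapped(), filtered()
# and sorted() are typically used like this:
#
#     partners.mapped('name')                      # list of names
#     partners.mapped('parent_id')                 # recordset of parents
#     partners.filtered(lambda p: p.customer)      # keep only customers
#     partners.filtered('email')                   # keep records with a non-empty email
#     partners.sorted(key=lambda p: p.name)        # records sorted by name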
5256 def update(self, values):
5257 """ Update record `self[0]` with `values`. """
5258 for name, value in values.iteritems():
5262 # New records - represent records that do not exist in the database yet;
5263 # they are used to compute default values and perform onchanges.
5267 def new(self, values={}):
5268 """ new([values]) -> record
5270 Return a new record instance attached to the current environment and
5271 initialized with the provided ``values``. The record is *not* created
5272 in database, it only exists in memory.
5274 record = self.browse([NewId()])
5275 record._cache.update(record._convert_to_cache(values, update=True))
5277 if record.env.in_onchange:
5278 # The cache update does not set inverse fields, so do it manually.
5279 # This is useful for computing a function field on secondary
5280 # records, if that field depends on the main record.
5282 field = self._fields.get(name)
5284 for invf in field.inverse_fields:
5285 invf._update(record[name], record)
5290 # Dirty flag, to mark records modified (in draft mode)
5295 """ Return whether any record in `self` is dirty. """
5296 dirty = self.env.dirty
5297 return any(record in dirty for record in self)
5300 def _dirty(self, value):
5301 """ Mark the records in `self` as dirty. """
5303 map(self.env.dirty.add, self)
5305 map(self.env.dirty.discard, self)
5311 def __nonzero__(self):
5312 """ Test whether `self` is nonempty. """
5313 return bool(getattr(self, '_ids', True))
5316 """ Return the size of `self`. """
5317 return len(self._ids)
5320 """ Return an iterator over `self`. """
5321 for id in self._ids:
5322 yield self._browse(self.env, (id,))
5324 def __contains__(self, item):
5325 """ Test whether `item` (record or field name) is an element of `self`.
5326 In the first case, the test is fully equivalent to::
5328 any(item == record for record in self)
5330 if isinstance(item, BaseModel) and self._name == item._name:
5331 return len(item) == 1 and item.id in self._ids
5332 elif isinstance(item, basestring):
5333 return item in self._fields
5335 raise except_orm("ValueError", "Mixing apples and oranges: %s in %s" % (item, self))
5337 def __add__(self, other):
5338 """ Return the concatenation of two recordsets. """
5339 if not isinstance(other, BaseModel) or self._name != other._name:
5340 raise except_orm("ValueError", "Mixing apples and oranges: %s + %s" % (self, other))
5341 return self.browse(self._ids + other._ids)
5343 def __sub__(self, other):
5344 """ Return the recordset of all the records in `self` that are not in `other`. """
5345 if not isinstance(other, BaseModel) or self._name != other._name:
5346 raise except_orm("ValueError", "Mixing apples and oranges: %s - %s" % (self, other))
5347 other_ids = set(other._ids)
5348 return self.browse([id for id in self._ids if id not in other_ids])
5350 def __and__(self, other):
5351 """ Return the intersection of two recordsets.
5352 Note that recordset order is not preserved.
5354 if not isinstance(other, BaseModel) or self._name != other._name:
5355 raise except_orm("ValueError", "Mixing apples and oranges: %s & %s" % (self, other))
5356 return self.browse(set(self._ids) & set(other._ids))
5358 def __or__(self, other):
5359 """ Return the union of two recordsets.
5360 Note that recordset order is not preserved.
5362 if not isinstance(other, BaseModel) or self._name != other._name:
5363 raise except_orm("ValueError", "Mixing apples and oranges: %s | %s" % (self, other))
5364 return self.browse(set(self._ids) | set(other._ids))
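# Illustrative sketch: recordsets of the same model combine with the operators
# defined above; + and - preserve order, & and | do not, e.g.
#
#     a = model.browse([1, 2, 3])
#     b = model.browse([3, 4])
#     a + b    # ids (1, 2, 3, 3, 4) -- concatenation, duplicates kept
#     a - b    # ids (1, 2)
#     a & b    # records with ids {3}
#     a | b    # records with ids {1, 2, 3, 4}, order not guaranteed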
5366 def __eq__(self, other):
5367 """ Test whether two recordsets are equivalent (up to reordering). """
5368 if not isinstance(other, BaseModel):
5370 _logger.warning("Comparing apples and oranges: %s == %s", self, other)
5372 return self._name == other._name and set(self._ids) == set(other._ids)
5374 def __ne__(self, other):
5375 return not self == other
5377 def __lt__(self, other):
5378 if not isinstance(other, BaseModel) or self._name != other._name:
5379 raise except_orm("ValueError", "Mixing apples and oranges: %s < %s" % (self, other))
5380 return set(self._ids) < set(other._ids)
5382 def __le__(self, other):
5383 if not isinstance(other, BaseModel) or self._name != other._name:
5384 raise except_orm("ValueError", "Mixing apples and oranges: %s <= %s" % (self, other))
5385 return set(self._ids) <= set(other._ids)
5387 def __gt__(self, other):
5388 if not isinstance(other, BaseModel) or self._name != other._name:
5389 raise except_orm("ValueError", "Mixing apples and oranges: %s > %s" % (self, other))
5390 return set(self._ids) > set(other._ids)
5392 def __ge__(self, other):
5393 if not isinstance(other, BaseModel) or self._name != other._name:
5394 raise except_orm("ValueError", "Mixing apples and oranges: %s >= %s" % (self, other))
5395 return set(self._ids) >= set(other._ids)
5401 return "%s%s" % (self._name, getattr(self, '_ids', ""))
5403 def __unicode__(self):
5404 return unicode(str(self))
5409 if hasattr(self, '_ids'):
5410 return hash((self._name, frozenset(self._ids)))
5412 return hash(self._name)
5414 def __getitem__(self, key):
5415 """ If `key` is an integer or a slice, return the corresponding record
5416 selection as an instance (attached to `self.env`).
5417 Otherwise read the field `key` of the first record in `self`.
5421 inst = model.search(dom) # inst is a recordset
5422 r4 = inst[3] # fourth record in inst
5423 rs = inst[10:20] # subset of inst
5424 nm = rs['name'] # name of first record in inst
5426 if isinstance(key, basestring):
5427 # important: one must call the field's getter
5428 return self._fields[key].__get__(self, type(self))
5429 elif isinstance(key, slice):
5430 return self._browse(self.env, self._ids[key])
5432 return self._browse(self.env, (self._ids[key],))
5434 def __setitem__(self, key, value):
5435 """ Assign the field `key` to `value` in record `self`. """
5436 # important: one must call the field's setter
5437 return self._fields[key].__set__(self, value)
5440 # Cache and recomputation management
5445 """ Return the cache of `self`, mapping field names to values. """
5446 return RecordCache(self)
5449 def _in_cache_without(self, field):
5450 """ Make sure `self` is present in cache (for prefetching), and return
5451 the records of model `self` in cache that have no value for `field`
5452 (:class:`Field` instance).
5455 prefetch_ids = env.prefetch[self._name]
5456 prefetch_ids.update(self._ids)
5457 ids = filter(None, prefetch_ids - set(env.cache[field]))
5458 return self.browse(ids)
5462 """ Clear the records cache.
5465 The record cache is automatically invalidated.
5467 self.invalidate_cache()
5470 def invalidate_cache(self, fnames=None, ids=None):
5471 """ Invalidate the record caches after some records have been modified.
5472 If both `fnames` and `ids` are ``None``, the whole cache is cleared.
5474 :param fnames: the list of modified fields, or ``None`` for all fields
5475 :param ids: the list of modified record ids, or ``None`` for all
5479 return self.env.invalidate_all()
5480 fields = self._fields.values()
5482 fields = map(self._fields.__getitem__, fnames)
5484 # invalidate fields and inverse fields, too
5485 spec = [(f, ids) for f in fields] + \
5486 [(invf, None) for f in fields for invf in f.inverse_fields]
5487 self.env.invalidate(spec)
5490 def modified(self, fnames):
5491 """ Notify that fields have been modified on `self`. This invalidates
5492 the cache, and prepares the recomputation of stored function fields
5493 (new-style fields only).
5495 :param fnames: iterable of field names that have been modified on
5498 # each field knows what to invalidate and recompute
5500 for fname in fnames:
5501 spec += self._fields[fname].modified(self)
5505 for env in self.env.all
5506 for field in env.cache
5508 # invalidate non-stored fields.function which are currently cached
5509 spec += [(f, None) for f in self.pool.pure_function_fields
5510 if f in cached_fields]
5512 self.env.invalidate(spec)
5514 def _recompute_check(self, field):
5515 """ If `field` must be recomputed on some record in `self`, return the
5516 corresponding records that must be recomputed.
5518 return self.env.check_todo(field, self)
5520 def _recompute_todo(self, field):
5521 """ Mark `field` to be recomputed. """
5522 self.env.add_todo(field, self)
5524 def _recompute_done(self, field):
5525 """ Mark `field` as recomputed. """
5526 self.env.remove_todo(field, self)
5529 def recompute(self):
5530 """ Recompute stored function fields. The fields and records to
5531 recompute have been determined by method :meth:`modified`.
5533 while self.env.has_todo():
5534 field, recs = self.env.get_todo()
5535 # evaluate the fields to recompute, and save them to database
5536 for rec, rec1 in zip(recs, recs.with_context(recompute=False)):
5538 values = rec._convert_to_write({
5539 f.name: rec[f.name] for f in field.computed_fields
5542 except MissingError:
5544 # mark the computed fields as done
5545 map(recs._recompute_done, field.computed_fields)
5548 # Generic onchange method
5551 def _has_onchange(self, field, other_fields):
5552 """ Return whether `field` should trigger an onchange event in the
5553 presence of `other_fields`.
5555 # test whether self has an onchange method for field, or field is a
5556 # dependency of any field in other_fields
5557 return field.name in self._onchange_methods or \
5558 any(dep in other_fields for dep in field.dependents)
5561 def _onchange_spec(self, view_info=None):
5562 """ Return the onchange spec from a view description; if not given, the
5563 result of ``self.fields_view_get()`` is used.
5567 # for traversing the XML arch and populating result
5568 def process(node, info, prefix):
5569 if node.tag == 'field':
5570 name = node.attrib['name']
5571 names = "%s.%s" % (prefix, name) if prefix else name
5572 if not result.get(names):
5573 result[names] = node.attrib.get('on_change')
5574 # traverse the subviews included in relational fields
5575 for subinfo in info['fields'][name].get('views', {}).itervalues():
5576 process(etree.fromstring(subinfo['arch']), subinfo, names)
5579 process(child, info, prefix)
5581 if view_info is None:
5582 view_info = self.fields_view_get()
5583 process(etree.fromstring(view_info['arch']), view_info, '')
5586 def _onchange_eval(self, field_name, onchange, result):
5587 """ Apply onchange method(s) for field `field_name` with spec `onchange`
5588 on record `self`. Value assignments are applied on `self`, while
5589 domain and warning messages are put in dictionary `result`.
5591 onchange = onchange.strip()
5594 if onchange in ("1", "true"):
5595 for method in self._onchange_methods.get(field_name, ()):
5596 method_res = method(self)
5599 if 'domain' in method_res:
5600 result.setdefault('domain', {}).update(method_res['domain'])
5601 if 'warning' in method_res:
5602 result['warning'] = method_res['warning']
5606 match = onchange_v7.match(onchange)
5608 method, params = match.groups()
5610 # evaluate params -> tuple
5611 global_vars = {'context': self._context, 'uid': self._uid}
5612 if self._context.get('field_parent'):
5613 class RawRecord(object):
5614 def __init__(self, record):
5615 self._record = record
5616 def __getattr__(self, name):
5617 field = self._record._fields[name]
5618 value = self._record[name]
5619 return field.convert_to_onchange(value)
5620 record = self[self._context['field_parent']]
5621 global_vars['parent'] = RawRecord(record)
5623 key: self._fields[key].convert_to_onchange(val)
5624 for key, val in self._cache.iteritems()
5626 params = eval("[%s]" % params, global_vars, field_vars)
5628 # call onchange method
5629 args = (self._cr, self._uid, self._origin.ids) + tuple(params)
5630 method_res = getattr(self._model, method)(*args)
5631 if not isinstance(method_res, dict):
5633 if 'value' in method_res:
5634 method_res['value'].pop('id', None)
5635 self.update(self._convert_to_cache(method_res['value'], validate=False))
5636 if 'domain' in method_res:
5637 result.setdefault('domain', {}).update(method_res['domain'])
5638 if 'warning' in method_res:
5639 result['warning'] = method_res['warning']
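# Illustrative sketch (hypothetical view and method names): the v7 branch above
# handles on_change attributes written as method calls in the view, e.g.
#
#     <field name="partner_id" on_change="onchange_partner_id(partner_id, company_id)"/>
#
# which is parsed by the `onchange_v7` regex and evaluated against the record's
# cached field values.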
5642 def onchange(self, values, field_name, field_onchange):
5643 """ Perform an onchange on the given field.
5645 :param values: dictionary mapping field names to values, giving the
5646 current state of modification
5647 :param field_name: name of the modified field
5648 :param field_onchange: dictionary mapping field names to their
5653 if field_name and field_name not in self._fields:
5656 # determine subfields for field.convert_to_write() below
5658 subfields = defaultdict(set)
5659 for dotname in field_onchange:
5661 secondary.append(dotname)
5662 name, subname = dotname.split('.')
5663 subfields[name].add(subname)
5665 # create a new record with values, and attach `self` to it
5666 with env.do_in_onchange():
5667 record = self.new(values)
5668 values = dict(record._cache)
5669 # attach `self` with a different context (for cache consistency)
5670 record._origin = self.with_context(__onchange=True)
5672 # determine which fields should trigger an onchange
5673 todo = set([field_name]) if field_name else set(values)
5676 # dummy assignment: trigger invalidations on the record
5678 value = record[name]
5679 field = self._fields[name]
5680 if not field_name and field.type == 'many2one' and field.delegate and not value:
5681 # do not nullify all fields of parent record for new records
5683 record[name] = value
5685 result = {'value': {}}
5693 with env.do_in_onchange():
5694 # apply field-specific onchange methods
5695 if field_onchange.get(name):
5696 record._onchange_eval(name, field_onchange[name], result)
5698 # force re-evaluation of function fields on secondary records
5699 for field_seq in secondary:
5700 record.mapped(field_seq)
5702 # determine which fields have been modified
5703 for name, oldval in values.iteritems():
5704 field = self._fields[name]
5705 newval = record[name]
5706 if field.type in ('one2many', 'many2many'):
5707 if newval != oldval or newval._dirty:
5708 # put new value in result
5709 result['value'][name] = field.convert_to_write(
5710 newval, record._origin, subfields.get(name),
5714 # keep result: newval may have been dirty before
5717 if newval != oldval:
5718 # put new value in result
5719 result['value'][name] = field.convert_to_write(
5720 newval, record._origin, subfields.get(name),
5724 # clean up result to not return another value
5725 result['value'].pop(name, None)
5727 # At the moment, the client does not support updates on a *2many field
5728 # while this one is modified by the user.
5729 if field_name and self._fields[field_name].type in ('one2many', 'many2many'):
5730 result['value'].pop(field_name, None)
5735 class RecordCache(MutableMapping):
5736 """ Implements a proxy dictionary to read/update the cache of a record.
5737 Upon iteration, it looks like a dictionary mapping field names to
5738 values. However, fields may be used as keys as well.
5740 def __init__(self, records):
5741 self._recs = records
5743 def contains(self, field):
5744 """ Return whether `records[0]` has a value for `field` in cache. """
5745 if isinstance(field, basestring):
5746 field = self._recs._fields[field]
5747 return self._recs.id in self._recs.env.cache[field]
5749 def __contains__(self, field):
5750 """ Return whether `records[0]` has a regular value for `field` in cache. """
5751 if isinstance(field, basestring):
5752 field = self._recs._fields[field]
5753 dummy = SpecialValue(None)
5754 value = self._recs.env.cache[field].get(self._recs.id, dummy)
5755 return not isinstance(value, SpecialValue)
5757 def __getitem__(self, field):
5758 """ Return the cached value of `field` for `records[0]`. """
5759 if isinstance(field, basestring):
5760 field = self._recs._fields[field]
5761 value = self._recs.env.cache[field][self._recs.id]
5762 return value.get() if isinstance(value, SpecialValue) else value
5764 def __setitem__(self, field, value):
5765 """ Assign the cached value of `field` for all records in `records`. """
5766 if isinstance(field, basestring):
5767 field = self._recs._fields[field]
5768 values = dict.fromkeys(self._recs._ids, value)
5769 self._recs.env.cache[field].update(values)
5771 def update(self, *args, **kwargs):
5772 """ Update the cache of all records in `records`. If the argument is a
5773 `SpecialValue`, update all fields (except "magic" columns).
5775 if args and isinstance(args[0], SpecialValue):
5776 values = dict.fromkeys(self._recs._ids, args[0])
5777 for name, field in self._recs._fields.iteritems():
5779 self._recs.env.cache[field].update(values)
5781 return super(RecordCache, self).update(*args, **kwargs)
5783 def __delitem__(self, field):
5784 """ Remove the cached value of `field` for all `records`. """
5785 if isinstance(field, basestring):
5786 field = self._recs._fields[field]
5787 field_cache = self._recs.env.cache[field]
5788 for id in self._recs._ids:
5789 field_cache.pop(id, None)
5792 """ Iterate over the field names with a regular value in cache. """
5793 cache, id = self._recs.env.cache, self._recs.id
5794 dummy = SpecialValue(None)
5795 for name, field in self._recs._fields.iteritems():
5796 if name != 'id' and not isinstance(cache[field].get(id, dummy), SpecialValue):
5800 """ Return the number of fields with a regular value in cache. """
5801 return sum(1 for name in self)
5803 class Model(BaseModel):
5804 """Main super-class for regular database-persisted OpenERP models.
5806 OpenERP models are created by inheriting from this class::
5811 The system will later instantiate the class once per database (on
5812 which the class' module is installed).
5815 _register = False # not visible in ORM registry, meant to be python-inherited only
5816 _transient = False # True in a TransientModel
5818 class TransientModel(BaseModel):
5819 """Model super-class for transient records, meant to be temporarily
5820 persisted, and regularly vacuum-cleaned.
5822 A TransientModel has a simplified access rights management:
5823 all users can create new records, and may only access the
5824 records they created. The super-user has unrestricted access
5825 to all TransientModel records.
5828 _register = False # not visible in ORM registry, meant to be python-inherited only
5831 class AbstractModel(BaseModel):
5832 """Abstract Model super-class for creating an abstract class meant to be
5833 inherited by regular models (Models or TransientModels) but not meant to
5834 be usable on its own, or persisted.
5836 Technical note: we don't want to make AbstractModel the super-class of
5837 Model or BaseModel because it would not make sense to put the main
5838 definition of persistence methods such as create() in it, while we
5839 should still be able to override them within an AbstractModel.
5841 _auto = False # don't create any database backend for AbstractModels
5842 _register = False # not visible in ORM registry, meant to be python-inherited only
5845 def itemgetter_tuple(items):
5846 """ Fixes itemgetter inconsistency (useful in some cases) of not returning
5847 a tuple if len(items) == 1: always returns an n-tuple where n = len(items)
5852 return lambda gettable: (gettable[items[0]],)
5853 return operator.itemgetter(*items)
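# Illustrative sketch: unlike operator.itemgetter, itemgetter_tuple() always
# returns a tuple, e.g.
#
#     itemgetter_tuple([])(row)        # -> ()
#     itemgetter_tuple([0])(row)       # -> (row[0],)
#     itemgetter_tuple([0, 2])(row)    # -> (row[0], row[2])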
5855 def convert_pgerror_23502(model, fields, info, e):
5856 m = re.match(r'^null value in column "(?P<field>\w+)" violates '
5857 r'not-null constraint\n',
5859 field_name = m and m.group('field')
5860 if not m or field_name not in fields:
5861 return {'message': unicode(e)}
5862 message = _(u"Missing required value for the field '%s'.") % field_name
5863 field = fields.get(field_name)
5865 message = _(u"Missing required value for the field '%s' (%s)") % (field['string'], field_name)
5868 'field': field_name,
5871 def convert_pgerror_23505(model, fields, info, e):
5872 m = re.match(r'^duplicate key (?P<field>\w+) violates unique constraint',
5874 field_name = m and m.group('field')
5875 if not m or field_name not in fields:
5876 return {'message': unicode(e)}
5877 message = _(u"The value for the field '%s' already exists.") % field_name
5878 field = fields.get(field_name)
5880 message = _(u"%s This might be '%s' in the current model, or a field "
5881 u"of the same name in an o2m.") % (message, field['string'])
5884 'field': field_name,
5887 PGERROR_TO_OE = defaultdict(
5888 # shape of mapped converters
5889 lambda: (lambda model, fvg, info, pgerror: {'message': unicode(pgerror)}), {
5890 # not_null_violation
5891 '23502': convert_pgerror_23502,
5892 # unique constraint error
5893 '23505': convert_pgerror_23505,
5896 def _normalize_ids(arg, atoms={int, long, str, unicode, NewId}):
5897 """ Normalizes the ids argument for ``browse`` (v7 and v8) to a tuple.
5899 Various implementations were tested on the corpus of all browse() calls
5900 performed during a full crawler run (after having installed all website_*
5901 modules) and this one was the most efficient overall.
5903 A possible bit of correctness was sacrificed by not doing any test on
5904 Iterable and just assuming that any non-atomic type was an iterable of
5909 # much of the corpus is falsy objects (empty list, tuple or set, None)
5913 # `type in set` is significantly faster (because more restrictive) than
5914 # isinstance(arg, set) or issubclass(type, set); and for new-style classes
5915 # obj.__class__ is equivalent to but faster than type(obj). Not relevant
5916 # (and looks much worse) in most cases, but over millions of calls it
5917 # does have a very minor effect.
5918 if arg.__class__ in atoms:
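# Illustrative sketch of the resulting normalizations:
#
#     _normalize_ids(None)         # -> ()
#     _normalize_ids(42)           # -> (42,)
#     _normalize_ids([1, 2, 3])    # -> (1, 2, 3)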
5923 # keep those imports here to avoid dependency cycle errors
5924 from .osv import expression
5925 from .fields import Field, SpecialValue, FailedValue
5927 # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: