# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
"""
    Object Relational Mapping module:
     * Hierarchical structure
     * Constraints consistency and validation
     * Object metadata depends on its status
     * Optimised processing by complex query (multiple actions at once)
     * Default field values
     * Permissions optimisation
     * Persistent object: DB postgresql
     * Data conversion
     * Multi-level caching system
     * Two different inheritance mechanisms
     * Rich set of field types:
          - classical (varchar, integer, boolean, ...)
          - relational (one2many, many2one, many2many)
"""
import copy
import datetime
import itertools
import logging
import pickle
import re

from collections import defaultdict, MutableMapping
from inspect import getmembers

import dateutil.relativedelta
import psycopg2
from lxml import etree

from . import SUPERUSER_ID
from . import api
from . import tools
from .api import Environment
from .exceptions import except_orm, AccessError, MissingError
from .osv import fields
from .osv.query import Query
from .tools import lazy_property
from .tools.config import config
from .tools.misc import CountingStream, DEFAULT_SERVER_DATETIME_FORMAT, DEFAULT_SERVER_DATE_FORMAT
from .tools.safe_eval import safe_eval as eval
from .tools.translate import _

_logger = logging.getLogger(__name__)
_schema = logging.getLogger(__name__ + '.schema')

regex_order = re.compile('^( *([a-z0-9:_]+|"[a-z0-9:_]+")( *desc| *asc)?( *, *|))+$', re.I)
regex_object_name = re.compile(r'^[a-z0-9_.]+$')
onchange_v7 = re.compile(r"^(\w+)\((.*)\)$")
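# Illustrative examples (not part of the original source):
#   regex_order accepts ORDER BY specs such as 'name desc, id' or '"date" asc';
#   onchange_v7 splits old-style onchange strings such as
#   'onchange_partner_id(partner_id)' into (method, arguments) groups.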

AUTOINIT_RECALCULATE_STORED_FIELDS = 1000

def check_object_name(name):
    """ Check if the given name is a valid openerp object name.

        The _name attribute in osv and osv_memory object is subject to
        some restrictions. This function returns True or False whether
        the given name is allowed or not.

        TODO: this is an approximation. The goal in this approximation
        is to disallow uppercase characters (in some places, we quote
        table/column names and in others not, which leads to this kind
        of error:

            psycopg2.ProgrammingError: relation "xxx" does not exist).

        The same restriction should apply to both osv and osv_memory
        objects for consistency.
    """
    if regex_object_name.match(name) is None:
        return False
    return True
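# Illustrative examples (not part of the original source):
#   check_object_name('sale.order')   -> True
#   check_object_name('Sale.Order')   -> False  (uppercase is rejected)
#   check_object_name('sale order')   -> False  (spaces are rejected)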

def raise_on_invalid_object_name(name):
    if not check_object_name(name):
        msg = "The _name attribute %s is not valid." % name
        raise except_orm('ValueError', msg)

# Mapping from PostgreSQL ON DELETE clause names to the single-letter
# confdeltype codes used in pg_constraint
POSTGRES_CONFDELTYPES = {
    'RESTRICT': 'r',
    'NO ACTION': 'a',
    'CASCADE': 'c',
    'SET NULL': 'n',
    'SET DEFAULT': 'd',
}

def intersect(la, lb):
    return filter(lambda x: x in lb, la)

def same_name(f, g):
    """ Test whether functions `f` and `g` are identical or have the same name """
    return f == g or getattr(f, '__name__', 0) == getattr(g, '__name__', 1)

def fix_import_export_id_paths(fieldname):
    """
    Fixes the id fields in import and exports, and splits field paths
    on '/'.

    :param str fieldname: name of the field to import/export
    :return: split field name
    :rtype: list of str
    """
    fixed_db_id = re.sub(r'([^/])\.id', r'\1/.id', fieldname)
    fixed_external_id = re.sub(r'([^/]):id', r'\1/id', fixed_db_id)
    return fixed_external_id.split('/')
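# Illustrative examples (not part of the original source):
#   fix_import_export_id_paths('order_line/product_id:id')
#       -> ['order_line', 'product_id', 'id']
#   fix_import_export_id_paths('partner_id.id')
#       -> ['partner_id', '.id']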

def pg_varchar(size=0):
    """ Returns the VARCHAR declaration for the provided size:

    * If no size (or an empty or negative size is provided) return an
      'infinite' VARCHAR
    * Otherwise return a VARCHAR(n)

    :type int size: varchar size, optional
    :rtype: str
    """
    if size:
        if not isinstance(size, int):
            raise TypeError("VARCHAR parameter should be an int, got %s"
                            % type(size))
        if size > 0:
            return 'VARCHAR(%d)' % size
    return 'VARCHAR'
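# Illustrative examples (not part of the original source):
#   pg_varchar()    -> 'VARCHAR'        (unlimited varchar)
#   pg_varchar(16)  -> 'VARCHAR(16)'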

FIELDS_TO_PGTYPES = {
    fields.boolean: 'bool',
    fields.integer: 'int4',
    fields.text: 'text',
    fields.html: 'text',
    fields.date: 'date',
    fields.datetime: 'timestamp',
    fields.binary: 'bytea',
    fields.many2one: 'int4',
    fields.serialized: 'text',
}

def get_pg_type(f, type_override=None):
    """
    :param fields._column f: field to get a Postgres type for
    :param type type_override: use the provided type for dispatching instead of the field's own type
    :returns: (postgres_identification_type, postgres_type_specification)
    """
    field_type = type_override or type(f)

    if field_type in FIELDS_TO_PGTYPES:
        pg_type = (FIELDS_TO_PGTYPES[field_type], FIELDS_TO_PGTYPES[field_type])
    elif issubclass(field_type, fields.float):
        # NUMERIC with fixed precision when digits are set, FLOAT8 otherwise
        if f.digits:
            pg_type = ('numeric', 'NUMERIC')
        else:
            pg_type = ('float8', 'DOUBLE PRECISION')
    elif issubclass(field_type, (fields.char, fields.reference)):
        pg_type = ('varchar', pg_varchar(f.size))
    elif issubclass(field_type, fields.selection):
        if (isinstance(f.selection, list) and isinstance(f.selection[0][0], int))\
                or getattr(f, 'size', None) == -1:
            pg_type = ('int4', 'INTEGER')
        else:
            pg_type = ('varchar', pg_varchar(getattr(f, 'size', None)))
    elif issubclass(field_type, fields.function):
        if f._type == 'selection':
            pg_type = ('varchar', pg_varchar())
        else:
            pg_type = get_pg_type(f, getattr(fields, f._type))
    else:
        _logger.warning('%s type not supported!', field_type)
        pg_type = None

    return pg_type
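# Illustrative examples (not part of the original source):
#   a fields.char of size 64 maps to ('varchar', 'VARCHAR(64)');
#   a fields.float maps to ('numeric', 'NUMERIC') when digits are set,
#   and to ('float8', 'DOUBLE PRECISION') otherwise.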

class MetaModel(api.Meta):
    """ Metaclass for the models.

    This class is used as the metaclass for the class :class:`BaseModel` to
    discover the models defined in a module (without instantiating them).
    If the automatic discovery is not needed, it is possible to set the
    model's ``_register`` attribute to False.
    """

    module_to_models = {}

    def __init__(self, name, bases, attrs):
        if not self._register:
            self._register = True
            super(MetaModel, self).__init__(name, bases, attrs)
            return

        if not hasattr(self, '_module'):
            # The (OpenERP) module name can be in the `openerp.addons` namespace
            # or not. For instance, module `sale` can be imported as
            # `openerp.addons.sale` (the right way) or `sale` (for backward
            # compatibility).
            module_parts = self.__module__.split('.')
            if len(module_parts) > 2 and module_parts[:2] == ['openerp', 'addons']:
                module_name = self.__module__.split('.')[2]
            else:
                module_name = self.__module__.split('.')[0]
            self._module = module_name

        # Remember which models to instantiate for this module.
        self.module_to_models.setdefault(self._module, []).append(self)
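        # Illustrative example (not part of the original source): a model class
        # defined in openerp/addons/sale/sale.py gets _module = 'sale', whether
        # it was imported as `openerp.addons.sale.sale` or as plain `sale.sale`.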
242 """ Pseudo-ids for new records. """
243 def __nonzero__(self):
246 IdType = (int, long, basestring, NewId)

# special columns automatically created by the ORM
LOG_ACCESS_COLUMNS = ['create_uid', 'create_date', 'write_uid', 'write_date']
MAGIC_COLUMNS = ['id'] + LOG_ACCESS_COLUMNS

class BaseModel(object):
    """ Base class for OpenERP models.

    OpenERP models are created by inheriting from this class' subclasses:

    * :class:`Model` for regular database-persisted models

    * :class:`TransientModel` for temporary data, stored in the database but
      automatically vacuumed every so often

    * :class:`AbstractModel` for abstract super classes meant to be shared by
      multiple inheriting models

    The system automatically instantiates every model once per database. Those
    instances represent the available models on each database, and depend on
    which modules are installed on that database. The actual class of each
    instance is built from the Python classes that create and inherit from the
    corresponding model.

    Every model instance is a "recordset", i.e., an ordered collection of
    records of the model. Recordsets are returned by methods like
    :meth:`~.browse`, :meth:`~.search`, or field accesses. Records have no
    explicit representation: a record is represented as a recordset of one
    record.

    To create a class that should not be instantiated, the _register class
    attribute may be set to False.
    """
    __metaclass__ = MetaModel
    _auto = True # create database backend
    _register = False # Set to false if the model shouldn't be automatically discovered.
    _name = None
    _columns = {}
    _constraints = []
    _custom = False
    _defaults = {}
    _rec_name = None
    _parent_name = 'parent_id'
    _parent_store = False
    _parent_order = False
    _date_name = 'date'
    _order = 'id'
    _sequence = None
    _description = None

    # dict of {field: method}, with method returning the (name_get of records,
    # {id: fold}) to include in the _read_group, if grouped on this field
    _group_by_full = {}

    _transient = False # True in a TransientModel

    # structure:
    #  { 'parent_model': 'm2o_field', ... }
    _inherits = {}

    # Mapping from inherits'd field name to 4-tuple (m, r, f, n) where m is the
    # model from which it is inherits'd, r is the (local) field towards m, f
    # is the _column object itself, and n is the original (i.e. top-most)
    # parent model.
    # Example:
    #  { 'field_name': ('parent_model', 'm2o_field_to_reach_parent',
    #                   field_column_obj, original_parent_model), ... }
    _inherit_fields = {}

    # Mapping field name/column_info object
    # This is similar to _inherit_fields but:
    # 1. includes self fields,
    # 2. uses column_info instead of a tuple.
    _all_columns = {}

    _table = None
    _sql_constraints = []

    CONCURRENCY_CHECK_FIELD = '__last_update'

    def log(self, cr, uid, id, message, secondary=False, context=None):
        return _logger.warning("log() is deprecated. Please use OpenChatter notification system instead of the res.log mechanism.")

    def view_init(self, cr, uid, fields_list, context=None):
        """Override this method to do specific things when a view on the object is opened."""
        pass

    def _field_create(self, cr, context=None):
        """ Create entries in ir_model_fields for all the model's fields.

        If necessary, also create an entry in ir_model, and if called from the
        modules loading scheme (by receiving 'module' in the context), also
        create entries in ir_model_data (for the model and the fields).

        - create an entry in ir_model (if there is not already one),
        - create an entry in ir_model_data (if there is not already one, and if
          'module' is in the context),
        - update ir_model_fields with the fields found in _columns
          (TODO there is some redundancy as _columns is updated from
          ir_model_fields in __init__).
        """
        if context is None:
            context = {}
        cr.execute("SELECT id FROM ir_model WHERE model=%s", (self._name,))
        if not cr.rowcount:
            cr.execute('SELECT nextval(%s)', ('ir_model_id_seq',))
            model_id = cr.fetchone()[0]
            cr.execute("INSERT INTO ir_model (id,model, name, info,state) VALUES (%s, %s, %s, %s, %s)", (model_id, self._name, self._description, self.__doc__, 'base'))
        else:
            model_id = cr.fetchone()[0]
        if 'module' in context:
            name_id = 'model_'+self._name.replace('.', '_')
            cr.execute('select * from ir_model_data where name=%s and module=%s', (name_id, context['module']))
            if not cr.rowcount:
                cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, (now() at time zone 'UTC'), (now() at time zone 'UTC'), %s, %s, %s)", \
                    (name_id, context['module'], 'ir.model', model_id)
                )

        cr.execute("SELECT * FROM ir_model_fields WHERE model=%s", (self._name,))
        cols = {}
        for rec in cr.dictfetchall():
            cols[rec['name']] = rec

        ir_model_fields_obj = self.pool.get('ir.model.fields')

        # sparse fields should be created at the end, as they depend on their
        # serialized field already existing
        model_fields = sorted(self._columns.items(), key=lambda x: 1 if x[1]._type == 'sparse' else 0)
        for (k, f) in model_fields:
            vals = {
                'model_id': model_id,
                'model': self._name,
                'name': k,
                'field_description': f.string,
                'ttype': f._type,
                'relation': f._obj or '',
                'select_level': tools.ustr(int(f.select)),
                'readonly': (f.readonly and 1) or 0,
                'required': (f.required and 1) or 0,
                'selectable': (f.selectable and 1) or 0,
                'translate': (f.translate and 1) or 0,
                'relation_field': f._fields_id if isinstance(f, fields.one2many) else '',
                'serialization_field_id': None,
            }
            if getattr(f, 'serialization_field', None):
                # resolve link to serialization_field if specified by name
                serialization_field_id = ir_model_fields_obj.search(cr, SUPERUSER_ID, [('model','=',vals['model']), ('name', '=', f.serialization_field)])
                if not serialization_field_id:
                    raise except_orm(_('Error'), _("Serialization field `%s` not found for sparse field `%s`!") % (f.serialization_field, k))
                vals['serialization_field_id'] = serialization_field_id[0]

            # When it's a custom field, it does not contain f.select
            if context.get('field_state', 'base') == 'manual':
                if context.get('field_name', '') == k:
                    vals['select_level'] = context.get('select', '0')
                # setting the value so that the problem does NOT occur next time
                elif k in cols:
                    vals['select_level'] = cols[k]['select_level']

            if k not in cols:
                cr.execute('select nextval(%s)', ('ir_model_fields_id_seq',))
                id = cr.fetchone()[0]
                cr.execute("""INSERT INTO ir_model_fields (
                    id, model_id, model, name, field_description, ttype,
                    relation, state, select_level, relation_field, translate, serialization_field_id
                ) VALUES (
                    %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s
                )""", (
                    id, vals['model_id'], vals['model'], vals['name'], vals['field_description'], vals['ttype'],
                    vals['relation'], 'base',
                    vals['select_level'], vals['relation_field'], bool(vals['translate']), vals['serialization_field_id']
                ))
                if 'module' in context:
                    name1 = 'field_' + self._table + '_' + k
                    cr.execute("select name from ir_model_data where name=%s", (name1,))
                    if cr.fetchone():
                        name1 = name1 + "_" + str(id)
                    cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, (now() at time zone 'UTC'), (now() at time zone 'UTC'), %s, %s, %s)", \
                        (name1, context['module'], 'ir.model.fields', id)
                    )
            else:
                for key, val in vals.items():
                    if cols[k][key] != vals[key]:
                        cr.execute('update ir_model_fields set field_description=%s where model=%s and name=%s', (vals['field_description'], vals['model'], vals['name']))
                        cr.execute("""UPDATE ir_model_fields SET
                            model_id=%s, field_description=%s, ttype=%s, relation=%s,
                            select_level=%s, readonly=%s, required=%s, selectable=%s, relation_field=%s, translate=%s, serialization_field_id=%s
                        WHERE
                            model=%s AND name=%s""", (
                            vals['model_id'], vals['field_description'], vals['ttype'],
                            vals['relation'],
                            vals['select_level'], bool(vals['readonly']), bool(vals['required']), bool(vals['selectable']), vals['relation_field'], bool(vals['translate']), vals['serialization_field_id'], vals['model'], vals['name']
                        ))
                        break
        self.invalidate_cache(cr, SUPERUSER_ID)

    @classmethod
    def _add_field(cls, name, field):
        """ Add the given `field` under the given `name` in the class """
        field.set_class_name(cls, name)

        # add field in _fields (for reflection)
        cls._fields[name] = field

        # add field as an attribute, unless another kind of value already exists
        if isinstance(getattr(cls, name, field), Field):
            setattr(cls, name, field)
        else:
            _logger.warning("In model %r, member %r is not a field", cls._name, name)

        if field.store:
            cls._columns[name] = field.to_column()
        else:
            # remove potential column that may be overridden by field
            cls._columns.pop(name, None)

    @classmethod
    def _add_magic_fields(cls):
        """ Introduce magic fields on the current class

        * id is a "normal" field (with a specific getter)
        * create_uid, create_date, write_uid and write_date have become
          "normal" fields
        * $CONCURRENCY_CHECK_FIELD is a computed field with its computing
          method defined dynamically. Uses ``str(datetime.datetime.utcnow())``
          to get the same structure as the previous
          ``(now() at time zone 'UTC')::timestamp``::

              # select (now() at time zone 'UTC')::timestamp;
                        timezone
              ----------------------------
               2013-06-18 08:30:37.292809

              >>> str(datetime.datetime.utcnow())
              '2013-06-18 08:31:32.821177'
        """
        def add(name, field):
            """ add `field` with the given `name` if it does not exist yet """
            if name not in cls._columns and name not in cls._fields:
                cls._add_field(name, field)

        # cyclic import
        from . import fields

        # this field 'id' must override any other column or field
        cls._add_field('id', fields.Id(automatic=True))

        add('display_name', fields.Char(string='Name',
            compute='_compute_display_name', inverse='_inverse_display_name',
            search='_search_display_name', automatic=True))

        if cls._log_access:
            add('create_uid', fields.Many2one('res.users', string='Created by', automatic=True))
            add('create_date', fields.Datetime(string='Created on', automatic=True))
            add('write_uid', fields.Many2one('res.users', string='Last Updated by', automatic=True))
            add('write_date', fields.Datetime(string='Last Updated on', automatic=True))
            last_modified_name = 'compute_concurrency_field_with_access'
        else:
            last_modified_name = 'compute_concurrency_field'

        # this field must override any other column or field
        cls._add_field(cls.CONCURRENCY_CHECK_FIELD, fields.Datetime(
            string='Last Modified on', compute=last_modified_name, automatic=True))

    @api.one
    def compute_concurrency_field(self):
        self[self.CONCURRENCY_CHECK_FIELD] = \
            datetime.datetime.utcnow().strftime(DEFAULT_SERVER_DATETIME_FORMAT)

    @api.one
    @api.depends('create_date', 'write_date')
    def compute_concurrency_field_with_access(self):
        self[self.CONCURRENCY_CHECK_FIELD] = \
            self.write_date or self.create_date or \
            datetime.datetime.utcnow().strftime(DEFAULT_SERVER_DATETIME_FORMAT)
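    # Illustrative note (not part of the original source): reading
    # record['__last_update'] therefore yields write_date when set, else
    # create_date, else the current UTC time for records never stored.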

    #
    # Goal: try to apply inheritance at the instantiation level and
    #       put objects in the pool var
    #
    @classmethod
    def _build_model(cls, pool, cr):
        """ Instantiate a given model.

        This class method instantiates the class of some model (i.e. a class
        deriving from osv or osv_memory). The class might be the class passed
        in argument or, if it inherits from another class, a class constructed
        by combining the two classes.
        """

        # IMPORTANT: the registry contains an instance for each model. The class
        # of each model carries inferred metadata that is shared among the
        # model's instances for this registry, but not among registries. Hence
        # we cannot use that "registry class" for combining model classes by
        # inheritance, since it confuses the metadata inference process.

        # Keep links to non-inherited constraints in cls; this is useful for
        # instance when exporting translations
        cls._local_constraints = cls.__dict__.get('_constraints', [])
        cls._local_sql_constraints = cls.__dict__.get('_sql_constraints', [])

        # determine inherited models
        parents = getattr(cls, '_inherit', [])
        parents = [parents] if isinstance(parents, basestring) else (parents or [])

        # determine the model's name
        name = cls._name or (len(parents) == 1 and parents[0]) or cls.__name__

        # determine the module that introduced the model
        original_module = pool[name]._original_module if name in parents else cls._module

        # build the class hierarchy for the model
        for parent in parents:
            if parent not in pool:
                raise TypeError('The model "%s" specifies a non-existing parent class "%s"\n'
                    'You may need to add a dependency on the parent class\' module.' % (name, parent))
            parent_model = pool[parent]

            # do not use the class of parent_model, since that class contains
            # inferred metadata; use its ancestor instead
            parent_class = type(parent_model).__base__
            # don't inherit custom fields
            columns = dict((key, val)
                for key, val in parent_class._columns.iteritems()
                if not val.manual
            )
            columns.update(cls._columns)

            defaults = dict(parent_class._defaults)
            defaults.update(cls._defaults)

            inherits = dict(parent_class._inherits)
            inherits.update(cls._inherits)

            old_constraints = parent_class._constraints
            new_constraints = cls._constraints
            # filter out from old_constraints the ones overridden by a
            # constraint with the same function name in new_constraints
            constraints = new_constraints + [oldc
                for oldc in old_constraints
                if not any(newc[2] == oldc[2] and same_name(newc[0], oldc[0])
                           for newc in new_constraints)
            ]

            sql_constraints = cls._sql_constraints + \
                              parent_class._sql_constraints

            attrs = {
                '_name': name,
                '_register': False,
                '_columns': columns,
                '_defaults': defaults,
                '_inherits': inherits,
                '_constraints': constraints,
                '_sql_constraints': sql_constraints,
            }
            cls = type(name, (cls, parent_class), attrs)

        # introduce the "registry class" of the model;
        # duplicate some attributes so that the ORM can modify them
        attrs = {
            '_register': False,
            '_columns': dict(cls._columns),
            '_defaults': dict(cls._defaults),
            '_inherits': dict(cls._inherits),
            '_constraints': list(cls._constraints),
            '_sql_constraints': list(cls._sql_constraints),
            '_original_module': original_module,
        }
        cls = type(cls._name, (cls,), attrs)

        # float fields are registry-dependent (digit attribute); duplicate them
        # in the registry class
        for key, col in cls._columns.items():
            if col._type == 'float':
                cls._columns[key] = copy.copy(col)

        # link the class to the registry, and update the registry
        cls.pool = pool
        # Note: we have to insert an instance into the registry now, because it
        # can trigger some stuff on other models which expect this new instance
        # (like method _inherits_reload_src())
        model = object.__new__(cls)
        cls._model = model              # backward compatibility
        pool.add(name, model)

        # determine description, table, sequence and log_access
        if not cls._description:
            cls._description = cls._name
        if not cls._table:
            cls._table = cls._name.replace('.', '_')
        if not cls._sequence:
            cls._sequence = cls._table + '_id_seq'
        if not hasattr(cls, '_log_access'):
            # If _log_access is not specified, it is the same value as _auto.
            cls._log_access = cls._auto

        # transience
        if cls.is_transient():
            cls._transient_check_count = 0
            cls._transient_max_count = config.get('osv_memory_count_limit')
            cls._transient_max_hours = config.get('osv_memory_age_limit')
            assert cls._log_access, \
                "TransientModels must have log_access turned on, " \
                "in order to implement their access rights policy"

        # retrieve new-style fields and duplicate them (to avoid clashes with
        # inheritance between different models)
        cls._fields = {}
        for attr, field in getmembers(cls, Field.__instancecheck__):
            if not field._origin:
                cls._add_field(attr, field.copy())

        # introduce magic fields
        cls._add_magic_fields()

        # register stuff about low-level function fields and custom fields
        cls._init_function_fields(pool, cr)
        cls._init_manual_fields(pool, cr)

        # process _inherits
        cls._inherits_check()
        cls._inherits_reload()

        # register constraints and onchange methods
        cls._init_constraints_onchanges()

        # check defaults
        for k in cls._defaults:
            assert k in cls._fields, \
                "Model %s has a default for nonexistent field %s" % (cls._name, k)

        # restart columns
        for column in cls._columns.itervalues():
            column.restart()

        # validate rec_name
        if cls._rec_name:
            assert cls._rec_name in cls._fields, \
                "Invalid rec_name %s for model %s" % (cls._rec_name, cls._name)
        elif 'name' in cls._fields:
            cls._rec_name = 'name'

        # prepare ormcache, which must be shared by all instances of the model
        cls._ormcache = {}

        # complete the initialization of model
        model.__init__(pool, cr)
        return model

    @classmethod
    def _init_function_fields(cls, pool, cr):
        # initialize the list of non-stored function fields for this model
        pool._pure_function_fields[cls._name] = []

        # process store of low-level function fields
        for fname, column in cls._columns.iteritems():
            if hasattr(column, 'digits_change'):
                column.digits_change(cr)
            # filter out existing store about this field
            pool._store_function[cls._name] = [
                stored
                for stored in pool._store_function.get(cls._name, [])
                if (stored[0], stored[1]) != (cls._name, fname)
            ]
            if not isinstance(column, fields.function):
                continue
            if not column.store:
                # register it on the pool for invalidation
                pool._pure_function_fields[cls._name].append(fname)
                continue
            # process store parameter
            store = column.store
            if store is True:
                get_ids = lambda self, cr, uid, ids, c={}: ids
                store = {cls._name: (get_ids, None, column.priority, None)}
            for model, spec in store.iteritems():
                if len(spec) == 4:
                    (fnct, fields2, order, length) = spec
                elif len(spec) == 3:
                    (fnct, fields2, order) = spec
                    length = None
                else:
                    raise except_orm('Error',
                        ('Invalid function definition %s in object %s !\nYou must use the definition: store={object:(fnct, fields, priority, time length)}.' % (fname, cls._name)))
                pool._store_function.setdefault(model, [])
                t = (cls._name, fname, fnct, tuple(fields2) if fields2 else None, order, length)
                if t not in pool._store_function[model]:
                    pool._store_function[model].append(t)
                    pool._store_function[model].sort(key=lambda x: x[4])
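    # Illustrative example (not part of the original source): a stored
    # function field declared with
    #     store = {'res.partner': (_get_partner_ids, ['name'], 10, None)}
    # (where _get_partner_ids is a hypothetical helper) registers the trigger
    # tuple (model, fname, fnct, ('name',), 10, None); triggers on a model
    # are kept sorted by their priority, i.e. element x[4].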

    @classmethod
    def _init_manual_fields(cls, pool, cr):
        # Check whether the query is already done
        if pool.fields_by_model is not None:
            manual_fields = pool.fields_by_model.get(cls._name, [])
        else:
            cr.execute('SELECT * FROM ir_model_fields WHERE model=%s AND state=%s', (cls._name, 'manual'))
            manual_fields = cr.dictfetchall()

        for field in manual_fields:
            if field['name'] in cls._columns:
                continue
            attrs = {
                'string': field['field_description'],
                'required': bool(field['required']),
                'readonly': bool(field['readonly']),
                'domain': eval(field['domain']) if field['domain'] else None,
                'size': field['size'] or None,
                'ondelete': field['on_delete'],
                'translate': (field['translate']),
                'manual': True,
                #'select': int(field['select_level'])
            }
            if field['serialization_field_id']:
                cr.execute('SELECT name FROM ir_model_fields WHERE id=%s', (field['serialization_field_id'],))
                attrs.update({'serialization_field': cr.fetchone()[0], 'type': field['ttype']})
                if field['ttype'] in ['many2one', 'one2many', 'many2many']:
                    attrs.update({'relation': field['relation']})
                cls._columns[field['name']] = fields.sparse(**attrs)
            elif field['ttype'] == 'selection':
                cls._columns[field['name']] = fields.selection(eval(field['selection']), **attrs)
            elif field['ttype'] == 'reference':
                cls._columns[field['name']] = fields.reference(selection=eval(field['selection']), **attrs)
            elif field['ttype'] == 'many2one':
                cls._columns[field['name']] = fields.many2one(field['relation'], **attrs)
            elif field['ttype'] == 'one2many':
                cls._columns[field['name']] = fields.one2many(field['relation'], field['relation_field'], **attrs)
            elif field['ttype'] == 'many2many':
                _rel1 = field['relation'].replace('.', '_')
                _rel2 = field['model'].replace('.', '_')
                _rel_name = 'x_%s_%s_%s_rel' % (_rel1, _rel2, field['name'])
                cls._columns[field['name']] = fields.many2many(field['relation'], _rel_name, 'id1', 'id2', **attrs)
            else:
                cls._columns[field['name']] = getattr(fields, field['ttype'])(**attrs)

    @classmethod
    def _init_constraints_onchanges(cls):
        # store sql constraint error messages
        for (key, _, msg) in cls._sql_constraints:
            cls.pool._sql_error[cls._table + '_' + key] = msg

        # collect constraint and onchange methods
        cls._constraint_methods = []
        cls._onchange_methods = defaultdict(list)
        for attr, func in getmembers(cls, callable):
            if hasattr(func, '_constrains'):
                if not all(name in cls._fields for name in func._constrains):
                    _logger.warning("@constrains%r parameters must be field names", func._constrains)
                cls._constraint_methods.append(func)
            if hasattr(func, '_onchange'):
                if not all(name in cls._fields for name in func._onchange):
                    _logger.warning("@onchange%r parameters must be field names", func._onchange)
                for name in func._onchange:
                    cls._onchange_methods[name].append(func)
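    # Illustrative example (not part of the original source): the methods
    # collected above are declared on models as
    #     @api.constrains('amount')
    #     def _check_amount(self):
    #         ...
    #     @api.onchange('partner_id')
    #     def _onchange_partner_id(self):
    #         ...
    # The decorators set func._constrains / func._onchange, which is what
    # getmembers() detects here.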

    def __new__(cls):
        # In the past, this method was registering the model class in the server.
        # This job is now done entirely by the metaclass MetaModel.
        #
        # Do not create an instance here. Model instances are created by method
        # _build_model().
        return None

    def __init__(self, pool, cr):
        # this method no longer does anything; kept for backward compatibility
        pass

    def __export_xml_id(self):
        """ Return a valid xml_id for the record `self`. """
        ir_model_data = self.sudo().env['ir.model.data']
        data = ir_model_data.search([('model', '=', self._name), ('res_id', '=', self.id)])
        if data:
            return '%s.%s' % (data.module, data.name)
        else:
            postfix = 0
            name = '%s_%s' % (self._table, self.id)
            while ir_model_data.search([('module', '=', '__export__'), ('name', '=', name)]):
                postfix += 1
                name = '%s_%s_%s' % (self._table, self.id, postfix)
            ir_model_data.create({
                'model': self._name,
                'res_id': self.id,
                'module': '__export__',
                'name': name,
            })
            return '__export__.' + name

    def __export_rows(self, fields):
        """ Export fields of the records in `self`.

            :param fields: list of lists of fields to traverse
            :return: list of lists of corresponding values
        """
        lines = []
        for record in self:
            # main line of record, initially empty
            current = [''] * len(fields)
            lines.append(current)

            # list of primary fields followed by secondary field(s)
            primary_done = []

            # process column by column
            for i, path in enumerate(fields):
                if not path:
                    continue

                name = path[0]
                if name in primary_done:
                    continue

                if name == '.id':
                    current[i] = str(record.id)
                elif name == 'id':
                    current[i] = record.__export_xml_id()
                else:
                    field = record._fields[name]
                    value = record[name]

                    # this part could be simpler, but it has to be done this way
                    # in order to reproduce the former behavior
                    if not isinstance(value, BaseModel):
                        current[i] = field.convert_to_export(value, self.env)
                    else:
                        primary_done.append(name)

                        # This is a special case, its strange behavior is intended!
                        if field.type == 'many2many' and len(path) > 1 and path[1] == 'id':
                            xml_ids = [r.__export_xml_id() for r in value]
                            current[i] = ','.join(xml_ids) or False
                        else:
                            # recursively export the fields that follow name
                            fields2 = [(p[1:] if p and p[0] == name else []) for p in fields]
                            lines2 = value.__export_rows(fields2)
                            if lines2:
                                # merge first line with record's main line
                                for j, val in enumerate(lines2[0]):
                                    if val:
                                        current[j] = val
                                # check value of current field
                                if not current[i]:
                                    # assign xml_ids, and forget about remaining lines
                                    xml_ids = [item[1] for item in value.name_get()]
                                    current[i] = ','.join(xml_ids)
                                else:
                                    # append the other lines at the end
                                    lines += lines2[1:]
                            else:
                                current[i] = False

        return lines

    @api.multi
    def export_data(self, fields_to_export, raw_data=False):
        """ Export fields for selected objects

            :param fields_to_export: list of fields
            :param raw_data: True to return value in native Python type
            :rtype: dictionary with a *datas* matrix

            This method is used when exporting data via client menu
        """
        fields_to_export = map(fix_import_export_id_paths, fields_to_export)
        if raw_data:
            self = self.with_context(export_raw_data=True)
        return {'datas': self.__export_rows(fields_to_export)}

    def import_data(self, cr, uid, fields, datas, mode='init', current_module='', noupdate=False, context=None, filename=None):
        """
        .. deprecated:: 7.0
            Use :meth:`~load` instead

        Import given data in given module

        This method is used when importing data via client menu.

        Example of fields to import for a sale.order::

            .id,                         (=database_id)
            partner_id,                  (=name_search)
            order_line/.id,              (=database_id)
            order_line/name,
            order_line/product_id/id,    (=xml id)
            order_line/price_unit,
            order_line/product_uom_qty,
            order_line/product_uom/id    (=xml_id)

        This method returns a 4-tuple with the following structure::

            (return_code, errored_resource, error_message, unused)

        * The first item is a return code, it is ``-1`` in case of
          import error, or the last imported row number in case of success
        * The second item contains the record data dict that failed to import
          in case of error, otherwise it's 0
        * The third item contains an error message string in case of error,
          otherwise it's 0
        * The last item is currently unused, with no specific semantics

        :param fields: list of fields to import
        :param datas: data to import
        :param mode: 'init' or 'update' for record creation
        :param current_module: module name
        :param noupdate: flag for record creation
        :param filename: optional file to store partial import state for recovery
        :returns: 4-tuple in the form (return_code, errored_resource, error_message, unused)
        :rtype: (int, dict or 0, str or 0, str or 0)
        """
        context = dict(context) if context is not None else {}
        context['_import_current_module'] = current_module

        fields = map(fix_import_export_id_paths, fields)
        ir_model_data_obj = self.pool.get('ir.model.data')

        def log(m):
            if m['type'] == 'error':
                raise Exception(m['message'])

        position = 0
        if config.get('import_partial') and filename:
            with open(config.get('import_partial'), 'rb') as partial_import_file:
                data = pickle.load(partial_import_file)
                position = data.get(filename, 0)

        try:
            for res_id, xml_id, res, info in self._convert_records(cr, uid,
                            self._extract_records(cr, uid, fields, datas,
                                                  context=context, log=log),
                            context=context, log=log):
                ir_model_data_obj._update(cr, uid, self._name,
                     current_module, res, mode=mode, xml_id=xml_id,
                     noupdate=noupdate, res_id=res_id, context=context)
                position = info.get('rows', {}).get('to', 0) + 1
                if config.get('import_partial') and filename and (not (position % 100)):
                    with open(config.get('import_partial'), 'rb') as partial_import:
                        data = pickle.load(partial_import)
                    data[filename] = position
                    with open(config.get('import_partial'), 'wb') as partial_import:
                        pickle.dump(data, partial_import)
                    if context.get('defer_parent_store_computation'):
                        self._parent_store_compute(cr)
                    cr.commit()
        except Exception, e:
            cr.rollback()
            return -1, {}, 'Line %d : %s' % (position + 1, tools.ustr(e)), ''

        if context.get('defer_parent_store_computation'):
            self._parent_store_compute(cr)
        return position, 0, 0, 0

    def load(self, cr, uid, fields, data, context=None):
        """
        Attempts to load the data matrix, and returns a list of ids (or
        ``False`` if there was an error and no id could be generated) and a
        list of messages.

        The ids are those of the records created and saved (in database), in
        the same order they were extracted from the file. They can be passed
        directly to :meth:`~read`

        :param fields: list of fields to import, at the same index as the corresponding data
        :type fields: list(str)
        :param data: row-major matrix of data to import
        :type data: list(list(str))
        :param dict context:
        :returns: {ids: list(int)|False, messages: [Message]}
        """
        cr.execute('SAVEPOINT model_load')
        messages = []

        fields = map(fix_import_export_id_paths, fields)
        ModelData = self.pool['ir.model.data']
        ModelData.clear_caches()
        fg = self.fields_get(cr, uid, context=context)

        mode = 'init'
        current_module = ''
        noupdate = False

        ids = []
        for id, xid, record, info in self._convert_records(cr, uid,
                self._extract_records(cr, uid, fields, data,
                                      context=context, log=messages.append),
                context=context, log=messages.append):
            try:
                cr.execute('SAVEPOINT model_load_save')
            except psycopg2.InternalError, e:
                # broken transaction, exit and hope the source error was
                # already logged
                if not any(message['type'] == 'error' for message in messages):
                    messages.append(dict(info, type='error', message=
                        u"Unknown database error: '%s'" % e))
                break
            try:
                ids.append(ModelData._update(cr, uid, self._name,
                     current_module, record, mode=mode, xml_id=xid,
                     noupdate=noupdate, res_id=id, context=context))
                cr.execute('RELEASE SAVEPOINT model_load_save')
            except psycopg2.Warning, e:
                messages.append(dict(info, type='warning', message=str(e)))
                cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
            except psycopg2.Error, e:
                messages.append(dict(
                    info, type='error',
                    **PGERROR_TO_OE[e.pgcode](self, fg, info, e)))
                # Failed to write, log to messages, rollback savepoint (to
                # avoid broken transaction) and keep going
                cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
        if any(message['type'] == 'error' for message in messages):
            cr.execute('ROLLBACK TO SAVEPOINT model_load')
            ids = False
        return {'ids': ids, 'messages': messages}

    def _extract_records(self, cr, uid, fields_, data,
                         context=None, log=lambda a: None):
        """ Generates record dicts from the data sequence.

        The result is a generator of dicts mapping field names to raw
        (unconverted, unvalidated) values.

        For relational fields, if sub-fields were provided the value will be
        a list of sub-records

        The following sub-fields may be set on the record (by key):
        * None is the name_get for the record (to use with name_create/name_search)
        * "id" is the External ID for the record
        * ".id" is the Database ID for the record
        """
        columns = dict((k, v.column) for k, v in self._all_columns.iteritems())
        # Fake columns to avoid special cases in extractor
        columns[None] = fields.char('rec_name')
        columns['id'] = fields.char('External ID')
        columns['.id'] = fields.integer('Database ID')

        # m2o fields can't be on multiple lines so exclude them from the
        # is_relational field rows filter, but special-case it later on to
        # be handled with relational fields (as it can have subfields)
        is_relational = lambda field: columns[field]._type in ('one2many', 'many2many', 'many2one')
        get_o2m_values = itemgetter_tuple(
            [index for index, field in enumerate(fields_)
             if columns[field[0]]._type == 'one2many'])
        get_nono2m_values = itemgetter_tuple(
            [index for index, field in enumerate(fields_)
             if columns[field[0]]._type != 'one2many'])

        # Checks if the provided row has any non-empty non-relational field
        def only_o2m_values(row, f=get_nono2m_values, g=get_o2m_values):
            return any(g(row)) and not any(f(row))

        index = 0
        while True:
            if index >= len(data): return

            row = data[index]
            # copy non-relational fields to record dict
            record = dict((field[0], value)
                          for field, value in itertools.izip(fields_, row)
                          if not is_relational(field[0]))

            # Get all following rows which have relational values attached to
            # the current record (no non-relational values)
            record_span = itertools.takewhile(
                only_o2m_values, itertools.islice(data, index + 1, None))
            # stitch record row back on for relational fields
            record_span = list(itertools.chain([row], record_span))
            for relfield in set(
                    field[0] for field in fields_
                    if is_relational(field[0])):
                column = columns[relfield]
                # FIXME: how to not use _obj without relying on fields_get?
                Model = self.pool[column._obj]

                # get only cells for this sub-field, should be strictly
                # non-empty, field path [None] is for name_get column
                indices, subfields = zip(*((index, field[1:] or [None])
                                           for index, field in enumerate(fields_)
                                           if field[0] == relfield))

                # return all rows which have at least one value for the
                # subfields of relfield
                relfield_data = filter(any, map(itemgetter_tuple(indices), record_span))
                record[relfield] = [subrecord
                    for subrecord, _subinfo in Model._extract_records(
                        cr, uid, subfields, relfield_data,
                        context=context, log=log)]

            yield record, {'rows': {
                'from': index,
                'to': index + len(record_span) - 1,
            }}
            index += len(record_span)
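    # Illustrative example (not part of the original source): with
    #     fields_ = [['name'], ['order_line', 'product_id']]
    # a row that only fills the 'order_line/product_id' cell is folded into
    # the record opened by the previous row, so two input rows can yield a
    # single dict such as
    #     {'name': 'SO001', 'order_line': [{'product_id': ...}, {'product_id': ...}]}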

    def _convert_records(self, cr, uid, records,
                         context=None, log=lambda a: None):
        """ Converts records from the source iterable (recursive dicts of
        strings) into forms which can be written to the database (via
        self.create or (ir.model.data)._update)

        :returns: a list of triplets of (id, xid, record)
        :rtype: list((int|None, str|None, dict))
        """
        if context is None: context = {}
        Converter = self.pool['ir.fields.converter']
        columns = dict((k, v.column) for k, v in self._all_columns.iteritems())
        Translation = self.pool['ir.translation']
        field_names = dict(
            (f, (Translation._get_source(cr, uid, self._name + ',' + f, 'field',
                                         context.get('lang'))
                 or column.string))
            for f, column in columns.iteritems())

        convert = Converter.for_model(cr, uid, self, context=context)

        def _log(base, field, exception):
            type = 'warning' if isinstance(exception, Warning) else 'error'
            # logs the logical (not human-readable) field name for automated
            # processing of response, but injects human readable in message
            record = dict(base, type=type, field=field,
                          message=unicode(exception.args[0]) % base)
            if len(exception.args) > 1 and exception.args[1]:
                record.update(exception.args[1])
            log(record)

        stream = CountingStream(records)
        for record, extras in stream:
            dbid = False
            xid = False
            # name_get/name_create
            if None in record: pass
            # xid
            if 'id' in record:
                xid = record['id']
            # dbid
            if '.id' in record:
                try:
                    dbid = int(record['.id'])
                except ValueError:
                    # in case of overridden id column
                    dbid = record['.id']
                if not self.search(cr, uid, [('id', '=', dbid)], context=context):
                    log(dict(extras,
                        type='error',
                        record=stream.index,
                        field='.id',
                        message=_(u"Unknown database identifier '%s'") % dbid))
                    dbid = False

            converted = convert(record, lambda field, err: \
                _log(dict(extras, record=stream.index, field=field_names[field]), field, err))

            yield dbid, xid, converted, dict(extras, record=stream.index)

    @api.multi
    def _validate_fields(self, field_names):
        field_names = set(field_names)

        # old-style constraint methods
        trans = self.env['ir.translation']
        cr, uid, context = self.env.args
        ids = self.ids
        errors = []
        for fun, msg, names in self._constraints:
            try:
                # validation must be context-independent; call `fun` without context
                valid = not (set(names) & field_names) or fun(self._model, cr, uid, ids)
                extra_error = None
            except Exception, e:
                _logger.debug('Exception while validating constraint', exc_info=True)
                valid = False
                extra_error = tools.ustr(e)
            if not valid:
                if callable(msg):
                    res_msg = msg(self._model, cr, uid, ids, context=context)
                    if isinstance(res_msg, tuple):
                        template, params = res_msg
                        res_msg = template % params
                else:
                    res_msg = trans._get_source(self._name, 'constraint', self.env.lang, msg)
                if extra_error:
                    res_msg += "\n\n%s\n%s" % (_('Error details:'), extra_error)
                errors.append(
                    _("Field(s) `%s` failed against a constraint: %s") %
                    (', '.join(names), res_msg)
                )
        if errors:
            raise except_orm('ValidateError', '\n'.join(errors))

        # new-style constraint methods
        for check in self._constraint_methods:
            if set(check._constrains) & field_names:
                check(self)

    def default_get(self, cr, uid, fields_list, context=None):
        """ Return default values for the fields in `fields_list`. Default
        values are determined by the context, user defaults, and the model
        itself.

        :param fields_list: a list of field names
        :return: a dictionary mapping each field name to its corresponding
            default value; the keys of the dictionary are the fields in
            `fields_list` that have a default value different from ``False``.

        This method should not be overridden. In order to change the
        mechanism for determining default values, you should override method
        :meth:`add_default_value` instead.
        """
        # trigger view init hook
        self.view_init(cr, uid, fields_list, context)

        # use a new record to determine default values
        record = self.new(cr, uid, {}, context=context)
        for name in fields_list:
            if name in self._fields:
                record[name]            # force evaluation of defaults

        # retrieve defaults from record's cache
        return self._convert_to_write(record._cache)
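    # Illustrative example (not part of the original source):
    #     model.default_get(cr, uid, ['name', 'active'])
    # might return {'active': True}: fields whose default evaluates to
    # False are simply left out of the result.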

    def add_default_value(self, field):
        """ Set the default value of `field` to the new record `self`.
            The value must be assigned to `self`.
        """
        assert not self.id, "Expected new record: %s" % self
        cr, uid, context = self.env.args
        name = field.name

        # 1. look up context
        key = 'default_' + name
        if key in context:
            self[name] = context[key]
            return

        # 2. look up ir_values
        #    Note: performance is good, because get_defaults_dict is cached!
        ir_values_dict = self.env['ir.values'].get_defaults_dict(self._name)
        if name in ir_values_dict:
            self[name] = ir_values_dict[name]
            return

        # 3. look up property fields
        #    TODO: get rid of this one
        column = self._columns.get(name)
        if isinstance(column, fields.property):
            self[name] = self.env['ir.property'].get(name, self._name)
            return

        # 4. look up _defaults
        if name in self._defaults:
            value = self._defaults[name]
            if callable(value):
                value = value(self._model, cr, uid, context)
            self[name] = value
            return

        # 5. delegate to field
        field.determine_default(self)

    def fields_get_keys(self, cr, user, context=None):
        res = self._columns.keys()
        # TODO I believe this loop can be replaced by
        # res.extend(self._inherit_fields.keys())
        for parent in self._inherits:
            res.extend(self.pool[parent].fields_get_keys(cr, user, context))
        return res

    def _rec_name_fallback(self, cr, uid, context=None):
        rec_name = self._rec_name
        if rec_name not in self._columns:
            rec_name = self._columns.keys()[0] if len(self._columns.keys()) > 0 else "id"
        return rec_name

    #
    # Override this method if you need a window title that depends on the context
    #
    def view_header_get(self, cr, user, view_id=None, view_type='form', context=None):
        return False

    def user_has_groups(self, cr, uid, groups, context=None):
        """Return true if the user is at least member of one of the groups
        in `groups`. Typically used to resolve `groups` attribute
        in view and model definitions.

        :param str groups: comma-separated list of fully-qualified group
            external IDs, e.g.: ``base.group_user,base.group_system``
        :return: True if the current user is a member of one of the given groups
        """
        return any(self.pool['res.users'].has_group(cr, uid, group_ext_id)
                   for group_ext_id in groups.split(','))
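    # Illustrative example (not part of the original source):
    #     self.user_has_groups(cr, uid, 'base.group_user,base.group_system')
    # returns True as soon as the user belongs to any one of the listed groups.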

    def _get_default_form_view(self, cr, user, context=None):
        """ Generates a default single-line form view using all fields
        of the current model except the m2m and o2m ones.

        :param cr: database cursor
        :param int user: user id
        :param dict context: connection context
        :returns: a form view as an lxml document
        :rtype: etree._Element
        """
        view = etree.Element('form', string=self._description)
        group = etree.SubElement(view, 'group', col="4")
        for fname, field in self._fields.iteritems():
            if field.automatic or field.type in ('one2many', 'many2many'):
                continue

            etree.SubElement(group, 'field', name=fname)
            if field.type == 'text':
                etree.SubElement(group, 'newline')
        return view

    def _get_default_search_view(self, cr, user, context=None):
        """ Generates a single-field search view, based on _rec_name.

        :param cr: database cursor
        :param int user: user id
        :param dict context: connection context
        :returns: a search view as an lxml document
        :rtype: etree._Element
        """
        view = etree.Element('search', string=self._description)
        etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))
        return view

    def _get_default_tree_view(self, cr, user, context=None):
        """ Generates a single-field tree view, based on _rec_name.

        :param cr: database cursor
        :param int user: user id
        :param dict context: connection context
        :returns: a tree view as an lxml document
        :rtype: etree._Element
        """
        view = etree.Element('tree', string=self._description)
        etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))
        return view

    def _get_default_calendar_view(self, cr, user, context=None):
        """ Generates a default calendar view by trying to infer
        calendar fields from a number of pre-set attribute names

        :param cr: database cursor
        :param int user: user id
        :param dict context: connection context
        :returns: a calendar view
        :rtype: etree._Element
        """
        def set_first_of(seq, in_, to):
            """Sets the first value of `seq` also found in `in_` to
            the `to` attribute of the view being closed over.

            Returns whether it's found a suitable value (and set it on
            the attribute) or not
            """
            for item in seq:
                if item in in_:
                    view.set(to, item)
                    return True
            return False

        view = etree.Element('calendar', string=self._description)
        etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))

        if self._date_name not in self._columns:
            date_found = False
            for dt in ['date', 'date_start', 'x_date', 'x_date_start']:
                if dt in self._columns:
                    self._date_name = dt
                    date_found = True
                    break

            if not date_found:
                raise except_orm(_('Invalid Object Architecture!'), _("Insufficient fields for Calendar View!"))
        view.set('date_start', self._date_name)

        set_first_of(["user_id", "partner_id", "x_user_id", "x_partner_id"],
                     self._columns, 'color')

        if not set_first_of(["date_stop", "date_end", "x_date_stop", "x_date_end"],
                            self._columns, 'date_stop'):
            if not set_first_of(["date_delay", "planned_hours", "x_date_delay", "x_planned_hours"],
                                self._columns, 'date_delay'):
                raise except_orm(
                    _('Invalid Object Architecture!'),
                    _("Insufficient fields to generate a Calendar View for %s, missing a date_stop or a date_delay") % self._name)

        return view

    def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
        """
        Get the detailed composition of the requested view like fields, model, view architecture

        :param view_id: id of the view or None
        :param view_type: type of the view to return if view_id is None ('form', 'tree', ...)
        :param toolbar: true to include contextual actions
        :param submenu: deprecated
        :return: dictionary describing the composition of the requested view (including inherited views and extensions)
        :raise AttributeError:
            * if the inherited view has unknown position to work with other than 'before', 'after', 'inside', 'replace'
            * if some tag other than 'position' is found in parent view
        :raise Invalid ArchitectureError: if there is a view type other than form, tree, calendar, search, etc. defined in the structure
        """
        if context is None: context = {}
        View = self.pool['ir.ui.view']

        result = {
            'model': self._name,
            'field_parent': False,
        }

        # try to find a view_id if none provided
        if view_id is None:
            # <view_type>_view_ref in context can be used to override the default view
            view_ref_key = view_type + '_view_ref'
            view_ref = context.get(view_ref_key)
            if view_ref:
                if '.' in view_ref:
                    module, view_ref = view_ref.split('.', 1)
                    cr.execute("SELECT res_id FROM ir_model_data WHERE model='ir.ui.view' AND module=%s AND name=%s", (module, view_ref))
                    view_ref_res = cr.fetchone()
                    if view_ref_res:
                        view_id = view_ref_res[0]
                else:
                    _logger.warning('%r requires a fully-qualified external id (got: %r for model %s). '
                        'Please use the complete `module.view_id` form instead.', view_ref_key, view_ref,
                        self._name)

            if not view_id:
                # otherwise try to find the lowest priority matching ir.ui.view
                view_id = View.default_view(cr, uid, self._name, view_type, context=context)

        # context for post-processing might be overridden
        ctx = context
        if view_id:
            # read the view with inherited views applied
            root_view = View.read_combined(cr, uid, view_id, fields=['id', 'name', 'field_parent', 'type', 'model', 'arch'], context=context)
            result['arch'] = root_view['arch']
            result['name'] = root_view['name']
            result['type'] = root_view['type']
            result['view_id'] = root_view['id']
            result['field_parent'] = root_view['field_parent']
            # override context for postprocessing
            if root_view.get('model') != self._name:
                ctx = dict(context, base_model_name=root_view.get('model'))
        else:
            # fallback on default views methods if no ir.ui.view could be found
            try:
                get_func = getattr(self, '_get_default_%s_view' % view_type)
                arch_etree = get_func(cr, uid, context)
                result['arch'] = etree.tostring(arch_etree, encoding='utf-8')
                result['type'] = view_type
                result['name'] = 'default'
            except AttributeError:
                raise except_orm(_('Invalid Architecture!'), _("No default view of type '%s' could be found !") % view_type)

        # Apply post processing, groups and modifiers etc...
        xarch, xfields = View.postprocess_and_fields(cr, uid, self._name, etree.fromstring(result['arch']), view_id, context=ctx)
        result['arch'] = xarch
        result['fields'] = xfields

        # Add related action information if asked
        if toolbar:
            toclean = ('report_sxw_content', 'report_rml_content', 'report_sxw', 'report_rml', 'report_sxw_content_data', 'report_rml_content_data')
            def clean(x):
                x = x[2]
                for key in toclean:
                    x.pop(key, None)
                return x
            ir_values_obj = self.pool.get('ir.values')
            resprint = ir_values_obj.get(cr, uid, 'action', 'client_print_multi', [(self._name, False)], False, context)
            resaction = ir_values_obj.get(cr, uid, 'action', 'client_action_multi', [(self._name, False)], False, context)
            resrelate = ir_values_obj.get(cr, uid, 'action', 'client_action_relate', [(self._name, False)], False, context)
            resaction = [clean(action) for action in resaction if view_type == 'tree' or not action[2].get('multi')]
            resprint = [clean(print_) for print_ in resprint if view_type == 'tree' or not print_[2].get('multi')]
            # When multi="True" is set, the action will only show up in the
            # More menu of the list view
            resrelate = [clean(action) for action in resrelate
                         if (action[2].get('multi') and view_type == 'tree') or (not action[2].get('multi') and view_type == 'form')]

            for x in itertools.chain(resprint, resaction, resrelate):
                x['string'] = x['name']

            result['toolbar'] = {
                'print': resprint,
                'action': resaction,
                'relate': resrelate,
            }
        return result

    def get_formview_id(self, cr, uid, id, context=None):
        """ Return a view id to open the document with. This method is meant to be
        overridden in addons that want to give specific view ids, for example.

        :param int id: id of the document to open
        """
        return False

    def get_formview_action(self, cr, uid, id, context=None):
        """ Return an action to open the document. This method is meant to be
        overridden in addons that want to give specific view ids, for example.

        :param int id: id of the document to open
        """
        view_id = self.get_formview_id(cr, uid, id, context=context)
        return {
            'type': 'ir.actions.act_window',
            'res_model': self._name,
            'view_type': 'form',
            'view_mode': 'form',
            'views': [(view_id, 'form')],
            'target': 'current',
            'res_id': id,
        }

    def _view_look_dom_arch(self, cr, uid, node, view_id, context=None):
        return self.pool['ir.ui.view'].postprocess_and_fields(
            cr, uid, self._name, node, view_id, context=context)

    def search_count(self, cr, user, args, context=None):
        res = self.search(cr, user, args, context=context, count=True)
        if isinstance(res, list):
            return len(res)
        return res

    @api.returns('self')
    def search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False):
        """
        Search for records based on a search domain.

        :param cr: database cursor
        :param user: current user id
        :param args: list of tuples specifying the search domain [('field_name', 'operator', value), ...]. Pass an empty list to match all records.
        :param offset: optional number of results to skip in the returned values (default: 0)
        :param limit: optional max number of records to return (default: **None**)
        :param order: optional columns to sort by (default: self._order=id)
        :param context: optional context arguments, like lang, time zone
        :type context: dictionary
        :param count: optional (default: **False**), if **True**, returns only the number of records matching the criteria, not their ids
        :return: id or list of ids of records matching the criteria
        :rtype: integer or list of integers
        :raise AccessError: * if user tries to bypass access rules for read on the requested object.

        **Expressing a search domain (args)**

        Each tuple in the search domain needs to have 3 elements, in the form: **('field_name', 'operator', value)**, where:

            * **field_name** must be a valid name of field of the object model, possibly following many-to-one relationships using dot-notation, e.g. 'street' or 'partner_id.country' are valid values.
            * **operator** must be a string with a valid comparison operator from this list: ``=, !=, >, >=, <, <=, like, ilike, in, not in, child_of, parent_left, parent_right``
              The semantics of most of these operators are obvious.
              The ``child_of`` operator will look for records who are children or grand-children of a given record,
              according to the semantics of this model (i.e. following the relationship field named by
              ``self._parent_name``, by default ``parent_id``).
            * **value** must be a valid value to compare with the values of **field_name**, depending on its type.

        Domain criteria can be combined using 3 logical operators that can be added between tuples: '**&**' (logical AND, default), '**|**' (logical OR), '**!**' (logical NOT).
        These are **prefix** operators and the arity of the '**&**' and '**|**' operator is 2, while the arity of the '**!**' is just 1.
        Be very careful about this when you combine them the first time.

        Here is an example of searching for Partners named *ABC*, from Belgium or Germany, whose language is not English::

            [('name','=','ABC'),'!',('language.code','=','en_US'),'|',('country_id.code','=','be'),('country_id.code','=','de')]

        The '&' is omitted as it is the default, and of course we could have used '!=' for the language, but what this domain really represents is::

            (name is 'ABC' AND (language is NOT english) AND (country is Belgium OR Germany))
        """
        return self._search(cr, user, args, offset=offset, limit=limit, order=order, context=context, count=count)
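    # Illustrative example (not part of the original source):
    #     ids = model.search(cr, uid, [('name', 'ilike', 'abc')],
    #                        offset=0, limit=10, order='name desc')
    # returns at most 10 matching ids, sorted by name in descending order.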
1631 # display_name, name_get, name_create, name_search
1634 @api.depends(lambda self: (self._rec_name,) if self._rec_name else ())
1635 def _compute_display_name(self):
1636 name = self._rec_name
1637 if name in self._fields:
1638 convert = self._fields[name].convert_to_display_name
1640 record.display_name = convert(record[name])
1643 record.display_name = "%s,%s" % (self._name, self.id)
1645 def _inverse_display_name(self):
1646 name = self._rec_name
1647 if name in self._fields and not self._fields[name].relational:
1649 record[name] = record.display_name
1651 _logger.warning("Cannot inverse field display_name on %s", self._name)
1653 def _search_display_name(self, operator, value):
1654 name = self._rec_name
1655 if name in self._fields:
1656 return [(name, operator, value)]
1658 _logger.warning("Cannot search field display_name on %s", self._name)
1659 return [(0, '=', 1)]
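# Note (illustrative): (0, '=', 1) is the conventional "match nothing" leaf
# (the literal 0 never equals 1), so searching on display_name degrades
# gracefully to an empty result instead of raising.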
1663 """ Return a textual representation for the records in `self`.
1664 By default this is the value of field ``display_name``.
1667 :return: list of pairs ``(id, text_repr)`` for all records
1668 """
1669 result = []
1670 for record in self:
1671 try:
1672 result.append((record.id, record.display_name))
1673 except MissingError:
1674 pass
1675 return result
1678 def name_create(self, name):
1679 """ Create a new record by calling :meth:`~.create` with only one value
1680 provided: the display name of the new record.
1682 The new record will be initialized with any default values
1683 applicable to this model, or provided through the context. The usual
1684 behavior of :meth:`~.create` applies.
1686 :param name: display name of the record to create
1688 :return: the :meth:`~.name_get` pair value of the created record
1690 # Shortcut the inverse function of 'display_name' with self._rec_name.
1691 # This is useful when self._rec_name is a required field: in that case,
1692 # create() creates a record without the field, and inverse display_name
1693 # afterwards would fail.
1694 field_name = self._rec_name if self._rec_name else 'display_name'
1695 record = self.create({field_name: name})
1696 return (record.id, record.display_name)
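# Illustrative sketch (not part of the original source): on a hypothetical
# model with _rec_name = 'name',
#
#   pair = model.name_create('ABC')
#
# is equivalent to
#
#   record = model.create({'name': 'ABC'})
#   pair = (record.id, record.display_name)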
1699 def name_search(self, name='', args=None, operator='ilike', limit=100):
1700 """ Search for records that have a display name matching the given
1701 `name` pattern when compared with the given `operator`, while also
1702 matching the optional search domain (`args`).
1704 This is used for example to provide suggestions based on a partial
1705 value for a relational field. It can sometimes be seen as the inverse
1706 function of :meth:`~.name_get`, but it is not guaranteed to be one.
1708 This method is equivalent to calling :meth:`~.search` with a search
1709 domain based on `display_name` and then :meth:`~.name_get` on the
1710 result of the search.
1712 :param name: the name pattern to match
1713 :param list args: optional search domain (see :meth:`~.search` for
1714 syntax), specifying further restrictions
1715 :param str operator: domain operator for matching `name`, such as
1716 ``'like'`` or ``'='``.
1717 :param int limit: optional max number of records to return
1719 :return: list of pairs ``(id, text_repr)`` for all matching records.
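Illustrative example (hypothetical partner-like model and ids)::

    # suggest up to 5 records whose display name contains 'john'
    model.name_search(name='john', operator='ilike', limit=5)
    # -> e.g. [(7, 'John Doe'), (18, 'Johnny Smith')]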
1721 args = list(args or [])
1722 if not (name == '' and operator == 'ilike'):
1723 args += [('display_name', operator, name)]
1724 return self.search(args, limit=limit).name_get()
1726 def _name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100, name_get_uid=None):
1727 # private implementation of name_search, allows passing a dedicated user
1728 # for the name_get part to solve some access rights issues
1729 args = list(args or [])
1730 # optimize out the default criterion of ``ilike ''`` that matches everything
1731 if not (name == '' and operator == 'ilike'):
1732 args += [('display_name', operator, name)]
1733 access_rights_uid = name_get_uid or user
1734 ids = self._search(cr, user, args, limit=limit, context=context, access_rights_uid=access_rights_uid)
1735 res = self.name_get(cr, access_rights_uid, ids, context)
1736 return res
1738 def read_string(self, cr, uid, id, langs, fields=None, context=None):
1739 res = {}
1740 res2 = {}
1741 self.pool.get('ir.translation').check_access_rights(cr, uid, 'read')
1742 if not fields:
1743 fields = self._columns.keys() + self._inherit_fields.keys()
1744 #FIXME: collect all calls to _get_source into one SQL call.
1745 for lang in langs:
1746 res[lang] = {'code': lang}
1747 for f in fields:
1748 if f in self._columns:
1749 res_trans = self.pool.get('ir.translation')._get_source(cr, uid, self._name+','+f, 'field', lang)
1750 if res_trans:
1751 res[lang][f] = res_trans
1752 else:
1753 res[lang][f] = self._columns[f].string
1754 for table in self._inherits:
1755 cols = intersect(self._inherit_fields.keys(), fields)
1756 res2 = self.pool[table].read_string(cr, uid, id, langs, cols, context)
1757 for lang in res2:
1758 if lang in res:
1759 res[lang]['code'] = lang
1760 for f in res2[lang]:
1761 res[lang][f] = res2[lang][f]
1762 return res
1764 def write_string(self, cr, uid, id, langs, vals, context=None):
1765 self.pool.get('ir.translation').check_access_rights(cr, uid, 'write')
1766 #FIXME: try to only call the translation in one SQL
1767 for lang in langs:
1768 for field in vals:
1769 if field in self._columns:
1770 src = self._columns[field].string
1771 self.pool.get('ir.translation')._set_ids(cr, uid, self._name+','+field, 'field', lang, [0], vals[field], src)
1772 for table in self._inherits:
1773 cols = intersect(self._inherit_fields.keys(), vals)
1774 if cols:
1775 self.pool[table].write_string(cr, uid, id, langs, vals, context)
1776 return True
1778 def _add_missing_default_values(self, cr, uid, values, context=None):
1779 # avoid overriding inherited values when parent is set
1780 avoid_tables = []
1781 for tables, parent_field in self._inherits.items():
1782 if parent_field in values:
1783 avoid_tables.append(tables)
1785 # compute missing fields
1786 missing_defaults = set()
1787 for field in self._columns.keys():
1788 if field not in values:
1789 missing_defaults.add(field)
1790 for field in self._inherit_fields.keys():
1791 if (field not in values) and (self._inherit_fields[field][0] not in avoid_tables):
1792 missing_defaults.add(field)
1793 # discard magic fields
1794 missing_defaults -= set(MAGIC_COLUMNS)
1796 if missing_defaults:
1797 # override defaults with the provided values, never allow the other way around
1798 defaults = self.default_get(cr, uid, list(missing_defaults), context)
1799 for dv in defaults:
1800 if ((dv in self._columns and self._columns[dv]._type == 'many2many') \
1801 or (dv in self._inherit_fields and self._inherit_fields[dv][2]._type == 'many2many')) \
1802 and defaults[dv] and isinstance(defaults[dv][0], (int, long)):
1803 defaults[dv] = [(6, 0, defaults[dv])]
1804 if (dv in self._columns and self._columns[dv]._type == 'one2many' \
1805 or (dv in self._inherit_fields and self._inherit_fields[dv][2]._type == 'one2many')) \
1806 and isinstance(defaults[dv], (list, tuple)) and defaults[dv] and isinstance(defaults[dv][0], dict):
1807 defaults[dv] = [(0, 0, x) for x in defaults[dv]]
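# Illustrative note: the tuples built above are the standard x2many write
# "commands": (6, 0, ids) replaces the whole relation with `ids`, while
# (0, 0, values) creates a new related record from `values`. E.g. a default
# of [7, 8] for a many2many becomes [(6, 0, [7, 8])].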
1808 defaults.update(values)
1809 values = defaults
1811 return values
1812 def clear_caches(self):
1813 """ Clear the caches
1815 This clears the caches associated to methods decorated with
1816 ``tools.ormcache`` or ``tools.ormcache_multi``.
1817 """
1818 try:
1819 self._ormcache.clear()
1820 self.pool._any_cache_cleared = True
1821 except AttributeError:
1822 pass
1825 def _read_group_fill_results(self, cr, uid, domain, groupby, remaining_groupbys, aggregated_fields,
1826 read_group_result, read_group_order=None, context=None):
1827 """Helper method for filling in empty groups for all possible values of
1828 the field being grouped by"""
1830 # self._group_by_full should map groupable fields to a method that returns
1831 # a list of all aggregated values that we want to display for this field,
1832 # in the form of a m2o-like pair (key,label).
1833 # This is useful to implement kanban views for instance, where all columns
1834 # should be displayed even if they don't contain any record.
1836 # Grab the list of all groups that should be displayed, including all present groups
1837 present_group_ids = [x[groupby][0] for x in read_group_result if x[groupby]]
1838 all_groups, folded = self._group_by_full[groupby](self, cr, uid, present_group_ids, domain,
1839 read_group_order=read_group_order,
1840 access_rights_uid=openerp.SUPERUSER_ID,
1843 result_template = dict.fromkeys(aggregated_fields, False)
1844 result_template[groupby + '_count'] = 0
1845 if remaining_groupbys:
1846 result_template['__context'] = {'group_by': remaining_groupbys}
1848 # Merge the left_side (current results as dicts) with the right_side (all
1849 # possible values as m2o pairs). Both lists are supposed to be using the
1850 # same ordering, and can be merged in one pass.
1851 result = []
1852 known_values = {}
1853 def append_left(left_side):
1854 grouped_value = left_side[groupby] and left_side[groupby][0]
1855 if grouped_value not in known_values:
1856 result.append(left_side)
1857 known_values[grouped_value] = left_side
1858 else:
1859 count_attr = groupby + '_count'
1860 known_values[grouped_value].update({count_attr: left_side[count_attr]})
1861 def append_right(right_side):
1862 grouped_value = right_side[0]
1863 if grouped_value not in known_values:
1864 line = dict(result_template)
1865 line[groupby] = right_side
1866 line['__domain'] = [(groupby,'=',grouped_value)] + domain
1867 result.append(line)
1868 known_values[grouped_value] = line
1869 while read_group_result or all_groups:
1870 left_side = read_group_result[0] if read_group_result else None
1871 right_side = all_groups[0] if all_groups else None
1872 assert left_side is None or left_side[groupby] is False \
1873 or isinstance(left_side[groupby], (tuple,list)), \
1874 'M2O-like pair expected, got %r' % left_side[groupby]
1875 assert right_side is None or isinstance(right_side, (tuple,list)), \
1876 'M2O-like pair expected, got %r' % right_side
1877 if left_side is None:
1878 append_right(all_groups.pop(0))
1879 elif right_side is None:
1880 append_left(read_group_result.pop(0))
1881 elif left_side[groupby] == right_side:
1882 append_left(read_group_result.pop(0))
1883 all_groups.pop(0) # discard right_side
1884 elif not left_side[groupby] or not left_side[groupby][0]:
1885 # left side == "Undefined" entry, not present on right_side
1886 append_left(read_group_result.pop(0))
1887 else:
1888 append_right(all_groups.pop(0))
1890 if folded:
1891 for r in result:
1892 r['__fold'] = folded.get(r[groupby] and r[groupby][0], False)
1893 return result
1895 def _read_group_prepare(self, orderby, aggregated_fields, annotated_groupbys, query):
1897 Prepares the GROUP BY and ORDER BY terms for the read_group method. Adds the missing JOIN clause
1898 to the query if the order should be computed against a m2o field.
1899 :param orderby: the orderby definition in the form "%(field)s %(order)s"
1900 :param aggregated_fields: list of aggregated fields in the query
1901 :param annotated_groupbys: list of dictionaries returned by _read_group_process_groupby
1902 These dictionaries contain the qualified name of each groupby
1903 (fully qualified SQL name for the corresponding field),
1904 and the (non-raw) field name.
1905 :param osv.Query query: the query under construction
1906 :return: (groupby_terms, orderby_terms)
1907 """
1908 orderby_terms = []
1909 groupby_terms = [gb['qualified_field'] for gb in annotated_groupbys]
1910 groupby_fields = [gb['groupby'] for gb in annotated_groupbys]
1911 if not orderby:
1912 return groupby_terms, orderby_terms
1914 self._check_qorder(orderby)
1915 for order_part in orderby.split(','):
1916 order_split = order_part.split()
1917 order_field = order_split[0]
1918 if order_field in groupby_fields:
1920 if self._all_columns[order_field.split(':')[0]].column._type == 'many2one':
1921 order_clause = self._generate_order_by(order_part, query).replace('ORDER BY ', '')
1922 if order_clause:
1923 orderby_terms.append(order_clause)
1924 groupby_terms += [order_term.split()[0] for order_term in order_clause.split(',')]
1925 else:
1926 order = '"%s" %s' % (order_field, '' if len(order_split) == 1 else order_split[1])
1927 orderby_terms.append(order)
1928 elif order_field in aggregated_fields:
1929 orderby_terms.append(order_part)
1930 else:
1931 # Cannot order by a field that will not appear in the results (needs to be grouped or aggregated)
1932 _logger.warn('%s: read_group order by `%s` ignored, cannot sort on empty columns (not grouped/aggregated)',
1933 self._name, order_part)
1934 return groupby_terms, orderby_terms
1936 def _read_group_process_groupby(self, gb, query, context):
1938 Helper method to collect important information about groupbys: raw
1939 field name, type, time information, qualified name, ...
1941 split = gb.split(':')
1942 field_type = self._all_columns[split[0]].column._type
1943 gb_function = split[1] if len(split) == 2 else None
1944 temporal = field_type in ('date', 'datetime')
1945 tz_convert = field_type == 'datetime' and context.get('tz') in pytz.all_timezones
1946 qualified_field = self._inherits_join_calc(split[0], query)
1947 if temporal:
1948 display_formats = {
1949 'day': 'dd MMM YYYY',
1950 'week': "'W'w YYYY",
1951 'month': 'MMMM YYYY',
1952 'quarter': 'QQQ YYYY',
1953 'year': 'YYYY',
1954 }
1955 time_intervals = {
1956 'day': dateutil.relativedelta.relativedelta(days=1),
1957 'week': datetime.timedelta(days=7),
1958 'month': dateutil.relativedelta.relativedelta(months=1),
1959 'quarter': dateutil.relativedelta.relativedelta(months=3),
1960 'year': dateutil.relativedelta.relativedelta(years=1)
1961 }
1962 if tz_convert:
1963 qualified_field = "timezone('%s', timezone('UTC',%s))" % (context.get('tz', 'UTC'), qualified_field)
1964 qualified_field = "date_trunc('%s', %s)" % (gb_function or 'month', qualified_field)
1965 if field_type == 'boolean':
1966 qualified_field = "coalesce(%s,false)" % qualified_field
1967 return {
1968 'field': split[0],
1969 'groupby': gb,
1970 'type': field_type,
1971 'display_format': display_formats[gb_function or 'month'] if temporal else None,
1972 'interval': time_intervals[gb_function or 'month'] if temporal else None,
1973 'tz_convert': tz_convert,
1974 'qualified_field': qualified_field
1975 }
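# Illustrative note: for a groupby spec such as 'create_date:week' on a
# datetime column (hypothetical field), the code above produces roughly
#   date_trunc('week', timezone('Europe/Brussels', timezone('UTC', <col>)))
# when context['tz'] is 'Europe/Brussels', so the SQL GROUP BY operates on
# timestamps truncated in the user's timezone.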
1977 def _read_group_prepare_data(self, key, value, groupby_dict, context):
1979 Helper method to sanitize the data received by read_group. None
1980 values are converted to False and date/datetime values are formatted
1981 and corrected according to the timezones.
1983 value = False if value is None else value
1984 gb = groupby_dict.get(key)
1985 if gb and gb['type'] in ('date', 'datetime') and value:
1986 if isinstance(value, basestring):
1987 dt_format = DEFAULT_SERVER_DATETIME_FORMAT if gb['type'] == 'datetime' else DEFAULT_SERVER_DATE_FORMAT
1988 value = datetime.datetime.strptime(value, dt_format)
1989 if gb['tz_convert']:
1990 value = pytz.timezone(context['tz']).localize(value)
1991 return value
1993 def _read_group_get_domain(self, groupby, value):
1995 Helper method to construct the domain corresponding to a groupby and
1996 a given value. This is mostly relevant for date/datetime.
1998 if groupby['type'] in ('date', 'datetime') and value:
1999 dt_format = DEFAULT_SERVER_DATETIME_FORMAT if groupby['type'] == 'datetime' else DEFAULT_SERVER_DATE_FORMAT
2000 domain_dt_begin = value
2001 domain_dt_end = value + groupby['interval']
2002 if groupby['tz_convert']:
2003 domain_dt_begin = domain_dt_begin.astimezone(pytz.utc)
2004 domain_dt_end = domain_dt_end.astimezone(pytz.utc)
2005 return [(groupby['field'], '>=', domain_dt_begin.strftime(dt_format)),
2006 (groupby['field'], '<', domain_dt_end.strftime(dt_format))]
2007 if groupby['type'] == 'many2one' and value:
2008 value = value[0]
2009 return [(groupby['field'], '=', value)]
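# Illustrative note: for a 'month' group whose value is datetime(2014, 1, 1)
# on a hypothetical date field 'date_order', the helper above returns the
# half-open interval
#   [('date_order', '>=', '2014-01-01'), ('date_order', '<', '2014-02-01')]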
2011 def _read_group_format_result(self, data, annotated_groupbys, groupby, groupby_dict, domain, context):
2013 Helper method to format the data contained in the dictionary ``data`` by
2014 adding the domain corresponding to its values, the groupbys in the
2015 context, and by properly formatting the date/datetime values.
2017 domain_group = [dom for gb in annotated_groupbys for dom in self._read_group_get_domain(gb, data[gb['groupby']])]
2018 for k,v in data.iteritems():
2019 gb = groupby_dict.get(k)
2020 if gb and gb['type'] in ('date', 'datetime') and v:
2021 data[k] = babel.dates.format_date(v, format=gb['display_format'], locale=context.get('lang', 'en_US'))
2023 data['__domain'] = domain_group + domain
2024 if len(groupby) - len(annotated_groupbys) >= 1:
2025 data['__context'] = {'group_by': groupby[len(annotated_groupbys):]}
2026 return data
2029 def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False, lazy=True):
2031 Get the list of records in list view grouped by the given ``groupby`` fields
2033 :param cr: database cursor
2034 :param uid: current user id
2035 :param domain: list specifying search criteria [['field_name', 'operator', 'value'], ...]
2036 :param list fields: list of fields present in the list view specified on the object
2037 :param list groupby: list of groupby descriptions by which the records will be grouped.
2038 A groupby description is either a field (then it will be grouped by that field)
2039 or a string 'field:groupby_function'. Right now, the only functions supported
2040 are 'day', 'week', 'month', 'quarter' or 'year', and they only make sense for
2041 date/datetime fields.
2042 :param int offset: optional number of records to skip
2043 :param int limit: optional max number of records to return
2044 :param dict context: context arguments, like lang, time zone.
2045 :param list orderby: optional ``order by`` specification, for
2046 overriding the natural sort ordering of the
2047 groups, see also :py:meth:`~osv.osv.osv.search`
2048 (supported only for many2one fields currently)
2049 :param bool lazy: if true, the results are only grouped by the first groupby and the
2050 remaining groupbys are put in the __context key. If false, all the groupbys are
2051 done in one call.
2052 :return: list of dictionaries (one dictionary for each group) containing:
2054 * the values of fields grouped by the fields in ``groupby`` argument
2055 * __domain: list of tuples specifying the search criteria
2056 * __context: dictionary with argument like ``groupby``
2057 :rtype: [{'field_name_1': value, ...}, ...]
2058 :raise AccessError: * if user has no read rights on the requested object
2059 * if user tries to bypass access rules for read on the requested object
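Illustrative example (hypothetical field names)::

    # one group per month, with record count and summed 'amount_total'
    model.read_group(cr, uid, [], ['amount_total'], ['date_order:month'])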
2063 self.check_access_rights(cr, uid, 'read')
2064 query = self._where_calc(cr, uid, domain, context=context)
2065 fields = fields or self._columns.keys()
2067 groupby = [groupby] if isinstance(groupby, basestring) else groupby
2068 groupby_list = groupby[:1] if lazy else groupby
2069 annotated_groupbys = [self._read_group_process_groupby(gb, query, context)
2070 for gb in groupby_list]
2071 groupby_fields = [g['field'] for g in annotated_groupbys]
2072 order = orderby or ','.join([g for g in groupby_list])
2073 groupby_dict = {gb['groupby']: gb for gb in annotated_groupbys}
2075 self._apply_ir_rules(cr, uid, query, 'read', context=context)
2076 for gb in groupby_fields:
2077 assert gb in fields, "Fields in 'groupby' must appear in the list of fields to read (perhaps it's missing in the list view?)"
2078 groupby_def = self._columns.get(gb) or (self._inherit_fields.get(gb) and self._inherit_fields.get(gb)[2])
2079 assert groupby_def and groupby_def._classic_write, "Fields in 'groupby' must be regular database-persisted fields (no function or related fields), or function fields with store=True"
2080 if gb not in self._all_columns:
2081 # Don't allow arbitrary values, as this would be a SQL injection vector!
2082 raise except_orm(_('Invalid group_by'),
2083 _('Invalid group_by specification: "%s".\nA group_by specification must be a list of valid fields.')%(gb,))
2085 aggregated_fields = [
2086 f for f in fields
2087 if f not in ('id', 'sequence')
2088 if f not in groupby_fields
2089 if self._all_columns[f].column._type in ('integer', 'float')
2090 if getattr(self._all_columns[f].column, '_classic_write')]
2092 field_formatter = lambda f: (self._all_columns[f].column.group_operator or 'sum', self._inherits_join_calc(f, query), f)
2093 select_terms = ["%s(%s) AS %s" % field_formatter(f) for f in aggregated_fields]
2095 for gb in annotated_groupbys:
2096 select_terms.append('%s as "%s" ' % (gb['qualified_field'], gb['groupby']))
2098 groupby_terms, orderby_terms = self._read_group_prepare(order, aggregated_fields, annotated_groupbys, query)
2099 from_clause, where_clause, where_clause_params = query.get_sql()
2100 if lazy and (len(groupby_fields) >= 2 or not context.get('group_by_no_leaf')):
2101 count_field = groupby_fields[0] if len(groupby_fields) >= 1 else '_'
2102 else:
2103 count_field = '_'
2105 prefix_terms = lambda prefix, terms: (prefix + " " + ",".join(terms)) if terms else ''
2106 prefix_term = lambda prefix, term: ('%s %s' % (prefix, term)) if term else ''
2108 query = """
2109 SELECT min(%(table)s.id) AS id, count(%(table)s.id) AS %(count_field)s_count %(extra_fields)s
2110 FROM %(from)s
2111 %(where)s
2112 %(groupby)s
2113 %(orderby)s
2114 %(limit)s
2115 %(offset)s
2116 """ % {
2117 'table': self._table,
2118 'count_field': count_field,
2119 'extra_fields': prefix_terms(',', select_terms),
2120 'from': from_clause,
2121 'where': prefix_term('WHERE', where_clause),
2122 'groupby': prefix_terms('GROUP BY', groupby_terms),
2123 'orderby': prefix_terms('ORDER BY', orderby_terms),
2124 'limit': prefix_term('LIMIT', int(limit) if limit else None),
2125 'offset': prefix_term('OFFSET', int(offset) if limit else None),
2127 cr.execute(query, where_clause_params)
2128 fetched_data = cr.dictfetchall()
2130 if not groupby_fields:
2131 return fetched_data
2133 many2onefields = [gb['field'] for gb in annotated_groupbys if gb['type'] == 'many2one']
2134 if many2onefields:
2135 data_ids = [r['id'] for r in fetched_data]
2136 many2onefields = list(set(many2onefields))
2137 data_dict = {d['id']: d for d in self.read(cr, uid, data_ids, many2onefields, context=context)}
2138 for d in fetched_data:
2139 d.update(data_dict[d['id']])
2141 data = map(lambda r: {k: self._read_group_prepare_data(k,v, groupby_dict, context) for k,v in r.iteritems()}, fetched_data)
2142 result = [self._read_group_format_result(d, annotated_groupbys, groupby, groupby_dict, domain, context) for d in data]
2143 if lazy and groupby_fields[0] in self._group_by_full:
2144 # Right now, read_group only fill results in lazy mode (by default).
2145 # If you need to have the empty groups in 'eager' mode, then the
2146 # method _read_group_fill_results needs to be completely reimplemented
2148 result = self._read_group_fill_results(cr, uid, domain, groupby_fields[0], groupby[len(annotated_groupbys):],
2149 aggregated_fields, result, read_group_order=order,
2150 context=context)
2152 return result
2153 def _inherits_join_add(self, current_model, parent_model_name, query):
2155 Add missing table SELECT and JOIN clause to ``query`` for reaching the parent table (no duplicates)
2156 :param current_model: current model object
2157 :param parent_model_name: name of the parent model for which the clauses should be added
2158 :param query: query object on which the JOIN should be added
2160 inherits_field = current_model._inherits[parent_model_name]
2161 parent_model = self.pool[parent_model_name]
2162 parent_alias, parent_alias_statement = query.add_join((current_model._table, parent_model._table, inherits_field, 'id', inherits_field), implicit=True)
2163 return parent_alias
2165 def _inherits_join_calc(self, field, query):
2167 Adds missing table select and join clause(s) to ``query`` for reaching
2168 the field coming from an '_inherits' parent table (no duplicates).
2170 :param field: name of inherited field to reach
2171 :param query: query object on which the JOIN should be added
2172 :return: qualified name of field, to be used in SELECT clause
2174 current_table = self
2175 parent_alias = '"%s"' % current_table._table
2176 while field in current_table._inherit_fields and field not in current_table._columns:
2177 parent_model_name = current_table._inherit_fields[field][0]
2178 parent_table = self.pool[parent_model_name]
2179 parent_alias = self._inherits_join_add(current_table, parent_model_name, query)
2180 current_table = parent_table
2181 return '%s."%s"' % (parent_alias, field)
2183 def _parent_store_compute(self, cr):
2184 if not self._parent_store:
2185 return
2186 _logger.info('Computing parent left and right for table %s...', self._table)
2187 def browse_rec(root, pos=0):
2189 where = self._parent_name+'='+str(root)
2190 if not root:
2191 where = self._parent_name+' IS NULL'
2192 if self._parent_order:
2193 where += ' order by '+self._parent_order
2194 cr.execute('SELECT id FROM '+self._table+' WHERE '+where)
2195 pos2 = pos + 1
2196 for id in cr.fetchall():
2197 pos2 = browse_rec(id[0], pos2)
2198 cr.execute('update '+self._table+' set parent_left=%s, parent_right=%s where id=%s', (pos, pos2, root))
2199 return pos2 + 1
2200 query = 'SELECT id FROM '+self._table+' WHERE '+self._parent_name+' IS NULL'
2201 if self._parent_order:
2202 query += ' order by ' + self._parent_order
2203 pos = 0
2204 cr.execute(query)
2205 for (root,) in cr.fetchall():
2206 pos = browse_rec(root, pos)
2207 self.invalidate_cache(cr, SUPERUSER_ID, ['parent_left', 'parent_right'])
2208 return True
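# Illustrative note: browse_rec() above assigns each record a
# (parent_left, parent_right) interval that encloses the intervals of all
# of its descendants (a "nested set" / modified preorder encoding). A
# 'child_of' search can then be answered with a simple range comparison on
# parent_left instead of a recursive query.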
2210 def _update_store(self, cr, f, k):
2211 _logger.info("storing computed values of fields.function '%s'", k)
2212 ss = self._columns[k]._symbol_set
2213 update_query = 'UPDATE "%s" SET "%s"=%s WHERE id=%%s' % (self._table, k, ss[0])
2214 cr.execute('select id from '+self._table)
2215 ids_lst = map(lambda x: x[0], cr.fetchall())
2216 while ids_lst:
2217 iids = ids_lst[:AUTOINIT_RECALCULATE_STORED_FIELDS]
2218 ids_lst = ids_lst[AUTOINIT_RECALCULATE_STORED_FIELDS:]
2219 res = f.get(cr, self, iids, k, SUPERUSER_ID, {})
2220 for key, val in res.items():
2223 # if val is a many2one, just write the ID
2224 if type(val) == tuple:
2225 val = val[0]
2226 if val is not False:
2227 cr.execute(update_query, (ss[1](val), key))
2229 def _check_selection_field_value(self, cr, uid, field, value, context=None):
2230 """Raise except_orm if value is not among the valid values for the selection field"""
2231 if self._columns[field]._type == 'reference':
2232 val_model, val_id_str = value.split(',', 1)
2233 val_id = False
2234 try:
2235 val_id = long(val_id_str)
2236 except ValueError:
2237 pass
2238 if not val_id:
2239 raise except_orm(_('ValidateError'),
2240 _('Invalid value for reference field "%s.%s" (last part must be a non-zero integer): "%s"') % (self._table, field, value))
2241 val = val_model
2242 else:
2243 val = value
2244 if isinstance(self._columns[field].selection, (tuple, list)):
2245 if val in dict(self._columns[field].selection):
2246 return
2247 elif val in dict(self._columns[field].selection(self, cr, uid, context=context)):
2248 return
2249 raise except_orm(_('ValidateError'),
2250 _('The value "%s" for the field "%s.%s" is not in the selection') % (value, self._name, field))
2252 def _check_removed_columns(self, cr, log=False):
2253 # iterate on the database columns to drop the NOT NULL constraints
2254 # of fields which were required but have been removed (or will be added by another module)
2255 columns = [c for c in self._columns if not (isinstance(self._columns[c], fields.function) and not self._columns[c].store)]
2256 columns += MAGIC_COLUMNS
2257 cr.execute("SELECT a.attname, a.attnotnull"
2258 " FROM pg_class c, pg_attribute a"
2259 " WHERE c.relname=%s"
2260 " AND c.oid=a.attrelid"
2261 " AND a.attisdropped=%s"
2262 " AND pg_catalog.format_type(a.atttypid, a.atttypmod) NOT IN ('cid', 'tid', 'oid', 'xid')"
2263 " AND a.attname NOT IN %s", (self._table, False, tuple(columns)))
2265 for column in cr.dictfetchall():
2266 if log:
2267 _logger.debug("column %s is in the table %s but not in the corresponding object %s",
2268 column['attname'], self._table, self._name)
2269 if column['attnotnull']:
2270 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, column['attname']))
2271 _schema.debug("Table '%s': column '%s': dropped NOT NULL constraint",
2272 self._table, column['attname'])
2274 def _save_constraint(self, cr, constraint_name, type):
2276 Record the creation of a constraint for this model, to make it possible
2277 to delete it later when the module is uninstalled. Type can be either
2278 'f' or 'u' depending on the constraint being a foreign key or not.
2280 if not self._module:
2281 # no need to save constraints for custom models as they're not part
2282 # of any module
2283 return
2284 assert type in ('f', 'u')
2285 cr.execute("""
2286 SELECT 1 FROM ir_model_constraint, ir_module_module
2287 WHERE ir_model_constraint.module=ir_module_module.id
2288 AND ir_model_constraint.name=%s
2289 AND ir_module_module.name=%s
2290 """, (constraint_name, self._module))
2291 if not cr.rowcount:
2292 cr.execute("""
2293 INSERT INTO ir_model_constraint
2294 (name, date_init, date_update, module, model, type)
2295 VALUES (%s, now() AT TIME ZONE 'UTC', now() AT TIME ZONE 'UTC',
2296 (SELECT id FROM ir_module_module WHERE name=%s),
2297 (SELECT id FROM ir_model WHERE model=%s), %s)""",
2298 (constraint_name, self._module, self._name, type))
2300 def _save_relation_table(self, cr, relation_table):
2302 Record the creation of a many2many for this model, to make it possible
2303 to delete it later when the module is uninstalled.
2304 """
2305 cr.execute("""
2306 SELECT 1 FROM ir_model_relation, ir_module_module
2307 WHERE ir_model_relation.module=ir_module_module.id
2308 AND ir_model_relation.name=%s
2309 AND ir_module_module.name=%s
2310 """, (relation_table, self._module))
2311 if not cr.rowcount:
2312 cr.execute("""INSERT INTO ir_model_relation (name, date_init, date_update, module, model)
2313 VALUES (%s, now() AT TIME ZONE 'UTC', now() AT TIME ZONE 'UTC',
2314 (SELECT id FROM ir_module_module WHERE name=%s),
2315 (SELECT id FROM ir_model WHERE model=%s))""",
2316 (relation_table, self._module, self._name))
2317 self.invalidate_cache(cr, SUPERUSER_ID)
2319 # checked version: for direct m2o starting from `self`
2320 def _m2o_add_foreign_key_checked(self, source_field, dest_model, ondelete):
2321 assert self.is_transient() or not dest_model.is_transient(), \
2322 'Many2One relationships from non-transient Model to TransientModel are forbidden'
2323 if self.is_transient() and not dest_model.is_transient():
2324 # TransientModel relationships to regular Models are annoying
2325 # usually because they could block deletion due to the FKs.
2326 # So unless stated otherwise we default them to ondelete=cascade.
2327 ondelete = ondelete or 'cascade'
2328 fk_def = (self._table, source_field, dest_model._table, ondelete or 'set null')
2329 self._foreign_keys.add(fk_def)
2330 _schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s", *fk_def)
2332 # unchecked version: for custom cases, such as m2m relationships
2333 def _m2o_add_foreign_key_unchecked(self, source_table, source_field, dest_model, ondelete):
2334 fk_def = (source_table, source_field, dest_model._table, ondelete or 'set null')
2335 self._foreign_keys.add(fk_def)
2336 _schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s", *fk_def)
2338 def _drop_constraint(self, cr, source_table, constraint_name):
2339 cr.execute("ALTER TABLE %s DROP CONSTRAINT %s" % (source_table,constraint_name))
2341 def _m2o_fix_foreign_key(self, cr, source_table, source_field, dest_model, ondelete):
2342 # Find FK constraint(s) currently established for the m2o field,
2343 # and see whether they are stale or not
2344 cr.execute("""SELECT confdeltype as ondelete_rule, conname as constraint_name,
2345 cl2.relname as foreign_table
2346 FROM pg_constraint as con, pg_class as cl1, pg_class as cl2,
2347 pg_attribute as att1, pg_attribute as att2
2348 WHERE con.conrelid = cl1.oid
2349 AND cl1.relname = %s
2350 AND con.confrelid = cl2.oid
2351 AND array_lower(con.conkey, 1) = 1
2352 AND con.conkey[1] = att1.attnum
2353 AND att1.attrelid = cl1.oid
2354 AND att1.attname = %s
2355 AND array_lower(con.confkey, 1) = 1
2356 AND con.confkey[1] = att2.attnum
2357 AND att2.attrelid = cl2.oid
2358 AND att2.attname = %s
2359 AND con.contype = 'f'""", (source_table, source_field, 'id'))
2360 constraints = cr.dictfetchall()
2362 if len(constraints) == 1:
2363 # Is it the right constraint?
2364 cons, = constraints
2365 if cons['ondelete_rule'] != POSTGRES_CONFDELTYPES.get((ondelete or 'set null').upper(), 'a')\
2366 or cons['foreign_table'] != dest_model._table:
2367 # Wrong FK: drop it and recreate
2368 _schema.debug("Table '%s': dropping obsolete FK constraint: '%s'",
2369 source_table, cons['constraint_name'])
2370 self._drop_constraint(cr, source_table, cons['constraint_name'])
2371 else:
2372 # it's all good, nothing to do!
2373 return
2374 else:
2375 # Multiple FKs found for the same field, drop them all, and re-create
2376 for cons in constraints:
2377 _schema.debug("Table '%s': dropping duplicate FK constraints: '%s'",
2378 source_table, cons['constraint_name'])
2379 self._drop_constraint(cr, source_table, cons['constraint_name'])
2381 # (re-)create the FK
2382 self._m2o_add_foreign_key_checked(source_field, dest_model, ondelete)
2385 def _set_default_value_on_column(self, cr, column_name, context=None):
2386 # ideally should use add_default_value but fails
2387 # due to ir.values not being ready
2389 # get old-style default
2390 default = self._defaults.get(column_name)
2391 if callable(default):
2392 default = default(self, cr, SUPERUSER_ID, context)
2394 # get new_style default if no old-style
2395 if default is None:
2396 record = self.new(cr, SUPERUSER_ID, context=context)
2397 field = self._fields[column_name]
2398 field.determine_default(record)
2399 defaults = dict(record._cache)
2400 if column_name in defaults:
2401 default = field.convert_to_write(defaults[column_name])
2403 if default is not None:
2404 _logger.debug("Table '%s': setting default value of new column %s",
2405 self._table, column_name)
2406 ss = self._columns[column_name]._symbol_set
2407 query = 'UPDATE "%s" SET "%s"=%s WHERE "%s" is NULL' % (
2408 self._table, column_name, ss[0], column_name)
2409 cr.execute(query, (ss[1](default),))
2410 # this is a disgrace
2413 def _auto_init(self, cr, context=None):
2416 Call _field_create and, unless _auto is False:
2418 - create the corresponding table in database for the model,
2419 - possibly add the parent columns in database,
2420 - possibly add the columns 'create_uid', 'create_date', 'write_uid',
2421 'write_date' in database if _log_access is True (the default),
2422 - report on database columns that no longer exist in _columns,
2423 - remove NOT NULL constraints that no longer apply,
2424 - alter existing database columns to match _columns,
2425 - create database tables to match _columns,
2426 - add database indices to match _columns,
2427 - save in self._foreign_keys a list of foreign keys to create (see
2428 _auto_end).
2431 self._foreign_keys = set()
2432 raise_on_invalid_object_name(self._name)
2435 store_compute = False
2436 stored_fields = [] # new-style stored fields with compute
2438 update_custom_fields = context.get('update_custom_fields', False)
2439 self._field_create(cr, context=context)
2440 create = not self._table_exist(cr)
2444 self._create_table(cr)
2447 if self._parent_store:
2448 if not self._parent_columns_exist(cr):
2449 self._create_parent_columns(cr)
2450 store_compute = True
2452 self._check_removed_columns(cr, log=False)
2454 # iterate on the "object columns"
2455 column_data = self._select_column_data(cr)
2457 for k, f in self._columns.iteritems():
2458 if k == 'id': # FIXME: maybe id should be a regular column?
2459 continue
2460 # Don't update custom (also called manual) fields
2461 if f.manual and not update_custom_fields:
2462 continue
2464 if isinstance(f, fields.one2many):
2465 self._o2m_raise_on_missing_reference(cr, f)
2467 elif isinstance(f, fields.many2many):
2468 self._m2m_raise_or_create_relation(cr, f)
2470 else:
2471 res = column_data.get(k)
2473 # The field is not found as-is in database, try if it
2474 # exists with an old name.
2475 if not res and hasattr(f, 'oldname'):
2476 res = column_data.get(f.oldname)
2477 if res:
2478 cr.execute('ALTER TABLE "%s" RENAME "%s" TO "%s"' % (self._table, f.oldname, k))
2479 res['attname'] = k
2480 column_data[k] = res
2481 _schema.debug("Table '%s': renamed column '%s' to '%s'",
2482 self._table, f.oldname, k)
2484 # The field already exists in database. Possibly
2485 # change its type, rename it, drop it or change its
2486 # constraints.
2487 if res:
2488 f_pg_type = res['typname']
2489 f_pg_size = res['size']
2490 f_pg_notnull = res['attnotnull']
2491 if isinstance(f, fields.function) and not f.store and\
2492 not getattr(f, 'nodrop', False):
2493 _logger.info('column %s (%s) converted to a function, removed from table %s',
2494 k, f.string, self._table)
2495 cr.execute('ALTER TABLE "%s" DROP COLUMN "%s" CASCADE' % (self._table, k))
2497 _schema.debug("Table '%s': dropped column '%s' with cascade",
2498 self._table, k)
2499 f_obj_type = None
2500 else:
2501 f_obj_type = get_pg_type(f) and get_pg_type(f)[0]
2502 if f_obj_type:
2503 ok = False
2504 casts = [
2506 ('text', 'char', pg_varchar(f.size), '::%s' % pg_varchar(f.size)),
2507 ('varchar', 'text', 'TEXT', ''),
2508 ('int4', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
2509 ('date', 'datetime', 'TIMESTAMP', '::TIMESTAMP'),
2510 ('timestamp', 'date', 'date', '::date'),
2511 ('numeric', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
2512 ('float8', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
2513 ]
2514 if f_pg_type == 'varchar' and f._type == 'char' and f_pg_size and (f.size is None or f_pg_size < f.size):
2515 try:
2516 with cr.savepoint():
2517 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" TYPE %s' % (self._table, k, pg_varchar(f.size)))
2518 except psycopg2.NotSupportedError:
2519 # An in-place ALTER TABLE cannot be done because a view depends on this field.
2520 # Do a manual copy. This will drop the view (that will be recreated later)
2521 cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO temp_change_size' % (self._table, k))
2522 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, pg_varchar(f.size)))
2523 cr.execute('UPDATE "%s" SET "%s"=temp_change_size::%s' % (self._table, k, pg_varchar(f.size)))
2524 cr.execute('ALTER TABLE "%s" DROP COLUMN temp_change_size CASCADE' % (self._table,))
2526 _schema.debug("Table '%s': column '%s' (type varchar) changed size from %s to %s",
2527 self._table, k, f_pg_size or 'unlimited', f.size or 'unlimited')
2528 for c in casts:
2529 if (f_pg_type==c[0]) and (f._type==c[1]):
2530 if f_pg_type != f_obj_type:
2531 ok = True
2532 cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO __temp_type_cast' % (self._table, k))
2533 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, c[2]))
2534 cr.execute(('UPDATE "%s" SET "%s"= __temp_type_cast'+c[3]) % (self._table, k))
2535 cr.execute('ALTER TABLE "%s" DROP COLUMN __temp_type_cast CASCADE' % (self._table,))
2537 _schema.debug("Table '%s': column '%s' changed type from %s to %s",
2538 self._table, k, c[0], c[1])
2541 if f_pg_type != f_obj_type:
2542 if not ok:
2543 i = 0
2544 while True:
2545 newname = k + '_moved' + str(i)
2546 cr.execute("SELECT count(1) FROM pg_class c,pg_attribute a " \
2547 "WHERE c.relname=%s " \
2548 "AND a.attname=%s " \
2549 "AND c.oid=a.attrelid ", (self._table, newname))
2550 if not cr.fetchone()[0]:
2551 break
2552 i += 1
2553 if f_pg_notnull:
2554 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, k))
2555 cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO "%s"' % (self._table, k, newname))
2556 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, get_pg_type(f)[1]))
2557 cr.execute("COMMENT ON COLUMN %s.\"%s\" IS %%s" % (self._table, k), (f.string,))
2558 _schema.debug("Table '%s': column '%s' has changed type (DB=%s, def=%s), data moved to column %s !",
2559 self._table, k, f_pg_type, f._type, newname)
2561 # if the field is required and hasn't got a NOT NULL constraint
2562 if f.required and f_pg_notnull == 0:
2563 self._set_default_value_on_column(cr, k, context=context)
2564 # add the NOT NULL constraint
2565 try:
2566 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k), log_exceptions=False)
2567 cr.commit()
2568 _schema.debug("Table '%s': column '%s': added NOT NULL constraint",
2569 self._table, k)
2570 except Exception:
2571 msg = "Table '%s': unable to set a NOT NULL constraint on column '%s' !\n"\
2572 "If you want to have it, you should update the records and execute manually:\n"\
2573 "ALTER TABLE %s ALTER COLUMN %s SET NOT NULL"
2574 _schema.warning(msg, self._table, k, self._table, k)
2576 elif not f.required and f_pg_notnull == 1:
2577 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, k))
2579 _schema.debug("Table '%s': column '%s': dropped NOT NULL constraint",
2580 self._table, k)
2582 indexname = '%s_%s_index' % (self._table, k)
2583 cr.execute("SELECT indexname FROM pg_indexes WHERE indexname = %s and tablename = %s", (indexname, self._table))
2584 res2 = cr.dictfetchall()
2585 if not res2 and f.select:
2586 cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (self._table, k, self._table, k))
2588 if f._type == 'text':
2589 # FIXME: for fields.text columns we should try creating GIN indexes instead (seems most suitable for an ERP context)
2590 msg = "Table '%s': Adding (b-tree) index for %s column '%s'. "\
2591 "This is probably useless (does not work for fulltext search) and prevents INSERTs of long texts"\
2592 " because there is a length limit for indexable btree values!\n"\
2593 "Use a search view instead if you simply want to make the field searchable."
2594 _schema.warning(msg, self._table, f._type, k)
2595 if res2 and not f.select:
2596 cr.execute('DROP INDEX "%s_%s_index"' % (self._table, k))
2598 msg = "Table '%s': dropping index for column '%s' of type '%s' as it is not required anymore"
2599 _schema.debug(msg, self._table, k, f._type)
2601 if isinstance(f, fields.many2one) or (isinstance(f, fields.function) and f._type == 'many2one' and f.store):
2602 dest_model = self.pool[f._obj]
2603 if dest_model._table != 'ir_actions':
2604 self._m2o_fix_foreign_key(cr, self._table, k, dest_model, f.ondelete)
2606 # The field doesn't exist in database. Create it if necessary.
2608 if not isinstance(f, fields.function) or f.store:
2609 # add the missing field
2610 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, get_pg_type(f)[1]))
2611 cr.execute("COMMENT ON COLUMN %s.\"%s\" IS %%s" % (self._table, k), (f.string,))
2612 _schema.debug("Table '%s': added column '%s' with definition=%s",
2613 self._table, k, get_pg_type(f)[1])
2617 self._set_default_value_on_column(cr, k, context=context)
2619 # remember the functions to call for the stored fields
2620 if isinstance(f, fields.function):
2621 order = 10
2622 if f.store is not True: # i.e. if f.store is a dict
2623 order = f.store[f.store.keys()[0]][2]
2624 todo_end.append((order, self._update_store, (f, k)))
2626 # remember new-style stored fields with compute method
2627 if k in self._fields and self._fields[k].depends:
2628 stored_fields.append(self._fields[k])
2630 # and add constraints if needed
2631 if isinstance(f, fields.many2one) or (isinstance(f, fields.function) and f._type == 'many2one' and f.store):
2632 if f._obj not in self.pool:
2633 raise except_orm('Programming Error', 'There is no reference available for %s' % (f._obj,))
2634 dest_model = self.pool[f._obj]
2635 ref = dest_model._table
2636 # ir_actions is inherited so foreign key doesn't work on it
2637 if ref != 'ir_actions':
2638 self._m2o_add_foreign_key_checked(k, dest_model, f.ondelete)
2639 if f.select:
2640 cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (self._table, k, self._table, k))
2642 if f.required:
2643 try:
2644 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k))
2645 _schema.debug("Table '%s': column '%s': added a NOT NULL constraint",
2646 self._table, k)
2647 except Exception:
2648 msg = "WARNING: unable to set column %s of table %s not null !\n"\
2649 "Try to re-run: openerp-server --update=module\n"\
2650 "If it doesn't work, update records and execute manually:\n"\
2651 "ALTER TABLE %s ALTER COLUMN %s SET NOT NULL"
2652 _logger.warning(msg, k, self._table, self._table, k, exc_info=True)
2656 cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (self._table,))
2657 create = not bool(cr.fetchone())
2659 cr.commit() # start a new transaction
2661 if self._auto:
2662 self._add_sql_constraints(cr)
2664 if create:
2665 self._execute_sql(cr)
2667 if store_compute:
2668 self._parent_store_compute(cr)
2669 cr.commit()
2672 # trigger computation of new-style stored fields with a compute
2673 if stored_fields:
2674 _logger.info("Storing computed values of %s fields %s",
2675 self._name, ', '.join(sorted(f.name for f in stored_fields)))
2676 recs = self.browse(cr, SUPERUSER_ID, [], {'active_test': False})
2677 recs = recs.search([])
2678 if recs:
2679 map(recs._recompute_todo, stored_fields)
2682 todo_end.append((1000, func, ()))
2684 return todo_end
2686 def _auto_end(self, cr, context=None):
2687 """ Create the foreign keys recorded by _auto_init. """
2688 for t, k, r, d in self._foreign_keys:
2689 cr.execute('ALTER TABLE "%s" ADD FOREIGN KEY ("%s") REFERENCES "%s" ON DELETE %s' % (t, k, r, d))
2690 self._save_constraint(cr, "%s_%s_fkey" % (t, k), 'f')
2692 del self._foreign_keys
2695 def _table_exist(self, cr):
2696 cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (self._table,))
2697 return cr.rowcount
2700 def _create_table(self, cr):
2701 cr.execute('CREATE TABLE "%s" (id SERIAL NOT NULL, PRIMARY KEY(id))' % (self._table,))
2702 cr.execute(("COMMENT ON TABLE \"%s\" IS %%s" % self._table), (self._description,))
2703 _schema.debug("Table '%s': created", self._table)
2706 def _parent_columns_exist(self, cr):
2707 cr.execute("""SELECT c.relname
2708 FROM pg_class c, pg_attribute a
2709 WHERE c.relname=%s AND a.attname=%s AND c.oid=a.attrelid
2710 """, (self._table, 'parent_left'))
2711 return cr.rowcount
2714 def _create_parent_columns(self, cr):
2715 cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_left" INTEGER' % (self._table,))
2716 cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_right" INTEGER' % (self._table,))
2717 if 'parent_left' not in self._columns:
2718 _logger.error('create a column parent_left on object %s: fields.integer(\'Left Parent\', select=1)',
2719 self._name)
2720 _schema.debug("Table '%s': added column '%s' with definition=%s",
2721 self._table, 'parent_left', 'INTEGER')
2722 elif not self._columns['parent_left'].select:
2723 _logger.error('parent_left column on object %s must be indexed! Add select=1 to the field definition)',
2724 self._name)
2725 if 'parent_right' not in self._columns:
2726 _logger.error('create a column parent_right on object %s: fields.integer(\'Right Parent\', select=1)',
2727 self._name)
2728 _schema.debug("Table '%s': added column '%s' with definition=%s",
2729 self._table, 'parent_right', 'INTEGER')
2730 elif not self._columns['parent_right'].select:
2731 _logger.error('parent_right column on object %s must be indexed! Add select=1 to the field definition)',
2732 self._name)
2733 if self._columns[self._parent_name].ondelete not in ('cascade', 'restrict'):
2734 _logger.error("The column %s on object %s must be set as ondelete='cascade' or 'restrict'",
2735 self._parent_name, self._name)
2740 def _select_column_data(self, cr):
2741 # attlen is the number of bytes necessary to represent the type when
2742 # the type has a fixed size. If the type has a varying size attlen is
2743 # -1 and atttypmod is the size limit + 4, or -1 if there is no limit.
2744 cr.execute("SELECT c.relname,a.attname,a.attlen,a.atttypmod,a.attnotnull,a.atthasdef,t.typname,CASE WHEN a.attlen=-1 THEN (CASE WHEN a.atttypmod=-1 THEN 0 ELSE a.atttypmod-4 END) ELSE a.attlen END as size " \
2745 "FROM pg_class c,pg_attribute a,pg_type t " \
2746 "WHERE c.relname=%s " \
2747 "AND c.oid=a.attrelid " \
2748 "AND a.atttypid=t.oid", (self._table,))
2749 return dict(map(lambda x: (x['attname'], x), cr.dictfetchall()))
2752 def _o2m_raise_on_missing_reference(self, cr, f):
2753 # TODO this check should be a method on fields.one2many.
2754 if f._obj in self.pool:
2755 other = self.pool[f._obj]
2756 # TODO the condition could use fields_get_keys().
2757 if f._fields_id not in other._columns.keys():
2758 if f._fields_id not in other._inherit_fields.keys():
2759 raise except_orm('Programming Error', "There is no reference field '%s' found for '%s'" % (f._fields_id, f._obj,))
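# Note (illustrative): a one2many has no database column of its own; it is
# read through the inverse many2one (f._fields_id) stored on the co-model,
# hence the check above that the inverse field actually exists there.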
2761 def _m2m_raise_or_create_relation(self, cr, f):
2762 m2m_tbl, col1, col2 = f._sql_names(self)
2763 self._save_relation_table(cr, m2m_tbl)
2764 cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (m2m_tbl,))
2765 if not cr.dictfetchall():
2766 if f._obj not in self.pool:
2767 raise except_orm('Programming Error', 'Many2Many destination model does not exist: `%s`' % (f._obj,))
2768 dest_model = self.pool[f._obj]
2769 ref = dest_model._table
2770 cr.execute('CREATE TABLE "%s" ("%s" INTEGER NOT NULL, "%s" INTEGER NOT NULL, UNIQUE("%s","%s"))' % (m2m_tbl, col1, col2, col1, col2))
2771 # create foreign key references with ondelete=cascade, unless the targets are SQL views
2772 cr.execute("SELECT relkind FROM pg_class WHERE relkind IN ('v') AND relname=%s", (ref,))
2773 if not cr.fetchall():
2774 self._m2o_add_foreign_key_unchecked(m2m_tbl, col2, dest_model, 'cascade')
2775 cr.execute("SELECT relkind FROM pg_class WHERE relkind IN ('v') AND relname=%s", (self._table,))
2776 if not cr.fetchall():
2777 self._m2o_add_foreign_key_unchecked(m2m_tbl, col1, self, 'cascade')
2779 cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (m2m_tbl, col1, m2m_tbl, col1))
2780 cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (m2m_tbl, col2, m2m_tbl, col2))
2781 cr.execute("COMMENT ON TABLE \"%s\" IS 'RELATION BETWEEN %s AND %s'" % (m2m_tbl, self._table, ref))
2783 _schema.debug("Create table '%s': m2m relation between '%s' and '%s'", m2m_tbl, self._table, ref)
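# Illustrative note: the relation table created above is the classic m2m
# bridge pattern: two NOT NULL integer columns, a UNIQUE(col1, col2) pair,
# one index per column, and ON DELETE CASCADE foreign keys towards both
# sides (skipped when a side is an SQL view, which cannot be the target of
# a foreign key).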
2786 def _add_sql_constraints(self, cr):
2789 Modify this model's database table constraints so they match the ones in
2790 _sql_constraints.
2791 """
2793 def unify_cons_text(txt):
2794 return txt.lower().replace(', ',',').replace(' (','(')
2796 for (key, con, _) in self._sql_constraints:
2797 conname = '%s_%s' % (self._table, key)
2799 self._save_constraint(cr, conname, 'u')
2800 cr.execute("SELECT conname, pg_catalog.pg_get_constraintdef(oid, true) as condef FROM pg_constraint where conname=%s", (conname,))
2801 existing_constraints = cr.dictfetchall()
2802 sql_actions = {
2803 'drop': {
2804 'execute': False,
2805 'query': 'ALTER TABLE "%s" DROP CONSTRAINT "%s"' % (self._table, conname, ),
2806 'msg_ok': "Table '%s': dropped constraint '%s'. Reason: its definition changed from '%%s' to '%s'" % (
2807 self._table, conname, con),
2808 'msg_err': "Table '%s': unable to drop \'%s\' constraint !" % (self._table, con),
2809 'order': 1,
2810 },
2811 'add': {
2812 'execute': False,
2813 'query': 'ALTER TABLE "%s" ADD CONSTRAINT "%s" %s' % (self._table, conname, con,),
2814 'msg_ok': "Table '%s': added constraint '%s' with definition=%s" % (self._table, conname, con),
2815 'msg_err': "Table '%s': unable to add \'%s\' constraint !\n If you want to have it, you should update the records and execute manually:\n%%s" % (
2816 self._table, con),
2817 'order': 2,
2818 },
2819 }
2821 if not existing_constraints:
2822 # constraint does not exist:
2823 sql_actions['add']['execute'] = True
2824 sql_actions['add']['msg_err'] = sql_actions['add']['msg_err'] % (sql_actions['add']['query'], )
2825 elif unify_cons_text(con) not in [unify_cons_text(item['condef']) for item in existing_constraints]:
2826 # constraint exists but its definition has changed:
2827 sql_actions['drop']['execute'] = True
2828 sql_actions['drop']['msg_ok'] = sql_actions['drop']['msg_ok'] % (existing_constraints[0]['condef'].lower(), )
2829 sql_actions['add']['execute'] = True
2830 sql_actions['add']['msg_err'] = sql_actions['add']['msg_err'] % (sql_actions['add']['query'], )
2832 # we need to add the constraint:
2833 sql_actions = [item for item in sql_actions.values()]
2834 sql_actions.sort(key=lambda x: x['order'])
2835 for sql_action in [action for action in sql_actions if action['execute']]:
2836 try:
2837 cr.execute(sql_action['query'])
2838 cr.commit()
2839 _schema.debug(sql_action['msg_ok'])
2840 except:
2841 _schema.warning(sql_action['msg_err'])
2842 cr.rollback()
2845 def _execute_sql(self, cr):
2846 """ Execute the SQL code from the _sql attribute (if any)."""
2847 if hasattr(self, "_sql"):
2848 for line in self._sql.split(';'):
2849 line2 = line.replace('\n', '').strip()
2850 if line2:
2851 cr.execute(line2)
2852 cr.commit()
2855 # Update objects that use this one to update their _inherits fields
2858 @classmethod
2859 def _inherits_reload_src(cls):
2860 """ Recompute the _inherit_fields mapping on each _inherits'd child model."""
2861 for model in cls.pool.values():
2862 if cls._name in model._inherits:
2863 model._inherits_reload()
2865 @classmethod
2866 def _inherits_reload(cls):
2867 """ Recompute the _inherit_fields mapping.
2869 This will also call itself on each inherits'd child model.
2870 """
2872 res = {}
2873 for table in cls._inherits:
2874 other = cls.pool[table]
2875 for col in other._columns.keys():
2876 res[col] = (table, cls._inherits[table], other._columns[col], table)
2877 for col in other._inherit_fields.keys():
2878 res[col] = (table, cls._inherits[table], other._inherit_fields[col][2], other._inherit_fields[col][3])
2879 cls._inherit_fields = res
2880 cls._all_columns = cls._get_column_infos()
2882 # interface columns with new-style fields
2883 for attr, column in cls._columns.items():
2884 if attr not in cls._fields:
2885 cls._add_field(attr, column.to_field())
2887 # interface inherited fields with new-style fields (note that the
2888 # reverse order is for being consistent with _all_columns above)
2889 for parent_model, parent_field in reversed(cls._inherits.items()):
2890 for attr, field in cls.pool[parent_model]._fields.iteritems():
2891 if attr not in cls._fields:
2892 new_field = field.copy(related=(parent_field, attr), _origin=field)
2893 cls._add_field(attr, new_field)
2895 cls._inherits_reload_src()
2897 @classmethod
2898 def _get_column_infos(cls):
2899 """Returns a dict mapping all fields names (direct fields and
2900 inherited field via _inherits) to a ``column_info`` struct
2901 giving detailed columns """
2902 result = {}
2903 # do not inverse for loops, since local fields may hide inherited ones!
2904 for k, (parent, m2o, col, original_parent) in cls._inherit_fields.iteritems():
2905 result[k] = fields.column_info(k, col, parent, m2o, original_parent)
2906 for k, col in cls._columns.iteritems():
2907 result[k] = fields.column_info(k, col)
2908 return result
2910 @classmethod
2911 def _inherits_check(cls):
2912 for table, field_name in cls._inherits.items():
2913 if field_name not in cls._columns:
2914 _logger.info('Missing many2one field definition for _inherits reference "%s" in "%s", using default one.', field_name, cls._name)
2915 cls._columns[field_name] = fields.many2one(table, string="Automatically created field to link to parent %s" % table,
2916 required=True, ondelete="cascade")
2917 elif not cls._columns[field_name].required or cls._columns[field_name].ondelete.lower() not in ("cascade", "restrict"):
2918 _logger.warning('Field definition for _inherits reference "%s" in "%s" must be marked as "required" with ondelete="cascade" or "restrict", forcing it to required + cascade.', field_name, cls._name)
2919 cls._columns[field_name].required = True
2920 cls._columns[field_name].ondelete = "cascade"
2922 # reflect fields with delegate=True in dictionary cls._inherits
2923 for field in cls._fields.itervalues():
2924 if field.type == 'many2one' and not field.related and field.delegate:
2925 if not field.required:
2926 _logger.warning("Field %s with delegate=True must be required.", field)
2927 field.required = True
2928 if field.ondelete.lower() not in ('cascade', 'restrict'):
2929 field.ondelete = 'cascade'
2930 cls._inherits[field.comodel_name] = field.name
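# Illustrative sketch (not part of the original source): with the new API,
# declaring on some model
#
#   partner_id = fields.Many2one('res.partner', required=True,
#                                ondelete='cascade', delegate=True)
#
# is reflected by the loop above as cls._inherits = {'res.partner': 'partner_id'},
# i.e. delegation inheritance declared field-side.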
2933 def _prepare_setup_fields(self):
2934 """ Prepare the setup of fields once the models have been loaded. """
2935 for field in self._fields.itervalues():
2936 field.reset()
2939 def _setup_fields(self):
2940 """ Setup the fields (dependency triggers, etc). """
2941 for field in self._fields.itervalues():
2942 field.setup(self.env)
2944 # group fields by compute to determine field.computed_fields
2945 fields_by_compute = defaultdict(list)
2946 for field in self._fields.itervalues():
2947 if field.compute:
2948 field.computed_fields = fields_by_compute[field.compute]
2949 field.computed_fields.append(field)
2950 else:
2951 field.computed_fields = []
2953 def fields_get(self, cr, user, allfields=None, context=None, write_access=True):
2954 """ Return the definition of each field.
2956 The returned value is a dictionary (indexed by field name) of
2957 dictionaries. The _inherits'd fields are included. The string, help,
2958 and selection (if present) attributes are translated.
2960 :param cr: database cursor
2961 :param user: current user id
2962 :param allfields: list of fields
2963 :param context: context arguments, like lang, time zone
2964 :return: dictionary of field dictionaries, each one describing a field of the business object
2965 :raise AccessError: * if user has no create/write rights on the requested object
2968 recs = self.browse(cr, user, [], context)
2970 res = {}
2971 for fname, field in self._fields.iteritems():
2972 if allfields and fname not in allfields:
2973 continue
2974 if field.groups and not recs.user_has_groups(field.groups):
2975 continue
2976 res[fname] = field.get_description(recs.env)
2978 # if user cannot create or modify records, make all fields readonly
2979 has_access = functools.partial(recs.check_access_rights, raise_exception=False)
2980 if not (has_access('write') or has_access('create')):
2981 for description in res.itervalues():
2982 description['readonly'] = True
2983 description['states'] = {}
2985 return res
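# Illustrative result shape (keys vary with the field type):
#   {'name': {'type': 'char', 'string': 'Name', ...},
#    'partner_id': {'type': 'many2one', 'relation': 'res.partner', ...}}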
2987 def get_empty_list_help(self, cr, user, help, context=None):
2988 """ Generic method giving the help message displayed when having
2989 no result to display in a list or kanban view. By default it returns
2990 the help given as parameter, which is generally the help message
2991 defined in the action.
2992 """
2993 return help
2995 def check_field_access_rights(self, cr, user, operation, fields, context=None):
2997 Check the user access rights on the given fields. This raises Access
2998 Denied if the user does not have the rights. Otherwise it returns the
2999 fields (as-is if `fields` is not falsy, or the readable/writable
3000 fields if `fields` is falsy).
3002 if user == SUPERUSER_ID:
3003 return fields or list(self._fields)
3005 def valid(fname):
3006 """ determine whether user has access to field `fname` """
3007 field = self._fields.get(fname)
3008 if field and field.groups:
3009 return self.user_has_groups(cr, user, groups=field.groups, context=context)
3010 else:
3011 return True
3013 if not fields:
3014 fields = filter(valid, self._fields)
3015 else:
3016 invalid_fields = list(set(filter(lambda name: not valid(name), fields)))
3017 if invalid_fields:
3018 _logger.warning('Access Denied by ACLs for operation: %s, uid: %s, model: %s, fields: %s',
3019 operation, user, self._name, ', '.join(invalid_fields))
3020 raise AccessError(
3021 _('The requested operation cannot be completed due to security restrictions. '
3022 'Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
3023 (self._description, operation))
3025 return fields
3027 # new-style implementation of read(); old-style is defined below
3028 @api.multi
3029 def read(self, fields=None, load='_classic_read'):
3030 """ Read the given fields for the records in `self`.
3032 :param fields: optional list of field names to return (default is
3033 all fields)
3034 :param load: deprecated, this argument is ignored
3035 :return: a list of dictionaries mapping field names to their values,
3036 with one dictionary per record
3037 :raise AccessError: if user has no read rights on some of the given
3038 records
3039 """
3040 # check access rights
3041 self.check_access_rights('read')
3042 fields = self.check_field_access_rights('read', fields)
3044 # split fields into stored and computed fields
3045 stored, computed = [], []
3046 for name in fields:
3047 if name in self._columns:
3048 stored.append(name)
3049 elif name in self._fields:
3050 computed.append(name)
3051 else:
3052 _logger.warning("%s.read() with unknown field '%s'", self._name, name)
3054 # fetch stored fields from the database to the cache
3055 self._read_from_database(stored)
3057 # retrieve results from records; this takes values from the cache and
3058 # computes remaining fields
3060 name_fields = [(name, self._fields[name]) for name in (stored + computed)]
3061 use_name_get = (load == '_classic_read')
3064 values = {'id': record.id}
3065 for name, field in name_fields:
3066 values[name] = field.convert_to_read(record[name], use_name_get)
3067 result.append(values)
3068 except MissingError:
3073 # add explicit old-style implementation to read()
3075 def read(self, cr, user, ids, fields=None, context=None, load='_classic_read'):
3076 records = self.browse(cr, user, ids, context)
3077 result = BaseModel.read(records, fields, load=load)
3078 return result if isinstance(ids, list) else (bool(result) and result[0])
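# Calling-convention sketch for the two styles of read() (ids and field
# names are hypothetical):
#
#   model.read(cr, uid, 42, ['name'])        # old API, scalar id -> one dict
#   model.read(cr, uid, [42, 43], ['name'])  # old API, list -> list of dicts
#   model.browse(cr, uid, [42, 43]).read(['name'])  # new API -> list of dicts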
3081 def _prefetch_field(self, field):
3082 """ Read from the database in order to fetch `field` (:class:`Field`
3083 instance) for `self` in cache.
3085 # fetch the records of this model without field_name in their cache
3086 records = self._in_cache_without(field)
3088 # by default, simply fetch field
3089 fnames = set((field.name,))
3092 # columns may be missing from database, do not prefetch other fields
3094 elif self.env.in_draft:
3095 # we may be doing an onchange, do not prefetch other fields
3097 elif field in self.env.todo:
3098 # field must be recomputed, do not prefetch records to recompute
3099 records -= self.env.todo[field]
3100 elif self._columns[field.name]._prefetch:
3101 # here we can optimize: prefetch all classic and many2one fields
3103 for fname, fcolumn in self._columns.iteritems()
3104 if fcolumn._prefetch)
3106 # fetch records with read()
3107 assert self in records and field.name in fnames
3109 result = records.read(list(fnames), load='_classic_write')
3110 except AccessError as e:
3111 # update cache with the exception
3112 records._cache[field] = FailedValue(e)
3115 # check the cache, and update it if necessary
3116 if field not in self._cache:
3117 for values in result:
3118 record = self.browse(values.pop('id'))
3119 record._cache.update(record._convert_to_cache(values))
3120 if field not in self._cache:
3121 e = AccessError("No value found for %s.%s" % (self, field.name))
3122 self._cache[field] = FailedValue(e)
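# Net effect (sketch): browsing several records and touching one field on
# any of them fills the cache for the whole set in a single query, e.g.:
#
#   records = model.browse(cr, uid, ids)  # nothing is fetched yet
#   records[0].name                       # triggers one read() covering all
#                                         # prefetchable columns of `records`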
3125 def _read_from_database(self, field_names):
3126 """ Read the given fields of the records in `self` from the database,
3127 and store them in cache. Access errors are also stored in cache.
3130 cr, user, context = env.args
3132 # Construct a clause for the security rules.
3133 # 'tables' holds the list of tables necessary for the SELECT, including
3134 # the ir.rule clauses, and contains at least self._table.
3135 rule_clause, rule_params, tables = env['ir.rule'].domain_get(self._name, 'read')
3137 # determine the fields that are stored as columns in self._table
3138 fields_pre = [f for f in field_names if self._columns[f]._classic_write]
3140 # we need fully-qualified column names in case len(tables) > 1
3142 if isinstance(self._columns.get(f), fields.binary) and \
3143 context.get('bin_size_%s' % f, context.get('bin_size')):
3144 # PG 9.2 introduces conflicting pg_size_pretty(numeric) -> need ::cast
3145 return 'pg_size_pretty(length(%s."%s")::bigint) as "%s"' % (self._table, f, f)
3147 return '%s."%s"' % (self._table, f)
3148 qual_names = map(qualify, set(fields_pre + ['id']))
3150 query = """ SELECT %(qual_names)s FROM %(tables)s
3151 WHERE %(table)s.id IN %%s AND (%(extra)s)
3152 ORDER BY %(order)s
3153 """ % {
3154 'qual_names': ",".join(qual_names),
3155 'tables': ",".join(tables),
3156 'table': self._table,
3157 'extra': " OR ".join(rule_clause) if rule_clause else "TRUE",
3158 'order': self._parent_order or self._order,
3162 for sub_ids in cr.split_for_in_conditions(self.ids):
3163 cr.execute(query, [tuple(sub_ids)] + rule_params)
3164 result.extend(cr.dictfetchall())
3166 ids = [vals['id'] for vals in result]
3169 # translate the fields if necessary
3170 if context.get('lang'):
3171 ir_translation = env['ir.translation']
3172 for f in fields_pre:
3173 if self._columns[f].translate:
3174 #TODO: optimize out of this loop
3175 res_trans = ir_translation._get_ids(
3176 '%s,%s' % (self._name, f), 'model', context['lang'], ids)
3178 vals[f] = res_trans.get(vals['id'], False) or vals[f]
3180 # apply the symbol_get functions of the fields we just read
3181 for f in fields_pre:
3182 symbol_get = self._columns[f]._symbol_get
3185 vals[f] = symbol_get(vals[f])
3187 # store result in cache for POST fields
3189 record = self.browse(vals['id'])
3190 record._cache.update(record._convert_to_cache(vals))
3192 # determine the fields that must be processed now
3193 fields_post = [f for f in field_names if not self._columns[f]._classic_write]
3195 # Compute POST fields, grouped by multi
3196 by_multi = defaultdict(list)
3197 for f in fields_post:
3198 by_multi[self._columns[f]._multi].append(f)
3200 for multi, fs in by_multi.iteritems():
3202 res2 = self._columns[fs[0]].get(cr, self._model, ids, fs, user, context=context, values=result)
3203 assert res2 is not None, \
3204 'The function field "%s" on the "%s" model returned None\n' \
3205 '(a dictionary was expected).' % (fs[0], self._name)
3207 # TOCHECK: why we got a string instead of a dict in Python 2.6
3208 # if isinstance(res2[vals['id']], str): res2[vals['id']] = eval(res2[vals['id']])
3209 multi_fields = res2.get(vals['id'], {})
3212 vals[f] = multi_fields.get(f, [])
3215 res2 = self._columns[f].get(cr, self._model, ids, f, user, context=context, values=result)
3218 vals[f] = res2[vals['id']]
3222 # Warn about deprecated fields now that fields_pre and fields_post are computed
3223 for f in field_names:
3224 column = self._columns[f]
3225 if column.deprecated:
3226 _logger.warning('Field %s.%s is deprecated: %s', self._name, f, column.deprecated)
3228 # store result in cache
3230 record = self.browse(vals.pop('id'))
3231 record._cache.update(record._convert_to_cache(vals))
3233 # store failed values in cache for the records that could not be read
3234 fetched = self.browse(ids)
3235 missing = self - fetched
3237 extras = fetched - self
3240 _("Database fetch misses ids ({}) and has extra ids ({}), may be caused by a type incoherence in a previous request").format(
3241 ', '.join(map(repr, missing._ids)),
3242 ', '.join(map(repr, extras._ids)),
3244 # store an access error exception in existing records
3246 _('The requested operation cannot be completed due to security restrictions. Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
3247 (self._name, 'read')
3249 forbidden = missing.exists()
3250 forbidden._cache.update(FailedValue(exc))
3251 # store a missing error exception in non-existing records
3253 _('One of the documents you are trying to access has been deleted, please try again after refreshing.')
3255 (missing - forbidden)._cache.update(FailedValue(exc))
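# Shape of the query built above (sketch; 'res_partner' is hypothetical):
#
#   SELECT "res_partner"."id", "res_partner"."name", ...
#   FROM "res_partner"[, <tables added by ir.rule>]
#   WHERE "res_partner".id IN %s AND (<rule clauses OR-joined, else TRUE>)
#   ORDER BY <self._parent_order or self._order>
#
# With bin_size in the context, binary columns are selected as
# pg_size_pretty(length(col)::bigint) instead of their payload.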
3258 def get_metadata(self):
3260 Returns some metadata about the given records.
3262 :return: list of ownership dictionaries for each requested record
3263 :rtype: list of dictionaries with the following keys:
3266 * create_uid: user who created the record
3267 * create_date: date when the record was created
3268 * write_uid: last user who changed the record
3269 * write_date: date of the last change to the record
3270 * xmlid: XML ID to use to refer to this record (if there is one), in format ``module.name``
3273 if self._log_access:
3274 fields += ['create_uid', 'create_date', 'write_uid', 'write_date']
3275 quoted_table = '"%s"' % self._table
3276 fields_str = ",".join('%s.%s' % (quoted_table, field) for field in fields)
3277 query = '''SELECT %s, __imd.module, __imd.name
3278 FROM %s LEFT JOIN ir_model_data __imd
3279 ON (__imd.model = %%s and __imd.res_id = %s.id)
3280 WHERE %s.id IN %%s''' % (fields_str, quoted_table, quoted_table, quoted_table)
3281 self._cr.execute(query, (self._name, tuple(self.ids)))
3282 res = self._cr.dictfetchall()
3284 uids = list(set(r[k] for r in res for k in ['write_uid', 'create_uid'] if r.get(k)))
3285 names = dict(self.env['res.users'].browse(uids).name_get())
3289 value = r[key] = r[key] or False
3290 if key in ('write_uid', 'create_uid') and value in names:
3291 r[key] = (value, names[value])
3292 r['xmlid'] = ("%(module)s.%(name)s" % r) if r['name'] else False
3293 del r['name'], r['module']
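# Example of one returned entry (values are illustrative):
#
#   {'id': 42,
#    'create_uid': (1, 'Administrator'), 'create_date': '2014-01-01 10:00:00',
#    'write_uid': (1, 'Administrator'), 'write_date': '2014-01-02 09:30:00',
#    'xmlid': 'base.partner_root'}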
3296 def _check_concurrency(self, cr, ids, context):
3299 if not (context.get(self.CONCURRENCY_CHECK_FIELD) and self._log_access):
3301 check_clause = "(id = %s AND %s < COALESCE(write_date, create_date, (now() at time zone 'UTC'))::timestamp)"
3302 for sub_ids in cr.split_for_in_conditions(ids):
3305 id_ref = "%s,%s" % (self._name, id)
3306 update_date = context[self.CONCURRENCY_CHECK_FIELD].pop(id_ref, None)
3308 ids_to_check.extend([id, update_date])
3309 if not ids_to_check:
3311 cr.execute("SELECT id FROM %s WHERE %s" % (self._table, " OR ".join([check_clause]*(len(ids_to_check)/2))), tuple(ids_to_check))
3314 # mention the first one only to keep the error message readable
3315 raise except_orm('ConcurrencyException', _('A document was modified since you last viewed it (%s:%d)') % (self._description, res[0]))
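# Clients opt into this check by sending the last-known write dates in the
# context, keyed as '<model>,<id>' as built by id_ref above (sketch;
# values are illustrative):
#
#   ctx = {self.CONCURRENCY_CHECK_FIELD: {'res.partner,42': '2014-01-02 09:30:00'}}
#   # a later write()/unlink() with ctx raises 'ConcurrencyException' if the
#   # row was modified after that timestamp.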
3317 def _check_record_rules_result_count(self, cr, uid, ids, result_ids, operation, context=None):
3318 """Verify the returned rows after applying record rules matches
3319 the length of `ids`, and raise an appropriate exception if it does not.
3323 ids, result_ids = set(ids), set(result_ids)
3324 missing_ids = ids - result_ids
3326 # Attempt to distinguish record rule restriction vs deleted records,
3327 # to provide a more specific error message - check whether the missing records still exist
3328 cr.execute('SELECT id FROM ' + self._table + ' WHERE id IN %s', (tuple(missing_ids),))
3329 forbidden_ids = [x[0] for x in cr.fetchall()]
3331 # the missing ids are (at least partially) hidden by access rules
3332 if uid == SUPERUSER_ID:
3334 _logger.warning('Access Denied by record rules for operation: %s on record ids: %r, uid: %s, model: %s', operation, forbidden_ids, uid, self._name)
3335 raise except_orm(_('Access Denied'),
3336 _('The requested operation cannot be completed due to security restrictions. Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
3337 (self._description, operation))
3339 # If we get here, the missing_ids are not in the database
3340 if operation in ('read','unlink'):
3341 # No need to warn about deleting an already deleted record.
3342 # And no error when reading a record that was deleted, to prevent spurious
3343 # errors for non-transactional search/read sequences coming from clients
3345 _logger.warning('Failed operation on deleted record(s): %s, uid: %s, model: %s', operation, uid, self._name)
3346 raise except_orm(_('Missing document(s)'),
3347 _('One of the documents you are trying to access has been deleted, please try again after refreshing.'))
3350 def check_access_rights(self, cr, uid, operation, raise_exception=True): # no context on purpose.
3351 """Verifies that the operation given by ``operation`` is allowed for the user
3352 according to the access rights."""
3353 return self.pool.get('ir.model.access').check(cr, uid, self._name, operation, raise_exception)
3355 def check_access_rule(self, cr, uid, ids, operation, context=None):
3356 """Verifies that the operation given by ``operation`` is allowed for the user
3357 according to ir.rules.
3359 :param operation: one of ``write``, ``unlink``
3360 :raise except_orm: * if current ir.rules do not permit this operation.
3361 :return: None if the operation is allowed
3363 if uid == SUPERUSER_ID:
3366 if self.is_transient():
3367 # Only one single implicit access rule for transient models: owner only!
3368 # This is ok to hardcode because we assert that TransientModels always
3369 # have log_access enabled so that the create_uid column is always there.
3370 # And even with _inherits, these fields are always present in the local
3371 # table too, so no need for JOINs.
3372 cr.execute("""SELECT distinct create_uid
3373 FROM %s
3374 WHERE id IN %%s""" % self._table, (tuple(ids),))
3375 uids = [x[0] for x in cr.fetchall()]
3376 if len(uids) != 1 or uids[0] != uid:
3377 raise except_orm(_('Access Denied'),
3378 _('For this kind of document, you may only access records you created yourself.\n\n(Document type: %s)') % (self._description,))
3380 where_clause, where_params, tables = self.pool.get('ir.rule').domain_get(cr, uid, self._name, operation, context=context)
3382 where_clause = ' and ' + ' and '.join(where_clause)
3383 for sub_ids in cr.split_for_in_conditions(ids):
3384 cr.execute('SELECT ' + self._table + '.id FROM ' + ','.join(tables) +
3385 ' WHERE ' + self._table + '.id IN %s' + where_clause,
3386 [sub_ids] + where_params)
3387 returned_ids = [x['id'] for x in cr.dictfetchall()]
3388 self._check_record_rules_result_count(cr, uid, sub_ids, returned_ids, operation, context=context)
3390 def create_workflow(self, cr, uid, ids, context=None):
3391 """Create a workflow instance for each given record IDs."""
3392 from openerp import workflow
3393 for res_id in ids:
3394 workflow.trg_create(uid, self._name, res_id, cr)
3395 # self.invalidate_cache(cr, uid, context=context) ?
3398 def delete_workflow(self, cr, uid, ids, context=None):
3399 """Delete the workflow instances bound to the given record IDs."""
3400 from openerp import workflow
3401 for res_id in ids:
3402 workflow.trg_delete(uid, self._name, res_id, cr)
3403 self.invalidate_cache(cr, uid, context=context)
3406 def step_workflow(self, cr, uid, ids, context=None):
3407 """Reevaluate the workflow instances of the given record IDs."""
3408 from openerp import workflow
3409 for res_id in ids:
3410 workflow.trg_write(uid, self._name, res_id, cr)
3411 # self.invalidate_cache(cr, uid, context=context) ?
3414 def signal_workflow(self, cr, uid, ids, signal, context=None):
3415 """Send given workflow signal and return a dict mapping ids to workflow results"""
3416 from openerp import workflow
3417 result = {}
3418 for res_id in ids:
3419 result[res_id] = workflow.trg_validate(uid, self._name, res_id, signal, cr)
3420 # self.invalidate_cache(cr, uid, context=context) ?
3423 def redirect_workflow(self, cr, uid, old_new_ids, context=None):
3424 """ Rebind the workflow instance bound to the given 'old' record IDs to
3425 the given 'new' IDs (``old_new_ids`` is a list of pairs ``(old, new)``).
3427 from openerp import workflow
3428 for old_id, new_id in old_new_ids:
3429 workflow.trg_redirect(uid, self._name, old_id, new_id, cr)
3430 self.invalidate_cache(cr, uid, context=context)
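# Typical life cycle of these helpers (sketch; 'button_confirm' is a
# hypothetical signal defined by some workflow):
#
#   model.create_workflow(cr, uid, [rec_id])    # instantiate the workflow
#   res = model.signal_workflow(cr, uid, [rec_id], 'button_confirm')
#   # res maps each id to the result of workflow.trg_validate()
#   model.delete_workflow(cr, uid, [rec_id])    # drop the instances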
3433 def unlink(self, cr, uid, ids, context=None):
3435 Delete records with given ids
3437 :param cr: database cursor
3438 :param uid: current user id
3439 :param ids: id or list of ids
3440 :param context: (optional) context arguments, like lang, time zone
3442 :raise AccessError: * if user has no unlink rights on the requested object
3443 * if user tries to bypass access rules for unlink on the requested object
3444 :raise UserError: if the record is a default property for other records
3449 if isinstance(ids, (int, long)):
3452 result_store = self._store_get_values(cr, uid, ids, self._all_columns.keys(), context)
3454 # for recomputing new-style fields
3455 recs = self.browse(cr, uid, ids, context)
3456 recs.modified(self._fields)
3458 self._check_concurrency(cr, ids, context)
3460 self.check_access_rights(cr, uid, 'unlink')
3462 ir_property = self.pool.get('ir.property')
3464 # Check if the records are used as default properties.
3465 domain = [('res_id', '=', False),
3466 ('value_reference', 'in', ['%s,%s' % (self._name, i) for i in ids]),
3468 if ir_property.search(cr, uid, domain, context=context):
3469 raise except_orm(_('Error'), _('Unable to delete this document because it is used as a default property'))
3471 # Delete the records' properties.
3472 property_ids = ir_property.search(cr, uid, [('res_id', 'in', ['%s,%s' % (self._name, i) for i in ids])], context=context)
3473 ir_property.unlink(cr, uid, property_ids, context=context)
3475 self.delete_workflow(cr, uid, ids, context=context)
3477 self.check_access_rule(cr, uid, ids, 'unlink', context=context)
3478 pool_model_data = self.pool.get('ir.model.data')
3479 ir_values_obj = self.pool.get('ir.values')
3480 for sub_ids in cr.split_for_in_conditions(ids):
3481 cr.execute('delete from ' + self._table + ' ' \
3482 'where id IN %s', (sub_ids,))
3484 # Remove the ir_model_data reference when the record being deleted was created from an xml/csv file,
3485 # as these references are not backed by real database foreign keys, and would become dangling.
3486 # Note: the following steps are performed as admin to avoid access rights restrictions, and with no context
3487 # to avoid possible side-effects during admin calls.
3488 # Step 1. Calling unlink of ir_model_data only for the affected IDS
3489 reference_ids = pool_model_data.search(cr, SUPERUSER_ID, [('res_id','in',list(sub_ids)),('model','=',self._name)])
3490 # Step 2. Perform the actual deletion of the referenced ir.model.data records
3492 pool_model_data.unlink(cr, SUPERUSER_ID, reference_ids)
3494 # For the same reason, removing the record relevant to ir_values
3495 ir_value_ids = ir_values_obj.search(cr, uid,
3496 ['|',('value','in',['%s,%s' % (self._name, sid) for sid in sub_ids]),'&',('res_id','in',list(sub_ids)),('model','=',self._name)],
3499 ir_values_obj.unlink(cr, uid, ir_value_ids, context=context)
3501 # invalidate the *whole* cache, since the orm does not handle all
3502 # changes made in the database, like cascading delete!
3503 recs.invalidate_cache()
3505 for order, obj_name, store_ids, fields in result_store:
3506 if obj_name == self._name:
3507 effective_store_ids = list(set(store_ids) - set(ids))
3509 effective_store_ids = store_ids
3510 if effective_store_ids:
3511 obj = self.pool[obj_name]
3512 cr.execute('select id from '+obj._table+' where id IN %s', (tuple(effective_store_ids),))
3513 rids = map(lambda x: x[0], cr.fetchall())
3515 obj._store_set_values(cr, uid, rids, fields, context)
3517 # recompute new-style fields
3526 def write(self, vals):
3528 Update records in `self` with the given field values.
3530 :param vals: field values to update, e.g {'field_name': new_field_value, ...}
3531 :type vals: dictionary
3533 :raise AccessError: * if user has no write rights on the requested object
3534 * if user tries to bypass access rules for write on the requested object
3535 :raise ValidateError: if user tries to enter an invalid value for a selection field (a value not in the selection list)
3536 :raise UserError: if a loop would be created in a hierarchy of objects as a result of the operation (such as setting an object as its own parent)
3538 **Note**: The type of field values to pass in ``vals`` for relationship fields is specific:
3540 + For a many2many field, a list of tuples is expected.
3541 Here is the list of tuples that are accepted, with the corresponding semantics ::
3543 (0, 0, { values }) link to a new record that needs to be created with the given values dictionary
3544 (1, ID, { values }) update the linked record with id = ID (write *values* on it)
3545 (2, ID) remove and delete the linked record with id = ID (calls unlink on ID, which will delete the object completely, and the link to it as well)
3546 (3, ID) cut the link to the linked record with id = ID (delete the relationship between the two objects but does not delete the target object itself)
3547 (4, ID) link to existing record with id = ID (adds a relationship)
3548 (5) unlink all (like using (3,ID) for all linked records)
3549 (6, 0, [IDs]) replace the list of linked IDs (like using (5) then (4,ID) for each ID in the list of IDs)
3552 [(6, 0, [8, 5, 6, 4])] sets the many2many to ids [8, 5, 6, 4]
3554 + For a one2many field, a list of tuples is expected.
3555 Here is the list of tuples that are accepted, with the corresponding semantics ::
3557 (0, 0, { values }) link to a new record that needs to be created with the given values dictionary
3558 (1, ID, { values }) update the linked record with id = ID (write *values* on it)
3559 (2, ID) remove and delete the linked record with id = ID (calls unlink on ID, which will delete the object completely, and the link to it as well)
3562 [(0, 0, {'field_name':field_value_record1, ...}), (0, 0, {'field_name':field_value_record2, ...})]
3564 + For a many2one field, simply use the ID of target record, which must already exist, or ``False`` to remove the link.
3565 + For a reference field, use a string with the model name, a comma, and the target object id (example: ``'product.product, 5'``)
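For example, replacing the tags of a record through a hypothetical
many2many field ``tag_ids`` while clearing a many2one ``parent_id``::

record.write({'tag_ids': [(6, 0, [1, 2, 3])], 'parent_id': False})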
3571 cr, uid, context = self.env.args
3572 self._check_concurrency(self._ids)
3573 self.check_access_rights('write')
3575 # No user-driven update of these columns
3576 for field in itertools.chain(MAGIC_COLUMNS, ('parent_left', 'parent_right')):
3577 vals.pop(field, None)
3579 # split up fields into old-style and pure new-style ones
3580 old_vals, new_vals, unknown = {}, {}, []
3581 for key, val in vals.iteritems():
3582 if key in self._columns:
3584 elif key in self._fields:
3590 _logger.warning("%s.write() with unknown fields: %s", self._name, ', '.join(sorted(unknown)))
3592 # write old-style fields with (low-level) method _write
3594 self._write(old_vals)
3596 # put the values of pure new-style fields into cache, and inverse them
3598 self._cache.update(self._convert_to_cache(new_vals))
3599 for key in new_vals:
3600 self._fields[key].determine_inverse(self)
3604 def _write(self, cr, user, ids, vals, context=None):
3605 # low-level implementation of write()
3610 self.check_field_access_rights(cr, user, 'write', vals.keys())
3611 for field in vals.keys():
3613 if field in self._columns:
3614 fobj = self._columns[field]
3615 elif field in self._inherit_fields:
3616 fobj = self._inherit_fields[field][2]
3623 for group in groups:
3624 module = group.split(".")[0]
3625 grp = group.split(".")[1]
3626 cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name=%s and module=%s and model=%s) and uid=%s", \
3627 (grp, module, 'res.groups', user))
3628 readonly = cr.fetchall()
3629 if readonly[0][0] >= 1:
3636 result = self._store_get_values(cr, user, ids, vals.keys(), context) or []
3638 # for recomputing new-style fields
3639 recs = self.browse(cr, user, ids, context)
3640 modified_fields = list(vals)
3641 if self._log_access:
3642 modified_fields += ['write_date', 'write_uid']
3643 recs.modified(modified_fields)
3645 parents_changed = []
3646 parent_order = self._parent_order or self._order
3647 if self._parent_store and (self._parent_name in vals) and not context.get('defer_parent_store_computation'):
3648 # The parent_left/right computation may take up to
3649 # 5 seconds. No need to recompute the values if the
3650 # parent is the same.
3651 # Note: to respect parent_order, nodes must be processed in
3652 # order, so ``parents_changed`` must be ordered properly.
3653 parent_val = vals[self._parent_name]
3655 query = "SELECT id FROM %s WHERE id IN %%s AND (%s != %%s OR %s IS NULL) ORDER BY %s" % \
3656 (self._table, self._parent_name, self._parent_name, parent_order)
3657 cr.execute(query, (tuple(ids), parent_val))
3659 query = "SELECT id FROM %s WHERE id IN %%s AND (%s IS NOT NULL) ORDER BY %s" % \
3660 (self._table, self._parent_name, parent_order)
3661 cr.execute(query, (tuple(ids),))
3662 parents_changed = map(operator.itemgetter(0), cr.fetchall())
3669 totranslate = context.get('lang', False) and (context['lang'] != 'en_US')
3671 field_column = self._all_columns.get(field) and self._all_columns.get(field).column
3672 if field_column and field_column.deprecated:
3673 _logger.warning('Field %s.%s is deprecated: %s', self._name, field, field_column.deprecated)
3674 if field in self._columns:
3675 if self._columns[field]._classic_write and not (hasattr(self._columns[field], '_fnct_inv')):
3676 if (not totranslate) or not self._columns[field].translate:
3677 upd0.append('"'+field+'"='+self._columns[field]._symbol_set[0])
3678 upd1.append(self._columns[field]._symbol_set[1](vals[field]))
3679 direct.append(field)
3681 upd_todo.append(field)
3683 updend.append(field)
3684 if field in self._columns \
3685 and hasattr(self._columns[field], 'selection') \
3687 self._check_selection_field_value(cr, user, field, vals[field], context=context)
3689 if self._log_access:
3690 upd0.append('write_uid=%s')
3691 upd0.append("write_date=(now() at time zone 'UTC')")
3695 self.check_access_rule(cr, user, ids, 'write', context=context)
3696 for sub_ids in cr.split_for_in_conditions(ids):
3697 cr.execute('update ' + self._table + ' set ' + ','.join(upd0) + ' ' \
3698 'where id IN %s', upd1 + [sub_ids])
3699 if cr.rowcount != len(sub_ids):
3700 raise MissingError(_('One of the records you are trying to modify has already been deleted (Document type: %s).') % self._description)
3705 if self._columns[f].translate:
3706 src_trans = self.pool[self._name].read(cr, user, ids, [f])[0][f]
3709 # Insert the value into the DB
3710 context_wo_lang = dict(context, lang=None)
3711 self.write(cr, user, ids, {f: vals[f]}, context=context_wo_lang)
3712 self.pool.get('ir.translation')._set_ids(cr, user, self._name+','+f, 'model', context['lang'], ids, vals[f], src_trans)
3714 # call the 'set' method of fields which are not classic_write
3715 upd_todo.sort(lambda x, y: self._columns[x].priority-self._columns[y].priority)
3717 # default elements in context must be removed when calling a one2many or many2many
3718 rel_context = context.copy()
3719 for c in context.items():
3720 if c[0].startswith('default_'):
3721 del rel_context[c[0]]
3723 for field in upd_todo:
3725 result += self._columns[field].set(cr, self, id, field, vals[field], user, context=rel_context) or []
3727 unknown_fields = updend[:]
3728 for table in self._inherits:
3729 col = self._inherits[table]
3731 for sub_ids in cr.split_for_in_conditions(ids):
3732 cr.execute('select distinct "'+col+'" from "'+self._table+'" ' \
3733 'where id IN %s', (sub_ids,))
3734 nids.extend([x[0] for x in cr.fetchall()])
3738 if self._inherit_fields[val][0] == table:
3740 unknown_fields.remove(val)
3742 self.pool[table].write(cr, user, nids, v, context)
3746 'No such field(s) in model %s: %s.',
3747 self._name, ', '.join(unknown_fields))
3749 # check Python constraints
3750 recs._validate_fields(vals)
3752 # TODO: use _order to set dest at the right position and not the first node of the parent
3753 # We can't defer parent_store computation because the stored function
3754 # fields that are computed may refer (directly or indirectly) to
3755 # parent_left/right (via a child_of domain)
3758 self.pool._init_parent[self._name] = True
3760 order = self._parent_order or self._order
3761 parent_val = vals[self._parent_name]
3763 clause, params = '%s=%%s' % (self._parent_name,), (parent_val,)
3765 clause, params = '%s IS NULL' % (self._parent_name,), ()
3767 for id in parents_changed:
3768 cr.execute('SELECT parent_left, parent_right FROM %s WHERE id=%%s' % (self._table,), (id,))
3769 pleft, pright = cr.fetchone()
3770 distance = pright - pleft + 1
3772 # Positions of current siblings, to locate proper insertion point;
3773 # this can _not_ be fetched outside the loop, as it needs to be refreshed
3774 # after each update, in case several nodes are sequentially inserted one
3775 # next to the other (i.e. computed incrementally)
3776 cr.execute('SELECT parent_right, id FROM %s WHERE %s ORDER BY %s' % (self._table, clause, parent_order), params)
3777 parents = cr.fetchall()
3779 # Find the position of the element
3781 for (parent_pright, parent_id) in parents:
3784 position = parent_pright and parent_pright + 1 or 1
3786 # It's the first node of the parent
3791 cr.execute('select parent_left from '+self._table+' where id=%s', (parent_val,))
3792 position = cr.fetchone()[0] + 1
3794 if pleft < position <= pright:
3795 raise except_orm(_('UserError'), _('Recursivity Detected.'))
3797 if pleft < position:
3798 cr.execute('update '+self._table+' set parent_left=parent_left+%s where parent_left>=%s', (distance, position))
3799 cr.execute('update '+self._table+' set parent_right=parent_right+%s where parent_right>=%s', (distance, position))
3800 cr.execute('update '+self._table+' set parent_left=parent_left+%s, parent_right=parent_right+%s where parent_left>=%s and parent_left<%s', (position-pleft, position-pleft, pleft, pright))
3802 cr.execute('update '+self._table+' set parent_left=parent_left+%s where parent_left>=%s', (distance, position))
3803 cr.execute('update '+self._table+' set parent_right=parent_right+%s where parent_right>=%s', (distance, position))
3804 cr.execute('update '+self._table+' set parent_left=parent_left-%s, parent_right=parent_right-%s where parent_left>=%s and parent_left<%s', (pleft-position+distance, pleft-position+distance, pleft+distance, pright+distance))
3805 recs.invalidate_cache(['parent_left', 'parent_right'])
3807 result += self._store_get_values(cr, user, ids, vals.keys(), context)
3810 # for recomputing new-style fields
3811 recs.modified(modified_fields)
3814 for order, model_name, ids_to_update, fields_to_recompute in result:
3815 key = (model_name, tuple(fields_to_recompute))
3816 done.setdefault(key, {})
3817 # avoid to do several times the same computation
3819 for id in ids_to_update:
3820 if id not in done[key]:
3821 done[key][id] = True
3823 self.pool[model_name]._store_set_values(cr, user, todo, fields_to_recompute, context)
3825 # recompute new-style fields
3826 if context.get('recompute', True):
3829 self.step_workflow(cr, user, ids, context=context)
3833 # TODO: Should set perm to user.xxx
3836 @api.returns('self', lambda value: value.id)
3837 def create(self, vals):
3838 """ Create a new record for the model.
3840 The values for the new record are initialized using the dictionary
3841 `vals`, and if necessary the result of :meth:`default_get`.
3843 :param vals: field values like ``{'field_name': field_value, ...}``,
3844 see :meth:`write` for details about the values format
3845 :return: new record created
3846 :raise AccessError: * if user has no create rights on the requested object
3847 * if user tries to bypass access rules for create on the requested object
3848 :raise ValidateError: if user tries to enter an invalid value for a selection field (a value not in the selection list)
3849 :raise UserError: if a loop would be created in a hierarchy of objects as a result of the operation (such as setting an object as its own parent)
3851 self.check_access_rights('create')
3853 # add missing defaults, and drop fields that may not be set by user
3854 vals = self._add_missing_default_values(vals)
3855 for field in itertools.chain(MAGIC_COLUMNS, ('parent_left', 'parent_right')):
3856 vals.pop(field, None)
3858 # split up fields into old-style and pure new-style ones
3859 old_vals, new_vals, unknown = {}, {}, []
3860 for key, val in vals.iteritems():
3861 if key in self._all_columns:
3863 elif key in self._fields:
3869 _logger.warning("%s.create() with unknown fields: %s", self._name, ', '.join(sorted(unknown)))
3871 # create record with old-style fields
3872 record = self.browse(self._create(old_vals))
3874 # put the values of pure new-style fields into cache, and inverse them
3875 record._cache.update(record._convert_to_cache(new_vals))
3876 for key in new_vals:
3877 self._fields[key].determine_inverse(record)
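# Usage sketch (record-style API; model name and values are hypothetical):
#
#   partner = env['res.partner'].create({'name': 'Foo'})
#   # old-style columns go through _create() below, while pure new-style
#   # fields are pushed into the cache and written via their inverse methods.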
3881 def _create(self, cr, user, vals, context=None):
3882 # low-level implementation of create()
3886 if self.is_transient():
3887 self._transient_vacuum(cr, user)
3890 for v in self._inherits:
3891 if self._inherits[v] not in vals:
3894 tocreate[v] = {'id': vals[self._inherits[v]]}
3897 # list of column assignments defined as tuples like:
3898 # (column_name, format_string, column_value)
3899 # (column_name, sql_formula)
3900 # Those tuples will be used by the string formatting for the INSERT
3902 ('id', "nextval('%s')" % self._sequence),
3907 for v in vals.keys():
3908 if v in self._inherit_fields and v not in self._columns:
3909 (table, col, col_detail, original_parent) = self._inherit_fields[v]
3910 tocreate[table][v] = vals[v]
3913 if (v not in self._inherit_fields) and (v not in self._columns):
3915 unknown_fields.append(v)
3918 'No such field(s) in model %s: %s.',
3919 self._name, ', '.join(unknown_fields))
3921 for table in tocreate:
3922 if self._inherits[table] in vals:
3923 del vals[self._inherits[table]]
3925 record_id = tocreate[table].pop('id', None)
3927 if isinstance(record_id, dict):
3928 # Shit happens: this possibly comes from a new record
3929 tocreate[table] = dict(record_id, **tocreate[table])
3932 # When linking/creating parent records, force a context without the 'no_store_function' key that
3933 # defers stored function computation, as these fields won't be computed in batch at the end of create().
3934 parent_context = dict(context)
3935 parent_context.pop('no_store_function', None)
3937 if record_id is None or not record_id:
3938 record_id = self.pool[table].create(cr, user, tocreate[table], context=parent_context)
3940 self.pool[table].write(cr, user, [record_id], tocreate[table], context=parent_context)
3942 updates.append((self._inherits[table], '%s', record_id))
3944 # Start: set bool fields to False if they are not touched (to make search more powerful)
3945 bool_fields = [x for x in self._columns.keys() if self._columns[x]._type=='boolean']
3947 for bool_field in bool_fields:
3948 if bool_field not in vals:
3949 vals[bool_field] = False
3951 for field in vals.keys():
3953 if field in self._columns:
3954 fobj = self._columns[field]
3956 fobj = self._inherit_fields[field][2]
3962 for group in groups:
3963 module = group.split(".")[0]
3964 grp = group.split(".")[1]
3965 cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name='%s' and module='%s' and model='%s') and uid=%s" % \
3966 (grp, module, 'res.groups', user))
3967 readonly = cr.fetchall()
3968 if readonly[0][0] >= 1:
3971 elif readonly[0][0] == 0:
3979 current_field = self._columns[field]
3980 if current_field._classic_write:
3981 updates.append((field, '%s', current_field._symbol_set[1](vals[field])))
3983 #for the function fields that receive a value, we set them directly in the database
3984 #(they may be required), but we also need to trigger the _fnct_inv()
3985 if (hasattr(current_field, '_fnct_inv')) and not isinstance(current_field, fields.related):
3986 #TODO: this way of special-casing related fields is really creepy, but it shouldn't be changed
3987 #one week before the release candidate. The only good way to handle this correctly seems to be to add an
3988 #attribute making a field 'really readonly' and thus totally ignored by create()... otherwise
3989 #if, for example, the related field has a default value (for usability) then _fnct_inv is called and it
3990 #may raise an access rights error. Changing this is too big a change for now, and is thus postponed
3991 #until after the release; but, definitely, the behavior shouldn't differ between related and function fields.
3993 upd_todo.append(field)
3995 #TODO: this 'if' statement should be removed because there is no good reason to special-case
3996 #related fields. See the above TODO comment for further explanations.
3997 if not isinstance(current_field, fields.related):
3998 upd_todo.append(field)
3999 if field in self._columns \
4000 and hasattr(current_field, 'selection') \
4002 self._check_selection_field_value(cr, user, field, vals[field], context=context)
4003 if self._log_access:
4004 updates.append(('create_uid', '%s', user))
4005 updates.append(('write_uid', '%s', user))
4006 updates.append(('create_date', "(now() at time zone 'UTC')"))
4007 updates.append(('write_date', "(now() at time zone 'UTC')"))
4009 # the list of tuples used in this formatting corresponds to
4010 # tuple(field_name, format, value)
4011 # In some cases, for example (id, create_date, write_date), we do not
4012 # need to read the third value of the tuple, because the real value is
4013 # encoded in the second value (the format).
4015 """INSERT INTO "%s" (%s) VALUES(%s) RETURNING id""" % (
4017 ', '.join('"%s"' % u[0] for u in updates),
4018 ', '.join(u[1] for u in updates)
4020 tuple([u[2] for u in updates if len(u) > 2])
4023 id_new, = cr.fetchone()
4024 recs = self.browse(cr, user, id_new, context)
4025 upd_todo.sort(lambda x, y: self._columns[x].priority-self._columns[y].priority)
4027 if self._parent_store and not context.get('defer_parent_store_computation'):
4029 self.pool._init_parent[self._name] = True
4031 parent = vals.get(self._parent_name, False)
4033 cr.execute('select parent_right from '+self._table+' where '+self._parent_name+'=%s order by '+(self._parent_order or self._order), (parent,))
4035 result_p = cr.fetchall()
4036 for (pleft,) in result_p:
4041 cr.execute('select parent_left from '+self._table+' where id=%s', (parent,))
4042 pleft_old = cr.fetchone()[0]
4045 cr.execute('select max(parent_right) from '+self._table)
4046 pleft = cr.fetchone()[0] or 0
4047 cr.execute('update '+self._table+' set parent_left=parent_left+2 where parent_left>%s', (pleft,))
4048 cr.execute('update '+self._table+' set parent_right=parent_right+2 where parent_right>%s', (pleft,))
4049 cr.execute('update '+self._table+' set parent_left=%s,parent_right=%s where id=%s', (pleft+1, pleft+2, id_new))
4050 recs.invalidate_cache(['parent_left', 'parent_right'])
4052 # default elements in context must be removed when calling a one2many or many2many
4053 rel_context = context.copy()
4054 for c in context.items():
4055 if c[0].startswith('default_'):
4056 del rel_context[c[0]]
4059 for field in upd_todo:
4060 result += self._columns[field].set(cr, self, id_new, field, vals[field], user, rel_context) or []
4062 # check Python constraints
4063 recs._validate_fields(vals)
4065 if not context.get('no_store_function', False):
4066 result += self._store_get_values(cr, user, [id_new],
4067 list(set(vals.keys() + self._inherits.values())),
4071 for order, model_name, ids, fields2 in result:
4072 if not (model_name, ids, fields2) in done:
4073 self.pool[model_name]._store_set_values(cr, user, ids, fields2, context)
4074 done.append((model_name, ids, fields2))
4076 # recompute new-style fields
4077 modified_fields = list(vals)
4078 if self._log_access:
4079 modified_fields += ['create_uid', 'create_date', 'write_uid', 'write_date']
4080 recs.modified(modified_fields)
4083 if self._log_create and not (context and context.get('no_store_function', False)):
4084 message = self._description + \
4086 self.name_get(cr, user, [id_new], context=context)[0][1] + \
4087 "' " + _("created.")
4088 self.log(cr, user, id_new, message, True, context=context)
4090 self.check_access_rule(cr, user, [id_new], 'create', context=context)
4091 self.create_workflow(cr, user, [id_new], context=context)
4094 def _store_get_values(self, cr, uid, ids, fields, context):
4095 """Returns an ordered list of fields.function to call due to
4096 an update operation on ``fields`` of records with ``ids``,
4097 obtained by calling the 'store' triggers of these fields,
4098 as set up by their 'store' attribute.
4100 :return: [(priority, model_name, [record_ids,], [function_fields,])]
4102 if fields is None: fields = []
4103 stored_functions = self.pool._store_function.get(self._name, [])
4105 # use indexed names for the details of the stored_functions:
4106 model_name_, func_field_to_compute_, target_ids_func_, trigger_fields_, priority_ = range(5)
4108 # only keep store triggers that should be triggered for the ``fields``
4110 triggers_to_compute = (
4111 f for f in stored_functions
4112 if not f[trigger_fields_] or set(fields).intersection(f[trigger_fields_])
4116 target_id_results = {}
4117 for store_trigger in triggers_to_compute:
4118 target_func_id_ = id(store_trigger[target_ids_func_])
4119 if target_func_id_ not in target_id_results:
4120 # use admin user for accessing objects having rules defined on store fields
4121 target_id_results[target_func_id_] = [i for i in store_trigger[target_ids_func_](self, cr, SUPERUSER_ID, ids, context) if i]
4122 target_ids = target_id_results[target_func_id_]
4124 # the compound key must consider the priority and model name
4125 key = (store_trigger[priority_], store_trigger[model_name_])
4126 for target_id in target_ids:
4127 to_compute_map.setdefault(key, {}).setdefault(target_id,set()).add(tuple(store_trigger))
4129 # Here to_compute_map looks like:
4130 # { (10, 'model_a') : { target_id1: [ (trigger_1_tuple, trigger_2_tuple) ], ... }
4131 # (20, 'model_a') : { target_id2: [ (trigger_3_tuple, trigger_4_tuple) ], ... }
4132 # (99, 'model_a') : { target_id1: [ (trigger_5_tuple, trigger_6_tuple) ], ... }
4135 # Now we need to generate the batch function calls list
4137 # { (10, 'model_a') : [(10, 'model_a', [record_ids,], [function_fields,])] }
4139 for ((priority,model), id_map) in to_compute_map.iteritems():
4140 trigger_ids_maps = {}
4141 # trigger_ids_maps =
4142 # { (trigger_1_tuple, trigger_2_tuple) : [target_id1, target_id2, ..] }
4143 for target_id, triggers in id_map.iteritems():
4144 trigger_ids_maps.setdefault(tuple(triggers), []).append(target_id)
4145 for triggers, target_ids in trigger_ids_maps.iteritems():
4146 call_map.setdefault((priority,model),[]).append((priority, model, target_ids,
4147 [t[func_field_to_compute_] for t in triggers]))
4150 result = reduce(operator.add, (call_map[k] for k in sorted(call_map)))
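# Example of the ordered result (model names, ids and priorities invented):
#
#   [(10, 'account.move', [7, 8], ['amount_total']),
#    (20, 'account.move.line', [1, 2, 3], ['debit', 'credit'])]
#
# i.e. one batched call per (priority, model, identical trigger set), sorted
# so that lower priorities are recomputed first.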
4153 def _store_set_values(self, cr, uid, ids, fields, context):
4154 """Calls the fields.function's "implementation function" for all ``fields``, on records with ``ids`` (taking care of
4155 respecting ``multi`` attributes), and stores the resulting values in the database directly."""
4160 if self._log_access:
4161 cr.execute('select id,write_date from '+self._table+' where id IN %s', (tuple(ids),))
4165 field_dict.setdefault(r[0], [])
4166 res_date = time.strptime((r[1])[:19], '%Y-%m-%d %H:%M:%S')
4167 write_date = datetime.datetime.fromtimestamp(time.mktime(res_date))
4168 for i in self.pool._store_function.get(self._name, []):
4170 up_write_date = write_date + datetime.timedelta(hours=i[5])
4171 if datetime.datetime.now() < up_write_date:
4173 field_dict[r[0]].append(i[1])
4179 if self._columns[f]._multi not in keys:
4180 keys.append(self._columns[f]._multi)
4181 todo.setdefault(self._columns[f]._multi, [])
4182 todo[self._columns[f]._multi].append(f)
4186 # use admin user for accessing objects having rules defined on store fields
4187 result = self._columns[val[0]].get(cr, self, ids, val, SUPERUSER_ID, context=context)
4188 for id, value in result.items():
4190 for f in value.keys():
4191 if f in field_dict[id]:
4198 if self._columns[v]._type == 'many2one':
4200 value[v] = value[v][0]
4203 upd0.append('"'+v+'"='+self._columns[v]._symbol_set[0])
4204 upd1.append(self._columns[v]._symbol_set[1](value[v]))
4207 cr.execute('update "' + self._table + '" set ' + \
4208 ','.join(upd0) + ' where id = %s', upd1)
4212 # use admin user for accessing objects having rules defined on store fields
4213 result = self._columns[f].get(cr, self, ids, f, SUPERUSER_ID, context=context)
4214 for r in result.keys():
4216 if r in field_dict.keys():
4217 if f in field_dict[r]:
4219 for id, value in result.items():
4220 if self._columns[f]._type == 'many2one':
4225 cr.execute('update "' + self._table + '" set ' + \
4226 '"'+f+'"='+self._columns[f]._symbol_set[0] + ' where id = %s', (self._columns[f]._symbol_set[1](value), id))
4228 # invalidate the cache for the modified fields
4229 self.browse(cr, uid, ids, context).modified(fields)
4233 # TODO: improve the handling of NULL values
4234 def _where_calc(self, cr, user, domain, active_test=True, context=None):
4235 """Computes the WHERE clause needed to implement an OpenERP domain.
4236 :param domain: the domain to compute
4238 :param active_test: whether the default filtering of records with ``active``
4239 field set to ``False`` should be applied.
4240 :return: the query expressing the given domain as provided in domain
4241 :rtype: osv.query.Query
4246 # if the object has a field named 'active', filter out all inactive
4247 # records unless they were explicitly asked for
4248 if 'active' in self._all_columns and (active_test and context.get('active_test', True)):
4250 # the item[0] trick below works for domain items and '&'/'|'/'!'
4252 if not any(item[0] == 'active' for item in domain):
4253 domain.insert(0, ('active', '=', 1))
4255 domain = [('active', '=', 1)]
4258 e = expression.expression(cr, user, domain, self, context)
4259 tables = e.get_tables()
4260 where_clause, where_params = e.to_sql()
4261 where_clause = where_clause and [where_clause] or []
4263 where_clause, where_params, tables = [], [], ['"%s"' % self._table]
4265 return Query(tables, where_clause, where_params)
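# Sketch of the translation performed here (hypothetical model/domain; the
# exact SQL fragments depend on expression.expression):
#
#   query = model._where_calc(cr, uid, [('name', 'ilike', 'foo')])
#   # query.tables              -> ['"res_partner"']
#   # query.where_clause        -> roughly ['("res_partner"."name" ilike %s)']
#   # query.where_clause_params -> ['%foo%']
#   # plus an implicit ('active', '=', 1) term when applicable (see above).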
4267 def _check_qorder(self, word):
4268 if not regex_order.match(word):
4269 raise except_orm(_('AccessError'), _('Invalid "order" specified. A valid "order" specification is a comma-separated list of valid field names (optionally followed by asc/desc for the direction)'))
4272 def _apply_ir_rules(self, cr, uid, query, mode='read', context=None):
4273 """Add what's missing in ``query`` to implement all appropriate ir.rules
4274 (using the ``model_name``'s rules or the current model's rules if ``model_name`` is None)
4276 :param query: the current query object
4278 if uid == SUPERUSER_ID:
4281 def apply_rule(added_clause, added_params, added_tables, parent_model=None):
4282 """ :param parent_model: name of the parent model, if the added
4283 clause comes from a parent model
4287 # as inherited rules are being applied, we need to add the missing JOIN
4288 # to reach the parent table (if it was not JOINed yet in the query)
4289 parent_alias = self._inherits_join_add(self, parent_model, query)
4290 # inherited rules are applied on the external table -> need to get the alias and replace
4291 parent_table = self.pool[parent_model]._table
4292 added_clause = [clause.replace('"%s"' % parent_table, '"%s"' % parent_alias) for clause in added_clause]
4293 # change references to parent_table to parent_alias, because we now use the alias to refer to the table
4295 for table in added_tables:
4296 # table is just a table name -> switch to the full alias
4297 if table == '"%s"' % parent_table:
4298 new_tables.append('"%s" as "%s"' % (parent_table, parent_alias))
4299 # table is already a full statement -> replace references to the table with its alias; this is correct given the way aliases are generated
4301 new_tables.append(table.replace('"%s"' % parent_table, '"%s"' % parent_alias))
4302 added_tables = new_tables
4303 query.where_clause += added_clause
4304 query.where_clause_params += added_params
4305 for table in added_tables:
4306 if table not in query.tables:
4307 query.tables.append(table)
4311 # apply main rules on the object
4312 rule_obj = self.pool.get('ir.rule')
4313 rule_where_clause, rule_where_clause_params, rule_tables = rule_obj.domain_get(cr, uid, self._name, mode, context=context)
4314 apply_rule(rule_where_clause, rule_where_clause_params, rule_tables)
4316 # apply ir.rules from the parents (through _inherits)
4317 for inherited_model in self._inherits:
4318 rule_where_clause, rule_where_clause_params, rule_tables = rule_obj.domain_get(cr, uid, inherited_model, mode, context=context)
4319 apply_rule(rule_where_clause, rule_where_clause_params, rule_tables,
4320 parent_model=inherited_model)
4322 def _generate_m2o_order_by(self, order_field, query):
4324 Add possibly missing JOIN to ``query`` and generate the ORDER BY clause for m2o fields,
4325 either native m2o fields or function/related fields that are stored, including
4326 intermediate JOINs for inheritance if required.
4328 :return: the qualified field name to use in an ORDER BY clause to sort by ``order_field``
4330 if order_field not in self._columns and order_field in self._inherit_fields:
4331 # also add missing joins for reaching the table containing the m2o field
4332 qualified_field = self._inherits_join_calc(order_field, query)
4333 order_field_column = self._inherit_fields[order_field][2]
4335 qualified_field = '"%s"."%s"' % (self._table, order_field)
4336 order_field_column = self._columns[order_field]
4338 assert order_field_column._type == 'many2one', 'Invalid field passed to _generate_m2o_order_by()'
4339 if not order_field_column._classic_write and not getattr(order_field_column, 'store', False):
4340 _logger.debug("Many2one function/related fields must be stored " \
4341 "to be used as ordering fields! Ignoring sorting for %s.%s",
4342 self._name, order_field)
4345 # figure out the applicable order_by for the m2o
4346 dest_model = self.pool[order_field_column._obj]
4347 m2o_order = dest_model._order
4348 if not regex_order.match(m2o_order):
4349 # _order is complex, can't use it here, so we default to _rec_name
4350 m2o_order = dest_model._rec_name
4352 # extract the field names, to be able to qualify them and add desc/asc
4354 for order_part in m2o_order.split(","):
4355 m2o_order_list.append(order_part.strip().split(" ", 1)[0].strip())
4356 m2o_order = m2o_order_list
4358 # Join the dest m2o table if it's not joined yet. We use [LEFT] OUTER join here
4359 # as we don't want to exclude results that have NULL values for the m2o
4360 src_table, src_field = qualified_field.replace('"', '').split('.', 1)
4361 dst_alias, dst_alias_statement = query.add_join((src_table, dest_model._table, src_field, 'id', src_field), implicit=False, outer=True)
4362 qualify = lambda field: '"%s"."%s"' % (dst_alias, field)
4363 return map(qualify, m2o_order) if isinstance(m2o_order, list) else qualify(m2o_order)
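# Net effect (sketch): ordering by a hypothetical m2o column 'country_id'
# on res_partner LEFT JOINs the destination table and sorts on the
# destination model's _order (or _rec_name), yielding something like:
#
#   ... LEFT JOIN "res_country" as "res_partner__country_id" ON (...)
#   ORDER BY "res_partner__country_id"."name"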
4365 def _generate_order_by(self, order_spec, query):
4367 Attempt to construct an appropriate ORDER BY clause based on order_spec, which must be
4368 a comma-separated list of valid field names, optionally followed by an ASC or DESC direction.
4370 :raise" except_orm in case order_spec is malformed
4372 order_by_clause = ''
4373 order_spec = order_spec or self._order
4375 order_by_elements = []
4376 self._check_qorder(order_spec)
4377 for order_part in order_spec.split(','):
4378 order_split = order_part.strip().split(' ')
4379 order_field = order_split[0].strip()
4380 order_direction = order_split[1].strip() if len(order_split) == 2 else ''
4382 if order_field == 'id':
4383 order_by_elements.append('"%s"."%s" %s' % (self._table, order_field, order_direction))
4384 elif order_field in self._columns:
4385 order_column = self._columns[order_field]
4386 if order_column._classic_read:
4387 inner_clause = '"%s"."%s"' % (self._table, order_field)
4388 elif order_column._type == 'many2one':
4389 inner_clause = self._generate_m2o_order_by(order_field, query)
4391 continue # ignore non-readable or "non-joinable" fields
4392 elif order_field in self._inherit_fields:
4393 parent_obj = self.pool[self._inherit_fields[order_field][3]]
4394 order_column = parent_obj._columns[order_field]
4395 if order_column._classic_read:
4396 inner_clause = self._inherits_join_calc(order_field, query)
4397 elif order_column._type == 'many2one':
4398 inner_clause = self._generate_m2o_order_by(order_field, query)
4400 continue # ignore non-readable or "non-joinable" fields
4402 raise ValueError(_("Sorting field %s not found on model %s") % (order_field, self._name))
4404 if isinstance(inner_clause, list):
4405 for clause in inner_clause:
4406 order_by_elements.append("%s %s" % (clause, order_direction))
4408 order_by_elements.append("%s %s" % (inner_clause, order_direction))
4409 if order_by_elements:
4410 order_by_clause = ",".join(order_by_elements)
4412 return order_by_clause and (' ORDER BY %s ' % order_by_clause) or ''
4414 def _search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False, access_rights_uid=None):
4416 Private implementation of the search() method, allowing the caller to specify the uid to use for the access right check.
4417 This is useful for example when filling in the selection list for a drop-down and avoiding access rights errors,
4418 by specifying ``access_rights_uid=1`` to bypass access rights check, but not ir.rules!
4419 This is ok at the security level because this method is private and not callable through XML-RPC.
4421 :param access_rights_uid: optional user ID to use when checking access rights
4422 (not for ir.rules, this is only for ir.model.access)
4426 self.check_access_rights(cr, access_rights_uid or user, 'read')
4428 # For transient models, restrict access to the current user, except for the super-user
4429 if self.is_transient() and self._log_access and user != SUPERUSER_ID:
4430 args = expression.AND(([('create_uid', '=', user)], args or []))
4432 query = self._where_calc(cr, user, args, context=context)
4433 self._apply_ir_rules(cr, user, query, 'read', context=context)
4434 order_by = self._generate_order_by(order, query)
4435 from_clause, where_clause, where_clause_params = query.get_sql()
4437 limit_str = limit and ' limit %d' % limit or ''
4438 offset_str = offset and ' offset %d' % offset or ''
4439 where_str = where_clause and (" WHERE %s" % where_clause) or ''
4440 query_str = 'SELECT "%s".id FROM ' % self._table + from_clause + where_str + order_by + limit_str + offset_str
4443 # /!\ the main query must be executed as a subquery, otherwise
4444 # offset and limit apply to the result of count()!
4445 cr.execute('SELECT count(*) FROM (%s) AS count' % query_str, where_clause_params)
4449 cr.execute(query_str, where_clause_params)
4452 # TDE note: with auto_join, we could have several lines about the same result
4453 # e.g. a lead with several unread messages; we uniquify the result using
4454 # a fast way to do it while preserving order (http://www.peterbe.com/plog/uniqifiers-benchmark)
4455 def _uniquify_list(seq):
4456 seen = set()
4457 return [x for x in seq if x not in seen and not seen.add(x)]
4459 return _uniquify_list([x[0] for x in res])
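# _uniquify_list keeps the first occurrence of each id while preserving
# order, e.g. [3, 1, 3, 2, 1] -> [3, 1, 2]. Note that set.add() returns
# None, so `not seen.add(x)` is always True and only serves to record x.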
4461 # returns the different values ever entered for one field
4462 # this is used, for example, in the client when the user hits enter on a char field
4464 def distinct_field_get(self, cr, uid, field, value, args=None, offset=0, limit=None):
4467 if field in self._inherit_fields:
4468 return self.pool[self._inherit_fields[field][0]].distinct_field_get(cr, uid, field, value, args, offset, limit)
4470 return self._columns[field].search(cr, self, args, field, value, offset, limit, uid)
4472 def copy_data(self, cr, uid, id, default=None, context=None):
4474 Copy the given record's data with all its field values
4476 :param cr: database cursor
4477 :param uid: current user id
4478 :param id: id of the record to copy
4479 :param default: field values to override in the original values of the copied record
4480 :type default: dictionary
4481 :param context: context arguments, like lang, time zone
4482 :type context: dictionary
4483 :return: dictionary containing all the field values
4489 # avoid recursion through already copied records in case of circular relationship
4490 seen_map = context.setdefault('__copy_data_seen', {})
4491 if id in seen_map.setdefault(self._name, []):
4493 seen_map[self._name].append(id)
4497 if 'state' not in default:
4498 if 'state' in self._defaults:
4499 if callable(self._defaults['state']):
4500 default['state'] = self._defaults['state'](self, cr, uid, context)
4502 default['state'] = self._defaults['state']
4504 # build a black list of fields that should not be copied
4505 blacklist = set(MAGIC_COLUMNS + ['parent_left', 'parent_right'])
4506 def blacklist_given_fields(obj):
4507 # blacklist the fields that are given by inheritance
4508 for other, field_to_other in obj._inherits.items():
4509 blacklist.add(field_to_other)
4510 if field_to_other in default:
4511 # all the fields of 'other' are given by the record: default[field_to_other],
4512 # except the ones redefined in self
4513 blacklist.update(set(self.pool[other]._all_columns) - set(self._columns))
4515 blacklist_given_fields(self.pool[other])
4516 # blacklist deprecated fields
4517 for name, field in obj._columns.items():
4518 if field.deprecated:
4521 blacklist_given_fields(self)
4524 fields_to_copy = dict((f,fi) for f, fi in self._all_columns.iteritems()
4527 if f not in blacklist)
4529 data = self.read(cr, uid, [id], fields_to_copy.keys(), context=context)
4533 raise IndexError(_("Record #%d of %s not found, cannot copy!") % (id, self._name))
4536 for f, colinfo in fields_to_copy.iteritems():
4537 field = colinfo.column
4538 if field._type == 'many2one':
4539 res[f] = data[f] and data[f][0]
4540 elif field._type == 'one2many':
4541 other = self.pool[field._obj]
4542 # duplicate following the order of the ids because we'll rely on
4543 # it later for copying translations in copy_translations()!
4544 lines = [other.copy_data(cr, uid, line_id, context=context) for line_id in sorted(data[f])]
4545 # the lines are duplicated using the wrong (old) parent, but then
4546 # are reassigned to the correct one thanks to the (0, 0, ...)
4547 res[f] = [(0, 0, line) for line in lines if line]
4548 elif field._type == 'many2many':
4549 res[f] = [(6, 0, data[f])]
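
    # Illustrative sketch (not part of the original module; model and field
    # names are made up): copy_data() returns one2many values as (0, 0, vals)
    # "create" commands and many2many values as a single (6, 0, ids) "replace"
    # command, so the result can be passed directly to create().
    #
    #     data = partner_obj.copy_data(cr, uid, 7)
    #     # -> {'name': 'Agrolait',
    #     #     'child_ids': [(0, 0, {'name': 'Contact'})],
    #     #     'category_id': [(6, 0, [2, 5])]}
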
    def copy_translations(self, cr, uid, old_id, new_id, context=None):
        if context is None:
            context = {}

        # avoid recursion through already copied records in case of circular relationship
        seen_map = context.setdefault('__copy_translations_seen', {})
        if old_id in seen_map.setdefault(self._name, []):
            return
        seen_map[self._name].append(old_id)

        trans_obj = self.pool.get('ir.translation')
        # TODO it seems fields_get can be replaced by _all_columns (no need for translation)
        fields = self.fields_get(cr, uid, context=context)

        for field_name, field_def in fields.items():
            # removing the lang to compare untranslated values
            context_wo_lang = dict(context, lang=None)
            old_record, new_record = self.browse(cr, uid, [old_id, new_id], context=context_wo_lang)
            # we must recursively copy the translations for o2o and o2m
            if field_def['type'] == 'one2many':
                target_obj = self.pool[field_def['relation']]
                # here we rely on the order of the ids to match the translations
                # as foreseen in copy_data()
                old_children = sorted(r.id for r in old_record[field_name])
                new_children = sorted(r.id for r in new_record[field_name])
                for (old_child, new_child) in zip(old_children, new_children):
                    target_obj.copy_translations(cr, uid, old_child, new_child, context=context)
            # and for translatable fields we keep them for copy
            elif field_def.get('translate'):
                if field_name in self._columns:
                    trans_name = self._name + "," + field_name
                    target_id = new_id
                    source_id = old_id
                elif field_name in self._inherit_fields:
                    trans_name = self._inherit_fields[field_name][0] + "," + field_name
                    # get the id of the parent record to set the translation
                    inherit_field_name = self._inherit_fields[field_name][1]
                    target_id = new_record[inherit_field_name].id
                    source_id = old_record[inherit_field_name].id
                else:
                    continue

                trans_ids = trans_obj.search(cr, uid, [
                    ('name', '=', trans_name),
                    ('res_id', '=', source_id)
                ])
                user_lang = context.get('lang')
                for record in trans_obj.read(cr, uid, trans_ids, context=context):
                    del record['id']
                    # remove source to avoid triggering _set_src
                    del record['source']
                    record.update({'res_id': target_id})
                    if user_lang and user_lang == record['lang']:
                        # 'source' to force the call to _set_src
                        # 'value' needed if value is changed in copy(), want to see the new_value
                        record['source'] = old_record[field_name]
                        record['value'] = new_record[field_name]
                    trans_obj.create(cr, uid, record, context=context)

    @api.returns('self', lambda value: value.id)
    def copy(self, cr, uid, id, default=None, context=None):
        """
        Duplicate record with given id updating it with default values

        :param cr: database cursor
        :param uid: current user id
        :param id: id of the record to copy
        :param default: dictionary of field values to override in the original values of the copied record, e.g: ``{'field_name': overridden_value, ...}``
        :type default: dictionary
        :param context: context arguments, like lang, time zone
        :type context: dictionary
        :return: id of the newly created record
        """
        if context is None:
            context = {}
        context = context.copy()
        data = self.copy_data(cr, uid, id, default, context)
        new_id = self.create(cr, uid, data, context)
        self.copy_translations(cr, uid, id, new_id, context)
        return new_id
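
    # Usage sketch (illustrative; the model name and default values are
    # assumptions): copy() chains the three steps above.
    #
    #     partner_obj = self.pool['res.partner']
    #     new_id = partner_obj.copy(cr, uid, 7, default={'name': 'Copy of X'})
    #     # copy_data() builds the values, create() inserts them, and
    #     # copy_translations() carries the ir.translation rows over to new_id.
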
    @api.multi
    @api.returns('self')
    def exists(self):
        """ Return the subset of records in `self` that exist, and mark deleted
            records as such in cache. It can be used as a test on records::

                if record.exists():
                    ...

            By convention, new records are returned as existing.
        """
        ids = filter(None, self._ids)           # ids to check in database
        if not ids:
            return self
        query = """SELECT id FROM "%s" WHERE id IN %%s""" % self._table
        self._cr.execute(query, (ids,))
        ids = ([r[0] for r in self._cr.fetchall()] +    # ids in database
               [id for id in self._ids if not id])      # new ids
        existing = self.browse(ids)
        if len(existing) < len(self):
            # mark missing records in cache with a failed value
            exc = MissingError(_("Record does not exist or has been deleted."))
            (self - existing)._cache.update(FailedValue(exc))
        return existing

    def check_recursion(self, cr, uid, ids, context=None, parent=None):
        _logger.warning("You are using deprecated %s.check_recursion(). Please use the '_check_recursion()' instead!" %
                        self._name)
        assert parent is None or parent in self._columns or parent in self._inherit_fields,\
            "The 'parent' parameter passed to check_recursion() must be None or a valid field name"
        return self._check_recursion(cr, uid, ids, context, parent)

    def _check_recursion(self, cr, uid, ids, context=None, parent=None):
        """
        Verifies that there is no loop in a hierarchical structure of records,
        by following the parent relationship using the **parent** field until a loop
        is detected or until a top-level record is found.

        :param cr: database cursor
        :param uid: current user id
        :param ids: list of ids of records to check
        :param parent: optional parent field name (default: ``self._parent_name = parent_id``)
        :return: **True** if the operation can proceed safely, or **False** if an infinite loop is detected.
        """
        if not parent:
            parent = self._parent_name

        # must ignore 'active' flag, ir.rules, etc. => direct SQL query
        query = 'SELECT "%s" FROM "%s" WHERE id = %%s' % (parent, self._table)
        for id in ids:
            current_id = id
            while current_id is not None:
                cr.execute(query, (current_id,))
                result = cr.fetchone()
                current_id = result[0] if result else None
                if current_id == id:
                    return False
        return True
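
    # Usage sketch (illustrative; the message and field name are assumptions):
    # models with a parent_id hierarchy typically plug _check_recursion into
    # their _constraints so that writes creating a parent/child cycle are
    # rejected.
    #
    #     _constraints = [
    #         (osv.osv._check_recursion,
    #          'Error! You cannot create recursive records.', ['parent_id']),
    #     ]
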
    def _check_m2m_recursion(self, cr, uid, ids, field_name):
        """
        Verifies that there is no loop in a hierarchical structure of records,
        by following the parent relationship using the **field_name** field until a loop
        is detected or until a top-level record is found.

        :param cr: database cursor
        :param uid: current user id
        :param ids: list of ids of records to check
        :param field_name: field to check
        :return: **True** if the operation can proceed safely, or **False** if an infinite loop is detected.
        """
        field = self._all_columns.get(field_name)
        field = field.column if field else None
        if not field or field._type != 'many2many' or field._obj != self._name:
            # field must be a many2many on itself
            raise ValueError('invalid field_name: %r' % (field_name,))

        query = 'SELECT distinct "%s" FROM "%s" WHERE "%s" IN %%s' % (field._id2, field._rel, field._id1)
        ids_parent = ids[:]
        while ids_parent:
            ids_parent2 = []
            for i in range(0, len(ids_parent), cr.IN_MAX):
                j = i + cr.IN_MAX
                sub_ids_parent = ids_parent[i:j]
                cr.execute(query, (tuple(sub_ids_parent),))
                ids_parent2.extend(filter(None, map(lambda x: x[0], cr.fetchall())))
            ids_parent = ids_parent2
            for i in ids_parent:
                if i in ids:
                    return False
        return True

    def _get_external_ids(self, cr, uid, ids, *args, **kwargs):
        """Retrieve the External ID(s) of any database record.

        **Synopsis**: ``_get_xml_ids(cr, uid, ids) -> { 'id': ['module.xml_id'] }``

        :return: map of ids to the list of their fully qualified External IDs
                 in the form ``module.key``, or an empty list when there's no External
                 ID for a record, e.g.::

                     { 'id': ['module.ext_id', 'module.ext_id_bis'],
                       'id2': [] }
        """
        ir_model_data = self.pool.get('ir.model.data')
        data_ids = ir_model_data.search(cr, uid, [('model', '=', self._name), ('res_id', 'in', ids)])
        data_results = ir_model_data.read(cr, uid, data_ids, ['module', 'name', 'res_id'])
        result = {}
        for id in ids:
            # can't use dict.fromkeys() as the list would be shared!
            result[id] = []
        for record in data_results:
            result[record['res_id']].append('%(module)s.%(name)s' % record)
        return result

    def get_external_id(self, cr, uid, ids, *args, **kwargs):
        """Retrieve the External ID of any database record, if there
        is one. This method works as a possible implementation
        for a function field, to be able to add it to any
        model object easily, referencing it as ``Model.get_external_id``.

        When multiple External IDs exist for a record, only one
        of them is returned (randomly).

        :return: map of ids to their fully qualified XML ID,
                 defaulting to an empty string when there's none
                 (to be usable as a function field),
                 e.g.::

                     { 'id': 'module.ext_id',
                       'id2': '' }
        """
        results = self._get_xml_ids(cr, uid, ids)
        for k, v in results.iteritems():
            if results[k]:
                results[k] = v[0]
            else:
                results[k] = ''
        return results

    # backwards compatibility
    get_xml_id = get_external_id
    _get_xml_ids = _get_external_ids

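    # Usage sketch (illustrative; the model and external id are assumptions):
    #
    #     partner_obj = self.pool['res.partner']
    #     partner_obj.get_external_id(cr, uid, [1])
    #     # -> {1: 'base.main_partner'}, or {1: ''} without an ir.model.data row
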
    def print_report(self, cr, uid, ids, name, data, context=None):
        """
        Render the report `name` for the given IDs. The report must be defined
        for this model, not another.
        """
        report = self.pool['ir.actions.report.xml']._lookup_report(cr, name)
        assert self._name == report.table
        return report.create(cr, uid, ids, data, context)

    # Transience
    @classmethod
    def is_transient(cls):
        """ Return whether the model is transient.

        See :class:`TransientModel`.
        """
        return cls._transient

    def _transient_clean_rows_older_than(self, cr, seconds):
        assert self._transient, "Model %s is not transient, it cannot be vacuumed!" % self._name
        # Never delete rows used in last 5 minutes
        seconds = max(seconds, 300)
        query = ("SELECT id FROM " + self._table + " WHERE"
                 " COALESCE(write_date, create_date, (now() at time zone 'UTC'))::timestamp"
                 " < ((now() at time zone 'UTC') - interval %s)")
        cr.execute(query, ("%s seconds" % seconds,))
        ids = [x[0] for x in cr.fetchall()]
        self.unlink(cr, SUPERUSER_ID, ids)

    def _transient_clean_old_rows(self, cr, max_count):
        # Check how many rows we have in the table
        cr.execute("SELECT count(*) AS row_count FROM " + self._table)
        res = cr.fetchall()
        if res[0][0] <= max_count:
            return  # max not reached, nothing to do
        self._transient_clean_rows_older_than(cr, 300)

    def _transient_vacuum(self, cr, uid, force=False):
        """Clean the transient records.

        This unlinks old records from the transient model tables whenever the
        "_transient_max_count" or "_transient_max_hours" conditions (if any) are reached.
        Actual cleaning will happen only once every "_transient_check_time" calls.
        This means this method can be frequently called (e.g. whenever
        a new record is created).
        Example with both max_hours and max_count active:
        Suppose max_hours = 0.2 (i.e. 12 minutes), max_count = 20, there are 55 rows in the
        table, 10 created/changed in the last 5 minutes, an additional 12 created/changed between
        5 and 10 minutes ago, the rest created/changed more than 12 minutes ago.
        - age based vacuum will leave the 22 rows created/changed in the last 12 minutes
        - count based vacuum will wipe out another 12 rows. Not just 2, otherwise each addition
          would immediately cause the maximum to be reached again.
        - the 10 rows that have been created/changed in the last 5 minutes will NOT be deleted
        """
        assert self._transient, "Model %s is not transient, it cannot be vacuumed!" % self._name
        _transient_check_time = 20          # arbitrary limit on vacuum executions
        self._transient_check_count += 1
        if not force and (self._transient_check_count < _transient_check_time):
            return True  # no vacuum cleaning this time
        self._transient_check_count = 0

        # Age-based expiration
        if self._transient_max_hours:
            self._transient_clean_rows_older_than(cr, self._transient_max_hours * 60 * 60)

        # Count-based expiration
        if self._transient_max_count:
            self._transient_clean_old_rows(cr, self._transient_max_count)

        return True
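
    # Configuration sketch (illustrative; the model name is an assumption):
    # a TransientModel opts into vacuuming through these class attributes,
    # which _transient_vacuum() reads above.
    #
    #     class my_wizard(TransientModel):
    #         _name = 'my.wizard'
    #         _transient_max_count = 200     # count-based expiration threshold
    #         _transient_max_hours = 1.0     # age-based expiration threshold
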
    def resolve_2many_commands(self, cr, uid, field_name, commands, fields=None, context=None):
        """ Serializes one2many and many2many commands into record dictionaries
            (as if all the records came from the database via a read()). This
            method is aimed at onchange methods on one2many and many2many fields.

            Because commands might be creation commands, not all record dicts
            will contain an ``id`` field. Commands matching an existing record
            will have an ``id``.

            :param field_name: name of the one2many or many2many field matching the commands
            :type field_name: str
            :param commands: one2many or many2many commands to execute on ``field_name``
            :type commands: list((int|False, int|False, dict|False))
            :param fields: list of fields to read from the database, when applicable
            :type fields: list(str)
            :returns: records in a shape similar to that returned by ``read()``
                (except records may be missing the ``id`` field if they don't exist in db)
        """
        result = []             # result (list of dict)
        record_ids = []         # ids of records to read
        updates = {}            # {id: dict} of updates on particular records

        for command in commands or []:
            if not isinstance(command, (list, tuple)):
                record_ids.append(command)
            elif command[0] == 0:
                result.append(command[2])
            elif command[0] == 1:
                record_ids.append(command[1])
                updates.setdefault(command[1], {}).update(command[2])
            elif command[0] in (2, 3):
                record_ids = [id for id in record_ids if id != command[1]]
            elif command[0] == 4:
                record_ids.append(command[1])
            elif command[0] == 5:
                result, record_ids = [], []
            elif command[0] == 6:
                result, record_ids = [], list(command[2])
        # read the records and apply the updates
        other_model = self.pool[self._all_columns[field_name].column._obj]
        for record in other_model.read(cr, uid, record_ids, fields=fields, context=context):
            record.update(updates.get(record['id'], {}))
            result.append(record)

        return result

    # for backward compatibility
    resolve_o2m_commands_to_record_dicts = resolve_2many_commands

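    # Resolution sketch (illustrative; ids and field names are made up):
    # creation commands come back as dicts without an 'id', updates as the
    # stored record overlaid with the new values, links as a plain read().
    #
    #     cmds = [(0, 0, {'name': 'new line'}),
    #             (1, 4, {'name': 'renamed'}),
    #             (4, 7, False)]
    #     model.resolve_2many_commands(cr, uid, 'line_ids', cmds, ['name'])
    #     # -> [{'name': 'new line'},
    #     #     {'id': 4, 'name': 'renamed'},
    #     #     {'id': 7, 'name': <stored value>}]
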
    def search_read(self, cr, uid, domain=None, fields=None, offset=0, limit=None, order=None, context=None):
        """
        Performs a ``search()`` followed by a ``read()``.

        :param cr: database cursor
        :param uid: current user id
        :param domain: Search domain, see ``args`` parameter in ``search()``. Defaults to an empty domain that will match all records.
        :param fields: List of fields to read, see ``fields`` parameter in ``read()``. Defaults to all fields.
        :param offset: Number of records to skip, see ``offset`` parameter in ``search()``. Defaults to 0.
        :param limit: Maximum number of records to return, see ``limit`` parameter in ``search()``. Defaults to no limit.
        :param order: Columns to sort result, see ``order`` parameter in ``search()``. Defaults to no sort.
        :param context: context arguments.
        :return: List of dictionaries containing the asked fields.
        :rtype: List of dictionaries.
        """
        record_ids = self.search(cr, uid, domain or [], offset=offset, limit=limit, order=order, context=context)
        if not record_ids:
            return []

        if fields and fields == ['id']:
            # shortcut read if we only want the ids
            return [{'id': id} for id in record_ids]

        # read() ignores active_test, but it would forward it to any downstream search call
        # (e.g. for x2m or function fields), and this is not the desired behavior, the flag
        # was presumably only meant for the main search().
        # TODO: Move this to read() directly?
        read_ctx = dict(context or {})
        read_ctx.pop('active_test', None)

        result = self.read(cr, uid, record_ids, fields, context=read_ctx)
        if len(result) <= 1:
            return result

        # reorder read
        index = dict((r['id'], r) for r in result)
        return [index[x] for x in record_ids if x in index]
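
    # Usage sketch (illustrative; domain and fields are assumptions):
    #
    #     rows = model.search_read(cr, uid, [('active', '=', True)],
    #                              ['name'], limit=5, order='name asc')
    #     # -> [{'id': 3, 'name': 'A'}, {'id': 1, 'name': 'B'}, ...]
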
    def _register_hook(self, cr):
        """ stuff to do right after the registry is built """
        pass

    def _patch_method(self, name, method):
        """ Monkey-patch a method for all instances of this model. This replaces
            the method called `name` by `method` in `self`'s class.
            The original method is then accessible via ``method.origin``, and it
            can be restored with :meth:`~._revert_method`.

            Example::

                @api.multi
                def do_write(self, values):
                    # do stuff, and call the original method
                    return do_write.origin(self, values)

                # patch method write of model
                model._patch_method('write', do_write)

                # this will call do_write
                records = model.search([...])
                records.write(...)

                # restore the original method
                model._revert_method('write')
        """
        cls = type(self)
        origin = getattr(cls, name)
        method.origin = origin
        # propagate decorators from origin to method, and apply api decorator
        wrapped = api.guess(api.propagate(origin, method))
        wrapped.origin = origin
        setattr(cls, name, wrapped)

    def _revert_method(self, name):
        """ Revert the original method of `self` called `name`.
            See :meth:`~._patch_method`.
        """
        cls = type(self)
        method = getattr(cls, name)
        setattr(cls, name, method.origin)

    #
    # Instance creation
    #
    # An instance represents an ordered collection of records in a given
    # execution environment. The instance object refers to the environment, and
    # the records themselves are represented by their cache dictionary. The 'id'
    # of each record is found in its corresponding cache dictionary.
    #
    # This design has the following advantages:
    # - cache access is direct and thus fast;
    # - one can consider records without an 'id' (see new records);
    # - the global cache is only an index to "resolve" a record 'id'.
    #

    @classmethod
    def _browse(cls, env, ids):
        """ Create an instance attached to `env`; `ids` is a tuple of record
            ids.
        """
        records = object.__new__(cls)
        records.env = env
        records._ids = ids
        env.prefetch[cls._name].update(ids)
        return records

    @api.v8
    def browse(self, arg=None):
        """ Return an instance corresponding to `arg` and attached to
            `self.env`; `arg` is either a record id, or a collection of record ids.
        """
        ids = _normalize_ids(arg)
        #assert all(isinstance(id, IdType) for id in ids), "Browsing invalid ids: %s" % ids
        return self._browse(self.env, ids)

    @api.v7
    def browse(self, cr, uid, arg=None, context=None):
        ids = _normalize_ids(arg)
        #assert all(isinstance(id, IdType) for id in ids), "Browsing invalid ids: %s" % ids
        return self._browse(Environment(cr, uid, context or {}), ids)

    #
    # Internal properties, for manipulating the instance's implementation
    #

    @property
    def ids(self):
        """ Return the list of non-false record ids of this instance. """
        return filter(None, list(self._ids))

    # backward-compatibility with former browse records
    _cr = property(lambda self: self.env.cr)
    _uid = property(lambda self: self.env.uid)
    _context = property(lambda self: self.env.context)

    #
    # Conversion methods
    #

    def ensure_one(self):
        """ Return `self` if it is a singleton instance, otherwise raise an
            exception.
        """
        if len(self) == 1:
            return self
        raise except_orm("ValueError", "Expected singleton: %s" % self)

    def with_env(self, env):
        """ Return an instance equivalent to `self` attached to `env`.
        """
        return self._browse(env, self._ids)

    def sudo(self, user=SUPERUSER_ID):
        """ Return an instance equivalent to `self` attached to an environment
            based on `self.env` with the given `user`.
        """
        return self.with_env(self.env(user=user))

    def with_context(self, *args, **kwargs):
        """ Return an instance equivalent to `self` attached to an environment
            based on `self.env` with another context. The context is given by
            `self._context` or the positional argument if given, and modified by
            `kwargs`.
        """
        context = dict(args[0] if args else self._context, **kwargs)
        return self.with_env(self.env(context=context))
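
    # Usage sketch (illustrative): environment switches compose and return new
    # recordsets; the original recordset is left untouched.
    #
    #     record = records.browse(7)
    #     record.sudo().with_context(lang='fr_FR')
    #     # same record, now evaluated as SUPERUSER_ID with a French context
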
    def _convert_to_cache(self, values):
        """ Convert the `values` dictionary into cached values. """
        fields = self._fields
        return {
            name: fields[name].convert_to_cache(value, self.env)
            for name, value in values.iteritems()
            if name in fields
        }

    def _convert_to_write(self, values):
        """ Convert the `values` dictionary into the format of :meth:`write`. """
        fields = self._fields
        return dict(
            (name, fields[name].convert_to_write(value))
            for name, value in values.iteritems()
            if name in self._fields
        )

    #
    # Record traversal and update
    #

    def _mapped_func(self, func):
        """ Apply function `func` on all records in `self`, and return the
            result as a list or a recordset (if `func` returns recordsets).
        """
        vals = [func(rec) for rec in self]
        val0 = vals[0] if vals else func(self)
        if isinstance(val0, BaseModel):
            return reduce(operator.or_, vals, val0)
        return vals

    def mapped(self, func):
        """ Apply `func` on all records in `self`, and return the result as a
            list or a recordset (if `func` returns recordsets). In the latter
            case, the order of the returned recordset is arbitrary.

            :param func: a function or a dot-separated sequence of field names
        """
        if isinstance(func, basestring):
            recs = self
            for name in func.split('.'):
                recs = recs._mapped_func(operator.itemgetter(name))
            return recs
        else:
            return self._mapped_func(func)
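
    # Usage sketch (illustrative; field names are assumptions):
    #
    #     partners.mapped('name')                 # -> list of name strings
    #     partners.mapped('parent_id.bank_ids')   # -> recordset (union of results)
    #     partners.mapped(lambda p: p.name.upper())
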
    def _mapped_cache(self, name_seq):
        """ Same as `~.mapped`, but `name_seq` is a dot-separated sequence of
            field names, and only cached values are used.
        """
        recs = self
        for name in name_seq.split('.'):
            field = recs._fields[name]
            null = field.null(self.env)
            recs = recs.mapped(lambda rec: rec._cache.get(field, null))
        return recs

    def filtered(self, func):
        """ Select the records in `self` such that `func(rec)` is true, and
            return them as a recordset.

            :param func: a function or a dot-separated sequence of field names
        """
        if isinstance(func, basestring):
            name = func
            func = lambda rec: filter(None, rec.mapped(name))
        return self.browse([rec.id for rec in self if func(rec)])

    def sorted(self, key=None):
        """ Return the recordset `self` ordered by `key` """
        if key is None:
            return self.search([('id', 'in', self.ids)])
        else:
            return self.browse(map(int, sorted(self, key=key)))
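
    # Usage sketch (illustrative; field names are assumptions):
    #
    #     partners.filtered('is_company')             # truthy field value
    #     partners.filtered(lambda r: r.credit > 0)   # arbitrary predicate
    #     partners.sorted(key=lambda r: r.name)       # explicit sort key
    #     partners.sorted()                           # the model's default _order
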
    def update(self, values):
        """ Update record `self[0]` with `values`. """
        for name, value in values.iteritems():
            self[name] = value

    #
    # New records - represent records that do not exist in the database yet;
    # they are used to compute default values and perform onchanges.
    #

    @api.model
    def new(self, values={}):
        """ Return a new record instance attached to `self.env`, and
            initialized with the `values` dictionary. Such a record does not
            exist in the database.
        """
        record = self.browse([NewId()])
        record._cache.update(self._convert_to_cache(values))

        if record.env.in_onchange:
            # The cache update does not set inverse fields, so do it manually.
            # This is useful for computing a function field on secondary
            # records, if that field depends on the main record.
            for name in values:
                field = self._fields.get(name)
                if field and field.inverse_field:
                    field.inverse_field._update(record[name], record)

        return record

    #
    # Dirty flag, to mark records modified (in draft mode)
    #

    @property
    def _dirty(self):
        """ Return whether any record in `self` is dirty. """
        dirty = self.env.dirty
        return any(record in dirty for record in self)

    @_dirty.setter
    def _dirty(self, value):
        """ Mark the records in `self` as dirty. """
        if value:
            map(self.env.dirty.add, self)
        else:
            map(self.env.dirty.discard, self)

    def __nonzero__(self):
        """ Test whether `self` is nonempty. """
        return bool(getattr(self, '_ids', True))

    def __len__(self):
        """ Return the size of `self`. """
        return len(self._ids)

    def __iter__(self):
        """ Return an iterator over `self`. """
        for id in self._ids:
            yield self._browse(self.env, (id,))

    def __contains__(self, item):
        """ Test whether `item` is a subset of `self` or a field name. """
        if isinstance(item, BaseModel):
            if self._name == item._name:
                return set(item._ids) <= set(self._ids)
            raise except_orm("ValueError", "Mixing apples and oranges: %s in %s" % (item, self))
        if isinstance(item, basestring):
            return item in self._fields
        return item in self.ids

    def __add__(self, other):
        """ Return the concatenation of two recordsets. """
        if not isinstance(other, BaseModel) or self._name != other._name:
            raise except_orm("ValueError", "Mixing apples and oranges: %s + %s" % (self, other))
        return self.browse(self._ids + other._ids)

    def __sub__(self, other):
        """ Return the recordset of all the records in `self` that are not in `other`. """
        if not isinstance(other, BaseModel) or self._name != other._name:
            raise except_orm("ValueError", "Mixing apples and oranges: %s - %s" % (self, other))
        other_ids = set(other._ids)
        return self.browse([id for id in self._ids if id not in other_ids])

    def __and__(self, other):
        """ Return the intersection of two recordsets.
            Note that recordset order is not preserved.
        """
        if not isinstance(other, BaseModel) or self._name != other._name:
            raise except_orm("ValueError", "Mixing apples and oranges: %s & %s" % (self, other))
        return self.browse(set(self._ids) & set(other._ids))

    def __or__(self, other):
        """ Return the union of two recordsets.
            Note that recordset order is not preserved.
        """
        if not isinstance(other, BaseModel) or self._name != other._name:
            raise except_orm("ValueError", "Mixing apples and oranges: %s | %s" % (self, other))
        return self.browse(set(self._ids) | set(other._ids))
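
    # Semantics sketch (illustrative): `+` concatenates and keeps duplicates,
    # while `&`, `|` and `-` behave as set operations on the ids.
    #
    #     a, b = model.browse([1, 2]), model.browse([2, 3])
    #     (a + b)._ids   # (1, 2, 2, 3)
    #     (a - b)._ids   # (1,)
    #     (a | b)._ids   # ids {1, 2, 3}, order not preserved
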
    def __eq__(self, other):
        """ Test whether two recordsets are equivalent (up to reordering). """
        if not isinstance(other, BaseModel):
            if other:
                _logger.warning("Comparing apples and oranges: %s == %s", self, other)
            return False
        return self._name == other._name and set(self._ids) == set(other._ids)

    def __ne__(self, other):
        return not self == other

    def __lt__(self, other):
        if not isinstance(other, BaseModel) or self._name != other._name:
            raise except_orm("ValueError", "Mixing apples and oranges: %s < %s" % (self, other))
        return set(self._ids) < set(other._ids)

    def __le__(self, other):
        if not isinstance(other, BaseModel) or self._name != other._name:
            raise except_orm("ValueError", "Mixing apples and oranges: %s <= %s" % (self, other))
        return set(self._ids) <= set(other._ids)

    def __gt__(self, other):
        if not isinstance(other, BaseModel) or self._name != other._name:
            raise except_orm("ValueError", "Mixing apples and oranges: %s > %s" % (self, other))
        return set(self._ids) > set(other._ids)

    def __ge__(self, other):
        if not isinstance(other, BaseModel) or self._name != other._name:
            raise except_orm("ValueError", "Mixing apples and oranges: %s >= %s" % (self, other))
        return set(self._ids) >= set(other._ids)

    def __str__(self):
        return "%s%s" % (self._name, getattr(self, '_ids', ""))

    def __unicode__(self):
        return unicode(str(self))

    __repr__ = __str__

    def __hash__(self):
        if hasattr(self, '_ids'):
            return hash((self._name, frozenset(self._ids)))
        else:
            return hash(self._name)

    def __getitem__(self, key):
        """ If `key` is an integer or a slice, return the corresponding record
            selection as an instance (attached to `self.env`).
            Otherwise read the field `key` of the first record in `self`.

            Examples::

                inst = model.search(dom)    # inst is a recordset
                r4 = inst[3]                # fourth record in inst
                rs = inst[10:20]            # subset of inst
                nm = rs['name']             # name of first record in inst
        """
        if isinstance(key, basestring):
            # important: one must call the field's getter
            return self._fields[key].__get__(self, type(self))
        elif isinstance(key, slice):
            return self._browse(self.env, self._ids[key])
        else:
            return self._browse(self.env, (self._ids[key],))

    def __setitem__(self, key, value):
        """ Assign the field `key` to `value` in record `self`. """
        # important: one must call the field's setter
        return self._fields[key].__set__(self, value)

    #
    # Cache and recomputation management
    #

    @property
    def _cache(self):
        """ Return the cache of `self`, mapping field names to values. """
        return RecordCache(self)

    @api.model
    def _in_cache_without(self, field):
        """ Make sure `self` is present in cache (for prefetching), and return
            the records of model `self` in cache that have no value for `field`
            (:class:`Field` instance).
        """
        env = self.env
        prefetch_ids = env.prefetch[self._name]
        prefetch_ids.update(self._ids)
        ids = filter(None, prefetch_ids - set(env.cache[field]))
        return self.browse(ids)

5349 """ Clear the records cache.
5352 The record cache is automatically invalidated.
5354 self.invalidate_cache()
    @api.model
    def invalidate_cache(self, fnames=None, ids=None):
        """ Invalidate the record caches after some records have been modified.
            If both `fnames` and `ids` are ``None``, the whole cache is cleared.

            :param fnames: the list of modified fields, or ``None`` for all fields
            :param ids: the list of modified record ids, or ``None`` for all
        """
        if fnames is None:
            if ids is None:
                return self.env.invalidate_all()
            fields = self._fields.values()
        else:
            fields = map(self._fields.__getitem__, fnames)

        # invalidate fields and inverse fields, too
        spec = [(f, ids) for f in fields] + \
               [(f.inverse_field, None) for f in fields if f.inverse_field]
        self.env.invalidate(spec)

    @api.multi
    def modified(self, fnames):
        """ Notify that fields have been modified on `self`. This invalidates
            the cache, and prepares the recomputation of stored function fields
            (new-style fields only).

            :param fnames: iterable of field names that have been modified on
                the records in `self`
        """
        # each field knows what to invalidate and recompute
        spec = []
        for fname in fnames:
            spec += self._fields[fname].modified(self)

        cached_fields = {
            field
            for env in self.env.all
            for field in env.cache
        }
        # invalidate non-stored fields.function which are currently cached
        spec += [(f, None) for f in self.pool.pure_function_fields
                 if f in cached_fields]

        self.env.invalidate(spec)

    def _recompute_check(self, field):
        """ If `field` must be recomputed on some record in `self`, return the
            corresponding records that must be recomputed.
        """
        for env in [self.env] + list(self.env.all):
            if env.todo.get(field) and env.todo[field] & self:
                return env.todo[field]

    def _recompute_todo(self, field):
        """ Mark `field` to be recomputed. """
        todo = self.env.todo
        todo[field] = (todo.get(field) or self.browse()) | self

    def _recompute_done(self, field):
        """ Mark `field` as being recomputed. """
        todo = self.env.todo
        if field in todo:
            recs = todo.pop(field) - self
            if recs:
                todo[field] = recs

    @api.model
    def recompute(self):
        """ Recompute stored function fields. The fields and records to
            recompute have been determined by method :meth:`modified`.
        """
        for env in list(self.env.all):
            while env.todo:
                field, recs = next(env.todo.iteritems())
                # evaluate the fields to recompute, and save them to database
                for rec, rec1 in zip(recs, recs.with_context(recompute=False)):
                    try:
                        values = rec._convert_to_write({
                            f.name: rec[f.name] for f in field.computed_fields
                        })
                        rec1._write(values)
                    except MissingError:
                        pass
                # mark the computed fields as done
                map(recs._recompute_done, field.computed_fields)

    #
    # Generic onchange method
    #

    def _has_onchange(self, field, other_fields):
        """ Return whether `field` should trigger an onchange event in the
            presence of `other_fields`.
        """
        # test whether self has an onchange method for field, or field is a
        # dependency of any field in other_fields
        return field.name in self._onchange_methods or \
            any(dep in other_fields for dep in field.dependents)

    @api.model
    def _onchange_spec(self, view_info=None):
        """ Return the onchange spec from a view description; if not given, the
            result of ``self.fields_view_get()`` is used.
        """
        result = {}

        # for traversing the XML arch and populating result
        def process(node, info, prefix):
            if node.tag == 'field':
                name = node.attrib['name']
                names = "%s.%s" % (prefix, name) if prefix else name
                if not result.get(names):
                    result[names] = node.attrib.get('on_change')
                # traverse the subviews included in relational fields
                for subinfo in info['fields'][name].get('views', {}).itervalues():
                    process(etree.fromstring(subinfo['arch']), subinfo, names)
            else:
                for child in node:
                    process(child, info, prefix)

        if view_info is None:
            view_info = self.fields_view_get()
        process(etree.fromstring(view_info['arch']), view_info, '')
        return result
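
    # Result sketch (illustrative; field and handler names are made up):
    # the spec maps (possibly dotted) field paths from the view to the
    # on_change attribute found on the corresponding <field> node.
    #
    #     {'partner_id': 'onchange_partner_id(partner_id)',   # v7-style spec
    #      'line_ids.product_id': '1'}                        # v8-style spec
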
    def _onchange_eval(self, field_name, onchange, result):
        """ Apply onchange method(s) for field `field_name` with spec `onchange`
            on record `self`. Value assignments are applied on `self`, while
            domain and warning messages are put in dictionary `result`.
        """
        onchange = onchange.strip()

        # onchange V8
        if onchange in ("1", "true"):
            for method in self._onchange_methods.get(field_name, ()):
                method_res = method(self)
                if not method_res:
                    continue
                if 'domain' in method_res:
                    result.setdefault('domain', {}).update(method_res['domain'])
                if 'warning' in method_res:
                    result['warning'] = method_res['warning']
            return

        # onchange V7
        match = onchange_v7.match(onchange)
        if match:
            method, params = match.groups()
            # evaluate params -> tuple
            global_vars = {'context': self._context, 'uid': self._uid}
            if self._context.get('field_parent'):
                class RawRecord(object):
                    def __init__(self, record):
                        self._record = record
                    def __getattr__(self, name):
                        field = self._record._fields[name]
                        value = self._record[name]
                        return field.convert_to_onchange(value)
                record = self[self._context['field_parent']]
                global_vars['parent'] = RawRecord(record)
            field_vars = {
                key: self._fields[key].convert_to_onchange(val)
                for key, val in self._cache.iteritems()
            }
            params = eval("[%s]" % params, global_vars, field_vars)

            # call onchange method
            args = (self._cr, self._uid, self._origin.ids) + tuple(params)
            method_res = getattr(self._model, method)(*args)
            if not isinstance(method_res, dict):
                return
            if 'value' in method_res:
                method_res['value'].pop('id', None)
                self.update(self._convert_to_cache(method_res['value']))
            if 'domain' in method_res:
                result.setdefault('domain', {}).update(method_res['domain'])
            if 'warning' in method_res:
                result['warning'] = method_res['warning']

    @api.multi
    def onchange(self, values, field_name, field_onchange):
        """ Perform an onchange on the given field.

            :param values: dictionary mapping field names to values, giving the
                current state of modification
            :param field_name: name of the modified field
            :param field_onchange: dictionary mapping field names to their
                on_change attribute
        """
        env = self.env

        if field_name and field_name not in self._fields:
            return {}

        # determine subfields for field.convert_to_write() below
        secondary = []
        subfields = defaultdict(set)
        for dotname in field_onchange:
            if '.' in dotname:
                secondary.append(dotname)
                name, subname = dotname.split('.')
                subfields[name].add(subname)

        # create a new record with values, and attach `self` to it
        with env.do_in_onchange():
            record = self.new(values)
            values = dict(record._cache)
            # attach `self` with a different context (for cache consistency)
            record._origin = self.with_context(__onchange=True)

        # determine which fields should trigger an onchange
        todo = set([field_name]) if field_name else set(values)
        done = set()

        # dummy assignment: trigger invalidations on the record
        for name in todo:
            record[name] = record[name]

        result = {'value': {}}

        while todo:
            name = todo.pop()
            if name in done:
                continue
            done.add(name)
            with env.do_in_onchange():
                # apply field-specific onchange methods
                if field_onchange.get(name):
                    record._onchange_eval(name, field_onchange[name], result)

                # force re-evaluation of function fields on secondary records
                for field_seq in secondary:
                    record.mapped(field_seq)
                # determine which fields have been modified
                for name, oldval in values.iteritems():
                    newval = record[name]
                    if newval != oldval or getattr(newval, '_dirty', False):
                        field = self._fields[name]
                        result['value'][name] = field.convert_to_write(
                            newval, record._origin, subfields[name],
                        )
                        todo.add(name)

        # At the moment, the client does not support updates on a *2many field
        # while this one is modified by the user.
        if field_name and self._fields[field_name].type in ('one2many', 'many2many'):
            result['value'].pop(field_name, None)

        return result

class RecordCache(MutableMapping):
    """ Implements a proxy dictionary to read/update the cache of a record.
        Upon iteration, it looks like a dictionary mapping field names to
        values. However, fields may be used as keys as well.
    """
    def __init__(self, records):
        self._recs = records

    def __contains__(self, field):
        """ Return whether `records[0]` has a value for `field` in cache. """
        if isinstance(field, basestring):
            field = self._recs._fields[field]
        return self._recs.id in self._recs.env.cache[field]

    def __getitem__(self, field):
        """ Return the cached value of `field` for `records[0]`. """
        if isinstance(field, basestring):
            field = self._recs._fields[field]
        value = self._recs.env.cache[field][self._recs.id]
        return value.get() if isinstance(value, SpecialValue) else value

    def __setitem__(self, field, value):
        """ Assign the cached value of `field` for all records in `records`. """
        if isinstance(field, basestring):
            field = self._recs._fields[field]
        values = dict.fromkeys(self._recs._ids, value)
        self._recs.env.cache[field].update(values)

    def update(self, *args, **kwargs):
        """ Update the cache of all records in `records`. If the argument is a
            `SpecialValue`, update all fields (except "magic" columns).
        """
        if args and isinstance(args[0], SpecialValue):
            values = dict.fromkeys(self._recs._ids, args[0])
            for name, field in self._recs._fields.iteritems():
                if name not in MAGIC_COLUMNS:
                    self._recs.env.cache[field].update(values)
        else:
            return super(RecordCache, self).update(*args, **kwargs)

    def __delitem__(self, field):
        """ Remove the cached value of `field` for all `records`. """
        if isinstance(field, basestring):
            field = self._recs._fields[field]
        field_cache = self._recs.env.cache[field]
        for id in self._recs._ids:
            field_cache.pop(id, None)

    def __iter__(self):
        """ Iterate over the field names with a regular value in cache. """
        cache, id = self._recs.env.cache, self._recs.id
        dummy = SpecialValue(None)
        for name, field in self._recs._fields.iteritems():
            if name not in MAGIC_COLUMNS and \
                    not isinstance(cache[field].get(id, dummy), SpecialValue):
                yield name

    def __len__(self):
        """ Return the number of fields with a regular value in cache. """
        return sum(1 for name in self)


class Model(BaseModel):
    """Main super-class for regular database-persisted OpenERP models.

    OpenERP models are created by inheriting from this class::

        class user(Model):
            ...

    The system will later instantiate the class once per database (on
    which the class' module is installed).
    """
    _auto = True
    _register = False           # not visible in ORM registry, meant to be python-inherited only
    _transient = False          # True in a TransientModel


class TransientModel(BaseModel):
    """Model super-class for transient records, meant to be temporarily
       persisted, and regularly vacuum-cleaned.

       A TransientModel has a simplified access rights management,
       all users can create new records, and may only access the
       records they created. The super-user has unrestricted access
       to all TransientModel records.
    """
    _auto = True
    _register = False           # not visible in ORM registry, meant to be python-inherited only
    _transient = True


class AbstractModel(BaseModel):
    """Abstract Model super-class for creating an abstract class meant to be
       inherited by regular models (Models or TransientModels) but not meant to
       be usable on its own, or persisted.

       Technical note: we don't want to make AbstractModel the super-class of
       Model or BaseModel because it would not make sense to put the main
       definition of persistence methods such as create() in it, and still we
       should be able to override them within an AbstractModel.
    """
    _auto = False               # don't create any database backend for AbstractModels
    _register = False           # not visible in ORM registry, meant to be python-inherited only
    _transient = False


def itemgetter_tuple(items):
    """ Fixes itemgetter inconsistency (useful in some cases) of not returning
    a tuple if len(items) == 1: always returns an n-tuple where n = len(items)
    """
    if len(items) == 0:
        return lambda a: ()
    if len(items) == 1:
        return lambda gettable: (gettable[items[0]],)
    return operator.itemgetter(*items)

def convert_pgerror_23502(model, fields, info, e):
    m = re.match(r'^null value in column "(?P<field>\w+)" violates '
                 r'not-null constraint\n',
                 str(e))
    field_name = m and m.group('field')
    if not m or field_name not in fields:
        return {'message': unicode(e)}
    message = _(u"Missing required value for the field '%s'.") % field_name
    field = fields.get(field_name)
    if field:
        message = _(u"Missing required value for the field '%s' (%s)") % (field['string'], field_name)
    return {
        'message': message,
        'field': field_name,
    }

def convert_pgerror_23505(model, fields, info, e):
    m = re.match(r'^duplicate key (?P<field>\w+) violates unique constraint',
                 str(e))
    field_name = m and m.group('field')
    if not m or field_name not in fields:
        return {'message': unicode(e)}
    message = _(u"The value for the field '%s' already exists.") % field_name
    field = fields.get(field_name)
    if field:
        message = _(u"%s This might be '%s' in the current model, or a field "
                    u"of the same name in an o2m.") % (message, field['string'])
    return {
        'message': message,
        'field': field_name,
    }

PGERROR_TO_OE = defaultdict(
    # shape of mapped converters
    lambda: (lambda model, fvg, info, pgerror: {'message': unicode(pgerror)}), {
    # not_null_violation
    '23502': convert_pgerror_23502,
    # unique constraint error
    '23505': convert_pgerror_23505,
})
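
# Lookup sketch (illustrative): converters are keyed by the 5-character
# PostgreSQL error code (pgcode); unknown codes fall back to the default
# factory, which only wraps the raw message.
#
#     convert = PGERROR_TO_OE[e.pgcode]
#     error = convert(model, fields_view, info, e)  # dict with at least 'message'
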
def _normalize_ids(arg, atoms={int, long, str, unicode, NewId}):
    """ Normalizes the ids argument for ``browse`` (v7 and v8) to a tuple.

    Various implementations were tested on the corpus of all browse() calls
    performed during a full crawler run (after having installed all website_*
    modules) and this one was the most efficient overall.

    A possible bit of correctness was sacrificed by not doing any test on
    Iterable and just assuming that any non-atomic type was an iterable of
    some kind.

    :rtype: tuple
    """
    # much of the corpus is falsy objects (empty list, tuple or set, None)
    if not arg:
        return ()

    # `type in set` is significantly faster (because more restrictive) than
    # isinstance(arg, set) or issubclass(type, set); and for new-style classes
    # obj.__class__ is equivalent to but faster than type(obj). Not relevant
    # (and looks much worse) in most cases, but over millions of calls it
    # does have a very minor effect.
    if arg.__class__ in atoms:
        return arg,

    return tuple(arg)
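
# Behaviour sketch (illustrative):
#
#     _normalize_ids(None)         # -> ()
#     _normalize_ids(42)           # -> (42,)
#     _normalize_ids([1, 2, 3])    # -> (1, 2, 3)
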
# keep those imports here to avoid dependency cycle errors
from .osv import expression
from .fields import Field, SpecialValue, FailedValue

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: