1 # -*- coding: utf-8 -*-
2 ##############################################################################
4 # OpenERP, Open Source Management Solution
5 # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
7 # This program is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU Affero General Public License as
9 # published by the Free Software Foundation, either version 3 of the
10 # License, or (at your option) any later version.
12 # This program is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU Affero General Public License for more details.
17 # You should have received a copy of the GNU Affero General Public License
18 # along with this program. If not, see <http://www.gnu.org/licenses/>.
20 ##############################################################################
24 Object Relational Mapping module:
25 * Hierarchical structure
26 * Constraints consistency and validation
27 * Object metadata depends on its status
28 * Optimised processing by complex query (multiple actions at once)
29 * Default field values
30 * Permissions optimisation
31 * Persistant object: DB postgresql
33 * Multi-level caching system
34 * Two different inheritance mechanisms
35 * Rich set of field types:
36 - classical (varchar, integer, boolean, ...)
37 - relational (one2many, many2one, many2many)
52 from collections import defaultdict, MutableMapping
53 from inspect import getmembers
56 import dateutil.relativedelta
58 from lxml import etree
61 from . import SUPERUSER_ID
64 from .api import Environment
65 from .exceptions import except_orm, AccessError, MissingError
66 from .osv import fields
67 from .osv.query import Query
68 from .tools import lazy_property
69 from .tools.config import config
70 from .tools.misc import CountingStream, DEFAULT_SERVER_DATETIME_FORMAT, DEFAULT_SERVER_DATE_FORMAT
71 from .tools.safe_eval import safe_eval as eval
72 from .tools.translate import _
74 _logger = logging.getLogger(__name__)
75 _schema = logging.getLogger(__name__ + '.schema')
77 regex_order = re.compile('^( *([a-z0-9:_]+|"[a-z0-9:_]+")( *desc| *asc)?( *, *|))+$', re.I)
78 regex_object_name = re.compile(r'^[a-z0-9_.]+$')
79 onchange_v7 = re.compile(r"^(\w+)\((.*)\)$")
81 AUTOINIT_RECALCULATE_STORED_FIELDS = 1000
84 def check_object_name(name):
85 """ Check if the given name is a valid openerp object name.
87 The _name attribute in osv and osv_memory object is subject to
88 some restrictions. This function returns True or False whether
89 the given name is allowed or not.
91 TODO: this is an approximation. The goal in this approximation
92 is to disallow uppercase characters (in some places, we quote
93 table/column names and in other not, which leads to this kind
96 psycopg2.ProgrammingError: relation "xxx" does not exist).
98 The same restriction should apply to both osv and osv_memory
99 objects for consistency.
102 if regex_object_name.match(name) is None:
106 def raise_on_invalid_object_name(name):
107 if not check_object_name(name):
108 msg = "The _name attribute %s is not valid." % name
110 raise except_orm('ValueError', msg)
112 POSTGRES_CONFDELTYPES = {
120 def intersect(la, lb):
121 return filter(lambda x: x in lb, la)
124 """ Test whether functions `f` and `g` are identical or have the same name """
125 return f == g or getattr(f, '__name__', 0) == getattr(g, '__name__', 1)
127 def fix_import_export_id_paths(fieldname):
129 Fixes the id fields in import and exports, and splits field paths
132 :param str fieldname: name of the field to import/export
133 :return: split field name
136 fixed_db_id = re.sub(r'([^/])\.id', r'\1/.id', fieldname)
137 fixed_external_id = re.sub(r'([^/]):id', r'\1/id', fixed_db_id)
138 return fixed_external_id.split('/')
140 def pg_varchar(size=0):
141 """ Returns the VARCHAR declaration for the provided size:
143 * If no size (or an empty or negative size is provided) return an
145 * Otherwise return a VARCHAR(n)
147 :type int size: varchar size, optional
151 if not isinstance(size, int):
152 raise TypeError("VARCHAR parameter should be an int, got %s"
155 return 'VARCHAR(%d)' % size
158 FIELDS_TO_PGTYPES = {
159 fields.boolean: 'bool',
160 fields.integer: 'int4',
164 fields.datetime: 'timestamp',
165 fields.binary: 'bytea',
166 fields.many2one: 'int4',
167 fields.serialized: 'text',
170 def get_pg_type(f, type_override=None):
172 :param fields._column f: field to get a Postgres type for
173 :param type type_override: use the provided type for dispatching instead of the field's own type
174 :returns: (postgres_identification_type, postgres_type_specification)
177 field_type = type_override or type(f)
179 if field_type in FIELDS_TO_PGTYPES:
180 pg_type = (FIELDS_TO_PGTYPES[field_type], FIELDS_TO_PGTYPES[field_type])
181 elif issubclass(field_type, fields.float):
183 pg_type = ('numeric', 'NUMERIC')
185 pg_type = ('float8', 'DOUBLE PRECISION')
186 elif issubclass(field_type, (fields.char, fields.reference)):
187 pg_type = ('varchar', pg_varchar(f.size))
188 elif issubclass(field_type, fields.selection):
189 if (isinstance(f.selection, list) and isinstance(f.selection[0][0], int))\
190 or getattr(f, 'size', None) == -1:
191 pg_type = ('int4', 'INTEGER')
193 pg_type = ('varchar', pg_varchar(getattr(f, 'size', None)))
194 elif issubclass(field_type, fields.function):
195 if f._type == 'selection':
196 pg_type = ('varchar', pg_varchar())
198 pg_type = get_pg_type(f, getattr(fields, f._type))
200 _logger.warning('%s type not supported!', field_type)
206 class MetaModel(api.Meta):
207 """ Metaclass for the models.
209 This class is used as the metaclass for the class :class:`BaseModel` to
210 discover the models defined in a module (without instanciating them).
211 If the automatic discovery is not needed, it is possible to set the model's
212 ``_register`` attribute to False.
216 module_to_models = {}
218 def __init__(self, name, bases, attrs):
219 if not self._register:
220 self._register = True
221 super(MetaModel, self).__init__(name, bases, attrs)
224 if not hasattr(self, '_module'):
225 # The (OpenERP) module name can be in the `openerp.addons` namespace
226 # or not. For instance, module `sale` can be imported as
227 # `openerp.addons.sale` (the right way) or `sale` (for backward
229 module_parts = self.__module__.split('.')
230 if len(module_parts) > 2 and module_parts[:2] == ['openerp', 'addons']:
231 module_name = self.__module__.split('.')[2]
233 module_name = self.__module__.split('.')[0]
234 self._module = module_name
236 # Remember which models to instanciate for this module.
238 self.module_to_models.setdefault(self._module, []).append(self)
242 """ Pseudo-ids for new records. """
243 def __nonzero__(self):
246 IdType = (int, long, basestring, NewId)
249 # special columns automatically created by the ORM
250 LOG_ACCESS_COLUMNS = ['create_uid', 'create_date', 'write_uid', 'write_date']
251 MAGIC_COLUMNS = ['id'] + LOG_ACCESS_COLUMNS
253 class BaseModel(object):
254 """ Base class for OpenERP models.
256 OpenERP models are created by inheriting from this class' subclasses:
258 * :class:`Model` for regular database-persisted models
260 * :class:`TransientModel` for temporary data, stored in the database but
261 automatically vaccuumed every so often
263 * :class:`AbstractModel` for abstract super classes meant to be shared by
264 multiple inheriting model
266 The system automatically instantiates every model once per database. Those
267 instances represent the available models on each database, and depend on
268 which modules are installed on that database. The actual class of each
269 instance is built from the Python classes that create and inherit from the
272 Every model instance is a "recordset", i.e., an ordered collection of
273 records of the model. Recordsets are returned by methods like
274 :meth:`~.browse`, :meth:`~.search`, or field accesses. Records have no
275 explicit representation: a record is represented as a recordset of one
278 To create a class that should not be instantiated, the _register class
279 attribute may be set to False.
281 __metaclass__ = MetaModel
282 _auto = True # create database backend
283 _register = False # Set to false if the model shouldn't be automatically discovered.
290 _parent_name = 'parent_id'
291 _parent_store = False
292 _parent_order = False
299 # dict of {field:method}, with method returning the (name_get of records, {id: fold})
300 # to include in the _read_group, if grouped on this field
304 _transient = False # True in a TransientModel
307 # { 'parent_model': 'm2o_field', ... }
310 # Mapping from inherits'd field name to triple (m, r, f, n) where m is the
311 # model from which it is inherits'd, r is the (local) field towards m, f
312 # is the _column object itself, and n is the original (i.e. top-most)
315 # { 'field_name': ('parent_model', 'm2o_field_to_reach_parent',
316 # field_column_obj, origina_parent_model), ... }
319 # Mapping field name/column_info object
320 # This is similar to _inherit_fields but:
321 # 1. includes self fields,
322 # 2. uses column_info instead of a triple.
327 _sql_constraints = []
329 CONCURRENCY_CHECK_FIELD = '__last_update'
331 def log(self, cr, uid, id, message, secondary=False, context=None):
332 return _logger.warning("log() is deprecated. Please use OpenChatter notification system instead of the res.log mechanism.")
334 def view_init(self, cr, uid, fields_list, context=None):
335 """Override this method to do specific things when a view on the object is opened."""
338 def _field_create(self, cr, context=None):
339 """ Create entries in ir_model_fields for all the model's fields.
341 If necessary, also create an entry in ir_model, and if called from the
342 modules loading scheme (by receiving 'module' in the context), also
343 create entries in ir_model_data (for the model and the fields).
345 - create an entry in ir_model (if there is not already one),
346 - create an entry in ir_model_data (if there is not already one, and if
347 'module' is in the context),
348 - update ir_model_fields with the fields found in _columns
349 (TODO there is some redundancy as _columns is updated from
350 ir_model_fields in __init__).
355 cr.execute("SELECT id FROM ir_model WHERE model=%s", (self._name,))
357 cr.execute('SELECT nextval(%s)', ('ir_model_id_seq',))
358 model_id = cr.fetchone()[0]
359 cr.execute("INSERT INTO ir_model (id,model, name, info,state) VALUES (%s, %s, %s, %s, %s)", (model_id, self._name, self._description, self.__doc__, 'base'))
361 model_id = cr.fetchone()[0]
362 if 'module' in context:
363 name_id = 'model_'+self._name.replace('.', '_')
364 cr.execute('select * from ir_model_data where name=%s and module=%s', (name_id, context['module']))
366 cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, (now() at time zone 'UTC'), (now() at time zone 'UTC'), %s, %s, %s)", \
367 (name_id, context['module'], 'ir.model', model_id)
370 cr.execute("SELECT * FROM ir_model_fields WHERE model=%s", (self._name,))
372 for rec in cr.dictfetchall():
373 cols[rec['name']] = rec
375 ir_model_fields_obj = self.pool.get('ir.model.fields')
377 # sparse field should be created at the end, as it depends on its serialized field already existing
378 model_fields = sorted(self._columns.items(), key=lambda x: 1 if x[1]._type == 'sparse' else 0)
379 for (k, f) in model_fields:
381 'model_id': model_id,
384 'field_description': f.string,
386 'relation': f._obj or '',
387 'select_level': tools.ustr(int(f.select)),
388 'readonly': (f.readonly and 1) or 0,
389 'required': (f.required and 1) or 0,
390 'selectable': (f.selectable and 1) or 0,
391 'translate': (f.translate and 1) or 0,
392 'relation_field': f._fields_id if isinstance(f, fields.one2many) else '',
393 'serialization_field_id': None,
395 if getattr(f, 'serialization_field', None):
396 # resolve link to serialization_field if specified by name
397 serialization_field_id = ir_model_fields_obj.search(cr, SUPERUSER_ID, [('model','=',vals['model']), ('name', '=', f.serialization_field)])
398 if not serialization_field_id:
399 raise except_orm(_('Error'), _("Serialization field `%s` not found for sparse field `%s`!") % (f.serialization_field, k))
400 vals['serialization_field_id'] = serialization_field_id[0]
402 # When its a custom field,it does not contain f.select
403 if context.get('field_state', 'base') == 'manual':
404 if context.get('field_name', '') == k:
405 vals['select_level'] = context.get('select', '0')
406 #setting value to let the problem NOT occur next time
408 vals['select_level'] = cols[k]['select_level']
411 cr.execute('select nextval(%s)', ('ir_model_fields_id_seq',))
412 id = cr.fetchone()[0]
414 cr.execute("""INSERT INTO ir_model_fields (
415 id, model_id, model, name, field_description, ttype,
416 relation,state,select_level,relation_field, translate, serialization_field_id
418 %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s
420 id, vals['model_id'], vals['model'], vals['name'], vals['field_description'], vals['ttype'],
421 vals['relation'], 'base',
422 vals['select_level'], vals['relation_field'], bool(vals['translate']), vals['serialization_field_id']
424 if 'module' in context:
425 name1 = 'field_' + self._table + '_' + k
426 cr.execute("select name from ir_model_data where name=%s", (name1,))
428 name1 = name1 + "_" + str(id)
429 cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, (now() at time zone 'UTC'), (now() at time zone 'UTC'), %s, %s, %s)", \
430 (name1, context['module'], 'ir.model.fields', id)
433 for key, val in vals.items():
434 if cols[k][key] != vals[key]:
435 cr.execute('update ir_model_fields set field_description=%s where model=%s and name=%s', (vals['field_description'], vals['model'], vals['name']))
436 cr.execute("""UPDATE ir_model_fields SET
437 model_id=%s, field_description=%s, ttype=%s, relation=%s,
438 select_level=%s, readonly=%s ,required=%s, selectable=%s, relation_field=%s, translate=%s, serialization_field_id=%s
440 model=%s AND name=%s""", (
441 vals['model_id'], vals['field_description'], vals['ttype'],
443 vals['select_level'], bool(vals['readonly']), bool(vals['required']), bool(vals['selectable']), vals['relation_field'], bool(vals['translate']), vals['serialization_field_id'], vals['model'], vals['name']
446 self.invalidate_cache(cr, SUPERUSER_ID)
449 def _add_field(cls, name, field):
450 """ Add the given `field` under the given `name` in the class """
451 field.set_class_name(cls, name)
453 # add field in _fields (for reflection)
454 cls._fields[name] = field
456 # add field as an attribute, unless another kind of value already exists
457 if isinstance(getattr(cls, name, field), Field):
458 setattr(cls, name, field)
460 _logger.warning("In model %r, member %r is not a field", cls._name, name)
463 cls._columns[name] = field.to_column()
465 # remove potential column that may be overridden by field
466 cls._columns.pop(name, None)
469 def _add_magic_fields(cls):
470 """ Introduce magic fields on the current class
472 * id is a "normal" field (with a specific getter)
473 * create_uid, create_date, write_uid and write_date have become
475 * $CONCURRENCY_CHECK_FIELD is a computed field with its computing
476 method defined dynamically. Uses ``str(datetime.datetime.utcnow())``
477 to get the same structure as the previous
478 ``(now() at time zone 'UTC')::timestamp``::
480 # select (now() at time zone 'UTC')::timestamp;
482 ----------------------------
483 2013-06-18 08:30:37.292809
485 >>> str(datetime.datetime.utcnow())
486 '2013-06-18 08:31:32.821177'
488 def add(name, field):
489 """ add `field` with the given `name` if it does not exist yet """
490 if name not in cls._columns and name not in cls._fields:
491 cls._add_field(name, field)
496 # this field 'id' must override any other column or field
497 cls._add_field('id', fields.Id(automatic=True))
499 add('display_name', fields.Char(string='Name',
500 compute='_compute_display_name', inverse='_inverse_display_name',
501 search='_search_display_name', automatic=True))
504 add('create_uid', fields.Many2one('res.users', string='Created by', automatic=True))
505 add('create_date', fields.Datetime(string='Created on', automatic=True))
506 add('write_uid', fields.Many2one('res.users', string='Last Updated by', automatic=True))
507 add('write_date', fields.Datetime(string='Last Updated on', automatic=True))
508 last_modified_name = 'compute_concurrency_field_with_access'
510 last_modified_name = 'compute_concurrency_field'
512 # this field must override any other column or field
513 cls._add_field(cls.CONCURRENCY_CHECK_FIELD, fields.Datetime(
514 string='Last Modified on', compute=last_modified_name, automatic=True))
517 def compute_concurrency_field(self):
518 self[self.CONCURRENCY_CHECK_FIELD] = \
519 datetime.datetime.utcnow().strftime(DEFAULT_SERVER_DATETIME_FORMAT)
522 @api.depends('create_date', 'write_date')
523 def compute_concurrency_field_with_access(self):
524 self[self.CONCURRENCY_CHECK_FIELD] = \
525 self.write_date or self.create_date or \
526 datetime.datetime.utcnow().strftime(DEFAULT_SERVER_DATETIME_FORMAT)
529 # Goal: try to apply inheritance at the instanciation level and
530 # put objects in the pool var
533 def _build_model(cls, pool, cr):
534 """ Instanciate a given model.
536 This class method instanciates the class of some model (i.e. a class
537 deriving from osv or osv_memory). The class might be the class passed
538 in argument or, if it inherits from another class, a class constructed
539 by combining the two classes.
543 # IMPORTANT: the registry contains an instance for each model. The class
544 # of each model carries inferred metadata that is shared among the
545 # model's instances for this registry, but not among registries. Hence
546 # we cannot use that "registry class" for combining model classes by
547 # inheritance, since it confuses the metadata inference process.
549 # Keep links to non-inherited constraints in cls; this is useful for
550 # instance when exporting translations
551 cls._local_constraints = cls.__dict__.get('_constraints', [])
552 cls._local_sql_constraints = cls.__dict__.get('_sql_constraints', [])
554 # determine inherited models
555 parents = getattr(cls, '_inherit', [])
556 parents = [parents] if isinstance(parents, basestring) else (parents or [])
558 # determine the model's name
559 name = cls._name or (len(parents) == 1 and parents[0]) or cls.__name__
561 # determine the module that introduced the model
562 original_module = pool[name]._original_module if name in parents else cls._module
564 # build the class hierarchy for the model
565 for parent in parents:
566 if parent not in pool:
567 raise TypeError('The model "%s" specifies an unexisting parent class "%s"\n'
568 'You may need to add a dependency on the parent class\' module.' % (name, parent))
569 parent_model = pool[parent]
571 # do no use the class of parent_model, since that class contains
572 # inferred metadata; use its ancestor instead
573 parent_class = type(parent_model).__base__
575 # don't inherit custom fields
576 columns = dict((key, val)
577 for key, val in parent_class._columns.iteritems()
580 columns.update(cls._columns)
582 defaults = dict(parent_class._defaults)
583 defaults.update(cls._defaults)
585 inherits = dict(parent_class._inherits)
586 inherits.update(cls._inherits)
588 old_constraints = parent_class._constraints
589 new_constraints = cls._constraints
590 # filter out from old_constraints the ones overridden by a
591 # constraint with the same function name in new_constraints
592 constraints = new_constraints + [oldc
593 for oldc in old_constraints
594 if not any(newc[2] == oldc[2] and same_name(newc[0], oldc[0])
595 for newc in new_constraints)
598 sql_constraints = cls._sql_constraints + \
599 parent_class._sql_constraints
605 '_defaults': defaults,
606 '_inherits': inherits,
607 '_constraints': constraints,
608 '_sql_constraints': sql_constraints,
610 cls = type(name, (cls, parent_class), attrs)
612 # introduce the "registry class" of the model;
613 # duplicate some attributes so that the ORM can modify them
617 '_columns': dict(cls._columns),
618 '_defaults': dict(cls._defaults),
619 '_inherits': dict(cls._inherits),
620 '_constraints': list(cls._constraints),
621 '_sql_constraints': list(cls._sql_constraints),
622 '_original_module': original_module,
624 cls = type(cls._name, (cls,), attrs)
626 # float fields are registry-dependent (digit attribute); duplicate them
628 for key, col in cls._columns.items():
629 if col._type == 'float':
630 cls._columns[key] = copy.copy(col)
632 # instantiate the model, and initialize it
633 model = object.__new__(cls)
634 model.__init__(pool, cr)
638 def _init_function_fields(cls, pool, cr):
639 # initialize the list of non-stored function fields for this model
640 pool._pure_function_fields[cls._name] = []
642 # process store of low-level function fields
643 for fname, column in cls._columns.iteritems():
644 if hasattr(column, 'digits_change'):
645 column.digits_change(cr)
646 # filter out existing store about this field
647 pool._store_function[cls._name] = [
649 for stored in pool._store_function.get(cls._name, [])
650 if (stored[0], stored[1]) != (cls._name, fname)
652 if not isinstance(column, fields.function):
655 # register it on the pool for invalidation
656 pool._pure_function_fields[cls._name].append(fname)
658 # process store parameter
661 get_ids = lambda self, cr, uid, ids, c={}: ids
662 store = {cls._name: (get_ids, None, column.priority, None)}
663 for model, spec in store.iteritems():
665 (fnct, fields2, order, length) = spec
667 (fnct, fields2, order) = spec
670 raise except_orm('Error',
671 ('Invalid function definition %s in object %s !\nYou must use the definition: store={object:(fnct, fields, priority, time length)}.' % (fname, cls._name)))
672 pool._store_function.setdefault(model, [])
673 t = (cls._name, fname, fnct, tuple(fields2) if fields2 else None, order, length)
674 if t not in pool._store_function[model]:
675 pool._store_function[model].append(t)
676 pool._store_function[model].sort(key=lambda x: x[4])
679 def _init_manual_fields(cls, pool, cr):
680 # Check whether the query is already done
681 if pool.fields_by_model is not None:
682 manual_fields = pool.fields_by_model.get(cls._name, [])
684 cr.execute('SELECT * FROM ir_model_fields WHERE model=%s AND state=%s', (cls._name, 'manual'))
685 manual_fields = cr.dictfetchall()
687 for field in manual_fields:
688 if field['name'] in cls._columns:
691 'string': field['field_description'],
692 'required': bool(field['required']),
693 'readonly': bool(field['readonly']),
694 'domain': eval(field['domain']) if field['domain'] else None,
695 'size': field['size'] or None,
696 'ondelete': field['on_delete'],
697 'translate': (field['translate']),
700 #'select': int(field['select_level'])
702 if field['serialization_field_id']:
703 cr.execute('SELECT name FROM ir_model_fields WHERE id=%s', (field['serialization_field_id'],))
704 attrs.update({'serialization_field': cr.fetchone()[0], 'type': field['ttype']})
705 if field['ttype'] in ['many2one', 'one2many', 'many2many']:
706 attrs.update({'relation': field['relation']})
707 cls._columns[field['name']] = fields.sparse(**attrs)
708 elif field['ttype'] == 'selection':
709 cls._columns[field['name']] = fields.selection(eval(field['selection']), **attrs)
710 elif field['ttype'] == 'reference':
711 cls._columns[field['name']] = fields.reference(selection=eval(field['selection']), **attrs)
712 elif field['ttype'] == 'many2one':
713 cls._columns[field['name']] = fields.many2one(field['relation'], **attrs)
714 elif field['ttype'] == 'one2many':
715 cls._columns[field['name']] = fields.one2many(field['relation'], field['relation_field'], **attrs)
716 elif field['ttype'] == 'many2many':
717 _rel1 = field['relation'].replace('.', '_')
718 _rel2 = field['model'].replace('.', '_')
719 _rel_name = 'x_%s_%s_%s_rel' % (_rel1, _rel2, field['name'])
720 cls._columns[field['name']] = fields.many2many(field['relation'], _rel_name, 'id1', 'id2', **attrs)
722 cls._columns[field['name']] = getattr(fields, field['ttype'])(**attrs)
725 def _init_constraints_onchanges(cls):
726 # store sql constraint error messages
727 for (key, _, msg) in cls._sql_constraints:
728 cls.pool._sql_error[cls._table + '_' + key] = msg
730 # collect constraint and onchange methods
731 cls._constraint_methods = []
732 cls._onchange_methods = defaultdict(list)
733 for attr, func in getmembers(cls, callable):
734 if hasattr(func, '_constrains'):
735 if not all(name in cls._fields for name in func._constrains):
736 _logger.warning("@constrains%r parameters must be field names", func._constrains)
737 cls._constraint_methods.append(func)
738 if hasattr(func, '_onchange'):
739 if not all(name in cls._fields for name in func._onchange):
740 _logger.warning("@onchange%r parameters must be field names", func._onchange)
741 for name in func._onchange:
742 cls._onchange_methods[name].append(func)
745 # In the past, this method was registering the model class in the server.
746 # This job is now done entirely by the metaclass MetaModel.
748 # Do not create an instance here. Model instances are created by method
752 def __init__(self, pool, cr):
753 """ Initialize a model and make it part of the given registry.
755 - copy the stored fields' functions in the registry,
756 - retrieve custom fields and add them in the model,
757 - ensure there is a many2one for each _inherits'd parent,
758 - update the children's _columns,
759 - give a chance to each field to initialize itself.
764 # link the class to the registry, and update the registry
766 cls._model = self # backward compatibility
767 pool.add(cls._name, self)
769 # determine description, table, sequence and log_access
770 if not cls._description:
771 cls._description = cls._name
773 cls._table = cls._name.replace('.', '_')
774 if not cls._sequence:
775 cls._sequence = cls._table + '_id_seq'
776 if not hasattr(cls, '_log_access'):
777 # If _log_access is not specified, it is the same value as _auto.
778 cls._log_access = cls._auto
781 if cls.is_transient():
782 cls._transient_check_count = 0
783 cls._transient_max_count = config.get('osv_memory_count_limit')
784 cls._transient_max_hours = config.get('osv_memory_age_limit')
785 assert cls._log_access, \
786 "TransientModels must have log_access turned on, " \
787 "in order to implement their access rights policy"
789 # retrieve new-style fields and duplicate them (to avoid clashes with
790 # inheritance between different models)
792 for attr, field in getmembers(cls, Field.__instancecheck__):
793 if not field._origin:
794 cls._add_field(attr, field.copy())
796 # introduce magic fields
797 cls._add_magic_fields()
799 # register stuff about low-level function fields and custom fields
800 cls._init_function_fields(pool, cr)
801 cls._init_manual_fields(pool, cr)
804 cls._inherits_check()
805 cls._inherits_reload()
807 # register constraints and onchange methods
808 cls._init_constraints_onchanges()
811 for k in cls._defaults:
812 assert k in cls._fields, \
813 "Model %s has a default for nonexiting field %s" % (cls._name, k)
816 for column in cls._columns.itervalues():
821 assert cls._rec_name in cls._fields, \
822 "Invalid rec_name %s for model %s" % (cls._rec_name, cls._name)
823 elif 'name' in cls._fields:
824 cls._rec_name = 'name'
826 # prepare ormcache, which must be shared by all instances of the model
829 def __export_xml_id(self):
830 """ Return a valid xml_id for the record `self`. """
831 ir_model_data = self.sudo().env['ir.model.data']
832 data = ir_model_data.search([('model', '=', self._name), ('res_id', '=', self.id)])
835 return '%s.%s' % (data.module, data.name)
840 name = '%s_%s' % (self._table, self.id)
841 while ir_model_data.search([('module', '=', '__export__'), ('name', '=', name)]):
843 name = '%s_%s_%s' % (self._table, self.id, postfix)
844 ir_model_data.create({
847 'module': '__export__',
850 return '__export__.' + name
853 def __export_rows(self, fields):
854 """ Export fields of the records in `self`.
856 :param fields: list of lists of fields to traverse
857 :return: list of lists of corresponding values
861 # main line of record, initially empty
862 current = [''] * len(fields)
863 lines.append(current)
865 # list of primary fields followed by secondary field(s)
868 # process column by column
869 for i, path in enumerate(fields):
874 if name in primary_done:
878 current[i] = str(record.id)
880 current[i] = record.__export_xml_id()
882 field = record._fields[name]
885 # this part could be simpler, but it has to be done this way
886 # in order to reproduce the former behavior
887 if not isinstance(value, BaseModel):
888 current[i] = field.convert_to_export(value, self.env)
890 primary_done.append(name)
892 # This is a special case, its strange behavior is intended!
893 if field.type == 'many2many' and len(path) > 1 and path[1] == 'id':
894 xml_ids = [r.__export_xml_id() for r in value]
895 current[i] = ','.join(xml_ids) or False
898 # recursively export the fields that follow name
899 fields2 = [(p[1:] if p and p[0] == name else []) for p in fields]
900 lines2 = value.__export_rows(fields2)
902 # merge first line with record's main line
903 for j, val in enumerate(lines2[0]):
906 # check value of current field
908 # assign xml_ids, and forget about remaining lines
909 xml_ids = [item[1] for item in value.name_get()]
910 current[i] = ','.join(xml_ids)
912 # append the other lines at the end
920 def export_data(self, fields_to_export, raw_data=False):
921 """ Export fields for selected objects
923 :param fields_to_export: list of fields
924 :param raw_data: True to return value in native Python type
925 :rtype: dictionary with a *datas* matrix
927 This method is used when exporting data via client menu
929 fields_to_export = map(fix_import_export_id_paths, fields_to_export)
931 self = self.with_context(export_raw_data=True)
932 return {'datas': self.__export_rows(fields_to_export)}
934 def import_data(self, cr, uid, fields, datas, mode='init', current_module='', noupdate=False, context=None, filename=None):
937 Use :meth:`~load` instead
939 Import given data in given module
941 This method is used when importing data via client menu.
943 Example of fields to import for a sale.order::
946 partner_id, (=name_search)
947 order_line/.id, (=database_id)
949 order_line/product_id/id, (=xml id)
950 order_line/price_unit,
951 order_line/product_uom_qty,
952 order_line/product_uom/id (=xml_id)
954 This method returns a 4-tuple with the following structure::
956 (return_code, errored_resource, error_message, unused)
958 * The first item is a return code, it is ``-1`` in case of
959 import error, or the last imported row number in case of success
960 * The second item contains the record data dict that failed to import
961 in case of error, otherwise it's 0
962 * The third item contains an error message string in case of error,
964 * The last item is currently unused, with no specific semantics
966 :param fields: list of fields to import
967 :param datas: data to import
968 :param mode: 'init' or 'update' for record creation
969 :param current_module: module name
970 :param noupdate: flag for record creation
971 :param filename: optional file to store partial import state for recovery
972 :returns: 4-tuple in the form (return_code, errored_resource, error_message, unused)
973 :rtype: (int, dict or 0, str or 0, str or 0)
975 context = dict(context) if context is not None else {}
976 context['_import_current_module'] = current_module
978 fields = map(fix_import_export_id_paths, fields)
979 ir_model_data_obj = self.pool.get('ir.model.data')
982 if m['type'] == 'error':
983 raise Exception(m['message'])
985 if config.get('import_partial') and filename:
986 with open(config.get('import_partial'), 'rb') as partial_import_file:
987 data = pickle.load(partial_import_file)
988 position = data.get(filename, 0)
992 for res_id, xml_id, res, info in self._convert_records(cr, uid,
993 self._extract_records(cr, uid, fields, datas,
994 context=context, log=log),
995 context=context, log=log):
996 ir_model_data_obj._update(cr, uid, self._name,
997 current_module, res, mode=mode, xml_id=xml_id,
998 noupdate=noupdate, res_id=res_id, context=context)
999 position = info.get('rows', {}).get('to', 0) + 1
1000 if config.get('import_partial') and filename and (not (position%100)):
1001 with open(config.get('import_partial'), 'rb') as partial_import:
1002 data = pickle.load(partial_import)
1003 data[filename] = position
1004 with open(config.get('import_partial'), 'wb') as partial_import:
1005 pickle.dump(data, partial_import)
1006 if context.get('defer_parent_store_computation'):
1007 self._parent_store_compute(cr)
1009 except Exception, e:
1011 return -1, {}, 'Line %d : %s' % (position + 1, tools.ustr(e)), ''
1013 if context.get('defer_parent_store_computation'):
1014 self._parent_store_compute(cr)
1015 return position, 0, 0, 0
1017 def load(self, cr, uid, fields, data, context=None):
1019 Attempts to load the data matrix, and returns a list of ids (or
1020 ``False`` if there was an error and no id could be generated) and a
1023 The ids are those of the records created and saved (in database), in
1024 the same order they were extracted from the file. They can be passed
1025 directly to :meth:`~read`
1027 :param fields: list of fields to import, at the same index as the corresponding data
1028 :type fields: list(str)
1029 :param data: row-major matrix of data to import
1030 :type data: list(list(str))
1031 :param dict context:
1032 :returns: {ids: list(int)|False, messages: [Message]}
1034 cr.execute('SAVEPOINT model_load')
1037 fields = map(fix_import_export_id_paths, fields)
1038 ModelData = self.pool['ir.model.data'].clear_caches()
1040 fg = self.fields_get(cr, uid, context=context)
1047 for id, xid, record, info in self._convert_records(cr, uid,
1048 self._extract_records(cr, uid, fields, data,
1049 context=context, log=messages.append),
1050 context=context, log=messages.append):
1052 cr.execute('SAVEPOINT model_load_save')
1053 except psycopg2.InternalError, e:
1054 # broken transaction, exit and hope the source error was
1056 if not any(message['type'] == 'error' for message in messages):
1057 messages.append(dict(info, type='error',message=
1058 u"Unknown database error: '%s'" % e))
1061 ids.append(ModelData._update(cr, uid, self._name,
1062 current_module, record, mode=mode, xml_id=xid,
1063 noupdate=noupdate, res_id=id, context=context))
1064 cr.execute('RELEASE SAVEPOINT model_load_save')
1065 except psycopg2.Warning, e:
1066 messages.append(dict(info, type='warning', message=str(e)))
1067 cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
1068 except psycopg2.Error, e:
1069 messages.append(dict(
1071 **PGERROR_TO_OE[e.pgcode](self, fg, info, e)))
1072 # Failed to write, log to messages, rollback savepoint (to
1073 # avoid broken transaction) and keep going
1074 cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
1075 if any(message['type'] == 'error' for message in messages):
1076 cr.execute('ROLLBACK TO SAVEPOINT model_load')
1078 return {'ids': ids, 'messages': messages}
1080 def _extract_records(self, cr, uid, fields_, data,
1081 context=None, log=lambda a: None):
1082 """ Generates record dicts from the data sequence.
1084 The result is a generator of dicts mapping field names to raw
1085 (unconverted, unvalidated) values.
1087 For relational fields, if sub-fields were provided the value will be
1088 a list of sub-records
1090 The following sub-fields may be set on the record (by key):
1091 * None is the name_get for the record (to use with name_create/name_search)
1092 * "id" is the External ID for the record
1093 * ".id" is the Database ID for the record
1095 columns = dict((k, v.column) for k, v in self._all_columns.iteritems())
1096 # Fake columns to avoid special cases in extractor
1097 columns[None] = fields.char('rec_name')
1098 columns['id'] = fields.char('External ID')
1099 columns['.id'] = fields.integer('Database ID')
1101 # m2o fields can't be on multiple lines so exclude them from the
1102 # is_relational field rows filter, but special-case it later on to
1103 # be handled with relational fields (as it can have subfields)
1104 is_relational = lambda field: columns[field]._type in ('one2many', 'many2many', 'many2one')
1105 get_o2m_values = itemgetter_tuple(
1106 [index for index, field in enumerate(fields_)
1107 if columns[field[0]]._type == 'one2many'])
1108 get_nono2m_values = itemgetter_tuple(
1109 [index for index, field in enumerate(fields_)
1110 if columns[field[0]]._type != 'one2many'])
1111 # Checks if the provided row has any non-empty non-relational field
1112 def only_o2m_values(row, f=get_nono2m_values, g=get_o2m_values):
1113 return any(g(row)) and not any(f(row))
1117 if index >= len(data): return
1120 # copy non-relational fields to record dict
1121 record = dict((field[0], value)
1122 for field, value in itertools.izip(fields_, row)
1123 if not is_relational(field[0]))
1125 # Get all following rows which have relational values attached to
1126 # the current record (no non-relational values)
1127 record_span = itertools.takewhile(
1128 only_o2m_values, itertools.islice(data, index + 1, None))
1129 # stitch record row back on for relational fields
1130 record_span = list(itertools.chain([row], record_span))
1131 for relfield in set(
1132 field[0] for field in fields_
1133 if is_relational(field[0])):
1134 column = columns[relfield]
1135 # FIXME: how to not use _obj without relying on fields_get?
1136 Model = self.pool[column._obj]
1138 # get only cells for this sub-field, should be strictly
1139 # non-empty, field path [None] is for name_get column
1140 indices, subfields = zip(*((index, field[1:] or [None])
1141 for index, field in enumerate(fields_)
1142 if field[0] == relfield))
1144 # return all rows which have at least one value for the
1145 # subfields of relfield
1146 relfield_data = filter(any, map(itemgetter_tuple(indices), record_span))
1147 record[relfield] = [subrecord
1148 for subrecord, _subinfo in Model._extract_records(
1149 cr, uid, subfields, relfield_data,
1150 context=context, log=log)]
1152 yield record, {'rows': {
1154 'to': index + len(record_span) - 1
1156 index += len(record_span)
1158 def _convert_records(self, cr, uid, records,
1159 context=None, log=lambda a: None):
1160 """ Converts records from the source iterable (recursive dicts of
1161 strings) into forms which can be written to the database (via
1162 self.create or (ir.model.data)._update)
1164 :returns: a list of triplets of (id, xid, record)
1165 :rtype: list((int|None, str|None, dict))
1167 if context is None: context = {}
1168 Converter = self.pool['ir.fields.converter']
1169 columns = dict((k, v.column) for k, v in self._all_columns.iteritems())
1170 Translation = self.pool['ir.translation']
1172 (f, (Translation._get_source(cr, uid, self._name + ',' + f, 'field',
1173 context.get('lang'))
1175 for f, column in columns.iteritems())
1177 convert = Converter.for_model(cr, uid, self, context=context)
1179 def _log(base, field, exception):
1180 type = 'warning' if isinstance(exception, Warning) else 'error'
1181 # logs the logical (not human-readable) field name for automated
1182 # processing of response, but injects human readable in message
1183 record = dict(base, type=type, field=field,
1184 message=unicode(exception.args[0]) % base)
1185 if len(exception.args) > 1 and exception.args[1]:
1186 record.update(exception.args[1])
1189 stream = CountingStream(records)
1190 for record, extras in stream:
1193 # name_get/name_create
1194 if None in record: pass
1201 dbid = int(record['.id'])
1203 # in case of overridden id column
1204 dbid = record['.id']
1205 if not self.search(cr, uid, [('id', '=', dbid)], context=context):
1208 record=stream.index,
1210 message=_(u"Unknown database identifier '%s'") % dbid))
1213 converted = convert(record, lambda field, err:\
1214 _log(dict(extras, record=stream.index, field=field_names[field]), field, err))
1216 yield dbid, xid, converted, dict(extras, record=stream.index)
1219 def _validate_fields(self, field_names):
1220 field_names = set(field_names)
1222 # old-style constraint methods
1223 trans = self.env['ir.translation']
1224 cr, uid, context = self.env.args
1227 for fun, msg, names in self._constraints:
1229 # validation must be context-independent; call `fun` without context
1230 valid = not (set(names) & field_names) or fun(self._model, cr, uid, ids)
1232 except Exception, e:
1233 _logger.debug('Exception while validating constraint', exc_info=True)
1235 extra_error = tools.ustr(e)
1238 res_msg = msg(self._model, cr, uid, ids, context=context)
1239 if isinstance(res_msg, tuple):
1240 template, params = res_msg
1241 res_msg = template % params
1243 res_msg = trans._get_source(self._name, 'constraint', self.env.lang, msg)
1245 res_msg += "\n\n%s\n%s" % (_('Error details:'), extra_error)
1247 _("Field(s) `%s` failed against a constraint: %s") %
1248 (', '.join(names), res_msg)
1251 raise except_orm('ValidateError', '\n'.join(errors))
1253 # new-style constraint methods
1254 for check in self._constraint_methods:
1255 if set(check._constrains) & field_names:
1258 def default_get(self, cr, uid, fields_list, context=None):
1259 """ Return default values for the fields in `fields_list`. Default
1260 values are determined by the context, user defaults, and the model
1263 :param fields_list: a list of field names
1264 :return: a dictionary mapping each field name to its corresponding
1265 default value; the keys of the dictionary are the fields in
1266 `fields_list` that have a default value different from ``False``.
1268 This method should not be overridden. In order to change the
1269 mechanism for determining default values, you should override method
1270 :meth:`add_default_value` instead.
1272 # trigger view init hook
1273 self.view_init(cr, uid, fields_list, context)
1275 # use a new record to determine default values
1276 record = self.new(cr, uid, {}, context=context)
1277 for name in fields_list:
1278 if name in self._fields:
1279 record[name] # force evaluation of defaults
1281 # retrieve defaults from record's cache
1282 return self._convert_to_write(record._cache)
1284 def add_default_value(self, field):
1285 """ Set the default value of `field` to the new record `self`.
1286 The value must be assigned to `self`.
1288 assert not self.id, "Expected new record: %s" % self
1289 cr, uid, context = self.env.args
1292 # 1. look up context
1293 key = 'default_' + name
1295 self[name] = context[key]
1298 # 2. look up ir_values
1299 # Note: performance is good, because get_defaults_dict is cached!
1300 ir_values_dict = self.env['ir.values'].get_defaults_dict(self._name)
1301 if name in ir_values_dict:
1302 self[name] = ir_values_dict[name]
1305 # 3. look up property fields
1306 # TODO: get rid of this one
1307 column = self._columns.get(name)
1308 if isinstance(column, fields.property):
1309 self[name] = self.env['ir.property'].get(name, self._name)
1312 # 4. look up _defaults
1313 if name in self._defaults:
1314 value = self._defaults[name]
1316 value = value(self._model, cr, uid, context)
1320 # 5. delegate to field
1321 field.determine_default(self)
1323 def fields_get_keys(self, cr, user, context=None):
1324 res = self._columns.keys()
1325 # TODO I believe this loop can be replace by
1326 # res.extend(self._inherit_fields.key())
1327 for parent in self._inherits:
1328 res.extend(self.pool[parent].fields_get_keys(cr, user, context))
1331 def _rec_name_fallback(self, cr, uid, context=None):
1332 rec_name = self._rec_name
1333 if rec_name not in self._columns:
1334 rec_name = self._columns.keys()[0] if len(self._columns.keys()) > 0 else "id"
1338 # Overload this method if you need a window title which depends on the context
1340 def view_header_get(self, cr, user, view_id=None, view_type='form', context=None):
1343 def user_has_groups(self, cr, uid, groups, context=None):
1344 """Return true if the user is at least member of one of the groups
1345 in groups_str. Typically used to resolve `groups` attribute
1346 in view and model definitions.
1348 :param str groups: comma-separated list of fully-qualified group
1349 external IDs, e.g.: ``base.group_user,base.group_system``
1350 :return: True if the current user is a member of one of the
1353 return any(self.pool['res.users'].has_group(cr, uid, group_ext_id)
1354 for group_ext_id in groups.split(','))
1356 def _get_default_form_view(self, cr, user, context=None):
1357 """ Generates a default single-line form view using all fields
1358 of the current model except the m2m and o2m ones.
1360 :param cr: database cursor
1361 :param int user: user id
1362 :param dict context: connection context
1363 :returns: a form view as an lxml document
1364 :rtype: etree._Element
1366 view = etree.Element('form', string=self._description)
1367 group = etree.SubElement(view, 'group', col="4")
1368 for fname, field in self._fields.iteritems():
1369 if field.automatic or field.type in ('one2many', 'many2many'):
1372 etree.SubElement(group, 'field', name=fname)
1373 if field.type == 'text':
1374 etree.SubElement(group, 'newline')
1377 def _get_default_search_view(self, cr, user, context=None):
1378 """ Generates a single-field search view, based on _rec_name.
1380 :param cr: database cursor
1381 :param int user: user id
1382 :param dict context: connection context
1383 :returns: a tree view as an lxml document
1384 :rtype: etree._Element
1386 view = etree.Element('search', string=self._description)
1387 etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))
1390 def _get_default_tree_view(self, cr, user, context=None):
1391 """ Generates a single-field tree view, based on _rec_name.
1393 :param cr: database cursor
1394 :param int user: user id
1395 :param dict context: connection context
1396 :returns: a tree view as an lxml document
1397 :rtype: etree._Element
1399 view = etree.Element('tree', string=self._description)
1400 etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))
1403 def _get_default_calendar_view(self, cr, user, context=None):
1404 """ Generates a default calendar view by trying to infer
1405 calendar fields from a number of pre-set attribute names
1407 :param cr: database cursor
1408 :param int user: user id
1409 :param dict context: connection context
1410 :returns: a calendar view
1411 :rtype: etree._Element
1413 def set_first_of(seq, in_, to):
1414 """Sets the first value of `seq` also found in `in_` to
1415 the `to` attribute of the view being closed over.
1417 Returns whether it's found a suitable value (and set it on
1418 the attribute) or not
1426 view = etree.Element('calendar', string=self._description)
1427 etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))
1429 if self._date_name not in self._columns:
1431 for dt in ['date', 'date_start', 'x_date', 'x_date_start']:
1432 if dt in self._columns:
1433 self._date_name = dt
1438 raise except_orm(_('Invalid Object Architecture!'), _("Insufficient fields for Calendar View!"))
1439 view.set('date_start', self._date_name)
1441 set_first_of(["user_id", "partner_id", "x_user_id", "x_partner_id"],
1442 self._columns, 'color')
1444 if not set_first_of(["date_stop", "date_end", "x_date_stop", "x_date_end"],
1445 self._columns, 'date_stop'):
1446 if not set_first_of(["date_delay", "planned_hours", "x_date_delay", "x_planned_hours"],
1447 self._columns, 'date_delay'):
1449 _('Invalid Object Architecture!'),
1450 _("Insufficient fields to generate a Calendar View for %s, missing a date_stop or a date_delay" % self._name))
1454 def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
1456 Get the detailed composition of the requested view like fields, model, view architecture
1458 :param view_id: id of the view or None
1459 :param view_type: type of the view to return if view_id is None ('form', tree', ...)
1460 :param toolbar: true to include contextual actions
1461 :param submenu: deprecated
1462 :return: dictionary describing the composition of the requested view (including inherited views and extensions)
1463 :raise AttributeError:
1464 * if the inherited view has unknown position to work with other than 'before', 'after', 'inside', 'replace'
1465 * if some tag other than 'position' is found in parent view
1466 :raise Invalid ArchitectureError: if there is view type other than form, tree, calendar, search etc defined on the structure
1470 View = self.pool['ir.ui.view']
1473 'model': self._name,
1474 'field_parent': False,
1477 # try to find a view_id if none provided
1479 # <view_type>_view_ref in context can be used to overrride the default view
1480 view_ref_key = view_type + '_view_ref'
1481 view_ref = context.get(view_ref_key)
1484 module, view_ref = view_ref.split('.', 1)
1485 cr.execute("SELECT res_id FROM ir_model_data WHERE model='ir.ui.view' AND module=%s AND name=%s", (module, view_ref))
1486 view_ref_res = cr.fetchone()
1488 view_id = view_ref_res[0]
1490 _logger.warning('%r requires a fully-qualified external id (got: %r for model %s). '
1491 'Please use the complete `module.view_id` form instead.', view_ref_key, view_ref,
1495 # otherwise try to find the lowest priority matching ir.ui.view
1496 view_id = View.default_view(cr, uid, self._name, view_type, context=context)
1498 # context for post-processing might be overriden
1501 # read the view with inherited views applied
1502 root_view = View.read_combined(cr, uid, view_id, fields=['id', 'name', 'field_parent', 'type', 'model', 'arch'], context=context)
1503 result['arch'] = root_view['arch']
1504 result['name'] = root_view['name']
1505 result['type'] = root_view['type']
1506 result['view_id'] = root_view['id']
1507 result['field_parent'] = root_view['field_parent']
1508 # override context fro postprocessing
1509 if root_view.get('model') != self._name:
1510 ctx = dict(context, base_model_name=root_view.get('model'))
1512 # fallback on default views methods if no ir.ui.view could be found
1514 get_func = getattr(self, '_get_default_%s_view' % view_type)
1515 arch_etree = get_func(cr, uid, context)
1516 result['arch'] = etree.tostring(arch_etree, encoding='utf-8')
1517 result['type'] = view_type
1518 result['name'] = 'default'
1519 except AttributeError:
1520 raise except_orm(_('Invalid Architecture!'), _("No default view of type '%s' could be found !") % view_type)
1522 # Apply post processing, groups and modifiers etc...
1523 xarch, xfields = View.postprocess_and_fields(cr, uid, self._name, etree.fromstring(result['arch']), view_id, context=ctx)
1524 result['arch'] = xarch
1525 result['fields'] = xfields
1527 # Add related action information if aksed
1529 toclean = ('report_sxw_content', 'report_rml_content', 'report_sxw', 'report_rml', 'report_sxw_content_data', 'report_rml_content_data')
1535 ir_values_obj = self.pool.get('ir.values')
1536 resprint = ir_values_obj.get(cr, uid, 'action', 'client_print_multi', [(self._name, False)], False, context)
1537 resaction = ir_values_obj.get(cr, uid, 'action', 'client_action_multi', [(self._name, False)], False, context)
1538 resrelate = ir_values_obj.get(cr, uid, 'action', 'client_action_relate', [(self._name, False)], False, context)
1539 resaction = [clean(action) for action in resaction if view_type == 'tree' or not action[2].get('multi')]
1540 resprint = [clean(print_) for print_ in resprint if view_type == 'tree' or not print_[2].get('multi')]
1541 #When multi="True" set it will display only in More of the list view
1542 resrelate = [clean(action) for action in resrelate
1543 if (action[2].get('multi') and view_type == 'tree') or (not action[2].get('multi') and view_type == 'form')]
1545 for x in itertools.chain(resprint, resaction, resrelate):
1546 x['string'] = x['name']
1548 result['toolbar'] = {
1550 'action': resaction,
1555 def get_formview_id(self, cr, uid, id, context=None):
1556 """ Return an view id to open the document with. This method is meant to be
1557 overridden in addons that want to give specific view ids for example.
1559 :param int id: id of the document to open
1563 def get_formview_action(self, cr, uid, id, context=None):
1564 """ Return an action to open the document. This method is meant to be
1565 overridden in addons that want to give specific view ids for example.
1567 :param int id: id of the document to open
1569 view_id = self.get_formview_id(cr, uid, id, context=context)
1571 'type': 'ir.actions.act_window',
1572 'res_model': self._name,
1573 'view_type': 'form',
1574 'view_mode': 'form',
1575 'views': [(view_id, 'form')],
1576 'target': 'current',
1580 def _view_look_dom_arch(self, cr, uid, node, view_id, context=None):
1581 return self.pool['ir.ui.view'].postprocess_and_fields(
1582 cr, uid, self._name, node, view_id, context=context)
1584 def search_count(self, cr, user, args, context=None):
1585 res = self.search(cr, user, args, context=context, count=True)
1586 if isinstance(res, list):
1590 @api.returns('self')
1591 def search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False):
1593 Search for records based on a search domain.
1595 :param cr: database cursor
1596 :param user: current user id
1597 :param args: list of tuples specifying the search domain [('field_name', 'operator', value), ...]. Pass an empty list to match all records.
1598 :param offset: optional number of results to skip in the returned values (default: 0)
1599 :param limit: optional max number of records to return (default: **None**)
1600 :param order: optional columns to sort by (default: self._order=id )
1601 :param context: optional context arguments, like lang, time zone
1602 :type context: dictionary
1603 :param count: optional (default: **False**), if **True**, returns only the number of records matching the criteria, not their ids
1604 :return: id or list of ids of records matching the criteria
1605 :rtype: integer or list of integers
1606 :raise AccessError: * if user tries to bypass access rules for read on the requested object.
1608 **Expressing a search domain (args)**
1610 Each tuple in the search domain needs to have 3 elements, in the form: **('field_name', 'operator', value)**, where:
1612 * **field_name** must be a valid name of field of the object model, possibly following many-to-one relationships using dot-notation, e.g 'street' or 'partner_id.country' are valid values.
1613 * **operator** must be a string with a valid comparison operator from this list: ``=, !=, >, >=, <, <=, like, ilike, in, not in, child_of, parent_left, parent_right``
1614 The semantics of most of these operators are obvious.
1615 The ``child_of`` operator will look for records who are children or grand-children of a given record,
1616 according to the semantics of this model (i.e following the relationship field named by
1617 ``self._parent_name``, by default ``parent_id``.
1618 * **value** must be a valid value to compare with the values of **field_name**, depending on its type.
1620 Domain criteria can be combined using 3 logical operators than can be added between tuples: '**&**' (logical AND, default), '**|**' (logical OR), '**!**' (logical NOT).
1621 These are **prefix** operators and the arity of the '**&**' and '**|**' operator is 2, while the arity of the '**!**' is just 1.
1622 Be very careful about this when you combine them the first time.
1624 Here is an example of searching for Partners named *ABC* from Belgium and Germany whose language is not english ::
1626 [('name','=','ABC'),'!',('language.code','=','en_US'),'|',('country_id.code','=','be'),('country_id.code','=','de'))
1628 The '&' is omitted as it is the default, and of course we could have used '!=' for the language, but what this domain really represents is::
1630 (name is 'ABC' AND (language is NOT english) AND (country is Belgium OR Germany))
1633 return self._search(cr, user, args, offset=offset, limit=limit, order=order, context=context, count=count)
1636 # display_name, name_get, name_create, name_search
1639 @api.depends(lambda self: (self._rec_name,) if self._rec_name else ())
1640 def _compute_display_name(self):
1641 name = self._rec_name
1642 if name in self._fields:
1643 convert = self._fields[name].convert_to_display_name
1645 record.display_name = convert(record[name])
1648 record.display_name = "%s,%s" % (self._name, self.id)
1650 def _inverse_display_name(self):
1651 name = self._rec_name
1652 if name in self._fields and not self._fields[name].relational:
1654 record[name] = record.display_name
1656 _logger.warning("Cannot inverse field display_name on %s", self._name)
1658 def _search_display_name(self, operator, value):
1659 name = self._rec_name
1660 if name in self._fields:
1661 return [(name, operator, value)]
1663 _logger.warning("Cannot search field display_name on %s", self._name)
1664 return [(0, '=', 1)]
1668 """ Return a textual representation for the records in `self`.
1669 By default this is the value of field ``display_name``.
1672 :return: list of pairs ``(id, text_repr)`` for all records
1677 result.append((record.id, record.display_name))
1678 except MissingError:
1683 def name_create(self, name):
1684 """ Create a new record by calling :meth:`~.create` with only one value
1685 provided: the display name of the new record.
1687 The new record will be initialized with any default values
1688 applicable to this model, or provided through the context. The usual
1689 behavior of :meth:`~.create` applies.
1691 :param name: display name of the record to create
1693 :return: the :meth:`~.name_get` pair value of the created record
1695 # Shortcut the inverse function of 'display_name' with self._rec_name.
1696 # This is useful when self._rec_name is a required field: in that case,
1697 # create() creates a record without the field, and inverse display_name
1699 field_name = self._rec_name if self._rec_name else 'display_name'
1700 record = self.create({field_name: name})
1701 return (record.id, record.display_name)
1704 def name_search(self, name='', args=None, operator='ilike', limit=100):
1705 """ Search for records that have a display name matching the given
1706 `name` pattern when compared with the given `operator`, while also
1707 matching the optional search domain (`args`).
1709 This is used for example to provide suggestions based on a partial
1710 value for a relational field. Sometimes be seen as the inverse
1711 function of :meth:`~.name_get`, but it is not guaranteed to be.
1713 This method is equivalent to calling :meth:`~.search` with a search
1714 domain based on `display_name` and then :meth:`~.name_get` on the
1715 result of the search.
1717 :param name: the name pattern to match
1718 :param list args: optional search domain (see :meth:`~.search` for
1719 syntax), specifying further restrictions
1720 :param str operator: domain operator for matching `name`, such as
1721 ``'like'`` or ``'='``.
1722 :param int limit: optional max number of records to return
1724 :return: list of pairs ``(id, text_repr)`` for all matching records.
1726 args = list(args or [])
1727 if not (name == '' and operator == 'ilike'):
1728 args += [('display_name', operator, name)]
1729 return self.search(args, limit=limit).name_get()
1731 def _name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100, name_get_uid=None):
1732 # private implementation of name_search, allows passing a dedicated user
1733 # for the name_get part to solve some access rights issues
1734 args = list(args or [])
1735 # optimize out the default criterion of ``ilike ''`` that matches everything
1736 if not (name == '' and operator == 'ilike'):
1737 args += [('display_name', operator, name)]
1738 access_rights_uid = name_get_uid or user
1739 ids = self._search(cr, user, args, limit=limit, context=context, access_rights_uid=access_rights_uid)
1740 res = self.name_get(cr, access_rights_uid, ids, context)
1743 def read_string(self, cr, uid, id, langs, fields=None, context=None):
1746 self.pool.get('ir.translation').check_access_rights(cr, uid, 'read')
1748 fields = self._columns.keys() + self._inherit_fields.keys()
1749 #FIXME: collect all calls to _get_source into one SQL call.
1751 res[lang] = {'code': lang}
1753 if f in self._columns:
1754 res_trans = self.pool.get('ir.translation')._get_source(cr, uid, self._name+','+f, 'field', lang)
1756 res[lang][f] = res_trans
1758 res[lang][f] = self._columns[f].string
1759 for table in self._inherits:
1760 cols = intersect(self._inherit_fields.keys(), fields)
1761 res2 = self.pool[table].read_string(cr, uid, id, langs, cols, context)
1764 res[lang]['code'] = lang
1765 for f in res2[lang]:
1766 res[lang][f] = res2[lang][f]
1769 def write_string(self, cr, uid, id, langs, vals, context=None):
1770 self.pool.get('ir.translation').check_access_rights(cr, uid, 'write')
1771 #FIXME: try to only call the translation in one SQL
1774 if field in self._columns:
1775 src = self._columns[field].string
1776 self.pool.get('ir.translation')._set_ids(cr, uid, self._name+','+field, 'field', lang, [0], vals[field], src)
1777 for table in self._inherits:
1778 cols = intersect(self._inherit_fields.keys(), vals)
1780 self.pool[table].write_string(cr, uid, id, langs, vals, context)
1783 def _add_missing_default_values(self, cr, uid, values, context=None):
1784 # avoid overriding inherited values when parent is set
1786 for tables, parent_field in self._inherits.items():
1787 if parent_field in values:
1788 avoid_tables.append(tables)
1790 # compute missing fields
1791 missing_defaults = set()
1792 for field in self._columns.keys():
1793 if not field in values:
1794 missing_defaults.add(field)
1795 for field in self._inherit_fields.keys():
1796 if (field not in values) and (self._inherit_fields[field][0] not in avoid_tables):
1797 missing_defaults.add(field)
1798 # discard magic fields
1799 missing_defaults -= set(MAGIC_COLUMNS)
1801 if missing_defaults:
1802 # override defaults with the provided values, never allow the other way around
1803 defaults = self.default_get(cr, uid, list(missing_defaults), context)
1805 if ((dv in self._columns and self._columns[dv]._type == 'many2many') \
1806 or (dv in self._inherit_fields and self._inherit_fields[dv][2]._type == 'many2many')) \
1807 and defaults[dv] and isinstance(defaults[dv][0], (int, long)):
1808 defaults[dv] = [(6, 0, defaults[dv])]
1809 if (dv in self._columns and self._columns[dv]._type == 'one2many' \
1810 or (dv in self._inherit_fields and self._inherit_fields[dv][2]._type == 'one2many')) \
1811 and isinstance(defaults[dv], (list, tuple)) and defaults[dv] and isinstance(defaults[dv][0], dict):
1812 defaults[dv] = [(0, 0, x) for x in defaults[dv]]
1813 defaults.update(values)
1817 def clear_caches(self):
1818 """ Clear the caches
1820 This clears the caches associated to methods decorated with
1821 ``tools.ormcache`` or ``tools.ormcache_multi``.
1824 self._ormcache.clear()
1825 self.pool._any_cache_cleared = True
1826 except AttributeError:
1830 def _read_group_fill_results(self, cr, uid, domain, groupby, remaining_groupbys, aggregated_fields,
1831 read_group_result, read_group_order=None, context=None):
1832 """Helper method for filling in empty groups for all possible values of
1833 the field being grouped by"""
1835 # self._group_by_full should map groupable fields to a method that returns
1836 # a list of all aggregated values that we want to display for this field,
1837 # in the form of a m2o-like pair (key,label).
1838 # This is useful to implement kanban views for instance, where all columns
1839 # should be displayed even if they don't contain any record.
1841 # Grab the list of all groups that should be displayed, including all present groups
1842 present_group_ids = [x[groupby][0] for x in read_group_result if x[groupby]]
1843 all_groups,folded = self._group_by_full[groupby](self, cr, uid, present_group_ids, domain,
1844 read_group_order=read_group_order,
1845 access_rights_uid=openerp.SUPERUSER_ID,
1848 result_template = dict.fromkeys(aggregated_fields, False)
1849 result_template[groupby + '_count'] = 0
1850 if remaining_groupbys:
1851 result_template['__context'] = {'group_by': remaining_groupbys}
1853 # Merge the left_side (current results as dicts) with the right_side (all
1854 # possible values as m2o pairs). Both lists are supposed to be using the
1855 # same ordering, and can be merged in one pass.
1858 def append_left(left_side):
1859 grouped_value = left_side[groupby] and left_side[groupby][0]
1860 if not grouped_value in known_values:
1861 result.append(left_side)
1862 known_values[grouped_value] = left_side
1864 count_attr = groupby + '_count'
1865 known_values[grouped_value].update({count_attr: left_side[count_attr]})
1866 def append_right(right_side):
1867 grouped_value = right_side[0]
1868 if not grouped_value in known_values:
1869 line = dict(result_template)
1870 line[groupby] = right_side
1871 line['__domain'] = [(groupby,'=',grouped_value)] + domain
1873 known_values[grouped_value] = line
1874 while read_group_result or all_groups:
1875 left_side = read_group_result[0] if read_group_result else None
1876 right_side = all_groups[0] if all_groups else None
1877 assert left_side is None or left_side[groupby] is False \
1878 or isinstance(left_side[groupby], (tuple,list)), \
1879 'M2O-like pair expected, got %r' % left_side[groupby]
1880 assert right_side is None or isinstance(right_side, (tuple,list)), \
1881 'M2O-like pair expected, got %r' % right_side
1882 if left_side is None:
1883 append_right(all_groups.pop(0))
1884 elif right_side is None:
1885 append_left(read_group_result.pop(0))
1886 elif left_side[groupby] == right_side:
1887 append_left(read_group_result.pop(0))
1888 all_groups.pop(0) # discard right_side
1889 elif not left_side[groupby] or not left_side[groupby][0]:
1890 # left side == "Undefined" entry, not present on right_side
1891 append_left(read_group_result.pop(0))
1893 append_right(all_groups.pop(0))
1897 r['__fold'] = folded.get(r[groupby] and r[groupby][0], False)
1900 def _read_group_prepare(self, orderby, aggregated_fields, annotated_groupbys, query):
1902 Prepares the GROUP BY and ORDER BY terms for the read_group method. Adds the missing JOIN clause
1903 to the query if order should be computed against m2o field.
1904 :param orderby: the orderby definition in the form "%(field)s %(order)s"
1905 :param aggregated_fields: list of aggregated fields in the query
1906 :param annotated_groupbys: list of dictionaries returned by _read_group_process_groupby
1907 These dictionaries contains the qualified name of each groupby
1908 (fully qualified SQL name for the corresponding field),
1909 and the (non raw) field name.
1910 :param osv.Query query: the query under construction
1911 :return: (groupby_terms, orderby_terms)
1914 groupby_terms = [gb['qualified_field'] for gb in annotated_groupbys]
1915 groupby_fields = [gb['groupby'] for gb in annotated_groupbys]
1917 return groupby_terms, orderby_terms
1919 self._check_qorder(orderby)
1920 for order_part in orderby.split(','):
1921 order_split = order_part.split()
1922 order_field = order_split[0]
1923 if order_field in groupby_fields:
1925 if self._all_columns[order_field.split(':')[0]].column._type == 'many2one':
1926 order_clause = self._generate_order_by(order_part, query).replace('ORDER BY ', '')
1928 orderby_terms.append(order_clause)
1929 groupby_terms += [order_term.split()[0] for order_term in order_clause.split(',')]
1931 order = '"%s" %s' % (order_field, '' if len(order_split) == 1 else order_split[1])
1932 orderby_terms.append(order)
1933 elif order_field in aggregated_fields:
1934 orderby_terms.append(order_part)
1936 # Cannot order by a field that will not appear in the results (needs to be grouped or aggregated)
1937 _logger.warn('%s: read_group order by `%s` ignored, cannot sort on empty columns (not grouped/aggregated)',
1938 self._name, order_part)
1939 return groupby_terms, orderby_terms
1941 def _read_group_process_groupby(self, gb, query, context):
1943 Helper method to collect important information about groupbys: raw
1944 field name, type, time informations, qualified name, ...
1946 split = gb.split(':')
1947 field_type = self._all_columns[split[0]].column._type
1948 gb_function = split[1] if len(split) == 2 else None
1949 temporal = field_type in ('date', 'datetime')
1950 tz_convert = field_type == 'datetime' and context.get('tz') in pytz.all_timezones
1951 qualified_field = self._inherits_join_calc(split[0], query)
1954 'day': 'dd MMM YYYY',
1955 'week': "'W'w YYYY",
1956 'month': 'MMMM YYYY',
1957 'quarter': 'QQQ YYYY',
1961 'day': dateutil.relativedelta.relativedelta(days=1),
1962 'week': datetime.timedelta(days=7),
1963 'month': dateutil.relativedelta.relativedelta(months=1),
1964 'quarter': dateutil.relativedelta.relativedelta(months=3),
1965 'year': dateutil.relativedelta.relativedelta(years=1)
1968 qualified_field = "timezone('%s', timezone('UTC',%s))" % (context.get('tz', 'UTC'), qualified_field)
1969 qualified_field = "date_trunc('%s', %s)" % (gb_function or 'month', qualified_field)
1970 if field_type == 'boolean':
1971 qualified_field = "coalesce(%s,false)" % qualified_field
1976 'display_format': display_formats[gb_function or 'month'] if temporal else None,
1977 'interval': time_intervals[gb_function or 'month'] if temporal else None,
1978 'tz_convert': tz_convert,
1979 'qualified_field': qualified_field
1982 def _read_group_prepare_data(self, key, value, groupby_dict, context):
1984 Helper method to sanitize the data received by read_group. The None
1985 values are converted to False, and the date/datetime are formatted,
1986 and corrected according to the timezones.
1988 value = False if value is None else value
1989 gb = groupby_dict.get(key)
1990 if gb and gb['type'] in ('date', 'datetime') and value:
1991 if isinstance(value, basestring):
1992 dt_format = DEFAULT_SERVER_DATETIME_FORMAT if gb['type'] == 'datetime' else DEFAULT_SERVER_DATE_FORMAT
1993 value = datetime.datetime.strptime(value, dt_format)
1994 if gb['tz_convert']:
1995 value = pytz.timezone(context['tz']).localize(value)
1998 def _read_group_get_domain(self, groupby, value):
2000 Helper method to construct the domain corresponding to a groupby and
2001 a given value. This is mostly relevant for date/datetime.
2003 if groupby['type'] in ('date', 'datetime') and value:
2004 dt_format = DEFAULT_SERVER_DATETIME_FORMAT if groupby['type'] == 'datetime' else DEFAULT_SERVER_DATE_FORMAT
2005 domain_dt_begin = value
2006 domain_dt_end = value + groupby['interval']
2007 if groupby['tz_convert']:
2008 domain_dt_begin = domain_dt_begin.astimezone(pytz.utc)
2009 domain_dt_end = domain_dt_end.astimezone(pytz.utc)
2010 return [(groupby['field'], '>=', domain_dt_begin.strftime(dt_format)),
2011 (groupby['field'], '<', domain_dt_end.strftime(dt_format))]
2012 if groupby['type'] == 'many2one' and value:
2014 return [(groupby['field'], '=', value)]
2016 def _read_group_format_result(self, data, annotated_groupbys, groupby, groupby_dict, domain, context):
2018 Helper method to format the data contained in the dictianary data by
2019 adding the domain corresponding to its values, the groupbys in the
2020 context and by properly formatting the date/datetime values.
2022 domain_group = [dom for gb in annotated_groupbys for dom in self._read_group_get_domain(gb, data[gb['groupby']])]
2023 for k,v in data.iteritems():
2024 gb = groupby_dict.get(k)
2025 if gb and gb['type'] in ('date', 'datetime') and v:
2026 data[k] = babel.dates.format_date(v, format=gb['display_format'], locale=context.get('lang', 'en_US'))
2028 data['__domain'] = domain_group + domain
2029 if len(groupby) - len(annotated_groupbys) >= 1:
2030 data['__context'] = { 'group_by': groupby[len(annotated_groupbys):]}
2034 def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False, lazy=True):
2036 Get the list of records in list view grouped by the given ``groupby`` fields
2038 :param cr: database cursor
2039 :param uid: current user id
2040 :param domain: list specifying search criteria [['field_name', 'operator', 'value'], ...]
2041 :param list fields: list of fields present in the list view specified on the object
2042 :param list groupby: list of groupby descriptions by which the records will be grouped.
2043 A groupby description is either a field (then it will be grouped by that field)
2044 or a string 'field:groupby_function'. Right now, the only functions supported
2045 are 'day', 'week', 'month', 'quarter' or 'year', and they only make sense for
2046 date/datetime fields.
2047 :param int offset: optional number of records to skip
2048 :param int limit: optional max number of records to return
2049 :param dict context: context arguments, like lang, time zone.
2050 :param list orderby: optional ``order by`` specification, for
2051 overriding the natural sort ordering of the
2052 groups, see also :py:meth:`~osv.osv.osv.search`
2053 (supported only for many2one fields currently)
2054 :param bool lazy: if true, the results are only grouped by the first groupby and the
2055 remaining groupbys are put in the __context key. If false, all the groupbys are
2057 :return: list of dictionaries(one dictionary for each record) containing:
2059 * the values of fields grouped by the fields in ``groupby`` argument
2060 * __domain: list of tuples specifying the search criteria
2061 * __context: dictionary with argument like ``groupby``
2062 :rtype: [{'field_name_1': value, ...]
2063 :raise AccessError: * if user has no read rights on the requested object
2064 * if user tries to bypass access rules for read on the requested object
2068 self.check_access_rights(cr, uid, 'read')
2069 query = self._where_calc(cr, uid, domain, context=context)
2070 fields = fields or self._columns.keys()
2072 groupby = [groupby] if isinstance(groupby, basestring) else groupby
2073 groupby_list = groupby[:1] if lazy else groupby
2074 annotated_groupbys = [self._read_group_process_groupby(gb, query, context)
2075 for gb in groupby_list]
2076 groupby_fields = [g['field'] for g in annotated_groupbys]
2077 order = orderby or ','.join([g for g in groupby_list])
2078 groupby_dict = {gb['groupby']: gb for gb in annotated_groupbys}
2080 self._apply_ir_rules(cr, uid, query, 'read', context=context)
2081 for gb in groupby_fields:
2082 assert gb in fields, "Fields in 'groupby' must appear in the list of fields to read (perhaps it's missing in the list view?)"
2083 groupby_def = self._columns.get(gb) or (self._inherit_fields.get(gb) and self._inherit_fields.get(gb)[2])
2084 assert groupby_def and groupby_def._classic_write, "Fields in 'groupby' must be regular database-persisted fields (no function or related fields), or function fields with store=True"
2085 if not (gb in self._all_columns):
2086 # Don't allow arbitrary values, as this would be a SQL injection vector!
2087 raise except_orm(_('Invalid group_by'),
2088 _('Invalid group_by specification: "%s".\nA group_by specification must be a list of valid fields.')%(gb,))
2090 aggregated_fields = [
2092 if f not in ('id', 'sequence')
2093 if f not in groupby_fields
2094 if self._all_columns[f].column._type in ('integer', 'float')
2095 if getattr(self._all_columns[f].column, '_classic_write')]
2097 field_formatter = lambda f: (self._all_columns[f].column.group_operator or 'sum', self._inherits_join_calc(f, query), f)
2098 select_terms = ["%s(%s) AS %s" % field_formatter(f) for f in aggregated_fields]
2100 for gb in annotated_groupbys:
2101 select_terms.append('%s as "%s" ' % (gb['qualified_field'], gb['groupby']))
2103 groupby_terms, orderby_terms = self._read_group_prepare(order, aggregated_fields, annotated_groupbys, query)
2104 from_clause, where_clause, where_clause_params = query.get_sql()
2105 if lazy and (len(groupby_fields) >= 2 or not context.get('group_by_no_leaf')):
2106 count_field = groupby_fields[0] if len(groupby_fields) >= 1 else '_'
2110 prefix_terms = lambda prefix, terms: (prefix + " " + ",".join(terms)) if terms else ''
2111 prefix_term = lambda prefix, term: ('%s %s' % (prefix, term)) if term else ''
2114 SELECT min(%(table)s.id) AS id, count(%(table)s.id) AS %(count_field)s_count %(extra_fields)s
2122 'table': self._table,
2123 'count_field': count_field,
2124 'extra_fields': prefix_terms(',', select_terms),
2125 'from': from_clause,
2126 'where': prefix_term('WHERE', where_clause),
2127 'groupby': prefix_terms('GROUP BY', groupby_terms),
2128 'orderby': prefix_terms('ORDER BY', orderby_terms),
2129 'limit': prefix_term('LIMIT', int(limit) if limit else None),
2130 'offset': prefix_term('OFFSET', int(offset) if limit else None),
2132 cr.execute(query, where_clause_params)
2133 fetched_data = cr.dictfetchall()
2135 if not groupby_fields:
2138 many2onefields = [gb['field'] for gb in annotated_groupbys if gb['type'] == 'many2one']
2140 data_ids = [r['id'] for r in fetched_data]
2141 many2onefields = list(set(many2onefields))
2142 data_dict = {d['id']: d for d in self.read(cr, uid, data_ids, many2onefields, context=context)}
2143 for d in fetched_data:
2144 d.update(data_dict[d['id']])
2146 data = map(lambda r: {k: self._read_group_prepare_data(k,v, groupby_dict, context) for k,v in r.iteritems()}, fetched_data)
2147 result = [self._read_group_format_result(d, annotated_groupbys, groupby, groupby_dict, domain, context) for d in data]
2148 if lazy and groupby_fields[0] in self._group_by_full:
2149 # Right now, read_group only fill results in lazy mode (by default).
2150 # If you need to have the empty groups in 'eager' mode, then the
2151 # method _read_group_fill_results need to be completely reimplemented
2153 result = self._read_group_fill_results(cr, uid, domain, groupby_fields[0], groupby[len(annotated_groupbys):],
2154 aggregated_fields, result, read_group_order=order,
2158 def _inherits_join_add(self, current_model, parent_model_name, query):
2160 Add missing table SELECT and JOIN clause to ``query`` for reaching the parent table (no duplicates)
2161 :param current_model: current model object
2162 :param parent_model_name: name of the parent model for which the clauses should be added
2163 :param query: query object on which the JOIN should be added
2165 inherits_field = current_model._inherits[parent_model_name]
2166 parent_model = self.pool[parent_model_name]
2167 parent_alias, parent_alias_statement = query.add_join((current_model._table, parent_model._table, inherits_field, 'id', inherits_field), implicit=True)
2170 def _inherits_join_calc(self, field, query):
2172 Adds missing table select and join clause(s) to ``query`` for reaching
2173 the field coming from an '_inherits' parent table (no duplicates).
2175 :param field: name of inherited field to reach
2176 :param query: query object on which the JOIN should be added
2177 :return: qualified name of field, to be used in SELECT clause
2179 current_table = self
2180 parent_alias = '"%s"' % current_table._table
2181 while field in current_table._inherit_fields and not field in current_table._columns:
2182 parent_model_name = current_table._inherit_fields[field][0]
2183 parent_table = self.pool[parent_model_name]
2184 parent_alias = self._inherits_join_add(current_table, parent_model_name, query)
2185 current_table = parent_table
2186 return '%s."%s"' % (parent_alias, field)
2188 def _parent_store_compute(self, cr):
2189 if not self._parent_store:
2191 _logger.info('Computing parent left and right for table %s...', self._table)
2192 def browse_rec(root, pos=0):
2194 where = self._parent_name+'='+str(root)
2196 where = self._parent_name+' IS NULL'
2197 if self._parent_order:
2198 where += ' order by '+self._parent_order
2199 cr.execute('SELECT id FROM '+self._table+' WHERE '+where)
2201 for id in cr.fetchall():
2202 pos2 = browse_rec(id[0], pos2)
2203 cr.execute('update '+self._table+' set parent_left=%s, parent_right=%s where id=%s', (pos, pos2, root))
2205 query = 'SELECT id FROM '+self._table+' WHERE '+self._parent_name+' IS NULL'
2206 if self._parent_order:
2207 query += ' order by ' + self._parent_order
2210 for (root,) in cr.fetchall():
2211 pos = browse_rec(root, pos)
2212 self.invalidate_cache(cr, SUPERUSER_ID, ['parent_left', 'parent_right'])
2215 def _update_store(self, cr, f, k):
2216 _logger.info("storing computed values of fields.function '%s'", k)
2217 ss = self._columns[k]._symbol_set
2218 update_query = 'UPDATE "%s" SET "%s"=%s WHERE id=%%s' % (self._table, k, ss[0])
2219 cr.execute('select id from '+self._table)
2220 ids_lst = map(lambda x: x[0], cr.fetchall())
2222 iids = ids_lst[:AUTOINIT_RECALCULATE_STORED_FIELDS]
2223 ids_lst = ids_lst[AUTOINIT_RECALCULATE_STORED_FIELDS:]
2224 res = f.get(cr, self, iids, k, SUPERUSER_ID, {})
2225 for key, val in res.items():
2228 # if val is a many2one, just write the ID
2229 if type(val) == tuple:
2231 if val is not False:
2232 cr.execute(update_query, (ss[1](val), key))
2234 def _check_selection_field_value(self, cr, uid, field, value, context=None):
2235 """Raise except_orm if value is not among the valid values for the selection field"""
2236 if self._columns[field]._type == 'reference':
2237 val_model, val_id_str = value.split(',', 1)
2240 val_id = long(val_id_str)
2244 raise except_orm(_('ValidateError'),
2245 _('Invalid value for reference field "%s.%s" (last part must be a non-zero integer): "%s"') % (self._table, field, value))
2249 if isinstance(self._columns[field].selection, (tuple, list)):
2250 if val in dict(self._columns[field].selection):
2252 elif val in dict(self._columns[field].selection(self, cr, uid, context=context)):
2254 raise except_orm(_('ValidateError'),
2255 _('The value "%s" for the field "%s.%s" is not in the selection') % (value, self._name, field))
2257 def _check_removed_columns(self, cr, log=False):
2258 # iterate on the database columns to drop the NOT NULL constraints
2259 # of fields which were required but have been removed (or will be added by another module)
2260 columns = [c for c in self._columns if not (isinstance(self._columns[c], fields.function) and not self._columns[c].store)]
2261 columns += MAGIC_COLUMNS
2262 cr.execute("SELECT a.attname, a.attnotnull"
2263 " FROM pg_class c, pg_attribute a"
2264 " WHERE c.relname=%s"
2265 " AND c.oid=a.attrelid"
2266 " AND a.attisdropped=%s"
2267 " AND pg_catalog.format_type(a.atttypid, a.atttypmod) NOT IN ('cid', 'tid', 'oid', 'xid')"
2268 " AND a.attname NOT IN %s", (self._table, False, tuple(columns))),
2270 for column in cr.dictfetchall():
2272 _logger.debug("column %s is in the table %s but not in the corresponding object %s",
2273 column['attname'], self._table, self._name)
2274 if column['attnotnull']:
2275 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, column['attname']))
2276 _schema.debug("Table '%s': column '%s': dropped NOT NULL constraint",
2277 self._table, column['attname'])
2279 def _save_constraint(self, cr, constraint_name, type):
2281 Record the creation of a constraint for this model, to make it possible
2282 to delete it later when the module is uninstalled. Type can be either
2283 'f' or 'u' depending on the constraint being a foreign key or not.
2285 if not self._module:
2286 # no need to save constraints for custom models as they're not part
2289 assert type in ('f', 'u')
2291 SELECT 1 FROM ir_model_constraint, ir_module_module
2292 WHERE ir_model_constraint.module=ir_module_module.id
2293 AND ir_model_constraint.name=%s
2294 AND ir_module_module.name=%s
2295 """, (constraint_name, self._module))
2298 INSERT INTO ir_model_constraint
2299 (name, date_init, date_update, module, model, type)
2300 VALUES (%s, now() AT TIME ZONE 'UTC', now() AT TIME ZONE 'UTC',
2301 (SELECT id FROM ir_module_module WHERE name=%s),
2302 (SELECT id FROM ir_model WHERE model=%s), %s)""",
2303 (constraint_name, self._module, self._name, type))
2305 def _save_relation_table(self, cr, relation_table):
2307 Record the creation of a many2many for this model, to make it possible
2308 to delete it later when the module is uninstalled.
2311 SELECT 1 FROM ir_model_relation, ir_module_module
2312 WHERE ir_model_relation.module=ir_module_module.id
2313 AND ir_model_relation.name=%s
2314 AND ir_module_module.name=%s
2315 """, (relation_table, self._module))
2317 cr.execute("""INSERT INTO ir_model_relation (name, date_init, date_update, module, model)
2318 VALUES (%s, now() AT TIME ZONE 'UTC', now() AT TIME ZONE 'UTC',
2319 (SELECT id FROM ir_module_module WHERE name=%s),
2320 (SELECT id FROM ir_model WHERE model=%s))""",
2321 (relation_table, self._module, self._name))
2322 self.invalidate_cache(cr, SUPERUSER_ID)
2324 # checked version: for direct m2o starting from `self`
2325 def _m2o_add_foreign_key_checked(self, source_field, dest_model, ondelete):
2326 assert self.is_transient() or not dest_model.is_transient(), \
2327 'Many2One relationships from non-transient Model to TransientModel are forbidden'
2328 if self.is_transient() and not dest_model.is_transient():
2329 # TransientModel relationships to regular Models are annoying
2330 # usually because they could block deletion due to the FKs.
2331 # So unless stated otherwise we default them to ondelete=cascade.
2332 ondelete = ondelete or 'cascade'
2333 fk_def = (self._table, source_field, dest_model._table, ondelete or 'set null')
2334 self._foreign_keys.add(fk_def)
2335 _schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s", *fk_def)
2337 # unchecked version: for custom cases, such as m2m relationships
2338 def _m2o_add_foreign_key_unchecked(self, source_table, source_field, dest_model, ondelete):
2339 fk_def = (source_table, source_field, dest_model._table, ondelete or 'set null')
2340 self._foreign_keys.add(fk_def)
2341 _schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s", *fk_def)
2343 def _drop_constraint(self, cr, source_table, constraint_name):
2344 cr.execute("ALTER TABLE %s DROP CONSTRAINT %s" % (source_table,constraint_name))
2346 def _m2o_fix_foreign_key(self, cr, source_table, source_field, dest_model, ondelete):
2347 # Find FK constraint(s) currently established for the m2o field,
2348 # and see whether they are stale or not
2349 cr.execute("""SELECT confdeltype as ondelete_rule, conname as constraint_name,
2350 cl2.relname as foreign_table
2351 FROM pg_constraint as con, pg_class as cl1, pg_class as cl2,
2352 pg_attribute as att1, pg_attribute as att2
2353 WHERE con.conrelid = cl1.oid
2354 AND cl1.relname = %s
2355 AND con.confrelid = cl2.oid
2356 AND array_lower(con.conkey, 1) = 1
2357 AND con.conkey[1] = att1.attnum
2358 AND att1.attrelid = cl1.oid
2359 AND att1.attname = %s
2360 AND array_lower(con.confkey, 1) = 1
2361 AND con.confkey[1] = att2.attnum
2362 AND att2.attrelid = cl2.oid
2363 AND att2.attname = %s
2364 AND con.contype = 'f'""", (source_table, source_field, 'id'))
2365 constraints = cr.dictfetchall()
2367 if len(constraints) == 1:
2368 # Is it the right constraint?
2370 if cons['ondelete_rule'] != POSTGRES_CONFDELTYPES.get((ondelete or 'set null').upper(), 'a')\
2371 or cons['foreign_table'] != dest_model._table:
2372 # Wrong FK: drop it and recreate
2373 _schema.debug("Table '%s': dropping obsolete FK constraint: '%s'",
2374 source_table, cons['constraint_name'])
2375 self._drop_constraint(cr, source_table, cons['constraint_name'])
2377 # it's all good, nothing to do!
2380 # Multiple FKs found for the same field, drop them all, and re-create
2381 for cons in constraints:
2382 _schema.debug("Table '%s': dropping duplicate FK constraints: '%s'",
2383 source_table, cons['constraint_name'])
2384 self._drop_constraint(cr, source_table, cons['constraint_name'])
2386 # (re-)create the FK
2387 self._m2o_add_foreign_key_checked(source_field, dest_model, ondelete)
2390 def _set_default_value_on_column(self, cr, column_name, context=None):
2391 # ideally should use add_default_value but fails
2392 # due to ir.values not being ready
2394 # get old-style default
2395 default = self._defaults.get(column_name)
2396 if callable(default):
2397 default = default(self, cr, SUPERUSER_ID, context)
2399 # get new_style default if no old-style
2401 record = self.new(cr, SUPERUSER_ID, context=context)
2402 field = self._fields[column_name]
2403 field.determine_default(record)
2404 defaults = dict(record._cache)
2405 if column_name in defaults:
2406 default = field.convert_to_write(defaults[column_name])
2408 if default is not None:
2409 _logger.debug("Table '%s': setting default value of new column %s",
2410 self._table, column_name)
2411 ss = self._columns[column_name]._symbol_set
2412 query = 'UPDATE "%s" SET "%s"=%s WHERE "%s" is NULL' % (
2413 self._table, column_name, ss[0], column_name)
2414 cr.execute(query, (ss[1](default),))
2415 # this is a disgrace
2418 def _auto_init(self, cr, context=None):
2421 Call _field_create and, unless _auto is False:
2423 - create the corresponding table in database for the model,
2424 - possibly add the parent columns in database,
2425 - possibly add the columns 'create_uid', 'create_date', 'write_uid',
2426 'write_date' in database if _log_access is True (the default),
2427 - report on database columns no more existing in _columns,
2428 - remove no more existing not null constraints,
2429 - alter existing database columns to match _columns,
2430 - create database tables to match _columns,
2431 - add database indices to match _columns,
2432 - save in self._foreign_keys a list a foreign keys to create (see
2436 self._foreign_keys = set()
2437 raise_on_invalid_object_name(self._name)
2440 store_compute = False
2441 stored_fields = [] # new-style stored fields with compute
2443 update_custom_fields = context.get('update_custom_fields', False)
2444 self._field_create(cr, context=context)
2445 create = not self._table_exist(cr)
2449 self._create_table(cr)
2452 if self._parent_store:
2453 if not self._parent_columns_exist(cr):
2454 self._create_parent_columns(cr)
2455 store_compute = True
2457 self._check_removed_columns(cr, log=False)
2459 # iterate on the "object columns"
2460 column_data = self._select_column_data(cr)
2462 for k, f in self._columns.iteritems():
2463 if k == 'id': # FIXME: maybe id should be a regular column?
2465 # Don't update custom (also called manual) fields
2466 if f.manual and not update_custom_fields:
2469 if isinstance(f, fields.one2many):
2470 self._o2m_raise_on_missing_reference(cr, f)
2472 elif isinstance(f, fields.many2many):
2473 self._m2m_raise_or_create_relation(cr, f)
2476 res = column_data.get(k)
2478 # The field is not found as-is in database, try if it
2479 # exists with an old name.
2480 if not res and hasattr(f, 'oldname'):
2481 res = column_data.get(f.oldname)
2483 cr.execute('ALTER TABLE "%s" RENAME "%s" TO "%s"' % (self._table, f.oldname, k))
2485 column_data[k] = res
2486 _schema.debug("Table '%s': renamed column '%s' to '%s'",
2487 self._table, f.oldname, k)
2489 # The field already exists in database. Possibly
2490 # change its type, rename it, drop it or change its
2493 f_pg_type = res['typname']
2494 f_pg_size = res['size']
2495 f_pg_notnull = res['attnotnull']
2496 if isinstance(f, fields.function) and not f.store and\
2497 not getattr(f, 'nodrop', False):
2498 _logger.info('column %s (%s) converted to a function, removed from table %s',
2499 k, f.string, self._table)
2500 cr.execute('ALTER TABLE "%s" DROP COLUMN "%s" CASCADE' % (self._table, k))
2502 _schema.debug("Table '%s': dropped column '%s' with cascade",
2506 f_obj_type = get_pg_type(f) and get_pg_type(f)[0]
2511 ('text', 'char', pg_varchar(f.size), '::%s' % pg_varchar(f.size)),
2512 ('varchar', 'text', 'TEXT', ''),
2513 ('int4', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
2514 ('date', 'datetime', 'TIMESTAMP', '::TIMESTAMP'),
2515 ('timestamp', 'date', 'date', '::date'),
2516 ('numeric', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
2517 ('float8', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
2519 if f_pg_type == 'varchar' and f._type == 'char' and f_pg_size and (f.size is None or f_pg_size < f.size):
2521 with cr.savepoint():
2522 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" TYPE %s' % (self._table, k, pg_varchar(f.size)))
2523 except psycopg2.NotSupportedError:
2524 # In place alter table cannot be done because a view is depending of this field.
2525 # Do a manual copy. This will drop the view (that will be recreated later)
2526 cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO temp_change_size' % (self._table, k))
2527 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, pg_varchar(f.size)))
2528 cr.execute('UPDATE "%s" SET "%s"=temp_change_size::%s' % (self._table, k, pg_varchar(f.size)))
2529 cr.execute('ALTER TABLE "%s" DROP COLUMN temp_change_size CASCADE' % (self._table,))
2531 _schema.debug("Table '%s': column '%s' (type varchar) changed size from %s to %s",
2532 self._table, k, f_pg_size or 'unlimited', f.size or 'unlimited')
2534 if (f_pg_type==c[0]) and (f._type==c[1]):
2535 if f_pg_type != f_obj_type:
2537 cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO __temp_type_cast' % (self._table, k))
2538 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, c[2]))
2539 cr.execute(('UPDATE "%s" SET "%s"= __temp_type_cast'+c[3]) % (self._table, k))
2540 cr.execute('ALTER TABLE "%s" DROP COLUMN __temp_type_cast CASCADE' % (self._table,))
2542 _schema.debug("Table '%s': column '%s' changed type from %s to %s",
2543 self._table, k, c[0], c[1])
2546 if f_pg_type != f_obj_type:
2550 newname = k + '_moved' + str(i)
2551 cr.execute("SELECT count(1) FROM pg_class c,pg_attribute a " \
2552 "WHERE c.relname=%s " \
2553 "AND a.attname=%s " \
2554 "AND c.oid=a.attrelid ", (self._table, newname))
2555 if not cr.fetchone()[0]:
2559 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, k))
2560 cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO "%s"' % (self._table, k, newname))
2561 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, get_pg_type(f)[1]))
2562 cr.execute("COMMENT ON COLUMN %s.\"%s\" IS %%s" % (self._table, k), (f.string,))
2563 _schema.debug("Table '%s': column '%s' has changed type (DB=%s, def=%s), data moved to column %s !",
2564 self._table, k, f_pg_type, f._type, newname)
2566 # if the field is required and hasn't got a NOT NULL constraint
2567 if f.required and f_pg_notnull == 0:
2568 self._set_default_value_on_column(cr, k, context=context)
2569 # add the NOT NULL constraint
2571 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k), log_exceptions=False)
2573 _schema.debug("Table '%s': column '%s': added NOT NULL constraint",
2576 msg = "Table '%s': unable to set a NOT NULL constraint on column '%s' !\n"\
2577 "If you want to have it, you should update the records and execute manually:\n"\
2578 "ALTER TABLE %s ALTER COLUMN %s SET NOT NULL"
2579 _schema.warning(msg, self._table, k, self._table, k)
2581 elif not f.required and f_pg_notnull == 1:
2582 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, k))
2584 _schema.debug("Table '%s': column '%s': dropped NOT NULL constraint",
2587 indexname = '%s_%s_index' % (self._table, k)
2588 cr.execute("SELECT indexname FROM pg_indexes WHERE indexname = %s and tablename = %s", (indexname, self._table))
2589 res2 = cr.dictfetchall()
2590 if not res2 and f.select:
2591 cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (self._table, k, self._table, k))
2593 if f._type == 'text':
2594 # FIXME: for fields.text columns we should try creating GIN indexes instead (seems most suitable for an ERP context)
2595 msg = "Table '%s': Adding (b-tree) index for %s column '%s'."\
2596 "This is probably useless (does not work for fulltext search) and prevents INSERTs of long texts"\
2597 " because there is a length limit for indexable btree values!\n"\
2598 "Use a search view instead if you simply want to make the field searchable."
2599 _schema.warning(msg, self._table, f._type, k)
2600 if res2 and not f.select:
2601 cr.execute('DROP INDEX "%s_%s_index"' % (self._table, k))
2603 msg = "Table '%s': dropping index for column '%s' of type '%s' as it is not required anymore"
2604 _schema.debug(msg, self._table, k, f._type)
2606 if isinstance(f, fields.many2one) or (isinstance(f, fields.function) and f._type == 'many2one' and f.store):
2607 dest_model = self.pool[f._obj]
2608 if dest_model._table != 'ir_actions':
2609 self._m2o_fix_foreign_key(cr, self._table, k, dest_model, f.ondelete)
2611 # The field doesn't exist in database. Create it if necessary.
2613 if not isinstance(f, fields.function) or f.store:
2614 # add the missing field
2615 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, get_pg_type(f)[1]))
2616 cr.execute("COMMENT ON COLUMN %s.\"%s\" IS %%s" % (self._table, k), (f.string,))
2617 _schema.debug("Table '%s': added column '%s' with definition=%s",
2618 self._table, k, get_pg_type(f)[1])
2622 self._set_default_value_on_column(cr, k, context=context)
2624 # remember the functions to call for the stored fields
2625 if isinstance(f, fields.function):
2627 if f.store is not True: # i.e. if f.store is a dict
2628 order = f.store[f.store.keys()[0]][2]
2629 todo_end.append((order, self._update_store, (f, k)))
2631 # remember new-style stored fields with compute method
2632 if k in self._fields and self._fields[k].depends:
2633 stored_fields.append(self._fields[k])
2635 # and add constraints if needed
2636 if isinstance(f, fields.many2one) or (isinstance(f, fields.function) and f._type == 'many2one' and f.store):
2637 if f._obj not in self.pool:
2638 raise except_orm('Programming Error', 'There is no reference available for %s' % (f._obj,))
2639 dest_model = self.pool[f._obj]
2640 ref = dest_model._table
2641 # ir_actions is inherited so foreign key doesn't work on it
2642 if ref != 'ir_actions':
2643 self._m2o_add_foreign_key_checked(k, dest_model, f.ondelete)
2645 cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (self._table, k, self._table, k))
2649 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k))
2650 _schema.debug("Table '%s': column '%s': added a NOT NULL constraint",
2653 msg = "WARNING: unable to set column %s of table %s not null !\n"\
2654 "Try to re-run: openerp-server --update=module\n"\
2655 "If it doesn't work, update records and execute manually:\n"\
2656 "ALTER TABLE %s ALTER COLUMN %s SET NOT NULL"
2657 _logger.warning(msg, k, self._table, self._table, k, exc_info=True)
2661 cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (self._table,))
2662 create = not bool(cr.fetchone())
2664 cr.commit() # start a new transaction
2667 self._add_sql_constraints(cr)
2670 self._execute_sql(cr)
2673 self._parent_store_compute(cr)
2677 # trigger computation of new-style stored fields with a compute
2679 _logger.info("Storing computed values of %s fields %s",
2680 self._name, ', '.join(sorted(f.name for f in stored_fields)))
2681 recs = self.browse(cr, SUPERUSER_ID, [], {'active_test': False})
2682 recs = recs.search([])
2684 map(recs._recompute_todo, stored_fields)
2687 todo_end.append((1000, func, ()))
2691 def _auto_end(self, cr, context=None):
2692 """ Create the foreign keys recorded by _auto_init. """
2693 for t, k, r, d in self._foreign_keys:
2694 cr.execute('ALTER TABLE "%s" ADD FOREIGN KEY ("%s") REFERENCES "%s" ON DELETE %s' % (t, k, r, d))
2695 self._save_constraint(cr, "%s_%s_fkey" % (t, k), 'f')
2697 del self._foreign_keys
2700 def _table_exist(self, cr):
2701 cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (self._table,))
2705 def _create_table(self, cr):
2706 cr.execute('CREATE TABLE "%s" (id SERIAL NOT NULL, PRIMARY KEY(id))' % (self._table,))
2707 cr.execute(("COMMENT ON TABLE \"%s\" IS %%s" % self._table), (self._description,))
2708 _schema.debug("Table '%s': created", self._table)
2711 def _parent_columns_exist(self, cr):
2712 cr.execute("""SELECT c.relname
2713 FROM pg_class c, pg_attribute a
2714 WHERE c.relname=%s AND a.attname=%s AND c.oid=a.attrelid
2715 """, (self._table, 'parent_left'))
2719 def _create_parent_columns(self, cr):
2720 cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_left" INTEGER' % (self._table,))
2721 cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_right" INTEGER' % (self._table,))
2722 if 'parent_left' not in self._columns:
2723 _logger.error('create a column parent_left on object %s: fields.integer(\'Left Parent\', select=1)',
2725 _schema.debug("Table '%s': added column '%s' with definition=%s",
2726 self._table, 'parent_left', 'INTEGER')
2727 elif not self._columns['parent_left'].select:
2728 _logger.error('parent_left column on object %s must be indexed! Add select=1 to the field definition)',
2730 if 'parent_right' not in self._columns:
2731 _logger.error('create a column parent_right on object %s: fields.integer(\'Right Parent\', select=1)',
2733 _schema.debug("Table '%s': added column '%s' with definition=%s",
2734 self._table, 'parent_right', 'INTEGER')
2735 elif not self._columns['parent_right'].select:
2736 _logger.error('parent_right column on object %s must be indexed! Add select=1 to the field definition)',
2738 if self._columns[self._parent_name].ondelete not in ('cascade', 'restrict'):
2739 _logger.error("The column %s on object %s must be set as ondelete='cascade' or 'restrict'",
2740 self._parent_name, self._name)
2745 def _select_column_data(self, cr):
2746 # attlen is the number of bytes necessary to represent the type when
2747 # the type has a fixed size. If the type has a varying size attlen is
2748 # -1 and atttypmod is the size limit + 4, or -1 if there is no limit.
2749 cr.execute("SELECT c.relname,a.attname,a.attlen,a.atttypmod,a.attnotnull,a.atthasdef,t.typname,CASE WHEN a.attlen=-1 THEN (CASE WHEN a.atttypmod=-1 THEN 0 ELSE a.atttypmod-4 END) ELSE a.attlen END as size " \
2750 "FROM pg_class c,pg_attribute a,pg_type t " \
2751 "WHERE c.relname=%s " \
2752 "AND c.oid=a.attrelid " \
2753 "AND a.atttypid=t.oid", (self._table,))
2754 return dict(map(lambda x: (x['attname'], x),cr.dictfetchall()))
2757 def _o2m_raise_on_missing_reference(self, cr, f):
2758 # TODO this check should be a method on fields.one2many.
2759 if f._obj in self.pool:
2760 other = self.pool[f._obj]
2761 # TODO the condition could use fields_get_keys().
2762 if f._fields_id not in other._columns.keys():
2763 if f._fields_id not in other._inherit_fields.keys():
2764 raise except_orm('Programming Error', "There is no reference field '%s' found for '%s'" % (f._fields_id, f._obj,))
2766 def _m2m_raise_or_create_relation(self, cr, f):
2767 m2m_tbl, col1, col2 = f._sql_names(self)
2768 self._save_relation_table(cr, m2m_tbl)
2769 cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (m2m_tbl,))
2770 if not cr.dictfetchall():
2771 if f._obj not in self.pool:
2772 raise except_orm('Programming Error', 'Many2Many destination model does not exist: `%s`' % (f._obj,))
2773 dest_model = self.pool[f._obj]
2774 ref = dest_model._table
2775 cr.execute('CREATE TABLE "%s" ("%s" INTEGER NOT NULL, "%s" INTEGER NOT NULL, UNIQUE("%s","%s"))' % (m2m_tbl, col1, col2, col1, col2))
2776 # create foreign key references with ondelete=cascade, unless the targets are SQL views
2777 cr.execute("SELECT relkind FROM pg_class WHERE relkind IN ('v') AND relname=%s", (ref,))
2778 if not cr.fetchall():
2779 self._m2o_add_foreign_key_unchecked(m2m_tbl, col2, dest_model, 'cascade')
2780 cr.execute("SELECT relkind FROM pg_class WHERE relkind IN ('v') AND relname=%s", (self._table,))
2781 if not cr.fetchall():
2782 self._m2o_add_foreign_key_unchecked(m2m_tbl, col1, self, 'cascade')
2784 cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (m2m_tbl, col1, m2m_tbl, col1))
2785 cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (m2m_tbl, col2, m2m_tbl, col2))
2786 cr.execute("COMMENT ON TABLE \"%s\" IS 'RELATION BETWEEN %s AND %s'" % (m2m_tbl, self._table, ref))
2788 _schema.debug("Create table '%s': m2m relation between '%s' and '%s'", m2m_tbl, self._table, ref)
2791 def _add_sql_constraints(self, cr):
2794 Modify this model's database table constraints so they match the one in
2798 def unify_cons_text(txt):
2799 return txt.lower().replace(', ',',').replace(' (','(')
2801 for (key, con, _) in self._sql_constraints:
2802 conname = '%s_%s' % (self._table, key)
2804 self._save_constraint(cr, conname, 'u')
2805 cr.execute("SELECT conname, pg_catalog.pg_get_constraintdef(oid, true) as condef FROM pg_constraint where conname=%s", (conname,))
2806 existing_constraints = cr.dictfetchall()
2810 'query': 'ALTER TABLE "%s" DROP CONSTRAINT "%s"' % (self._table, conname, ),
2811 'msg_ok': "Table '%s': dropped constraint '%s'. Reason: its definition changed from '%%s' to '%s'" % (
2812 self._table, conname, con),
2813 'msg_err': "Table '%s': unable to drop \'%s\' constraint !" % (self._table, con),
2818 'query': 'ALTER TABLE "%s" ADD CONSTRAINT "%s" %s' % (self._table, conname, con,),
2819 'msg_ok': "Table '%s': added constraint '%s' with definition=%s" % (self._table, conname, con),
2820 'msg_err': "Table '%s': unable to add \'%s\' constraint !\n If you want to have it, you should update the records and execute manually:\n%%s" % (
2826 if not existing_constraints:
2827 # constraint does not exists:
2828 sql_actions['add']['execute'] = True
2829 sql_actions['add']['msg_err'] = sql_actions['add']['msg_err'] % (sql_actions['add']['query'], )
2830 elif unify_cons_text(con) not in [unify_cons_text(item['condef']) for item in existing_constraints]:
2831 # constraint exists but its definition has changed:
2832 sql_actions['drop']['execute'] = True
2833 sql_actions['drop']['msg_ok'] = sql_actions['drop']['msg_ok'] % (existing_constraints[0]['condef'].lower(), )
2834 sql_actions['add']['execute'] = True
2835 sql_actions['add']['msg_err'] = sql_actions['add']['msg_err'] % (sql_actions['add']['query'], )
2837 # we need to add the constraint:
2838 sql_actions = [item for item in sql_actions.values()]
2839 sql_actions.sort(key=lambda x: x['order'])
2840 for sql_action in [action for action in sql_actions if action['execute']]:
2842 cr.execute(sql_action['query'])
2844 _schema.debug(sql_action['msg_ok'])
2846 _schema.warning(sql_action['msg_err'])
2850 def _execute_sql(self, cr):
2851 """ Execute the SQL code from the _sql attribute (if any)."""
2852 if hasattr(self, "_sql"):
2853 for line in self._sql.split(';'):
2854 line2 = line.replace('\n', '').strip()
2860 # Update objects that uses this one to update their _inherits fields
2864 def _inherits_reload_src(cls):
2865 """ Recompute the _inherit_fields mapping on each _inherits'd child model."""
2866 for model in cls.pool.values():
2867 if cls._name in model._inherits:
2868 model._inherits_reload()
2871 def _inherits_reload(cls):
2872 """ Recompute the _inherit_fields mapping.
2874 This will also call itself on each inherits'd child model.
2878 for table in cls._inherits:
2879 other = cls.pool[table]
2880 for col in other._columns.keys():
2881 res[col] = (table, cls._inherits[table], other._columns[col], table)
2882 for col in other._inherit_fields.keys():
2883 res[col] = (table, cls._inherits[table], other._inherit_fields[col][2], other._inherit_fields[col][3])
2884 cls._inherit_fields = res
2885 cls._all_columns = cls._get_column_infos()
2887 # interface columns with new-style fields
2888 for attr, column in cls._columns.items():
2889 if attr not in cls._fields:
2890 cls._add_field(attr, column.to_field())
2892 # interface inherited fields with new-style fields (note that the
2893 # reverse order is for being consistent with _all_columns above)
2894 for parent_model, parent_field in reversed(cls._inherits.items()):
2895 for attr, field in cls.pool[parent_model]._fields.iteritems():
2896 if attr not in cls._fields:
2897 new_field = field.copy(related=(parent_field, attr), _origin=field)
2898 cls._add_field(attr, new_field)
2900 cls._inherits_reload_src()
2903 def _get_column_infos(cls):
2904 """Returns a dict mapping all fields names (direct fields and
2905 inherited field via _inherits) to a ``column_info`` struct
2906 giving detailed columns """
2908 # do not inverse for loops, since local fields may hide inherited ones!
2909 for k, (parent, m2o, col, original_parent) in cls._inherit_fields.iteritems():
2910 result[k] = fields.column_info(k, col, parent, m2o, original_parent)
2911 for k, col in cls._columns.iteritems():
2912 result[k] = fields.column_info(k, col)
2916 def _inherits_check(cls):
2917 for table, field_name in cls._inherits.items():
2918 if field_name not in cls._columns:
2919 _logger.info('Missing many2one field definition for _inherits reference "%s" in "%s", using default one.', field_name, cls._name)
2920 cls._columns[field_name] = fields.many2one(table, string="Automatically created field to link to parent %s" % table,
2921 required=True, ondelete="cascade")
2922 elif not cls._columns[field_name].required or cls._columns[field_name].ondelete.lower() not in ("cascade", "restrict"):
2923 _logger.warning('Field definition for _inherits reference "%s" in "%s" must be marked as "required" with ondelete="cascade" or "restrict", forcing it to required + cascade.', field_name, cls._name)
2924 cls._columns[field_name].required = True
2925 cls._columns[field_name].ondelete = "cascade"
2927 # reflect fields with delegate=True in dictionary cls._inherits
2928 for field in cls._fields.itervalues():
2929 if field.type == 'many2one' and not field.related and field.delegate:
2930 if not field.required:
2931 _logger.warning("Field %s with delegate=True must be required.", field)
2932 field.required = True
2933 if field.ondelete.lower() not in ('cascade', 'restrict'):
2934 field.ondelete = 'cascade'
2935 cls._inherits[field.comodel_name] = field.name
2938 def _prepare_setup_fields(self):
2939 """ Prepare the setup of fields once the models have been loaded. """
2940 for field in self._fields.itervalues():
2944 def _setup_fields(self):
2945 """ Setup the fields (dependency triggers, etc). """
2946 for field in self._fields.itervalues():
2947 field.setup(self.env)
2949 # group fields by compute to determine field.computed_fields
2950 fields_by_compute = defaultdict(list)
2951 for field in self._fields.itervalues():
2953 field.computed_fields = fields_by_compute[field.compute]
2954 field.computed_fields.append(field)
2956 field.computed_fields = []
2958 def fields_get(self, cr, user, allfields=None, context=None, write_access=True):
2959 """ Return the definition of each field.
2961 The returned value is a dictionary (indiced by field name) of
2962 dictionaries. The _inherits'd fields are included. The string, help,
2963 and selection (if present) attributes are translated.
2965 :param cr: database cursor
2966 :param user: current user id
2967 :param allfields: list of fields
2968 :param context: context arguments, like lang, time zone
2969 :return: dictionary of field dictionaries, each one describing a field of the business object
2970 :raise AccessError: * if user has no create/write rights on the requested object
2973 recs = self.browse(cr, user, [], context)
2976 for fname, field in self._fields.iteritems():
2977 if allfields and fname not in allfields:
2979 if field.groups and not recs.user_has_groups(field.groups):
2981 res[fname] = field.get_description(recs.env)
2983 # if user cannot create or modify records, make all fields readonly
2984 has_access = functools.partial(recs.check_access_rights, raise_exception=False)
2985 if not (has_access('write') or has_access('create')):
2986 for description in res.itervalues():
2987 description['readonly'] = True
2988 description['states'] = {}
2992 def get_empty_list_help(self, cr, user, help, context=None):
2993 """ Generic method giving the help message displayed when having
2994 no result to display in a list or kanban view. By default it returns
2995 the help given in parameter that is generally the help message
2996 defined in the action.
3000 def check_field_access_rights(self, cr, user, operation, fields, context=None):
3002 Check the user access rights on the given fields. This raises Access
3003 Denied if the user does not have the rights. Otherwise it returns the
3004 fields (as is if the fields is not falsy, or the readable/writable
3005 fields if fields is falsy).
3007 if user == SUPERUSER_ID:
3008 return fields or list(self._fields)
3011 """ determine whether user has access to field `fname` """
3012 field = self._fields.get(fname)
3013 if field and field.groups:
3014 return self.user_has_groups(cr, user, groups=field.groups, context=context)
3019 fields = filter(valid, self._fields)
3021 invalid_fields = set(filter(lambda name: not valid(name), fields))
3023 _logger.warning('Access Denied by ACLs for operation: %s, uid: %s, model: %s, fields: %s',
3024 operation, user, self._name, ', '.join(invalid_fields))
3026 _('The requested operation cannot be completed due to security restrictions. '
3027 'Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
3028 (self._description, operation))
3032 # new-style implementation of read(); old-style is defined below
3034 def read(self, fields=None, load='_classic_read'):
3035 """ Read the given fields for the records in `self`.
3037 :param fields: optional list of field names to return (default is
3039 :param load: deprecated, this argument is ignored
3040 :return: a list of dictionaries mapping field names to their values,
3041 with one dictionary per record
3042 :raise AccessError: if user has no read rights on some of the given
3045 # check access rights
3046 self.check_access_rights('read')
3047 fields = self.check_field_access_rights('read', fields)
3049 # split fields into stored and computed fields
3050 stored, computed = [], []
3052 if name in self._columns:
3054 elif name in self._fields:
3055 computed.append(name)
3057 _logger.warning("%s.read() with unknown field '%s'", self._name, name)
3059 # fetch stored fields from the database to the cache
3060 self._read_from_database(stored)
3062 # retrieve results from records; this takes values from the cache and
3063 # computes remaining fields
3065 name_fields = [(name, self._fields[name]) for name in (stored + computed)]
3066 use_name_get = (load == '_classic_read')
3069 values = {'id': record.id}
3070 for name, field in name_fields:
3071 values[name] = field.convert_to_read(record[name], use_name_get)
3072 result.append(values)
3073 except MissingError:
3078 # add explicit old-style implementation to read()
3080 def read(self, cr, user, ids, fields=None, context=None, load='_classic_read'):
3081 records = self.browse(cr, user, ids, context)
3082 result = BaseModel.read(records, fields, load=load)
3083 return result if isinstance(ids, list) else (bool(result) and result[0])
3086 def _prefetch_field(self, field):
3087 """ Read from the database in order to fetch `field` (:class:`Field`
3088 instance) for `self` in cache.
3090 # fetch the records of this model without field_name in their cache
3091 records = self._in_cache_without(field)
3093 # by default, simply fetch field
3094 fnames = set((field.name,))
3097 # columns may be missing from database, do not prefetch other fields
3099 elif self.env.in_draft:
3100 # we may be doing an onchange, do not prefetch other fields
3102 elif field in self.env.todo:
3103 # field must be recomputed, do not prefetch records to recompute
3104 records -= self.env.todo[field]
3105 elif self._columns[field.name]._prefetch:
3106 # here we can optimize: prefetch all classic and many2one fields
3108 for fname, fcolumn in self._columns.iteritems()
3109 if fcolumn._prefetch)
3111 # fetch records with read()
3112 assert self in records and field.name in fnames
3114 result = records.read(list(fnames), load='_classic_write')
3115 except AccessError as e:
3116 # update cache with the exception
3117 records._cache[field] = FailedValue(e)
3120 # check the cache, and update it if necessary
3121 if field not in self._cache:
3122 for values in result:
3123 record = self.browse(values.pop('id'))
3124 record._cache.update(record._convert_to_cache(values))
3125 if field not in self._cache:
3126 e = AccessError("No value found for %s.%s" % (self, field.name))
3127 self._cache[field] = FailedValue(e)
3130 def _read_from_database(self, field_names):
3131 """ Read the given fields of the records in `self` from the database,
3132 and store them in cache. Access errors are also stored in cache.
3135 cr, user, context = env.args
3137 # Construct a clause for the security rules.
3138 # 'tables' holds the list of tables necessary for the SELECT, including
3139 # the ir.rule clauses, and contains at least self._table.
3140 rule_clause, rule_params, tables = env['ir.rule'].domain_get(self._name, 'read')
3142 # determine the fields that are stored as columns in self._table
3143 fields_pre = [f for f in field_names if self._columns[f]._classic_write]
3145 # we need fully-qualified column names in case len(tables) > 1
3147 if isinstance(self._columns.get(f), fields.binary) and \
3148 context.get('bin_size_%s' % f, context.get('bin_size')):
3149 # PG 9.2 introduces conflicting pg_size_pretty(numeric) -> need ::cast
3150 return 'pg_size_pretty(length(%s."%s")::bigint) as "%s"' % (self._table, f, f)
3152 return '%s."%s"' % (self._table, f)
3153 qual_names = map(qualify, set(fields_pre + ['id']))
3155 query = """ SELECT %(qual_names)s FROM %(tables)s
3156 WHERE %(table)s.id IN %%s AND (%(extra)s)
3159 'qual_names': ",".join(qual_names),
3160 'tables': ",".join(tables),
3161 'table': self._table,
3162 'extra': " OR ".join(rule_clause) if rule_clause else "TRUE",
3163 'order': self._parent_order or self._order,
3167 for sub_ids in cr.split_for_in_conditions(self.ids):
3168 cr.execute(query, [tuple(sub_ids)] + rule_params)
3169 result.extend(cr.dictfetchall())
3171 ids = [vals['id'] for vals in result]
3174 # translate the fields if necessary
3175 if context.get('lang'):
3176 ir_translation = env['ir.translation']
3177 for f in fields_pre:
3178 if self._columns[f].translate:
3179 #TODO: optimize out of this loop
3180 res_trans = ir_translation._get_ids(
3181 '%s,%s' % (self._name, f), 'model', context['lang'], ids)
3183 vals[f] = res_trans.get(vals['id'], False) or vals[f]
3185 # apply the symbol_get functions of the fields we just read
3186 for f in fields_pre:
3187 symbol_get = self._columns[f]._symbol_get
3190 vals[f] = symbol_get(vals[f])
3192 # store result in cache for POST fields
3194 record = self.browse(vals['id'])
3195 record._cache.update(record._convert_to_cache(vals))
3197 # determine the fields that must be processed now
3198 fields_post = [f for f in field_names if not self._columns[f]._classic_write]
3200 # Compute POST fields, grouped by multi
3201 by_multi = defaultdict(list)
3202 for f in fields_post:
3203 by_multi[self._columns[f]._multi].append(f)
3205 for multi, fs in by_multi.iteritems():
3207 res2 = self._columns[fs[0]].get(cr, self._model, ids, fs, user, context=context, values=result)
3208 assert res2 is not None, \
3209 'The function field "%s" on the "%s" model returned None\n' \
3210 '(a dictionary was expected).' % (fs[0], self._name)
3212 # TOCHECK : why got string instend of dict in python2.6
3213 # if isinstance(res2[vals['id']], str): res2[vals['id']] = eval(res2[vals['id']])
3214 multi_fields = res2.get(vals['id'], {})
3217 vals[f] = multi_fields.get(f, [])
3220 res2 = self._columns[f].get(cr, self._model, ids, f, user, context=context, values=result)
3223 vals[f] = res2[vals['id']]
3227 # Warn about deprecated fields now that fields_pre and fields_post are computed
3228 for f in field_names:
3229 column = self._columns[f]
3230 if column.deprecated:
3231 _logger.warning('Field %s.%s is deprecated: %s', self._name, f, column.deprecated)
3233 # store result in cache
3235 record = self.browse(vals.pop('id'))
3236 record._cache.update(record._convert_to_cache(vals))
3238 # store failed values in cache for the records that could not be read
3239 fetched = self.browse(ids)
3240 missing = self - fetched
3242 extras = fetched - self
3245 _("Database fetch misses ids ({}) and has extra ids ({}), may be caused by a type incoherence in a previous request").format(
3246 ', '.join(map(repr, missing._ids)),
3247 ', '.join(map(repr, extras._ids)),
3249 # store an access error exception in existing records
3251 _('The requested operation cannot be completed due to security restrictions. Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
3252 (self._name, 'read')
3254 forbidden = missing.exists()
3255 forbidden._cache.update(FailedValue(exc))
3256 # store a missing error exception in non-existing records
3258 _('One of the documents you are trying to access has been deleted, please try again after refreshing.')
3260 (missing - forbidden)._cache.update(FailedValue(exc))
3263 def get_metadata(self):
3265 Returns some metadata about the given records.
3267 :return: list of ownership dictionaries for each requested record
3268 :rtype: list of dictionaries with the following keys:
3271 * create_uid: user who created the record
3272 * create_date: date when the record was created
3273 * write_uid: last user who changed the record
3274 * write_date: date of the last change to the record
3275 * xmlid: XML ID to use to refer to this record (if there is one), in format ``module.name``
3278 if self._log_access:
3279 fields += ['create_uid', 'create_date', 'write_uid', 'write_date']
3280 quoted_table = '"%s"' % self._table
3281 fields_str = ",".join('%s.%s' % (quoted_table, field) for field in fields)
3282 query = '''SELECT %s, __imd.module, __imd.name
3283 FROM %s LEFT JOIN ir_model_data __imd
3284 ON (__imd.model = %%s and __imd.res_id = %s.id)
3285 WHERE %s.id IN %%s''' % (fields_str, quoted_table, quoted_table, quoted_table)
3286 self._cr.execute(query, (self._name, tuple(self.ids)))
3287 res = self._cr.dictfetchall()
3289 uids = set(r[k] for r in res for k in ['write_uid', 'create_uid'] if r.get(k))
3290 names = dict(self.env['res.users'].browse(uids).name_get())
3294 value = r[key] = r[key] or False
3295 if key in ('write_uid', 'create_uid') and value in names:
3296 r[key] = (value, names[value])
3297 r['xmlid'] = ("%(module)s.%(name)s" % r) if r['name'] else False
3298 del r['name'], r['module']
3301 def _check_concurrency(self, cr, ids, context):
3304 if not (context.get(self.CONCURRENCY_CHECK_FIELD) and self._log_access):
3306 check_clause = "(id = %s AND %s < COALESCE(write_date, create_date, (now() at time zone 'UTC'))::timestamp)"
3307 for sub_ids in cr.split_for_in_conditions(ids):
3310 id_ref = "%s,%s" % (self._name, id)
3311 update_date = context[self.CONCURRENCY_CHECK_FIELD].pop(id_ref, None)
3313 ids_to_check.extend([id, update_date])
3314 if not ids_to_check:
3316 cr.execute("SELECT id FROM %s WHERE %s" % (self._table, " OR ".join([check_clause]*(len(ids_to_check)/2))), tuple(ids_to_check))
3319 # mention the first one only to keep the error message readable
3320 raise except_orm('ConcurrencyException', _('A document was modified since you last viewed it (%s:%d)') % (self._description, res[0]))
3322 def _check_record_rules_result_count(self, cr, uid, ids, result_ids, operation, context=None):
3323 """Verify the returned rows after applying record rules matches
3324 the length of `ids`, and raise an appropriate exception if it does not.
3328 ids, result_ids = set(ids), set(result_ids)
3329 missing_ids = ids - result_ids
3331 # Attempt to distinguish record rule restriction vs deleted records,
3332 # to provide a more specific error message - check if the missinf
3333 cr.execute('SELECT id FROM ' + self._table + ' WHERE id IN %s', (tuple(missing_ids),))
3334 forbidden_ids = [x[0] for x in cr.fetchall()]
3336 # the missing ids are (at least partially) hidden by access rules
3337 if uid == SUPERUSER_ID:
3339 _logger.warning('Access Denied by record rules for operation: %s on record ids: %r, uid: %s, model: %s', operation, forbidden_ids, uid, self._name)
3340 raise except_orm(_('Access Denied'),
3341 _('The requested operation cannot be completed due to security restrictions. Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
3342 (self._description, operation))
3344 # If we get here, the missing_ids are not in the database
3345 if operation in ('read','unlink'):
3346 # No need to warn about deleting an already deleted record.
3347 # And no error when reading a record that was deleted, to prevent spurious
3348 # errors for non-transactional search/read sequences coming from clients
3350 _logger.warning('Failed operation on deleted record(s): %s, uid: %s, model: %s', operation, uid, self._name)
3351 raise except_orm(_('Missing document(s)'),
3352 _('One of the documents you are trying to access has been deleted, please try again after refreshing.'))
3355 def check_access_rights(self, cr, uid, operation, raise_exception=True): # no context on purpose.
3356 """Verifies that the operation given by ``operation`` is allowed for the user
3357 according to the access rights."""
3358 return self.pool.get('ir.model.access').check(cr, uid, self._name, operation, raise_exception)
3360 def check_access_rule(self, cr, uid, ids, operation, context=None):
3361 """Verifies that the operation given by ``operation`` is allowed for the user
3362 according to ir.rules.
3364 :param operation: one of ``write``, ``unlink``
3365 :raise except_orm: * if current ir.rules do not permit this operation.
3366 :return: None if the operation is allowed
3368 if uid == SUPERUSER_ID:
3371 if self.is_transient():
3372 # Only one single implicit access rule for transient models: owner only!
3373 # This is ok to hardcode because we assert that TransientModels always
3374 # have log_access enabled so that the create_uid column is always there.
3375 # And even with _inherits, these fields are always present in the local
3376 # table too, so no need for JOINs.
3377 cr.execute("""SELECT distinct create_uid
3379 WHERE id IN %%s""" % self._table, (tuple(ids),))
3380 uids = [x[0] for x in cr.fetchall()]
3381 if len(uids) != 1 or uids[0] != uid:
3382 raise except_orm(_('Access Denied'),
3383 _('For this kind of document, you may only access records you created yourself.\n\n(Document type: %s)') % (self._description,))
3385 where_clause, where_params, tables = self.pool.get('ir.rule').domain_get(cr, uid, self._name, operation, context=context)
3387 where_clause = ' and ' + ' and '.join(where_clause)
3388 for sub_ids in cr.split_for_in_conditions(ids):
3389 cr.execute('SELECT ' + self._table + '.id FROM ' + ','.join(tables) +
3390 ' WHERE ' + self._table + '.id IN %s' + where_clause,
3391 [sub_ids] + where_params)
3392 returned_ids = [x['id'] for x in cr.dictfetchall()]
3393 self._check_record_rules_result_count(cr, uid, sub_ids, returned_ids, operation, context=context)
3395 def create_workflow(self, cr, uid, ids, context=None):
3396 """Create a workflow instance for each given record IDs."""
3397 from openerp import workflow
3399 workflow.trg_create(uid, self._name, res_id, cr)
3400 # self.invalidate_cache(cr, uid, context=context) ?
3403 def delete_workflow(self, cr, uid, ids, context=None):
3404 """Delete the workflow instances bound to the given record IDs."""
3405 from openerp import workflow
3407 workflow.trg_delete(uid, self._name, res_id, cr)
3408 self.invalidate_cache(cr, uid, context=context)
3411 def step_workflow(self, cr, uid, ids, context=None):
3412 """Reevaluate the workflow instances of the given record IDs."""
3413 from openerp import workflow
3415 workflow.trg_write(uid, self._name, res_id, cr)
3416 # self.invalidate_cache(cr, uid, context=context) ?
3419 def signal_workflow(self, cr, uid, ids, signal, context=None):
3420 """Send given workflow signal and return a dict mapping ids to workflow results"""
3421 from openerp import workflow
3424 result[res_id] = workflow.trg_validate(uid, self._name, res_id, signal, cr)
3425 # self.invalidate_cache(cr, uid, context=context) ?
3428 def redirect_workflow(self, cr, uid, old_new_ids, context=None):
3429 """ Rebind the workflow instance bound to the given 'old' record IDs to
3430 the given 'new' IDs. (``old_new_ids`` is a list of pairs ``(old, new)``.
3432 from openerp import workflow
3433 for old_id, new_id in old_new_ids:
3434 workflow.trg_redirect(uid, self._name, old_id, new_id, cr)
3435 self.invalidate_cache(cr, uid, context=context)
3438 def unlink(self, cr, uid, ids, context=None):
3440 Delete records with given ids
3442 :param cr: database cursor
3443 :param uid: current user id
3444 :param ids: id or list of ids
3445 :param context: (optional) context arguments, like lang, time zone
3447 :raise AccessError: * if user has no unlink rights on the requested object
3448 * if user tries to bypass access rules for unlink on the requested object
3449 :raise UserError: if the record is default property for other records
3454 if isinstance(ids, (int, long)):
3457 result_store = self._store_get_values(cr, uid, ids, self._all_columns.keys(), context)
3459 # for recomputing new-style fields
3460 recs = self.browse(cr, uid, ids, context)
3461 recs.modified(self._fields)
3463 self._check_concurrency(cr, ids, context)
3465 self.check_access_rights(cr, uid, 'unlink')
3467 ir_property = self.pool.get('ir.property')
3469 # Check if the records are used as default properties.
3470 domain = [('res_id', '=', False),
3471 ('value_reference', 'in', ['%s,%s' % (self._name, i) for i in ids]),
3473 if ir_property.search(cr, uid, domain, context=context):
3474 raise except_orm(_('Error'), _('Unable to delete this document because it is used as a default property'))
3476 # Delete the records' properties.
3477 property_ids = ir_property.search(cr, uid, [('res_id', 'in', ['%s,%s' % (self._name, i) for i in ids])], context=context)
3478 ir_property.unlink(cr, uid, property_ids, context=context)
3480 self.delete_workflow(cr, uid, ids, context=context)
3482 self.check_access_rule(cr, uid, ids, 'unlink', context=context)
3483 pool_model_data = self.pool.get('ir.model.data')
3484 ir_values_obj = self.pool.get('ir.values')
3485 for sub_ids in cr.split_for_in_conditions(ids):
3486 cr.execute('delete from ' + self._table + ' ' \
3487 'where id IN %s', (sub_ids,))
3489 # Removing the ir_model_data reference if the record being deleted is a record created by xml/csv file,
3490 # as these are not connected with real database foreign keys, and would be dangling references.
3491 # Note: following steps performed as admin to avoid access rights restrictions, and with no context
3492 # to avoid possible side-effects during admin calls.
3493 # Step 1. Calling unlink of ir_model_data only for the affected IDS
3494 reference_ids = pool_model_data.search(cr, SUPERUSER_ID, [('res_id','in',list(sub_ids)),('model','=',self._name)])
3495 # Step 2. Marching towards the real deletion of referenced records
3497 pool_model_data.unlink(cr, SUPERUSER_ID, reference_ids)
3499 # For the same reason, removing the record relevant to ir_values
3500 ir_value_ids = ir_values_obj.search(cr, uid,
3501 ['|',('value','in',['%s,%s' % (self._name, sid) for sid in sub_ids]),'&',('res_id','in',list(sub_ids)),('model','=',self._name)],
3504 ir_values_obj.unlink(cr, uid, ir_value_ids, context=context)
3506 # invalidate the *whole* cache, since the orm does not handle all
3507 # changes made in the database, like cascading delete!
3508 recs.invalidate_cache()
3510 for order, obj_name, store_ids, fields in result_store:
3511 if obj_name == self._name:
3512 effective_store_ids = set(store_ids) - set(ids)
3514 effective_store_ids = store_ids
3515 if effective_store_ids:
3516 obj = self.pool[obj_name]
3517 cr.execute('select id from '+obj._table+' where id IN %s', (tuple(effective_store_ids),))
3518 rids = map(lambda x: x[0], cr.fetchall())
3520 obj._store_set_values(cr, uid, rids, fields, context)
3522 # recompute new-style fields
3531 def write(self, vals):
3533 Update records in `self` with the given field values.
3535 :param vals: field values to update, e.g {'field_name': new_field_value, ...}
3536 :type vals: dictionary
3538 :raise AccessError: * if user has no write rights on the requested object
3539 * if user tries to bypass access rules for write on the requested object
3540 :raise ValidateError: if user tries to enter invalid value for a field that is not in selection
3541 :raise UserError: if a loop would be created in a hierarchy of objects a result of the operation (such as setting an object as its own parent)
3543 **Note**: The type of field values to pass in ``vals`` for relationship fields is specific:
3545 + For a many2many field, a list of tuples is expected.
3546 Here is the list of tuple that are accepted, with the corresponding semantics ::
3548 (0, 0, { values }) link to a new record that needs to be created with the given values dictionary
3549 (1, ID, { values }) update the linked record with id = ID (write *values* on it)
3550 (2, ID) remove and delete the linked record with id = ID (calls unlink on ID, that will delete the object completely, and the link to it as well)
3551 (3, ID) cut the link to the linked record with id = ID (delete the relationship between the two objects but does not delete the target object itself)
3552 (4, ID) link to existing record with id = ID (adds a relationship)
3553 (5) unlink all (like using (3,ID) for all linked records)
3554 (6, 0, [IDs]) replace the list of linked IDs (like using (5) then (4,ID) for each ID in the list of IDs)
3557 [(6, 0, [8, 5, 6, 4])] sets the many2many to ids [8, 5, 6, 4]
3559 + For a one2many field, a lits of tuples is expected.
3560 Here is the list of tuple that are accepted, with the corresponding semantics ::
3562 (0, 0, { values }) link to a new record that needs to be created with the given values dictionary
3563 (1, ID, { values }) update the linked record with id = ID (write *values* on it)
3564 (2, ID) remove and delete the linked record with id = ID (calls unlink on ID, that will delete the object completely, and the link to it as well)
3567 [(0, 0, {'field_name':field_value_record1, ...}), (0, 0, {'field_name':field_value_record2, ...})]
3569 + For a many2one field, simply use the ID of target record, which must already exist, or ``False`` to remove the link.
3570 + For a reference field, use a string with the model name, a comma, and the target object id (example: ``'product.product, 5'``)
3576 cr, uid, context = self.env.args
3577 self._check_concurrency(self._ids)
3578 self.check_access_rights('write')
3580 # No user-driven update of these columns
3581 for field in itertools.chain(MAGIC_COLUMNS, ('parent_left', 'parent_right')):
3582 vals.pop(field, None)
3584 # split up fields into old-style and pure new-style ones
3585 old_vals, new_vals, unknown = {}, {}, []
3586 for key, val in vals.iteritems():
3587 if key in self._columns:
3589 elif key in self._fields:
3595 _logger.warning("%s.write() with unknown fields: %s", self._name, ', '.join(sorted(unknown)))
3597 # write old-style fields with (low-level) method _write
3599 self._write(old_vals)
3601 # put the values of pure new-style fields into cache, and inverse them
3603 self._cache.update(self._convert_to_cache(new_vals))
3604 for key in new_vals:
3605 self._fields[key].determine_inverse(self)
3609 def _write(self, cr, user, ids, vals, context=None):
3610 # low-level implementation of write()
3615 self.check_field_access_rights(cr, user, 'write', vals.keys())
3616 for field in vals.keys():
3618 if field in self._columns:
3619 fobj = self._columns[field]
3620 elif field in self._inherit_fields:
3621 fobj = self._inherit_fields[field][2]
3628 for group in groups:
3629 module = group.split(".")[0]
3630 grp = group.split(".")[1]
3631 cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name=%s and module=%s and model=%s) and uid=%s", \
3632 (grp, module, 'res.groups', user))
3633 readonly = cr.fetchall()
3634 if readonly[0][0] >= 1:
3641 result = self._store_get_values(cr, user, ids, vals.keys(), context) or []
3643 # for recomputing new-style fields
3644 recs = self.browse(cr, user, ids, context)
3645 modified_fields = list(vals)
3646 if self._log_access:
3647 modified_fields += ['write_date', 'write_uid']
3648 recs.modified(modified_fields)
3650 parents_changed = []
3651 parent_order = self._parent_order or self._order
3652 if self._parent_store and (self._parent_name in vals) and not context.get('defer_parent_store_computation'):
3653 # The parent_left/right computation may take up to
3654 # 5 seconds. No need to recompute the values if the
3655 # parent is the same.
3656 # Note: to respect parent_order, nodes must be processed in
3657 # order, so ``parents_changed`` must be ordered properly.
3658 parent_val = vals[self._parent_name]
3660 query = "SELECT id FROM %s WHERE id IN %%s AND (%s != %%s OR %s IS NULL) ORDER BY %s" % \
3661 (self._table, self._parent_name, self._parent_name, parent_order)
3662 cr.execute(query, (tuple(ids), parent_val))
3664 query = "SELECT id FROM %s WHERE id IN %%s AND (%s IS NOT NULL) ORDER BY %s" % \
3665 (self._table, self._parent_name, parent_order)
3666 cr.execute(query, (tuple(ids),))
3667 parents_changed = map(operator.itemgetter(0), cr.fetchall())
3674 totranslate = context.get('lang', False) and (context['lang'] != 'en_US')
3676 field_column = self._all_columns.get(field) and self._all_columns.get(field).column
3677 if field_column and field_column.deprecated:
3678 _logger.warning('Field %s.%s is deprecated: %s', self._name, field, field_column.deprecated)
3679 if field in self._columns:
3680 if self._columns[field]._classic_write and not (hasattr(self._columns[field], '_fnct_inv')):
3681 if (not totranslate) or not self._columns[field].translate:
3682 upd0.append('"'+field+'"='+self._columns[field]._symbol_set[0])
3683 upd1.append(self._columns[field]._symbol_set[1](vals[field]))
3684 direct.append(field)
3686 upd_todo.append(field)
3688 updend.append(field)
3689 if field in self._columns \
3690 and hasattr(self._columns[field], 'selection') \
3692 self._check_selection_field_value(cr, user, field, vals[field], context=context)
3694 if self._log_access:
3695 upd0.append('write_uid=%s')
3696 upd0.append("write_date=(now() at time zone 'UTC')")
3700 self.check_access_rule(cr, user, ids, 'write', context=context)
3701 for sub_ids in cr.split_for_in_conditions(ids):
3702 cr.execute('update ' + self._table + ' set ' + ','.join(upd0) + ' ' \
3703 'where id IN %s', upd1 + [sub_ids])
3704 if cr.rowcount != len(sub_ids):
3705 raise MissingError(_('One of the records you are trying to modify has already been deleted (Document type: %s).') % self._description)
3710 if self._columns[f].translate:
3711 src_trans = self.pool[self._name].read(cr, user, ids, [f])[0][f]
3714 # Inserting value to DB
3715 context_wo_lang = dict(context, lang=None)
3716 self.write(cr, user, ids, {f: vals[f]}, context=context_wo_lang)
3717 self.pool.get('ir.translation')._set_ids(cr, user, self._name+','+f, 'model', context['lang'], ids, vals[f], src_trans)
3719 # call the 'set' method of fields which are not classic_write
3720 upd_todo.sort(lambda x, y: self._columns[x].priority-self._columns[y].priority)
3722 # default element in context must be removed when call a one2many or many2many
3723 rel_context = context.copy()
3724 for c in context.items():
3725 if c[0].startswith('default_'):
3726 del rel_context[c[0]]
3728 for field in upd_todo:
3730 result += self._columns[field].set(cr, self, id, field, vals[field], user, context=rel_context) or []
3732 unknown_fields = updend[:]
3733 for table in self._inherits:
3734 col = self._inherits[table]
3736 for sub_ids in cr.split_for_in_conditions(ids):
3737 cr.execute('select distinct "'+col+'" from "'+self._table+'" ' \
3738 'where id IN %s', (sub_ids,))
3739 nids.extend([x[0] for x in cr.fetchall()])
3743 if self._inherit_fields[val][0] == table:
3745 unknown_fields.remove(val)
3747 self.pool[table].write(cr, user, nids, v, context)
3751 'No such field(s) in model %s: %s.',
3752 self._name, ', '.join(unknown_fields))
3754 # check Python constraints
3755 recs._validate_fields(vals)
3757 # TODO: use _order to set dest at the right position and not first node of parent
3758 # We can't defer parent_store computation because the stored function
3759 # fields that are computer may refer (directly or indirectly) to
3760 # parent_left/right (via a child_of domain)
3763 self.pool._init_parent[self._name] = True
3765 order = self._parent_order or self._order
3766 parent_val = vals[self._parent_name]
3768 clause, params = '%s=%%s' % (self._parent_name,), (parent_val,)
3770 clause, params = '%s IS NULL' % (self._parent_name,), ()
3772 for id in parents_changed:
3773 cr.execute('SELECT parent_left, parent_right FROM %s WHERE id=%%s' % (self._table,), (id,))
3774 pleft, pright = cr.fetchone()
3775 distance = pright - pleft + 1
3777 # Positions of current siblings, to locate proper insertion point;
3778 # this can _not_ be fetched outside the loop, as it needs to be refreshed
3779 # after each update, in case several nodes are sequentially inserted one
3780 # next to the other (i.e computed incrementally)
3781 cr.execute('SELECT parent_right, id FROM %s WHERE %s ORDER BY %s' % (self._table, clause, parent_order), params)
3782 parents = cr.fetchall()
3784 # Find Position of the element
3786 for (parent_pright, parent_id) in parents:
3789 position = parent_pright and parent_pright + 1 or 1
3791 # It's the first node of the parent
3796 cr.execute('select parent_left from '+self._table+' where id=%s', (parent_val,))
3797 position = cr.fetchone()[0] + 1
3799 if pleft < position <= pright:
3800 raise except_orm(_('UserError'), _('Recursivity Detected.'))
3802 if pleft < position:
3803 cr.execute('update '+self._table+' set parent_left=parent_left+%s where parent_left>=%s', (distance, position))
3804 cr.execute('update '+self._table+' set parent_right=parent_right+%s where parent_right>=%s', (distance, position))
3805 cr.execute('update '+self._table+' set parent_left=parent_left+%s, parent_right=parent_right+%s where parent_left>=%s and parent_left<%s', (position-pleft, position-pleft, pleft, pright))
3807 cr.execute('update '+self._table+' set parent_left=parent_left+%s where parent_left>=%s', (distance, position))
3808 cr.execute('update '+self._table+' set parent_right=parent_right+%s where parent_right>=%s', (distance, position))
3809 cr.execute('update '+self._table+' set parent_left=parent_left-%s, parent_right=parent_right-%s where parent_left>=%s and parent_left<%s', (pleft-position+distance, pleft-position+distance, pleft+distance, pright+distance))
3810 recs.invalidate_cache(['parent_left', 'parent_right'])
3812 result += self._store_get_values(cr, user, ids, vals.keys(), context)
3815 # for recomputing new-style fields
3816 recs.modified(modified_fields)
3819 for order, model_name, ids_to_update, fields_to_recompute in result:
3820 key = (model_name, tuple(fields_to_recompute))
3821 done.setdefault(key, {})
3822 # avoid to do several times the same computation
3824 for id in ids_to_update:
3825 if id not in done[key]:
3826 done[key][id] = True
3828 self.pool[model_name]._store_set_values(cr, user, todo, fields_to_recompute, context)
3830 # recompute new-style fields
3831 if context.get('recompute', True):
3834 self.step_workflow(cr, user, ids, context=context)
3838 # TODO: Should set perm to user.xxx
3841 @api.returns('self', lambda value: value.id)
3842 def create(self, vals):
3843 """ Create a new record for the model.
3845 The values for the new record are initialized using the dictionary
3846 `vals`, and if necessary the result of :meth:`default_get`.
3848 :param vals: field values like ``{'field_name': field_value, ...}``,
3849 see :meth:`write` for details about the values format
3850 :return: new record created
3851 :raise AccessError: * if user has no create rights on the requested object
3852 * if user tries to bypass access rules for create on the requested object
3853 :raise ValidateError: if user tries to enter invalid value for a field that is not in selection
3854 :raise UserError: if a loop would be created in a hierarchy of objects a result of the operation (such as setting an object as its own parent)
3856 self.check_access_rights('create')
3858 # add missing defaults, and drop fields that may not be set by user
3859 vals = self._add_missing_default_values(vals)
3860 for field in itertools.chain(MAGIC_COLUMNS, ('parent_left', 'parent_right')):
3861 vals.pop(field, None)
3863 # split up fields into old-style and pure new-style ones
3864 old_vals, new_vals, unknown = {}, {}, []
3865 for key, val in vals.iteritems():
3866 if key in self._all_columns:
3868 elif key in self._fields:
3874 _logger.warning("%s.create() with unknown fields: %s", self._name, ', '.join(sorted(unknown)))
3876 # create record with old-style fields
3877 record = self.browse(self._create(old_vals))
3879 # put the values of pure new-style fields into cache, and inverse them
3880 record._cache.update(record._convert_to_cache(new_vals))
3881 for key in new_vals:
3882 self._fields[key].determine_inverse(record)
3886 def _create(self, cr, user, vals, context=None):
3887 # low-level implementation of create()
3891 if self.is_transient():
3892 self._transient_vacuum(cr, user)
3895 for v in self._inherits:
3896 if self._inherits[v] not in vals:
3899 tocreate[v] = {'id': vals[self._inherits[v]]}
3902 # list of column assignments defined as tuples like:
3903 # (column_name, format_string, column_value)
3904 # (column_name, sql_formula)
3905 # Those tuples will be used by the string formatting for the INSERT
3907 ('id', "nextval('%s')" % self._sequence),
3912 for v in vals.keys():
3913 if v in self._inherit_fields and v not in self._columns:
3914 (table, col, col_detail, original_parent) = self._inherit_fields[v]
3915 tocreate[table][v] = vals[v]
3918 if (v not in self._inherit_fields) and (v not in self._columns):
3920 unknown_fields.append(v)
3923 'No such field(s) in model %s: %s.',
3924 self._name, ', '.join(unknown_fields))
3926 for table in tocreate:
3927 if self._inherits[table] in vals:
3928 del vals[self._inherits[table]]
3930 record_id = tocreate[table].pop('id', None)
3932 if isinstance(record_id, dict):
3933 # Shit happens: this possibly comes from a new record
3934 tocreate[table] = dict(record_id, **tocreate[table])
3937 # When linking/creating parent records, force context without 'no_store_function' key that
3938 # defers stored functions computing, as these won't be computed in batch at the end of create().
3939 parent_context = dict(context)
3940 parent_context.pop('no_store_function', None)
3942 if record_id is None or not record_id:
3943 record_id = self.pool[table].create(cr, user, tocreate[table], context=parent_context)
3945 self.pool[table].write(cr, user, [record_id], tocreate[table], context=parent_context)
3947 updates.append((self._inherits[table], '%s', record_id))
3949 #Start : Set bool fields to be False if they are not touched(to make search more powerful)
3950 bool_fields = [x for x in self._columns.keys() if self._columns[x]._type=='boolean']
3952 for bool_field in bool_fields:
3953 if bool_field not in vals:
3954 vals[bool_field] = False
3956 for field in vals.keys():
3958 if field in self._columns:
3959 fobj = self._columns[field]
3961 fobj = self._inherit_fields[field][2]
3967 for group in groups:
3968 module = group.split(".")[0]
3969 grp = group.split(".")[1]
3970 cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name='%s' and module='%s' and model='%s') and uid=%s" % \
3971 (grp, module, 'res.groups', user))
3972 readonly = cr.fetchall()
3973 if readonly[0][0] >= 1:
3976 elif readonly[0][0] == 0:
3984 current_field = self._columns[field]
3985 if current_field._classic_write:
3986 updates.append((field, '%s', current_field._symbol_set[1](vals[field])))
3988 #for the function fields that receive a value, we set them directly in the database
3989 #(they may be required), but we also need to trigger the _fct_inv()
3990 if (hasattr(current_field, '_fnct_inv')) and not isinstance(current_field, fields.related):
3991 #TODO: this way to special case the related fields is really creepy but it shouldn't be changed at
3992 #one week of the release candidate. It seems the only good way to handle correctly this is to add an
3993 #attribute to make a field `really readonly´ and thus totally ignored by the create()... otherwise
3994 #if, for example, the related has a default value (for usability) then the fct_inv is called and it
3995 #may raise some access rights error. Changing this is a too big change for now, and is thus postponed
3996 #after the release but, definitively, the behavior shouldn't be different for related and function
3998 upd_todo.append(field)
4000 #TODO: this `if´ statement should be removed because there is no good reason to special case the fields
4001 #related. See the above TODO comment for further explanations.
4002 if not isinstance(current_field, fields.related):
4003 upd_todo.append(field)
4004 if field in self._columns \
4005 and hasattr(current_field, 'selection') \
4007 self._check_selection_field_value(cr, user, field, vals[field], context=context)
4008 if self._log_access:
4009 updates.append(('create_uid', '%s', user))
4010 updates.append(('write_uid', '%s', user))
4011 updates.append(('create_date', "(now() at time zone 'UTC')"))
4012 updates.append(('write_date', "(now() at time zone 'UTC')"))
4014 # the list of tuples used in this formatting corresponds to
4015 # tuple(field_name, format, value)
4016 # In some case, for example (id, create_date, write_date) we does not
4017 # need to read the third value of the tuple, because the real value is
4018 # encoded in the second value (the format).
4020 """INSERT INTO "%s" (%s) VALUES(%s) RETURNING id""" % (
4022 ', '.join('"%s"' % u[0] for u in updates),
4023 ', '.join(u[1] for u in updates)
4025 tuple([u[2] for u in updates if len(u) > 2])
4028 id_new, = cr.fetchone()
4029 recs = self.browse(cr, user, id_new, context)
4030 upd_todo.sort(lambda x, y: self._columns[x].priority-self._columns[y].priority)
4032 if self._parent_store and not context.get('defer_parent_store_computation'):
4034 self.pool._init_parent[self._name] = True
4036 parent = vals.get(self._parent_name, False)
4038 cr.execute('select parent_right from '+self._table+' where '+self._parent_name+'=%s order by '+(self._parent_order or self._order), (parent,))
4040 result_p = cr.fetchall()
4041 for (pleft,) in result_p:
4046 cr.execute('select parent_left from '+self._table+' where id=%s', (parent,))
4047 pleft_old = cr.fetchone()[0]
4050 cr.execute('select max(parent_right) from '+self._table)
4051 pleft = cr.fetchone()[0] or 0
4052 cr.execute('update '+self._table+' set parent_left=parent_left+2 where parent_left>%s', (pleft,))
4053 cr.execute('update '+self._table+' set parent_right=parent_right+2 where parent_right>%s', (pleft,))
4054 cr.execute('update '+self._table+' set parent_left=%s,parent_right=%s where id=%s', (pleft+1, pleft+2, id_new))
4055 recs.invalidate_cache(['parent_left', 'parent_right'])
4057 # default element in context must be remove when call a one2many or many2many
4058 rel_context = context.copy()
4059 for c in context.items():
4060 if c[0].startswith('default_'):
4061 del rel_context[c[0]]
4064 for field in upd_todo:
4065 result += self._columns[field].set(cr, self, id_new, field, vals[field], user, rel_context) or []
4067 # check Python constraints
4068 recs._validate_fields(vals)
4070 if not context.get('no_store_function', False):
4071 result += self._store_get_values(cr, user, [id_new],
4072 list(set(vals.keys() + self._inherits.values())),
4076 for order, model_name, ids, fields2 in result:
4077 if not (model_name, ids, fields2) in done:
4078 self.pool[model_name]._store_set_values(cr, user, ids, fields2, context)
4079 done.append((model_name, ids, fields2))
4081 # recompute new-style fields
4082 modified_fields = list(vals)
4083 if self._log_access:
4084 modified_fields += ['create_uid', 'create_date', 'write_uid', 'write_date']
4085 recs.modified(modified_fields)
4088 if self._log_create and not (context and context.get('no_store_function', False)):
4089 message = self._description + \
4091 self.name_get(cr, user, [id_new], context=context)[0][1] + \
4092 "' " + _("created.")
4093 self.log(cr, user, id_new, message, True, context=context)
4095 self.check_access_rule(cr, user, [id_new], 'create', context=context)
4096 self.create_workflow(cr, user, [id_new], context=context)
4099 def _store_get_values(self, cr, uid, ids, fields, context):
4100 """Returns an ordered list of fields.function to call due to
4101 an update operation on ``fields`` of records with ``ids``,
4102 obtained by calling the 'store' triggers of these fields,
4103 as setup by their 'store' attribute.
4105 :return: [(priority, model_name, [record_ids,], [function_fields,])]
4107 if fields is None: fields = []
4108 stored_functions = self.pool._store_function.get(self._name, [])
4110 # use indexed names for the details of the stored_functions:
4111 model_name_, func_field_to_compute_, target_ids_func_, trigger_fields_, priority_ = range(5)
4113 # only keep store triggers that should be triggered for the ``fields``
4115 triggers_to_compute = (
4116 f for f in stored_functions
4117 if not f[trigger_fields_] or set(fields).intersection(f[trigger_fields_])
4121 target_id_results = {}
4122 for store_trigger in triggers_to_compute:
4123 target_func_id_ = id(store_trigger[target_ids_func_])
4124 if target_func_id_ not in target_id_results:
4125 # use admin user for accessing objects having rules defined on store fields
4126 target_id_results[target_func_id_] = [i for i in store_trigger[target_ids_func_](self, cr, SUPERUSER_ID, ids, context) if i]
4127 target_ids = target_id_results[target_func_id_]
4129 # the compound key must consider the priority and model name
4130 key = (store_trigger[priority_], store_trigger[model_name_])
4131 for target_id in target_ids:
4132 to_compute_map.setdefault(key, {}).setdefault(target_id,set()).add(tuple(store_trigger))
4134 # Here to_compute_map looks like:
4135 # { (10, 'model_a') : { target_id1: [ (trigger_1_tuple, trigger_2_tuple) ], ... }
4136 # (20, 'model_a') : { target_id2: [ (trigger_3_tuple, trigger_4_tuple) ], ... }
4137 # (99, 'model_a') : { target_id1: [ (trigger_5_tuple, trigger_6_tuple) ], ... }
4140 # Now we need to generate the batch function calls list
4142 # { (10, 'model_a') : [(10, 'model_a', [record_ids,], [function_fields,])] }
4144 for ((priority,model), id_map) in to_compute_map.iteritems():
4145 trigger_ids_maps = {}
4146 # function_ids_maps =
4147 # { (function_1_tuple, function_2_tuple) : [target_id1, target_id2, ..] }
4148 for target_id, triggers in id_map.iteritems():
4149 trigger_ids_maps.setdefault(tuple(triggers), []).append(target_id)
4150 for triggers, target_ids in trigger_ids_maps.iteritems():
4151 call_map.setdefault((priority,model),[]).append((priority, model, target_ids,
4152 [t[func_field_to_compute_] for t in triggers]))
4155 result = reduce(operator.add, (call_map[k] for k in sorted(call_map)))
4158 def _store_set_values(self, cr, uid, ids, fields, context):
4159 """Calls the fields.function's "implementation function" for all ``fields``, on records with ``ids`` (taking care of
4160 respecting ``multi`` attributes), and stores the resulting values in the database directly."""
4165 if self._log_access:
4166 cr.execute('select id,write_date from '+self._table+' where id IN %s', (tuple(ids),))
4170 field_dict.setdefault(r[0], [])
4171 res_date = time.strptime((r[1])[:19], '%Y-%m-%d %H:%M:%S')
4172 write_date = datetime.datetime.fromtimestamp(time.mktime(res_date))
4173 for i in self.pool._store_function.get(self._name, []):
4175 up_write_date = write_date + datetime.timedelta(hours=i[5])
4176 if datetime.datetime.now() < up_write_date:
4178 field_dict[r[0]].append(i[1])
4184 if self._columns[f]._multi not in keys:
4185 keys.append(self._columns[f]._multi)
4186 todo.setdefault(self._columns[f]._multi, [])
4187 todo[self._columns[f]._multi].append(f)
4191 # use admin user for accessing objects having rules defined on store fields
4192 result = self._columns[val[0]].get(cr, self, ids, val, SUPERUSER_ID, context=context)
4193 for id, value in result.items():
4195 for f in value.keys():
4196 if f in field_dict[id]:
4203 if self._columns[v]._type == 'many2one':
4205 value[v] = value[v][0]
4208 upd0.append('"'+v+'"='+self._columns[v]._symbol_set[0])
4209 upd1.append(self._columns[v]._symbol_set[1](value[v]))
4212 cr.execute('update "' + self._table + '" set ' + \
4213 ','.join(upd0) + ' where id = %s', upd1)
4217 # use admin user for accessing objects having rules defined on store fields
4218 result = self._columns[f].get(cr, self, ids, f, SUPERUSER_ID, context=context)
4219 for r in result.keys():
4221 if r in field_dict.keys():
4222 if f in field_dict[r]:
4224 for id, value in result.items():
4225 if self._columns[f]._type == 'many2one':
4230 cr.execute('update "' + self._table + '" set ' + \
4231 '"'+f+'"='+self._columns[f]._symbol_set[0] + ' where id = %s', (self._columns[f]._symbol_set[1](value), id))
4233 # invalidate the cache for the modified fields
4234 self.browse(cr, uid, ids, context).modified(fields)
4238 # TODO: ameliorer avec NULL
4239 def _where_calc(self, cr, user, domain, active_test=True, context=None):
4240 """Computes the WHERE clause needed to implement an OpenERP domain.
4241 :param domain: the domain to compute
4243 :param active_test: whether the default filtering of records with ``active``
4244 field set to ``False`` should be applied.
4245 :return: the query expressing the given domain as provided in domain
4246 :rtype: osv.query.Query
4251 # if the object has a field named 'active', filter out all inactive
4252 # records unless they were explicitely asked for
4253 if 'active' in self._all_columns and (active_test and context.get('active_test', True)):
4255 # the item[0] trick below works for domain items and '&'/'|'/'!'
4257 if not any(item[0] == 'active' for item in domain):
4258 domain.insert(0, ('active', '=', 1))
4260 domain = [('active', '=', 1)]
4263 e = expression.expression(cr, user, domain, self, context)
4264 tables = e.get_tables()
4265 where_clause, where_params = e.to_sql()
4266 where_clause = where_clause and [where_clause] or []
4268 where_clause, where_params, tables = [], [], ['"%s"' % self._table]
4270 return Query(tables, where_clause, where_params)
4272 def _check_qorder(self, word):
4273 if not regex_order.match(word):
4274 raise except_orm(_('AccessError'), _('Invalid "order" specified. A valid "order" specification is a comma-separated list of valid field names (optionally followed by asc/desc for the direction)'))
4277 def _apply_ir_rules(self, cr, uid, query, mode='read', context=None):
4278 """Add what's missing in ``query`` to implement all appropriate ir.rules
4279 (using the ``model_name``'s rules or the current model's rules if ``model_name`` is None)
4281 :param query: the current query object
4283 if uid == SUPERUSER_ID:
4286 def apply_rule(added_clause, added_params, added_tables, parent_model=None):
4287 """ :param parent_model: name of the parent model, if the added
4288 clause comes from a parent model
4292 # as inherited rules are being applied, we need to add the missing JOIN
4293 # to reach the parent table (if it was not JOINed yet in the query)
4294 parent_alias = self._inherits_join_add(self, parent_model, query)
4295 # inherited rules are applied on the external table -> need to get the alias and replace
4296 parent_table = self.pool[parent_model]._table
4297 added_clause = [clause.replace('"%s"' % parent_table, '"%s"' % parent_alias) for clause in added_clause]
4298 # change references to parent_table to parent_alias, because we now use the alias to refer to the table
4300 for table in added_tables:
4301 # table is just a table name -> switch to the full alias
4302 if table == '"%s"' % parent_table:
4303 new_tables.append('"%s" as "%s"' % (parent_table, parent_alias))
4304 # table is already a full statement -> replace reference to the table to its alias, is correct with the way aliases are generated
4306 new_tables.append(table.replace('"%s"' % parent_table, '"%s"' % parent_alias))
4307 added_tables = new_tables
4308 query.where_clause += added_clause
4309 query.where_clause_params += added_params
4310 for table in added_tables:
4311 if table not in query.tables:
4312 query.tables.append(table)
4316 # apply main rules on the object
4317 rule_obj = self.pool.get('ir.rule')
4318 rule_where_clause, rule_where_clause_params, rule_tables = rule_obj.domain_get(cr, uid, self._name, mode, context=context)
4319 apply_rule(rule_where_clause, rule_where_clause_params, rule_tables)
4321 # apply ir.rules from the parents (through _inherits)
4322 for inherited_model in self._inherits:
4323 rule_where_clause, rule_where_clause_params, rule_tables = rule_obj.domain_get(cr, uid, inherited_model, mode, context=context)
4324 apply_rule(rule_where_clause, rule_where_clause_params, rule_tables,
4325 parent_model=inherited_model)
4327 def _generate_m2o_order_by(self, order_field, query):
4329 Add possibly missing JOIN to ``query`` and generate the ORDER BY clause for m2o fields,
4330 either native m2o fields or function/related fields that are stored, including
4331 intermediate JOINs for inheritance if required.
4333 :return: the qualified field name to use in an ORDER BY clause to sort by ``order_field``
4335 if order_field not in self._columns and order_field in self._inherit_fields:
4336 # also add missing joins for reaching the table containing the m2o field
4337 qualified_field = self._inherits_join_calc(order_field, query)
4338 order_field_column = self._inherit_fields[order_field][2]
4340 qualified_field = '"%s"."%s"' % (self._table, order_field)
4341 order_field_column = self._columns[order_field]
4343 assert order_field_column._type == 'many2one', 'Invalid field passed to _generate_m2o_order_by()'
4344 if not order_field_column._classic_write and not getattr(order_field_column, 'store', False):
4345 _logger.debug("Many2one function/related fields must be stored " \
4346 "to be used as ordering fields! Ignoring sorting for %s.%s",
4347 self._name, order_field)
4350 # figure out the applicable order_by for the m2o
4351 dest_model = self.pool[order_field_column._obj]
4352 m2o_order = dest_model._order
4353 if not regex_order.match(m2o_order):
4354 # _order is complex, can't use it here, so we default to _rec_name
4355 m2o_order = dest_model._rec_name
4357 # extract the field names, to be able to qualify them and add desc/asc
4359 for order_part in m2o_order.split(","):
4360 m2o_order_list.append(order_part.strip().split(" ", 1)[0].strip())
4361 m2o_order = m2o_order_list
4363 # Join the dest m2o table if it's not joined yet. We use [LEFT] OUTER join here
4364 # as we don't want to exclude results that have NULL values for the m2o
4365 src_table, src_field = qualified_field.replace('"', '').split('.', 1)
4366 dst_alias, dst_alias_statement = query.add_join((src_table, dest_model._table, src_field, 'id', src_field), implicit=False, outer=True)
4367 qualify = lambda field: '"%s"."%s"' % (dst_alias, field)
4368 return map(qualify, m2o_order) if isinstance(m2o_order, list) else qualify(m2o_order)
4370 def _generate_order_by(self, order_spec, query):
4372 Attempt to consruct an appropriate ORDER BY clause based on order_spec, which must be
4373 a comma-separated list of valid field names, optionally followed by an ASC or DESC direction.
4375 :raise" except_orm in case order_spec is malformed
4377 order_by_clause = ''
4378 order_spec = order_spec or self._order
4380 order_by_elements = []
4381 self._check_qorder(order_spec)
4382 for order_part in order_spec.split(','):
4383 order_split = order_part.strip().split(' ')
4384 order_field = order_split[0].strip()
4385 order_direction = order_split[1].strip() if len(order_split) == 2 else ''
4387 if order_field == 'id':
4388 order_by_elements.append('"%s"."%s" %s' % (self._table, order_field, order_direction))
4389 elif order_field in self._columns:
4390 order_column = self._columns[order_field]
4391 if order_column._classic_read:
4392 inner_clause = '"%s"."%s"' % (self._table, order_field)
4393 elif order_column._type == 'many2one':
4394 inner_clause = self._generate_m2o_order_by(order_field, query)
4396 continue # ignore non-readable or "non-joinable" fields
4397 elif order_field in self._inherit_fields:
4398 parent_obj = self.pool[self._inherit_fields[order_field][3]]
4399 order_column = parent_obj._columns[order_field]
4400 if order_column._classic_read:
4401 inner_clause = self._inherits_join_calc(order_field, query)
4402 elif order_column._type == 'many2one':
4403 inner_clause = self._generate_m2o_order_by(order_field, query)
4405 continue # ignore non-readable or "non-joinable" fields
4407 raise ValueError( _("Sorting field %s not found on model %s") %( order_field, self._name))
4409 if isinstance(inner_clause, list):
4410 for clause in inner_clause:
4411 order_by_elements.append("%s %s" % (clause, order_direction))
4413 order_by_elements.append("%s %s" % (inner_clause, order_direction))
4414 if order_by_elements:
4415 order_by_clause = ",".join(order_by_elements)
4417 return order_by_clause and (' ORDER BY %s ' % order_by_clause) or ''
4419 def _search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False, access_rights_uid=None):
4421 Private implementation of search() method, allowing specifying the uid to use for the access right check.
4422 This is useful for example when filling in the selection list for a drop-down and avoiding access rights errors,
4423 by specifying ``access_rights_uid=1`` to bypass access rights check, but not ir.rules!
4424 This is ok at the security level because this method is private and not callable through XML-RPC.
4426 :param access_rights_uid: optional user ID to use when checking access rights
4427 (not for ir.rules, this is only for ir.model.access)
4431 self.check_access_rights(cr, access_rights_uid or user, 'read')
4433 # For transient models, restrict acces to the current user, except for the super-user
4434 if self.is_transient() and self._log_access and user != SUPERUSER_ID:
4435 args = expression.AND(([('create_uid', '=', user)], args or []))
4437 query = self._where_calc(cr, user, args, context=context)
4438 self._apply_ir_rules(cr, user, query, 'read', context=context)
4439 order_by = self._generate_order_by(order, query)
4440 from_clause, where_clause, where_clause_params = query.get_sql()
4442 limit_str = limit and ' limit %d' % limit or ''
4443 offset_str = offset and ' offset %d' % offset or ''
4444 where_str = where_clause and (" WHERE %s" % where_clause) or ''
4445 query_str = 'SELECT "%s".id FROM ' % self._table + from_clause + where_str + order_by + limit_str + offset_str
4448 # /!\ the main query must be executed as a subquery, otherwise
4449 # offset and limit apply to the result of count()!
4450 cr.execute('SELECT count(*) FROM (%s) AS count' % query_str, where_clause_params)
4454 cr.execute(query_str, where_clause_params)
4457 # TDE note: with auto_join, we could have several lines about the same result
4458 # i.e. a lead with several unread messages; we uniquify the result using
4459 # a fast way to do it while preserving order (http://www.peterbe.com/plog/uniqifiers-benchmark)
4460 def _uniquify_list(seq):
4462 return [x for x in seq if x not in seen and not seen.add(x)]
4464 return _uniquify_list([x[0] for x in res])
4466 # returns the different values ever entered for one field
4467 # this is used, for example, in the client when the user hits enter on
4469 def distinct_field_get(self, cr, uid, field, value, args=None, offset=0, limit=None):
4472 if field in self._inherit_fields:
4473 return self.pool[self._inherit_fields[field][0]].distinct_field_get(cr, uid, field, value, args, offset, limit)
4475 return self._columns[field].search(cr, self, args, field, value, offset, limit, uid)
4477 def copy_data(self, cr, uid, id, default=None, context=None):
4479 Copy given record's data with all its fields values
4481 :param cr: database cursor
4482 :param uid: current user id
4483 :param id: id of the record to copy
4484 :param default: field values to override in the original values of the copied record
4485 :type default: dictionary
4486 :param context: context arguments, like lang, time zone
4487 :type context: dictionary
4488 :return: dictionary containing all the field values
4494 # avoid recursion through already copied records in case of circular relationship
4495 seen_map = context.setdefault('__copy_data_seen', {})
4496 if id in seen_map.setdefault(self._name, []):
4498 seen_map[self._name].append(id)
4502 if 'state' not in default:
4503 if 'state' in self._defaults:
4504 if callable(self._defaults['state']):
4505 default['state'] = self._defaults['state'](self, cr, uid, context)
4507 default['state'] = self._defaults['state']
4509 # build a black list of fields that should not be copied
4510 blacklist = set(MAGIC_COLUMNS + ['parent_left', 'parent_right'])
4511 def blacklist_given_fields(obj):
4512 # blacklist the fields that are given by inheritance
4513 for other, field_to_other in obj._inherits.items():
4514 blacklist.add(field_to_other)
4515 if field_to_other in default:
4516 # all the fields of 'other' are given by the record: default[field_to_other],
4517 # except the ones redefined in self
4518 blacklist.update(set(self.pool[other]._all_columns) - set(self._columns))
4520 blacklist_given_fields(self.pool[other])
4521 # blacklist deprecated fields
4522 for name, field in obj._columns.items():
4523 if field.deprecated:
4526 blacklist_given_fields(self)
4529 fields_to_copy = dict((f,fi) for f, fi in self._all_columns.iteritems()
4532 if f not in blacklist)
4534 data = self.read(cr, uid, [id], fields_to_copy.keys(), context=context)
4538 raise IndexError( _("Record #%d of %s not found, cannot copy!") %( id, self._name))
4541 for f, colinfo in fields_to_copy.iteritems():
4542 field = colinfo.column
4543 if field._type == 'many2one':
4544 res[f] = data[f] and data[f][0]
4545 elif field._type == 'one2many':
4546 other = self.pool[field._obj]
4547 # duplicate following the order of the ids because we'll rely on
4548 # it later for copying translations in copy_translation()!
4549 lines = [other.copy_data(cr, uid, line_id, context=context) for line_id in sorted(data[f])]
4550 # the lines are duplicated using the wrong (old) parent, but then
4551 # are reassigned to the correct one thanks to the (0, 0, ...)
4552 res[f] = [(0, 0, line) for line in lines if line]
4553 elif field._type == 'many2many':
4554 res[f] = [(6, 0, data[f])]
4560 def copy_translations(self, cr, uid, old_id, new_id, context=None):
4564 # avoid recursion through already copied records in case of circular relationship
4565 seen_map = context.setdefault('__copy_translations_seen',{})
4566 if old_id in seen_map.setdefault(self._name,[]):
4568 seen_map[self._name].append(old_id)
4570 trans_obj = self.pool.get('ir.translation')
4571 # TODO it seems fields_get can be replaced by _all_columns (no need for translation)
4572 fields = self.fields_get(cr, uid, context=context)
4574 for field_name, field_def in fields.items():
4575 # removing the lang to compare untranslated values
4576 context_wo_lang = dict(context, lang=None)
4577 old_record, new_record = self.browse(cr, uid, [old_id, new_id], context=context_wo_lang)
4578 # we must recursively copy the translations for o2o and o2m
4579 if field_def['type'] == 'one2many':
4580 target_obj = self.pool[field_def['relation']]
4581 # here we rely on the order of the ids to match the translations
4582 # as foreseen in copy_data()
4583 old_children = sorted(r.id for r in old_record[field_name])
4584 new_children = sorted(r.id for r in new_record[field_name])
4585 for (old_child, new_child) in zip(old_children, new_children):
4586 target_obj.copy_translations(cr, uid, old_child, new_child, context=context)
4587 # and for translatable fields we keep them for copy
4588 elif field_def.get('translate'):
4589 if field_name in self._columns:
4590 trans_name = self._name + "," + field_name
4593 elif field_name in self._inherit_fields:
4594 trans_name = self._inherit_fields[field_name][0] + "," + field_name
4595 # get the id of the parent record to set the translation
4596 inherit_field_name = self._inherit_fields[field_name][1]
4597 target_id = new_record[inherit_field_name].id
4598 source_id = old_record[inherit_field_name].id
4602 trans_ids = trans_obj.search(cr, uid, [
4603 ('name', '=', trans_name),
4604 ('res_id', '=', source_id)
4606 user_lang = context.get('lang')
4607 for record in trans_obj.read(cr, uid, trans_ids, context=context):
4609 # remove source to avoid triggering _set_src
4610 del record['source']
4611 record.update({'res_id': target_id})
4612 if user_lang and user_lang == record['lang']:
4613 # 'source' to force the call to _set_src
4614 # 'value' needed if value is changed in copy(), want to see the new_value
4615 record['source'] = old_record[field_name]
4616 record['value'] = new_record[field_name]
4617 trans_obj.create(cr, uid, record, context=context)
4619 @api.returns('self', lambda value: value.id)
4620 def copy(self, cr, uid, id, default=None, context=None):
4622 Duplicate record with given id updating it with default values
4624 :param cr: database cursor
4625 :param uid: current user id
4626 :param id: id of the record to copy
4627 :param default: dictionary of field values to override in the original values of the copied record, e.g: ``{'field_name': overriden_value, ...}``
4628 :type default: dictionary
4629 :param context: context arguments, like lang, time zone
4630 :type context: dictionary
4631 :return: id of the newly created record
4636 context = context.copy()
4637 data = self.copy_data(cr, uid, id, default, context)
4638 new_id = self.create(cr, uid, data, context)
4639 self.copy_translations(cr, uid, id, new_id, context)
4643 @api.returns('self')
4645 """ Return the subset of records in `self` that exist, and mark deleted
4646 records as such in cache. It can be used as a test on records::
4651 By convention, new records are returned as existing.
4653 ids = filter(None, self._ids) # ids to check in database
4656 query = """SELECT id FROM "%s" WHERE id IN %%s""" % self._table
4657 self._cr.execute(query, (ids,))
4658 ids = ([r[0] for r in self._cr.fetchall()] + # ids in database
4659 [id for id in self._ids if not id]) # new ids
4660 existing = self.browse(ids)
4661 if len(existing) < len(self):
4662 # mark missing records in cache with a failed value
4663 exc = MissingError(_("Record does not exist or has been deleted."))
4664 (self - existing)._cache.update(FailedValue(exc))
4667 def check_recursion(self, cr, uid, ids, context=None, parent=None):
4668 _logger.warning("You are using deprecated %s.check_recursion(). Please use the '_check_recursion()' instead!" % \
4670 assert parent is None or parent in self._columns or parent in self._inherit_fields,\
4671 "The 'parent' parameter passed to check_recursion() must be None or a valid field name"
4672 return self._check_recursion(cr, uid, ids, context, parent)
4674 def _check_recursion(self, cr, uid, ids, context=None, parent=None):
4676 Verifies that there is no loop in a hierarchical structure of records,
4677 by following the parent relationship using the **parent** field until a loop
4678 is detected or until a top-level record is found.
4680 :param cr: database cursor
4681 :param uid: current user id
4682 :param ids: list of ids of records to check
4683 :param parent: optional parent field name (default: ``self._parent_name = parent_id``)
4684 :return: **True** if the operation can proceed safely, or **False** if an infinite loop is detected.
4687 parent = self._parent_name
4689 # must ignore 'active' flag, ir.rules, etc. => direct SQL query
4690 query = 'SELECT "%s" FROM "%s" WHERE id = %%s' % (parent, self._table)
4693 while current_id is not None:
4694 cr.execute(query, (current_id,))
4695 result = cr.fetchone()
4696 current_id = result[0] if result else None
4697 if current_id == id:
4701 def _check_m2m_recursion(self, cr, uid, ids, field_name):
4703 Verifies that there is no loop in a hierarchical structure of records,
4704 by following the parent relationship using the **parent** field until a loop
4705 is detected or until a top-level record is found.
4707 :param cr: database cursor
4708 :param uid: current user id
4709 :param ids: list of ids of records to check
4710 :param field_name: field to check
4711 :return: **True** if the operation can proceed safely, or **False** if an infinite loop is detected.
4714 field = self._all_columns.get(field_name)
4715 field = field.column if field else None
4716 if not field or field._type != 'many2many' or field._obj != self._name:
4717 # field must be a many2many on itself
4718 raise ValueError('invalid field_name: %r' % (field_name,))
4720 query = 'SELECT distinct "%s" FROM "%s" WHERE "%s" IN %%s' % (field._id2, field._rel, field._id1)
4724 for i in range(0, len(ids_parent), cr.IN_MAX):
4726 sub_ids_parent = ids_parent[i:j]
4727 cr.execute(query, (tuple(sub_ids_parent),))
4728 ids_parent2.extend(filter(None, map(lambda x: x[0], cr.fetchall())))
4729 ids_parent = ids_parent2
4730 for i in ids_parent:
4735 def _get_external_ids(self, cr, uid, ids, *args, **kwargs):
4736 """Retrieve the External ID(s) of any database record.
4738 **Synopsis**: ``_get_xml_ids(cr, uid, ids) -> { 'id': ['module.xml_id'] }``
4740 :return: map of ids to the list of their fully qualified External IDs
4741 in the form ``module.key``, or an empty list when there's no External
4742 ID for a record, e.g.::
4744 { 'id': ['module.ext_id', 'module.ext_id_bis'],
4747 ir_model_data = self.pool.get('ir.model.data')
4748 data_ids = ir_model_data.search(cr, uid, [('model', '=', self._name), ('res_id', 'in', ids)])
4749 data_results = ir_model_data.read(cr, uid, data_ids, ['module', 'name', 'res_id'])
4752 # can't use dict.fromkeys() as the list would be shared!
4754 for record in data_results:
4755 result[record['res_id']].append('%(module)s.%(name)s' % record)
4758 def get_external_id(self, cr, uid, ids, *args, **kwargs):
4759 """Retrieve the External ID of any database record, if there
4760 is one. This method works as a possible implementation
4761 for a function field, to be able to add it to any
4762 model object easily, referencing it as ``Model.get_external_id``.
4764 When multiple External IDs exist for a record, only one
4765 of them is returned (randomly).
4767 :return: map of ids to their fully qualified XML ID,
4768 defaulting to an empty string when there's none
4769 (to be usable as a function field),
4772 { 'id': 'module.ext_id',
4775 results = self._get_xml_ids(cr, uid, ids)
4776 for k, v in results.iteritems():
4783 # backwards compatibility
4784 get_xml_id = get_external_id
4785 _get_xml_ids = _get_external_ids
4787 def print_report(self, cr, uid, ids, name, data, context=None):
4789 Render the report `name` for the given IDs. The report must be defined
4790 for this model, not another.
4792 report = self.pool['ir.actions.report.xml']._lookup_report(cr, name)
4793 assert self._name == report.table
4794 return report.create(cr, uid, ids, data, context)
4798 def is_transient(cls):
4799 """ Return whether the model is transient.
4801 See :class:`TransientModel`.
4804 return cls._transient
4806 def _transient_clean_rows_older_than(self, cr, seconds):
4807 assert self._transient, "Model %s is not transient, it cannot be vacuumed!" % self._name
4808 # Never delete rows used in last 5 minutes
4809 seconds = max(seconds, 300)
4810 query = ("SELECT id FROM " + self._table + " WHERE"
4811 " COALESCE(write_date, create_date, (now() at time zone 'UTC'))::timestamp"
4812 " < ((now() at time zone 'UTC') - interval %s)")
4813 cr.execute(query, ("%s seconds" % seconds,))
4814 ids = [x[0] for x in cr.fetchall()]
4815 self.unlink(cr, SUPERUSER_ID, ids)
4817 def _transient_clean_old_rows(self, cr, max_count):
4818 # Check how many rows we have in the table
4819 cr.execute("SELECT count(*) AS row_count FROM " + self._table)
4821 if res[0][0] <= max_count:
4822 return # max not reached, nothing to do
4823 self._transient_clean_rows_older_than(cr, 300)
4825 def _transient_vacuum(self, cr, uid, force=False):
4826 """Clean the transient records.
4828 This unlinks old records from the transient model tables whenever the
4829 "_transient_max_count" or "_max_age" conditions (if any) are reached.
4830 Actual cleaning will happen only once every "_transient_check_time" calls.
4831 This means this method can be called frequently called (e.g. whenever
4832 a new record is created).
4833 Example with both max_hours and max_count active:
4834 Suppose max_hours = 0.2 (e.g. 12 minutes), max_count = 20, there are 55 rows in the
4835 table, 10 created/changed in the last 5 minutes, an additional 12 created/changed between
4836 5 and 10 minutes ago, the rest created/changed more then 12 minutes ago.
4837 - age based vacuum will leave the 22 rows created/changed in the last 12 minutes
4838 - count based vacuum will wipe out another 12 rows. Not just 2, otherwise each addition
4839 would immediately cause the maximum to be reached again.
4840 - the 10 rows that have been created/changed the last 5 minutes will NOT be deleted
4842 assert self._transient, "Model %s is not transient, it cannot be vacuumed!" % self._name
4843 _transient_check_time = 20 # arbitrary limit on vacuum executions
4844 self._transient_check_count += 1
4845 if not force and (self._transient_check_count < _transient_check_time):
4846 return True # no vacuum cleaning this time
4847 self._transient_check_count = 0
4849 # Age-based expiration
4850 if self._transient_max_hours:
4851 self._transient_clean_rows_older_than(cr, self._transient_max_hours * 60 * 60)
4853 # Count-based expiration
4854 if self._transient_max_count:
4855 self._transient_clean_old_rows(cr, self._transient_max_count)
4859 def resolve_2many_commands(self, cr, uid, field_name, commands, fields=None, context=None):
4860 """ Serializes one2many and many2many commands into record dictionaries
4861 (as if all the records came from the database via a read()). This
4862 method is aimed at onchange methods on one2many and many2many fields.
4864 Because commands might be creation commands, not all record dicts
4865 will contain an ``id`` field. Commands matching an existing record
4866 will have an ``id``.
4868 :param field_name: name of the one2many or many2many field matching the commands
4869 :type field_name: str
4870 :param commands: one2many or many2many commands to execute on ``field_name``
4871 :type commands: list((int|False, int|False, dict|False))
4872 :param fields: list of fields to read from the database, when applicable
4873 :type fields: list(str)
4874 :returns: records in a shape similar to that returned by ``read()``
4875 (except records may be missing the ``id`` field if they don't exist in db)
4878 result = [] # result (list of dict)
4879 record_ids = [] # ids of records to read
4880 updates = {} # {id: dict} of updates on particular records
4882 for command in commands or []:
4883 if not isinstance(command, (list, tuple)):
4884 record_ids.append(command)
4885 elif command[0] == 0:
4886 result.append(command[2])
4887 elif command[0] == 1:
4888 record_ids.append(command[1])
4889 updates.setdefault(command[1], {}).update(command[2])
4890 elif command[0] in (2, 3):
4891 record_ids = [id for id in record_ids if id != command[1]]
4892 elif command[0] == 4:
4893 record_ids.append(command[1])
4894 elif command[0] == 5:
4895 result, record_ids = [], []
4896 elif command[0] == 6:
4897 result, record_ids = [], list(command[2])
4899 # read the records and apply the updates
4900 other_model = self.pool[self._all_columns[field_name].column._obj]
4901 for record in other_model.read(cr, uid, record_ids, fields=fields, context=context):
4902 record.update(updates.get(record['id'], {}))
4903 result.append(record)
4907 # for backward compatibility
4908 resolve_o2m_commands_to_record_dicts = resolve_2many_commands
4910 def search_read(self, cr, uid, domain=None, fields=None, offset=0, limit=None, order=None, context=None):
4912 Performs a ``search()`` followed by a ``read()``.
4914 :param cr: database cursor
4915 :param user: current user id
4916 :param domain: Search domain, see ``args`` parameter in ``search()``. Defaults to an empty domain that will match all records.
4917 :param fields: List of fields to read, see ``fields`` parameter in ``read()``. Defaults to all fields.
4918 :param offset: Number of records to skip, see ``offset`` parameter in ``search()``. Defaults to 0.
4919 :param limit: Maximum number of records to return, see ``limit`` parameter in ``search()``. Defaults to no limit.
4920 :param order: Columns to sort result, see ``order`` parameter in ``search()``. Defaults to no sort.
4921 :param context: context arguments.
4922 :return: List of dictionaries containing the asked fields.
4923 :rtype: List of dictionaries.
4926 record_ids = self.search(cr, uid, domain or [], offset=offset, limit=limit, order=order, context=context)
4930 if fields and fields == ['id']:
4931 # shortcut read if we only want the ids
4932 return [{'id': id} for id in record_ids]
4934 # read() ignores active_test, but it would forward it to any downstream search call
4935 # (e.g. for x2m or function fields), and this is not the desired behavior, the flag
4936 # was presumably only meant for the main search().
4937 # TODO: Move this to read() directly?
4938 read_ctx = dict(context or {})
4939 read_ctx.pop('active_test', None)
4941 result = self.read(cr, uid, record_ids, fields, context=read_ctx)
4942 if len(result) <= 1:
4946 index = dict((r['id'], r) for r in result)
4947 return [index[x] for x in record_ids if x in index]
4949 def _register_hook(self, cr):
4950 """ stuff to do right after the registry is built """
4953 def _patch_method(self, name, method):
4954 """ Monkey-patch a method for all instances of this model. This replaces
4955 the method called `name` by `method` in `self`'s class.
4956 The original method is then accessible via ``method.origin``, and it
4957 can be restored with :meth:`~._revert_method`.
4962 def do_write(self, values):
4963 # do stuff, and call the original method
4964 return do_write.origin(self, values)
4966 # patch method write of model
4967 model._patch_method('write', do_write)
4969 # this will call do_write
4970 records = model.search([...])
4973 # restore the original method
4974 model._revert_method('write')
4977 origin = getattr(cls, name)
4978 method.origin = origin
4979 # propagate decorators from origin to method, and apply api decorator
4980 wrapped = api.guess(api.propagate(origin, method))
4981 wrapped.origin = origin
4982 setattr(cls, name, wrapped)
4984 def _revert_method(self, name):
4985 """ Revert the original method of `self` called `name`.
4986 See :meth:`~._patch_method`.
4989 method = getattr(cls, name)
4990 setattr(cls, name, method.origin)
4995 # An instance represents an ordered collection of records in a given
4996 # execution environment. The instance object refers to the environment, and
4997 # the records themselves are represented by their cache dictionary. The 'id'
4998 # of each record is found in its corresponding cache dictionary.
5000 # This design has the following advantages:
5001 # - cache access is direct and thus fast;
5002 # - one can consider records without an 'id' (see new records);
5003 # - the global cache is only an index to "resolve" a record 'id'.
5007 def _browse(cls, env, ids):
5008 """ Create an instance attached to `env`; `ids` is a tuple of record
5011 records = object.__new__(cls)
5014 env.prefetch[cls._name].update(ids)
5018 def browse(self, arg=None):
5019 """ Return an instance corresponding to `arg` and attached to
5020 `self.env`; `arg` is either a record id, or a collection of record ids.
5022 ids = _normalize_ids(arg)
5023 #assert all(isinstance(id, IdType) for id in ids), "Browsing invalid ids: %s" % ids
5024 return self._browse(self.env, ids)
5027 def browse(self, cr, uid, arg=None, context=None):
5028 ids = _normalize_ids(arg)
5029 #assert all(isinstance(id, IdType) for id in ids), "Browsing invalid ids: %s" % ids
5030 return self._browse(Environment(cr, uid, context or {}), ids)
5033 # Internal properties, for manipulating the instance's implementation
5038 """ Return the list of non-false record ids of this instance. """
5039 return filter(None, list(self._ids))
5041 # backward-compatibility with former browse records
5042 _cr = property(lambda self: self.env.cr)
5043 _uid = property(lambda self: self.env.uid)
5044 _context = property(lambda self: self.env.context)
5047 # Conversion methods
5050 def ensure_one(self):
5051 """ Return `self` if it is a singleton instance, otherwise raise an
5056 raise except_orm("ValueError", "Expected singleton: %s" % self)
5058 def with_env(self, env):
5059 """ Return an instance equivalent to `self` attached to `env`.
5061 return self._browse(env, self._ids)
5063 def sudo(self, user=SUPERUSER_ID):
5064 """ Return an instance equivalent to `self` attached to an environment
5065 based on `self.env` with the given `user`.
5067 return self.with_env(self.env(user=user))
5069 def with_context(self, *args, **kwargs):
5070 """ Return an instance equivalent to `self` attached to an environment
5071 based on `self.env` with another context. The context is given by
5072 `self._context` or the positional argument if given, and modified by
5075 context = dict(args[0] if args else self._context, **kwargs)
5076 return self.with_env(self.env(context=context))
5078 def _convert_to_cache(self, values):
5079 """ Convert the `values` dictionary into cached values. """
5080 fields = self._fields
5082 name: fields[name].convert_to_cache(value, self.env)
5083 for name, value in values.iteritems()
5087 def _convert_to_write(self, values):
5088 """ Convert the `values` dictionary into the format of :meth:`write`. """
5089 fields = self._fields
5091 (name, fields[name].convert_to_write(value))
5092 for name, value in values.iteritems()
5093 if name in self._fields
5097 # Record traversal and update
5100 def _mapped_func(self, func):
5101 """ Apply function `func` on all records in `self`, and return the
5102 result as a list or a recordset (if `func` return recordsets).
5104 vals = [func(rec) for rec in self]
5105 val0 = vals[0] if vals else func(self)
5106 if isinstance(val0, BaseModel):
5107 return reduce(operator.or_, vals, val0)
5110 def mapped(self, func):
5111 """ Apply `func` on all records in `self`, and return the result as a
5112 list or a recordset (if `func` return recordsets). In the latter
5113 case, the order of the returned recordset is arbritrary.
5115 :param func: a function or a dot-separated sequence of field names
5117 if isinstance(func, basestring):
5119 for name in func.split('.'):
5120 recs = recs._mapped_func(operator.itemgetter(name))
5123 return self._mapped_func(func)
5125 def _mapped_cache(self, name_seq):
5126 """ Same as `~.mapped`, but `name_seq` is a dot-separated sequence of
5127 field names, and only cached values are used.
5130 for name in name_seq.split('.'):
5131 field = recs._fields[name]
5132 null = field.null(self.env)
5133 recs = recs.mapped(lambda rec: rec._cache.get(field, null))
5136 def filtered(self, func):
5137 """ Select the records in `self` such that `func(rec)` is true, and
5138 return them as a recordset.
5140 :param func: a function or a dot-separated sequence of field names
5142 if isinstance(func, basestring):
5144 func = lambda rec: filter(None, rec.mapped(name))
5145 return self.browse([rec.id for rec in self if func(rec)])
5147 def sorted(self, key=None):
5148 """ Return the recordset `self` ordered by `key` """
5150 return self.search([('id', 'in', self.ids)])
5152 return self.browse(map(int, sorted(self, key=key)))
5154 def update(self, values):
5155 """ Update record `self[0]` with `values`. """
5156 for name, value in values.iteritems():
5160 # New records - represent records that do not exist in the database yet;
5161 # they are used to compute default values and perform onchanges.
5165 def new(self, values={}):
5166 """ Return a new record instance attached to `self.env`, and
5167 initialized with the `values` dictionary. Such a record does not
5168 exist in the database.
5170 record = self.browse([NewId()])
5171 record._cache.update(self._convert_to_cache(values))
5173 if record.env.in_onchange:
5174 # The cache update does not set inverse fields, so do it manually.
5175 # This is useful for computing a function field on secondary
5176 # records, if that field depends on the main record.
5178 field = self._fields.get(name)
5179 if field and field.inverse_field:
5180 field.inverse_field._update(record[name], record)
5185 # Dirty flag, to mark records modified (in draft mode)
5190 """ Return whether any record in `self` is dirty. """
5191 dirty = self.env.dirty
5192 return any(record in dirty for record in self)
5195 def _dirty(self, value):
5196 """ Mark the records in `self` as dirty. """
5198 map(self.env.dirty.add, self)
5200 map(self.env.dirty.discard, self)
5206 def __nonzero__(self):
5207 """ Test whether `self` is nonempty. """
5208 return bool(getattr(self, '_ids', True))
5211 """ Return the size of `self`. """
5212 return len(self._ids)
5215 """ Return an iterator over `self`. """
5216 for id in self._ids:
5217 yield self._browse(self.env, (id,))
5219 def __contains__(self, item):
5220 """ Test whether `item` is a subset of `self` or a field name. """
5221 if isinstance(item, BaseModel):
5222 if self._name == item._name:
5223 return set(item._ids) <= set(self._ids)
5224 raise except_orm("ValueError", "Mixing apples and oranges: %s in %s" % (item, self))
5225 if isinstance(item, basestring):
5226 return item in self._fields
5227 return item in self.ids
5229 def __add__(self, other):
5230 """ Return the concatenation of two recordsets. """
5231 if not isinstance(other, BaseModel) or self._name != other._name:
5232 raise except_orm("ValueError", "Mixing apples and oranges: %s + %s" % (self, other))
5233 return self.browse(self._ids + other._ids)
5235 def __sub__(self, other):
5236 """ Return the recordset of all the records in `self` that are not in `other`. """
5237 if not isinstance(other, BaseModel) or self._name != other._name:
5238 raise except_orm("ValueError", "Mixing apples and oranges: %s - %s" % (self, other))
5239 other_ids = set(other._ids)
5240 return self.browse([id for id in self._ids if id not in other_ids])
5242 def __and__(self, other):
5243 """ Return the intersection of two recordsets.
5244 Note that recordset order is not preserved.
5246 if not isinstance(other, BaseModel) or self._name != other._name:
5247 raise except_orm("ValueError", "Mixing apples and oranges: %s & %s" % (self, other))
5248 return self.browse(set(self._ids) & set(other._ids))
5250 def __or__(self, other):
5251 """ Return the union of two recordsets.
5252 Note that recordset order is not preserved.
5254 if not isinstance(other, BaseModel) or self._name != other._name:
5255 raise except_orm("ValueError", "Mixing apples and oranges: %s | %s" % (self, other))
5256 return self.browse(set(self._ids) | set(other._ids))
5258 def __eq__(self, other):
5259 """ Test whether two recordsets are equivalent (up to reordering). """
5260 if not isinstance(other, BaseModel):
5262 _logger.warning("Comparing apples and oranges: %s == %s", self, other)
5264 return self._name == other._name and set(self._ids) == set(other._ids)
5266 def __ne__(self, other):
5267 return not self == other
5269 def __lt__(self, other):
5270 if not isinstance(other, BaseModel) or self._name != other._name:
5271 raise except_orm("ValueError", "Mixing apples and oranges: %s < %s" % (self, other))
5272 return set(self._ids) < set(other._ids)
5274 def __le__(self, other):
5275 if not isinstance(other, BaseModel) or self._name != other._name:
5276 raise except_orm("ValueError", "Mixing apples and oranges: %s <= %s" % (self, other))
5277 return set(self._ids) <= set(other._ids)
5279 def __gt__(self, other):
5280 if not isinstance(other, BaseModel) or self._name != other._name:
5281 raise except_orm("ValueError", "Mixing apples and oranges: %s > %s" % (self, other))
5282 return set(self._ids) > set(other._ids)
5284 def __ge__(self, other):
5285 if not isinstance(other, BaseModel) or self._name != other._name:
5286 raise except_orm("ValueError", "Mixing apples and oranges: %s >= %s" % (self, other))
5287 return set(self._ids) >= set(other._ids)
5293 return "%s%s" % (self._name, getattr(self, '_ids', ""))
5295 def __unicode__(self):
5296 return unicode(str(self))
5301 if hasattr(self, '_ids'):
5302 return hash((self._name, frozenset(self._ids)))
5304 return hash(self._name)
5306 def __getitem__(self, key):
5307 """ If `key` is an integer or a slice, return the corresponding record
5308 selection as an instance (attached to `self.env`).
5309 Otherwise read the field `key` of the first record in `self`.
5313 inst = model.search(dom) # inst is a recordset
5314 r4 = inst[3] # fourth record in inst
5315 rs = inst[10:20] # subset of inst
5316 nm = rs['name'] # name of first record in inst
5318 if isinstance(key, basestring):
5319 # important: one must call the field's getter
5320 return self._fields[key].__get__(self, type(self))
5321 elif isinstance(key, slice):
5322 return self._browse(self.env, self._ids[key])
5324 return self._browse(self.env, (self._ids[key],))
5326 def __setitem__(self, key, value):
5327 """ Assign the field `key` to `value` in record `self`. """
5328 # important: one must call the field's setter
5329 return self._fields[key].__set__(self, value)
5332 # Cache and recomputation management
5337 """ Return the cache of `self`, mapping field names to values. """
5338 return RecordCache(self)
5341 def _in_cache_without(self, field):
5342 """ Make sure `self` is present in cache (for prefetching), and return
5343 the records of model `self` in cache that have no value for `field`
5344 (:class:`Field` instance).
5347 prefetch_ids = env.prefetch[self._name]
5348 prefetch_ids.update(self._ids)
5349 ids = filter(None, prefetch_ids - set(env.cache[field]))
5350 return self.browse(ids)
5354 """ Clear the records cache.
5357 The record cache is automatically invalidated.
5359 self.invalidate_cache()
5362 def invalidate_cache(self, fnames=None, ids=None):
5363 """ Invalidate the record caches after some records have been modified.
5364 If both `fnames` and `ids` are ``None``, the whole cache is cleared.
5366 :param fnames: the list of modified fields, or ``None`` for all fields
5367 :param ids: the list of modified record ids, or ``None`` for all
5371 return self.env.invalidate_all()
5372 fields = self._fields.values()
5374 fields = map(self._fields.__getitem__, fnames)
5376 # invalidate fields and inverse fields, too
5377 spec = [(f, ids) for f in fields] + \
5378 [(f.inverse_field, None) for f in fields if f.inverse_field]
5379 self.env.invalidate(spec)
5382 def modified(self, fnames):
5383 """ Notify that fields have been modified on `self`. This invalidates
5384 the cache, and prepares the recomputation of stored function fields
5385 (new-style fields only).
5387 :param fnames: iterable of field names that have been modified on
5390 # each field knows what to invalidate and recompute
5392 for fname in fnames:
5393 spec += self._fields[fname].modified(self)
5397 for env in self.env.all
5398 for field in env.cache
5400 # invalidate non-stored fields.function which are currently cached
5401 spec += [(f, None) for f in self.pool.pure_function_fields
5402 if f in cached_fields]
5404 self.env.invalidate(spec)
5406 def _recompute_check(self, field):
5407 """ If `field` must be recomputed on some record in `self`, return the
5408 corresponding records that must be recomputed.
5410 for env in [self.env] + list(iter(self.env.all)):
5411 if env.todo.get(field) and env.todo[field] & self:
5412 return env.todo[field]
5414 def _recompute_todo(self, field):
5415 """ Mark `field` to be recomputed. """
5416 todo = self.env.todo
5417 todo[field] = (todo.get(field) or self.browse()) | self
5419 def _recompute_done(self, field):
5420 """ Mark `field` as being recomputed. """
5421 todo = self.env.todo
5423 recs = todo.pop(field) - self
5428 def recompute(self):
5429 """ Recompute stored function fields. The fields and records to
5430 recompute have been determined by method :meth:`modified`.
5432 for env in list(iter(self.env.all)):
5434 field, recs = next(env.todo.iteritems())
5435 # evaluate the fields to recompute, and save them to database
5436 for rec, rec1 in zip(recs, recs.with_context(recompute=False)):
5438 values = rec._convert_to_write({
5439 f.name: rec[f.name] for f in field.computed_fields
5442 except MissingError:
5444 # mark the computed fields as done
5445 map(recs._recompute_done, field.computed_fields)
5448 # Generic onchange method
5451 def _has_onchange(self, field, other_fields):
5452 """ Return whether `field` should trigger an onchange event in the
5453 presence of `other_fields`.
5455 # test whether self has an onchange method for field, or field is a
5456 # dependency of any field in other_fields
5457 return field.name in self._onchange_methods or \
5458 any(dep in other_fields for dep in field.dependents)
5461 def _onchange_spec(self, view_info=None):
5462 """ Return the onchange spec from a view description; if not given, the
5463 result of ``self.fields_view_get()`` is used.
5467 # for traversing the XML arch and populating result
5468 def process(node, info, prefix):
5469 if node.tag == 'field':
5470 name = node.attrib['name']
5471 names = "%s.%s" % (prefix, name) if prefix else name
5472 if not result.get(names):
5473 result[names] = node.attrib.get('on_change')
5474 # traverse the subviews included in relational fields
5475 for subinfo in info['fields'][name].get('views', {}).itervalues():
5476 process(etree.fromstring(subinfo['arch']), subinfo, names)
5479 process(child, info, prefix)
5481 if view_info is None:
5482 view_info = self.fields_view_get()
5483 process(etree.fromstring(view_info['arch']), view_info, '')
5486 def _onchange_eval(self, field_name, onchange, result):
5487 """ Apply onchange method(s) for field `field_name` with spec `onchange`
5488 on record `self`. Value assignments are applied on `self`, while
5489 domain and warning messages are put in dictionary `result`.
5491 onchange = onchange.strip()
5494 if onchange in ("1", "true"):
5495 for method in self._onchange_methods.get(field_name, ()):
5496 method_res = method(self)
5499 if 'domain' in method_res:
5500 result.setdefault('domain', {}).update(method_res['domain'])
5501 if 'warning' in method_res:
5502 result['warning'] = method_res['warning']
5506 match = onchange_v7.match(onchange)
5508 method, params = match.groups()
5510 # evaluate params -> tuple
5511 global_vars = {'context': self._context, 'uid': self._uid}
5512 if self._context.get('field_parent'):
5513 class RawRecord(object):
5514 def __init__(self, record):
5515 self._record = record
5516 def __getattr__(self, name):
5517 field = self._record._fields[name]
5518 value = self._record[name]
5519 return field.convert_to_onchange(value)
5520 record = self[self._context['field_parent']]
5521 global_vars['parent'] = RawRecord(record)
5523 key: self._fields[key].convert_to_onchange(val)
5524 for key, val in self._cache.iteritems()
5526 params = eval("[%s]" % params, global_vars, field_vars)
5528 # call onchange method
5529 args = (self._cr, self._uid, self._origin.ids) + tuple(params)
5530 method_res = getattr(self._model, method)(*args)
5531 if not isinstance(method_res, dict):
5533 if 'value' in method_res:
5534 method_res['value'].pop('id', None)
5535 self.update(self._convert_to_cache(method_res['value']))
5536 if 'domain' in method_res:
5537 result.setdefault('domain', {}).update(method_res['domain'])
5538 if 'warning' in method_res:
5539 result['warning'] = method_res['warning']
5542 def onchange(self, values, field_name, field_onchange):
5543 """ Perform an onchange on the given field.
5545 :param values: dictionary mapping field names to values, giving the
5546 current state of modification
5547 :param field_name: name of the modified field_name
5548 :param field_onchange: dictionary mapping field names to their
5553 if field_name and field_name not in self._fields:
5556 # determine subfields for field.convert_to_write() below
5558 subfields = defaultdict(set)
5559 for dotname in field_onchange:
5561 secondary.append(dotname)
5562 name, subname = dotname.split('.')
5563 subfields[name].add(subname)
5565 # create a new record with values, and attach `self` to it
5566 with env.do_in_onchange():
5567 record = self.new(values)
5568 values = dict(record._cache)
5569 # attach `self` with a different context (for cache consistency)
5570 record._origin = self.with_context(__onchange=True)
5572 # determine which field should be triggered an onchange
5573 todo = set([field_name]) if field_name else set(values)
5576 # dummy assignment: trigger invalidations on the record
5578 record[name] = record[name]
5580 result = {'value': {}}
5588 with env.do_in_onchange():
5589 # apply field-specific onchange methods
5590 if field_onchange.get(name):
5591 record._onchange_eval(name, field_onchange[name], result)
5593 # force re-evaluation of function fields on secondary records
5594 for field_seq in secondary:
5595 record.mapped(field_seq)
5597 # determine which fields have been modified
5598 for name, oldval in values.iteritems():
5599 newval = record[name]
5600 if newval != oldval or getattr(newval, '_dirty', False):
5601 field = self._fields[name]
5602 result['value'][name] = field.convert_to_write(
5603 newval, record._origin, subfields[name],
5607 # At the moment, the client does not support updates on a *2many field
5608 # while this one is modified by the user.
5609 if field_name and self._fields[field_name].type in ('one2many', 'many2many'):
5610 result['value'].pop(field_name, None)
5615 class RecordCache(MutableMapping):
5616 """ Implements a proxy dictionary to read/update the cache of a record.
5617 Upon iteration, it looks like a dictionary mapping field names to
5618 values. However, fields may be used as keys as well.
5620 def __init__(self, records):
5621 self._recs = records
5623 def __contains__(self, field):
5624 """ Return whether `records[0]` has a value for `field` in cache. """
5625 if isinstance(field, basestring):
5626 field = self._recs._fields[field]
5627 return self._recs.id in self._recs.env.cache[field]
5629 def __getitem__(self, field):
5630 """ Return the cached value of `field` for `records[0]`. """
5631 if isinstance(field, basestring):
5632 field = self._recs._fields[field]
5633 value = self._recs.env.cache[field][self._recs.id]
5634 return value.get() if isinstance(value, SpecialValue) else value
5636 def __setitem__(self, field, value):
5637 """ Assign the cached value of `field` for all records in `records`. """
5638 if isinstance(field, basestring):
5639 field = self._recs._fields[field]
5640 values = dict.fromkeys(self._recs._ids, value)
5641 self._recs.env.cache[field].update(values)
5643 def update(self, *args, **kwargs):
5644 """ Update the cache of all records in `records`. If the argument is a
5645 `SpecialValue`, update all fields (except "magic" columns).
5647 if args and isinstance(args[0], SpecialValue):
5648 values = dict.fromkeys(self._recs._ids, args[0])
5649 for name, field in self._recs._fields.iteritems():
5650 if name not in MAGIC_COLUMNS:
5651 self._recs.env.cache[field].update(values)
5653 return super(RecordCache, self).update(*args, **kwargs)
5655 def __delitem__(self, field):
5656 """ Remove the cached value of `field` for all `records`. """
5657 if isinstance(field, basestring):
5658 field = self._recs._fields[field]
5659 field_cache = self._recs.env.cache[field]
5660 for id in self._recs._ids:
5661 field_cache.pop(id, None)
5664 """ Iterate over the field names with a regular value in cache. """
5665 cache, id = self._recs.env.cache, self._recs.id
5666 dummy = SpecialValue(None)
5667 for name, field in self._recs._fields.iteritems():
5668 if name not in MAGIC_COLUMNS and \
5669 not isinstance(cache[field].get(id, dummy), SpecialValue):
5673 """ Return the number of fields with a regular value in cache. """
5674 return sum(1 for name in self)
5676 class Model(BaseModel):
5677 """Main super-class for regular database-persisted OpenERP models.
5679 OpenERP models are created by inheriting from this class::
5684 The system will later instantiate the class once per database (on
5685 which the class' module is installed).
5688 _register = False # not visible in ORM registry, meant to be python-inherited only
5689 _transient = False # True in a TransientModel
5691 class TransientModel(BaseModel):
5692 """Model super-class for transient records, meant to be temporarily
5693 persisted, and regularly vaccuum-cleaned.
5695 A TransientModel has a simplified access rights management,
5696 all users can create new records, and may only access the
5697 records they created. The super-user has unrestricted access
5698 to all TransientModel records.
5701 _register = False # not visible in ORM registry, meant to be python-inherited only
5704 class AbstractModel(BaseModel):
5705 """Abstract Model super-class for creating an abstract class meant to be
5706 inherited by regular models (Models or TransientModels) but not meant to
5707 be usable on its own, or persisted.
5709 Technical note: we don't want to make AbstractModel the super-class of
5710 Model or BaseModel because it would not make sense to put the main
5711 definition of persistence methods such as create() in it, and still we
5712 should be able to override them within an AbstractModel.
5714 _auto = False # don't create any database backend for AbstractModels
5715 _register = False # not visible in ORM registry, meant to be python-inherited only
5718 def itemgetter_tuple(items):
5719 """ Fixes itemgetter inconsistency (useful in some cases) of not returning
5720 a tuple if len(items) == 1: always returns an n-tuple where n = len(items)
5725 return lambda gettable: (gettable[items[0]],)
5726 return operator.itemgetter(*items)
5728 def convert_pgerror_23502(model, fields, info, e):
5729 m = re.match(r'^null value in column "(?P<field>\w+)" violates '
5730 r'not-null constraint\n',
5732 field_name = m and m.group('field')
5733 if not m or field_name not in fields:
5734 return {'message': unicode(e)}
5735 message = _(u"Missing required value for the field '%s'.") % field_name
5736 field = fields.get(field_name)
5738 message = _(u"Missing required value for the field '%s' (%s)") % (field['string'], field_name)
5741 'field': field_name,
5744 def convert_pgerror_23505(model, fields, info, e):
5745 m = re.match(r'^duplicate key (?P<field>\w+) violates unique constraint',
5747 field_name = m and m.group('field')
5748 if not m or field_name not in fields:
5749 return {'message': unicode(e)}
5750 message = _(u"The value for the field '%s' already exists.") % field_name
5751 field = fields.get(field_name)
5753 message = _(u"%s This might be '%s' in the current model, or a field "
5754 u"of the same name in an o2m.") % (message, field['string'])
5757 'field': field_name,
5760 PGERROR_TO_OE = defaultdict(
5761 # shape of mapped converters
5762 lambda: (lambda model, fvg, info, pgerror: {'message': unicode(pgerror)}), {
5763 # not_null_violation
5764 '23502': convert_pgerror_23502,
5765 # unique constraint error
5766 '23505': convert_pgerror_23505,
5769 def _normalize_ids(arg, atoms={int, long, str, unicode, NewId}):
5770 """ Normalizes the ids argument for ``browse`` (v7 and v8) to a tuple.
5772 Various implementations were tested on the corpus of all browse() calls
5773 performed during a full crawler run (after having installed all website_*
5774 modules) and this one was the most efficient overall.
5776 A possible bit of correctness was sacrificed by not doing any test on
5777 Iterable and just assuming that any non-atomic type was an iterable of
5782 # much of the corpus is falsy objects (empty list, tuple or set, None)
5786 # `type in set` is significantly faster (because more restrictive) than
5787 # isinstance(arg, set) or issubclass(type, set); and for new-style classes
5788 # obj.__class__ is equivalent to but faster than type(obj). Not relevant
5789 # (and looks much worse) in most cases, but over millions of calls it
5790 # does have a very minor effect.
5791 if arg.__class__ in atoms:
5796 # keep those imports here to avoid dependency cycle errors
5797 from .osv import expression
5798 from .fields import Field, SpecialValue, FailedValue
5800 # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: