1 # -*- coding: utf-8 -*-
2 ##############################################################################
4 # OpenERP, Open Source Management Solution
5 # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
7 # This program is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU Affero General Public License as
9 # published by the Free Software Foundation, either version 3 of the
10 # License, or (at your option) any later version.
12 # This program is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU Affero General Public License for more details.
17 # You should have received a copy of the GNU Affero General Public License
18 # along with this program. If not, see <http://www.gnu.org/licenses/>.
20 ##############################################################################
24 Object relational mapping to database (postgresql) module
25 * Hierarchical structure
26 * Constraints consistency, validations
27 * Object meta Data depends on its status
28 * Optimised processing by complex query (multiple actions at once)
29 * Default fields value
30 * Permissions optimisation
* Persistent object: PostgreSQL DB
* Multi-level caching system
* 2 different inheritance mechanisms
  - classical fields (varchar, integer, boolean, ...)
  - relation fields (one2many, many2one, many2many)
58 import dateutil.relativedelta
60 from lxml import etree
64 import openerp.tools as tools
65 from openerp.tools.config import config
66 from openerp.tools.misc import CountingStream, DEFAULT_SERVER_DATETIME_FORMAT, DEFAULT_SERVER_DATE_FORMAT
67 from openerp.tools.safe_eval import safe_eval as eval
68 from openerp.tools.translate import _
69 from openerp import SUPERUSER_ID
70 from query import Query
72 _logger = logging.getLogger(__name__)
73 _schema = logging.getLogger(__name__ + '.schema')
75 # List of etree._Element subclasses that we choose to ignore when parsing XML.
76 from openerp.tools import SKIPPED_ELEMENT_TYPES
78 regex_order = re.compile('^( *([a-z0-9:_]+|"[a-z0-9:_]+")( *desc| *asc)?( *, *|))+$', re.I)
79 regex_object_name = re.compile(r'^[a-z0-9_.]+$')
81 AUTOINIT_RECALCULATE_STORED_FIELDS = 1000
def transfer_field_to_modifiers(field, modifiers):
    """Seed ``modifiers`` (mutated in place) from a fields_get() descriptor.

    For each of the 'invisible'/'readonly'/'required' attributes, the default
    value comes from the field descriptor, and the field's ``states`` mapping
    contributes per-state exceptions expressed as a domain on 'state'.

    :param dict field: field descriptor as returned by fields_get()
    :param dict modifiers: accumulator for the view 'modifiers' attribute
    """
    default_values = {}
    state_exceptions = {}
    for attr in ('invisible', 'readonly', 'required'):
        state_exceptions[attr] = []
        default_values[attr] = bool(field.get(attr))
    for state, modifs in (field.get("states",{})).items():
        for modif in modifs:
            # record the states where the attribute differs from its default
            if default_values[modif[0]] != modif[1]:
                state_exceptions[modif[0]].append(state)

    for attr, default_value in default_values.items():
        if state_exceptions[attr]:
            modifiers[attr] = [("state", "not in" if default_value else "in", state_exceptions[attr])]
        else:
            modifiers[attr] = default_value
101 # Don't deal with groups, it is done by check_group().
102 # Need the context to evaluate the invisible attribute on tree views.
103 # For non-tree views, the context shouldn't be given.
# Don't deal with groups, it is done by check_group().
# Need the context to evaluate the invisible attribute on tree views.
# For non-tree views, the context shouldn't be given.
def transfer_node_to_modifiers(node, modifiers, context=None, in_tree_view=False):
    """Fold a view node's attrs/states/invisible/readonly/required attributes
    into ``modifiers`` (mutated in place).

    :param node: view architecture node (lxml element)
    :param dict modifiers: accumulator for the 'modifiers' attribute
    :param dict context: evaluation context for literal attribute values
    :param bool in_tree_view: route 'invisible' to 'tree_invisible' (column
                              visibility) instead of cell visibility
    """
    # `eval` is the safe_eval alias imported at the top of this module.
    if node.get('attrs'):
        modifiers.update(eval(node.get('attrs')))

    if node.get('states'):
        if 'invisible' in modifiers and isinstance(modifiers['invisible'], list):
            # TODO combine with AND or OR, use implicit AND for now.
            modifiers['invisible'].append(('state', 'not in', node.get('states').split(',')))
        else:
            modifiers['invisible'] = [('state', 'not in', node.get('states').split(','))]

    for a in ('invisible', 'readonly', 'required'):
        if node.get(a):
            v = bool(eval(node.get(a), {'context': context or {}}))
            if in_tree_view and a == 'invisible':
                # Invisible in a tree view has a specific meaning, make it a
                # new key in the modifiers attribute.
                modifiers['tree_invisible'] = v
            elif v or (a not in modifiers or not isinstance(modifiers[a], list)):
                # Don't set the attribute to False if a dynamic value was
                # provided (i.e. a domain from attrs or states).
                modifiers[a] = v
def simplify_modifiers(modifiers):
    """Drop falsy static values from ``modifiers`` in place, so the serialized
    JSON only carries meaningful keys (dynamic domains are lists, hence truthy)."""
    for a in ('invisible', 'readonly', 'required'):
        if a in modifiers and not modifiers[a]:
            del modifiers[a]
def transfer_modifiers_to_node(modifiers, node):
    """Serialize a non-empty ``modifiers`` dict to JSON and store it on the
    node's 'modifiers' attribute. Empty dicts are skipped so nodes without
    modifiers carry no attribute at all."""
    if modifiers:
        simplify_modifiers(modifiers)
        node.set('modifiers', simplejson.dumps(modifiers))
def setup_modifiers(node, field=None, context=None, in_tree_view=False):
    """ Processes node attributes and field descriptors to generate
    the ``modifiers`` node attribute and set it on the provided node.

    Alters its first argument in-place.

    :param node: ``field`` node from an OpenERP view
    :type node: lxml.etree._Element
    :param dict field: field descriptor corresponding to the provided node
    :param dict context: execution context used to evaluate node attributes
    :param bool in_tree_view: triggers the ``tree_invisible`` code
                              path (separate from ``invisible``): in
                              tree view there are two levels of
                              invisibility, cell content (a column is
                              present but the cell itself is not
                              displayed) with ``invisible`` and column
                              invisibility (the whole column is
                              hidden) with ``tree_invisible``.

    :returns: nothing
    """
    modifiers = {}
    if field is not None:
        transfer_field_to_modifiers(field, modifiers)
    transfer_node_to_modifiers(
        node, modifiers, context=context, in_tree_view=in_tree_view)
    transfer_modifiers_to_node(modifiers, node)
def test_modifiers(what, expected):
    """Assert the serialized modifiers computed for ``what`` equal ``expected``.

    :param what: an XML arch string (node path) or a fields_get() dict
    :param str expected: expected JSON serialization of the modifiers
    :raises AssertionError: when the serialized modifiers differ
    """
    modifiers = {}
    if isinstance(what, basestring):
        node = etree.fromstring(what)
        transfer_node_to_modifiers(node, modifiers)
        simplify_modifiers(modifiers)
        json = simplejson.dumps(modifiers)
        assert json == expected, "%s != %s" % (json, expected)
    elif isinstance(what, dict):
        transfer_field_to_modifiers(what, modifiers)
        simplify_modifiers(modifiers)
        json = simplejson.dumps(modifiers)
        assert json == expected, "%s != %s" % (json, expected)
# openerp.osv.orm.modifiers_tests()
def modifiers_tests():
    """Smoke-tests for the modifiers helpers above; raises AssertionError on
    regression. Meant to be run manually (see comment above)."""
    test_modifiers('<field name="a"/>', '{}')
    test_modifiers('<field name="a" invisible="1"/>', '{"invisible": true}')
    test_modifiers('<field name="a" readonly="1"/>', '{"readonly": true}')
    test_modifiers('<field name="a" required="1"/>', '{"required": true}')
    test_modifiers('<field name="a" invisible="0"/>', '{}')
    test_modifiers('<field name="a" readonly="0"/>', '{}')
    test_modifiers('<field name="a" required="0"/>', '{}')
    test_modifiers('<field name="a" invisible="1" required="1"/>', '{"invisible": true, "required": true}') # TODO order is not guaranteed
    test_modifiers('<field name="a" invisible="1" required="0"/>', '{"invisible": true}')
    test_modifiers('<field name="a" invisible="0" required="1"/>', '{"required": true}')
    test_modifiers("""<field name="a" attrs="{'invisible': [('b', '=', 'c')]}"/>""", '{"invisible": [["b", "=", "c"]]}')

    # The dictionary is supposed to be the result of fields_get().
    test_modifiers({}, '{}')
    test_modifiers({"invisible": True}, '{"invisible": true}')
    test_modifiers({"invisible": False}, '{}')
def check_object_name(name):
    """ Check if the given name is a valid openerp object name.

        The _name attribute in osv and osv_memory object is subject to
        some restrictions. This function returns True or False whether
        the given name is allowed or not.

        TODO: this is an approximation. The goal in this approximation
        is to disallow uppercase characters (in some places, we quote
        table/column names and in other not, which leads to this kind
        of errors:

            psycopg2.ProgrammingError: relation "xxx" does not exist).

        The same restriction should apply to both osv and osv_memory
        objects for consistency.

    """
    if regex_object_name.match(name) is None:
        return False
    return True
def raise_on_invalid_object_name(name):
    """Validate a model name and raise an except_orm when it is rejected
    by check_object_name()."""
    if check_object_name(name):
        return
    raise except_orm('ValueError', "The _name attribute %s is not valid." % name)
231 POSTGRES_CONFDELTYPES = {
def intersect(la, lb):
    """Return the items of ``la`` that also occur in ``lb``, keeping the
    order (and, under Python 2's filter, the sequence type) of ``la``."""
    def _in_lb(item):
        return item in lb
    return filter(_in_lb, la)
def fix_import_export_id_paths(fieldname):
    """
    Fixes the id fields in import and exports, and splits field paths
    on '/'.

    :param str fieldname: name of the field to import/export
    :return: split field name
    :rtype: list of str
    """
    path = fieldname
    # normalize the '.id' (db id) and ':id' (external id) suffixes into
    # path components before splitting
    for pattern, replacement in ((r'([^/])\.id', r'\1/.id'),
                                 (r'([^/]):id', r'\1/id')):
        path = re.sub(pattern, replacement, path)
    return path.split('/')
class except_orm(Exception):
    """Generic ORM exception carrying a short ``name`` (category/title) and a
    ``value`` (human-readable message); both are also exposed via ``args``."""
    def __init__(self, name, value):
        self.name = name
        self.value = value
        self.args = (name, value)
261 class BrowseRecordError(Exception):
class browse_null(object):
    """ Readonly python database object browser

    Null placeholder used when browsing an absent relation (e.g. an unset
    many2one): every attribute or item access yields None, the object is
    falsy (Python 2 ``__nonzero__``), renders as the empty string, and
    iteration is explicitly forbidden.
    """

    def __init__(self):
        # mimic a browse_record whose id is unset
        self.id = False

    def __getitem__(self, name):
        return None

    def __getattr__(self, name):
        return None  # XXX: return self ?

    def __call__(self, *args, **kwargs):
        return False

    def __str__(self):
        return ''

    def __nonzero__(self):
        return False

    def __unicode__(self):
        return u''

    def __iter__(self):
        raise NotImplementedError("Iteration is not allowed on %s" % self)
# TODO: execute an object method on browse_record_list
class browse_record_list(list):
    """ Collection of browse objects

    Such an instance will be returned when doing a ``browse([ids..])``
    and will be iterable, yielding browse() objects
    """

    def __init__(self, lst, context=None):
        # normalize a missing/falsy context to an empty dict so consumers
        # can always treat self.context as a mapping
        if not context:
            context = {}
        super(browse_record_list, self).__init__(lst)
        self.context = context
class browse_record(object):
    """ An object that behaves like a row of an object's table.
        It has attributes after the columns of the corresponding object.

        Example::

            uobj = pool.get('res.users')
            user_rec = uobj.browse(cr, uid, 104)
    """
    # NOTE(review): this copy of the class has statements elided (missing
    # `else:`/`try:` lines and assignments such as self._cr/_uid/_id/_cache).
    # Code is reproduced unchanged below; only comments/docstrings were added.
    # Reconcile against the upstream 7.0 sources before relying on it.

    def __init__(self, cr, uid, id, table, cache, context=None,
                 list_class=browse_record_list, fields_process=None):
        """
        :param table: the browsed object (inherited from orm)
        :param dict cache: a dictionary of model->field->data to be shared
                           across browse objects, thus reducing the SQL
                           read()s. It can speed up things a lot, but also be
                           disastrous if not discarded after write()/unlink()
        :param dict context: dictionary with an optional context
        """
        if fields_process is None:
        # presumably `fields_process = {}` (and a similar context default) was
        # elided here — TODO confirm against upstream
        self._list_class = list_class
        self._table = table # deprecated, use _model!
        self._table_name = self._table._name
        self.__logger = logging.getLogger('openerp.osv.orm.browse_record.' + self._table_name)
        self._context = context
        self._fields_process = fields_process

        cache.setdefault(table._name, {})
        self._data = cache[table._name]

#        if not (id and isinstance(id, (int, long,))):
#            raise BrowseRecordError(_('Wrong ID for the browse record, got %r, expected an integer.') % (id,))
#        if not table.exists(cr, uid, id, context):
#            raise BrowseRecordError(_('Object %s does not exists') % (self,))

        if id not in self._data:
            # seed the per-model cache entry with just the record id
            self._data[id] = {'id': id}

    def __getitem__(self, name):
        # Lazily fetch `name` (and, when possible, its prefetchable siblings)
        # into the shared cache, then answer from the cache.
        if name not in self._data[self._id]:
            # build the list of fields we will fetch

            # fetch the definition of the field which was asked for
            if name in self._table._columns:
                col = self._table._columns[name]
            elif name in self._table._inherit_fields:
                col = self._table._inherit_fields[name][2]
            elif hasattr(self._table, str(name)):
                attr = getattr(self._table, name)
                if isinstance(attr, (types.MethodType, types.LambdaType, types.FunctionType)):
                    # wrap model methods so they receive cr/uid/[id] plus the
                    # browse context automatically
                    def function_proxy(*args, **kwargs):
                        if 'context' not in kwargs and self._context:
                            kwargs.update(context=self._context)
                        return attr(self._cr, self._uid, [self._id], *args, **kwargs)
                    return function_proxy
                error_msg = "Field '%s' does not exist in object '%s'" % (name, self)
                self.__logger.warning(error_msg)
                if self.__logger.isEnabledFor(logging.DEBUG):
                    self.__logger.debug(''.join(traceback.format_stack()))
                raise KeyError(error_msg)

            # a field can be prefetched with its siblings when it is stored,
            # not group-restricted and not deprecated
            prefetchable = lambda f: f._classic_write and f._prefetch and not f.groups and not f.deprecated

            # if the field is a classic one or a many2one, we'll fetch all classic and many2one fields
            if prefetchable(col):
                # gen the list of "local" (ie not inherited) fields which are classic or many2one
                field_filter = lambda x: prefetchable(x[1])
                fields_to_fetch = filter(field_filter, self._table._columns.items())
                # gen the list of inherited fields
                inherits = map(lambda x: (x[0], x[1][2]), self._table._inherit_fields.items())
                # complete the field list with the inherited fields which are classic or many2one
                fields_to_fetch += filter(field_filter, inherits)
                # otherwise we fetch only that field
                fields_to_fetch = [(name, col)]

            # read every cached id that is still missing this field, in one go
            ids = filter(lambda id: name not in self._data[id], self._data.keys())
            field_names = map(lambda x: x[0], fields_to_fetch)
            field_values = self._table.read(self._cr, self._uid, ids, field_names, context=self._context, load="_classic_write")
            except (openerp.exceptions.AccessError, except_orm):
                # prefetching attempt failed, perhaps we're violating ACL restrictions involuntarily
                _logger.info('Prefetching attempt for fields %s on %s failed for ids %s, re-trying just for id %s', field_names, self._model._name, ids, self._id)
                field_values = self._table.read(self._cr, self._uid, ids, field_names, context=self._context, load="_classic_write")

            # TODO: improve this, very slow for reports
            if self._fields_process:
                lang = self._context.get('lang', 'en_US') or 'en_US'
                lang_obj_ids = self.pool.get('res.lang').search(self._cr, self._uid, [('code', '=', lang)])
                    raise Exception(_('Language with code "%s" is not defined in your system !\nDefine it through the Administration menu.') % (lang,))
                lang_obj = self.pool.get('res.lang').browse(self._cr, self._uid, lang_obj_ids[0])

                for field_name, field_column in fields_to_fetch:
                    if field_column._type in self._fields_process:
                        for result_line in field_values:
                            result_line[field_name] = self._fields_process[field_column._type](result_line[field_name])
                            if result_line[field_name]:
                                result_line[field_name].set_value(self._cr, self._uid, result_line[field_name], self, field_column, lang_obj)

                # Where did those ids come from? Perhaps old entries in ir_model_dat?
                _logger.warning("No field_values found for ids %s in %s", ids, self)
                raise KeyError('Field %s not found in %s'%(name, self))
            # create browse records for 'remote' objects
            for result_line in field_values:
                for field_name, field_column in fields_to_fetch:
                    if field_column._type == 'many2one':
                        if result_line[field_name]:
                            obj = self._table.pool[field_column._obj]
                            if isinstance(result_line[field_name], (list, tuple)):
                                value = result_line[field_name][0]
                                value = result_line[field_name]
                                # FIXME: this happen when a _inherits object
                                #        overwrite a field of it parent. Need
                                #        testing to be sure we got the right
                                #        object and not the parent one.
                                if not isinstance(value, browse_record):
                                    # In some cases the target model is not available yet, so we must ignore it,
                                    # which is safe in most cases, this value will just be loaded later when needed.
                                    # This situation can be caused by custom fields that connect objects with m2o without
                                    # respecting module dependencies, causing relationships to be connected to soon when
                                    # the target is not loaded yet.
                                    new_data[field_name] = browse_record(self._cr,
                                        self._uid, value, obj, self._cache,
                                        context=self._context,
                                        list_class=self._list_class,
                                        fields_process=self._fields_process)
                                    new_data[field_name] = value
                                new_data[field_name] = browse_null()
                            new_data[field_name] = browse_null()
                    elif field_column._type in ('one2many', 'many2many') and len(result_line[field_name]):
                        new_data[field_name] = self._list_class(
                            (browse_record(self._cr, self._uid, id, self._table.pool.get(field_column._obj),
                                           self._cache, context=self._context, list_class=self._list_class,
                                           fields_process=self._fields_process)
                               for id in result_line[field_name]),
                            context=self._context)
                    elif field_column._type == 'reference':
                        if result_line[field_name]:
                            if isinstance(result_line[field_name], browse_record):
                                new_data[field_name] = result_line[field_name]
                                # reference values are stored as 'model,id' strings
                                ref_obj, ref_id = result_line[field_name].split(',')
                                ref_id = long(ref_id)
                                obj = self._table.pool[ref_obj]
                                new_data[field_name] = browse_record(self._cr, self._uid, ref_id, obj, self._cache, context=self._context, list_class=self._list_class, fields_process=self._fields_process)
                                new_data[field_name] = browse_null()
                            new_data[field_name] = browse_null()
                        new_data[field_name] = result_line[field_name]
                self._data[result_line['id']].update(new_data)

        if not name in self._data[self._id]:
            # How did this happen? Could be a missing model due to custom fields used too soon, see above.
            self.__logger.error("Fields to fetch: %s, Field values: %s", field_names, field_values)
            self.__logger.error("Cached: %s, Table: %s", self._data[self._id], self._table)
            raise KeyError(_('Unknown attribute %s in %s ') % (name, self))
        return self._data[self._id][name]

    def __getattr__(self, name):
        # Delegate to __getitem__; a KeyError is re-raised as AttributeError
        # with the original traceback (Python 2 three-argument raise).
            exc_info = sys.exc_info()
            raise AttributeError, "Got %r while trying to get attribute %s on a %s record." % (e, name, self._table._name), exc_info[2]

    def __contains__(self, name):
        return (name in self._table._columns) or (name in self._table._inherit_fields) or hasattr(self._table, name)

        # body of __iter__ (the `def` line is elided in this copy)
        raise NotImplementedError("Iteration is not allowed on %s" % self)

    def __hasattr__(self, name):

        # body of __repr__/__str__ (the `def` line is elided in this copy)
        return "browse_record(%s, %s)" % (self._table_name, self._id)

    def __eq__(self, other):
        if not isinstance(other, browse_record):
        return (self._table_name, self._id) == (other._table_name, other._id)

    def __ne__(self, other):
        if not isinstance(other, browse_record):
        return (self._table_name, self._id) != (other._table_name, other._id)

    # we need to define __unicode__ even though we've already defined __str__
    # because we have overridden __getattr__
    def __unicode__(self):
        return unicode(str(self))

        # body of __hash__ (the `def` line is elided in this copy)
        return hash((self._table_name, self._id))

        """Force refreshing this browse_record's data and all the data of the
        records that belong to the same cache, by emptying the cache completely,
        preserving only the record identifiers (for prefetching optimizations).
        """
        for model, model_cache in self._cache.iteritems():
            # only preserve the ids of the records that were in the cache
            cached_ids = dict([(i, {'id': i}) for i in model_cache.keys()])
            self._cache[model].clear()
            self._cache[model].update(cached_ids)
def pg_varchar(size=0):
    """ Returns the VARCHAR declaration for the provided size:

    * If no size (or an empty or negative size is provided) return an
      'infinite' VARCHAR
    * Otherwise return a VARCHAR(n)

    :type int size: varchar size, optional
    :rtype: str
    """
    if size:
        if not isinstance(size, int):
            raise TypeError("VARCHAR parameter should be an int, got %s"
                            % type(size))
        if size > 0:
            return 'VARCHAR(%d)' % size
    return 'VARCHAR'
# Mapping from simple field classes to their PostgreSQL column type; used by
# get_pg_type() below for direct (non-subclass) dispatch.
FIELDS_TO_PGTYPES = {
    fields.boolean: 'bool',
    fields.integer: 'int4',
    fields.text: 'text',
    fields.html: 'text',
    fields.date: 'date',
    fields.datetime: 'timestamp',
    fields.binary: 'bytea',
    fields.many2one: 'int4',
    fields.serialized: 'text',
}
def get_pg_type(f, type_override=None):
    """
    :param fields._column f: field to get a Postgres type for
    :param type type_override: use the provided type for dispatching instead of the field's own type
    :returns: (postgres_identification_type, postgres_type_specification)
    :rtype: (str, str) or None when the field type is not supported
    """
    field_type = type_override or type(f)

    if field_type in FIELDS_TO_PGTYPES:
        pg_type = (FIELDS_TO_PGTYPES[field_type], FIELDS_TO_PGTYPES[field_type])
    elif issubclass(field_type, fields.float):
        # floats with explicit digits get an arbitrary-precision NUMERIC column
        if f.digits:
            pg_type = ('numeric', 'NUMERIC')
        else:
            pg_type = ('float8', 'DOUBLE PRECISION')
    elif issubclass(field_type, (fields.char, fields.reference)):
        pg_type = ('varchar', pg_varchar(f.size))
    elif issubclass(field_type, fields.selection):
        # integer-keyed selections (or size == -1) are stored as integers
        if (isinstance(f.selection, list) and isinstance(f.selection[0][0], int))\
                or getattr(f, 'size', None) == -1:
            pg_type = ('int4', 'INTEGER')
        else:
            pg_type = ('varchar', pg_varchar(getattr(f, 'size', None)))
    elif issubclass(field_type, fields.function):
        # function fields dispatch on the type they emulate
        if f._type == 'selection':
            pg_type = ('varchar', pg_varchar())
        else:
            pg_type = get_pg_type(f, getattr(fields, f._type))
    else:
        _logger.warning('%s type not supported!', field_type)
        pg_type = None

    return pg_type
class MetaModel(type):
    """ Metaclass for the Model.

    This class is used as the metaclass for the Model class to discover
    the models defined in a module (i.e. without instanciating them).
    If the automatic discovery is not needed, it is possible to set the
    model's _register attribute to False.

    """

    # {module_name: [model_class, ...]} accumulated at class-creation time
    module_to_models = {}

    def __init__(self, name, bases, attrs):
        if not self._register:
            # first unregistered subclass: flip the flag for its children
            # and stop here (no module bookkeeping for it)
            self._register = True
            super(MetaModel, self).__init__(name, bases, attrs)
            return

        # The (OpenERP) module name can be in the `openerp.addons` namespace
        # or not. For instance module `sale` can be imported as
        # `openerp.addons.sale` (the good way) or `sale` (for backward
        # compatibility).
        module_parts = self.__module__.split('.')
        if len(module_parts) > 2 and module_parts[0] == 'openerp' and \
                module_parts[1] == 'addons':
            module_name = self.__module__.split('.')[2]
        else:
            module_name = self.__module__.split('.')[0]
        if not hasattr(self, '_module'):
            self._module = module_name

        # Remember which models to instanciate for this module.
        self.module_to_models.setdefault(self._module, []).append(self)
# Definition of log access columns, automatically added to models if
# self._log_access is True
LOG_ACCESS_COLUMNS = {
    'create_uid': 'INTEGER REFERENCES res_users ON DELETE SET NULL',
    'create_date': 'TIMESTAMP',
    'write_uid': 'INTEGER REFERENCES res_users ON DELETE SET NULL',
    'write_date': 'TIMESTAMP'
}
# special columns automatically created by the ORM
# (list() keeps this working on both Python 2 and 3 key views)
MAGIC_COLUMNS = ['id'] + list(LOG_ACCESS_COLUMNS.keys())
class BaseModel(object):
    """ Base class for OpenERP models.

    OpenERP models are created by inheriting from this class' subclasses:

    * Model: for regular database-persisted models
    * TransientModel: for temporary data, stored in the database but automatically
      vacuumed every so often
    * AbstractModel: for abstract super classes meant to be shared by multiple
      _inheriting classes (usually Models or TransientModels)

    The system will later instantiate the class once per database (on
    which the class' module is installed).

    To create a class that should not be instantiated, the _register class attribute
    may be set to False.
    """
    # NOTE(review): several attribute definitions (_name, _columns, _defaults,
    # _inherits, _inherit_fields, _all_columns, ...) appear elided from this
    # copy; the orphaned comments below describe them. Confirm upstream.
    __metaclass__ = MetaModel
    _auto = True # create database backend
    _register = False # Set to false if the model shouldn't be automatically discovered.
    _parent_name = 'parent_id'
    _parent_store = False
    _parent_order = False
    # dict of {field:method}, with method returning the (name_get of records, {id: fold})
    # to include in the _read_group, if grouped on this field
    _transient = False # True in a TransientModel
    # { 'parent_model': 'm2o_field', ... }
    # Mapping from inherits'd field name to triple (m, r, f, n) where m is the
    # model from which it is inherits'd, r is the (local) field towards m, f
    # is the _column object itself, and n is the original (i.e. top-most)
    # parent model.
    # { 'field_name': ('parent_model', 'm2o_field_to_reach_parent',
    #                  field_column_obj, origina_parent_model), ... }
    # Mapping field name/column_info object
    # This is similar to _inherit_fields but:
    # 1. includes self fields,
    # 2. uses column_info instead of a triple.
    _sql_constraints = []
    _protected = ['read', 'write', 'create', 'default_get', 'perm_read', 'unlink', 'fields_get', 'fields_view_get', 'search', 'name_get', 'distinct_field_get', 'name_search', 'copy', 'import_data', 'search_count', 'exists']

    # pseudo-field used for optimistic-concurrency checks in web clients
    CONCURRENCY_CHECK_FIELD = '__last_update'
    def log(self, cr, uid, id, message, secondary=False, context=None):
        # Deprecated shim: records nothing, only emits a warning pointing
        # callers at the OpenChatter notification system.
        return _logger.warning("log() is deprecated. Please use OpenChatter notification system instead of the res.log mechanism.")
    def view_init(self, cr, uid, fields_list, context=None):
        """Override this method to do specific things when a view on the object is opened."""
        # Intentionally empty hook (a docstring-only body is valid Python).
    def _field_create(self, cr, context=None):
        """ Create entries in ir_model_fields for all the model's fields.

        If necessary, also create an entry in ir_model, and if called from the
        modules loading scheme (by receiving 'module' in the context), also
        create entries in ir_model_data (for the model and the fields).

        - create an entry in ir_model (if there is not already one),
        - create an entry in ir_model_data (if there is not already one, and if
          'module' is in the context),
        - update ir_model_fields with the fields found in _columns
          (TODO there is some redundancy as _columns is updated from
          ir_model_fields in __init__).

        """
        # NOTE(review): this copy of the method has statements elided (e.g. the
        # `if not cr.rowcount:` guards, `cols = {}`, `vals = {`, closing
        # parentheses of multi-line cr.execute() calls, and the insert/update
        # branching). Code reproduced unchanged; reconcile against upstream.
        cr.execute("SELECT id FROM ir_model WHERE model=%s", (self._name,))
        # presumably guarded by `if not cr.rowcount:` upstream — confirm
        cr.execute('SELECT nextval(%s)', ('ir_model_id_seq',))
        model_id = cr.fetchone()[0]
        cr.execute("INSERT INTO ir_model (id,model, name, info,state) VALUES (%s, %s, %s, %s, %s)", (model_id, self._name, self._description, self.__doc__, 'base'))
        model_id = cr.fetchone()[0]
        if 'module' in context:
            name_id = 'model_'+self._name.replace('.', '_')
            cr.execute('select * from ir_model_data where name=%s and module=%s', (name_id, context['module']))
            cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, (now() at time zone 'UTC'), (now() at time zone 'UTC'), %s, %s, %s)", \
                (name_id, context['module'], 'ir.model', model_id)

        cr.execute("SELECT * FROM ir_model_fields WHERE model=%s", (self._name,))
        for rec in cr.dictfetchall():
            cols[rec['name']] = rec

        ir_model_fields_obj = self.pool.get('ir.model.fields')

        # sparse field should be created at the end, as it depends on its serialized field already existing
        model_fields = sorted(self._columns.items(), key=lambda x: 1 if x[1]._type == 'sparse' else 0)
        for (k, f) in model_fields:
                # entries of the elided `vals = {...}` dict literal:
                'model_id': model_id,
                'field_description': f.string,
                'relation': f._obj or '',
                'select_level': tools.ustr(f.select or 0),
                'readonly': (f.readonly and 1) or 0,
                'required': (f.required and 1) or 0,
                'selectable': (f.selectable and 1) or 0,
                'translate': (f.translate and 1) or 0,
                'relation_field': f._fields_id if isinstance(f, fields.one2many) else '',
                'serialization_field_id': None,
            if getattr(f, 'serialization_field', None):
                # resolve link to serialization_field if specified by name
                serialization_field_id = ir_model_fields_obj.search(cr, SUPERUSER_ID, [('model','=',vals['model']), ('name', '=', f.serialization_field)])
                if not serialization_field_id:
                    raise except_orm(_('Error'), _("Serialization field `%s` not found for sparse field `%s`!") % (f.serialization_field, k))
                vals['serialization_field_id'] = serialization_field_id[0]

            # When its a custom field,it does not contain f.select
            if context.get('field_state', 'base') == 'manual':
                if context.get('field_name', '') == k:
                    vals['select_level'] = context.get('select', '0')
                #setting value to let the problem NOT occur next time
                vals['select_level'] = cols[k]['select_level']

                # insert branch (presumably under an elided `if k not in cols:`)
                cr.execute('select nextval(%s)', ('ir_model_fields_id_seq',))
                id = cr.fetchone()[0]
                cr.execute("""INSERT INTO ir_model_fields (
                    id, model_id, model, name, field_description, ttype,
                    relation,state,select_level,relation_field, translate, serialization_field_id
                    %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s
                    id, vals['model_id'], vals['model'], vals['name'], vals['field_description'], vals['ttype'],
                    vals['relation'], 'base',
                    vals['select_level'], vals['relation_field'], bool(vals['translate']), vals['serialization_field_id']
                if 'module' in context:
                    name1 = 'field_' + self._table + '_' + k
                    cr.execute("select name from ir_model_data where name=%s", (name1,))
                        name1 = name1 + "_" + str(id)
                    cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, (now() at time zone 'UTC'), (now() at time zone 'UTC'), %s, %s, %s)", \
                        (name1, context['module'], 'ir.model.fields', id)
                # update branch (presumably under an elided `else:`)
                for key, val in vals.items():
                    if cols[k][key] != vals[key]:
                        cr.execute('update ir_model_fields set field_description=%s where model=%s and name=%s', (vals['field_description'], vals['model'], vals['name']))
                        cr.execute("""UPDATE ir_model_fields SET
                            model_id=%s, field_description=%s, ttype=%s, relation=%s,
                            select_level=%s, readonly=%s ,required=%s, selectable=%s, relation_field=%s, translate=%s, serialization_field_id=%s
                            model=%s AND name=%s""", (
                            vals['model_id'], vals['field_description'], vals['ttype'],
                            vals['select_level'], bool(vals['readonly']), bool(vals['required']), bool(vals['selectable']), vals['relation_field'], bool(vals['translate']), vals['serialization_field_id'], vals['model'], vals['name']
    # Goal: try to apply inheritance at the instanciation level and
    # put objects in the pool var
    def create_instance(cls, pool, cr):
        """ Instanciate a given model.

        This class method instanciates the class of some model (i.e. a class
        deriving from osv or osv_memory). The class might be the class passed
        in argument or, if it inherits from another class, a class constructed
        by combining the two classes.

        The ``attributes`` argument specifies which parent class attributes
        are combined with the child's own.

        TODO: the creation of the combined class is repeated at each call of
        this method. This is probably unnecessary.

        """
        # NOTE(review): upstream this method carries a @classmethod decorator
        # and several statements (`if parent_names:`, `nattr = {}`, the
        # constraint-override bookkeeping, `return obj`, ...) are elided in
        # this copy. Code reproduced unchanged; reconcile against upstream.
        attributes = ['_columns', '_defaults', '_inherits', '_constraints',
        parent_names = getattr(cls, '_inherit', None)
        if isinstance(parent_names, (str, unicode)):
            name = cls._name or parent_names
            parent_names = [parent_names]
                raise TypeError('_name is mandatory in case of multiple inheritance')

        for parent_name in ((type(parent_names)==list) and parent_names or [parent_names]):
            if parent_name not in pool:
                raise TypeError('The model "%s" specifies an unexisting parent class "%s"\n'
                    'You may need to add a dependency on the parent class\' module.' % (name, parent_name))
            parent_model = pool[parent_name]
            if not getattr(cls, '_original_module', None) and name == parent_model._name:
                cls._original_module = parent_model._original_module
            parent_class = parent_model.__class__
                # merge each inheritable attribute from the parent model
                new = copy.copy(getattr(parent_model, s, {}))
                    # Don't _inherit custom fields.
                if hasattr(new, 'update'):
                    new.update(cls.__dict__.get(s, {}))
                elif s=='_constraints':
                    for c in cls.__dict__.get(s, []):
                        for c2 in range(len(new)):
                            #For _constraints, we should check field and methods as well
                            if new[c2][2]==c[2] and (new[c2][0] == c[0] \
                                    or getattr(new[c2][0],'__name__', True) == \
                                    getattr(c[0],'__name__', False)):
                                # If new class defines a constraint with
                                # same function name, we let it override
                    new.extend(cls.__dict__.get(s, []))

            # Keep links to non-inherited constraints, e.g. useful when exporting translations
            nattr['_local_constraints'] = cls.__dict__.get('_constraints', [])
            nattr['_local_sql_constraints'] = cls.__dict__.get('_sql_constraints', [])
            cls = type(name, (cls, parent_class), dict(nattr, _register=False))
        cls._local_constraints = getattr(cls, '_constraints', [])
        cls._local_sql_constraints = getattr(cls, '_sql_constraints', [])

        if not getattr(cls, '_original_module', None):
            cls._original_module = cls._module
        obj = object.__new__(cls)

        if hasattr(obj, '_columns'):
            # float fields are registry-dependent (digit attribute). Duplicate them to avoid issues.
            for c, f in obj._columns.items():
                if f._type == 'float':
                    obj._columns[c] = copy.copy(f)

        obj.__init__(pool, cr)
"""Register this model.

This doesn't create an instance but simply register the model
as being part of the module where it is defined.
"""
# NOTE(review): the `def` line of this method is omitted from this
# excerpt; the fragment below only performs class registration.
# Set the module name (e.g. base, sale, accounting, ...) on the class.
module = cls.__module__.split('.')[0]
if not hasattr(cls, '_module'):
    # NOTE(review): the guarded assignment is omitted from this excerpt
    # (presumably `cls._module = module` -- TODO confirm upstream).

# Record this class in the list of models to instantiate for this module,
# managed by the metaclass.
module_model_list = MetaModel.module_to_models.setdefault(cls._module, [])
if cls not in module_model_list:
    module_model_list.append(cls)

# Since we don't return an instance here, the __init__
# method won't be called.
def __init__(self, pool, cr):
    """ Initialize a model and make it part of the given registry.

    - copy the stored fields' functions in the osv_pool,
    - update the _columns with the fields found in ir_model_fields,
    - ensure there is a many2one for each _inherits'd parent,
    - update the children's _columns,
    - give a chance to each field to initialize itself.

    NOTE(review): this excerpt omits a number of interleaved source
    lines; the fragments below are preserved as-is, with gaps marked.
    """
    # Make this model reachable from the registry under its _name.
    pool.add(self._name, self)

    if not self._name and not hasattr(self, '_inherit'):
        name = type(self).__name__.split('.')[0]
        msg = "The class %s has to have a _name attribute" % name
        # [source lines omitted from this excerpt]
        raise except_orm('ValueError', msg)

    if not self._description:
        self._description = self._name
    # Default SQL table name: model name with dots turned to underscores.
    self._table = self._name.replace('.', '_')
    if not hasattr(self, '_log_access'):
        # If _log_access is not specified, it is the same value as _auto.
        self._log_access = getattr(self, "_auto", True)

    self._columns = self._columns.copy()
    for store_field in self._columns:
        f = self._columns[store_field]
        if hasattr(f, 'digits_change'):
            # [guarded call omitted from this excerpt]
        # Drop any previously-registered store trigger for this exact
        # (model, field) pair before re-registering it below.
        def not_this_field(stored_func):
            x, y, z, e, f, l = stored_func
            return x != self._name or y != store_field
        self.pool._store_function[self._name] = filter(not_this_field, self.pool._store_function.get(self._name, []))
        if not isinstance(f, fields.function):
            # [source lines omitted from this excerpt]
        # [store-spec normalisation lines omitted from this excerpt]
        sm = {self._name: (lambda self, cr, uid, ids, c={}: ids, None, f.priority, None)}
        for object, aa in sm.items():
            # [tuple-length dispatch lines omitted from this excerpt]
            (fnct, fields2, order, length) = aa
            (fnct, fields2, order) = aa
            # [source lines omitted from this excerpt]
            raise except_orm('Error',
                ('Invalid function definition %s in object %s !\nYou must use the definition: store={object:(fnct, fields, priority, time length)}.' % (store_field, self._name)))
            self.pool._store_function.setdefault(object, [])
            t = (self._name, store_field, fnct, tuple(fields2) if fields2 else None, order, length)
            if not t in self.pool._store_function[object]:
                self.pool._store_function[object].append((self._name, store_field, fnct, tuple(fields2) if fields2 else None, order, length))
                # Keep store triggers ordered by priority (5th tuple slot).
                self.pool._store_function[object].sort(lambda x, y: cmp(x[4], y[4]))

    # Map SQL constraint names to human-readable error messages.
    for (key, _, msg) in self._sql_constraints:
        self.pool._sql_error[self._table+'_'+key] = msg

    # Load manual fields
    # Check the query is already done for all modules or if we need to
    if self.pool.fields_by_model is not None:
        manual_fields = self.pool.fields_by_model.get(self._name, [])
    # [else-branch opener omitted from this excerpt]
        cr.execute('SELECT * FROM ir_model_fields WHERE model=%s AND state=%s', (self._name, 'manual'))
        manual_fields = cr.dictfetchall()
    for field in manual_fields:
        if field['name'] in self._columns:
            # [continue and `attrs = {` opener omitted from this excerpt]
            'string': field['field_description'],
            'required': bool(field['required']),
            'readonly': bool(field['readonly']),
            # NOTE: `eval` is safe_eval (see module imports), applied to
            # admin-defined domains, not arbitrary end-user input.
            'domain': eval(field['domain']) if field['domain'] else None,
            'size': field['size'] or None,
            'ondelete': field['on_delete'],
            'translate': (field['translate']),
            #'select': int(field['select_level'])
        # [closing of attrs / sparse guard partly omitted from excerpt]
        if field['serialization_field_id']:
            cr.execute('SELECT name FROM ir_model_fields WHERE id=%s', (field['serialization_field_id'],))
            attrs.update({'serialization_field': cr.fetchone()[0], 'type': field['ttype']})
            if field['ttype'] in ['many2one', 'one2many', 'many2many']:
                attrs.update({'relation': field['relation']})
            self._columns[field['name']] = fields.sparse(**attrs)
        elif field['ttype'] == 'selection':
            self._columns[field['name']] = fields.selection(eval(field['selection']), **attrs)
        elif field['ttype'] == 'reference':
            self._columns[field['name']] = fields.reference(selection=eval(field['selection']), **attrs)
        elif field['ttype'] == 'many2one':
            self._columns[field['name']] = fields.many2one(field['relation'], **attrs)
        elif field['ttype'] == 'one2many':
            self._columns[field['name']] = fields.one2many(field['relation'], field['relation_field'], **attrs)
        elif field['ttype'] == 'many2many':
            _rel1 = field['relation'].replace('.', '_')
            _rel2 = field['model'].replace('.', '_')
            # Deterministic relation table name for custom m2m fields.
            _rel_name = 'x_%s_%s_%s_rel' % (_rel1, _rel2, field['name'])
            self._columns[field['name']] = fields.many2many(field['relation'], _rel_name, 'id1', 'id2', **attrs)
        # [else-branch opener omitted from this excerpt]
            self._columns[field['name']] = getattr(fields, field['ttype'])(**attrs)

    self._inherits_check()
    self._inherits_reload()
    if not self._sequence:
        self._sequence = self._table + '_id_seq'
    for k in self._defaults:
        assert (k in self._columns) or (k in self._inherit_fields), 'Default function defined in %s but field %s does not exist !' % (self._name, k,)
    # Let each column reset any per-registry cached state.
    for f in self._columns:
        self._columns[f].restart()

    # Transience setup
    if self.is_transient():
        self._transient_check_count = 0
        self._transient_max_count = config.get('osv_memory_count_limit')
        self._transient_max_hours = config.get('osv_memory_age_limit')
        assert self._log_access, "TransientModels must have log_access turned on, "\
            "in order to implement their access rights policy"

    # Validate rec_name if explicitly set.
    if self._rec_name is not None:
        assert self._rec_name in self._all_columns.keys() + ['id'], "Invalid rec_name %s for model %s" % (self._rec_name, self._name)
    # [else-branch opener omitted from this excerpt]
        self._rec_name = 'name'
def __export_row(self, cr, uid, row, fields, raw_data=False, context=None):
    """Export one browse_record as a row (list of cell values) following
    the requested field paths, recursing into *2many sub-records.

    NOTE(review): several source lines are omitted from this excerpt
    (loop headers, guards, accumulator initialisations); the fragments
    below are preserved as-is, with gaps marked.
    """
    # [source lines omitted from this excerpt]

    def check_type(field_type):
        # Type-appropriate "empty" value for a falsy cell.
        if field_type == 'float':
            # [return omitted from this excerpt]
        elif field_type == 'integer':
            # [return omitted from this excerpt]
        elif field_type == 'boolean':
            # [returns omitted from this excerpt]

    def selection_field(in_field):
        # Resolve the column object holding the selection, walking up the
        # _inherits chain if needed.
        col_obj = self.pool[in_field.keys()[0]]
        if f[i] in col_obj._columns.keys():
            return col_obj._columns[f[i]]
        elif f[i] in col_obj._inherits.keys():
            # NOTE(review): the recursive result is not returned here in
            # the visible source -- looks like a latent bug; confirm.
            selection_field(col_obj._inherits)

    def _get_xml_id(self, cr, uid, r):
        # Find (or mint under the __export__ module) an external ID for r.
        model_data = self.pool.get('ir.model.data')
        data_ids = model_data.search(cr, uid, [('model', '=', r._model._name), ('res_id', '=', r['id'])])
        # [guard omitted from this excerpt]
        d = model_data.read(cr, uid, data_ids, ['name', 'module'])[0]
        # [guard omitted from this excerpt]
        r = '%s.%s' % (d['module'], d['name'])
        # [source lines omitted from this excerpt]
        n = r._model._table+'_'+str(r['id']) + (postfix and ('_'+str(postfix)) or '' )
        if not model_data.search(cr, uid, [('name', '=', n)]):
            # [source lines omitted from this excerpt]
            model_data.create(cr, SUPERUSER_ID, {
                'model': r._model._name,
                'module': '__export__',
                # [remaining keys / closing / return omitted from excerpt]

    # One empty cell per requested field.
    data = map(lambda x: '', range(len(fields)))
    for fpos in range(len(fields)):
        # [field-path walking lines omitted from this excerpt]
        r = _get_xml_id(self, cr, uid, r)
        # [source lines omitted from this excerpt]
        # To display external name of selection field when its exported
        if f[i] in self._columns.keys():
            cols = self._columns[f[i]]
        elif f[i] in self._inherit_fields.keys():
            cols = selection_field(self._inherits)
        if cols and cols._type == 'selection':
            sel_list = cols.selection
            if r and type(sel_list) == type([]):
                # Map the stored key to its human-readable label.
                r = [x[1] for x in sel_list if r==x[0]]
                r = r and r[0] or False
        # [guard omitted from this excerpt]
        if f[i] in self._columns:
            r = check_type(self._columns[f[i]]._type)
        elif f[i] in self._inherit_fields:
            r = check_type(self._inherit_fields[f[i]][2]._type)
        data[fpos] = r or False
        # [source lines omitted from this excerpt]
        if isinstance(r, (browse_record_list, list)):
            # Sub-field paths under the current prefix, for recursion.
            fields2 = map(lambda x: (x[:i+1]==f[:i+1] and x[i+1:]) \
            # [continuation lines omitted from this excerpt]
            if [x for x in fields2 if x]:
                # [source lines omitted from this excerpt]
                done.append(fields2)
                if cols and cols._type=='many2many' and len(fields[fpos])>(i+1) and (fields[fpos][i+1]=='id'):
                    # m2m exported as comma-separated external IDs.
                    data[fpos] = ','.join([_get_xml_id(self, cr, uid, x) for x in r])
                # [source lines omitted from this excerpt]
                lines2 = row2._model.__export_row(cr, uid, row2, fields2, context=context)
                # [guard omitted from this excerpt]
                for fpos2 in range(len(fields)):
                    if lines2 and lines2[0][fpos2]:
                        data[fpos2] = lines2[0][fpos2]
                # [source lines omitted from this excerpt]
                name_relation = self.pool[rr._table_name]._rec_name
                if isinstance(rr[name_relation], browse_record):
                    rr = rr[name_relation]
                rr_name = self.pool[rr._table_name].name_get(cr, uid, [rr.id], context=context)
                rr_name = rr_name and rr_name[0] and rr_name[0][1] or ''
                dt += tools.ustr(rr_name or '') + ','
                data[fpos] = dt[:-1]  # drop the trailing comma
        # [source lines omitted from this excerpt]
        if isinstance(r, browse_record):
            # m2o cell rendered through name_get.
            r = self.pool[r._table_name].name_get(cr, uid, [r.id], context=context)
            r = r and r[0] and r[0][1] or ''
        if raw_data and cols and cols._type in ('integer', 'boolean', 'float'):
            # [assignment omitted from this excerpt]
        elif raw_data and cols and cols._type == 'date':
            data[fpos] = datetime.datetime.strptime(r, tools.DEFAULT_SERVER_DATE_FORMAT).date()
        elif raw_data and cols and cols._type == 'datetime':
            data[fpos] = datetime.datetime.strptime(r, tools.DEFAULT_SERVER_DATETIME_FORMAT)
        # [else-branch opener omitted from this excerpt]
            data[fpos] = tools.ustr(r or '')
    return [data] + lines
def export_data(self, cr, uid, ids, fields_to_export, raw_data=False, context=None):
    """
    Export fields for selected objects

    :param cr: database cursor
    :param uid: current user id
    :param ids: list of ids
    :param fields_to_export: list of fields
    :param raw_data: True to return value in fields type, False for string values
    :param context: context arguments, like lang, time zone
    :rtype: dictionary with a *datas* matrix

    This method is used when exporting data via client menu
    """
    cols = self._columns.copy()
    # Merge in fields reachable through _inherits parents; index 2 of the
    # _inherit_fields tuple is the column object itself.
    for f in self._inherit_fields:
        cols.update({f: self._inherit_fields[f][2]})
    # Normalise "a/b/id"-style field paths into list form.
    fields_to_export = map(fix_import_export_id_paths, fields_to_export)
    # NOTE(review): the initialisation of `datas` (presumably `datas = []`)
    # falls on a line omitted from this excerpt.
    for row in self.browse(cr, uid, ids, context):
        datas += self.__export_row(cr, uid, row, fields_to_export, raw_data=raw_data, context=context)
    return {'datas': datas}
def import_data(self, cr, uid, fields, datas, mode='init', current_module='', noupdate=False, context=None, filename=None):
    """
    .. deprecated::
        Use :meth:`~load` instead

    Import given data in given module

    This method is used when importing data via client menu.

    Example of fields to import for a sale.order::

        partner_id, (=name_search)
        order_line/.id, (=database_id)
        order_line/product_id/id, (=xml id)
        order_line/price_unit,
        order_line/product_uom_qty,
        order_line/product_uom/id (=xml_id)

    This method returns a 4-tuple with the following structure::

        (return_code, errored_resource, error_message, unused)

    * The first item is a return code, it is ``-1`` in case of
      import error, or the last imported row number in case of success
    * The second item contains the record data dict that failed to import
      in case of error, otherwise it's 0
    * The third item contains an error message string in case of error,
      otherwise it's 0
    * The last item is currently unused, with no specific semantics

    :param fields: list of fields to import
    :param datas: data to import
    :param mode: 'init' or 'update' for record creation
    :param current_module: module name
    :param noupdate: flag for record creation
    :param filename: optional file to store partial import state for recovery
    :returns: 4-tuple in the form (return_code, errored_resource, error_message, unused)
    :rtype: (int, dict or 0, str or 0, str or 0)
    """
    context = dict(context) if context is not None else {}
    context['_import_current_module'] = current_module

    fields = map(fix_import_export_id_paths, fields)
    ir_model_data_obj = self.pool.get('ir.model.data')

    # [definition of the local log() helper partly omitted from excerpt]
        if m['type'] == 'error':
            raise Exception(m['message'])

    # [initialisation of `position` omitted from this excerpt]
    if config.get('import_partial') and filename:
        # Resume a previously-interrupted import from the pickled state.
        with open(config.get('import_partial'), 'rb') as partial_import_file:
            data = pickle.load(partial_import_file)
            position = data.get(filename, 0)

    # [try: opener omitted from this excerpt]
    for res_id, xml_id, res, info in self._convert_records(cr, uid,
            self._extract_records(cr, uid, fields, datas,
                context=context, log=log),
            context=context, log=log):
        ir_model_data_obj._update(cr, uid, self._name,
            current_module, res, mode=mode, xml_id=xml_id,
            noupdate=noupdate, res_id=res_id, context=context)
        position = info.get('rows', {}).get('to', 0) + 1
        if config.get('import_partial') and filename and (not (position%100)):
            # Checkpoint progress every 100 rows so a crash can resume.
            with open(config.get('import_partial'), 'rb') as partial_import:
                data = pickle.load(partial_import)
            data[filename] = position
            with open(config.get('import_partial'), 'wb') as partial_import:
                pickle.dump(data, partial_import)
            if context.get('defer_parent_store_computation'):
                self._parent_store_compute(cr)
            # [commit omitted from this excerpt]
    except Exception, e:
        # [rollback omitted from this excerpt]
        return -1, {}, 'Line %d : %s' % (position + 1, tools.ustr(e)), ''

    if context.get('defer_parent_store_computation'):
        self._parent_store_compute(cr)
    return position, 0, 0, 0
def load(self, cr, uid, fields, data, context=None):
    """
    Attempts to load the data matrix, and returns a list of ids (or
    ``False`` if there was an error and no id could be generated) and a
    list of messages.

    The ids are those of the records created and saved (in database), in
    the same order they were extracted from the file. They can be passed
    directly to :meth:`~read`

    :param fields: list of fields to import, at the same index as the corresponding data
    :type fields: list(str)
    :param data: row-major matrix of data to import
    :type data: list(list(str))
    :param dict context:
    :returns: {ids: list(int)|False, messages: [Message]}
    """
    cr.execute('SAVEPOINT model_load')

    # NOTE(review): initialisation of `messages`, `ids`, `mode`,
    # `current_module` and `noupdate` falls on lines omitted from this
    # excerpt.
    fields = map(fix_import_export_id_paths, fields)
    # NOTE(review): binding the result of clear_caches() looks wrong
    # (it most likely returns None, yet ModelData._update is called
    # below) -- confirm against the upstream source.
    ModelData = self.pool['ir.model.data'].clear_caches()

    fg = self.fields_get(cr, uid, context=context)

    # [source lines omitted from this excerpt]
    for id, xid, record, info in self._convert_records(cr, uid,
            self._extract_records(cr, uid, fields, data,
                context=context, log=messages.append),
            context=context, log=messages.append):
        # [try: opener omitted from this excerpt]
        cr.execute('SAVEPOINT model_load_save')
        except psycopg2.InternalError, e:
            # broken transaction, exit and hope the source error was
            # already logged
            if not any(message['type'] == 'error' for message in messages):
                messages.append(dict(info, type='error',message=
                    u"Unknown database error: '%s'" % e))
            # [break omitted from this excerpt]
        # [try: opener omitted from this excerpt]
            ids.append(ModelData._update(cr, uid, self._name,
                current_module, record, mode=mode, xml_id=xid,
                noupdate=noupdate, res_id=id, context=context))
            cr.execute('RELEASE SAVEPOINT model_load_save')
        except psycopg2.Warning, e:
            messages.append(dict(info, type='warning', message=str(e)))
            cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
        except psycopg2.Error, e:
            messages.append(dict(
                # [argument line omitted from this excerpt]
                **PGERROR_TO_OE[e.pgcode](self, fg, info, e)))
            # Failed to write, log to messages, rollback savepoint (to
            # avoid broken transaction) and keep going
            cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
    if any(message['type'] == 'error' for message in messages):
        cr.execute('ROLLBACK TO SAVEPOINT model_load')
        # [resetting of `ids` omitted from this excerpt]
    return {'ids': ids, 'messages': messages}
def _extract_records(self, cr, uid, fields_, data,
        context=None, log=lambda a: None):
    """ Generates record dicts from the data sequence.

    The result is a generator of dicts mapping field names to raw
    (unconverted, unvalidated) values.

    For relational fields, if sub-fields were provided the value will be
    a list of sub-records

    The following sub-fields may be set on the record (by key):
    * None is the name_get for the record (to use with name_create/name_search)
    * "id" is the External ID for the record
    * ".id" is the Database ID for the record
    """
    columns = dict((k, v.column) for k, v in self._all_columns.iteritems())
    # Fake columns to avoid special cases in extractor
    columns[None] = fields.char('rec_name')
    columns['id'] = fields.char('External ID')
    columns['.id'] = fields.integer('Database ID')

    # m2o fields can't be on multiple lines so exclude them from the
    # is_relational field rows filter, but special-case it later on to
    # be handled with relational fields (as it can have subfields)
    is_relational = lambda field: columns[field]._type in ('one2many', 'many2many', 'many2one')
    get_o2m_values = itemgetter_tuple(
        [index for index, field in enumerate(fields_)
         if columns[field[0]]._type == 'one2many'])
    get_nono2m_values = itemgetter_tuple(
        [index for index, field in enumerate(fields_)
         if columns[field[0]]._type != 'one2many'])
    # Checks if the provided row has any non-empty non-relational field
    def only_o2m_values(row, f=get_nono2m_values, g=get_o2m_values):
        return any(g(row)) and not any(f(row))

    # NOTE(review): the loop driver (index initialisation, `while` header
    # and row fetch) falls on lines omitted from this excerpt.
        if index >= len(data): return

        # copy non-relational fields to record dict
        record = dict((field[0], value)
                      for field, value in itertools.izip(fields_, row)
                      if not is_relational(field[0]))

        # Get all following rows which have relational values attached to
        # the current record (no non-relational values)
        record_span = itertools.takewhile(
            only_o2m_values, itertools.islice(data, index + 1, None))
        # stitch record row back on for relational fields
        record_span = list(itertools.chain([row], record_span))
        for relfield in set(
                field[0] for field in fields_
                if is_relational(field[0])):
            column = columns[relfield]
            # FIXME: how to not use _obj without relying on fields_get?
            Model = self.pool[column._obj]

            # get only cells for this sub-field, should be strictly
            # non-empty, field path [None] is for name_get column
            indices, subfields = zip(*((index, field[1:] or [None])
                                       for index, field in enumerate(fields_)
                                       if field[0] == relfield))

            # return all rows which have at least one value for the
            # subfields of relfield
            relfield_data = filter(any, map(itemgetter_tuple(indices), record_span))
            record[relfield] = [subrecord
                                for subrecord, _subinfo in Model._extract_records(
                                    cr, uid, subfields, relfield_data,
                                    context=context, log=log)]

        yield record, {'rows': {
            # ['from' key omitted from this excerpt]
            'to': index + len(record_span) - 1
        # [closing braces omitted from this excerpt]
        index += len(record_span)
def _convert_records(self, cr, uid, records,
        context=None, log=lambda a: None):
    """ Converts records from the source iterable (recursive dicts of
    strings) into forms which can be written to the database (via
    self.create or (ir.model.data)._update)

    :returns: a list of triplets of (id, xid, record)
    :rtype: list((int|None, str|None, dict))
    """
    if context is None: context = {}
    Converter = self.pool['ir.fields.converter']
    columns = dict((k, v.column) for k, v in self._all_columns.iteritems())
    Translation = self.pool['ir.translation']
    # Human-readable (possibly translated) label per field, used in the
    # error messages built by _log below.
    # [opening `field_names = dict(` omitted from this excerpt]
        (f, (Translation._get_source(cr, uid, self._name + ',' + f, 'field',
            context.get('lang'))
        # [fallback to column.string / closing omitted from this excerpt]
        for f, column in columns.iteritems())

    convert = Converter.for_model(cr, uid, self, context=context)

    def _log(base, field, exception):
        type = 'warning' if isinstance(exception, Warning) else 'error'
        # logs the logical (not human-readable) field name for automated
        # processing of response, but injects human readable in message
        record = dict(base, type=type, field=field,
                      message=unicode(exception.args[0]) % base)
        if len(exception.args) > 1 and exception.args[1]:
            record.update(exception.args[1])
        # [log(record) call omitted from this excerpt]

    stream = CountingStream(records)
    for record, extras in stream:
        # [dbid/xid initialisation omitted from this excerpt]
        # name_get/name_create
        if None in record: pass
        # [external-id / database-id branches partly omitted from excerpt]
        dbid = int(record['.id'])
        # [except ValueError: branch opener omitted from this excerpt]
        # in case of overridden id column
        dbid = record['.id']
        if not self.search(cr, uid, [('id', '=', dbid)], context=context):
            # [log(dict(extras, ...)) call opener omitted from excerpt]
                record=stream.index,
                message=_(u"Unknown database identifier '%s'") % dbid))
        converted = convert(record, lambda field, err:\
            _log(dict(extras, record=stream.index, field=field_names[field]), field, err))
        yield dbid, xid, converted, dict(extras, record=stream.index)
def get_invalid_fields(self, cr, uid):
    """Return the names of the fields that failed the last validation
    pass, as a plain list.

    ``cr`` and ``uid`` are accepted for API uniformity but unused.
    """
    return [invalid for invalid in self._invalids]
def _validate(self, cr, uid, ids, context=None):
    """Run this model's Python _constraints over ``ids`` and raise a
    ValidateError listing every constraint that failed.

    NOTE(review): several guard/accumulator lines are omitted from this
    excerpt; fragments below are preserved as-is, with gaps marked.
    """
    context = context or {}
    lng = context.get('lang')
    trans = self.pool.get('ir.translation')
    # [initialisation of `error_msgs` omitted from this excerpt]
    for constraint in self._constraints:
        fun, msg, fields = constraint
        # [try: opener omitted from this excerpt]
        # We don't pass around the context here: validation code
        # must always yield the same results.
        valid = fun(self, cr, uid, ids)
        # [extra_error reset omitted from this excerpt]
        except Exception, e:
            _logger.debug('Exception while validating constraint', exc_info=True)
            # [valid = False omitted from this excerpt]
            extra_error = tools.ustr(e)
        # [`if not valid:` guard omitted from this excerpt]
        # Check presence of __call__ directly instead of using
        # callable() because it will be deprecated as of Python 3.0
        if hasattr(msg, '__call__'):
            tmp_msg = msg(self, cr, uid, ids, context=context)
            if isinstance(tmp_msg, tuple):
                tmp_msg, params = tmp_msg
                translated_msg = tmp_msg % params
            # [else-branch opener omitted from this excerpt]
                translated_msg = tmp_msg
        # [else-branch opener omitted from this excerpt]
            translated_msg = trans._get_source(cr, uid, self._name, 'constraint', lng, msg)
        # [`if extra_error:` guard omitted from this excerpt]
            translated_msg += "\n\n%s\n%s" % (_('Error details:'), extra_error)
        # [error_msgs.append( opener omitted from this excerpt]
            _("The field(s) `%s` failed against a constraint: %s") % (', '.join(fields), translated_msg)
        self._invalids.update(fields)
    # [`if error_msgs:` guard omitted from this excerpt]
        raise except_orm('ValidateError', '\n'.join(error_msgs))
    # [else-branch opener omitted from this excerpt]
        self._invalids.clear()
def default_get(self, cr, uid, fields_list, context=None):
    """
    Returns default values for the fields in fields_list.

    :param fields_list: list of fields to get the default values for (example ['field1', 'field2',])
    :type fields_list: list
    :param context: optional context dictionary - it may contains keys for specifying certain options
                    like ``context_lang`` (language) or ``context_tz`` (timezone) to alter the results of the call.
                    It may contain keys in the form ``default_XXX`` (where XXX is a field name), to set
                    or override a default value for a field.
                    A special ``bin_size`` boolean flag may also be passed in the context to request the
                    value of all fields.binary columns to be returned as the size of the binary instead of its
                    contents. This can also be selectively overriden by passing a field-specific flag
                    in the form ``bin_size_XXX: True/False`` where ``XXX`` is the name of the field.
                    Note: The ``bin_size_XXX`` form is new in OpenERP v6.0.
    :return: dictionary of the default values (set on the object model class, through user preferences, or in the context)
    """
    # trigger view init hook
    self.view_init(cr, uid, fields_list, context)

    # NOTE(review): initialisation of `defaults` falls on lines omitted
    # from this excerpt.
    # get the default values for the inherited fields
    for t in self._inherits.keys():
        defaults.update(self.pool[t].default_get(cr, uid, fields_list, context))

    # get the default values defined in the object
    for f in fields_list:
        if f in self._defaults:
            if callable(self._defaults[f]):
                defaults[f] = self._defaults[f](self, cr, uid, context)
            # [else-branch opener omitted from this excerpt]
                defaults[f] = self._defaults[f]

        # Resolve the column object, looking through _inherits if needed.
        fld_def = ((f in self._columns) and self._columns[f]) \
            or ((f in self._inherit_fields) and self._inherit_fields[f][2]) \
        # [final fallback / closing omitted from this excerpt]
        if isinstance(fld_def, fields.property):
            # property fields fetch their default from ir.property
            property_obj = self.pool.get('ir.property')
            prop_value = property_obj.get(cr, uid, f, self._name, context=context)
            # [guard omitted from this excerpt]
            if isinstance(prop_value, (browse_record, browse_null)):
                defaults[f] = prop_value.id
            # [else-branch opener omitted from this excerpt]
                defaults[f] = prop_value

        if f not in defaults:
            # [assignment omitted from this excerpt]

    # get the default values set by the user and override the default
    # values defined in the object
    ir_values_obj = self.pool.get('ir.values')
    res = ir_values_obj.get(cr, uid, 'default', False, [self._name])
    for id, field, field_value in res:
        if field in fields_list:
            fld_def = (field in self._columns) and self._columns[field] or self._inherit_fields[field][2]
            if fld_def._type == 'many2one':
                # Drop dangling references to deleted records.
                obj = self.pool[fld_def._obj]
                if not obj.search(cr, uid, [('id', '=', field_value or False)]):
                    # [continue omitted from this excerpt]
            if fld_def._type == 'many2many':
                obj = self.pool[fld_def._obj]
                # [field_value2 initialisation omitted from this excerpt]
                for i in range(len(field_value or [])):
                    if not obj.search(cr, uid, [('id', '=',
                        # [closing / continue omitted from this excerpt]
                    field_value2.append(field_value[i])
                field_value = field_value2
            if fld_def._type == 'one2many':
                obj = self.pool[fld_def._obj]
                # [field_value2 initialisation omitted from this excerpt]
                for i in range(len(field_value or [])):
                    field_value2.append({})
                    for field2 in field_value[i]:
                        if field2 in obj._columns.keys() and obj._columns[field2]._type == 'many2one':
                            obj2 = self.pool[obj._columns[field2]._obj]
                            if not obj2.search(cr, uid,
                                    [('id', '=', field_value[i][field2])]):
                                # [continue omitted from this excerpt]
                        elif field2 in obj._inherit_fields.keys() and obj._inherit_fields[field2][2]._type == 'many2one':
                            obj2 = self.pool[obj._inherit_fields[field2][2]._obj]
                            if not obj2.search(cr, uid,
                                    [('id', '=', field_value[i][field2])]):
                                # [continue omitted from this excerpt]
                        # TODO add test for many2many and one2many
                        field_value2[i][field2] = field_value[i][field2]
                field_value = field_value2
            defaults[field] = field_value

    # get the default values from the context
    for key in context or {}:
        if key.startswith('default_') and (key[8:] in fields_list):
            defaults[key[8:]] = context[key]
    # [return of `defaults` omitted from this excerpt]
def fields_get_keys(self, cr, user, context=None):
    """Return the names of this model's own columns plus those of all
    ``_inherits`` parents (recursively)."""
    res = self._columns.keys()
    # TODO I believe this loop can be replaced by
    # res.extend(self._inherit_fields.keys())
    for parent in self._inherits:
        res.extend(self.pool[parent].fields_get_keys(cr, user, context))
    # NOTE(review): `return res` falls on a line omitted from this excerpt.
def _rec_name_fallback(self, cr, uid, context=None):
    """Return ``_rec_name`` when it is a real column, otherwise fall back
    to the first declared column (or ``"id"`` when there is none)."""
    rec_name = self._rec_name
    if rec_name not in self._columns:
        rec_name = self._columns.keys()[0] if len(self._columns.keys()) > 0 else "id"
    # NOTE(review): `return rec_name` falls on a line omitted from this
    # excerpt.
# Overload this method if you need a window title which depends on the context
def view_header_get(self, cr, user, view_id=None, view_type='form', context=None):
    # NOTE(review): the method body falls on lines omitted from this
    # excerpt (presumably a trivial falsy default -- confirm upstream).
def user_has_groups(self, cr, uid, groups, context=None):
    """Return True if the user is at least member of one of the groups
    in ``groups``. Typically used to resolve ``groups`` attributes
    in view and model definitions.

    :param str groups: comma-separated list of fully-qualified group
                       external IDs, e.g.: ``base.group_user,base.group_system``
    :param context: unused, kept for API uniformity
    :return: True if the current user is a member of one of the given
             groups, False otherwise
    """
    # Fetch the res.users model once instead of once per group.
    Users = self.pool.get('res.users')
    # Pass a generator to any() so it short-circuits on the first match;
    # the previous implementation built a full list first, calling
    # has_group() for every group even after a membership was found.
    return any(Users.has_group(cr, uid, group_ext_id)
               for group_ext_id in groups.split(','))
def _get_default_form_view(self, cr, user, context=None):
    """ Generates a default single-line form view using all fields
    of the current model except the m2m and o2m ones.

    :param cr: database cursor
    :param int user: user id
    :param dict context: connection context
    :returns: a form view as an lxml document
    :rtype: etree._Element
    """
    view = etree.Element('form', string=self._description)
    # TODO it seems fields_get can be replaced by _all_columns (no need for translation)
    for field, descriptor in self.fields_get(cr, user, context=context).iteritems():
        if descriptor['type'] in ('one2many', 'many2many'):
            # [`continue` falls on a line omitted from this excerpt]
        etree.SubElement(view, 'field', name=field)
        if descriptor['type'] == 'text':
            etree.SubElement(view, 'newline')
    # NOTE(review): `return view` falls on a line omitted from this
    # excerpt.
def _get_default_search_view(self, cr, user, context=None):
    """ Generates a single-field search view, based on _rec_name.

    :param cr: database cursor
    :param int user: user id
    :param dict context: connection context
    :returns: a search view as an lxml document
    :rtype: etree._Element
    """
    view = etree.Element('search', string=self._description)
    etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))
    # NOTE(review): `return view` falls on a line omitted from this
    # excerpt.
def _get_default_tree_view(self, cr, user, context=None):
    """ Generates a single-field tree view, based on _rec_name.

    :param cr: database cursor
    :param int user: user id
    :param dict context: connection context
    :returns: a tree view as an lxml document
    :rtype: etree._Element
    """
    view = etree.Element('tree', string=self._description)
    etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))
    # NOTE(review): `return view` falls on a line omitted from this
    # excerpt.
def _get_default_calendar_view(self, cr, user, context=None):
    """ Generates a default calendar view by trying to infer
    calendar fields from a number of pre-set attribute names

    :param cr: database cursor
    :param int user: user id
    :param dict context: connection context
    :returns: a calendar view
    :rtype: etree._Element
    """
    def set_first_of(seq, in_, to):
        """Sets the first value of ``seq`` also found in ``in_`` to
        the ``to`` attribute of the view being closed over.

        Returns whether it's found a suitable value (and set it on
        the attribute) or not
        """
        # NOTE(review): the helper's body falls on lines omitted from
        # this excerpt.

    view = etree.Element('calendar', string=self._description)
    etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))

    if self._date_name not in self._columns:
        # Probe conventional column names for the start-date field.
        # [tracking-flag initialisation omitted from this excerpt]
        for dt in ['date', 'date_start', 'x_date', 'x_date_start']:
            if dt in self._columns:
                self._date_name = dt
                # [break / not-found guard omitted from this excerpt]
        raise except_orm(_('Invalid Object Architecture!'), _("Insufficient fields for Calendar View!"))
    view.set('date_start', self._date_name)

    set_first_of(["user_id", "partner_id", "x_user_id", "x_partner_id"],
                 self._columns, 'color')

    if not set_first_of(["date_stop", "date_end", "x_date_stop", "x_date_end"],
                        self._columns, 'date_stop'):
        if not set_first_of(["date_delay", "planned_hours", "x_date_delay", "x_planned_hours"],
                            self._columns, 'date_delay'):
            # [raise except_orm( opener omitted from this excerpt]
                _('Invalid Object Architecture!'),
                _("Insufficient fields to generate a Calendar View for %s, missing a date_stop or a date_delay" % self._name))
    # NOTE(review): `return view` falls on a line omitted from this
    # excerpt.
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
    """
    Get the detailed composition of the requested view like fields, model, view architecture

    :param view_id: id of the view or None
    :param view_type: type of the view to return if view_id is None ('form', 'tree', ...)
    :param toolbar: true to include contextual actions
    :param submenu: deprecated
    :return: dictionary describing the composition of the requested view (including inherited views and extensions)
    :raise AttributeError:
        * if the inherited view has unknown position to work with other than 'before', 'after', 'inside', 'replace'
        * if some tag other than 'position' is found in parent view
    :raise Invalid ArchitectureError: if there is view type other than form, tree, calendar, search etc defined on the structure
    """
    View = self.pool['ir.ui.view']
    # Seed result dict (the `result = {` opener is omitted from excerpt).
        'model': self._name,
        'field_parent': False,
    # [closing brace omitted from this excerpt]

    # try to find a view_id if none provided
    # [`if view_id is None:` guard omitted from this excerpt]
    # <view_type>_view_ref in context can be used to override the default view
    view_ref_key = view_type + '_view_ref'
    view_ref = context.get(view_ref_key)
    # [guards omitted from this excerpt]
    module, view_ref = view_ref.split('.', 1)
    cr.execute("SELECT res_id FROM ir_model_data WHERE model='ir.ui.view' AND module=%s AND name=%s", (module, view_ref))
    view_ref_res = cr.fetchone()
    # [guard omitted from this excerpt]
    view_id = view_ref_res[0]
    # [else-branch opener omitted from this excerpt]
    _logger.warning('%r requires a fully-qualified external id (got: %r for model %s). '
        'Please use the complete `module.view_id` form instead.', view_ref_key, view_ref,
    # [trailing argument / `if not view_id:` omitted from this excerpt]

    # otherwise try to find the lowest priority matching ir.ui.view
    view_id = View.default_view(cr, uid, self._name, view_type, context=context)

    # context for post-processing might be overridden
    # [ctx initialisation / `if view_id:` omitted from this excerpt]
    # read the view with inherited views applied
    root_view = View.read_combined(cr, uid, view_id, fields=['id', 'name', 'field_parent', 'type', 'model', 'arch'], context=context)
    result['arch'] = root_view['arch']
    result['name'] = root_view['name']
    result['type'] = root_view['type']
    result['view_id'] = root_view['id']
    result['field_parent'] = root_view['field_parent']
    # override context for postprocessing
    if root_view.get('model') != self._name:
        ctx = dict(context, base_model_name=root_view.get('model'))
    # [else-branch opener omitted from this excerpt]
    # fallback on default views methods if no ir.ui.view could be found
    # [try: opener omitted from this excerpt]
    get_func = getattr(self, '_get_default_%s_view' % view_type)
    arch_etree = get_func(cr, uid, context)
    result['arch'] = etree.tostring(arch_etree, encoding='utf-8')
    result['type'] = view_type
    result['name'] = 'default'
    except AttributeError:
        raise except_orm(_('Invalid Architecture!'), _("No default view of type '%s' could be found !") % view_type)

    # Apply post processing, groups and modifiers etc...
    xarch, xfields = View.postprocess_and_fields(cr, uid, self._name, etree.fromstring(result['arch']), view_id, context=ctx)
    result['arch'] = xarch
    result['fields'] = xfields

    # Add related action information if asked
    # [`if toolbar:` guard omitted from this excerpt]
    toclean = ('report_sxw_content', 'report_rml_content', 'report_sxw', 'report_rml', 'report_sxw_content_data', 'report_rml_content_data')
    # [definition of the local clean() helper omitted from this excerpt]
    ir_values_obj = self.pool.get('ir.values')
    resprint = ir_values_obj.get(cr, uid, 'action', 'client_print_multi', [(self._name, False)], False, context)
    resaction = ir_values_obj.get(cr, uid, 'action', 'client_action_multi', [(self._name, False)], False, context)
    resrelate = ir_values_obj.get(cr, uid, 'action', 'client_action_relate', [(self._name, False)], False, context)
    resaction = [clean(action) for action in resaction if view_type == 'tree' or not action[2].get('multi')]
    resprint = [clean(print_) for print_ in resprint if view_type == 'tree' or not print_[2].get('multi')]
    #When multi="True" set it will display only in More of the list view
    resrelate = [clean(action) for action in resrelate
                 if (action[2].get('multi') and view_type == 'tree') or (not action[2].get('multi') and view_type == 'form')]

    for x in itertools.chain(resprint, resaction, resrelate):
        x['string'] = x['name']

    result['toolbar'] = {
        # ['print' key omitted from this excerpt]
        'action': resaction,
    # [remaining keys / closing / return omitted from this excerpt]
    def get_formview_id(self, cr, uid, id, context=None):
        """ Return the view id of the form view with which to open the document.
        This method is meant to be overridden in addons that want to give
        specific view ids, for example.

        :param int id: id of the document to open
        :return: a form view id, or a falsy value to use the default form view
        """
        # NOTE(review): the body is truncated in this excerpt; confirm against
        # the full source (the base implementation returns False so callers
        # fall back to the model's default form view).
    def get_formview_action(self, cr, uid, id, context=None):
        """ Return an ir.actions.act_window action opening the document in a
        form view. This method is meant to be overridden in addons that want
        to give specific view ids for example.

        :param int id: id of the document to open
        :return: dict describing an ``ir.actions.act_window`` action
        """
        # Let overrides pick a specific form view; a falsy id means default.
        view_id = self.get_formview_id(cr, uid, id, context=context)
        # NOTE(review): the `return {` line (and likely a 'res_id': id entry
        # and closing brace) appear truncated in this excerpt; the keys below
        # build the act_window action dictionary.
        'type': 'ir.actions.act_window',
        'res_model': self._name,
        'view_type': 'form',
        'view_mode': 'form',
        'views': [(view_id, 'form')],
        'target': 'current',
1943 def _view_look_dom_arch(self, cr, uid, node, view_id, context=None):
1944 return self.pool['ir.ui.view'].postprocess_and_fields(
1945 cr, uid, self._name, node, view_id, context=context)
    def search_count(self, cr, user, args, context=None):
        """Count the records matching the ``args`` search domain.

        Equivalent to :meth:`~.search` called with ``count=True``.

        :param args: search domain (see :meth:`~.search`)
        :return: number of records matching the domain
        """
        res = self.search(cr, user, args, context=context, count=True)
        # search(count=True) normally returns an integer, but some overrides
        # may return a list of ids; normalize below.
        if isinstance(res, list):
            # NOTE(review): branch body truncated in this excerpt (the
            # original returns len(res) here, then res) -- confirm.
    def search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False):
        """
        Search for records based on a search domain.

        :param cr: database cursor
        :param user: current user id
        :param args: list of tuples specifying the search domain
                     ``[('field_name', 'operator', value), ...]``. Pass an
                     empty list to match all records.
        :param offset: optional number of results to skip in the returned values (default: 0)
        :param limit: optional max number of records to return (default: **None**)
        :param order: optional columns to sort by (default: self._order=id)
        :param context: optional context arguments, like lang, time zone
        :type context: dictionary
        :param count: optional (default: **False**), if **True**, returns only
                      the number of records matching the criteria, not their ids
        :return: id or list of ids of records matching the criteria
        :rtype: integer or list of integers
        :raise AccessError: * if user tries to bypass access rules for read on the requested object.

        **Expressing a search domain (args)**

        Each tuple in the search domain needs to have 3 elements, in the form
        **('field_name', 'operator', value)**, where:

            * **field_name** must be a valid name of field of the object model,
              possibly following many-to-one relationships using dot-notation,
              e.g 'street' or 'partner_id.country' are valid values.
            * **operator** must be a string with a valid comparison operator from this list:
              ``=, !=, >, >=, <, <=, like, ilike, in, not in, child_of, parent_left, parent_right``
              The semantics of most of these operators are obvious.
              The ``child_of`` operator will look for records who are children or grand-children of a given record,
              according to the semantics of this model (i.e following the relationship field named by
              ``self._parent_name``, by default ``parent_id``).
            * **value** must be a valid value to compare with the values of
              **field_name**, depending on its type.

        Domain criteria can be combined using 3 logical operators than can be added between tuples:
        '**&**' (logical AND, default), '**|**' (logical OR), '**!**' (logical NOT).
        These are **prefix** operators and the arity of the '**&**' and '**|**' operator is 2,
        while the arity of the '**!**' is just 1.
        Be very careful about this when you combine them the first time.

        Here is an example of searching for Partners named *ABC* from Belgium
        and Germany whose language is not english::

            [('name','=','ABC'),'!',('language.code','=','en_US'),'|',('country_id.code','=','be'),('country_id.code','=','de')]

        The '&' is omitted as it is the default, and of course we could have
        used '!=' for the language, but what this domain really represents is::

            (name is 'ABC' AND (language is NOT english) AND (country is Belgium OR Germany))
        """
        # Delegate to the private implementation, which also applies access
        # rights and record rules.
        return self._search(cr, user, args, offset=offset, limit=limit, order=order, context=context, count=count)
    def name_get(self, cr, user, ids, context=None):
        """Returns the preferred display value (text representation) for the
        records with the given ``ids``. By default this will be the value of
        the ``name`` column, unless the model implements a custom behavior.

        Can sometimes be seen as the inverse function of
        :meth:`~.name_search`, but it is not guaranteed to be.

        :return: list of pairs ``(id, text_repr)`` for all records with the
                 given ``ids``.
        """
        # NOTE(review): guard lines appear truncated in this excerpt (the
        # original returns [] for empty ids, and wraps a scalar id in a list
        # under the isinstance check below) -- confirm against full source.
        if isinstance(ids, (int, long)):
        if self._rec_name in self._all_columns:
            # Delegate per-value formatting to the column object.
            rec_name_column = self._all_columns[self._rec_name].column
            return [(r['id'], rec_name_column.as_display_name(cr, user, self, r[self._rec_name], context=context))
                    for r in self.read(cr, user, ids, [self._rec_name],
                                       load='_classic_write', context=context)]
        # Fallback when the model has no usable _rec_name column.
        return [(id, "%s,%s" % (self._name, id)) for id in ids]
    def name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100):
        """Search for records that have a display name matching the given
        ``name`` pattern when compared with the given ``operator``, while also
        matching the optional search domain (``args``).

        This is used for example to provide suggestions based on a partial
        value for a relational field. Can sometimes be seen as the inverse
        function of :meth:`~.name_get`, but it is not guaranteed to be.

        This method is equivalent to calling :meth:`~.search` with a search
        domain based on ``name`` and then :meth:`~.name_get` on the result of
        the search.

        :param str name: the name pattern to match
        :param list args: optional search domain (see :meth:`~.search` for
                          syntax), specifying further restrictions
        :param str operator: domain operator for matching the ``name``
                             pattern, such as ``'like'`` or ``'='``
        :param int limit: optional max number of records to return
        :return: list of pairs ``(id, text_repr)`` for all matching records.
        """
        return self._name_search(cr, user, name, args, operator, context, limit)
2040 def name_create(self, cr, uid, name, context=None):
2041 """Creates a new record by calling :meth:`~.create` with only one
2042 value provided: the name of the new record (``_rec_name`` field).
2043 The new record will also be initialized with any default values applicable
2044 to this model, or provided through the context. The usual behavior of
2045 :meth:`~.create` applies.
2046 Similarly, this method may raise an exception if the model has multiple
2047 required fields and some do not have default values.
2049 :param name: name of the record to create
2052 :return: the :meth:`~.name_get` pair value for the newly-created record.
2054 rec_id = self.create(cr, uid, {self._rec_name: name}, context)
2055 return self.name_get(cr, uid, [rec_id], context)[0]
    # Private implementation of name_search; allows passing a dedicated user
    # for the name_get part, to solve some access rights issues.
    def _name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100, name_get_uid=None):
        # NOTE(review): the initialization of ``args`` (defaulting None to []
        # and copying the list) appears truncated in this excerpt -- confirm.
        # optimize out the default criterion of ``ilike ''`` that matches everything
        if not (name == '' and operator == 'ilike'):
            args += [(self._rec_name, operator, name)]
        # name_get may run as a different (more privileged) user than the search.
        access_rights_uid = name_get_uid or user
        ids = self._search(cr, user, args, limit=limit, context=context, access_rights_uid=access_rights_uid)
        res = self.name_get(cr, access_rights_uid, ids, context)
        # NOTE(review): the trailing ``return res`` appears truncated here.
    def read_string(self, cr, uid, id, langs, fields=None, context=None):
        """Read the translated ``string`` attribute of the given fields for
        each language in ``langs``, including fields inherited through
        ``_inherits``.

        :return: dict mapping each lang code to a dict of field translations
                 (per the recursive merge below); structure partially
                 truncated in this excerpt -- confirm against full source.
        """
        self.pool.get('ir.translation').check_access_rights(cr, uid, 'read')
        # NOTE(review): the original only does this when ``fields`` is falsy;
        # the guard line appears truncated in this excerpt.
        fields = self._columns.keys() + self._inherit_fields.keys()
        #FIXME: collect all calls to _get_source into one SQL call.
        # NOTE(review): the surrounding ``for lang in langs:`` / ``for f in
        # fields:`` loop headers appear truncated below.
        res[lang] = {'code': lang}
        if f in self._columns:
            res_trans = self.pool.get('ir.translation')._get_source(cr, uid, self._name+','+f, 'field', lang)
            # Fall back to the untranslated label when no translation exists.
            res[lang][f] = res_trans
            res[lang][f] = self._columns[f].string
        # Recurse into _inherits parents for their own field labels.
        for table in self._inherits:
            cols = intersect(self._inherit_fields.keys(), fields)
            res2 = self.pool[table].read_string(cr, uid, id, langs, cols, context)
            res[lang]['code'] = lang
            for f in res2[lang]:
                res[lang][f] = res2[lang][f]
    def write_string(self, cr, uid, id, langs, vals, context=None):
        """Write translated ``string`` labels for the given fields, for each
        language in ``langs``, propagating to ``_inherits`` parents.
        """
        self.pool.get('ir.translation').check_access_rights(cr, uid, 'write')
        #FIXME: try to only call the translation in one SQL
        # NOTE(review): the ``for lang in langs:`` / ``for field in vals:``
        # loop headers appear truncated in this excerpt.
        if field in self._columns:
            # Keep the untranslated label as the translation source.
            src = self._columns[field].string
            self.pool.get('ir.translation')._set_ids(cr, uid, self._name+','+field, 'field', lang, [0], vals[field], src)
        # Propagate to _inherits parents for their own fields.
        for table in self._inherits:
            cols = intersect(self._inherit_fields.keys(), vals)
            self.pool[table].write_string(cr, uid, id, langs, vals, context)
    def _add_missing_default_values(self, cr, uid, values, context=None):
        """Complete ``values`` with default values for every model field that
        is not already present, converting m2m/o2m defaults to the command
        tuple format expected by create/write.
        """
        missing_defaults = []
        avoid_tables = [] # avoid overriding inherited values when parent is set
        for tables, parent_field in self._inherits.items():
            if parent_field in values:
                avoid_tables.append(tables)
        # Collect every own column not explicitly provided...
        for field in self._columns.keys():
            if not field in values:
                missing_defaults.append(field)
        # ...and every inherited field, unless its parent record is given.
        for field in self._inherit_fields.keys():
            if (field not in values) and (self._inherit_fields[field][0] not in avoid_tables):
                missing_defaults.append(field)

        if len(missing_defaults):
            # override defaults with the provided values, never allow the other way around
            defaults = self.default_get(cr, uid, missing_defaults, context)
            # NOTE(review): the ``for dv in defaults:`` loop header appears
            # truncated in this excerpt.
            # m2m default given as a list of ids -> wrap in a (6,0,ids) command.
            if ((dv in self._columns and self._columns[dv]._type == 'many2many') \
                    or (dv in self._inherit_fields and self._inherit_fields[dv][2]._type == 'many2many')) \
                    and defaults[dv] and isinstance(defaults[dv][0], (int, long)):
                defaults[dv] = [(6, 0, defaults[dv])]
            # o2m default given as a list of dicts -> wrap each in a (0,0,vals) command.
            if (dv in self._columns and self._columns[dv]._type == 'one2many' \
                    or (dv in self._inherit_fields and self._inherit_fields[dv][2]._type == 'one2many')) \
                    and isinstance(defaults[dv], (list, tuple)) and defaults[dv] and isinstance(defaults[dv][0], dict):
                defaults[dv] = [(0, 0, x) for x in defaults[dv]]
            defaults.update(values)
            # NOTE(review): the trailing assignment/return appears truncated
            # here (the original returns the merged dict).
    def clear_caches(self):
        """ Clear the caches.

        This clears the caches associated to methods decorated with
        ``tools.ormcache`` or ``tools.ormcache_multi``.
        """
        # NOTE(review): the surrounding ``try:`` and the actual cache clear
        # call appear truncated in this excerpt; the getattr probe is used to
        # detect whether the model has an ormcache at all.
        getattr(self, '_ormcache')
        # Signal the registry that at least one model cache was invalidated.
        self.pool._any_cache_cleared = True
        except AttributeError:
    def _read_group_fill_results(self, cr, uid, domain, groupby, remaining_groupbys, aggregated_fields,
                                 read_group_result, read_group_order=None, context=None):
        """Helper method for filling in empty groups for all possible values of
           the field being grouped by."""

        # self._group_by_full should map groupable fields to a method that returns
        # a list of all aggregated values that we want to display for this field,
        # in the form of a m2o-like pair (key,label).
        # This is useful to implement kanban views for instance, where all columns
        # should be displayed even if they don't contain any record.

        # Grab the list of all groups that should be displayed, including all present groups
        present_group_ids = [x[groupby][0] for x in read_group_result if x[groupby]]
        # NOTE(review): the closing argument/parenthesis of this call appears
        # truncated in this excerpt (the original also passes context).
        all_groups,folded = self._group_by_full[groupby](self, cr, uid, present_group_ids, domain,
                                                         read_group_order=read_group_order,
                                                         access_rights_uid=openerp.SUPERUSER_ID,

        # Template for a group row with no records: all aggregates False, count 0.
        result_template = dict.fromkeys(aggregated_fields, False)
        result_template[groupby + '_count'] = 0
        if remaining_groupbys:
            result_template['__context'] = {'group_by': remaining_groupbys}

        # Merge the left_side (current results as dicts) with the right_side (all
        # possible values as m2o pairs). Both lists are supposed to be using the
        # same ordering, and can be merged in one pass.
        # NOTE(review): initialization of ``result`` and ``known_values``
        # appears truncated in this excerpt.
        def append_left(left_side):
            grouped_value = left_side[groupby] and left_side[groupby][0]
            if not grouped_value in known_values:
                result.append(left_side)
                known_values[grouped_value] = left_side
            # NOTE(review): the ``else:`` of this branch appears truncated.
                count_attr = groupby + '_count'
                known_values[grouped_value].update({count_attr: left_side[count_attr]})
        def append_right(right_side):
            grouped_value = right_side[0]
            if not grouped_value in known_values:
                line = dict(result_template)
                line[groupby] = right_side
                line['__domain'] = [(groupby,'=',grouped_value)] + domain
                # NOTE(review): the ``result.append(line)`` appears truncated.
                known_values[grouped_value] = line
        # One-pass ordered merge of actual results with the full group list.
        while read_group_result or all_groups:
            left_side = read_group_result[0] if read_group_result else None
            right_side = all_groups[0] if all_groups else None
            assert left_side is None or left_side[groupby] is False \
                 or isinstance(left_side[groupby], (tuple,list)), \
                'M2O-like pair expected, got %r' % left_side[groupby]
            assert right_side is None or isinstance(right_side, (tuple,list)), \
                'M2O-like pair expected, got %r' % right_side
            if left_side is None:
                append_right(all_groups.pop(0))
            elif right_side is None:
                append_left(read_group_result.pop(0))
            elif left_side[groupby] == right_side:
                append_left(read_group_result.pop(0))
                all_groups.pop(0) # discard right_side
            elif not left_side[groupby] or not left_side[groupby][0]:
                # left side == "Undefined" entry, not present on right_side
                append_left(read_group_result.pop(0))
            # NOTE(review): the final ``else:`` appears truncated here.
                append_right(all_groups.pop(0))

        # Annotate each group with its folded state for kanban views.
        # NOTE(review): the enclosing ``if folded: for r in result:`` and the
        # trailing ``return result`` appear truncated in this excerpt.
                r['__fold'] = folded.get(r[groupby] and r[groupby][0], False)
    def _read_group_prepare(self, orderby, aggregated_fields, annotated_groupbys, query):
        """
        Prepares the GROUP BY and ORDER BY terms for the read_group method.
        Adds the missing JOIN clause to the query if order should be computed
        against a m2o field.

        :param orderby: the orderby definition in the form "%(field)s %(order)s"
        :param aggregated_fields: list of aggregated fields in the query
        :param annotated_groupbys: list of dictionaries returned by
                                   _read_group_process_groupby.  These
                                   dictionaries contain the qualified name of
                                   each groupby (fully qualified SQL name for
                                   the corresponding field) and the (non raw)
                                   field name.
        :param osv.Query query: the query under construction
        :return: (groupby_terms, orderby_terms)
        """
        groupby_terms = [gb['qualified_field'] for gb in annotated_groupbys]
        groupby_fields = [gb['groupby'] for gb in annotated_groupbys]
        # NOTE(review): initialization of ``orderby_terms`` and the
        # ``if not orderby:`` guard around this early return appear truncated
        # in this excerpt.
        return groupby_terms, orderby_terms

        # Validate the order spec syntax, then translate each part either to a
        # groupby column, an aggregate, or drop it with a warning.
        self._check_qorder(orderby)
        for order_part in orderby.split(','):
            order_split = order_part.split()
            order_field = order_split[0]
            if order_field in groupby_fields:
                # Ordering on a m2o groupby requires a JOIN on the co-model.
                if self._all_columns[order_field.split(':')[0]].column._type == 'many2one':
                    order_clause = self._generate_order_by(order_part, query).replace('ORDER BY ', '')
                    # NOTE(review): an ``if order_clause:`` guard appears
                    # truncated in this excerpt.
                    orderby_terms.append(order_clause)
                    groupby_terms += [order_term.split()[0] for order_term in order_clause.split(',')]
                # NOTE(review): the ``else:`` branch header appears truncated.
                    order = '"%s" %s' % (order_field, '' if len(order_split) == 1 else order_split[1])
                    orderby_terms.append(order)
            elif order_field in aggregated_fields:
                orderby_terms.append(order_part)
            # NOTE(review): the final ``else:`` header appears truncated.
                # Cannot order by a field that will not appear in the results (needs to be grouped or aggregated)
                _logger.warn('%s: read_group order by `%s` ignored, cannot sort on empty columns (not grouped/aggregated)',
                             self._name, order_part)
        return groupby_terms, orderby_terms
    def _read_group_process_groupby(self, gb, query, context):
        """
        Helper method to collect important information about groupbys: raw
        field name, type, time information, qualified name, ...

        ``gb`` is either a plain field name or ``'field:function'`` where the
        function is a date/datetime granularity (day/week/month/quarter/year).
        """
        split = gb.split(':')
        field_type = self._all_columns[split[0]].column._type
        gb_function = split[1] if len(split) == 2 else None
        temporal = field_type in ('date', 'datetime')
        # Only convert timezones when the context tz is a known pytz zone.
        tz_convert = field_type == 'datetime' and context.get('tz') in pytz.all_timezones
        qualified_field = self._inherits_join_calc(split[0], query)
        # Babel display format per granularity.
        # NOTE(review): the ``display_formats = {`` opener and the 'year'
        # entry appear truncated in this excerpt.
        'day': 'dd MMM YYYY',
        'week': "'W'w YYYY",
        'month': 'MMMM YYYY',
        'quarter': 'QQQ YYYY',
        # Interval added to a group start to get the next group's start.
        # NOTE(review): the ``time_intervals = {`` opener appears truncated.
        'day': dateutil.relativedelta.relativedelta(days=1),
        'week': datetime.timedelta(days=7),
        'month': dateutil.relativedelta.relativedelta(months=1),
        'quarter': dateutil.relativedelta.relativedelta(months=3),
        'year': dateutil.relativedelta.relativedelta(years=1)
        # Truncate temporal values in SQL (after optional tz conversion).
        # NOTE(review): the enclosing ``if temporal:``/``if tz_convert:``
        # guards appear truncated in this excerpt.
        qualified_field = "timezone('%s', timezone('UTC',%s))" % (context.get('tz', 'UTC'), qualified_field)
        qualified_field = "date_trunc('%s', %s)" % (gb_function or 'month', qualified_field)
        if field_type == 'boolean':
            qualified_field = "coalesce(%s,false)" % qualified_field
        # NOTE(review): the ``return {`` opener and the field/type/groupby
        # entries appear truncated in this excerpt.
        'display_format': display_formats[gb_function or 'month'] if temporal else None,
        'interval': time_intervals[gb_function or 'month'] if temporal else None,
        'tz_convert': tz_convert,
        'qualified_field': qualified_field
    def _read_group_prepare_data(self, key, value, groupby_dict, context):
        """
        Helper method to sanitize the data received by read_group. The None
        values are converted to False, and the date/datetime are formatted,
        and corrected according to the timezones.
        """
        value = False if value is None else value
        gb = groupby_dict.get(key)
        if gb and gb['type'] in ('date', 'datetime') and value:
            # Values from SQL may come back as strings; parse them first.
            if isinstance(value, basestring):
                dt_format = DEFAULT_SERVER_DATETIME_FORMAT if gb['type'] == 'datetime' else DEFAULT_SERVER_DATE_FORMAT
                value = datetime.datetime.strptime(value, dt_format)
            if gb['tz_convert']:
                # Attach the user's timezone so display formatting is local.
                value = pytz.timezone(context['tz']).localize(value)
        # NOTE(review): the trailing ``return value`` appears truncated in
        # this excerpt.
    def _read_group_get_domain(self, groupby, value):
        """
        Helper method to construct the domain corresponding to a groupby and
        a given value. This is mostly relevant for date/datetime.
        """
        if groupby['type'] in ('date', 'datetime') and value:
            dt_format = DEFAULT_SERVER_DATETIME_FORMAT if groupby['type'] == 'datetime' else DEFAULT_SERVER_DATE_FORMAT
            # A temporal group covers [value, value + interval).
            domain_dt_begin = value
            domain_dt_end = value + groupby['interval']
            if groupby['tz_convert']:
                # Convert the localized bounds back to UTC for the SQL domain.
                domain_dt_begin = domain_dt_begin.astimezone(pytz.utc)
                domain_dt_end = domain_dt_end.astimezone(pytz.utc)
            return [(groupby['field'], '>=', domain_dt_begin.strftime(dt_format)),
                    (groupby['field'], '<', domain_dt_end.strftime(dt_format))]
        if groupby['type'] == 'many2one' and value:
            # NOTE(review): the ``value = value[0]`` line (extracting the id
            # from the m2o pair) appears truncated in this excerpt.
        return [(groupby['field'], '=', value)]
    def _read_group_format_result(self, data, annotated_groupbys, groupby, groupby_dict, domain, context):
        """
        Helper method to format the data contained in the dictionary data by
        adding the domain corresponding to its values, the groupbys in the
        context and by properly formatting the date/datetime values.
        """
        domain_group = [dom for gb in annotated_groupbys for dom in self._read_group_get_domain(gb, data[gb['groupby']])]
        for k,v in data.iteritems():
            gb = groupby_dict.get(k)
            if gb and gb['type'] in ('date', 'datetime') and v:
                # Render temporal group labels with Babel in the user's language.
                data[k] = babel.dates.format_date(v, format=gb['display_format'], locale=context.get('lang', 'en_US'))

        data['__domain'] = domain_group + domain
        # Hand back the remaining (not yet applied) groupbys through __context.
        if len(groupby) - len(annotated_groupbys) >= 1:
            data['__context'] = { 'group_by': groupby[len(annotated_groupbys):]}
        # NOTE(review): the trailing cleanup/return lines appear truncated in
        # this excerpt (the original drops the internal 'id' key and returns data).
    def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False, lazy=True):
        """
        Get the list of records in list view grouped by the given ``groupby`` fields.

        :param cr: database cursor
        :param uid: current user id
        :param domain: list specifying search criteria [['field_name', 'operator', 'value'], ...]
        :param list fields: list of fields present in the list view specified on the object
        :param list groupby: list of groupby descriptions by which the records will be grouped.
                A groupby description is either a field (then it will be grouped by that field)
                or a string 'field:groupby_function'.  Right now, the only functions supported
                are 'day', 'week', 'month', 'quarter' or 'year', and they only make sense for
                date/datetime fields.
        :param int offset: optional number of records to skip
        :param int limit: optional max number of records to return
        :param dict context: context arguments, like lang, time zone.
        :param list orderby: optional ``order by`` specification, for
                             overriding the natural sort ordering of the
                             groups, see also :py:meth:`~osv.osv.osv.search`
                             (supported only for many2one fields currently)
        :param bool lazy: if true, the results are only grouped by the first
                groupby and the remaining groupbys are put in the __context
                key.  If false, all the groupbys are done in one call.
        :return: list of dictionaries (one dictionary for each record) containing:

                    * the values of fields grouped by the fields in ``groupby`` argument
                    * __domain: list of tuples specifying the search criteria
                    * __context: dictionary with argument like ``groupby``
        :rtype: [{'field_name_1': value, ...}, ...]
        :raise AccessError: * if user has no read rights on the requested object
                            * if user tries to bypass access rules for read on the requested object
        """
        self.check_access_rights(cr, uid, 'read')
        query = self._where_calc(cr, uid, domain, context=context)
        fields = fields or self._columns.keys()

        # Normalize groupby into a list; in lazy mode only the first level is
        # grouped now, the rest is handed back to the client via __context.
        groupby = [groupby] if isinstance(groupby, basestring) else groupby
        groupby_list = groupby[:1] if lazy else groupby
        annotated_groupbys = [self._read_group_process_groupby(gb, query, context)
                                    for gb in groupby_list]
        groupby_fields = [g['field'] for g in annotated_groupbys]
        order = orderby or ','.join([g for g in groupby_list])
        groupby_dict = {gb['groupby']: gb for gb in annotated_groupbys}

        self._apply_ir_rules(cr, uid, query, 'read', context=context)
        for gb in groupby_fields:
            assert gb in fields, "Fields in 'groupby' must appear in the list of fields to read (perhaps it's missing in the list view?)"
            groupby_def = self._columns.get(gb) or (self._inherit_fields.get(gb) and self._inherit_fields.get(gb)[2])
            assert groupby_def and groupby_def._classic_write, "Fields in 'groupby' must be regular database-persisted fields (no function or related fields), or function fields with store=True"
            if not (gb in self._all_columns):
                # Don't allow arbitrary values, as this would be a SQL injection vector!
                raise except_orm(_('Invalid group_by'),
                                 _('Invalid group_by specification: "%s".\nA group_by specification must be a list of valid fields.')%(gb,))

        # Numeric, stored, non-grouped fields are aggregated (default: sum).
        # NOTE(review): the ``f for f in fields`` generator head of this
        # comprehension appears truncated in this excerpt.
        aggregated_fields = [
            if f not in ('id', 'sequence')
            if f not in groupby_fields
            if self._all_columns[f].column._type in ('integer', 'float')
            if getattr(self._all_columns[f].column, '_classic_write')]

        field_formatter = lambda f: (self._all_columns[f].column.group_operator or 'sum', self._inherits_join_calc(f, query), f)
        select_terms = ["%s(%s) AS %s" % field_formatter(f) for f in aggregated_fields]

        for gb in annotated_groupbys:
            select_terms.append('%s as "%s" ' % (gb['qualified_field'], gb['groupby']))

        groupby_terms, orderby_terms = self._read_group_prepare(order, aggregated_fields, annotated_groupbys, query)
        from_clause, where_clause, where_clause_params = query.get_sql()
        if lazy and (len(groupby_fields) >= 2 or not context.get('group_by_no_leaf')):
            count_field = groupby_fields[0] if len(groupby_fields) >= 1 else '_'
        # NOTE(review): the ``else:`` assigning the eager-mode count_field
        # appears truncated in this excerpt.

        prefix_terms = lambda prefix, terms: (prefix + " " + ",".join(terms)) if terms else ''
        prefix_term = lambda prefix, term: ('%s %s' % (prefix, term)) if term else ''

        # NOTE(review): the ``query = """ ... """ % {`` wrapper around the
        # following SELECT template and its substitution dict appears
        # truncated in this excerpt.
        SELECT min(%(table)s.id) AS id, count(%(table)s.id) AS %(count_field)s_count %(extra_fields)s
        'table': self._table,
        'count_field': count_field,
        'extra_fields': prefix_terms(',', select_terms),
        'from': from_clause,
        'where': prefix_term('WHERE', where_clause),
        'groupby': prefix_terms('GROUP BY', groupby_terms),
        'orderby': prefix_terms('ORDER BY', orderby_terms),
        'limit': prefix_term('LIMIT', int(limit) if limit else None),
        # NOTE(review): OFFSET is gated on ``limit`` here (present in the
        # original source as well) -- an offset without a limit is dropped.
        'offset': prefix_term('OFFSET', int(offset) if limit else None),
        cr.execute(query, where_clause_params)
        fetched_data = cr.dictfetchall()

        if not groupby_fields:
        # NOTE(review): the early ``return fetched_data`` body appears
        # truncated in this excerpt.

        # Re-read m2o groupby values to get their (id, name) display pairs.
        many2onefields = [gb['field'] for gb in annotated_groupbys if gb['type'] == 'many2one']
        # NOTE(review): the ``if many2onefields:`` guard appears truncated.
        data_ids = [r['id'] for r in fetched_data]
        many2onefields = list(set(many2onefields))
        data_dict = {d['id']: d for d in self.read(cr, uid, data_ids, many2onefields, context=context)}
        for d in fetched_data:
            d.update(data_dict[d['id']])

        data = map(lambda r: {k: self._read_group_prepare_data(k,v, groupby_dict, context) for k,v in r.iteritems()}, fetched_data)
        result = [self._read_group_format_result(d, annotated_groupbys, groupby, groupby_dict, domain, context) for d in data]
        if lazy and groupby_fields[0] in self._group_by_full:
            # Right now, read_group only fill results in lazy mode (by default).
            # If you need to have the empty groups in 'eager' mode, then the
            # method _read_group_fill_results need to be completely reimplemented
            # NOTE(review): the final ``context=context)`` argument and the
            # trailing ``return result`` appear truncated in this excerpt.
            result = self._read_group_fill_results(cr, uid, domain, groupby_fields[0], groupby[len(annotated_groupbys):],
                                                   aggregated_fields, result, read_group_order=order,
    def _inherits_join_add(self, current_model, parent_model_name, query):
        """
        Add missing table SELECT and JOIN clause to ``query`` for reaching the
        parent table (no duplicates).

        :param current_model: current model object
        :param parent_model_name: name of the parent model for which the clauses should be added
        :param query: query object on which the JOIN should be added
        :return: alias of the joined parent table
        """
        inherits_field = current_model._inherits[parent_model_name]
        parent_model = self.pool[parent_model_name]
        # implicit=True: Query deduplicates the join if it was already added.
        parent_alias, parent_alias_statement = query.add_join((current_model._table, parent_model._table, inherits_field, 'id', inherits_field), implicit=True)
        # NOTE(review): the trailing ``return parent_alias`` appears truncated
        # in this excerpt.
2496 def _inherits_join_calc(self, field, query):
2498 Adds missing table select and join clause(s) to ``query`` for reaching
2499 the field coming from an '_inherits' parent table (no duplicates).
2501 :param field: name of inherited field to reach
2502 :param query: query object on which the JOIN should be added
2503 :return: qualified name of field, to be used in SELECT clause
2505 current_table = self
2506 parent_alias = '"%s"' % current_table._table
2507 while field in current_table._inherit_fields and not field in current_table._columns:
2508 parent_model_name = current_table._inherit_fields[field][0]
2509 parent_table = self.pool[parent_model_name]
2510 parent_alias = self._inherits_join_add(current_table, parent_model_name, query)
2511 current_table = parent_table
2512 return '%s."%s"' % (parent_alias, field)
    def _parent_store_compute(self, cr):
        """Recompute the nested-set ``parent_left``/``parent_right`` values
        for the whole table, starting from the root records.
        """
        if not self._parent_store:
        # NOTE(review): the early ``return`` body appears truncated here.
        _logger.info('Computing parent left and right for table %s...', self._table)
        def browse_rec(root, pos=0):
            # Children of ``root``; roots themselves have a NULL parent.
            where = self._parent_name+'='+str(root)
            # NOTE(review): the ``if not root:`` guard around the next line
            # appears truncated in this excerpt.
            where = self._parent_name+' IS NULL'
            if self._parent_order:
                where += ' order by '+self._parent_order
            cr.execute('SELECT id FROM '+self._table+' WHERE '+where)
            # NOTE(review): initialization of ``pos2`` appears truncated.
            for id in cr.fetchall():
                pos2 = browse_rec(id[0], pos2)
            cr.execute('update '+self._table+' set parent_left=%s, parent_right=%s where id=%s', (pos, pos2, root))
            # NOTE(review): the ``return pos2 + 1`` appears truncated.
        query = 'SELECT id FROM '+self._table+' WHERE '+self._parent_name+' IS NULL'
        if self._parent_order:
            query += ' order by ' + self._parent_order
        # NOTE(review): ``pos = 0`` and ``cr.execute(query)`` appear truncated.
        for (root,) in cr.fetchall():
            pos = browse_rec(root, pos)
    def _update_store(self, cr, f, k):
        """Recompute and persist the stored values of function field ``k``
        (column object ``f``) for all records, in batches.
        """
        _logger.info("storing computed values of fields.function '%s'", k)
        ss = self._columns[k]._symbol_set
        update_query = 'UPDATE "%s" SET "%s"=%s WHERE id=%%s' % (self._table, k, ss[0])
        cr.execute('select id from '+self._table)
        ids_lst = map(lambda x: x[0], cr.fetchall())
        # Process ids in fixed-size batches to bound memory usage.
        # NOTE(review): the ``while ids_lst:`` loop header appears truncated
        # in this excerpt.
        iids = ids_lst[:AUTOINIT_RECALCULATE_STORED_FIELDS]
        ids_lst = ids_lst[AUTOINIT_RECALCULATE_STORED_FIELDS:]
        res = f.get(cr, self, iids, k, SUPERUSER_ID, {})
        for key, val in res.items():
            # if val is a many2one, just write the ID
            if type(val) == tuple:
            # NOTE(review): the ``val = val[0]`` branch body appears truncated.
            if val is not False:
                cr.execute(update_query, (ss[1](val), key))
    def _check_selection_field_value(self, cr, uid, field, value, context=None):
        """Raise except_orm if value is not among the valid values for the selection field"""
        if self._columns[field]._type == 'reference':
            # Reference values look like 'model.name,id'; validate the id part.
            val_model, val_id_str = value.split(',', 1)
            # NOTE(review): the surrounding try/except and value defaulting
            # appear truncated in this excerpt.
            val_id = long(val_id_str)
            raise except_orm(_('ValidateError'),
                             _('Invalid value for reference field "%s.%s" (last part must be a non-zero integer): "%s"') % (self._table, field, value))
        # Static selections are (value, label) lists; dynamic ones are callables.
        if isinstance(self._columns[field].selection, (tuple, list)):
            if val in dict(self._columns[field].selection):
            # NOTE(review): the ``return`` bodies of these membership checks
            # appear truncated in this excerpt.
        elif val in dict(self._columns[field].selection(self, cr, uid, context=context)):
        raise except_orm(_('ValidateError'),
                        _('The value "%s" for the field "%s.%s" is not in the selection') % (value, self._table, field))
    def _check_removed_columns(self, cr, log=False):
        """Drop NOT NULL constraints on database columns that no longer have a
        matching (stored) field on the model.
        """
        # iterate on the database columns to drop the NOT NULL constraints
        # of fields which were required but have been removed (or will be added by another module)
        columns = [c for c in self._columns if not (isinstance(self._columns[c], fields.function) and not self._columns[c].store)]
        columns += MAGIC_COLUMNS
        # NOTE(review): the trailing comma after this execute() call turns the
        # statement into a harmless 1-tuple expression; present in the
        # original source as well.
        cr.execute("SELECT a.attname, a.attnotnull"
                   " FROM pg_class c, pg_attribute a"
                   " WHERE c.relname=%s"
                   " AND c.oid=a.attrelid"
                   " AND a.attisdropped=%s"
                   " AND pg_catalog.format_type(a.atttypid, a.atttypmod) NOT IN ('cid', 'tid', 'oid', 'xid')"
                   " AND a.attname NOT IN %s", (self._table, False, tuple(columns))),

        for column in cr.dictfetchall():
            # NOTE(review): the ``if log:`` guard around this debug line
            # appears truncated in this excerpt.
            _logger.debug("column %s is in the table %s but not in the corresponding object %s",
                          column['attname'], self._table, self._name)
            if column['attnotnull']:
                cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, column['attname']))
                _schema.debug("Table '%s': column '%s': dropped NOT NULL constraint",
                              self._table, column['attname'])
    def _save_constraint(self, cr, constraint_name, type):
        """
        Record the creation of a constraint for this model, to make it possible
        to delete it later when the module is uninstalled. Type can be either
        'f' or 'u' depending on the constraint being a foreign key or not.
        """
        # Custom (database-created) models have no module; nothing to record.
        if not self._module:
            # no need to save constraints for custom models as they're not part
            # NOTE(review): the rest of this comment and the early ``return``
            # appear truncated in this excerpt.
        assert type in ('f', 'u')
        # Check whether this (module, constraint) pair is already recorded.
        # NOTE(review): the ``cr.execute("""`` opener appears truncated.
        SELECT 1 FROM ir_model_constraint, ir_module_module
        WHERE ir_model_constraint.module=ir_module_module.id
        AND ir_model_constraint.name=%s
        AND ir_module_module.name=%s
        """, (constraint_name, self._module))
        # NOTE(review): the ``if not cr.rowcount:`` guard and the
        # ``cr.execute("""`` opener for the insert appear truncated.
        INSERT INTO ir_model_constraint
        (name, date_init, date_update, module, model, type)
        VALUES (%s, now() AT TIME ZONE 'UTC', now() AT TIME ZONE 'UTC',
        (SELECT id FROM ir_module_module WHERE name=%s),
        (SELECT id FROM ir_model WHERE model=%s), %s)""",
                       (constraint_name, self._module, self._name, type))
    def _save_relation_table(self, cr, relation_table):
        """
        Record the creation of a many2many for this model, to make it possible
        to delete it later when the module is uninstalled.
        """
        # check whether the relation table is already recorded for this module
            SELECT 1 FROM ir_model_relation, ir_module_module
            WHERE ir_model_relation.module=ir_module_module.id
                AND ir_model_relation.name=%s
                AND ir_module_module.name=%s
            """, (relation_table, self._module))
        # link the table to the module/model so uninstall can drop it
        cr.execute("""INSERT INTO ir_model_relation (name, date_init, date_update, module, model)
                      VALUES (%s, now() AT TIME ZONE 'UTC', now() AT TIME ZONE 'UTC',
                        (SELECT id FROM ir_module_module WHERE name=%s),
                        (SELECT id FROM ir_model WHERE model=%s))""",
                   (relation_table, self._module, self._name))
2648 # checked version: for direct m2o starting from `self`
2649 def _m2o_add_foreign_key_checked(self, source_field, dest_model, ondelete):
2650 assert self.is_transient() or not dest_model.is_transient(), \
2651 'Many2One relationships from non-transient Model to TransientModel are forbidden'
2652 if self.is_transient() and not dest_model.is_transient():
2653 # TransientModel relationships to regular Models are annoying
2654 # usually because they could block deletion due to the FKs.
2655 # So unless stated otherwise we default them to ondelete=cascade.
2656 ondelete = ondelete or 'cascade'
2657 fk_def = (self._table, source_field, dest_model._table, ondelete or 'set null')
2658 self._foreign_keys.add(fk_def)
2659 _schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s", *fk_def)
2661 # unchecked version: for custom cases, such as m2m relationships
2662 def _m2o_add_foreign_key_unchecked(self, source_table, source_field, dest_model, ondelete):
2663 fk_def = (source_table, source_field, dest_model._table, ondelete or 'set null')
2664 self._foreign_keys.add(fk_def)
2665 _schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s", *fk_def)
2667 def _drop_constraint(self, cr, source_table, constraint_name):
2668 cr.execute("ALTER TABLE %s DROP CONSTRAINT %s" % (source_table,constraint_name))
    def _m2o_fix_foreign_key(self, cr, source_table, source_field, dest_model, ondelete):
        """Reconcile the FK behind ``source_table``.``source_field`` with the
        desired target table and ondelete rule: drop any stale or duplicate
        constraint, then (re-)register the correct one."""
        # Find FK constraint(s) currently established for the m2o field,
        # and see whether they are stale or not
        cr.execute("""SELECT confdeltype as ondelete_rule, conname as constraint_name,
                             cl2.relname as foreign_table
                      FROM pg_constraint as con, pg_class as cl1, pg_class as cl2,
                           pg_attribute as att1, pg_attribute as att2
                      WHERE con.conrelid = cl1.oid
                        AND cl1.relname = %s
                        AND con.confrelid = cl2.oid
                        AND array_lower(con.conkey, 1) = 1
                        AND con.conkey[1] = att1.attnum
                        AND att1.attrelid = cl1.oid
                        AND att1.attname = %s
                        AND array_lower(con.confkey, 1) = 1
                        AND con.confkey[1] = att2.attnum
                        AND att2.attrelid = cl2.oid
                        AND att2.attname = %s
                        AND con.contype = 'f'""", (source_table, source_field, 'id'))
        constraints = cr.dictfetchall()
        if len(constraints) == 1:
            # Is it the right constraint?
            # NOTE(review): `cons` is presumably bound to the single element
            # of `constraints` here -- confirm against the full source
            if cons['ondelete_rule'] != POSTGRES_CONFDELTYPES.get((ondelete or 'set null').upper(), 'a')\
                or cons['foreign_table'] != dest_model._table:
                # Wrong FK: drop it and recreate
                _schema.debug("Table '%s': dropping obsolete FK constraint: '%s'",
                              source_table, cons['constraint_name'])
                self._drop_constraint(cr, source_table, cons['constraint_name'])
                # it's all good, nothing to do!
            # Multiple FKs found for the same field, drop them all, and re-create
            for cons in constraints:
                _schema.debug("Table '%s': dropping duplicate FK constraints: '%s'",
                              source_table, cons['constraint_name'])
                self._drop_constraint(cr, source_table, cons['constraint_name'])

        # (re-)create the FK
        self._m2o_add_foreign_key_checked(source_field, dest_model, ondelete)
    def _auto_init(self, cr, context=None):
        """
        Call _field_create and, unless _auto is False:

        - create the corresponding table in database for the model,
        - possibly add the parent columns in database,
        - possibly add the columns 'create_uid', 'create_date', 'write_uid',
          'write_date' in database if _log_access is True (the default),
        - report on database columns no more existing in _columns,
        - remove no more existing not null constraints,
        - alter existing database columns to match _columns,
        - create database tables to match _columns,
        - add database indices to match _columns,
        - save in self._foreign_keys a list a foreign keys to create (see
          _auto_end).

        :param cr: database cursor
        :param context: optional context dict; the 'update_custom_fields'
                        key controls whether manual fields are updated too
        """
        # foreign keys are only collected here; _auto_end() creates them
        self._foreign_keys = set()
        raise_on_invalid_object_name(self._name)
        # set when parent_left/parent_right were just added, meaning the
        # nested-set values must be recomputed at the end
        store_compute = False
        update_custom_fields = context.get('update_custom_fields', False)
        self._field_create(cr, context=context)
        create = not self._table_exist(cr)
            self._create_table(cr)

        if self._parent_store:
            if not self._parent_columns_exist(cr):
                self._create_parent_columns(cr)
                store_compute = True

        # Create the create_uid, create_date, write_uid, write_date, columns if desired.
        if self._log_access:
            self._add_log_columns(cr)

        self._check_removed_columns(cr, log=False)

        # iterate on the "object columns"
        column_data = self._select_column_data(cr)

        for k, f in self._columns.iteritems():
            if k in MAGIC_COLUMNS:
            # Don't update custom (also called manual) fields
            if f.manual and not update_custom_fields:

            if isinstance(f, fields.one2many):
                self._o2m_raise_on_missing_reference(cr, f)

            elif isinstance(f, fields.many2many):
                self._m2m_raise_or_create_relation(cr, f)

                res = column_data.get(k)

                # The field is not found as-is in database, try if it
                # exists with an old name.
                if not res and hasattr(f, 'oldname'):
                    res = column_data.get(f.oldname)
                        cr.execute('ALTER TABLE "%s" RENAME "%s" TO "%s"' % (self._table, f.oldname, k))
                        column_data[k] = res
                        _schema.debug("Table '%s': renamed column '%s' to '%s'",
                                      self._table, f.oldname, k)

                # The field already exists in database. Possibly
                # change its type, rename it, drop it or change its
                    f_pg_type = res['typname']
                    f_pg_size = res['size']
                    f_pg_notnull = res['attnotnull']
                    # a function field that became non-stored: its column is obsolete
                    if isinstance(f, fields.function) and not f.store and\
                            not getattr(f, 'nodrop', False):
                        _logger.info('column %s (%s) converted to a function, removed from table %s',
                                     k, f.string, self._table)
                        cr.execute('ALTER TABLE "%s" DROP COLUMN "%s" CASCADE' % (self._table, k))
                        _schema.debug("Table '%s': dropped column '%s' with cascade",
                        f_obj_type = get_pg_type(f) and get_pg_type(f)[0]

                        # known safe conversions:
                        # (db type, field type, new db type, SQL cast suffix)
                        ('text', 'char', pg_varchar(f.size), '::%s' % pg_varchar(f.size)),
                        ('varchar', 'text', 'TEXT', ''),
                        ('int4', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
                        ('date', 'datetime', 'TIMESTAMP', '::TIMESTAMP'),
                        ('timestamp', 'date', 'date', '::date'),
                        ('numeric', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
                        ('float8', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
                        # grow a varchar column in place when possible
                        if f_pg_type == 'varchar' and f._type == 'char' and f_pg_size and (f.size is None or f_pg_size < f.size):
                                with cr.savepoint():
                                    cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" TYPE %s' % (self._table, k, pg_varchar(f.size)))
                            except psycopg2.NotSupportedError:
                                # In place alter table cannot be done because a view is depending of this field.
                                # Do a manual copy. This will drop the view (that will be recreated later)
                                cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO temp_change_size' % (self._table, k))
                                cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, pg_varchar(f.size)))
                                cr.execute('UPDATE "%s" SET "%s"=temp_change_size::%s' % (self._table, k, pg_varchar(f.size)))
                                cr.execute('ALTER TABLE "%s" DROP COLUMN temp_change_size CASCADE' % (self._table,))
                            _schema.debug("Table '%s': column '%s' (type varchar) changed size from %s to %s",
                                          self._table, k, f_pg_size or 'unlimited', f.size or 'unlimited')
                            if (f_pg_type==c[0]) and (f._type==c[1]):
                                if f_pg_type != f_obj_type:
                                    # rename old column, add new one, cast data over, drop old
                                    cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO __temp_type_cast' % (self._table, k))
                                    cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, c[2]))
                                    cr.execute(('UPDATE "%s" SET "%s"= __temp_type_cast'+c[3]) % (self._table, k))
                                    cr.execute('ALTER TABLE "%s" DROP COLUMN __temp_type_cast CASCADE' % (self._table,))
                                    _schema.debug("Table '%s': column '%s' changed type from %s to %s",
                                                  self._table, k, c[0], c[1])

                        if f_pg_type != f_obj_type:
                            # no known cast: park old data in a free "<k>_moved<i>" column
                                newname = k + '_moved' + str(i)
                                cr.execute("SELECT count(1) FROM pg_class c,pg_attribute a " \
                                    "WHERE c.relname=%s " \
                                    "AND a.attname=%s " \
                                    "AND c.oid=a.attrelid ", (self._table, newname))
                                if not cr.fetchone()[0]:
                                cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, k))
                            cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO "%s"' % (self._table, k, newname))
                            cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, get_pg_type(f)[1]))
                            cr.execute("COMMENT ON COLUMN %s.\"%s\" IS %%s" % (self._table, k), (f.string,))
                            _schema.debug("Table '%s': column '%s' has changed type (DB=%s, def=%s), data moved to column %s !",
                                          self._table, k, f_pg_type, f._type, newname)

                        # if the field is required and hasn't got a NOT NULL constraint
                        if f.required and f_pg_notnull == 0:
                            # set the field to the default value if any
                            if k in self._defaults:
                                if callable(self._defaults[k]):
                                    default = self._defaults[k](self, cr, SUPERUSER_ID, context)
                                    default = self._defaults[k]

                                if default is not None:
                                    ss = self._columns[k]._symbol_set
                                    query = 'UPDATE "%s" SET "%s"=%s WHERE "%s" is NULL' % (self._table, k, ss[0], k)
                                    cr.execute(query, (ss[1](default),))
                            # add the NOT NULL constraint
                                cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k), log_exceptions=False)
                                _schema.debug("Table '%s': column '%s': added NOT NULL constraint",
                                msg = "Table '%s': unable to set a NOT NULL constraint on column '%s' !\n"\
                                    "If you want to have it, you should update the records and execute manually:\n"\
                                    "ALTER TABLE %s ALTER COLUMN %s SET NOT NULL"
                                _schema.warning(msg, self._table, k, self._table, k)

                        elif not f.required and f_pg_notnull == 1:
                            cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, k))
                            _schema.debug("Table '%s': column '%s': dropped NOT NULL constraint",

                        # reconcile the column index with the field's `select` flag
                        indexname = '%s_%s_index' % (self._table, k)
                        cr.execute("SELECT indexname FROM pg_indexes WHERE indexname = %s and tablename = %s", (indexname, self._table))
                        res2 = cr.dictfetchall()
                        if not res2 and f.select:
                            cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (self._table, k, self._table, k))
                            if f._type == 'text':
                                # FIXME: for fields.text columns we should try creating GIN indexes instead (seems most suitable for an ERP context)
                                msg = "Table '%s': Adding (b-tree) index for %s column '%s'."\
                                    "This is probably useless (does not work for fulltext search) and prevents INSERTs of long texts"\
                                    " because there is a length limit for indexable btree values!\n"\
                                    "Use a search view instead if you simply want to make the field searchable."
                                _schema.warning(msg, self._table, f._type, k)
                        if res2 and not f.select:
                            cr.execute('DROP INDEX "%s_%s_index"' % (self._table, k))
                            msg = "Table '%s': dropping index for column '%s' of type '%s' as it is not required anymore"
                            _schema.debug(msg, self._table, k, f._type)

                        if isinstance(f, fields.many2one) or (isinstance(f, fields.function) and f._type == 'many2one' and f.store):
                            dest_model = self.pool[f._obj]
                            if dest_model._table != 'ir_actions':
                                self._m2o_fix_foreign_key(cr, self._table, k, dest_model, f.ondelete)

                # The field doesn't exist in database. Create it if necessary.
                    if not isinstance(f, fields.function) or f.store:
                        # add the missing field
                        cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, get_pg_type(f)[1]))
                        cr.execute("COMMENT ON COLUMN %s.\"%s\" IS %%s" % (self._table, k), (f.string,))
                        _schema.debug("Table '%s': added column '%s' with definition=%s",
                                      self._table, k, get_pg_type(f)[1])

                        # initialize existing rows with the default value
                        if not create and k in self._defaults:
                            if callable(self._defaults[k]):
                                default = self._defaults[k](self, cr, SUPERUSER_ID, context)
                                default = self._defaults[k]

                            ss = self._columns[k]._symbol_set
                            query = 'UPDATE "%s" SET "%s"=%s' % (self._table, k, ss[0])
                            cr.execute(query, (ss[1](default),))
                            _logger.debug("Table '%s': setting default value of new column %s", self._table, k)

                        # remember the functions to call for the stored fields
                        if isinstance(f, fields.function):
                            if f.store is not True: # i.e. if f.store is a dict
                                order = f.store[f.store.keys()[0]][2]
                            # NOTE(review): todo_end is presumably initialized
                            # earlier in this method -- confirm
                            todo_end.append((order, self._update_store, (f, k)))

                        # and add constraints if needed
                        if isinstance(f, fields.many2one) or (isinstance(f, fields.function) and f._type == 'many2one' and f.store):
                            if f._obj not in self.pool:
                                raise except_orm('Programming Error', 'There is no reference available for %s' % (f._obj,))
                            dest_model = self.pool[f._obj]
                            ref = dest_model._table
                            # ir_actions is inherited so foreign key doesn't work on it
                            if ref != 'ir_actions':
                                self._m2o_add_foreign_key_checked(k, dest_model, f.ondelete)
                            cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (self._table, k, self._table, k))
                                cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k), log_exceptions=False)
                                _schema.debug("Table '%s': column '%s': added a NOT NULL constraint",
                                msg = "WARNING: unable to set column %s of table %s not null !\n"\
                                    "Try to re-run: openerp-server --update=module\n"\
                                    "If it doesn't work, update records and execute manually:\n"\
                                    "ALTER TABLE %s ALTER COLUMN %s SET NOT NULL"
                                _logger.warning(msg, k, self._table, self._table, k)

            # _auto is off: only detect whether the table/view already exists
            cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (self._table,))
            create = not bool(cr.fetchone())

        cr.commit() # start a new transaction

            self._add_sql_constraints(cr)

            self._execute_sql(cr)

            self._parent_store_compute(cr)
    def _auto_end(self, cr, context=None):
        """ Create the foreign keys recorded by _auto_init.

        :param cr: database cursor
        :param context: unused here, kept for API uniformity
        """
        for t, k, r, d in self._foreign_keys:
            # each entry is (table, column, referenced table, ondelete action)
            cr.execute('ALTER TABLE "%s" ADD FOREIGN KEY ("%s") REFERENCES "%s" ON DELETE %s' % (t, k, r, d))
            # record the constraint so module uninstall can drop it later
            self._save_constraint(cr, "%s_%s_fkey" % (t, k), 'f')
        # the set is only meaningful between _auto_init and _auto_end
        del self._foreign_keys
    def _table_exist(self, cr):
        # Truthy iff a table or view named after this model already exists.
        cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (self._table,))
3008 def _create_table(self, cr):
3009 cr.execute('CREATE TABLE "%s" (id SERIAL NOT NULL, PRIMARY KEY(id))' % (self._table,))
3010 cr.execute(("COMMENT ON TABLE \"%s\" IS %%s" % self._table), (self._description,))
3011 _schema.debug("Table '%s': created", self._table)
    def _parent_columns_exist(self, cr):
        # Only 'parent_left' is checked: both parent columns are created
        # together by _create_parent_columns, so one implies the other.
        cr.execute("""SELECT c.relname
            FROM pg_class c, pg_attribute a
            WHERE c.relname=%s AND a.attname=%s AND c.oid=a.attrelid
            """, (self._table, 'parent_left'))
    def _create_parent_columns(self, cr):
        """Add the parent_left/parent_right columns backing the nested-set
        hierarchy (``_parent_store``), and complain when the model definition
        lacks matching indexed integer fields or a safe ondelete rule on the
        parent field."""
        cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_left" INTEGER' % (self._table,))
        cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_right" INTEGER' % (self._table,))
        if 'parent_left' not in self._columns:
            _logger.error('create a column parent_left on object %s: fields.integer(\'Left Parent\', select=1)',
            _schema.debug("Table '%s': added column '%s' with definition=%s",
                          self._table, 'parent_left', 'INTEGER')
        elif not self._columns['parent_left'].select:
            _logger.error('parent_left column on object %s must be indexed! Add select=1 to the field definition)',
        if 'parent_right' not in self._columns:
            _logger.error('create a column parent_right on object %s: fields.integer(\'Right Parent\', select=1)',
            _schema.debug("Table '%s': added column '%s' with definition=%s",
                          self._table, 'parent_right', 'INTEGER')
        elif not self._columns['parent_right'].select:
            _logger.error('parent_right column on object %s must be indexed! Add select=1 to the field definition)',
        # the parent m2o must cascade or restrict, otherwise deleting a
        # parent would corrupt the nested-set values of its children
        if self._columns[self._parent_name].ondelete not in ('cascade', 'restrict'):
            _logger.error("The column %s on object %s must be set as ondelete='cascade' or 'restrict'",
                          self._parent_name, self._name)
    def _add_log_columns(self, cr):
        """Add any missing audit column (create_uid/create_date/write_uid/
        write_date, definitions in LOG_ACCESS_COLUMNS) to the table."""
        for field, field_def in LOG_ACCESS_COLUMNS.iteritems():
            # does the column already exist on the table?
                FROM pg_class c, pg_attribute a
                WHERE c.relname=%s AND a.attname=%s AND c.oid=a.attrelid
                """, (self._table, field))
                cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, field, field_def))
                _schema.debug("Table '%s': added column '%s' with definition=%s",
                              self._table, field, field_def)
3062 def _select_column_data(self, cr):
3063 # attlen is the number of bytes necessary to represent the type when
3064 # the type has a fixed size. If the type has a varying size attlen is
3065 # -1 and atttypmod is the size limit + 4, or -1 if there is no limit.
3066 cr.execute("SELECT c.relname,a.attname,a.attlen,a.atttypmod,a.attnotnull,a.atthasdef,t.typname,CASE WHEN a.attlen=-1 THEN (CASE WHEN a.atttypmod=-1 THEN 0 ELSE a.atttypmod-4 END) ELSE a.attlen END as size " \
3067 "FROM pg_class c,pg_attribute a,pg_type t " \
3068 "WHERE c.relname=%s " \
3069 "AND c.oid=a.attrelid " \
3070 "AND a.atttypid=t.oid", (self._table,))
3071 return dict(map(lambda x: (x['attname'], x),cr.dictfetchall()))
3074 def _o2m_raise_on_missing_reference(self, cr, f):
3075 # TODO this check should be a method on fields.one2many.
3076 if f._obj in self.pool:
3077 other = self.pool[f._obj]
3078 # TODO the condition could use fields_get_keys().
3079 if f._fields_id not in other._columns.keys():
3080 if f._fields_id not in other._inherit_fields.keys():
3081 raise except_orm('Programming Error', "There is no reference field '%s' found for '%s'" % (f._fields_id, f._obj,))
    def _m2m_raise_or_create_relation(self, cr, f):
        """Ensure the many2many relation table for field ``f`` exists,
        creating it (columns, FKs, indexes, table comment) when missing."""
        m2m_tbl, col1, col2 = f._sql_names(self)
        # do not create relations for custom fields as they do not belong to a module
        # they will be automatically removed when dropping the corresponding ir.model.field
        # table name for custom relation all starts with x_, see __init__
        if not m2m_tbl.startswith('x_'):
            self._save_relation_table(cr, m2m_tbl)
        cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (m2m_tbl,))
        if not cr.dictfetchall():
            if f._obj not in self.pool:
                raise except_orm('Programming Error', 'Many2Many destination model does not exist: `%s`' % (f._obj,))
            dest_model = self.pool[f._obj]
            ref = dest_model._table
            # both link columns are mandatory and the pair must be unique
            cr.execute('CREATE TABLE "%s" ("%s" INTEGER NOT NULL, "%s" INTEGER NOT NULL, UNIQUE("%s","%s"))' % (m2m_tbl, col1, col2, col1, col2))
            # create foreign key references with ondelete=cascade, unless the targets are SQL views
            cr.execute("SELECT relkind FROM pg_class WHERE relkind IN ('v') AND relname=%s", (ref,))
            if not cr.fetchall():
                self._m2o_add_foreign_key_unchecked(m2m_tbl, col2, dest_model, 'cascade')
            cr.execute("SELECT relkind FROM pg_class WHERE relkind IN ('v') AND relname=%s", (self._table,))
            if not cr.fetchall():
                self._m2o_add_foreign_key_unchecked(m2m_tbl, col1, self, 'cascade')
            cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (m2m_tbl, col1, m2m_tbl, col1))
            cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (m2m_tbl, col2, m2m_tbl, col2))
            cr.execute("COMMENT ON TABLE \"%s\" IS 'RELATION BETWEEN %s AND %s'" % (m2m_tbl, self._table, ref))
            _schema.debug("Create table '%s': m2m relation between '%s' and '%s'", m2m_tbl, self._table, ref)
    def _add_sql_constraints(self, cr):
        """
        Modify this model's database table constraints so they match the one in
        _sql_constraints.
        """
        def unify_cons_text(txt):
            # normalize case/spacing so textually-different but equivalent
            # constraint definitions compare equal
            return txt.lower().replace(', ',',').replace(' (','(')

        for (key, con, _) in self._sql_constraints:
            conname = '%s_%s' % (self._table, key)

            # record the constraint so module uninstall can drop it later
            self._save_constraint(cr, conname, 'u')
            cr.execute("SELECT conname, pg_catalog.pg_get_constraintdef(oid, true) as condef FROM pg_constraint where conname=%s", (conname,))
            existing_constraints = cr.dictfetchall()
            # 'drop' runs before 'add' (see 'order'); messages may carry
            # deferred %s placeholders filled in below
                    'query': 'ALTER TABLE "%s" DROP CONSTRAINT "%s"' % (self._table, conname, ),
                    'msg_ok': "Table '%s': dropped constraint '%s'. Reason: its definition changed from '%%s' to '%s'" % (
                        self._table, conname, con),
                    'msg_err': "Table '%s': unable to drop \'%s\' constraint !" % (self._table, con),
                    'query': 'ALTER TABLE "%s" ADD CONSTRAINT "%s" %s' % (self._table, conname, con,),
                    'msg_ok': "Table '%s': added constraint '%s' with definition=%s" % (self._table, conname, con),
                    'msg_err': "Table '%s': unable to add \'%s\' constraint !\n If you want to have it, you should update the records and execute manually:\n%%s" % (

            if not existing_constraints:
                # constraint does not exists:
                sql_actions['add']['execute'] = True
                sql_actions['add']['msg_err'] = sql_actions['add']['msg_err'] % (sql_actions['add']['query'], )
            elif unify_cons_text(con) not in [unify_cons_text(item['condef']) for item in existing_constraints]:
                # constraint exists but its definition has changed:
                sql_actions['drop']['execute'] = True
                sql_actions['drop']['msg_ok'] = sql_actions['drop']['msg_ok'] % (existing_constraints[0]['condef'].lower(), )
                sql_actions['add']['execute'] = True
                sql_actions['add']['msg_err'] = sql_actions['add']['msg_err'] % (sql_actions['add']['query'], )

            # we need to add the constraint:
            sql_actions = [item for item in sql_actions.values()]
            sql_actions.sort(key=lambda x: x['order'])
            for sql_action in [action for action in sql_actions if action['execute']]:
                    cr.execute(sql_action['query'])
                    _schema.debug(sql_action['msg_ok'])
                    _schema.warning(sql_action['msg_err'])
    def _execute_sql(self, cr):
        """ Execute the SQL code from the _sql attribute (if any)."""
        if hasattr(self, "_sql"):
            # statements are separated by ';'
            for line in self._sql.split(';'):
                # strip newlines and surrounding whitespace before executing
                line2 = line.replace('\n', '').strip()
    # Update objects that use this one to update their _inherits fields
3184 def _inherits_reload_src(self):
3185 """ Recompute the _inherit_fields mapping on each _inherits'd child model."""
3186 for obj in self.pool.models.values():
3187 if self._name in obj._inherits:
3188 obj._inherits_reload()
    def _inherits_reload(self):
        """ Recompute the _inherit_fields mapping.

        This will also call itself on each inherits'd child model.

        """
        # NOTE(review): `res` is presumably initialized to {} just before
        # this loop -- confirm against the full source
        for table in self._inherits:
            other = self.pool[table]
            for col in other._columns.keys():
                # entry: (parent model, link field, column object, original parent)
                res[col] = (table, self._inherits[table], other._columns[col], table)
            for col in other._inherit_fields.keys():
                # transitively inherited columns keep their original parent model
                res[col] = (table, self._inherits[table], other._inherit_fields[col][2], other._inherit_fields[col][3])
        self._inherit_fields = res
        self._all_columns = self._get_column_infos()
        # propagate the recomputation to models inheriting from this one
        self._inherits_reload_src()
    def _get_column_infos(self):
        """Returns a dict mapping all fields names (direct fields and
        inherited field via _inherits) to a ``column_info`` struct
        giving detailed columns """
        # NOTE(review): `result` is presumably initialized to {} before this
        # loop and returned at the end -- confirm against the full source
        # inherited fields first, so direct columns take precedence below
        for k, (parent, m2o, col, original_parent) in self._inherit_fields.iteritems():
            result[k] = fields.column_info(k, col, parent, m2o, original_parent)
        for k, col in self._columns.iteritems():
            result[k] = fields.column_info(k, col)
3221 def _inherits_check(self):
3222 for table, field_name in self._inherits.items():
3223 if field_name not in self._columns:
3224 _logger.info('Missing many2one field definition for _inherits reference "%s" in "%s", using default one.', field_name, self._name)
3225 self._columns[field_name] = fields.many2one(table, string="Automatically created field to link to parent %s" % table,
3226 required=True, ondelete="cascade")
3227 elif not self._columns[field_name].required or self._columns[field_name].ondelete.lower() not in ("cascade", "restrict"):
3228 _logger.warning('Field definition for _inherits reference "%s" in "%s" must be marked as "required" with ondelete="cascade" or "restrict", forcing it to required + cascade.', field_name, self._name)
3229 self._columns[field_name].required = True
3230 self._columns[field_name].ondelete = "cascade"
    def fields_get(self, cr, user, allfields=None, context=None, write_access=True):
        """ Return the definition of each field.

        The returned value is a dictionary (indiced by field name) of
        dictionaries. The _inherits'd fields are included. The string, help,
        and selection (if present) attributes are translated.

        :param cr: database cursor
        :param user: current user id
        :param allfields: list of fields
        :param context: context arguments, like lang, time zone
        :return: dictionary of field dictionaries, each one describing a field of the business object
        :raise AccessError: * if user has no create/write rights on the requested object
        """
        # write/create access downgrades field definitions to readonly below
        write_access = self.check_access_rights(cr, user, 'write', raise_exception=False) \
            or self.check_access_rights(cr, user, 'create', raise_exception=False)

        translation_obj = self.pool.get('ir.translation')
        # start from the parents' definitions so own columns override them
        for parent in self._inherits:
            res.update(self.pool[parent].fields_get(cr, user, allfields, context))

        for f, field in self._columns.iteritems():
            # skip unrequested fields and fields hidden by group restrictions
            if (allfields and f not in allfields) or \
                (field.groups and not self.user_has_groups(cr, user, groups=field.groups, context=context)):

            res[f] = fields.field_to_dict(self, cr, user, field, context=context)

            if not write_access:
                res[f]['readonly'] = True
                res[f]['states'] = {}

            # translate label and tooltip in the requested language
            if 'lang' in context:
                if 'string' in res[f]:
                    res_trans = translation_obj._get_source(cr, user, self._name + ',' + f, 'field', context['lang'])
                        res[f]['string'] = res_trans
                if 'help' in res[f]:
                    help_trans = translation_obj._get_source(cr, user, self._name + ',' + f, 'help', context['lang'])
                        res[f]['help'] = help_trans
    def get_empty_list_help(self, cr, user, help, context=None):
        """ Generic method giving the help message displayed when having
        no result to display in a list or kanban view. By default it returns
        the help given in parameter that is generally the help message
        defined in the action.
        """
    def check_field_access_rights(self, cr, user, operation, fields, context=None):
        """
        Check the user access rights on the given fields. This raises Access
        Denied if the user does not have the rights. Otherwise it returns the
        fields (as is if the fields is not falsy, or the readable/writable
        fields if fields is falsy).
        """
            """Predicate to test if the user has access to the given field name."""
            # Ignore requested field if it doesn't exist. This is ugly but
            # it seems to happen at least with 'name_alias' on res.partner.
            if field_name not in self._all_columns:
            field = self._all_columns[field_name].column
            # the superuser bypasses group restrictions on fields
            if user != SUPERUSER_ID and field.groups:
                return self.user_has_groups(cr, user, groups=field.groups, context=context)

        # no explicit field list: return every field the user may access
            fields = filter(p, self._all_columns.keys())
        # explicit field list: every field must be accessible, or we refuse
            filtered_fields = filter(lambda a: not p(a), fields)
                _logger.warning('Access Denied by ACLs for operation: %s, uid: %s, model: %s, fields: %s', operation, user, self._name, ', '.join(filtered_fields))
                    _('The requested operation cannot be completed due to security restrictions. '
                      'Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
                    (self._description, operation))
    def read(self, cr, user, ids, fields=None, context=None, load='_classic_read'):
        """ Read records with given ids with the given fields

        :param cr: database cursor
        :param user: current user id
        :param ids: id or list of the ids of the records to read
        :param fields: optional list of field names to return (default: all fields would be returned)
        :type fields: list (example ['field_name_1', ...])
        :param context: optional context dictionary - it may contains keys for specifying certain options
                        like ``context_lang``, ``context_tz`` to alter the results of the call.
                        A special ``bin_size`` boolean flag may also be passed in the context to request the
                        value of all fields.binary columns to be returned as the size of the binary instead of its
                        contents. This can also be selectively overriden by passing a field-specific flag
                        in the form ``bin_size_XXX: True/False`` where ``XXX`` is the name of the field.
                        Note: The ``bin_size_XXX`` form is new in OpenERP v6.0.
        :return: list of dictionaries((dictionary per record asked)) with requested field values
        :rtype: [{'name_of_the_field': value, ...}, ...]
        :raise AccessError: * if user has no read rights on the requested object
                            * if user tries to bypass access rules for read on the requested object
        """
        self.check_access_rights(cr, user, 'read')
        # restrict the requested fields to those the user may read
        fields = self.check_field_access_rights(cr, user, 'read', fields)
        if isinstance(ids, (int, long)):
        # accept browse-record-like dicts by extracting their id
        select = map(lambda x: isinstance(x, dict) and x['id'] or x, select)
        result = self._read_flat(cr, user, select, fields, context, load)

        # a scalar id in means a single record dict (or False) out
        if isinstance(ids, (int, long)):
            return result and result[0] or False
    def _read_flat(self, cr, user, ids, fields_to_read, context=None, load='_classic_read'):
        """Low-level implementation of read(): fetch the stored columns with a
        single SQL SELECT (with ir.rule record rules applied), then overlay
        translations, _inherits parent fields and function ("post") fields.

        NOTE(review): this excerpt is missing several interior lines of the
        original method (guards, else-branches, loop headers); comments below
        are hedged where the surrounding context is assumed.
        """
        if fields_to_read is None:
            fields_to_read = self._columns.keys()
        # remove duplicate field names
        fields_to_read = list(set(fields_to_read))

        # all inherited fields + all non inherited fields for which the attribute whose name is in load is True
        fields_pre = [f for f in fields_to_read if
                         f == self.CONCURRENCY_CHECK_FIELD
                         or (f in self._columns and getattr(self._columns[f], '_classic_write'))
                     ] + self._inherits.values()

        # Build the SQL expression that selects field ``f`` from self._table.
        def convert_field(f):
            f_qual = '%s."%s"' % (self._table, f) # need fully-qualified references in case len(tables) > 1
            if f in ('create_date', 'write_date'):
                # drop sub-second precision on log-access timestamps
                return "date_trunc('second', %s) as %s" % (f_qual, f)
            if f == self.CONCURRENCY_CHECK_FIELD:
                if self._log_access:
                    return "COALESCE(%s.write_date, %s.create_date, (now() at time zone 'UTC'))::timestamp AS %s" % (self._table, self._table, f,)
                return "(now() at time zone 'UTC')::timestamp AS %s" % (f,)
            if isinstance(self._columns[f], fields.binary) and context.get('bin_size', False):
                # bin_size mode: report the size of the binary value, not its content
                return 'length(%s) as "%s"' % (f_qual, f)

        # Construct a clause for the security rules.
        # 'tables' hold the list of tables necessary for the SELECT including the ir.rule clauses,
        # or will at least contain self._table.
        rule_clause, rule_params, tables = self.pool.get('ir.rule').domain_get(cr, user, self._name, 'read', context=context)

        fields_pre2 = map(convert_field, fields_pre)
        order_by = self._parent_order or self._order
        select_fields = ','.join(fields_pre2 + ['%s.id' % self._table])
        query = 'SELECT %s FROM %s WHERE %s.id IN %%s' % (select_fields, ','.join(tables), self._table)
        # record-rule clauses are OR-ed together, then AND-ed with the id filter
        query += " AND " + (' OR '.join(rule_clause))
        query += " ORDER BY " + order_by
        for sub_ids in cr.split_for_in_conditions(ids):
            cr.execute(query, [tuple(sub_ids)] + rule_params)
            results = cr.dictfetchall()
            result_ids = [x['id'] for x in results]
            # raises if the record rules hid (or deletion removed) some requested ids
            self._check_record_rules_result_count(cr, user, sub_ids, result_ids, 'read', context=context)
        # presumably the fallback branch when no stored column is requested:
        # only check access rules and return bare {'id': ...} dicts -- TODO confirm
        self.check_access_rule(cr, user, ids, 'read', context=context)
        res = map(lambda x: {'id': x}, ids)

        # overlay translated values for translatable stored fields
        if context.get('lang'):
            for f in fields_pre:
                if f == self.CONCURRENCY_CHECK_FIELD:
                if self._columns[f].translate:
                    ids = [x['id'] for x in res]
                    #TODO: optimize out of this loop
                    res_trans = self.pool.get('ir.translation')._get_ids(cr, user, self._name+','+f, 'model', context['lang'], ids)
                    r[f] = res_trans.get(r['id'], False) or r[f]

        # fetch requested fields living on the _inherits parent models
        for table in self._inherits:
            col = self._inherits[table]
            cols = [x for x in intersect(self._inherit_fields.keys(), fields_to_read) if x not in self._columns.keys()]
            # delegate to the parent model's read() for the inherited columns
            res2 = self.pool[table].read(cr, user, [x[col] for x in res], cols, context, load)
            if not record[col]: # if the record is deleted from _inherits table?
            record.update(res3[record[col]])
            if col not in fields_to_read:

        # all fields which need to be post-processed by a simple function (symbol_get)
        fields_post = filter(lambda x: x in self._columns and self._columns[x]._symbol_get, fields_to_read)
        for f in fields_post:
            r[f] = self._columns[f]._symbol_get(r[f])
        ids = [x['id'] for x in res]

        # all non inherited fields for which the attribute whose name is in load is False
        fields_post = filter(lambda x: x in self._columns and not getattr(self._columns[x], load), fields_to_read)

        # Compute POST fields
        for f in fields_post:
            # group function fields sharing the same 'multi' key so they are computed in one call
            todo.setdefault(self._columns[f]._multi, [])
            todo[self._columns[f]._multi].append(f)
        for key, val in todo.items():
            res2 = self._columns[val[0]].get(cr, self, ids, val, user, context=context, values=res)
            assert res2 is not None, \
                'The function field "%s" on the "%s" model returned None\n' \
                '(a dictionary was expected).' % (val[0], self._name)
            if isinstance(res2[record['id']], str): res2[record['id']] = eval(res2[record['id']]) #TOCHECK : why got string instend of dict in python2.6
            multi_fields = res2.get(record['id'],{})
            record[pos] = multi_fields.get(pos,[])
            # presumably the non-multi branch: one get() call per field -- TODO confirm
            res2 = self._columns[f].get(cr, self, ids, f, user, context=context, values=res)
            record[f] = res2[record['id']]

        # Warn about deprecated fields now that fields_pre and fields_post are computed
        # Explicitly use list() because we may receive tuples
        for f in list(fields_pre) + list(fields_post):
            field_column = self._all_columns.get(f) and self._all_columns.get(f).column
            if field_column and field_column.deprecated:
                _logger.warning('Field %s.%s is deprecated: %s', self._name, f, field_column.deprecated)

        # mask out values the user's groups may not read
        # (presumably driven by the field's ``read`` groups attribute -- TODO confirm)
        for field in vals.copy():
            if field in self._columns:
                fobj = self._columns[field]
            for group in groups:
                module = group.split(".")[0]
                grp = group.split(".")[1]
                # membership test: is ``user`` in group ``module.grp``?
                cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name=%s and module=%s and model=%s) and uid=%s", \
                           (grp, module, 'res.groups', user))
                readonly = cr.fetchall()
                if readonly[0][0] >= 1:
                elif readonly[0][0] == 0:
            # replace unauthorized values with a type-compatible placeholder
            if type(vals[field]) == type([]):
            elif type(vals[field]) == type(0.0):
            elif type(vals[field]) == type(''):
                vals[field] = '=No Permission='
            if vals[field] is None:
    # TODO check READ access
    def perm_read(self, cr, user, ids, context=None, details=True):
        """
        Returns some metadata about the given records.

        :param details: if True, \*_uid fields are replaced with the name of the user
        :return: list of ownership dictionaries for each requested record
        :rtype: list of dictionaries with the following keys:

                    * create_uid: user who created the record
                    * create_date: date when the record was created
                    * write_uid: last user who changed the record
                    * write_date: date of the last change to the record
                    * xmlid: XML ID to use to refer to this record (if there is one), in format ``module.name``

        NOTE(review): this excerpt is missing interior lines (context/ids
        normalization, the result iteration loop headers and the try/except
        around name_get); indentation below is reconstructed.
        """
        # remember whether a single id was passed, to unwrap the result accordingly
        uniq = isinstance(ids, (int, long))
        if self._log_access:
            fields += ['create_uid', 'create_date', 'write_uid', 'write_date']
        quoted_table = '"%s"' % self._table
        fields_str = ",".join('%s.%s'%(quoted_table, field) for field in fields)
        # LEFT JOIN on ir_model_data also fetches the record's external (XML) id, if any
        query = '''SELECT %s, __imd.module, __imd.name
                   FROM %s LEFT JOIN ir_model_data __imd
                       ON (__imd.model = %%s and __imd.res_id = %s.id)
                   WHERE %s.id IN %%s''' % (fields_str, quoted_table, quoted_table, quoted_table)
        cr.execute(query, (self._name, tuple(ids)))
        res = cr.dictfetchall()
                # normalize NULLs to False, per ORM convention
                r[key] = r[key] or False
                if details and key in ('write_uid', 'create_uid') and r[key]:
                        # replace the numeric uid with its (id, name) pair
                        r[key] = self.pool.get('res.users').name_get(cr, user, [r[key]])[0]
                        pass # Leave the numeric uid there
            r['xmlid'] = ("%(module)s.%(name)s" % r) if r['name'] else False
            del r['name'], r['module']
    def _check_concurrency(self, cr, ids, context):
        """Raise a 'ConcurrencyException' if any of ``ids`` was modified in the
        database after the write_date the client last saw (optimistic locking,
        driven by the CONCURRENCY_CHECK_FIELD timestamps carried in ``context``).

        NOTE(review): excerpt is missing interior lines (early returns, the
        per-id loop header, ids_to_check initialization and the fetch of the
        query result); indentation below is reconstructed.
        """
        # only active when the client supplied check timestamps and the model logs access
        if not (context.get(self.CONCURRENCY_CHECK_FIELD) and self._log_access):
        # one (id, timestamp) pair per record; matches rows modified after the client's snapshot
        check_clause = "(id = %s AND %s < COALESCE(write_date, create_date, (now() at time zone 'UTC'))::timestamp)"
        for sub_ids in cr.split_for_in_conditions(ids):
                # client timestamps are keyed by "model,id"
                id_ref = "%s,%s" % (self._name, id)
                update_date = context[self.CONCURRENCY_CHECK_FIELD].pop(id_ref, None)
                    ids_to_check.extend([id, update_date])
            if not ids_to_check:
            # one OR-ed check_clause per (id, timestamp) pair
            cr.execute("SELECT id FROM %s WHERE %s" % (self._table, " OR ".join([check_clause]*(len(ids_to_check)/2))), tuple(ids_to_check))
                # mention the first one only to keep the error message readable
                raise except_orm('ConcurrencyException', _('A document was modified since you last viewed it (%s:%d)') % (self._description, res[0]))
    def _check_record_rules_result_count(self, cr, uid, ids, result_ids, operation, context=None):
        """Verify the returned rows after applying record rules matches
        the length of `ids`, and raise an appropriate exception if it does not.
        """
        ids, result_ids = set(ids), set(result_ids)
        missing_ids = ids - result_ids
        # Attempt to distinguish record rule restriction vs deleted records,
        # to provide a more specific error message - check if the missing
        # ids still exist as rows in the table
        cr.execute('SELECT id FROM ' + self._table + ' WHERE id IN %s', (tuple(missing_ids),))
        forbidden_ids = [x[0] for x in cr.fetchall()]
        # the missing ids are (at least partially) hidden by access rules
        if uid == SUPERUSER_ID:
        _logger.warning('Access Denied by record rules for operation: %s on record ids: %r, uid: %s, model: %s', operation, forbidden_ids, uid, self._name)
        raise except_orm(_('Access Denied'),
                         _('The requested operation cannot be completed due to security restrictions. Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
                         (self._description, operation))
        # If we get here, the missing_ids are not in the database
        if operation in ('read','unlink'):
            # No need to warn about deleting an already deleted record.
            # And no error when reading a record that was deleted, to prevent spurious
            # errors for non-transactional search/read sequences coming from clients
        _logger.warning('Failed operation on deleted record(s): %s, uid: %s, model: %s', operation, uid, self._name)
        raise except_orm(_('Missing document(s)'),
                         _('One of the documents you are trying to access has been deleted, please try again after refreshing.'))
3624 def check_access_rights(self, cr, uid, operation, raise_exception=True): # no context on purpose.
3625 """Verifies that the operation given by ``operation`` is allowed for the user
3626 according to the access rights."""
3627 return self.pool.get('ir.model.access').check(cr, uid, self._name, operation, raise_exception)
    def check_access_rule(self, cr, uid, ids, operation, context=None):
        """Verifies that the operation given by ``operation`` is allowed for the user
        according to ir.rules.

        :param operation: one of ``write``, ``unlink``
        :raise except_orm: * if current ir.rules do not permit this operation.
        :return: None if the operation is allowed

        NOTE(review): excerpt is missing interior lines (superuser early
        return, transient-model return, the FROM clause of the first query and
        the empty-where_clause guard); indentation is reconstructed.
        """
        # superuser bypasses all record rules
        if uid == SUPERUSER_ID:

        if self.is_transient():
            # Only one single implicit access rule for transient models: owner only!
            # This is ok to hardcode because we assert that TransientModels always
            # have log_access enabled so that the create_uid column is always there.
            # And even with _inherits, these fields are always present in the local
            # table too, so no need for JOINs.
            cr.execute("""SELECT distinct create_uid
                          WHERE id IN %%s""" % self._table, (tuple(ids),))
            uids = [x[0] for x in cr.fetchall()]
            # all requested records must belong to the current user
            if len(uids) != 1 or uids[0] != uid:
                raise except_orm(_('Access Denied'),
                                 _('For this kind of document, you may only access records you created yourself.\n\n(Document type: %s)') % (self._description,))
            where_clause, where_params, tables = self.pool.get('ir.rule').domain_get(cr, uid, self._name, operation, context=context)
                where_clause = ' and ' + ' and '.join(where_clause)
                for sub_ids in cr.split_for_in_conditions(ids):
                    cr.execute('SELECT ' + self._table + '.id FROM ' + ','.join(tables) +
                               ' WHERE ' + self._table + '.id IN %s' + where_clause,
                               [sub_ids] + where_params)
                    returned_ids = [x['id'] for x in cr.dictfetchall()]
                    # raises if record rules filtered out some of the requested ids
                    self._check_record_rules_result_count(cr, uid, sub_ids, returned_ids, operation, context=context)
3664 def create_workflow(self, cr, uid, ids, context=None):
3665 """Create a workflow instance for each given record IDs."""
3666 from openerp import workflow
3668 workflow.trg_create(uid, self._name, res_id, cr)
3671 def delete_workflow(self, cr, uid, ids, context=None):
3672 """Delete the workflow instances bound to the given record IDs."""
3673 from openerp import workflow
3675 workflow.trg_delete(uid, self._name, res_id, cr)
3678 def step_workflow(self, cr, uid, ids, context=None):
3679 """Reevaluate the workflow instances of the given record IDs."""
3680 from openerp import workflow
3682 workflow.trg_write(uid, self._name, res_id, cr)
3685 def signal_workflow(self, cr, uid, ids, signal, context=None):
3686 """Send given workflow signal and return a dict mapping ids to workflow results"""
3687 from openerp import workflow
3690 result[res_id] = workflow.trg_validate(uid, self._name, res_id, signal, cr)
3693 def redirect_workflow(self, cr, uid, old_new_ids, context=None):
3694 """ Rebind the workflow instance bound to the given 'old' record IDs to
3695 the given 'new' IDs. (``old_new_ids`` is a list of pairs ``(old, new)``.
3697 from openerp import workflow
3698 for old_id, new_id in old_new_ids:
3699 workflow.trg_redirect(uid, self._name, old_id, new_id, cr)
    def unlink(self, cr, uid, ids, context=None):
        """
        Delete records with given ids

        :param cr: database cursor
        :param uid: current user id
        :param ids: id or list of ids
        :param context: (optional) context arguments, like lang, time zone
        :raise AccessError: * if user has no unlink rights on the requested object
                            * if user tries to bypass access rules for unlink on the requested object
        :raise UserError: if the record is default property for other records

        NOTE(review): excerpt is missing interior lines (empty-ids guard, id
        normalization, domain list terminator, emptiness checks before the
        ir.model.data / ir.values unlinks, and the final return); indentation
        is reconstructed.
        """
        if isinstance(ids, (int, long)):
        # snapshot stored-function dependencies before rows disappear
        result_store = self._store_get_values(cr, uid, ids, self._all_columns.keys(), context)

        self._check_concurrency(cr, ids, context)

        self.check_access_rights(cr, uid, 'unlink')

        ir_property = self.pool.get('ir.property')

        # Check if the records are used as default properties.
        domain = [('res_id', '=', False),
                  ('value_reference', 'in', ['%s,%s' % (self._name, i) for i in ids]),
        if ir_property.search(cr, uid, domain, context=context):
            raise except_orm(_('Error'), _('Unable to delete this document because it is used as a default property'))

        # Delete the records' properties.
        property_ids = ir_property.search(cr, uid, [('res_id', 'in', ['%s,%s' % (self._name, i) for i in ids])], context=context)
        ir_property.unlink(cr, uid, property_ids, context=context)

        self.delete_workflow(cr, uid, ids, context=context)

        self.check_access_rule(cr, uid, ids, 'unlink', context=context)
        pool_model_data = self.pool.get('ir.model.data')
        ir_values_obj = self.pool.get('ir.values')
        for sub_ids in cr.split_for_in_conditions(ids):
            cr.execute('delete from ' + self._table + ' ' \
                       'where id IN %s', (sub_ids,))

            # Removing the ir_model_data reference if the record being deleted is a record created by xml/csv file,
            # as these are not connected with real database foreign keys, and would be dangling references.
            # Note: following steps performed as admin to avoid access rights restrictions, and with no context
            # to avoid possible side-effects during admin calls.
            # Step 1. Calling unlink of ir_model_data only for the affected IDS
            reference_ids = pool_model_data.search(cr, SUPERUSER_ID, [('res_id','in',list(sub_ids)),('model','=',self._name)])
            # Step 2. Marching towards the real deletion of referenced records
                pool_model_data.unlink(cr, SUPERUSER_ID, reference_ids)

            # For the same reason, removing the record relevant to ir_values
            ir_value_ids = ir_values_obj.search(cr, uid,
                    ['|',('value','in',['%s,%s' % (self._name, sid) for sid in sub_ids]),'&',('res_id','in',list(sub_ids)),('model','=',self._name)],
                ir_values_obj.unlink(cr, uid, ir_value_ids, context=context)

        # recompute stored-function fields that depended on the deleted rows
        for order, obj_name, store_ids, fields in result_store:
            if obj_name == self._name:
                # don't recompute on the records we just deleted
                effective_store_ids = list(set(store_ids) - set(ids))
                effective_store_ids = store_ids
            if effective_store_ids:
                obj = self.pool[obj_name]
                # keep only ids that still exist in the target table
                cr.execute('select id from '+obj._table+' where id IN %s', (tuple(effective_store_ids),))
                rids = map(lambda x: x[0], cr.fetchall())
                    obj._store_set_values(cr, uid, rids, fields, context)
    def write(self, cr, user, ids, vals, context=None):
        """
        Update records with given ids with the given field values

        :param cr: database cursor
        :param user: current user id
        :param ids: object id or list of object ids to update according to **vals**
        :param vals: field values to update, e.g {'field_name': new_field_value, ...}
        :type vals: dictionary
        :param context: (optional) context arguments, e.g. {'lang': 'en_us', 'tz': 'UTC', ...}
        :type context: dictionary

        :raise AccessError: * if user has no write rights on the requested object
                            * if user tries to bypass access rules for write on the requested object
        :raise ValidateError: if user tries to enter invalid value for a field that is not in selection
        :raise UserError: if a loop would be created in a hierarchy of objects a result of the operation (such as setting an object as its own parent)

        **Note**: The type of field values to pass in ``vals`` for relationship fields is specific:

            + For a many2many field, a list of tuples is expected.
              Here is the list of tuple that are accepted, with the corresponding semantics ::

                 (0, 0,  { values })    link to a new record that needs to be created with the given values dictionary
                 (1, ID, { values })    update the linked record with id = ID (write *values* on it)
                 (2, ID)                remove and delete the linked record with id = ID (calls unlink on ID, that will delete the object completely, and the link to it as well)
                 (3, ID)                cut the link to the linked record with id = ID (delete the relationship between the two objects but does not delete the target object itself)
                 (4, ID)                link to existing record with id = ID (adds a relationship)
                 (5)                    unlink all (like using (3,ID) for all linked records)
                 (6, 0, [IDs])          replace the list of linked IDs (like using (5) then (4,ID) for each ID in the list of IDs)

                 [(6, 0, [8, 5, 6, 4])] sets the many2many to ids [8, 5, 6, 4]

            + For a one2many field, a lits of tuples is expected.
              Here is the list of tuple that are accepted, with the corresponding semantics ::

                 (0, 0,  { values })    link to a new record that needs to be created with the given values dictionary
                 (1, ID, { values })    update the linked record with id = ID (write *values* on it)
                 (2, ID)                remove and delete the linked record with id = ID (calls unlink on ID, that will delete the object completely, and the link to it as well)

                 [(0, 0, {'field_name':field_value_record1, ...}), (0, 0, {'field_name':field_value_record2, ...})]

            + For a many2one field, simply use the ID of target record, which must already exist, or ``False`` to remove the link.
            + For a reference field, use a string with the model name, a comma, and the target object id (example: ``'product.product, 5'``)

        NOTE(review): this excerpt is missing many interior lines of the
        original method (guards, else branches, loop headers, accumulator
        initialization such as upd0/upd1/direct/updend/nids/todo); indentation
        below is reconstructed and comments are hedged accordingly.
        """
        self.check_field_access_rights(cr, user, 'write', vals.keys())
        # drop values the user's groups may not write
        # (presumably driven by the field's ``write`` groups attribute -- TODO confirm)
        for field in vals.copy():
            if field in self._columns:
                fobj = self._columns[field]
            elif field in self._inherit_fields:
                fobj = self._inherit_fields[field][2]
                for group in groups:
                    module = group.split(".")[0]
                    grp = group.split(".")[1]
                    # membership test: is ``user`` in group ``module.grp``?
                    cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name=%s and module=%s and model=%s) and uid=%s", \
                               (grp, module, 'res.groups', user))
                    readonly = cr.fetchall()
                    if readonly[0][0] >= 1:

        if isinstance(ids, (int, long)):

        self._check_concurrency(cr, ids, context)
        self.check_access_rights(cr, user, 'write')

        # stored-function fields impacted by this write (recomputed at the end)
        result = self._store_get_values(cr, user, ids, vals.keys(), context) or []

        # No direct update of parent_left/right
        vals.pop('parent_left', None)
        vals.pop('parent_right', None)

        parents_changed = []
        parent_order = self._parent_order or self._order
        if self._parent_store and (self._parent_name in vals):
            # The parent_left/right computation may take up to
            # 5 seconds. No need to recompute the values if the
            # parent is the same.
            # Note: to respect parent_order, nodes must be processed in
            # order, so ``parents_changed`` must be ordered properly.
            parent_val = vals[self._parent_name]
                # records whose parent actually changes to the new (non-null) value
                query = "SELECT id FROM %s WHERE id IN %%s AND (%s != %%s OR %s IS NULL) ORDER BY %s" % \
                                (self._table, self._parent_name, self._parent_name, parent_order)
                cr.execute(query, (tuple(ids), parent_val))
                # presumably the branch for a NULL new parent -- TODO confirm
                query = "SELECT id FROM %s WHERE id IN %%s AND (%s IS NOT NULL) ORDER BY %s" % \
                                (self._table, self._parent_name, parent_order)
                cr.execute(query, (tuple(ids),))
            parents_changed = map(operator.itemgetter(0), cr.fetchall())

        totranslate = context.get('lang', False) and (context['lang'] != 'en_US')
            # classify each written field: direct SQL update vs set()-based vs inherited
            field_column = self._all_columns.get(field) and self._all_columns.get(field).column
            if field_column and field_column.deprecated:
                _logger.warning('Field %s.%s is deprecated: %s', self._name, field, field_column.deprecated)
            if field in self._columns:
                if self._columns[field]._classic_write and not (hasattr(self._columns[field], '_fnct_inv')):
                    if (not totranslate) or not self._columns[field].translate:
                        # direct SQL UPDATE via the column's symbol_set converter
                        upd0.append('"'+field+'"='+self._columns[field]._symbol_set[0])
                        upd1.append(self._columns[field]._symbol_set[1](vals[field]))
                        direct.append(field)
                    # non classic_write (or invertible function) fields use their set() method
                    upd_todo.append(field)
                # presumably inherited fields, delegated to the parent model below
                updend.append(field)
            if field in self._columns \
                    and hasattr(self._columns[field], 'selection') \
                self._check_selection_field_value(cr, user, field, vals[field], context=context)

        if self._log_access:
            upd0.append('write_uid=%s')
            upd0.append("write_date=(now() at time zone 'UTC')")

            self.check_access_rule(cr, user, ids, 'write', context=context)
            for sub_ids in cr.split_for_in_conditions(ids):
                cr.execute('update ' + self._table + ' set ' + ','.join(upd0) + ' ' \
                           'where id IN %s', upd1 + [sub_ids])
                if cr.rowcount != len(sub_ids):
                    # rows disappeared between the access check and the UPDATE
                    raise except_orm(_('AccessError'),
                                     _('One of the records you are trying to modify has already been deleted (Document type: %s).') % self._description)

                    if self._columns[f].translate:
                        src_trans = self.pool[self._name].read(cr, user, ids, [f])[0][f]
                            # Inserting value to DB
                            context_wo_lang = dict(context, lang=None)
                            self.write(cr, user, ids, {f: vals[f]}, context=context_wo_lang)
                        self.pool.get('ir.translation')._set_ids(cr, user, self._name+','+f, 'model', context['lang'], ids, vals[f], src_trans)

        # call the 'set' method of fields which are not classic_write
        upd_todo.sort(lambda x, y: self._columns[x].priority-self._columns[y].priority)

        # default element in context must be removed when call a one2many or many2many
        rel_context = context.copy()
        for c in context.items():
            if c[0].startswith('default_'):
                del rel_context[c[0]]

        for field in upd_todo:
                result += self._columns[field].set(cr, self, id, field, vals[field], user, context=rel_context) or []

        # delegate writes on inherited fields to the _inherits parent models
        unknown_fields = updend[:]
        for table in self._inherits:
            col = self._inherits[table]
            for sub_ids in cr.split_for_in_conditions(ids):
                cr.execute('select distinct "'+col+'" from "'+self._table+'" ' \
                           'where id IN %s', (sub_ids,))
                nids.extend([x[0] for x in cr.fetchall()])

                if self._inherit_fields[val][0] == table:
                    unknown_fields.remove(val)
                self.pool[table].write(cr, user, nids, v, context)
                'No such field(s) in model %s: %s.',
                self._name, ', '.join(unknown_fields))

        # check Python constraints
        self._validate(cr, user, ids, context)

        # TODO: use _order to set dest at the right position and not first node of parent
        # We can't defer parent_store computation because the stored function
        # fields that are computer may refer (directly or indirectly) to
        # parent_left/right (via a child_of domain)
                self.pool._init_parent[self._name] = True
                order = self._parent_order or self._order
                parent_val = vals[self._parent_name]
                    clause, params = '%s=%%s' % (self._parent_name,), (parent_val,)
                    clause, params = '%s IS NULL' % (self._parent_name,), ()

                for id in parents_changed:
                    cr.execute('SELECT parent_left, parent_right FROM %s WHERE id=%%s' % (self._table,), (id,))
                    pleft, pright = cr.fetchone()
                    # size of the subtree being moved
                    distance = pright - pleft + 1

                    # Positions of current siblings, to locate proper insertion point;
                    # this can _not_ be fetched outside the loop, as it needs to be refreshed
                    # after each update, in case several nodes are sequentially inserted one
                    # next to the other (i.e computed incrementally)
                    cr.execute('SELECT parent_right, id FROM %s WHERE %s ORDER BY %s' % (self._table, clause, parent_order), params)
                    parents = cr.fetchall()

                    # Find Position of the element
                    for (parent_pright, parent_id) in parents:
                        position = parent_pright and parent_pright + 1 or 1

                    # It's the first node of the parent
                            cr.execute('select parent_left from '+self._table+' where id=%s', (parent_val,))
                            position = cr.fetchone()[0] + 1

                    # moving a node under one of its own children would corrupt the tree
                    if pleft < position <= pright:
                        raise except_orm(_('UserError'), _('Recursivity Detected.'))

                    if pleft < position:
                        # shift the gap open, then slide the subtree right into it
                        cr.execute('update '+self._table+' set parent_left=parent_left+%s where parent_left>=%s', (distance, position))
                        cr.execute('update '+self._table+' set parent_right=parent_right+%s where parent_right>=%s', (distance, position))
                        cr.execute('update '+self._table+' set parent_left=parent_left+%s, parent_right=parent_right+%s where parent_left>=%s and parent_left<%s', (position-pleft, position-pleft, pleft, pright))
                        # presumably the mirror case, sliding the subtree left -- TODO confirm
                        cr.execute('update '+self._table+' set parent_left=parent_left+%s where parent_left>=%s', (distance, position))
                        cr.execute('update '+self._table+' set parent_right=parent_right+%s where parent_right>=%s', (distance, position))
                        cr.execute('update '+self._table+' set parent_left=parent_left-%s, parent_right=parent_right-%s where parent_left>=%s and parent_left<%s', (pleft-position+distance, pleft-position+distance, pleft+distance, pright+distance))

        result += self._store_get_values(cr, user, ids, vals.keys(), context)
        for order, model_name, ids_to_update, fields_to_recompute in result:
            key = (model_name, tuple(fields_to_recompute))
            done.setdefault(key, {})
            # avoid to do several times the same computation
            for id in ids_to_update:
                if id not in done[key]:
                    done[key][id] = True
            self.pool[model_name]._store_set_values(cr, user, todo, fields_to_recompute, context)

        self.step_workflow(cr, user, ids, context=context)
    # TODO: Should set perm to user.xxx
    def create(self, cr, user, vals, context=None):
        """
        Create a new record for the model.

        The values for the new record are initialized using the ``vals``
        argument, and if necessary the result of ``default_get()``.

        :param cr: database cursor
        :param user: current user id
        :param vals: field values for new record, e.g {'field_name': field_value, ...}
        :type vals: dictionary
        :param context: optional context arguments, e.g. {'lang': 'en_us', 'tz': 'UTC', ...}
        :type context: dictionary
        :return: id of new record created
        :raise AccessError: * if user has no create rights on the requested object
                            * if user tries to bypass access rules for create on the requested object
        :raise ValidateError: if user tries to enter invalid value for a field that is not in selection
        :raise UserError: if a loop would be created in a hierarchy of objects a result of the operation (such as setting an object as its own parent)

        **Note**: The type of field values to pass in ``vals`` for relationship fields is specific.
        Please see the description of the :py:meth:`~osv.osv.osv.write` method for details about the possible values and how

        NOTE(review): this excerpt is missing many interior lines of the
        original method (context default, tocreate/columns/upd_todo
        initialization, cr.execute of the INSERT, loop headers and several
        else branches); indentation below is reconstructed and comments
        are hedged accordingly.
        """
        # transient models piggy-back garbage collection onto create()
        if self.is_transient():
            self._transient_vacuum(cr, user)

        self.check_access_rights(cr, user, 'create')

        if self._log_access:
            for f in LOG_ACCESS_COLUMNS:
                # clients may not force the audit columns; drop and warn
                if vals.pop(f, None) is not None:
                    'Field `%s` is not allowed when creating the model `%s`.',
        vals = self._add_missing_default_values(cr, user, vals, context)

        # pre-create (or link to) one parent record per _inherits relation
        for v in self._inherits:
            if self._inherits[v] not in vals:
                # caller supplied the parent id explicitly: link, don't create
                tocreate[v] = {'id': vals[self._inherits[v]]}

        # columns will contain a list of field defined as a tuple
        # tuple(field_name, format_string, field_value)
        # the tuple will be used by the string formatting for the INSERT
            ('id', "nextval('%s')" % self._sequence),

        for v in vals.keys():
            # route inherited (non-local) fields to their parent model's create/write
            if v in self._inherit_fields and v not in self._columns:
                (table, col, col_detail, original_parent) = self._inherit_fields[v]
                tocreate[table][v] = vals[v]
                if (v not in self._inherit_fields) and (v not in self._columns):
                    unknown_fields.append(v)
                'No such field(s) in model %s: %s.',
                self._name, ', '.join(unknown_fields))

        # models without a sequence (e.g. SQL-view-backed reports) cannot be written to
        if not self._sequence:
                _('You cannot perform this operation. New Record Creation is not allowed for this object as this object is for reporting purpose.')

        for table in tocreate:
            if self._inherits[table] in vals:
                del vals[self._inherits[table]]

            record_id = tocreate[table].pop('id', None)

            # When linking/creating parent records, force context without 'no_store_function' key that
            # defers stored functions computing, as these won't be computed in batch at the end of create().
            parent_context = dict(context)
            parent_context.pop('no_store_function', None)

            if record_id is None or not record_id:
                record_id = self.pool[table].create(cr, user, tocreate[table], context=parent_context)
                # existing parent record: write the remaining values onto it
                self.pool[table].write(cr, user, [record_id], tocreate[table], context=parent_context)

            columns.append((self._inherits[table], '%s', record_id))

        #Start : Set bool fields to be False if they are not touched(to make search more powerful)
        bool_fields = [x for x in self._columns.keys() if self._columns[x]._type=='boolean']

        for bool_field in bool_fields:
            if bool_field not in vals:
                vals[bool_field] = False
        # drop values the user's groups may not create
        # (presumably driven by the field's groups attribute -- TODO confirm)
        for field in vals.copy():
            if field in self._columns:
                fobj = self._columns[field]
                fobj = self._inherit_fields[field][2]
                for group in groups:
                    module = group.split(".")[0]
                    grp = group.split(".")[1]
                    # NOTE(review): this variant interpolates values straight into the SQL
                    # string, unlike the parameterized query used in write()/read();
                    # should use query parameters to avoid SQL-injection risk.
                    cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name='%s' and module='%s' and model='%s') and uid=%s" % \
                               (grp, module, 'res.groups', user))
                    readonly = cr.fetchall()
                    if readonly[0][0] >= 1:
                    elif readonly[0][0] == 0:

            current_field = self._columns[field]
            if current_field._classic_write:
                # stored column: written directly by the INSERT below
                columns.append((field, '%s', current_field._symbol_set[1](vals[field])))

                #for the function fields that receive a value, we set them directly in the database
                #(they may be required), but we also need to trigger the _fct_inv()
                if (hasattr(current_field, '_fnct_inv')) and not isinstance(current_field, fields.related):
                    #TODO: this way to special case the related fields is really creepy but it shouldn't be changed at
                    #one week of the release candidate. It seems the only good way to handle correctly this is to add an
                    #attribute to make a field `really readonly´ and thus totally ignored by the create()... otherwise
                    #if, for example, the related has a default value (for usability) then the fct_inv is called and it
                    #may raise some access rights error. Changing this is a too big change for now, and is thus postponed
                    #after the release but, definitively, the behavior shouldn't be different for related and function
                    upd_todo.append(field)
                #TODO: this `if´ statement should be removed because there is no good reason to special case the fields
                #related. See the above TODO comment for further explanations.
                if not isinstance(current_field, fields.related):
                    upd_todo.append(field)
            if field in self._columns \
                    and hasattr(current_field, 'selection') \
                self._check_selection_field_value(cr, user, field, vals[field], context=context)
        if self._log_access:
            columns.append(('create_uid', '%s', user))
            columns.append(('write_uid', '%s', user))
            columns.append(('create_date', "(now() at time zone 'UTC')"))
            columns.append(('write_date', "(now() at time zone 'UTC')"))

        # the list of tuples used in this formatting corresponds to
        # tuple(field_name, format, value)
        # In some case, for example (id, create_date, write_date) we does not
        # need to read the third value of the tuple, because the real value is
        # encoded in the second value (the format).
            """INSERT INTO "%s" (%s) VALUES(%s) RETURNING id""" % (
                ', '.join('"%s"' % f[0] for f in columns),
                ', '.join(f[1] for f in columns)
            tuple([f[2] for f in columns if len(f) > 2])

        id_new, = cr.fetchone()
        upd_todo.sort(lambda x, y: self._columns[x].priority-self._columns[y].priority)

        # maintain the materialized tree (parent_left/parent_right) for the new row
        if self._parent_store and not context.get('defer_parent_store_computation'):
                self.pool._init_parent[self._name] = True
            parent = vals.get(self._parent_name, False)
                    # place the node after its last sibling under the same parent
                    cr.execute('select parent_right from '+self._table+' where '+self._parent_name+'=%s order by '+(self._parent_order or self._order), (parent,))
                    result_p = cr.fetchall()
                    for (pleft,) in result_p:
                        # no sibling yet: anchor just inside the parent node
                        cr.execute('select parent_left from '+self._table+' where id=%s', (parent,))
                        pleft_old = cr.fetchone()[0]
                    # root node: append after the current rightmost position
                    cr.execute('select max(parent_right) from '+self._table)
                    pleft = cr.fetchone()[0] or 0
                cr.execute('update '+self._table+' set parent_left=parent_left+2 where parent_left>%s', (pleft,))
                cr.execute('update '+self._table+' set parent_right=parent_right+2 where parent_right>%s', (pleft,))
                cr.execute('update '+self._table+' set parent_left=%s,parent_right=%s where id=%s', (pleft+1, pleft+2, id_new))

        # default element in context must be remove when call a one2many or many2many
        rel_context = context.copy()
        for c in context.items():
            if c[0].startswith('default_'):
                del rel_context[c[0]]

        for field in upd_todo:
            result += self._columns[field].set(cr, self, id_new, field, vals[field], user, rel_context) or []
        # check Python constraints
        self._validate(cr, user, [id_new], context)

        if not context.get('no_store_function', False):
            result += self._store_get_values(cr, user, [id_new],
                list(set(vals.keys() + self._inherits.values())),
            for order, model_name, ids, fields2 in result:
                if not (model_name, ids, fields2) in done:
                    self.pool[model_name]._store_set_values(cr, user, ids, fields2, context)
                    done.append((model_name, ids, fields2))

        if self._log_create and not (context and context.get('no_store_function', False)):
            message = self._description + \
                self.name_get(cr, user, [id_new], context=context)[0][1] + \
                "' " + _("created.")
            self.log(cr, user, id_new, message, True, context=context)
        self.check_access_rule(cr, user, [id_new], 'create', context=context)
        self.create_workflow(cr, user, [id_new], context=context)
    def browse(self, cr, uid, select, context=None, list_class=None, fields_process=None):
        """Fetch records as objects allowing to use dot notation to browse fields and relations

        :param cr: database cursor
        :param uid: current user id
        :param select: id or list of ids.
        :param context: context arguments, like lang, time zone
        :rtype: object or list of objects requested
        """
        self._list_class = list_class or browse_record_list
        # NOTE(review): the initialization of the shared browse ``cache`` dict
        # appears to be elided from this excerpt; ``cache`` is used below.
        # need to accepts ints and longs because ids coming from a method
        # launched by button in the interface have a type long...
        if isinstance(select, (int, long)):
            # single id -> single browse_record
            return browse_record(cr, uid, select, self, cache, context=context, list_class=self._list_class, fields_process=fields_process)
        elif isinstance(select, list):
            # list of ids -> one browse_record per id, wrapped in the list class
            return self._list_class((browse_record(cr, uid, id, self, cache, context=context, list_class=self._list_class, fields_process=fields_process) for id in select), context=context)
        # anything else (e.g. False/None) yields a null record
        return browse_null()
    def _store_get_values(self, cr, uid, ids, fields, context):
        """Returns an ordered list of fields.function to call due to
           an update operation on ``fields`` of records with ``ids``,
           obtained by calling the 'store' triggers of these fields,
           as setup by their 'store' attribute.

           :return: [(priority, model_name, [record_ids,], [function_fields,])]
        """
        if fields is None: fields = []
        stored_functions = self.pool._store_function.get(self._name, [])

        # use indexed names for the details of the stored_functions:
        model_name_, func_field_to_compute_, target_ids_func_, trigger_fields_, priority_ = range(5)

        # only keep store triggers that should be triggered for the ``fields``
        triggers_to_compute = [f for f in stored_functions \
            if ((not f[trigger_fields_]) or set(fields).intersection(f[trigger_fields_]))]

        # NOTE(review): the ``to_compute_map = {}`` initialization appears to be
        # elided from this excerpt; it is populated below.
        # memoize target ids per target-ids function (keyed by the function
        # object's identity) to avoid recomputing them for identical triggers
        target_id_results = {}
        for store_trigger in triggers_to_compute:
            target_func_id_ = id(store_trigger[target_ids_func_])
            if not target_func_id_ in target_id_results:
                # use admin user for accessing objects having rules defined on store fields
                target_id_results[target_func_id_] = [i for i in store_trigger[target_ids_func_](self, cr, SUPERUSER_ID, ids, context) if i]
            target_ids = target_id_results[target_func_id_]

            # the compound key must consider the priority and model name
            key = (store_trigger[priority_], store_trigger[model_name_])
            for target_id in target_ids:
                to_compute_map.setdefault(key, {}).setdefault(target_id,set()).add(tuple(store_trigger))

        # Here to_compute_map looks like:
        # { (10, 'model_a') : { target_id1: [ (trigger_1_tuple, trigger_2_tuple) ], ... }
        #   (20, 'model_a') : { target_id2: [ (trigger_3_tuple, trigger_4_tuple) ], ... }
        #   (99, 'model_a') : { target_id1: [ (trigger_5_tuple, trigger_6_tuple) ], ... }

        # Now we need to generate the batch function calls list
        # { (10, 'model_a') : [(10, 'model_a', [record_ids,], [function_fields,])] }
        # NOTE(review): the ``call_map = {}`` initialization appears to be elided here.
        for ((priority,model), id_map) in to_compute_map.iteritems():
            trigger_ids_maps = {}
            # function_ids_maps =
            #   { (function_1_tuple, function_2_tuple) : [target_id1, target_id2, ..] }
            for target_id, triggers in id_map.iteritems():
                trigger_ids_maps.setdefault(tuple(triggers), []).append(target_id)
            for triggers, target_ids in trigger_ids_maps.iteritems():
                call_map.setdefault((priority,model),[]).append((priority, model, target_ids,
                                                                [t[func_field_to_compute_] for t in triggers]))
        ordered_keys = call_map.keys()
        # NOTE(review): the sort of ``ordered_keys`` (by priority) and the final
        # ``return result`` appear to be elided from this excerpt.
        result = reduce(operator.add, (call_map[k] for k in ordered_keys))
    def _store_set_values(self, cr, uid, ids, fields, context):
        """Calls the fields.function's "implementation function" for all ``fields``, on records with ``ids`` (taking care of
           respecting ``multi`` attributes), and stores the resulting values in the database directly."""
        # NOTE(review): several lines are elided from this excerpt (among them
        # the initializations of ``field_dict``/``todo``/``keys``, some loop and
        # branch headers, and the init of ``upd0``/``upd1``); the visible gaps
        # are marked with reviewer notes below.
        if self._log_access:
            cr.execute('select id,write_date from '+self._table+' where id IN %s', (tuple(ids),))
            # for each record, remember which function fields were already
            # recomputed recently (within i[5] hours of its write_date), so the
            # recomputation below can skip them
                field_dict.setdefault(r[0], [])
                res_date = time.strptime((r[1])[:19], '%Y-%m-%d %H:%M:%S')
                write_date = datetime.datetime.fromtimestamp(time.mktime(res_date))
                for i in self.pool._store_function.get(self._name, []):
                        up_write_date = write_date + datetime.timedelta(hours=i[5])
                        if datetime.datetime.now() < up_write_date:
                                field_dict[r[0]].append(i[1])
        # group the requested fields by their ``multi`` attribute, so fields
        # computed together are fetched with a single call
        if self._columns[f]._multi not in keys:
            keys.append(self._columns[f]._multi)
        todo.setdefault(self._columns[f]._multi, [])
        todo[self._columns[f]._multi].append(f)
        # --- multi-field branch: one .get() call per ``multi`` group ---
        # use admin user for accessing objects having rules defined on store fields
        result = self._columns[val[0]].get(cr, self, ids, val, SUPERUSER_ID, context=context)
        for id, value in result.items():
            for f in value.keys():
                if f in field_dict[id]:
                    # recently recomputed -> skip (elided branch body)
            if self._columns[v]._type == 'many2one':
                    # many2one values come back as (id, name) pairs; keep the id
                    value[v] = value[v][0]
            upd0.append('"'+v+'"='+self._columns[v]._symbol_set[0])
            upd1.append(self._columns[v]._symbol_set[1](value[v]))
            cr.execute('update "' + self._table + '" set ' + \
                ','.join(upd0) + ' where id = %s', upd1)
        # --- single-field branch: one .get() call per field ---
        # use admin user for accessing objects having rules defined on store fields
        result = self._columns[f].get(cr, self, ids, f, SUPERUSER_ID, context=context)
        for r in result.keys():
            if r in field_dict.keys():
                if f in field_dict[r]:
                    # recently recomputed -> drop from result (elided branch body)
        for id, value in result.items():
            if self._columns[f]._type == 'many2one':
                # (elided: keep only the id of the (id, name) pair)
            cr.execute('update "' + self._table + '" set ' + \
                '"'+f+'"='+self._columns[f]._symbol_set[0] + ' where id = %s', (self._columns[f]._symbol_set[1](value), id))
4449 def perm_write(self, cr, user, ids, fields, context=None):
4450 raise NotImplementedError(_('This method does not exist anymore'))
    # TODO: improve handling of NULL values
    def _where_calc(self, cr, user, domain, active_test=True, context=None):
        """Computes the WHERE clause needed to implement an OpenERP domain.

        :param domain: the domain to compute
        :param active_test: whether the default filtering of records with ``active``
                            field set to ``False`` should be applied.
        :return: the query expressing the given domain as provided in domain
        :rtype: osv.query.Query
        """
        # if the object has a field named 'active', filter out all inactive
        # records unless they were explicitely asked for
        if 'active' in self._all_columns and (active_test and context.get('active_test', True)):
            # NOTE(review): the surrounding ``if domain:``/``else:`` branch
            # headers appear to be elided from this excerpt.
            # the item[0] trick below works for domain items and '&'/'|'/'!'
            if not any(item[0] == 'active' for item in domain):
                domain.insert(0, ('active', '=', 1))
            # (elided ``else:`` branch — empty domain becomes an active-only one)
            domain = [('active', '=', 1)]

        # translate the domain into SQL tables / WHERE fragments
        e = expression.expression(cr, user, domain, self, context)
        tables = e.get_tables()
        where_clause, where_params = e.to_sql()
        where_clause = where_clause and [where_clause] or []
        # NOTE(review): elided ``else:`` branch header — with no domain at all,
        # select everything from the model's own table
        where_clause, where_params, tables = [], [], ['"%s"' % self._table]
        return Query(tables, where_clause, where_params)
    def _check_qorder(self, word):
        """Validate that ``word`` is a well-formed ORDER BY specification.

        Raises an AccessError-flavoured except_orm when ``word`` does not match
        ``regex_order``. NOTE(review): the ``return True`` on success appears
        to be elided from this excerpt.
        """
        if not regex_order.match(word):
            raise except_orm(_('AccessError'), _('Invalid "order" specified. A valid "order" specification is a comma-separated list of valid field names (optionally followed by asc/desc for the direction)'))
    def _apply_ir_rules(self, cr, uid, query, mode='read', context=None):
        """Add what's missing in ``query`` to implement all appropriate ir.rules
          (using the ``model_name``'s rules or the current model's rules if ``model_name`` is None)

        :param query: the current query object
        """
        if uid == SUPERUSER_ID:
            # NOTE(review): the early ``return`` (superuser bypasses ir.rules)
            # appears to be elided from this excerpt.

        def apply_rule(added_clause, added_params, added_tables, parent_model=None, child_object=None):
            """ :param string parent_model: string of the parent model
                :param model child_object: model object, base of the rule application
            """
            if parent_model and child_object:
                # as inherited rules are being applied, we need to add the missing JOIN
                # to reach the parent table (if it was not JOINed yet in the query)
                parent_alias = child_object._inherits_join_add(child_object, parent_model, query)
                # inherited rules are applied on the external table -> need to get the alias and replace
                parent_table = self.pool[parent_model]._table
                added_clause = [clause.replace('"%s"' % parent_table, '"%s"' % parent_alias) for clause in added_clause]
                # change references to parent_table to parent_alias, because we now use the alias to refer to the table
                # NOTE(review): the ``new_tables = []`` initialization appears
                # to be elided from this excerpt.
                for table in added_tables:
                    # table is just a table name -> switch to the full alias
                    if table == '"%s"' % parent_table:
                        new_tables.append('"%s" as "%s"' % (parent_table, parent_alias))
                    # table is already a full statement -> replace reference to the table to its alias, is correct with the way aliases are generated
                    # (elided ``else:`` branch header)
                    new_tables.append(table.replace('"%s"' % parent_table, '"%s"' % parent_alias))
                added_tables = new_tables
            query.where_clause += added_clause
            query.where_clause_params += added_params
            for table in added_tables:
                if table not in query.tables:
                    query.tables.append(table)

        # apply main rules on the object
        rule_obj = self.pool.get('ir.rule')
        rule_where_clause, rule_where_clause_params, rule_tables = rule_obj.domain_get(cr, uid, self._name, mode, context=context)
        apply_rule(rule_where_clause, rule_where_clause_params, rule_tables)

        # apply ir.rules from the parents (through _inherits)
        for inherited_model in self._inherits:
            rule_where_clause, rule_where_clause_params, rule_tables = rule_obj.domain_get(cr, uid, inherited_model, mode, context=context)
            apply_rule(rule_where_clause, rule_where_clause_params, rule_tables,
                        parent_model=inherited_model, child_object=self)
    def _generate_m2o_order_by(self, order_field, query):
        """
        Add possibly missing JOIN to ``query`` and generate the ORDER BY clause for m2o fields,
        either native m2o fields or function/related fields that are stored, including
        intermediate JOINs for inheritance if required.

        :return: the qualified field name to use in an ORDER BY clause to sort by ``order_field``
        """
        if order_field not in self._columns and order_field in self._inherit_fields:
            # also add missing joins for reaching the table containing the m2o field
            qualified_field = self._inherits_join_calc(order_field, query)
            order_field_column = self._inherit_fields[order_field][2]
        # (elided ``else:`` branch header — field lives on this model's own table)
        qualified_field = '"%s"."%s"' % (self._table, order_field)
        order_field_column = self._columns[order_field]

        assert order_field_column._type == 'many2one', 'Invalid field passed to _generate_m2o_order_by()'
        if not order_field_column._classic_write and not getattr(order_field_column, 'store', False):
            _logger.debug("Many2one function/related fields must be stored " \
                "to be used as ordering fields! Ignoring sorting for %s.%s",
                self._name, order_field)
            # NOTE(review): the early ``return`` for unsortable fields appears
            # to be elided from this excerpt.

        # figure out the applicable order_by for the m2o
        dest_model = self.pool[order_field_column._obj]
        m2o_order = dest_model._order
        if not regex_order.match(m2o_order):
            # _order is complex, can't use it here, so we default to _rec_name
            m2o_order = dest_model._rec_name
        # NOTE(review): the ``else:`` branch and the ``m2o_order_list = []``
        # initialization appear to be elided from this excerpt.
        # extract the field names, to be able to qualify them and add desc/asc
        for order_part in m2o_order.split(","):
            m2o_order_list.append(order_part.strip().split(" ", 1)[0].strip())
        m2o_order = m2o_order_list

        # Join the dest m2o table if it's not joined yet. We use [LEFT] OUTER join here
        # as we don't want to exclude results that have NULL values for the m2o
        src_table, src_field = qualified_field.replace('"', '').split('.', 1)
        dst_alias, dst_alias_statement = query.add_join((src_table, dest_model._table, src_field, 'id', src_field), implicit=False, outer=True)
        qualify = lambda field: '"%s"."%s"' % (dst_alias, field)
        return map(qualify, m2o_order) if isinstance(m2o_order, list) else qualify(m2o_order)
    def _generate_order_by(self, order_spec, query):
        """
        Attempt to construct an appropriate ORDER BY clause based on order_spec, which must be
        a comma-separated list of valid field names, optionally followed by an ASC or DESC direction.

        :raise: except_orm in case order_spec is malformed
        """
        order_by_clause = ''
        order_spec = order_spec or self._order
        # NOTE(review): an ``if order_spec:`` guard and several ``else:`` branch
        # headers appear to be elided from this excerpt (marked below).
        order_by_elements = []
        self._check_qorder(order_spec)
        for order_part in order_spec.split(','):
            order_split = order_part.strip().split(' ')
            order_field = order_split[0].strip()
            order_direction = order_split[1].strip() if len(order_split) == 2 else ''
            # id and the audit-log columns always live on our own table
            if order_field == 'id' or (self._log_access and order_field in LOG_ACCESS_COLUMNS.keys()):
                order_by_elements.append('"%s"."%s" %s' % (self._table, order_field, order_direction))
            elif order_field in self._columns:
                order_column = self._columns[order_field]
                if order_column._classic_read:
                    inner_clause = '"%s"."%s"' % (self._table, order_field)
                elif order_column._type == 'many2one':
                    inner_clause = self._generate_m2o_order_by(order_field, query)
                # (elided ``else:`` branch header)
                continue # ignore non-readable or "non-joinable" fields
            elif order_field in self._inherit_fields:
                parent_obj = self.pool[self._inherit_fields[order_field][3]]
                order_column = parent_obj._columns[order_field]
                if order_column._classic_read:
                    inner_clause = self._inherits_join_calc(order_field, query)
                elif order_column._type == 'many2one':
                    inner_clause = self._generate_m2o_order_by(order_field, query)
                # (elided ``else:`` branch header)
                continue # ignore non-readable or "non-joinable" fields
            # (elided ``else:`` branch header — unknown field name)
            raise ValueError( _("Sorting field %s not found on model %s") %( order_field, self._name))
            # m2o ordering may produce several clauses (one per part of the
            # destination model's _order)
            if isinstance(inner_clause, list):
                for clause in inner_clause:
                    order_by_elements.append("%s %s" % (clause, order_direction))
            # (elided ``else:`` branch header)
            order_by_elements.append("%s %s" % (inner_clause, order_direction))
        if order_by_elements:
            order_by_clause = ",".join(order_by_elements)

        return order_by_clause and (' ORDER BY %s ' % order_by_clause) or ''
    def _search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False, access_rights_uid=None):
        """
        Private implementation of search() method, allowing specifying the uid to use for the access right check.
        This is useful for example when filling in the selection list for a drop-down and avoiding access rights errors,
        by specifying ``access_rights_uid=1`` to bypass access rights check, but not ir.rules!
        This is ok at the security level because this method is private and not callable through XML-RPC.

        :param access_rights_uid: optional user ID to use when checking access rights
                                  (not for ir.rules, this is only for ir.model.access)
        """
        self.check_access_rights(cr, access_rights_uid or user, 'read')

        # For transient models, restrict access to the current user, except for the super-user
        if self.is_transient() and self._log_access and user != SUPERUSER_ID:
            args = expression.AND(([('create_uid', '=', user)], args or []))

        # build the SQL pieces: WHERE from the domain + ir.rules, then ORDER BY
        query = self._where_calc(cr, user, args, context=context)
        self._apply_ir_rules(cr, user, query, 'read', context=context)
        order_by = self._generate_order_by(order, query)
        from_clause, where_clause, where_clause_params = query.get_sql()

        where_str = where_clause and (" WHERE %s" % where_clause) or ''

        # NOTE(review): the ``if count:`` branch header, the count fetch/return,
        # and the ``res = cr.fetchall()`` after the main query appear to be
        # elided from this excerpt.
        # Ignore order, limit and offset when just counting, they don't make sense and could
        query_str = 'SELECT count(1) FROM ' + from_clause + where_str
        cr.execute(query_str, where_clause_params)

        limit_str = limit and ' limit %d' % limit or ''
        offset_str = offset and ' offset %d' % offset or ''
        query_str = 'SELECT "%s".id FROM ' % self._table + from_clause + where_str + order_by + limit_str + offset_str
        cr.execute(query_str, where_clause_params)

        # TDE note: with auto_join, we could have several lines about the same result
        # i.e. a lead with several unread messages; we uniquify the result using
        # a fast way to do it while preserving order (http://www.peterbe.com/plog/uniqifiers-benchmark)
        def _uniquify_list(seq):
            # (elided: ``seen = set()``) — relies on set.add() returning None,
            # so each element passes the filter only the first time it is seen
            return [x for x in seq if x not in seen and not seen.add(x)]
        return _uniquify_list([x[0] for x in res])
4681 # returns the different values ever entered for one field
4682 # this is used, for example, in the client when the user hits enter on
    def distinct_field_get(self, cr, uid, field, value, args=None, offset=0, limit=None):
        """Return the distinct values previously entered for ``field``,
        delegating to the parent model for inherited fields.
        """
        # (elided: defaulting of ``args``)
        if field in self._inherit_fields:
            return self.pool[self._inherit_fields[field][0]].distinct_field_get(cr, uid, field, value, args, offset, limit)
        # (elided ``else:`` branch header)
            return self._columns[field].search(cr, self, args, field, value, offset, limit, uid)
    def copy_data(self, cr, uid, id, default=None, context=None):
        """
        Copy given record's data with all its fields values

        :param cr: database cursor
        :param uid: current user id
        :param id: id of the record to copy
        :param default: field values to override in the original values of the copied record
        :type default: dictionary
        :param context: context arguments, like lang, time zone
        :type context: dictionary
        :return: dictionary containing all the field values
        """
        # NOTE(review): the defaulting of ``context``/``default`` appears to be
        # elided from this excerpt.
        # avoid recursion through already copied records in case of circular relationship
        seen_map = context.setdefault('__copy_data_seen', {})
        if id in seen_map.setdefault(self._name, []):
            # (elided: early return for an already-seen record)
        seen_map[self._name].append(id)

        # a 'state' default is re-evaluated rather than copied, so the copy
        # starts from the model's initial state
        if 'state' not in default:
            if 'state' in self._defaults:
                if callable(self._defaults['state']):
                    default['state'] = self._defaults['state'](self, cr, uid, context)
                # (elided ``else:`` branch header — plain default value)
                default['state'] = self._defaults['state']

        # build a black list of fields that should not be copied
        blacklist = set(MAGIC_COLUMNS + ['parent_left', 'parent_right'])
        def blacklist_given_fields(obj):
            # blacklist the fields that are given by inheritance
            for other, field_to_other in obj._inherits.items():
                blacklist.add(field_to_other)
                if field_to_other in default:
                    # all the fields of 'other' are given by the record: default[field_to_other],
                    # except the ones redefined in self
                    blacklist.update(set(self.pool[other]._all_columns) - set(self._columns))
                # (elided ``else:`` branch header — recurse into the parent model)
                blacklist_given_fields(self.pool[other])
            # blacklist deprecated fields
            for name, field in obj._columns.items():
                if field.deprecated:
                    # (elided: add ``name`` to the blacklist)
        blacklist_given_fields(self)

        # copyable fields: not blacklisted, not fields.function
        # NOTE(review): one filter line of this dict comprehension appears to be
        # elided from this excerpt.
        fields_to_copy = dict((f,fi) for f, fi in self._all_columns.iteritems()
                             if f not in blacklist
                             if not isinstance(fi.column, fields.function))

        data = self.read(cr, uid, [id], fields_to_copy.keys(), context=context)
        # (elided: unwrap ``data[0]`` / raise when the record does not exist)
            raise IndexError( _("Record #%d of %s not found, cannot copy!") %( id, self._name))
        # (elided: initialization of ``res``)
        for f, colinfo in fields_to_copy.iteritems():
            field = colinfo.column
            if field._type == 'many2one':
                # read() returns (id, name) pairs for m2o; keep just the id
                res[f] = data[f] and data[f][0]
            elif field._type == 'one2many':
                other = self.pool[field._obj]
                # duplicate following the order of the ids because we'll rely on
                # it later for copying translations in copy_translation()!
                lines = [other.copy_data(cr, uid, line_id, context=context) for line_id in sorted(data[f])]
                # the lines are duplicated using the wrong (old) parent, but then
                # are reassigned to the correct one thanks to the (0, 0, ...)
                res[f] = [(0, 0, line) for line in lines if line]
            elif field._type == 'many2many':
                res[f] = [(6, 0, data[f])]
    def copy_translations(self, cr, uid, old_id, new_id, context=None):
        """Duplicate the ir.translation entries of record ``old_id`` onto the
        freshly copied record ``new_id``, recursing into one2many children.
        """
        # NOTE(review): the defaulting of ``context`` appears to be elided from
        # this excerpt.
        # avoid recursion through already copied records in case of circular relationship
        seen_map = context.setdefault('__copy_translations_seen',{})
        if old_id in seen_map.setdefault(self._name,[]):
            # (elided: early return for an already-seen record)
        seen_map[self._name].append(old_id)

        trans_obj = self.pool.get('ir.translation')
        # TODO it seems fields_get can be replaced by _all_columns (no need for translation)
        fields = self.fields_get(cr, uid, context=context)

        for field_name, field_def in fields.items():
            # removing the lang to compare untranslated values
            context_wo_lang = dict(context, lang=None)
            old_record, new_record = self.browse(cr, uid, [old_id, new_id], context=context_wo_lang)
            # we must recursively copy the translations for o2o and o2m
            if field_def['type'] == 'one2many':
                target_obj = self.pool[field_def['relation']]
                # here we rely on the order of the ids to match the translations
                # as foreseen in copy_data()
                old_children = sorted(r.id for r in old_record[field_name])
                new_children = sorted(r.id for r in new_record[field_name])
                for (old_child, new_child) in zip(old_children, new_children):
                    target_obj.copy_translations(cr, uid, old_child, new_child, context=context)
            # and for translatable fields we keep them for copy
            elif field_def.get('translate'):
                if field_name in self._columns:
                    trans_name = self._name + "," + field_name
                    # (elided: target_id/source_id set to new_id/old_id)
                elif field_name in self._inherit_fields:
                    trans_name = self._inherit_fields[field_name][0] + "," + field_name
                    # get the id of the parent record to set the translation
                    inherit_field_name = self._inherit_fields[field_name][1]
                    target_id = new_record[inherit_field_name].id
                    source_id = old_record[inherit_field_name].id
                # (elided ``else:``/``continue`` for non-copyable fields)

                # NOTE(review): the closing ``])`` of this search() domain
                # appears to be elided from this excerpt.
                trans_ids = trans_obj.search(cr, uid, [
                        ('name', '=', trans_name),
                        ('res_id', '=', source_id)
                user_lang = context.get('lang')
                for record in trans_obj.read(cr, uid, trans_ids, context=context):
                    # (elided: drop the translation's own ``id``)
                    # remove source to avoid triggering _set_src
                    del record['source']
                    record.update({'res_id': target_id})
                    if user_lang and user_lang == record['lang']:
                        # 'source' to force the call to _set_src
                        # 'value' needed if value is changed in copy(), want to see the new_value
                        record['source'] = old_record[field_name]
                        record['value'] = new_record[field_name]
                    trans_obj.create(cr, uid, record, context=context)
    def copy(self, cr, uid, id, default=None, context=None):
        """
        Duplicate record with given id updating it with default values

        :param cr: database cursor
        :param uid: current user id
        :param id: id of the record to copy
        :param default: dictionary of field values to override in the original values of the copied record, e.g: ``{'field_name': overriden_value, ...}``
        :type default: dictionary
        :param context: context arguments, like lang, time zone
        :type context: dictionary
        :return: id of the newly created record
        """
        # NOTE(review): the ``context`` None-guard appears to be elided from
        # this excerpt; copy() works on its own context copy so the recursion
        # markers set by copy_data()/copy_translations() stay private to it
        context = context.copy()
        data = self.copy_data(cr, uid, id, default, context)
        new_id = self.create(cr, uid, data, context)
        self.copy_translations(cr, uid, id, new_id, context)
        # NOTE(review): the final ``return new_id`` appears to be elided here.
    def exists(self, cr, uid, ids, context=None):
        """Checks whether the given id or ids exist in this model,
        and return the list of ids that do. This is simple to use for
        a truth test on a browse_record::

        :param ids: id or list of ids to check for existence
        :type ids: int or [int]
        :return: the list of ids that currently exist, out of
                 the given ``ids``
        """
        if type(ids) in (int, long):
            # (elided: wrap the single id in a list)
        query = 'SELECT id FROM "%s"' % self._table
        # NOTE(review): ``query + "WHERE ID IN %s"`` concatenates with no
        # separating space, yielding e.g. ``... FROM "table"WHERE ID IN ...``;
        # a space before WHERE looks intended — confirm against upstream.
        cr.execute(query + "WHERE ID IN %s", (tuple(ids),))
        return [x[0] for x in cr.fetchall()]
    def check_recursion(self, cr, uid, ids, context=None, parent=None):
        """Deprecated public wrapper around :meth:`_check_recursion`."""
        # NOTE(review): the continuation line carrying the ``%`` argument(s) of
        # this warning appears to be elided from this excerpt.
        _logger.warning("You are using deprecated %s.check_recursion(). Please use the '_check_recursion()' instead!" % \
        assert parent is None or parent in self._columns or parent in self._inherit_fields,\
            "The 'parent' parameter passed to check_recursion() must be None or a valid field name"
        return self._check_recursion(cr, uid, ids, context, parent)
    def _check_recursion(self, cr, uid, ids, context=None, parent=None):
        """
        Verifies that there is no loop in a hierarchical structure of records,
        by following the parent relationship using the **parent** field until a loop
        is detected or until a top-level record is found.

        :param cr: database cursor
        :param uid: current user id
        :param ids: list of ids of records to check
        :param parent: optional parent field name (default: ``self._parent_name = parent_id``)
        :return: **True** if the operation can proceed safely, or **False** if an infinite loop is detected.
        """
        # (elided: guard applying this default only when ``parent`` is falsy)
        parent = self._parent_name

        # must ignore 'active' flag, ir.rules, etc. => direct SQL query
        query = 'SELECT "%s" FROM "%s" WHERE id = %%s' % (parent, self._table)
        # NOTE(review): the outer loop over ``ids`` seeding ``current_id``
        # appears to be elided from this excerpt.
        while current_id is not None:
            cr.execute(query, (current_id,))
            result = cr.fetchone()
            current_id = result[0] if result else None
            if current_id == id:
                # (elided: ``return False`` on a detected cycle; the final
                #  ``return True`` is elided as well)
    def _check_m2m_recursion(self, cr, uid, ids, field_name):
        """
        Verifies that there is no loop in a hierarchical structure of records,
        by following the parent relationship using the **parent** field until a loop
        is detected or until a top-level record is found.

        :param cr: database cursor
        :param uid: current user id
        :param ids: list of ids of records to check
        :param field_name: field to check
        :return: **True** if the operation can proceed safely, or **False** if an infinite loop is detected.
        """
        field = self._all_columns.get(field_name)
        field = field.column if field else None
        if not field or field._type != 'many2many' or field._obj != self._name:
            # field must be a many2many on itself
            raise ValueError('invalid field_name: %r' % (field_name,))

        # walk the m2m relation table level by level, collecting parents
        query = 'SELECT distinct "%s" FROM "%s" WHERE "%s" IN %%s' % (field._id2, field._rel, field._id1)
        # NOTE(review): the seeding of ``ids_parent``, the outer ``while`` loop
        # header, the ``ids_parent2`` init and the ``j`` slice bound appear to
        # be elided from this excerpt.
            # query in batches of cr.IN_MAX ids to stay within SQL limits
            for i in range(0, len(ids_parent), cr.IN_MAX):
                sub_ids_parent = ids_parent[i:j]
                cr.execute(query, (tuple(sub_ids_parent),))
                ids_parent2.extend(filter(None, map(lambda x: x[0], cr.fetchall())))
            ids_parent = ids_parent2
            for i in ids_parent:
                # (elided: cycle check against the original ``ids`` and returns)
    def _get_external_ids(self, cr, uid, ids, *args, **kwargs):
        """Retrieve the External ID(s) of any database record.

        **Synopsis**: ``_get_xml_ids(cr, uid, ids) -> { 'id': ['module.xml_id'] }``

        :return: map of ids to the list of their fully qualified External IDs
                 in the form ``module.key``, or an empty list when there's no External
                 ID for a record, e.g.::

                     { 'id': ['module.ext_id', 'module.ext_id_bis'],
        """
        ir_model_data = self.pool.get('ir.model.data')
        data_ids = ir_model_data.search(cr, uid, [('model', '=', self._name), ('res_id', 'in', ids)])
        data_results = ir_model_data.read(cr, uid, data_ids, ['module', 'name', 'res_id'])
        # can't use dict.fromkeys() as the list would be shared!
        # NOTE(review): the initialization of ``result`` (one fresh empty list
        # per id) and the final ``return result`` appear to be elided here.
        for record in data_results:
            result[record['res_id']].append('%(module)s.%(name)s' % record)
    def get_external_id(self, cr, uid, ids, *args, **kwargs):
        """Retrieve the External ID of any database record, if there
        is one. This method works as a possible implementation
        for a function field, to be able to add it to any
        model object easily, referencing it as ``Model.get_external_id``.

        When multiple External IDs exist for a record, only one
        of them is returned (randomly).

        :return: map of ids to their fully qualified XML ID,
                 defaulting to an empty string when there's none
                 (to be usable as a function field),
                 e.g.::

                     { 'id': 'module.ext_id',
        """
        results = self._get_xml_ids(cr, uid, ids)
        for k, v in results.iteritems():
            # NOTE(review): the loop body (collapsing each list of external ids
            # to a single string, or '') and the return appear to be elided.
    # backwards compatibility: keep the old External-ID method names working
    get_xml_id = get_external_id
    _get_xml_ids = _get_external_ids
    def print_report(self, cr, uid, ids, name, data, context=None):
        """
        Render the report `name` for the given IDs. The report must be defined
        for this model, not another.
        """
        report = self.pool['ir.actions.report.xml']._lookup_report(cr, name)
        # the looked-up report must indeed target this model
        assert self._name == report.table
        return report.create(cr, uid, ids, data, context)
    def is_transient(self):
        """ Return whether the model is transient.

        See :class:`TransientModel`.
        """
        return self._transient
5016 def _transient_clean_rows_older_than(self, cr, seconds):
5017 assert self._transient, "Model %s is not transient, it cannot be vacuumed!" % self._name
5018 # Never delete rows used in last 5 minutes
5019 seconds = max(seconds, 300)
5020 query = ("SELECT id FROM " + self._table + " WHERE"
5021 " COALESCE(write_date, create_date, (now() at time zone 'UTC'))::timestamp"
5022 " < ((now() at time zone 'UTC') - interval %s)")
5023 cr.execute(query, ("%s seconds" % seconds,))
5024 ids = [x[0] for x in cr.fetchall()]
5025 self.unlink(cr, SUPERUSER_ID, ids)
    def _transient_clean_old_rows(self, cr, max_count):
        """Vacuum the transient table when it holds more than ``max_count`` rows."""
        # Check how many rows we have in the table
        cr.execute("SELECT count(*) AS row_count FROM " + self._table)
        # NOTE(review): the ``res = cr.fetchall()`` line appears to be elided
        # from this excerpt.
        if res[0][0] <= max_count:
            return  # max not reached, nothing to do
        self._transient_clean_rows_older_than(cr, 300)
    def _transient_vacuum(self, cr, uid, force=False):
        """Clean the transient records.

        This unlinks old records from the transient model tables whenever the
        "_transient_max_count" or "_max_age" conditions (if any) are reached.
        Actual cleaning will happen only once every "_transient_check_time" calls.
        This means this method can be called frequently (e.g. whenever
        a new record is created).
        Example with both max_hours and max_count active:
        Suppose max_hours = 0.2 (e.g. 12 minutes), max_count = 20, there are 55 rows in the
        table, 10 created/changed in the last 5 minutes, an additional 12 created/changed between
        5 and 10 minutes ago, the rest created/changed more than 12 minutes ago.
        - age based vacuum will leave the 22 rows created/changed in the last 12 minutes
        - count based vacuum will wipe out another 12 rows. Not just 2, otherwise each addition
          would immediately cause the maximum to be reached again.
        - the 10 rows that have been created/changed the last 5 minutes will NOT be deleted
        """
        assert self._transient, "Model %s is not transient, it cannot be vacuumed!" % self._name
        _transient_check_time = 20          # arbitrary limit on vacuum executions
        self._transient_check_count += 1
        if not force and (self._transient_check_count < _transient_check_time):
            return True  # no vacuum cleaning this time
        self._transient_check_count = 0

        # Age-based expiration
        if self._transient_max_hours:
            self._transient_clean_rows_older_than(cr, self._transient_max_hours * 60 * 60)

        # Count-based expiration
        if self._transient_max_count:
            self._transient_clean_old_rows(cr, self._transient_max_count)
        # NOTE(review): a final ``return True`` appears to be elided here.
5069 def resolve_2many_commands(self, cr, uid, field_name, commands, fields=None, context=None):
5070 """ Serializes one2many and many2many commands into record dictionaries
5071 (as if all the records came from the database via a read()). This
5072 method is aimed at onchange methods on one2many and many2many fields.
5074 Because commands might be creation commands, not all record dicts
5075 will contain an ``id`` field. Commands matching an existing record
5076 will have an ``id``.
5078 :param field_name: name of the one2many or many2many field matching the commands
5079 :type field_name: str
5080 :param commands: one2many or many2many commands to execute on ``field_name``
5081 :type commands: list((int|False, int|False, dict|False))
5082 :param fields: list of fields to read from the database, when applicable
5083 :type fields: list(str)
5084 :returns: records in a shape similar to that returned by ``read()``
5085 (except records may be missing the ``id`` field if they don't exist in db)
5088 result = [] # result (list of dict)
5089 record_ids = [] # ids of records to read
5090 updates = {} # {id: dict} of updates on particular records
5092 for command in commands:
5093 if not isinstance(command, (list, tuple)):
5094 record_ids.append(command)
5095 elif command[0] == 0:
5096 result.append(command[2])
5097 elif command[0] == 1:
5098 record_ids.append(command[1])
5099 updates.setdefault(command[1], {}).update(command[2])
5100 elif command[0] in (2, 3):
5101 record_ids = [id for id in record_ids if id != command[1]]
5102 elif command[0] == 4:
5103 record_ids.append(command[1])
5104 elif command[0] == 5:
5105 result, record_ids = [], []
5106 elif command[0] == 6:
5107 result, record_ids = [], list(command[2])
5109 # read the records and apply the updates
5110 other_model = self.pool[self._all_columns[field_name].column._obj]
5111 for record in other_model.read(cr, uid, record_ids, fields=fields, context=context):
5112 record.update(updates.get(record['id'], {}))
5113 result.append(record)
    # for backward compatibility: resolve_2many_commands was formerly
    # published under this name
    resolve_o2m_commands_to_record_dicts = resolve_2many_commands
5120 def search_read(self, cr, uid, domain=None, fields=None, offset=0, limit=None, order=None, context=None):
5122 Performs a ``search()`` followed by a ``read()``.
5124 :param cr: database cursor
5125 :param user: current user id
5126 :param domain: Search domain, see ``args`` parameter in ``search()``. Defaults to an empty domain that will match all records.
5127 :param fields: List of fields to read, see ``fields`` parameter in ``read()``. Defaults to all fields.
5128 :param offset: Number of records to skip, see ``offset`` parameter in ``search()``. Defaults to 0.
5129 :param limit: Maximum number of records to return, see ``limit`` parameter in ``search()``. Defaults to no limit.
5130 :param order: Columns to sort result, see ``order`` parameter in ``search()``. Defaults to no sort.
5131 :param context: context arguments.
5132 :return: List of dictionaries containing the asked fields.
5133 :rtype: List of dictionaries.
5136 record_ids = self.search(cr, uid, domain or [], offset=offset, limit=limit, order=order, context=context)
5140 if fields and fields == ['id']:
5141 # shortcut read if we only want the ids
5142 return [{'id': id} for id in record_ids]
5144 # read() ignores active_test, but it would forward it to any downstream search call
5145 # (e.g. for x2m or function fields), and this is not the desired behavior, the flag
5146 # was presumably only meant for the main search().
5147 # TODO: Move this to read() directly?
5148 read_ctx = dict(context or {})
5149 read_ctx.pop('active_test', None)
5151 result = self.read(cr, uid, record_ids, fields, context=read_ctx)
5152 if len(result) <= 1:
5156 index = dict((r['id'], r) for r in result)
5157 return [index[x] for x in record_ids if x in index]
5159 def _register_hook(self, cr):
5160 """ stuff to do right after the registry is built """
5163 def __getattr__(self, name):
5164 if name.startswith('signal_'):
5165 signal_name = name[len('signal_'):]
5167 return (lambda *args, **kwargs:
5168 self.signal_workflow(*args, signal=signal_name, **kwargs))
5169 get = getattr(super(BaseModel, self), '__getattr__', None)
5170 if get is not None: return get(name)
5171 raise AttributeError(
5172 "'%s' object has no attribute '%s'" % (type(self).__name__, name))
5174 # keep this import here, at top it will cause dependency cycle errors
class Model(BaseModel):
    """Main super-class for regular database-persisted OpenERP models.

    OpenERP models are created by inheriting from this class::

        class user(Model):
            ...

    The system will later instantiate the class once per database (on
    which the class' module is installed).
    """
    _register = False # not visible in ORM registry, meant to be python-inherited only
    _transient = False # True in a TransientModel
class TransientModel(BaseModel):
    """Model super-class for transient records, meant to be temporarily
    persisted, and regularly vacuum-cleaned.

    A TransientModel has a simplified access rights management,
    all users can create new records, and may only access the
    records they created. The super-user has unrestricted access
    to all TransientModel records.
    """
    _register = False # not visible in ORM registry, meant to be python-inherited only
    # BUGFIX: a TransientModel must flag itself as transient (see the
    # `_transient = False # True in a TransientModel` default on Model)
    _transient = True
class AbstractModel(BaseModel):
    """Abstract Model super-class for creating an abstract class meant to be
    inherited by regular models (Models or TransientModels) but not meant to
    be usable on its own, or persisted.

    Technical note: we don't want to make AbstractModel the super-class of
    Model or BaseModel because it would not make sense to put the main
    definition of persistence methods such as create() in it, and still we
    should be able to override them within an AbstractModel.
    """
    _auto = False # don't create any database backend for AbstractModels
    _register = False # not visible in ORM registry, meant to be python-inherited only
def itemgetter_tuple(items):
    """ Fixes itemgetter inconsistency (useful in some cases) of not returning
    a tuple if len(items) == 1: always returns an n-tuple where n = len(items)

    :param items: list of keys/indexes to extract
    :returns: callable mapping a gettable to the n-tuple of extracted values
    """
    # BUGFIX: the two length guards were missing, making the 1-tuple lambda
    # unconditional and the itemgetter branch unreachable
    if len(items) == 0:
        return lambda a: ()
    if len(items) == 1:
        return lambda gettable: (gettable[items[0]],)
    return operator.itemgetter(*items)
class ImportWarning(Warning):
    """ Used to send warnings upwards the stack during the import process.

    NOTE: intentionally shadows the builtin ``ImportWarning`` within this
    module.
    """
    pass
def convert_pgerror_23502(model, fields, info, e):
    """Convert a PostgreSQL not_null_violation (23502) into a user-friendly
    message dict ``{'message': ..., 'field': ...}``; falls back to the raw
    error text when the column cannot be identified."""
    m = re.match(r'^null value in column "(?P<field>\w+)" violates '
                 r'not-null constraint\n',
                 str(e))
    # BUGFIX: check the match result before calling .group(), otherwise a
    # non-matching error message raised AttributeError
    if not m or m.group('field') not in fields:
        return {'message': unicode(e)}
    field_name = m.group('field')
    message = _(u"Missing required value for the field '%s'.") % field_name
    field = fields.get(field_name)
    if field:
        message = _(u"Missing required value for the field '%s' (%s)") % (field['string'], field_name)
    return {
        'message': message,
        'field': field_name,
    }
def convert_pgerror_23505(model, fields, info, e):
    """Convert a PostgreSQL unique_violation (23505) into a user-friendly
    message dict ``{'message': ..., 'field': ...}``; falls back to the raw
    error text when the column cannot be identified."""
    m = re.match(r'^duplicate key (?P<field>\w+) violates unique constraint',
                 str(e))
    # BUGFIX: check the match result before calling .group(), otherwise a
    # non-matching error message raised AttributeError
    if not m or m.group('field') not in fields:
        return {'message': unicode(e)}
    field_name = m.group('field')
    message = _(u"The value for the field '%s' already exists.") % field_name
    field = fields.get(field_name)
    if field:
        message = _(u"%s This might be '%s' in the current model, or a field "
                    u"of the same name in an o2m.") % (message, field['string'])
    return {
        'message': message,
        'field': field_name,
    }
# Map of pg error class codes to converter callables; unknown codes fall back
# to a converter that simply stringifies the error.
PGERROR_TO_OE = collections.defaultdict(
    # shape of mapped converters
    lambda: (lambda model, fvg, info, pgerror: {'message': unicode(pgerror)}), {
    # not_null_violation
    '23502': convert_pgerror_23502,
    # unique constraint error
    '23505': convert_pgerror_23505,
})
5274 # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: