1 # -*- coding: utf-8 -*-
2 ##############################################################################
4 # OpenERP, Open Source Management Solution
5 # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
7 # This program is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU Affero General Public License as
9 # published by the Free Software Foundation, either version 3 of the
10 # License, or (at your option) any later version.
12 # This program is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU Affero General Public License for more details.
17 # You should have received a copy of the GNU Affero General Public License
18 # along with this program. If not, see <http://www.gnu.org/licenses/>.
20 ##############################################################################
24 Object relational mapping to database (postgresql) module
25 * Hierarchical structure
26 * Constraints consistency, validations
27 * Object meta Data depends on its status
28 * Optimised processing by complex query (multiple actions at once)
29 * Default fields value
30 * Permissions optimisation
 * Persistent object: DB postgresql
33 * Multi-level caching system
 * Two different inheritance mechanisms
36 - classicals (varchar, integer, boolean, ...)
37 - relations (one2many, many2one, many2many)
56 from collections import defaultdict
59 import dateutil.parser
61 from lxml import etree
65 import openerp.tools as tools
66 from openerp.tools.config import config
67 from openerp.tools.misc import CountingStream, DEFAULT_SERVER_DATETIME_FORMAT, DEFAULT_SERVER_DATE_FORMAT
68 from openerp.tools.safe_eval import safe_eval as eval
69 from openerp.tools.translate import _
70 from openerp import SUPERUSER_ID
71 from query import Query
73 _logger = logging.getLogger(__name__)
74 _schema = logging.getLogger(__name__ + '.schema')
76 # List of etree._Element subclasses that we choose to ignore when parsing XML.
77 from openerp.tools import SKIPPED_ELEMENT_TYPES
# Accepted syntax for a search `order` spec: comma-separated column names
# (optionally double-quoted), each optionally followed by asc/desc.
regex_order = re.compile('^( *([a-z0-9_]+|"[a-z0-9_]+")( *desc| *asc)?( *, *|))+$', re.I)
# Valid model names (_name): lowercase alphanumerics plus '_' and '.'.
regex_object_name = re.compile(r'^[a-z0-9_.]+$')

# Threshold used when recomputing stored function fields during automatic
# schema init -- presumably a batch-size limit; usage not visible in this chunk.
AUTOINIT_RECALCULATE_STORED_FIELDS = 1000
def transfer_field_to_modifiers(field, modifiers):
    """Fill `modifiers` (in place) with the static ``invisible``, ``readonly``
    and ``required`` values of a field descriptor, turning its ``states``
    exceptions into ``('state', 'in'/'not in', [...])`` domains.

    :param dict field: field descriptor (as returned by ``fields_get()``)
    :param dict modifiers: target dict, updated in place
    """
    default_values = {}
    state_exceptions = {}
    for attr in ('invisible', 'readonly', 'required'):
        state_exceptions[attr] = []
        default_values[attr] = bool(field.get(attr))
    # record the states in which an attribute differs from its default value
    for state, modifs in (field.get("states",{})).items():
        for modif in modifs:
            if default_values[modif[0]] != modif[1]:
                state_exceptions[modif[0]].append(state)

    for attr, default_value in default_values.items():
        if state_exceptions[attr]:
            # the attribute flips in the listed states; encode it as a domain
            modifiers[attr] = [("state", "not in" if default_value else "in", state_exceptions[attr])]
        else:
            modifiers[attr] = default_value
102 # Don't deal with groups, it is done by check_group().
103 # Need the context to evaluate the invisible attribute on tree views.
104 # For non-tree views, the context shouldn't be given.
def transfer_node_to_modifiers(node, modifiers, context=None, in_tree_view=False):
    """Extract the ``attrs``, ``states``, ``invisible``, ``readonly`` and
    ``required`` attributes of a view node into `modifiers` (in place).

    :param node: view architecture node (etree element)
    :param dict modifiers: target dict, updated in place
    :param dict context: evaluation context for the boolean attributes
    :param bool in_tree_view: route ``invisible`` to ``tree_invisible``
    """
    if node.get('attrs'):
        # `attrs` is a python expression; `eval` is the safe_eval alias.
        modifiers.update(eval(node.get('attrs')))

    if node.get('states'):
        if 'invisible' in modifiers and isinstance(modifiers['invisible'], list):
            # TODO combine with AND or OR, use implicit AND for now.
            modifiers['invisible'].append(('state', 'not in', node.get('states').split(',')))
        else:
            modifiers['invisible'] = [('state', 'not in', node.get('states').split(','))]

    for a in ('invisible', 'readonly', 'required'):
        if node.get(a):
            v = bool(eval(node.get(a), {'context': context or {}}))
            if in_tree_view and a == 'invisible':
                # Invisible in a tree view has a specific meaning, make it a
                # new key in the modifiers attribute.
                modifiers['tree_invisible'] = v
            elif v or (a not in modifiers or not isinstance(modifiers[a], list)):
                # Don't set the attribute to False if a dynamic value was
                # provided (i.e. a domain from attrs or states).
                modifiers[a] = v
def simplify_modifiers(modifiers):
    """Drop falsy ``invisible``/``readonly``/``required`` entries from
    `modifiers` (in place); absent means False for the client anyway."""
    for a in ('invisible', 'readonly', 'required'):
        if a in modifiers and not modifiers[a]:
            del modifiers[a]
def transfer_modifiers_to_node(modifiers, node):
    """Serialize non-empty `modifiers` (simplified first) into the node's
    ``modifiers`` attribute as JSON, for consumption by the client."""
    if modifiers:
        simplify_modifiers(modifiers)
        node.set('modifiers', simplejson.dumps(modifiers))
def setup_modifiers(node, field=None, context=None, in_tree_view=False):
    """ Processes node attributes and field descriptors to generate
    the ``modifiers`` node attribute and set it on the provided node.

    Alters its first argument in-place.

    :param node: ``field`` node from an OpenERP view
    :type node: lxml.etree._Element
    :param dict field: field descriptor corresponding to the provided node
    :param dict context: execution context used to evaluate node attributes
    :param bool in_tree_view: triggers the ``tree_invisible`` code
                              path (separate from ``invisible``): in
                              tree view there are two levels of
                              invisibility, cell content (a column is
                              present but the cell itself is not
                              displayed) with ``invisible`` and column
                              invisibility (the whole column is
                              hidden) with ``tree_invisible``.
    :returns: nothing
    """
    modifiers = {}
    # field defaults first, node attributes may override them
    if field is not None:
        transfer_field_to_modifiers(field, modifiers)
    transfer_node_to_modifiers(
        node, modifiers, context=context, in_tree_view=in_tree_view)
    transfer_modifiers_to_node(modifiers, node)
def test_modifiers(what, expected):
    """Assert that the modifiers computed for `what` -- a view architecture
    string or a ``fields_get()`` descriptor dict -- serialize exactly to the
    JSON string `expected`."""
    modifiers = {}
    if isinstance(what, basestring):
        node = etree.fromstring(what)
        transfer_node_to_modifiers(node, modifiers)
    elif isinstance(what, dict):
        transfer_field_to_modifiers(what, modifiers)
    else:
        # unsupported input type: nothing to check
        return
    simplify_modifiers(modifiers)
    json = simplejson.dumps(modifiers)
    assert json == expected, "%s != %s" % (json, expected)
184 # openerp.osv.orm.modifiers_tests()
def modifiers_tests():
    """Sanity checks for the modifiers helpers; meant to be run manually
    from a Python console (openerp.osv.orm.modifiers_tests())."""
    arch_cases = (
        ('<field name="a"/>', '{}'),
        ('<field name="a" invisible="1"/>', '{"invisible": true}'),
        ('<field name="a" readonly="1"/>', '{"readonly": true}'),
        ('<field name="a" required="1"/>', '{"required": true}'),
        ('<field name="a" invisible="0"/>', '{}'),
        ('<field name="a" readonly="0"/>', '{}'),
        ('<field name="a" required="0"/>', '{}'),
        # TODO order is not guaranteed
        ('<field name="a" invisible="1" required="1"/>', '{"invisible": true, "required": true}'),
        ('<field name="a" invisible="1" required="0"/>', '{"invisible": true}'),
        ('<field name="a" invisible="0" required="1"/>', '{"required": true}'),
        ("""<field name="a" attrs="{'invisible': [('b', '=', 'c')]}"/>""", '{"invisible": [["b", "=", "c"]]}'),
    )
    for arch, expected in arch_cases:
        test_modifiers(arch, expected)

    # The dictionary is supposed to be the result of fields_get().
    field_cases = (
        ({}, '{}'),
        ({"invisible": True}, '{"invisible": true}'),
        ({"invisible": False}, '{}'),
    )
    for descriptor, expected in field_cases:
        test_modifiers(descriptor, expected)
def check_object_name(name):
    """ Check if the given name is a valid openerp object name.

        The _name attribute in osv and osv_memory object is subject to
        some restrictions. This function returns True or False whether
        the given name is allowed or not.

        TODO: this is an approximation. The goal in this approximation
        is to disallow uppercase characters (in some places, we quote
        table/column names and in other not, which leads to this kind
        of errors:

            psycopg2.ProgrammingError: relation "xxx" does not exist).

        The same restriction should apply to both osv and osv_memory
        objects for consistency.

    """
    return regex_object_name.match(name) is not None
def raise_on_invalid_object_name(name):
    """Raise an except_orm if `name` is not a valid model name
    (see check_object_name)."""
    if not check_object_name(name):
        msg = "The _name attribute %s is not valid." % name
        raise except_orm('ValueError', msg)
232 POSTGRES_CONFDELTYPES = {
def intersect(la, lb):
    """Return the elements of `la` that also appear in `lb`, preserving
    `la`'s order and duplicates.

    :param la: iterable whose order drives the result
    :param lb: container tested for membership
    :return: list of common elements
    """
    # list comprehension keeps Python-2 filter() list semantics explicit
    return [x for x in la if x in lb]
def fix_import_export_id_paths(fieldname):
    """
    Fixes the id fields in import and exports, and splits field paths
    on '/'.

    :param str fieldname: name of the field to import/export
    :return: split field name
    :rtype: list of str
    """
    # the ".id" (db id) and ":id" (external id) suffixes must become their
    # own path component before splitting
    with_db_id = re.sub(r'([^/])\.id', r'\1/.id', fieldname)
    with_external_id = re.sub(r'([^/]):id', r'\1/id', with_db_id)
    return with_external_id.split('/')
class except_orm(Exception):
    """Base ORM exception; carries a short `name` (category/title) and a
    human-readable `value`."""
    def __init__(self, name, value):
        self.name = name
        self.value = value
        self.args = (name, value)
class BrowseRecordError(Exception):
    """Raised when a browse_record is built or used incorrectly."""
class browse_null(object):
    """ Readonly python database object browser: null object returned in
    place of a missing relational value (e.g. an unset many2one). Any
    field/attribute access yields None and it is falsy (Python 2).
    """

    def __init__(self):
        # mimic a record whose database id is absent
        self.id = False

    def __getitem__(self, name):
        return None

    def __getattr__(self, name):
        return None  # XXX: return self ?

    def __str__(self):
        return ''

    def __nonzero__(self):
        # Python 2 truth protocol: a null record is falsy
        return False

    # we need to define __unicode__ even though we've already defined __str__
    # because we have overridden __getattr__
    def __unicode__(self):
        return ''

    def __iter__(self):
        raise NotImplementedError("Iteration is not allowed on %s" % self)
295 # TODO: execute an object method on browse_record_list
class browse_record_list(list):
    """ Collection of browse objects

    Such an instance will be returned when doing a ``browse([ids..])``
    and will be iterable, yielding browse() objects
    """

    def __init__(self, lst, context=None):
        if not context:
            context = {}
        super(browse_record_list, self).__init__(lst)
        # keep the context around so callers can propagate it
        self.context = context
311 class browse_record(object):
312 """ An object that behaves like a row of an object's table.
313 It has attributes after the columns of the corresponding object.
317 uobj = pool.get('res.users')
318 user_rec = uobj.browse(cr, uid, 104)
    def __init__(self, cr, uid, id, table, cache, context=None,
                 list_class=browse_record_list, fields_process=None):
        """
        :param table: the browsed object (inherited from orm)
        :param dict cache: a dictionary of model->field->data to be shared
                           across browse objects, thus reducing the SQL
                           read()s. It can speed up things a lot, but also be
                           disastrous if not discarded after write()/unlink()
        :param dict context: dictionary with an optional context
        """
        # NOTE(review): the bodies of the default-argument checks are not
        # visible in this chunk -- presumably `fields_process = {}` etc.
        if fields_process is None:
        self._list_class = list_class
        self._table = table # deprecated, use _model!
        self._table_name = self._table._name
        self.__logger = logging.getLogger('openerp.osv.orm.browse_record.' + self._table_name)
        self._context = context
        self._fields_process = fields_process
        # one shared cache dict per model name; this record's rows live there
        cache.setdefault(table._name, {})
        self._data = cache[table._name]

#        if not (id and isinstance(id, (int, long,))):
#            raise BrowseRecordError(_('Wrong ID for the browse record, got %r, expected an integer.') % (id,))
#        if not table.exists(cr, uid, id, context):
#            raise BrowseRecordError(_('Object %s does not exists') % (self,))

        # seed the cache entry for this id so prefetching can find it
        if id not in self._data:
            self._data[id] = {'id': id}
    def __getitem__(self, name):
        """Return the value of field `name` for this record, reading (and
        prefetching, when possible) from the database through the shared
        cache on first access."""
        if name not in self._data[self._id]:
            # build the list of fields we will fetch

            # fetch the definition of the field which was asked for
            if name in self._table._columns:
                col = self._table._columns[name]
            elif name in self._table._inherit_fields:
                col = self._table._inherit_fields[name][2]
            elif hasattr(self._table, str(name)):
                # not a field: expose model methods as partials bound to
                # this record's cr/uid/id
                attr = getattr(self._table, name)
                if isinstance(attr, (types.MethodType, types.LambdaType, types.FunctionType)):
                    def function_proxy(*args, **kwargs):
                        if 'context' not in kwargs and self._context:
                            kwargs.update(context=self._context)
                        return attr(self._cr, self._uid, [self._id], *args, **kwargs)
                    return function_proxy
                error_msg = "Field '%s' does not exist in object '%s'" % (name, self)
                self.__logger.warning(error_msg)
                if self.__logger.isEnabledFor(logging.DEBUG):
                    self.__logger.debug(''.join(traceback.format_stack()))
                raise KeyError(error_msg)

            # a column is worth prefetching only when it is stored, flagged
            # prefetchable, and not restricted by groups nor deprecated
            prefetchable = lambda f: f._classic_write and f._prefetch and not f.groups and not f.deprecated

            # if the field is a classic one or a many2one, we'll fetch all classic and many2one fields
            if prefetchable(col):
                # gen the list of "local" (ie not inherited) fields which are classic or many2one
                field_filter = lambda x: prefetchable(x[1])
                fields_to_fetch = filter(field_filter, self._table._columns.items())
                # gen the list of inherited fields
                inherits = map(lambda x: (x[0], x[1][2]), self._table._inherit_fields.items())
                # complete the field list with the inherited fields which are classic or many2one
                fields_to_fetch += filter(field_filter, inherits)
            # otherwise we fetch only that field
                fields_to_fetch = [(name, col)]

            # prefetch for every cached id that misses this field
            ids = filter(lambda id: name not in self._data[id], self._data.keys())

            field_names = map(lambda x: x[0], fields_to_fetch)
                field_values = self._table.read(self._cr, self._uid, ids, field_names, context=self._context, load="_classic_write")
            except (openerp.exceptions.AccessError, except_orm):
                # prefetching attempt failed, perhaps we're violating ACL restrictions involuntarily
                _logger.info('Prefetching attempt for fields %s on %s failed for ids %s, re-trying just for id %s', field_names, self._model._name, ids, self._id)
                field_values = self._table.read(self._cr, self._uid, ids, field_names, context=self._context, load="_classic_write")

            # TODO: improve this, very slow for reports
            if self._fields_process:
                lang = self._context.get('lang', 'en_US') or 'en_US'
                lang_obj_ids = self.pool.get('res.lang').search(self._cr, self._uid, [('code', '=', lang)])
                    raise Exception(_('Language with code "%s" is not defined in your system !\nDefine it through the Administration menu.') % (lang,))
                lang_obj = self.pool.get('res.lang').browse(self._cr, self._uid, lang_obj_ids[0])

                for field_name, field_column in fields_to_fetch:
                    if field_column._type in self._fields_process:
                        for result_line in field_values:
                            result_line[field_name] = self._fields_process[field_column._type](result_line[field_name])
                            if result_line[field_name]:
                                result_line[field_name].set_value(self._cr, self._uid, result_line[field_name], self, field_column, lang_obj)

                # Where did those ids come from? Perhaps old entries in ir_model_data?
                _logger.warning("No field_values found for ids %s in %s", ids, self)
                raise KeyError('Field %s not found in %s'%(name, self))
            # create browse records for 'remote' objects
            for result_line in field_values:
                for field_name, field_column in fields_to_fetch:
                    if field_column._type == 'many2one':
                        if result_line[field_name]:
                            obj = self._table.pool[field_column._obj]
                            # _classic_write may yield either a plain id or an
                            # (id, name) pair
                            if isinstance(result_line[field_name], (list, tuple)):
                                value = result_line[field_name][0]
                                value = result_line[field_name]
                            # FIXME: this happen when a _inherits object
                            #        overwrite a field of it parent. Need
                            #        testing to be sure we got the right
                            #        object and not the parent one.
                            if not isinstance(value, browse_record):
                                # In some cases the target model is not available yet, so we must ignore it,
                                # which is safe in most cases, this value will just be loaded later when needed.
                                # This situation can be caused by custom fields that connect objects with m2o without
                                # respecting module dependencies, causing relationships to be connected too soon when
                                # the target is not loaded yet.
                                    new_data[field_name] = browse_record(self._cr,
                                        self._uid, value, obj, self._cache,
                                        context=self._context,
                                        list_class=self._list_class,
                                        fields_process=self._fields_process)
                                new_data[field_name] = value
                            new_data[field_name] = browse_null()
                        else:
                            new_data[field_name] = browse_null()
                    elif field_column._type in ('one2many', 'many2many') and len(result_line[field_name]):
                        new_data[field_name] = self._list_class(
                            (browse_record(self._cr, self._uid, id, self._table.pool.get(field_column._obj),
                                           self._cache, context=self._context, list_class=self._list_class,
                                           fields_process=self._fields_process)
                             for id in result_line[field_name]),
                            context=self._context)
                    elif field_column._type == 'reference':
                        if result_line[field_name]:
                            if isinstance(result_line[field_name], browse_record):
                                new_data[field_name] = result_line[field_name]
                            else:
                                # stored as "model,id"
                                ref_obj, ref_id = result_line[field_name].split(',')
                                ref_id = long(ref_id)
                                    obj = self._table.pool[ref_obj]
                                    new_data[field_name] = browse_record(self._cr, self._uid, ref_id, obj, self._cache, context=self._context, list_class=self._list_class, fields_process=self._fields_process)
                                new_data[field_name] = browse_null()
                        else:
                            new_data[field_name] = browse_null()
                    else:
                        new_data[field_name] = result_line[field_name]
                self._data[result_line['id']].update(new_data)

        if not name in self._data[self._id]:
            # How did this happen? Could be a missing model due to custom fields used too soon, see above.
            self.__logger.error("Fields to fetch: %s, Field values: %s", field_names, field_values)
            self.__logger.error("Cached: %s, Table: %s", self._data[self._id], self._table)
            raise KeyError(_('Unknown attribute %s in %s ') % (name, self))
        return self._data[self._id][name]
    def __getattr__(self, name):
        """Attribute access delegates to __getitem__; KeyError must be
        converted to AttributeError to honour the __getattr__ contract."""
            # NOTE(review): the try/except wrapper is not visible in this
            # chunk; `e` comes from the missing `except KeyError, e:` clause.
            exc_info = sys.exc_info()
            # Python 2 three-argument raise: keep the original traceback.
            raise AttributeError, "Got %r while trying to get attribute %s on a %s record." % (e, name, self._table._name), exc_info[2]
512 def __contains__(self, name):
513 return (name in self._table._columns) or (name in self._table._inherit_fields) or hasattr(self._table, name)
516 raise NotImplementedError("Iteration is not allowed on %s" % self)
518 def __hasattr__(self, name):
525 return "browse_record(%s, %s)" % (self._table_name, self._id)
527 def __eq__(self, other):
528 if not isinstance(other, browse_record):
530 return (self._table_name, self._id) == (other._table_name, other._id)
532 def __ne__(self, other):
533 if not isinstance(other, browse_record):
535 return (self._table_name, self._id) != (other._table_name, other._id)
    # we need to define __unicode__ even though we've already defined __str__
    # because we have overridden __getattr__
    def __unicode__(self):
        """Python 2 unicode conversion; reuses the __str__ representation."""
        return unicode(str(self))
543 return hash((self._table_name, self._id))
548 """Force refreshing this browse_record's data and all the data of the
549 records that belong to the same cache, by emptying the cache completely,
550 preserving only the record identifiers (for prefetching optimizations).
552 for model, model_cache in self._cache.iteritems():
553 # only preserve the ids of the records that were in the cache
554 cached_ids = dict([(i, {'id': i}) for i in model_cache.keys()])
555 self._cache[model].clear()
556 self._cache[model].update(cached_ids)
def pg_varchar(size=0):
    """ Returns the VARCHAR declaration for the provided size:

    * If no size (or an empty or negative size is provided) return an
      'infinite' VARCHAR
    * Otherwise return a VARCHAR(n)

    :type int size: varchar size, optional
    :rtype: str
    """
    if size:
        if not isinstance(size, int):
            raise TypeError("VARCHAR parameter should be an int, got %s"
                            % type(size))
        if size > 0:
            return 'VARCHAR(%d)' % size
    return 'VARCHAR'
576 FIELDS_TO_PGTYPES = {
577 fields.boolean: 'bool',
578 fields.integer: 'int4',
582 fields.datetime: 'timestamp',
583 fields.binary: 'bytea',
584 fields.many2one: 'int4',
585 fields.serialized: 'text',
def get_pg_type(f, type_override=None):
    """
    :param fields._column f: field to get a Postgres type for
    :param type type_override: use the provided type for dispatching instead of the field's own type
    :returns: (postgres_identification_type, postgres_type_specification)
    :rtype: (str, str) or None if the field type is not supported
    """
    field_type = type_override or type(f)

    if field_type in FIELDS_TO_PGTYPES:
        pg_type = (FIELDS_TO_PGTYPES[field_type], FIELDS_TO_PGTYPES[field_type])
    elif issubclass(field_type, fields.float):
        # Explicit support for "falsy" digits (0, False) to indicate a
        # NUMERIC field with no fixed precision. The values will be saved
        # in the database with all significant digits.
        # FLOAT8 type is still the default when there is no precision because
        # it is faster for most operations (sums, etc.)
        if f.digits is not None:
            pg_type = ('numeric', 'NUMERIC')
        else:
            pg_type = ('float8', 'DOUBLE PRECISION')
    elif issubclass(field_type, (fields.char, fields.reference)):
        pg_type = ('varchar', pg_varchar(f.size))
    elif issubclass(field_type, fields.selection):
        # integer-keyed selections (or explicit size == -1) are stored as ints
        if (isinstance(f.selection, list) and isinstance(f.selection[0][0], int))\
                or getattr(f, 'size', None) == -1:
            pg_type = ('int4', 'INTEGER')
        else:
            pg_type = ('varchar', pg_varchar(getattr(f, 'size', None)))
    elif issubclass(field_type, fields.function):
        # function fields dispatch on the type they emulate
        if f._type == 'selection':
            pg_type = ('varchar', pg_varchar())
        else:
            pg_type = get_pg_type(f, getattr(fields, f._type))
    else:
        _logger.warning('%s type not supported!', field_type)
        pg_type = None

    return pg_type
class MetaModel(type):
    """ Metaclass for the Model.

    This class is used as the metaclass for the Model class to discover
    the models defined in a module (i.e. without instantiating them).
    If the automatic discovery is not needed, it is possible to set the
    model's _register attribute to False.

    """

    # {module_name: [model_class, ...]} filled as model classes are defined
    module_to_models = {}

    def __init__(self, name, bases, attrs):
        if not self._register:
            # base/abstract class: mark it registered and stop here
            self._register = True
            super(MetaModel, self).__init__(name, bases, attrs)
            return

        # The (OpenERP) module name can be in the `openerp.addons` namespace
        # or not. For instance module `sale` can be imported as
        # `openerp.addons.sale` (the good way) or `sale` (for backward
        # compatibility).
        module_parts = self.__module__.split('.')
        if len(module_parts) > 2 and module_parts[0] == 'openerp' and \
                module_parts[1] == 'addons':
            module_name = self.__module__.split('.')[2]
        else:
            module_name = self.__module__.split('.')[0]
        if not hasattr(self, '_module'):
            self._module = module_name

        # Remember which models to instantiate for this module.
        self.module_to_models.setdefault(self._module, []).append(self)
# Definition of log access columns, automatically added to models if
# self._log_access is True
LOG_ACCESS_COLUMNS = {
    'create_uid': 'INTEGER REFERENCES res_users ON DELETE SET NULL',
    'create_date': 'TIMESTAMP',
    'write_uid': 'INTEGER REFERENCES res_users ON DELETE SET NULL',
    'write_date': 'TIMESTAMP'
}
# special columns automatically created by the ORM
MAGIC_COLUMNS = ['id'] + LOG_ACCESS_COLUMNS.keys()
676 class BaseModel(object):
677 """ Base class for OpenERP models.
679 OpenERP models are created by inheriting from this class' subclasses:
681 * Model: for regular database-persisted models
682 * TransientModel: for temporary data, stored in the database but automatically
      vacuumed every so often
684 * AbstractModel: for abstract super classes meant to be shared by multiple
685 _inheriting classes (usually Models or TransientModels)
687 The system will later instantiate the class once per database (on
688 which the class' module is installed).
690 To create a class that should not be instantiated, the _register class attribute
693 __metaclass__ = MetaModel
694 _auto = True # create database backend
695 _register = False # Set to false if the model shouldn't be automatically discovered.
702 _parent_name = 'parent_id'
703 _parent_store = False
704 _parent_order = False
711 # dict of {field:method}, with method returning the (name_get of records, {id: fold})
712 # to include in the _read_group, if grouped on this field
716 _transient = False # True in a TransientModel
719 # { 'parent_model': 'm2o_field', ... }
722 # Mapping from inherits'd field name to triple (m, r, f, n) where m is the
723 # model from which it is inherits'd, r is the (local) field towards m, f
724 # is the _column object itself, and n is the original (i.e. top-most)
727 # { 'field_name': ('parent_model', 'm2o_field_to_reach_parent',
    #                       field_column_obj, original_parent_model), ... }
731 # Mapping field name/column_info object
732 # This is similar to _inherit_fields but:
733 # 1. includes self fields,
734 # 2. uses column_info instead of a triple.
740 _sql_constraints = []
741 _protected = ['read', 'write', 'create', 'default_get', 'perm_read', 'unlink', 'fields_get', 'fields_view_get', 'search', 'name_get', 'distinct_field_get', 'name_search', 'copy', 'import_data', 'search_count', 'exists']
743 CONCURRENCY_CHECK_FIELD = '__last_update'
745 def log(self, cr, uid, id, message, secondary=False, context=None):
746 return _logger.warning("log() is deprecated. Please use OpenChatter notification system instead of the res.log mechanism.")
748 def view_init(self, cr, uid, fields_list, context=None):
749 """Override this method to do specific things when a view on the object is opened."""
    def _field_create(self, cr, context=None):
        """ Create entries in ir_model_fields for all the model's fields.

        If necessary, also create an entry in ir_model, and if called from the
        modules loading scheme (by receiving 'module' in the context), also
        create entries in ir_model_data (for the model and the fields).

        - create an entry in ir_model (if there is not already one),
        - create an entry in ir_model_data (if there is not already one, and if
          'module' is in the context),
        - update ir_model_fields with the fields found in _columns
          (TODO there is some redundancy as _columns is updated from
          ir_model_fields in __init__).

        """
        # ensure there is an ir_model row for this model
        cr.execute("SELECT id FROM ir_model WHERE model=%s", (self._name,))
            cr.execute('SELECT nextval(%s)', ('ir_model_id_seq',))
            model_id = cr.fetchone()[0]
            cr.execute("INSERT INTO ir_model (id,model, name, info,state) VALUES (%s, %s, %s, %s, %s)", (model_id, self._name, self._description, self.__doc__, 'base'))
            model_id = cr.fetchone()[0]
        if 'module' in context:
            # register the model itself in ir_model_data (module loading)
            name_id = 'model_'+self._name.replace('.', '_')
            cr.execute('select * from ir_model_data where name=%s and module=%s', (name_id, context['module']))
                cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, (now() at time zone 'UTC'), (now() at time zone 'UTC'), %s, %s, %s)", \
                    (name_id, context['module'], 'ir.model', model_id)

        # existing ir_model_fields rows, keyed by field name
        cr.execute("SELECT * FROM ir_model_fields WHERE model=%s", (self._name,))
        for rec in cr.dictfetchall():
            cols[rec['name']] = rec

        ir_model_fields_obj = self.pool.get('ir.model.fields')

        # sparse field should be created at the end, as it depends on its serialized field already existing
        model_fields = sorted(self._columns.items(), key=lambda x: 1 if x[1]._type == 'sparse' else 0)
        for (k, f) in model_fields:
                'model_id': model_id,
                'field_description': f.string,
                'relation': f._obj or '',
                'select_level': tools.ustr(f.select or 0),
                'readonly': (f.readonly and 1) or 0,
                'required': (f.required and 1) or 0,
                'selectable': (f.selectable and 1) or 0,
                'translate': (f.translate and 1) or 0,
                'relation_field': f._fields_id if isinstance(f, fields.one2many) else '',
                'serialization_field_id': None,
            if getattr(f, 'serialization_field', None):
                # resolve link to serialization_field if specified by name
                serialization_field_id = ir_model_fields_obj.search(cr, SUPERUSER_ID, [('model','=',vals['model']), ('name', '=', f.serialization_field)])
                if not serialization_field_id:
                    raise except_orm(_('Error'), _("Serialization field `%s` not found for sparse field `%s`!") % (f.serialization_field, k))
                vals['serialization_field_id'] = serialization_field_id[0]

            # When it's a custom field, it does not contain f.select
            if context.get('field_state', 'base') == 'manual':
                if context.get('field_name', '') == k:
                    vals['select_level'] = context.get('select', '0')
                # setting value to let the problem NOT occur next time
                    vals['select_level'] = cols[k]['select_level']

                # the field is new: insert it with a fresh id
                cr.execute('select nextval(%s)', ('ir_model_fields_id_seq',))
                id = cr.fetchone()[0]
                cr.execute("""INSERT INTO ir_model_fields (
                    id, model_id, model, name, field_description, ttype,
                    relation,state,select_level,relation_field, translate, serialization_field_id
                    %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s
                    id, vals['model_id'], vals['model'], vals['name'], vals['field_description'], vals['ttype'],
                    vals['relation'], 'base',
                    vals['select_level'], vals['relation_field'], bool(vals['translate']), vals['serialization_field_id']
                if 'module' in context:
                    # register the field in ir_model_data for the module
                    name1 = 'field_' + self._table + '_' + k
                    cr.execute("select name from ir_model_data where name=%s", (name1,))
                        name1 = name1 + "_" + str(id)
                    cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, (now() at time zone 'UTC'), (now() at time zone 'UTC'), %s, %s, %s)", \
                        (name1, context['module'], 'ir.model.fields', id)
            else:
                # the field exists: update the row on the first difference
                for key, val in vals.items():
                    if cols[k][key] != vals[key]:
                        cr.execute('update ir_model_fields set field_description=%s where model=%s and name=%s', (vals['field_description'], vals['model'], vals['name']))
                        cr.execute("""UPDATE ir_model_fields SET
                            model_id=%s, field_description=%s, ttype=%s, relation=%s,
                            select_level=%s, readonly=%s ,required=%s, selectable=%s, relation_field=%s, translate=%s, serialization_field_id=%s
                            model=%s AND name=%s""", (
                            vals['model_id'], vals['field_description'], vals['ttype'],
                            vals['select_level'], bool(vals['readonly']), bool(vals['required']), bool(vals['selectable']), vals['relation_field'], bool(vals['translate']), vals['serialization_field_id'], vals['model'], vals['name']
862 # Goal: try to apply inheritance at the instanciation level and
863 # put objects in the pool var
    def create_instance(cls, pool, cr):
        """ Instantiate a given model.

        This class method instantiates the class of some model (i.e. a class
        deriving from osv or osv_memory). The class might be the class passed
        in argument or, if it inherits from another class, a class constructed
        by combining the two classes.

        The ``attributes`` argument specifies which parent class attributes
        have to be combined.

        TODO: the creation of the combined class is repeated at each call of
        this method. This is probably unnecessary.

        """
        # class-level attributes merged from the parent when _inherit is used
        attributes = ['_columns', '_defaults', '_inherits', '_constraints',

        parent_names = getattr(cls, '_inherit', None)
            if isinstance(parent_names, (str, unicode)):
                name = cls._name or parent_names
                parent_names = [parent_names]
                raise TypeError('_name is mandatory in case of multiple inheritance')

            for parent_name in ((type(parent_names)==list) and parent_names or [parent_names]):
                if parent_name not in pool:
                    raise TypeError('The model "%s" specifies an unexisting parent class "%s"\n'
                        'You may need to add a dependency on the parent class\' module.' % (name, parent_name))
                parent_model = pool[parent_name]
                # keep the module that originally defined this model
                if not getattr(cls, '_original_module', None) and name == parent_model._name:
                    cls._original_module = parent_model._original_module
                parent_class = parent_model.__class__
                    new = copy.copy(getattr(parent_model, s, {}))
                        # Don't _inherit custom fields.
                    if hasattr(new, 'update'):
                        new.update(cls.__dict__.get(s, {}))
                    elif s=='_constraints':
                        for c in cls.__dict__.get(s, []):
                            for c2 in range(len(new)):
                                #For _constraints, we should check field and methods as well
                                if new[c2][2]==c[2] and (new[c2][0] == c[0] \
                                        or getattr(new[c2][0],'__name__', True) == \
                                            getattr(c[0],'__name__', False)):
                                    # If new class defines a constraint with
                                    # same function name, we let it override
                        new.extend(cls.__dict__.get(s, []))

            # Keep links to non-inherited constraints, e.g. useful when exporting translations
            nattr['_local_constraints'] = cls.__dict__.get('_constraints', [])
            nattr['_local_sql_constraints'] = cls.__dict__.get('_sql_constraints', [])

            # build the combined class; _register=False keeps the metaclass
            # from re-registering it
            cls = type(name, (cls, parent_class), dict(nattr, _register=False))
            cls._local_constraints = getattr(cls, '_constraints', [])
            cls._local_sql_constraints = getattr(cls, '_sql_constraints', [])

        if not getattr(cls, '_original_module', None):
            cls._original_module = cls._module
        obj = object.__new__(cls)

        if hasattr(obj, '_columns'):
            # float fields are registry-dependent (digit attribute). Duplicate them to avoid issues.
            for c, f in obj._columns.items():
                if f._type == 'float':
                    obj._columns[c] = copy.copy(f)

        obj.__init__(pool, cr)
956 """Register this model.
958 This doesn't create an instance but simply register the model
959 as being part of the module where it is defined.
964 # Set the module name (e.g. base, sale, accounting, ...) on the class.
965 module = cls.__module__.split('.')[0]
966 if not hasattr(cls, '_module'):
969 # Record this class in the list of models to instantiate for this module,
970 # managed by the metaclass.
971 module_model_list = MetaModel.module_to_models.setdefault(cls._module, [])
972 if cls not in module_model_list:
974 module_model_list.append(cls)
976 # Since we don't return an instance here, the __init__
977 # method won't be called.
    def __init__(self, pool, cr):
        """ Initialize a model and make it part of the given registry.

        - copy the stored fields' functions in the osv_pool,
        - update the _columns with the fields found in ir_model_fields,
        - ensure there is a many2one for each _inherits'd parent,
        - update the children's _columns,
        - give a chance to each field to initialize itself.

        :param pool: model registry this instance is being added to
        :param cr: database cursor used for the setup queries below
        """
        # NOTE(review): several original lines are elided in this copy;
        # elision points are marked below.
        # Register this instance in the registry under its model name.
        pool.add(self._name, self)
        if not self._name and not hasattr(self, '_inherit'):
            name = type(self).__name__.split('.')[0]
            msg = "The class %s has to have a _name attribute" % name
            raise except_orm('ValueError', msg)
        if not self._description:
            self._description = self._name
        # Table name defaults to the model name with '.' replaced by '_'.
        self._table = self._name.replace('.', '_')
        if not hasattr(self, '_log_access'):
            # If _log_access is not specified, it is the same value as _auto.
            self._log_access = getattr(self, "_auto", True)
        # Per-registry copy so column objects are not shared across registries.
        self._columns = self._columns.copy()
        for store_field in self._columns:
            f = self._columns[store_field]
            if hasattr(f, 'digits_change'):
                # (elided: digits update call)
            # Drop any store trigger previously registered for this same field.
            def not_this_field(stored_func):
                x, y, z, e, f, l = stored_func
                return x != self._name or y != store_field
            self.pool._store_function[self._name] = filter(not_this_field, self.pool._store_function.get(self._name, []))
            if not isinstance(f, fields.function):
                # (elided: skip non-function columns)
            # store=True shorthand: recompute when this model's own records change.
            sm = {self._name: (lambda self, cr, uid, ids, c={}: ids, None, f.priority, None)}
            for object, aa in sm.items():
                # A store spec is a 4-tuple (with time length) or a 3-tuple.
                # (elided: `if len(aa) == 4:` branch header)
                    (fnct, fields2, order, length) = aa
                # (elided: `elif len(aa) == 3:` branch header)
                    (fnct, fields2, order) = aa
                # (elided: else branch header)
                    raise except_orm('Error',
                        ('Invalid function definition %s in object %s !\nYou must use the definition: store={object:(fnct, fields, priority, time length)}.' % (store_field, self._name)))
                self.pool._store_function.setdefault(object, [])
                t = (self._name, store_field, fnct, tuple(fields2) if fields2 else None, order, length)
                if not t in self.pool._store_function[object]:
                    self.pool._store_function[object].append((self._name, store_field, fnct, tuple(fields2) if fields2 else None, order, length))
                    # Triggers are kept sorted by priority (5th tuple slot).
                    self.pool._store_function[object].sort(lambda x, y: cmp(x[4], y[4]))
        # Map SQL constraint names to their user-facing error messages.
        for (key, _, msg) in self._sql_constraints:
            self.pool._sql_error[self._table+'_'+key] = msg
        # Load manual fields
        # Check the query is already done for all modules or if we need to
        if self.pool.fields_by_model is not None:
            manual_fields = self.pool.fields_by_model.get(self._name, [])
        # (elided: else branch querying ir_model_fields directly)
            cr.execute('SELECT * FROM ir_model_fields WHERE model=%s AND state=%s', (self._name, 'manual'))
            manual_fields = cr.dictfetchall()
        for field in manual_fields:
            if field['name'] in self._columns:
                # (elided: presumably `continue` — confirm)
            # (elided: `attrs = {` opening of the column attribute dict)
                'string': field['field_description'],
                'required': bool(field['required']),
                'readonly': bool(field['readonly']),
                # NOTE(review): eval() on stored domain/selection strings —
                # trusted-admin data only; do not feed user input here.
                'domain': eval(field['domain']) if field['domain'] else None,
                'size': field['size'] or None,
                'ondelete': field['on_delete'],
                'translate': (field['translate']),
                #'select': int(field['select_level'])
            # (elided: closing brace of attrs)
            if field['serialization_field_id']:
                # Sparse field: stored inside the named serialization field.
                cr.execute('SELECT name FROM ir_model_fields WHERE id=%s', (field['serialization_field_id'],))
                attrs.update({'serialization_field': cr.fetchone()[0], 'type': field['ttype']})
                if field['ttype'] in ['many2one', 'one2many', 'many2many']:
                    attrs.update({'relation': field['relation']})
                self._columns[field['name']] = fields.sparse(**attrs)
            elif field['ttype'] == 'selection':
                self._columns[field['name']] = fields.selection(eval(field['selection']), **attrs)
            elif field['ttype'] == 'reference':
                self._columns[field['name']] = fields.reference(selection=eval(field['selection']), **attrs)
            elif field['ttype'] == 'many2one':
                self._columns[field['name']] = fields.many2one(field['relation'], **attrs)
            elif field['ttype'] == 'one2many':
                self._columns[field['name']] = fields.one2many(field['relation'], field['relation_field'], **attrs)
            elif field['ttype'] == 'many2many':
                _rel1 = field['relation'].replace('.', '_')
                _rel2 = field['model'].replace('.', '_')
                # Relation table name for a custom (x_) many2many field.
                _rel_name = 'x_%s_%s_%s_rel' % (_rel1, _rel2, field['name'])
                self._columns[field['name']] = fields.many2many(field['relation'], _rel_name, 'id1', 'id2', **attrs)
            # (elided: else branch for plain scalar types)
                self._columns[field['name']] = getattr(fields, field['ttype'])(**attrs)
        self._inherits_check()
        self._inherits_reload()
        if not self._sequence:
            self._sequence = self._table + '_id_seq'
        for k in self._defaults:
            assert (k in self._columns) or (k in self._inherit_fields), 'Default function defined in %s but field %s does not exist !' % (self._name, k,)
        # Let each column (re)initialize itself for this registry.
        for f in self._columns:
            self._columns[f].restart()
        # Transient models enforce count/age limits and need the log-access
        # columns (create_date, ...) for their vacuuming policy.
        if self.is_transient():
            self._transient_check_count = 0
            self._transient_max_count = config.get('osv_memory_count_limit')
            self._transient_max_hours = config.get('osv_memory_age_limit')
            assert self._log_access, "TransientModels must have log_access turned on, "\
                "in order to implement their access rights policy"
        # Validate rec_name against the known columns.
        if self._rec_name is not None:
            assert self._rec_name in self._all_columns.keys() + ['id'], "Invalid rec_name %s for model %s" % (self._rec_name, self._name)
        # (elided: presumably `elif 'name' in self._columns:` — confirm)
            self._rec_name = 'name'
    def __export_row(self, cr, uid, row, fields, context=None):
        # Serialize one browse_record `row` into export rows (lists of cell
        # values), following the field paths in `fields` and recursing into
        # sub-records for relational fields.
        # NOTE(review): several original lines are elided in this copy;
        # orphaned fragments below keep their approximate indentation.

        def check_type(field_type):
            # Type-appropriate "empty" placeholder for a missing value.
            if field_type == 'float':
                # (elided)
            elif field_type == 'integer':
                # (elided)
            elif field_type == 'boolean':
                # (elided)

        def selection_field(in_field):
            # Look up the column object for f[i], walking _inherits parents.
            col_obj = self.pool[in_field.keys()[0]]
            if f[i] in col_obj._columns.keys():
                return col_obj._columns[f[i]]
            elif f[i] in col_obj._inherits.keys():
                # NOTE(review): the recursive result is not returned, so this
                # branch yields None — looks like a latent bug; verify before
                # relying on inherited selection labels.
                selection_field(col_obj._inherits)

        def _get_xml_id(self, cr, uid, r):
            # Map record r to an external id, creating one under the
            # '__export__' module when the record has none yet.
            model_data = self.pool.get('ir.model.data')
            data_ids = model_data.search(cr, uid, [('model', '=', r._model._name), ('res_id', '=', r['id'])])
            d = model_data.read(cr, uid, data_ids, ['name', 'module'])[0]
            r = '%s.%s' % (d['module'], d['name'])
            # (elided: fallback branch generating a unique candidate name)
            n = r._model._table+'_'+str(r['id']) + (postfix and ('_'+str(postfix)) or '' )
            if not model_data.search(cr, uid, [('name', '=', n)]):
                # (elided)
            model_data.create(cr, SUPERUSER_ID, {
                'model': r._model._name,
                'module': '__export__',
            })

        # One output cell per requested field path.
        data = map(lambda x: '', range(len(fields)))
        for fpos in range(len(fields)):
            # (elided: walk the field path `f` component by component)
                    r = _get_xml_id(self, cr, uid, r)
                    # To display external name of selection field when it is exported
                    if f[i] in self._columns.keys():
                        cols = self._columns[f[i]]
                    elif f[i] in self._inherit_fields.keys():
                        cols = selection_field(self._inherits)
                    if cols and cols._type == 'selection':
                        sel_list = cols.selection
                        if r and type(sel_list) == type([]):
                            # Replace the stored key with its human-readable label.
                            r = [x[1] for x in sel_list if r==x[0]]
                            r = r and r[0] or False
                    # Empty value: substitute the type-specific placeholder.
                    if f[i] in self._columns:
                        r = check_type(self._columns[f[i]]._type)
                    elif f[i] in self._inherit_fields:
                        r = check_type(self._inherit_fields[f[i]][2]._type)
                    data[fpos] = r or False
                if isinstance(r, (browse_record_list, list)):
                    # Relational value: compute the sub-field paths once.
                    fields2 = map(lambda x: (x[:i+1]==f[:i+1] and x[i+1:]) \
                    # (elided: continuation of the map expression)
                    if [x for x in fields2 if x]:
                        # (elided)
                    done.append(fields2)
                    # m2m + trailing 'id': comma-separated external ids.
                    if cols and cols._type=='many2many' and len(fields[fpos])>(i+1) and (fields[fpos][i+1]=='id'):
                        data[fpos] = ','.join([_get_xml_id(self, cr, uid, x) for x in r])
                    # (elided: loop over sub-records `row2` in r)
                        lines2 = row2._model.__export_row(cr, uid, row2, fields2,
                        # (elided: continuation / first-row handling)
                            for fpos2 in range(len(fields)):
                                if lines2 and lines2[0][fpos2]:
                                    data[fpos2] = lines2[0][fpos2]
                            # Fallback: join the sub-records' display names.
                                    name_relation = self.pool[rr._table_name]._rec_name
                                    if isinstance(rr[name_relation], browse_record):
                                        rr = rr[name_relation]
                                    rr_name = self.pool[rr._table_name].name_get(cr, uid, [rr.id], context=context)
                                    rr_name = rr_name and rr_name[0] and rr_name[0][1] or ''
                                    dt += tools.ustr(rr_name or '') + ','
                                data[fpos] = dt[:-1]
                    # Scalar or m2o: export the record's display name.
                    if isinstance(r, browse_record):
                        r = self.pool[r._table_name].name_get(cr, uid, [r.id], context=context)
                        r = r and r[0] and r[0][1] or ''
                    data[fpos] = tools.ustr(r or '')
        # First row carries this record's own cells; `lines` carries the rows
        # produced for sub-records.
        return [data] + lines
    def export_data(self, cr, uid, ids, fields_to_export, context=None):
        """
        Export fields for selected objects

        :param cr: database cursor
        :param uid: current user id
        :param ids: list of ids
        :param fields_to_export: list of fields
        :param context: context arguments, like lang, time zone
        :rtype: dictionary with a *datas* matrix

        This method is used when exporting data via client menu
        """
        # Columns of this model plus all fields inherited via _inherits.
        cols = self._columns.copy()
        for f in self._inherit_fields:
            cols.update({f: self._inherit_fields[f][2]})
        # Normalize field paths ('id', '.id', 'a/b', ...) once up front.
        fields_to_export = map(fix_import_export_id_paths, fields_to_export)
        # (elided: `datas` initialization — presumably `datas = []`; confirm)
        for row in self.browse(cr, uid, ids, context):
            datas += self.__export_row(cr, uid, row, fields_to_export, context)
        return {'datas': datas}
    def import_data(self, cr, uid, fields, datas, mode='init', current_module='', noupdate=False, context=None, filename=None):
        """
        Deprecated: use :meth:`~load` instead

        Import given data in given module

        This method is used when importing data via client menu.

        Example of fields to import for a sale.order::

            partner_id, (=name_search)
            order_line/.id, (=database_id)
            order_line/product_id/id, (=xml id)
            order_line/price_unit,
            order_line/product_uom_qty,
            order_line/product_uom/id (=xml_id)

        This method returns a 4-tuple with the following structure::

            (return_code, errored_resource, error_message, unused)

        * The first item is a return code, it is ``-1`` in case of
          import error, or the last imported row number in case of success
        * The second item contains the record data dict that failed to import
          in case of error, otherwise it's 0
        * The third item contains an error message string in case of error,
          otherwise it's 0
        * The last item is currently unused, with no specific semantics

        :param fields: list of fields to import
        :param datas: data to import
        :param mode: 'init' or 'update' for record creation
        :param current_module: module name
        :param noupdate: flag for record creation
        :param filename: optional file to store partial import state for recovery
        :returns: 4-tuple in the form (return_code, errored_resource, error_message, unused)
        :rtype: (int, dict or 0, str or 0, str or 0)
        """
        # NOTE(review): some original lines are elided in this copy; markers below.
        context = dict(context) if context is not None else {}
        context['_import_current_module'] = current_module
        fields = map(fix_import_export_id_paths, fields)
        ir_model_data_obj = self.pool.get('ir.model.data')
        # (elided: `def log(m):` header — this logger aborts on error messages)
            if m['type'] == 'error':
                raise Exception(m['message'])
        # Resume a previously interrupted partial import, if configured.
        if config.get('import_partial') and filename:
            with open(config.get('import_partial'), 'rb') as partial_import_file:
                data = pickle.load(partial_import_file)
                position = data.get(filename, 0)
        # (elided: position init / `try:` header)
            for res_id, xml_id, res, info in self._convert_records(cr, uid,
                            self._extract_records(cr, uid, fields, datas,
                                                  context=context, log=log),
                            context=context, log=log):
                ir_model_data_obj._update(cr, uid, self._name,
                     current_module, res, mode=mode, xml_id=xml_id,
                     noupdate=noupdate, res_id=res_id, context=context)
                position = info.get('rows', {}).get('to', 0) + 1
                # Checkpoint progress every 100 rows so a crash can resume.
                if config.get('import_partial') and filename and (not (position%100)):
                    with open(config.get('import_partial'), 'rb') as partial_import:
                        data = pickle.load(partial_import)
                    data[filename] = position
                    with open(config.get('import_partial'), 'wb') as partial_import:
                        pickle.dump(data, partial_import)
                    if context.get('defer_parent_store_computation'):
                        self._parent_store_compute(cr)
        except Exception, e:
            # Deprecated error-code style return (kept for API compatibility).
            return -1, {}, 'Line %d : %s' % (position + 1, tools.ustr(e)), ''
        if context.get('defer_parent_store_computation'):
            self._parent_store_compute(cr)
        return position, 0, 0, 0
    def load(self, cr, uid, fields, data, context=None):
        """
        Attempts to load the data matrix, and returns a list of ids (or
        ``False`` if there was an error and no id could be generated) and a
        list of messages.

        The ids are those of the records created and saved (in database), in
        the same order they were extracted from the file. They can be passed
        directly to :meth:`~read`

        :param fields: list of fields to import, at the same index as the corresponding data
        :type fields: list(str)
        :param data: row-major matrix of data to import
        :type data: list(list(str))
        :param dict context:
        :returns: {ids: list(int)|False, messages: [Message]}
        """
        # NOTE(review): some original lines are elided in this copy; markers below.
        # Outer savepoint: the whole load is rolled back if any error occurred.
        cr.execute('SAVEPOINT model_load')
        fields = map(fix_import_export_id_paths, fields)
        ModelData = self.pool['ir.model.data'].clear_caches()
        fg = self.fields_get(cr, uid, context=context)
        for id, xid, record, info in self._convert_records(cr, uid,
                self._extract_records(cr, uid, fields, data,
                                      context=context, log=messages.append),
                context=context, log=messages.append):
            # Inner savepoint so one bad record doesn't poison the rest.
            # (elided: `try:` header)
                cr.execute('SAVEPOINT model_load_save')
            except psycopg2.InternalError, e:
                # broken transaction, exit and hope the source error was
                # (elided: rest of comment — presumably "already logged")
                if not any(message['type'] == 'error' for message in messages):
                    messages.append(dict(info, type='error',message=
                        u"Unknown database error: '%s'" % e))
            # (elided: `try:` header around the write)
                ids.append(ModelData._update(cr, uid, self._name,
                     current_module, record, mode=mode, xml_id=xid,
                     noupdate=noupdate, res_id=id, context=context))
                cr.execute('RELEASE SAVEPOINT model_load_save')
            except psycopg2.Warning, e:
                # Warnings are recorded but do not abort the record.
                messages.append(dict(info, type='warning', message=str(e)))
                cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
            except psycopg2.Error, e:
                # Translate the Postgres error code into an OpenERP message.
                messages.append(dict(
                    # (elided: `info, type='error',` argument line)
                    **PGERROR_TO_OE[e.pgcode](self, fg, info, e)))
                # Failed to write, log to messages, rollback savepoint (to
                # avoid broken transaction) and keep going
                cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
            except Exception, e:
                message = (_('Unknown error during import:') +
                           u' %s: %s' % (type(e), unicode(e)))
                moreinfo = _('Resolve other errors first')
                messages.append(dict(info, type='error',
                    # (elided: `message=message, moreinfo=moreinfo))` line)
                # Failed for some reason, perhaps due to invalid data supplied,
                # rollback savepoint and keep going
                cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
        if any(message['type'] == 'error' for message in messages):
            cr.execute('ROLLBACK TO SAVEPOINT model_load')
            # (elided: presumably `ids = False` — confirm)
        return {'ids': ids, 'messages': messages}
    def _extract_records(self, cr, uid, fields_, data,
                         context=None, log=lambda a: None):
        """ Generates record dicts from the data sequence.

        The result is a generator of dicts mapping field names to raw
        (unconverted, unvalidated) values.

        For relational fields, if sub-fields were provided the value will be
        a list of sub-records

        The following sub-fields may be set on the record (by key):
        * None is the name_get for the record (to use with name_create/name_search)
        * "id" is the External ID for the record
        * ".id" is the Database ID for the record
        """
        # NOTE(review): a few original lines are elided in this copy; markers below.
        columns = dict((k, v.column) for k, v in self._all_columns.iteritems())
        # Fake columns to avoid special cases in extractor
        columns[None] = fields.char('rec_name')
        columns['id'] = fields.char('External ID')
        columns['.id'] = fields.integer('Database ID')
        # m2o fields can't be on multiple lines so exclude them from the
        # is_relational field rows filter, but special-case it later on to
        # be handled with relational fields (as it can have subfields)
        is_relational = lambda field: columns[field]._type in ('one2many', 'many2many', 'many2one')
        get_o2m_values = itemgetter_tuple(
            [index for index, field in enumerate(fields_)
                  if columns[field[0]]._type == 'one2many'])
        get_nono2m_values = itemgetter_tuple(
            [index for index, field in enumerate(fields_)
                  if columns[field[0]]._type != 'one2many'])
        # Checks if the provided row has any non-empty non-relational field
        def only_o2m_values(row, f=get_nono2m_values, g=get_o2m_values):
            return any(g(row)) and not any(f(row))
        # (elided: `index = 0` / `while True:` loop header)
            if index >= len(data): return
            # (elided: `row = data[index]`)
            # copy non-relational fields to record dict
            record = dict((field[0], value)
                          for field, value in itertools.izip(fields_, row)
                          if not is_relational(field[0]))
            # Get all following rows which have relational values attached to
            # the current record (no non-relational values)
            record_span = itertools.takewhile(
                only_o2m_values, itertools.islice(data, index + 1, None))
            # stitch record row back on for relational fields
            record_span = list(itertools.chain([row], record_span))
            for relfield in set(
                    field[0] for field in fields_
                             if is_relational(field[0])):
                column = columns[relfield]
                # FIXME: how to not use _obj without relying on fields_get?
                Model = self.pool[column._obj]
                # get only cells for this sub-field, should be strictly
                # non-empty, field path [None] is for name_get column
                indices, subfields = zip(*((index, field[1:] or [None])
                                           for index, field in enumerate(fields_)
                                           if field[0] == relfield))
                # return all rows which have at least one value for the
                # subfields of relfield
                relfield_data = filter(any, map(itemgetter_tuple(indices), record_span))
                # Recurse into the co-model to build the sub-record dicts.
                record[relfield] = [subrecord
                                    for subrecord, _subinfo in Model._extract_records(
                                        cr, uid, subfields, relfield_data,
                                        context=context, log=log)]
            # Second tuple element reports which source rows this record spans.
            yield record, {'rows': {
                # (elided: `'from': index,`)
                'to': index + len(record_span) - 1
            # (elided: closing braces)
            index += len(record_span)
    def _convert_records(self, cr, uid, records,
                         context=None, log=lambda a: None):
        """ Converts records from the source iterable (recursive dicts of
        strings) into forms which can be written to the database (via
        self.create or (ir.model.data)._update)

        :returns: a list of triplets of (id, xid, record)
        :rtype: list((int|None, str|None, dict))
        """
        # NOTE(review): a few original lines are elided in this copy; markers below.
        if context is None: context = {}
        Converter = self.pool['ir.fields.converter']
        columns = dict((k, v.column) for k, v in self._all_columns.iteritems())
        Translation = self.pool['ir.translation']
        # Human-readable (possibly translated) label per field, for messages.
        # (elided: `field_names = dict(` opening)
            (f, (Translation._get_source(cr, uid, self._name + ',' + f, 'field',
                                         context.get('lang'))
            # (elided: `or column.string))` fallback)
            for f, column in columns.iteritems())
        convert = Converter.for_model(cr, uid, self, context=context)
        def _log(base, field, exception):
            type = 'warning' if isinstance(exception, Warning) else 'error'
            # logs the logical (not human-readable) field name for automated
            # processing of response, but injects human readable in message
            record = dict(base, type=type, field=field,
                          message=unicode(exception.args[0]) % base)
            if len(exception.args) > 1 and exception.args[1]:
                record.update(exception.args[1])
            # (elided: `log(record)`)
        # CountingStream tracks the row index for error reporting.
        stream = CountingStream(records)
        for record, extras in stream:
            # name_get/name_create
            if None in record: pass
            # Database id (".id"): coerce and validate it exists.
                dbid = int(record['.id'])
            # (elided: `except ValueError:` header)
                # in case of overridden id column
                dbid = record['.id']
                if not self.search(cr, uid, [('id', '=', dbid)], context=context):
                    # (elided: `log(dict(extras, type='error',` opening)
                        record=stream.index,
                        # (elided: `field='.id',`)
                        message=_(u"Unknown database identifier '%s'") % dbid))
            converted = convert(record, lambda field, err:\
                _log(dict(extras, record=stream.index, field=field_names[field]), field, err))
            yield dbid, xid, converted, dict(extras, record=stream.index)
1554 def get_invalid_fields(self, cr, uid):
1555 return list(self._invalids)
    def _validate(self, cr, uid, ids, context=None):
        """Run the model's Python ``_constraints`` against ``ids``.

        Failing constraint field names are accumulated in ``self._invalids``
        (cleared again when everything validates) and a ``ValidateError`` is
        raised listing every failed constraint.
        """
        # NOTE(review): some original lines are elided in this copy; markers below.
        context = context or {}
        lng = context.get('lang')
        trans = self.pool.get('ir.translation')
        for constraint in self._constraints:
            # Each constraint is (check_function, message, field_names).
            fun, msg, fields = constraint
            # (elided: `try:` header)
                # We don't pass around the context here: validation code
                # must always yield the same results.
                valid = fun(self, cr, uid, ids)
            except Exception, e:
                _logger.debug('Exception while validating constraint', exc_info=True)
                # A crashing constraint counts as a failure; keep the details.
                extra_error = tools.ustr(e)
            # (elided: `if not valid:` header)
                # Check presence of __call__ directly instead of using
                # callable() because it will be deprecated as of Python 3.0
                if hasattr(msg, '__call__'):
                    tmp_msg = msg(self, cr, uid, ids, context=context)
                    if isinstance(tmp_msg, tuple):
                        # (message, params) pair: interpolate the params.
                        tmp_msg, params = tmp_msg
                        translated_msg = tmp_msg % params
                    # (elided: `else:`)
                        translated_msg = tmp_msg
                # (elided: `else:` — static message, translated via ir.translation)
                    translated_msg = trans._get_source(cr, uid, self._name, 'constraint', lng, msg)
                # Append exception details, if any, to the user message.
                    translated_msg += "\n\n%s\n%s" % (_('Error details:'), extra_error)
                # (elided: `error_msgs.append(` opening)
                        _("The field(s) `%s` failed against a constraint: %s") % (', '.join(fields), translated_msg)
                self._invalids.update(fields)
        # (elided: `if error_msgs:` header)
            raise except_orm('ValidateError', '\n'.join(error_msgs))
        # (elided: `else:`)
            self._invalids.clear()
    def default_get(self, cr, uid, fields_list, context=None):
        """
        Returns default values for the fields in fields_list.

        :param fields_list: list of fields to get the default values for (example ['field1', 'field2',])
        :type fields_list: list
        :param context: optional context dictionary - it may contains keys for specifying certain options
                        like ``context_lang`` (language) or ``context_tz`` (timezone) to alter the results of the call.
                        It may contain keys in the form ``default_XXX`` (where XXX is a field name), to set
                        or override a default value for a field.
                        A special ``bin_size`` boolean flag may also be passed in the context to request the
                        value of all fields.binary columns to be returned as the size of the binary instead of its
                        contents. This can also be selectively overriden by passing a field-specific flag
                        in the form ``bin_size_XXX: True/False`` where ``XXX`` is the name of the field.
                        Note: The ``bin_size_XXX`` form is new in OpenERP v6.0.
        :return: dictionary of the default values (set on the object model class, through user preferences, or in the context)
        """
        # NOTE(review): some original lines are elided in this copy; markers below.
        # trigger view init hook
        self.view_init(cr, uid, fields_list, context)
        # get the default values for the inherited fields
        for t in self._inherits.keys():
            defaults.update(self.pool[t].default_get(cr, uid, fields_list, context))
        # get the default values defined in the object
        for f in fields_list:
            if f in self._defaults:
                if callable(self._defaults[f]):
                    defaults[f] = self._defaults[f](self, cr, uid, context)
                # (elided: `else:` — static default value)
                    defaults[f] = self._defaults[f]
            # Column definition: own column first, then inherited one.
            fld_def = ((f in self._columns) and self._columns[f]) \
                    or ((f in self._inherit_fields) and self._inherit_fields[f][2]) \
            # (elided: final `or False` of the expression)
            if isinstance(fld_def, fields.property):
                # Property fields: default comes from ir.property.
                property_obj = self.pool.get('ir.property')
                prop_value = property_obj.get(cr, uid, f, self._name, context=context)
                # (elided: `if prop_value:` header)
                    if isinstance(prop_value, (browse_record, browse_null)):
                        defaults[f] = prop_value.id
                    # (elided: `else:`)
                        defaults[f] = prop_value
            if f not in defaults:
                # (elided: presumably `defaults[f] = False` — confirm)
        # get the default values set by the user and override the default
        # values defined in the object
        ir_values_obj = self.pool.get('ir.values')
        res = ir_values_obj.get(cr, uid, 'default', False, [self._name])
        for id, field, field_value in res:
            if field in fields_list:
                fld_def = (field in self._columns) and self._columns[field] or self._inherit_fields[field][2]
                if fld_def._type == 'many2one':
                    # Drop dangling references to deleted records.
                    obj = self.pool[fld_def._obj]
                    if not obj.search(cr, uid, [('id', '=', field_value or False)]):
                        # (elided: presumably `continue` — confirm)
                if fld_def._type == 'many2many':
                    # Keep only m2m target ids that still exist.
                    obj = self.pool[fld_def._obj]
                    for i in range(len(field_value or [])):
                        if not obj.search(cr, uid, [('id', '=',
                            # (elided: id argument / `continue`)
                        field_value2.append(field_value[i])
                    field_value = field_value2
                if fld_def._type == 'one2many':
                    # Filter each o2m sub-record's m2o values the same way.
                    obj = self.pool[fld_def._obj]
                    for i in range(len(field_value or [])):
                        field_value2.append({})
                        for field2 in field_value[i]:
                            if field2 in obj._columns.keys() and obj._columns[field2]._type == 'many2one':
                                obj2 = self.pool[obj._columns[field2]._obj]
                                if not obj2.search(cr, uid,
                                        [('id', '=', field_value[i][field2])]):
                                    # (elided: presumably `continue` — confirm)
                            elif field2 in obj._inherit_fields.keys() and obj._inherit_fields[field2][2]._type == 'many2one':
                                obj2 = self.pool[obj._inherit_fields[field2][2]._obj]
                                if not obj2.search(cr, uid,
                                        [('id', '=', field_value[i][field2])]):
                                    # (elided: presumably `continue` — confirm)
                            # TODO add test for many2many and one2many
                            field_value2[i][field2] = field_value[i][field2]
                    field_value = field_value2
                defaults[field] = field_value
        # get the default values from the context
        for key in context or {}:
            if key.startswith('default_') and (key[8:] in fields_list):
                defaults[key[8:]] = context[key]
        # (elided: `return defaults`)
    def fields_get_keys(self, cr, user, context=None):
        """Return the names of all fields of the model, including fields
        reached through ``_inherits`` parents."""
        res = self._columns.keys()
        # TODO I believe this loop can be replace by
        # res.extend(self._inherit_fields.key())
        for parent in self._inherits:
            res.extend(self.pool[parent].fields_get_keys(cr, user, context))
        # (elided: `return res` — confirm against full source)
    def _rec_name_fallback(self, cr, uid, context=None):
        """Return a field name usable as the record label: ``_rec_name`` if it
        is an actual column, otherwise the first column, otherwise ``"id"``."""
        rec_name = self._rec_name
        if rec_name not in self._columns:
            rec_name = self._columns.keys()[0] if len(self._columns.keys()) > 0 else "id"
        # (elided: `return rec_name` — confirm against full source)
    # Overload this method if you need a window title which depends on the context
    def view_header_get(self, cr, user, view_id=None, view_type='form', context=None):
        # (body elided in this copy; presumably the stock no-op `return False`
        # — confirm against full source)
1715 def user_has_groups(self, cr, uid, groups, context=None):
1716 """Return true if the user is at least member of one of the groups
1717 in groups_str. Typically used to resolve ``groups`` attribute
1718 in view and model definitions.
1720 :param str groups: comma-separated list of fully-qualified group
1721 external IDs, e.g.: ``base.group_user,base.group_system``
1722 :return: True if the current user is a member of one of the
1725 return any([self.pool.get('res.users').has_group(cr, uid, group_ext_id)
1726 for group_ext_id in groups.split(',')])
    def _get_default_form_view(self, cr, user, context=None):
        """ Generates a default single-line form view using all fields
        of the current model except the m2m and o2m ones.

        :param cr: database cursor
        :param int user: user id
        :param dict context: connection context
        :returns: a form view as an lxml document
        :rtype: etree._Element
        """
        view = etree.Element('form', string=self._description)
        # TODO it seems fields_get can be replaced by _all_columns (no need for translation)
        for field, descriptor in self.fields_get(cr, user, context=context).iteritems():
            if descriptor['type'] in ('one2many', 'many2many'):
                # (elided: presumably `continue` to skip relational lists — confirm)
            etree.SubElement(view, 'field', name=field)
            if descriptor['type'] == 'text':
                # Text fields get their own line.
                etree.SubElement(view, 'newline')
        # (elided: `return view` — confirm against full source)
    def _get_default_search_view(self, cr, user, context=None):
        """ Generates a single-field search view, based on _rec_name.

        :param cr: database cursor
        :param int user: user id
        :param dict context: connection context
        :returns: a search view as an lxml document
        :rtype: etree._Element
        """
        view = etree.Element('search', string=self._description)
        etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))
        # (elided: `return view` — confirm against full source)
    def _get_default_tree_view(self, cr, user, context=None):
        """ Generates a single-field tree view, based on _rec_name.

        :param cr: database cursor
        :param int user: user id
        :param dict context: connection context
        :returns: a tree view as an lxml document
        :rtype: etree._Element
        """
        view = etree.Element('tree', string=self._description)
        etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))
        # (elided: `return view` — confirm against full source)
    def _get_default_calendar_view(self, cr, user, context=None):
        """ Generates a default calendar view by trying to infer
        calendar fields from a number of pre-set attribute names

        :param cr: database cursor
        :param int user: user id
        :param dict context: connection context
        :returns: a calendar view
        :rtype: etree._Element
        """
        # NOTE(review): some original lines are elided in this copy; markers below.
        def set_first_of(seq, in_, to):
            """Sets the first value of ``seq`` also found in ``in_`` to
            the ``to`` attribute of the view being closed over.

            Returns whether it's found a suitable value (and set it on
            the attribute) or not
            """
            # (elided: loop scanning `seq` and calling view.set(to, item))

        view = etree.Element('calendar', string=self._description)
        etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))

        if self._date_name not in self._columns:
            # Probe the conventional date field names before giving up.
            for dt in ['date', 'date_start', 'x_date', 'x_date_start']:
                if dt in self._columns:
                    self._date_name = dt
                    # (elided: found-flag bookkeeping / break)
            # (elided: `if not date_found:` guard)
            raise except_orm(_('Invalid Object Architecture!'), _("Insufficient fields for Calendar View!"))
        view.set('date_start', self._date_name)

        # Optional color grouping field.
        set_first_of(["user_id", "partner_id", "x_user_id", "x_partner_id"],
                     self._columns, 'color')

        # Events need either an end date or a duration.
        if not set_first_of(["date_stop", "date_end", "x_date_stop", "x_date_end"],
                            self._columns, 'date_stop'):
            if not set_first_of(["date_delay", "planned_hours", "x_date_delay", "x_planned_hours"],
                                self._columns, 'date_delay'):
                # (elided: `raise except_orm(` opening)
                    _('Invalid Object Architecture!'),
                    _("Insufficient fields to generate a Calendar View for %s, missing a date_stop or a date_delay" % self._name))
        # (elided: `return view` — confirm against full source)
    def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
        """
        Get the detailed composition of the requested view like fields, model, view architecture

        :param view_id: id of the view or None
        :param view_type: type of the view to return if view_id is None ('form', tree', ...)
        :param toolbar: true to include contextual actions
        :param submenu: deprecated
        :return: dictionary describing the composition of the requested view (including inherited views and extensions)
        :raise AttributeError:
                * if the inherited view has unknown position to work with other than 'before', 'after', 'inside', 'replace'
                * if some tag other than 'position' is found in parent view
        :raise Invalid ArchitectureError: if there is view type other than form, tree, calendar, search etc defined on the structure
        """
        # NOTE(review): some original lines are elided in this copy; markers below.
        View = self.pool['ir.ui.view']
        # (elided: `result = {` opening)
            'model': self._name,
            'field_parent': False,
        # (elided: `}` closing)

        # try to find a view_id if none provided
        # (elided: `if not view_id:` guard)
            # <view_type>_view_ref in context can be used to override the default view
            view_ref_key = view_type + '_view_ref'
            view_ref = context.get(view_ref_key)
            # (elided: `if view_ref:` / `if '.' in view_ref:` guards)
                module, view_ref = view_ref.split('.', 1)
                cr.execute("SELECT res_id FROM ir_model_data WHERE model='ir.ui.view' AND module=%s AND name=%s", (module, view_ref))
                view_ref_res = cr.fetchone()
                # (elided: `if view_ref_res:` guard)
                    view_id = view_ref_res[0]
            # (elided: `else:` — unqualified view_ref is rejected with a warning)
                _logger.warning('%r requires a fully-qualified external id (got: %r for model %s). '
                    'Please use the complete `module.view_id` form instead.', view_ref_key, view_ref,
                    # (elided: `self._name)` final argument)

            # (elided: `if not view_id:` guard)
                # otherwise try to find the lowest priority matching ir.ui.view
                view_id = View.default_view(cr, uid, self._name, view_type, context=context)

        # context for post-processing might be overridden
        # (elided: `ctx = context` / `if view_id:` guard)
            # read the view with inherited views applied
            root_view = View.read_combined(cr, uid, view_id, fields=['id', 'name', 'field_parent', 'type', 'model', 'arch'], context=context)
            result['arch'] = root_view['arch']
            result['name'] = root_view['name']
            result['type'] = root_view['type']
            result['view_id'] = root_view['id']
            result['field_parent'] = root_view['field_parent']
            # override context for postprocessing
            if root_view.get('model') != self._name:
                ctx = dict(context, base_model_name=root_view.get('model'))
        # (elided: `else:`)
            # fallback on default views methods if no ir.ui.view could be found
            # (elided: `try:` header)
                get_func = getattr(self, '_get_default_%s_view' % view_type)
                arch_etree = get_func(cr, uid, context)
                result['arch'] = etree.tostring(arch_etree, encoding='utf-8')
                result['type'] = view_type
                result['name'] = 'default'
            except AttributeError:
                raise except_orm(_('Invalid Architecture!'), _("No default view of type '%s' could be found !") % view_type)

        # Apply post processing, groups and modifiers etc...
        xarch, xfields = View.postprocess_and_fields(cr, uid, self._name, etree.fromstring(result['arch']), view_id, context=ctx)
        result['arch'] = xarch
        result['fields'] = xfields

        # Add related action information if asked
        # (elided: `if toolbar:` guard)
            toclean = ('report_sxw_content', 'report_rml_content', 'report_sxw', 'report_rml', 'report_sxw_content_data', 'report_rml_content_data')
            # (elided: `def clean(x):` helper stripping the keys above from an action)
            ir_values_obj = self.pool.get('ir.values')
            resprint = ir_values_obj.get(cr, uid, 'action', 'client_print_multi', [(self._name, False)], False, context)
            resaction = ir_values_obj.get(cr, uid, 'action', 'client_action_multi', [(self._name, False)], False, context)
            resrelate = ir_values_obj.get(cr, uid, 'action', 'client_action_relate', [(self._name, False)], False, context)
            resaction = [clean(action) for action in resaction if view_type == 'tree' or not action[2].get('multi')]
            resprint = [clean(print_) for print_ in resprint if view_type == 'tree' or not print_[2].get('multi')]
            #When multi="True" set it will display only in More of the list view
            resrelate = [clean(action) for action in resrelate
                         if (action[2].get('multi') and view_type == 'tree') or (not action[2].get('multi') and view_type == 'form')]

            for x in itertools.chain(resprint, resaction, resrelate):
                x['string'] = x['name']

            result['toolbar'] = {
                # (elided: `'print': resprint,`)
                'action': resaction,
                # (elided: `'relate': resrelate` and closing brace)
        # (elided: `return result`)
1926 def _view_look_dom_arch(self, cr, uid, node, view_id, context=None):
1927 return self.pool['ir.ui.view'].postprocess_and_fields(
1928 cr, uid, self._name, node, view_id, context=context)
    def search_count(self, cr, user, args, context=None):
        """Return the number of records matching the search domain ``args``.

        Delegates to :meth:`search` with ``count=True``; the isinstance guard
        protects against overrides of ``search`` that ignore the ``count``
        flag and return a list of ids instead of an integer.
        """
        res = self.search(cr, user, args, context=context, count=True)
        if isinstance(res, list):
            # NOTE(review): the remainder of this method (presumably
            # ``return len(res)`` followed by ``return res``) is missing from
            # this copy of the file -- confirm against the upstream source.
    def search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False):
        """
        Search for records based on a search domain.

        :param cr: database cursor
        :param user: current user id
        :param args: list of tuples specifying the search domain [('field_name', 'operator', value), ...]. Pass an empty list to match all records.
        :param offset: optional number of results to skip in the returned values (default: 0)
        :param limit: optional max number of records to return (default: **None**)
        :param order: optional columns to sort by (default: self._order=id )
        :param context: optional context arguments, like lang, time zone
        :type context: dictionary
        :param count: optional (default: **False**), if **True**, returns only the number of records matching the criteria, not their ids
        :return: id or list of ids of records matching the criteria
        :rtype: integer or list of integers
        :raise AccessError: * if user tries to bypass access rules for read on the requested object.

        **Expressing a search domain (args)**

        Each tuple in the search domain needs to have 3 elements, in the form: **('field_name', 'operator', value)**, where:

            * **field_name** must be a valid name of field of the object model, possibly following many-to-one relationships using dot-notation, e.g 'street' or 'partner_id.country' are valid values.
            * **operator** must be a string with a valid comparison operator from this list: ``=, !=, >, >=, <, <=, like, ilike, in, not in, child_of, parent_left, parent_right``
              The semantics of most of these operators are obvious.
              The ``child_of`` operator will look for records who are children or grand-children of a given record,
              according to the semantics of this model (i.e following the relationship field named by
              ``self._parent_name``, by default ``parent_id``.
            * **value** must be a valid value to compare with the values of **field_name**, depending on its type.

        Domain criteria can be combined using 3 logical operators than can be added between tuples: '**&**' (logical AND, default), '**|**' (logical OR), '**!**' (logical NOT).
        These are **prefix** operators and the arity of the '**&**' and '**|**' operator is 2, while the arity of the '**!**' is just 1.
        Be very careful about this when you combine them the first time.

        Here is an example of searching for Partners named *ABC* from Belgium and Germany whose language is not english ::

            [('name','=','ABC'),'!',('language.code','=','en_US'),'|',('country_id.code','=','be'),('country_id.code','=','de')]

        The '&' is omitted as it is the default, and of course we could have used '!=' for the language, but what this domain really represents is::

            (name is 'ABC' AND (language is NOT english) AND (country is Belgium OR Germany))
        """
        return self._search(cr, user, args, offset=offset, limit=limit, order=order, context=context, count=count)
    def name_get(self, cr, user, ids, context=None):
        """Returns the preferred display value (text representation) for the records with the
        given ``ids``. By default this will be the value of the ``name`` column, unless
        the model implements a custom behavior.
        Can sometimes be seen as the inverse function of :meth:`~.name_search`, but it is not
        guaranteed to be.

        :return: list of pairs ``(id,text_repr)`` for all records with the given ``ids``.
        """
        if isinstance(ids, (int, long)):
            # NOTE(review): the body of this guard (presumably wrapping the
            # scalar id into a list: ``ids = [ids]``) is missing from this
            # copy -- confirm against the upstream source.
        if self._rec_name in self._all_columns:
            # let the column type render its own display value;
            # load='_classic_write' yields raw values instead of m2o pairs
            rec_name_column = self._all_columns[self._rec_name].column
            return [(r['id'], rec_name_column.as_display_name(cr, user, self, r[self._rec_name], context=context))
                    for r in self.read(cr, user, ids, [self._rec_name],
                                       load='_classic_write', context=context)]
        # fallback when no usable _rec_name column: technical "model,id" label
        return [(id, "%s,%s" % (self._name, id)) for id in ids]
    def name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100):
        """Search for records that have a display name matching the given ``name`` pattern if compared
        with the given ``operator``, while also matching the optional search domain (``args``).
        This is used for example to provide suggestions based on a partial value for a relational
        field.
        Sometimes be seen as the inverse function of :meth:`~.name_get`, but it is not
        guaranteed to be.

        This method is equivalent to calling :meth:`~.search` with a search domain based on ``name``
        and then :meth:`~.name_get` on the result of the search.

        :param list args: optional search domain (see :meth:`~.search` for syntax),
            specifying further restrictions
        :param str operator: domain operator for matching the ``name`` pattern, such as ``'like'``
            or ``'='``
        :param int limit: optional max number of records to return
        :return: list of pairs ``(id,text_repr)`` for all matching records.
        """
        return self._name_search(cr, user, name, args, operator, context, limit)
2023 def name_create(self, cr, uid, name, context=None):
2024 """Creates a new record by calling :meth:`~.create` with only one
2025 value provided: the name of the new record (``_rec_name`` field).
2026 The new record will also be initialized with any default values applicable
2027 to this model, or provided through the context. The usual behavior of
2028 :meth:`~.create` applies.
2029 Similarly, this method may raise an exception if the model has multiple
2030 required fields and some do not have default values.
2032 :param name: name of the record to create
2035 :return: the :meth:`~.name_get` pair value for the newly-created record.
2037 rec_id = self.create(cr, uid, {self._rec_name: name}, context)
2038 return self.name_get(cr, uid, [rec_id], context)[0]
    # private implementation of name_search, allows passing a dedicated user for the name_get part to
    # solve some access rights issues
    def _name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100, name_get_uid=None):
        # NOTE(review): several lines are missing here in this copy
        # (upstream normalizes ``args``/``context`` defaults and copies
        # ``args`` before mutating it) -- confirm against upstream.
        # optimize out the default criterion of ``ilike ''`` that matches everything
        if not (name == '' and operator == 'ilike'):
            args += [(self._rec_name, operator, name)]
        access_rights_uid = name_get_uid or user
        ids = self._search(cr, user, args, limit=limit, context=context, access_rights_uid=access_rights_uid)
        # name_get runs as the dedicated uid (when given) so that records the
        # searching user cannot read can still be rendered for suggestions
        res = self.name_get(cr, access_rights_uid, ids, context)
        # NOTE(review): the final ``return res`` is missing from this copy --
        # confirm against upstream.
    def read_string(self, cr, uid, id, langs, fields=None, context=None):
        """Read the translated labels (field ``string`` attributes) of this
        model for the requested languages, recursing into _inherits parents.

        NOTE(review): the result-dict initialisation, several loop heads and
        the final return are missing from this copy -- confirm against
        the upstream source.
        """
        self.pool.get('ir.translation').check_access_rights(cr, uid, 'read')
        # NOTE(review): an ``if not fields:`` guard appears to be missing -- confirm.
            fields = self._columns.keys() + self._inherit_fields.keys()
        #FIXME: collect all calls to _get_source into one SQL call.
        # NOTE(review): the ``for lang in langs:`` loop head is missing -- confirm.
            res[lang] = {'code': lang}
            # NOTE(review): the ``for f in fields:`` loop head is missing -- confirm.
                if f in self._columns:
                    res_trans = self.pool.get('ir.translation')._get_source(cr, uid, self._name+','+f, 'field', lang)
                    # NOTE(review): the ``if res_trans:`` / ``else:`` lines
                    # are missing; without a translation the untranslated
                    # label is used -- confirm.
                        res[lang][f] = res_trans
                        res[lang][f] = self._columns[f].string
        for table in self._inherits:
            # also collect labels for inherited (_inherits) fields
            cols = intersect(self._inherit_fields.keys(), fields)
            res2 = self.pool[table].read_string(cr, uid, id, langs, cols, context)
        # NOTE(review): the merge-loop heads over ``res2`` are missing -- confirm.
                res[lang]['code'] = lang
            for f in res2[lang]:
                res[lang][f] = res2[lang][f]
        # NOTE(review): the final return is missing from this copy -- confirm.
    def write_string(self, cr, uid, id, langs, vals, context=None):
        """Store translated labels (field ``string`` attributes) of this model
        for the requested languages, recursing into _inherits parents.
        """
        self.pool.get('ir.translation').check_access_rights(cr, uid, 'write')
        #FIXME: try to only call the translation in one SQL
        # NOTE(review): the ``for lang in langs:`` / ``for field in vals:``
        # loop heads are missing from this copy -- confirm against upstream.
                if field in self._columns:
                    # the untranslated column label is recorded as the source term
                    src = self._columns[field].string
                    self.pool.get('ir.translation')._set_ids(cr, uid, self._name+','+field, 'field', lang, [0], vals[field], src)
        for table in self._inherits:
            cols = intersect(self._inherit_fields.keys(), vals)
            # NOTE(review): an ``if cols:`` guard appears to be missing here -- confirm.
            self.pool[table].write_string(cr, uid, id, langs, vals, context)
        # NOTE(review): the final return appears to be missing -- confirm.
    def _add_missing_default_values(self, cr, uid, values, context=None):
        """Complete ``values`` with default values for every field of the
        model (own and inherited) not already provided by the caller.

        Inherited fields are skipped when the parent-record link itself is in
        ``values``: the linked parent already carries those values.
        """
        missing_defaults = []
        avoid_tables = [] # avoid overriding inherited values when parent is set
        for tables, parent_field in self._inherits.items():
            if parent_field in values:
                avoid_tables.append(tables)
        for field in self._columns.keys():
            if not field in values:
                missing_defaults.append(field)
        for field in self._inherit_fields.keys():
            if (field not in values) and (self._inherit_fields[field][0] not in avoid_tables):
                missing_defaults.append(field)

        if len(missing_defaults):
            # override defaults with the provided values, never allow the other way around
            defaults = self.default_get(cr, uid, missing_defaults, context)
            # NOTE(review): the loop head over the default values (upstream:
            # ``for dv in defaults:``) is missing from this copy -- confirm.
                # normalize x2many defaults into the (6,0,ids)/(0,0,vals)
                # command format expected by create()/write()
                if ((dv in self._columns and self._columns[dv]._type == 'many2many') \
                        or (dv in self._inherit_fields and self._inherit_fields[dv][2]._type == 'many2many')) \
                        and defaults[dv] and isinstance(defaults[dv][0], (int, long)):
                    defaults[dv] = [(6, 0, defaults[dv])]
                if (dv in self._columns and self._columns[dv]._type == 'one2many' \
                        or (dv in self._inherit_fields and self._inherit_fields[dv][2]._type == 'one2many')) \
                        and isinstance(defaults[dv], (list, tuple)) and defaults[dv] and isinstance(defaults[dv][0], dict):
                    defaults[dv] = [(0, 0, x) for x in defaults[dv]]
            defaults.update(values)
        # NOTE(review): the final lines returning the merged values are
        # missing from this copy -- confirm against upstream.
    def clear_caches(self):
        """ Clear the caches

        This clears the caches associated to methods decorated with
        ``tools.ormcache`` or ``tools.ormcache_multi``.
        """
        # NOTE(review): the ``try:`` line and the actual cache-clearing call
        # (upstream: ``self._ormcache.clear()``) are missing from this copy --
        # confirm against upstream.
        # probe for the cache attribute; raises AttributeError when absent
        getattr(self, '_ormcache')
        # flag the registry so other workers invalidate their caches too
        self.pool._any_cache_cleared = True
        except AttributeError:
        # NOTE(review): the ``pass`` body of this handler is missing -- confirm.
    def _read_group_fill_results(self, cr, uid, domain, groupby, groupby_list, aggregated_fields,
                                 read_group_result, read_group_result, read_group_order=None, context=None):
        """Helper method for filling in empty groups for all possible values of
           the field being grouped by"""

        # self._group_by_full should map groupable fields to a method that returns
        # a list of all aggregated values that we want to display for this field,
        # in the form of a m2o-like pair (key,label).
        # This is useful to implement kanban views for instance, where all columns
        # should be displayed even if they don't contain any record.

        # Grab the list of all groups that should be displayed, including all present groups
        present_group_ids = [x[groupby][0] for x in read_group_result if x[groupby]]
        all_groups,folded = self._group_by_full[groupby](self, cr, uid, present_group_ids, domain,
                                                         read_group_order=read_group_order,
                                                         access_rights_uid=openerp.SUPERUSER_ID,
        # NOTE(review): the closing ``context=context)`` argument line is
        # missing from this copy -- confirm against upstream.

        # one result line per group, pre-filled with False/0 aggregates
        result_template = dict.fromkeys(aggregated_fields, False)
        result_template[groupby + '_count'] = 0
        if groupby_list and len(groupby_list) > 1:
            # deeper groupby levels are handed back to the client via __context
            result_template['__context'] = {'group_by': groupby_list[1:]}

        # Merge the left_side (current results as dicts) with the right_side (all
        # possible values as m2o pairs). Both lists are supposed to be using the
        # same ordering, and can be merged in one pass.
        # NOTE(review): the ``result`` / ``known_values`` initialisations are
        # missing from this copy -- confirm.
        if len(groupby_list) < 2 and context.get('group_by_no_leaf'):
            # NOTE(review): missing lines here (upstream uses a placeholder
            # count attribute plus an ``else:`` branch) -- confirm.
            count_attr = groupby
        count_attr += '_count'

        def append_left(left_side):
            # keep an existing group line, or refresh its count if already known
            grouped_value = left_side[groupby] and left_side[groupby][0]
            if not grouped_value in known_values:
                result.append(left_side)
                known_values[grouped_value] = left_side
            # NOTE(review): missing ``else:`` line -- confirm.
                known_values[grouped_value].update({count_attr: left_side[count_attr]})
        def append_right(right_side):
            # materialize an empty group line from the template
            grouped_value = right_side[0]
            if not grouped_value in known_values:
                line = dict(result_template)
                line[groupby] = right_side
                line['__domain'] = [(groupby,'=',grouped_value)] + domain
                # NOTE(review): the ``result.append(line)`` call appears to
                # be missing here -- confirm.
                known_values[grouped_value] = line
        while read_group_result or all_groups:
            left_side = read_group_result[0] if read_group_result else None
            right_side = all_groups[0] if all_groups else None
            assert left_side is None or left_side[groupby] is False \
                or isinstance(left_side[groupby], (tuple,list)), \
                'M2O-like pair expected, got %r' % left_side[groupby]
            assert right_side is None or isinstance(right_side, (tuple,list)), \
                'M2O-like pair expected, got %r' % right_side
            if left_side is None:
                append_right(all_groups.pop(0))
            elif right_side is None:
                append_left(read_group_result.pop(0))
            elif left_side[groupby] == right_side:
                append_left(read_group_result.pop(0))
                all_groups.pop(0) # discard right_side
            elif not left_side[groupby] or not left_side[groupby][0]:
                # left side == "Undefined" entry, not present on right_side
                append_left(read_group_result.pop(0))
            # NOTE(review): missing ``else:`` line -- confirm.
                append_right(all_groups.pop(0))
        # NOTE(review): the ``if folded:`` guard, the loop head over
        # ``result`` and the final ``return result`` are missing -- confirm.
                r['__fold'] = folded.get(r[groupby] and r[groupby][0], False)
    def _read_group_prepare(self, orderby, aggregated_fields, groupby, qualified_groupby_field, query, groupby_type=None):
        """
        Prepares the GROUP BY and ORDER BY terms for the read_group method. Adds the missing JOIN clause
        to the query if order should be computed against m2o field.
        :param orderby: the orderby definition in the form "%(field)s %(order)s"
        :param aggregated_fields: list of aggregated fields in the query
        :param groupby: the current groupby field name
        :param qualified_groupby_field: the fully qualified SQL name for the grouped field
        :param osv.Query query: the query under construction
        :param groupby_type: the type of the grouped field
        :return: (groupby_terms, orderby_terms)
        """
        groupby_terms = [qualified_groupby_field] if groupby else []
        # NOTE(review): the ``orderby_terms`` initialisation and the
        # ``if not orderby:`` guard for this early return are missing from
        # this copy -- confirm against upstream.
            return groupby_terms, orderby_terms
        # validate the order spec (anti SQL-injection) before using it
        self._check_qorder(orderby)
        for order_part in orderby.split(','):
            order_split = order_part.split()
            order_field = order_split[0]
            if order_field == groupby:
                if groupby_type == 'many2one':
                    # ordering by the m2o display value needs the JOIN set up
                    # by _generate_order_by; reuse its clause without prefix
                    order_clause = self._generate_order_by(order_part, query).replace('ORDER BY ', '')
                    # NOTE(review): an ``if order_clause:`` guard appears to
                    # be missing here -- confirm.
                    orderby_terms.append(order_clause)
                    groupby_terms += [order_term.split()[0] for order_term in order_clause.split(',')]
                # NOTE(review): missing ``else:`` line -- confirm.
                    orderby_terms.append(order_part)
            elif order_field in aggregated_fields:
                orderby_terms.append(order_part)
            # NOTE(review): missing ``else:`` line -- confirm.
                # Cannot order by a field that will not appear in the results (needs to be grouped or aggregated)
                _logger.warn('%s: read_group order by `%s` ignored, cannot sort on empty columns (not grouped/aggregated)',
                             self._name, order_part)
        return groupby_terms, orderby_terms
    def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False):
        """
        Get the list of records in list view grouped by the given ``groupby`` fields

        :param cr: database cursor
        :param uid: current user id
        :param domain: list specifying search criteria [['field_name', 'operator', 'value'], ...]
        :param list fields: list of fields present in the list view specified on the object
        :param list groupby: list of groupby descriptions by which the records will be grouped.
                A groupby description is either a field (then it will be grouped by that field)
                or a string 'field:groupby_function'. Right now, the only functions supported
                are 'day', 'week', 'month', 'quarter' or 'year', and they only make sense for
                date/datetime fields.
        :param int offset: optional number of records to skip
        :param int limit: optional max number of records to return
        :param dict context: context arguments, like lang, time zone.
        :param list orderby: optional ``order by`` specification, for
                             overriding the natural sort ordering of the
                             groups, see also :py:meth:`~osv.osv.osv.search`
                             (supported only for many2one fields currently)
        :return: list of dictionaries(one dictionary for each record) containing:

                    * the values of fields grouped by the fields in ``groupby`` argument
                    * __domain: list of tuples specifying the search criteria
                    * __context: dictionary with argument like ``groupby``
        :rtype: [{'field_name_1': value, ...]
        :raise AccessError: * if user has no read rights on the requested object
                            * if user tries to bypass access rules for read on the requested object
        """
        context = context or {}
        self.check_access_rights(cr, uid, 'read')
        # NOTE(review): an ``if not fields:`` guard appears to be missing
        # from this copy -- confirm against upstream.
            fields = self._columns.keys()

        query = self._where_calc(cr, uid, domain, context=context)
        self._apply_ir_rules(cr, uid, query, 'read', context=context)

        # Take care of adding join(s) if groupby is an '_inherits'ed field
        groupby_list = groupby
        qualified_groupby_field = groupby
        # NOTE(review): an enclosing ``if groupby:`` guard appears to be
        # missing here -- confirm.
        if isinstance(groupby, list):
            # only the first level is grouped here; deeper levels are handed
            # back to the client through '__context'
            groupby = groupby[0]
        splitted_groupby = groupby.split(':')
        if len(splitted_groupby) == 2:
            # 'field:function' syntax, e.g. 'date_order:month'
            groupby = splitted_groupby[0]
            groupby_function = splitted_groupby[1]
        # NOTE(review): missing ``else:`` line -- confirm.
            groupby_function = False
        qualified_groupby_field = self._inherits_join_calc(groupby, query)

        assert not groupby or groupby in fields, "Fields in 'groupby' must appear in the list of fields to read (perhaps it's missing in the list view?)"
        groupby_def = self._columns.get(groupby) or (self._inherit_fields.get(groupby) and self._inherit_fields.get(groupby)[2])
        assert groupby_def and groupby_def._classic_write, "Fields in 'groupby' must be regular database-persisted fields (no function or related fields), or function fields with store=True"

        # TODO it seems fields_get can be replaced by _all_columns (no need for translation)
        fget = self.fields_get(cr, uid, fields)
        group_by_params = {}
        # NOTE(review): initialisation lines (e.g. ``select_terms``,
        # ``groupby_type``) appear to be missing here -- confirm.
        if fget.get(groupby):
            groupby_type = fget[groupby]['type']
            if groupby_type in ('date', 'datetime'):
                if groupby_function:
                    interval = groupby_function
                # NOTE(review): missing ``else:`` branch (upstream defaults
                # the interval, presumably to 'month') -- confirm.

                # babel display format for the group labels
                if interval == 'day':
                    display_format = 'dd MMM YYYY'
                elif interval == 'week':
                    display_format = "'W'w YYYY"
                elif interval == 'month':
                    display_format = 'MMMM YYYY'
                elif interval == 'quarter':
                    display_format = 'QQQ YYYY'
                elif interval == 'year':
                    display_format = 'YYYY'

                if groupby_type == 'datetime' and context.get('tz') in pytz.all_timezones:
                    # Convert groupby result to user TZ to avoid confusion!
                    # PostgreSQL is compatible with all pytz timezone names, so we can use them
                    # directly for conversion, starting with timestamps stored in UTC.
                    timezone = context.get('tz', 'UTC')
                    qualified_groupby_field = "timezone('%s', timezone('UTC',%s))" % (timezone, qualified_groupby_field)
                qualified_groupby_field = "date_trunc('%s', %s)" % (interval, qualified_groupby_field)
            elif groupby_type == 'boolean':
                # group NULL together with false for boolean columns
                qualified_groupby_field = "coalesce(%s,false)" % qualified_groupby_field
            select_terms.append("%s as %s " % (qualified_groupby_field, groupby))
        # NOTE(review): missing ``else:`` line -- confirm.
            # Don't allow arbitrary values, as this would be a SQL injection vector!
            raise except_orm(_('Invalid group_by'),
                             _('Invalid group_by specification: "%s".\nA group_by specification must be a list of valid fields.')%(groupby,))

        # numeric columns are aggregated; 'id'/'sequence' and the grouped
        # column itself are excluded
        aggregated_fields = [
            # NOTE(review): the comprehension head (``f for f in fields``) is
            # missing from this copy -- confirm.
            if f not in ('id', 'sequence', groupby)
            if fget[f]['type'] in ('integer', 'float')
            if (f in self._all_columns and getattr(self._all_columns[f].column, '_classic_write'))]
        for f in aggregated_fields:
            # use the aggregate declared on the field, defaulting to sum()
            group_operator = fget[f].get('group_operator', 'sum')
            qualified_field = self._inherits_join_calc(f, query)
            select_terms.append("%s(%s) AS %s" % (group_operator, qualified_field, f))

        order = orderby or groupby or ''
        groupby_terms, orderby_terms = self._read_group_prepare(order, aggregated_fields, groupby, qualified_groupby_field, query, groupby_type)

        from_clause, where_clause, where_clause_params = query.get_sql()
        if len(groupby_list) < 2 and context.get('group_by_no_leaf'):
            # NOTE(review): missing lines (upstream uses a placeholder count
            # field plus an ``else:`` branch) -- confirm.
            count_field = groupby

        prefix_terms = lambda prefix, terms: (prefix + " " + ",".join(terms)) if terms else ''
        prefix_term = lambda prefix, term: ('%s %s' % (prefix, term)) if term else ''
        # NOTE(review): the ``query = """...`` opener and most of the SQL
        # template lines (FROM/WHERE/GROUP BY/ORDER BY/LIMIT/OFFSET
        # placeholders and the ``""" % {`` formatting) are missing from this
        # copy -- confirm against upstream.
            SELECT min(%(table)s.id) AS id, count(%(table)s.id) AS %(count_field)s_count
                'table': self._table,
                'count_field': count_field,
                'extra_fields': prefix_terms(',', select_terms),
                'from': from_clause,
                'where': prefix_term('WHERE', where_clause),
                'groupby': prefix_terms('GROUP BY', groupby_terms),
                'orderby': prefix_terms('ORDER BY', orderby_terms),
                'limit': prefix_term('LIMIT', int(limit) if limit else None),
                # NOTE(review): OFFSET is emitted only when ``limit`` is set;
                # this looks like a latent bug (should depend on ``offset``)
                # -- confirm against upstream before changing.
                'offset': prefix_term('OFFSET', int(offset) if limit else None),
        cr.execute(query, where_clause_params)

        fetched_data = cr.dictfetchall()

        # NOTE(review): the ``alldata`` / ``data_ids`` initialisations are
        # missing from this copy -- confirm.
        for r in fetched_data:
            for fld, val in r.items():
                if val is None: r[fld] = False
            alldata[r['id']] = r
            data_ids.append(r['id'])

        # NOTE(review): an ``if groupby:`` guard appears to be missing around
        # the read() below -- confirm.
            data = self.read(cr, uid, data_ids, [groupby], context=context)
            # restore order of the search as read() uses the default _order (this is only for groups, so the footprint of data should be small):
            data_dict = dict((d['id'], d[groupby] ) for d in data)
            result = [{'id': i, groupby: data_dict[i]} for i in data_ids]
        # NOTE(review): missing ``else:`` line -- confirm.
            result = [{'id': i} for i in data_ids]

        # NOTE(review): the loop head over ``result`` is missing -- confirm.
            d['__domain'] = [(groupby, '=', alldata[d['id']][groupby] or False)] + domain
            if not isinstance(groupby_list, (str, unicode)):
                if groupby or not context.get('group_by_no_leaf', False):
                    d['__context'] = {'group_by': groupby_list[1:]}
            if groupby and groupby in fget:
                groupby_type = fget[groupby]['type']
                if d[groupby] and groupby_type in ('date', 'datetime'):
                    groupby_datetime = alldata[d['id']][groupby]
                    if isinstance(groupby_datetime, basestring):
                        _default = datetime.datetime(1970, 1, 1) # force starts of month
                        groupby_datetime = dateutil.parser.parse(groupby_datetime, default=_default)
                    tz_convert = groupby_type == 'datetime' and context.get('tz') in pytz.all_timezones
                    # NOTE(review): a ``if tz_convert:`` guard appears to be
                    # missing here -- confirm.
                        groupby_datetime = pytz.timezone(context['tz']).localize(groupby_datetime)
                    d[groupby] = babel.dates.format_date(
                        groupby_datetime, format=display_format, locale=context.get('lang', 'en_US'))
                    # compute the [begin, end) interval covered by this group
                    domain_dt_begin = groupby_datetime
                    if interval == 'quarter':
                        domain_dt_end = groupby_datetime + dateutil.relativedelta.relativedelta(months=3)
                    elif interval == 'month':
                        domain_dt_end = groupby_datetime + dateutil.relativedelta.relativedelta(months=1)
                    elif interval == 'week':
                        domain_dt_end = groupby_datetime + datetime.timedelta(days=7)
                    elif interval == 'day':
                        domain_dt_end = groupby_datetime + datetime.timedelta(days=1)
                    # NOTE(review): missing ``else:`` (year interval) -- confirm.
                        domain_dt_end = groupby_datetime + dateutil.relativedelta.relativedelta(years=1)
                    # NOTE(review): a ``if tz_convert:`` guard appears to be
                    # missing here -- confirm.
                        # the time boundaries were all computed in the apparent TZ of the user,
                        # so we need to convert them to UTC to have proper server-side values.
                        domain_dt_begin = domain_dt_begin.astimezone(pytz.utc)
                        domain_dt_end = domain_dt_end.astimezone(pytz.utc)
                    dt_format = DEFAULT_SERVER_DATETIME_FORMAT if groupby_type == 'datetime' else DEFAULT_SERVER_DATE_FORMAT
                    d['__domain'] = [(groupby, '>=', domain_dt_begin.strftime(dt_format)),
                                     (groupby, '<', domain_dt_end.strftime(dt_format))] + domain
                del alldata[d['id']][groupby]
            d.update(alldata[d['id']])

        if groupby and groupby in self._group_by_full:
            # let _group_by_full add the empty groups (e.g. kanban columns)
            result = self._read_group_fill_results(cr, uid, domain, groupby, groupby_list,
                                                   aggregated_fields, result, read_group_order=order,
        # NOTE(review): the closing arguments of this call and the final
        # ``return result`` are missing from this copy -- confirm.
    def _inherits_join_add(self, current_model, parent_model_name, query):
        """
        Add missing table SELECT and JOIN clause to ``query`` for reaching the parent table (no duplicates)
        :param current_model: current model object
        :param parent_model_name: name of the parent model for which the clauses should be added
        :param query: query object on which the JOIN should be added
        """
        # the _inherits dict maps parent model name -> m2o link field name
        inherits_field = current_model._inherits[parent_model_name]
        parent_model = self.pool[parent_model_name]
        parent_alias, parent_alias_statement = query.add_join((current_model._table, parent_model._table, inherits_field, 'id', inherits_field), implicit=True)
        # NOTE(review): the final line (upstream returns ``parent_alias``) is
        # missing from this copy -- confirm against upstream.
2471 def _inherits_join_calc(self, field, query):
2473 Adds missing table select and join clause(s) to ``query`` for reaching
2474 the field coming from an '_inherits' parent table (no duplicates).
2476 :param field: name of inherited field to reach
2477 :param query: query object on which the JOIN should be added
2478 :return: qualified name of field, to be used in SELECT clause
2480 current_table = self
2481 parent_alias = '"%s"' % current_table._table
2482 while field in current_table._inherit_fields and not field in current_table._columns:
2483 parent_model_name = current_table._inherit_fields[field][0]
2484 parent_table = self.pool[parent_model_name]
2485 parent_alias = self._inherits_join_add(current_table, parent_model_name, query)
2486 current_table = parent_table
2487 return '%s."%s"' % (parent_alias, field)
    def _parent_store_compute(self, cr):
        """Recompute the parent_left/parent_right nested-set columns for the
        whole table by a depth-first walk of the parent/child tree.

        Only meaningful for models that declare ``_parent_store``.
        """
        if not self._parent_store:
            # NOTE(review): the early-return body of this guard is missing
            # from this copy -- confirm against upstream.
        _logger.info('Computing parent left and right for table %s...', self._table)
        def browse_rec(root, pos=0):
            # assigns the [pos, pos2] nested-set interval to ``root`` and
            # recurses into its children
            # NOTE(review): the ``if root:`` / ``else:`` lines that select
            # between the two ``where`` assignments are missing -- confirm.
            where = self._parent_name+'='+str(root)
            where = self._parent_name+' IS NULL'
            if self._parent_order:
                where += ' order by '+self._parent_order
            cr.execute('SELECT id FROM '+self._table+' WHERE '+where)
            # NOTE(review): the ``pos2`` initialisation is missing -- confirm.
            for id in cr.fetchall():
                pos2 = browse_rec(id[0], pos2)
            cr.execute('update '+self._table+' set parent_left=%s, parent_right=%s where id=%s', (pos, pos2, root))
            # NOTE(review): the return of the next free position is missing
            # -- confirm.
        # seed the walk with all the root records (no parent)
        query = 'SELECT id FROM '+self._table+' WHERE '+self._parent_name+' IS NULL'
        if self._parent_order:
            query += ' order by ' + self._parent_order
        # NOTE(review): the ``pos`` initialisation and the ``cr.execute(query)``
        # call are missing from this copy -- confirm.
        for (root,) in cr.fetchall():
            pos = browse_rec(root, pos)
        # NOTE(review): the final return is missing from this copy -- confirm.
    def _update_store(self, cr, f, k):
        """Recompute and persist the stored values of the function field ``k``
        (column object ``f``) for every existing row, in batches of
        AUTOINIT_RECALCULATE_STORED_FIELDS ids.
        """
        _logger.info("storing computed values of fields.function '%s'", k)
        # _symbol_set = (sql placeholder, python->sql conversion callable)
        ss = self._columns[k]._symbol_set
        update_query = 'UPDATE "%s" SET "%s"=%s WHERE id=%%s' % (self._table, k, ss[0])
        cr.execute('select id from '+self._table)
        ids_lst = map(lambda x: x[0], cr.fetchall())
        # NOTE(review): the ``while ids_lst:`` batching loop head is missing
        # from this copy -- confirm against upstream.
        iids = ids_lst[:AUTOINIT_RECALCULATE_STORED_FIELDS]
        ids_lst = ids_lst[AUTOINIT_RECALCULATE_STORED_FIELDS:]
        # compute the whole batch with the superuser to bypass access rules
        res = f.get(cr, self, iids, k, SUPERUSER_ID, {})
        for key, val in res.items():
            # NOTE(review): the multi-field unwrapping lines (upstream:
            # ``if f._multi: val = val[k]``) appear to be missing -- confirm.
            # if val is a many2one, just write the ID
            if type(val) == tuple:
                # NOTE(review): the ``val = val[0]`` body is missing -- confirm.
            if val is not False:
                cr.execute(update_query, (ss[1](val), key))
    def _check_selection_field_value(self, cr, uid, field, value, context=None):
        """Raise except_orm if value is not among the valid values for the selection field"""
        if self._columns[field]._type == 'reference':
            # reference values look like 'model.name,id': validate the model
            # part against the selection and the id part as a non-zero integer
            val_model, val_id_str = value.split(',', 1)
            # NOTE(review): the ``try:`` wrapper lines are missing from this
            # copy -- confirm against upstream.
            val_id = long(val_id_str)
            # NOTE(review): the ``except ValueError`` handler and the
            # ``if not val_id:`` guard lines are missing -- confirm.
            raise except_orm(_('ValidateError'),
                             _('Invalid value for reference field "%s.%s" (last part must be a non-zero integer): "%s"') % (self._table, field, value))
            # NOTE(review): the assignment of ``val`` (and the non-reference
            # ``else:`` branch assigning ``val = value``) are missing -- confirm.
        if isinstance(self._columns[field].selection, (tuple, list)):
            # static selection list: direct membership check
            if val in dict(self._columns[field].selection):
            # NOTE(review): the ``return`` body is missing -- confirm.
        # callable selection: evaluate it before checking membership
        elif val in dict(self._columns[field].selection(self, cr, uid, context=context)):
            # NOTE(review): the ``return`` body is missing -- confirm.
        raise except_orm(_('ValidateError'),
                         _('The value "%s" for the field "%s.%s" is not in the selection') % (value, self._table, field))
    def _check_removed_columns(self, cr, log=False):
        # iterate on the database columns to drop the NOT NULL constraints
        # of fields which were required but have been removed (or will be added by another module)
        # non-stored function fields have no DB column, so they are excluded
        columns = [c for c in self._columns if not (isinstance(self._columns[c], fields.function) and not self._columns[c].store)]
        columns += MAGIC_COLUMNS
        cr.execute("SELECT a.attname, a.attnotnull"
                   " FROM pg_class c, pg_attribute a"
                   " WHERE c.relname=%s"
                   " AND c.oid=a.attrelid"
                   " AND a.attisdropped=%s"
                   " AND pg_catalog.format_type(a.atttypid, a.atttypmod) NOT IN ('cid', 'tid', 'oid', 'xid')"
                   " AND a.attname NOT IN %s", (self._table, False, tuple(columns))),
        # NOTE(review): the trailing comma above turns this statement into a
        # throwaway 1-tuple; harmless at runtime but likely unintended --
        # confirm against upstream.

        for column in cr.dictfetchall():
            # NOTE(review): an ``if log:`` guard appears to be missing around
            # this debug message -- confirm.
            _logger.debug("column %s is in the table %s but not in the corresponding object %s",
                          column['attname'], self._table, self._name)
            if column['attnotnull']:
                # identifiers cannot be bound as query parameters, hence the
                # direct interpolation of (quoted) table/column names in DDL
                cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, column['attname']))
                _schema.debug("Table '%s': column '%s': dropped NOT NULL constraint",
                              self._table, column['attname'])
    def _save_constraint(self, cr, constraint_name, type):
        """
        Record the creation of a constraint for this model, to make it possible
        to delete it later when the module is uninstalled. Type can be either
        'f' or 'u' depending on the constraint being a foreign key or not.
        """
        if not self._module:
            # no need to save constraints for custom models as they're not part
            # of any module
            # NOTE(review): the early ``return`` of this guard is missing
            # from this copy -- confirm against upstream.
        assert type in ('f', 'u')
        # check whether the constraint is already recorded for this module
        # NOTE(review): the ``cr.execute("""`` opener for the SELECT below is
        # missing from this copy -- confirm.
            SELECT 1 FROM ir_model_constraint, ir_module_module
            WHERE ir_model_constraint.module=ir_module_module.id
                AND ir_model_constraint.name=%s
                AND ir_module_module.name=%s
            """, (constraint_name, self._module))
        # NOTE(review): the ``if not cr.rowcount:`` guard and the
        # ``cr.execute("""`` opener for the INSERT are missing -- confirm.
            INSERT INTO ir_model_constraint
                (name, date_init, date_update, module, model, type)
            VALUES (%s, now() AT TIME ZONE 'UTC', now() AT TIME ZONE 'UTC',
                (SELECT id FROM ir_module_module WHERE name=%s),
                (SELECT id FROM ir_model WHERE model=%s), %s)""",
                (constraint_name, self._module, self._name, type))
    def _save_relation_table(self, cr, relation_table):
        """
        Record the creation of a many2many for this model, to make it possible
        to delete it later when the module is uninstalled.
        """
        # check whether the relation table is already recorded for this module
        # NOTE(review): the ``cr.execute("""`` opener for the SELECT below is
        # missing from this copy -- confirm against upstream.
            SELECT 1 FROM ir_model_relation, ir_module_module
            WHERE ir_model_relation.module=ir_module_module.id
                AND ir_model_relation.name=%s
                AND ir_module_module.name=%s
            """, (relation_table, self._module))
        # NOTE(review): an ``if not cr.rowcount:`` guard appears to be
        # missing before the INSERT -- confirm.
            cr.execute("""INSERT INTO ir_model_relation (name, date_init, date_update, module, model)
                          VALUES (%s, now() AT TIME ZONE 'UTC', now() AT TIME ZONE 'UTC',
                            (SELECT id FROM ir_module_module WHERE name=%s),
                            (SELECT id FROM ir_model WHERE model=%s))""",
                       (relation_table, self._module, self._name))
# checked version: for direct m2o starting from `self`
def _m2o_add_foreign_key_checked(self, source_field, dest_model, ondelete):
    """Queue a foreign key from self._table.source_field to dest_model's
    table in self._foreign_keys (the DDL is emitted later by _auto_end),
    enforcing that persistent models never reference transient ones.
    """
    assert self.is_transient() or not dest_model.is_transient(), \
        'Many2One relationships from non-transient Model to TransientModel are forbidden'
    rule = ondelete
    if self.is_transient() and not dest_model.is_transient():
        # TransientModel relationships to regular Models are annoying
        # usually because they could block deletion due to the FKs.
        # So unless stated otherwise we default them to ondelete=cascade.
        rule = rule or 'cascade'
    fk = (self._table, source_field, dest_model._table, rule or 'set null')
    self._foreign_keys.add(fk)
    _schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s", *fk)
# unchecked version: for custom cases, such as m2m relationships
def _m2o_add_foreign_key_unchecked(self, source_table, source_field, dest_model, ondelete):
    """Queue a foreign key from an arbitrary source table to dest_model's
    table in self._foreign_keys, with no transient-model validation; the
    DDL is emitted later by _auto_end.
    """
    rule = ondelete or 'set null'
    fk = (source_table, source_field, dest_model._table, rule)
    self._foreign_keys.add(fk)
    _schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s", *fk)
2642 def _drop_constraint(self, cr, source_table, constraint_name):
2643 cr.execute("ALTER TABLE %s DROP CONSTRAINT %s" % (source_table,constraint_name))
def _m2o_fix_foreign_key(self, cr, source_table, source_field, dest_model, ondelete):
    # Reconcile the FK constraint(s) on a many2one column with the current
    # field definition, dropping stale ones and re-queueing the right one.
    # NOTE(review): some original lines (binding of `cons`, the else/return
    # branches) are not visible in this extract.
    # Find FK constraint(s) currently established for the m2o field,
    # and see whether they are stale or not
    cr.execute("""SELECT confdeltype as ondelete_rule, conname as constraint_name,
                         cl2.relname as foreign_table
                  FROM pg_constraint as con, pg_class as cl1, pg_class as cl2,
                       pg_attribute as att1, pg_attribute as att2
                  WHERE con.conrelid = cl1.oid
                    AND cl1.relname = %s
                    AND con.confrelid = cl2.oid
                    AND array_lower(con.conkey, 1) = 1
                    AND con.conkey[1] = att1.attnum
                    AND att1.attrelid = cl1.oid
                    AND att1.attname = %s
                    AND array_lower(con.confkey, 1) = 1
                    AND con.confkey[1] = att2.attnum
                    AND att2.attrelid = cl2.oid
                    AND att2.attname = %s
                    AND con.contype = 'f'""", (source_table, source_field, 'id'))
    constraints = cr.dictfetchall()
        if len(constraints) == 1:
            # Is it the right constraint?
            if self.is_transient() and not dest_model.is_transient():
                # transient foreign keys are added as cascade by default
                ondelete = ondelete or 'cascade'
            # Compare the catalog's ondelete rule and referenced table with
            # what the field definition expects.
            if cons['ondelete_rule'] != POSTGRES_CONFDELTYPES.get((ondelete or 'set null').upper(), 'a')\
                or cons['foreign_table'] != dest_model._table:
                # Wrong FK: drop it and recreate
                _schema.debug("Table '%s': dropping obsolete FK constraint: '%s'",
                              source_table, cons['constraint_name'])
                self._drop_constraint(cr, source_table, cons['constraint_name'])
                # it's all good, nothing to do!
            # Multiple FKs found for the same field, drop them all, and re-create
            for cons in constraints:
                _schema.debug("Table '%s': dropping duplicate FK constraints: '%s'",
                              source_table, cons['constraint_name'])
                self._drop_constraint(cr, source_table, cons['constraint_name'])

    # (re-)create the FK
    self._m2o_add_foreign_key_checked(source_field, dest_model, ondelete)
def _auto_init(self, cr, context=None):
    """

    Call _field_create and, unless _auto is False:

    - create the corresponding table in database for the model,
    - possibly add the parent columns in database,
    - possibly add the columns 'create_uid', 'create_date', 'write_uid',
      'write_date' in database if _log_access is True (the default),
    - report on database columns no more existing in _columns,
    - remove no more existing not null constraints,
    - alter existing database columns to match _columns,
    - create database tables to match _columns,
    - add database indices to match _columns,
    - save in self._foreign_keys a list a foreign keys to create (see
      _auto_end).

    """
    # NOTE(review): many original control-flow lines (if/else/try/except
    # headers, `continue` statements, the todo_end accumulator setup and
    # the final return) are not visible in this extract.
    self._foreign_keys = set()
    raise_on_invalid_object_name(self._name)
    store_compute = False
    update_custom_fields = context.get('update_custom_fields', False)
    self._field_create(cr, context=context)
    create = not self._table_exist(cr)
            self._create_table(cr)
        if self._parent_store:
            if not self._parent_columns_exist(cr):
                self._create_parent_columns(cr)
                store_compute = True

        # Create the create_uid, create_date, write_uid, write_date, columns if desired.
        if self._log_access:
            self._add_log_columns(cr)

        self._check_removed_columns(cr, log=False)

        # iterate on the "object columns"
        column_data = self._select_column_data(cr)

        for k, f in self._columns.iteritems():
            if k in MAGIC_COLUMNS:
            # Don't update custom (also called manual) fields
            if f.manual and not update_custom_fields:

            if isinstance(f, fields.one2many):
                self._o2m_raise_on_missing_reference(cr, f)

            elif isinstance(f, fields.many2many):
                self._m2m_raise_or_create_relation(cr, f)

                res = column_data.get(k)

                # The field is not found as-is in database, try if it
                # exists with an old name.
                if not res and hasattr(f, 'oldname'):
                    res = column_data.get(f.oldname)
                        cr.execute('ALTER TABLE "%s" RENAME "%s" TO "%s"' % (self._table, f.oldname, k))
                        column_data[k] = res
                        _schema.debug("Table '%s': renamed column '%s' to '%s'",
                                      self._table, f.oldname, k)

                # The field already exists in database. Possibly
                # change its type, rename it, drop it or change its
                    f_pg_type = res['typname']
                    f_pg_size = res['size']
                    f_pg_notnull = res['attnotnull']
                    # Non-stored function fields have no DB column: drop it.
                    if isinstance(f, fields.function) and not f.store and\
                            not getattr(f, 'nodrop', False):
                        _logger.info('column %s (%s) in table %s removed: converted to a function !\n',
                                     k, f.string, self._table)
                        cr.execute('ALTER TABLE "%s" DROP COLUMN "%s" CASCADE' % (self._table, k))
                        _schema.debug("Table '%s': dropped column '%s' with cascade",
                    f_obj_type = get_pg_type(f) and get_pg_type(f)[0]
                        # Allowed in-place type conversions:
                        # (db type, field type, new db type, cast suffix)
                        ('text', 'char', pg_varchar(f.size), '::%s' % pg_varchar(f.size)),
                        ('varchar', 'text', 'TEXT', ''),
                        ('int4', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
                        ('date', 'datetime', 'TIMESTAMP', '::TIMESTAMP'),
                        ('timestamp', 'date', 'date', '::date'),
                        ('numeric', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
                        ('float8', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
                    # Widen a varchar column whose DB size is smaller than the
                    # field definition requests.
                    if f_pg_type == 'varchar' and f._type == 'char' and f_pg_size and (f.size is None or f_pg_size < f.size):
                            with cr.savepoint():
                                cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" TYPE %s' % (self._table, k, pg_varchar(f.size)))
                        except psycopg2.NotSupportedError:
                            # In place alter table cannot be done because a view is depending of this field.
                            # Do a manual copy. This will drop the view (that will be recreated later)
                            cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO temp_change_size' % (self._table, k))
                            cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, pg_varchar(f.size)))
                            cr.execute('UPDATE "%s" SET "%s"=temp_change_size::%s' % (self._table, k, pg_varchar(f.size)))
                            cr.execute('ALTER TABLE "%s" DROP COLUMN temp_change_size CASCADE' % (self._table,))
                        _schema.debug("Table '%s': column '%s' (type varchar) changed size from %s to %s",
                                      self._table, k, f_pg_size or 'unlimited', f.size or 'unlimited')
                        # Apply a known-safe cast via a temporary column.
                        if (f_pg_type==c[0]) and (f._type==c[1]):
                            if f_pg_type != f_obj_type:
                                cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO __temp_type_cast' % (self._table, k))
                                cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, c[2]))
                                cr.execute(('UPDATE "%s" SET "%s"= __temp_type_cast'+c[3]) % (self._table, k))
                                cr.execute('ALTER TABLE "%s" DROP COLUMN __temp_type_cast CASCADE' % (self._table,))
                                _schema.debug("Table '%s': column '%s' changed type from %s to %s",
                                              self._table, k, c[0], c[1])
                    # No safe cast: park the old data in a k_movedN column and
                    # recreate the column with the expected type.
                    if f_pg_type != f_obj_type:
                            newname = k + '_moved' + str(i)
                            cr.execute("SELECT count(1) FROM pg_class c,pg_attribute a " \
                                "WHERE c.relname=%s " \
                                "AND a.attname=%s " \
                                "AND c.oid=a.attrelid ", (self._table, newname))
                            if not cr.fetchone()[0]:
                        cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, k))
                        cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO "%s"' % (self._table, k, newname))
                        cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, get_pg_type(f)[1]))
                        cr.execute("COMMENT ON COLUMN %s.\"%s\" IS %%s" % (self._table, k), (f.string,))
                        _schema.debug("Table '%s': column '%s' has changed type (DB=%s, def=%s), data moved to column %s !",
                                      self._table, k, f_pg_type, f._type, newname)

                    # if the field is required and hasn't got a NOT NULL constraint
                    if f.required and f_pg_notnull == 0:
                        # set the field to the default value if any
                        if k in self._defaults:
                            if callable(self._defaults[k]):
                                default = self._defaults[k](self, cr, SUPERUSER_ID, context)
                                default = self._defaults[k]

                            if default is not None:
                                ss = self._columns[k]._symbol_set
                                query = 'UPDATE "%s" SET "%s"=%s WHERE "%s" is NULL' % (self._table, k, ss[0], k)
                                cr.execute(query, (ss[1](default),))
                        # add the NOT NULL constraint
                            cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k), log_exceptions=False)
                            _schema.debug("Table '%s': column '%s': added NOT NULL constraint",
                            msg = "Table '%s': unable to set a NOT NULL constraint on column '%s' !\n"\
                                "If you want to have it, you should update the records and execute manually:\n"\
                                "ALTER TABLE %s ALTER COLUMN %s SET NOT NULL"
                            _schema.warning(msg, self._table, k, self._table, k)
                    elif not f.required and f_pg_notnull == 1:
                        cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, k))
                        _schema.debug("Table '%s': column '%s': dropped NOT NULL constraint",
                    # Verify and reconcile the single-column index vs f.select.
                    indexname = '%s_%s_index' % (self._table, k)
                    cr.execute("SELECT indexname FROM pg_indexes WHERE indexname = %s and tablename = %s", (indexname, self._table))
                    res2 = cr.dictfetchall()
                    if not res2 and f.select:
                        cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (self._table, k, self._table, k))
                        if f._type == 'text':
                            # FIXME: for fields.text columns we should try creating GIN indexes instead (seems most suitable for an ERP context)
                            msg = "Table '%s': Adding (b-tree) index for %s column '%s'."\
                                "This is probably useless (does not work for fulltext search) and prevents INSERTs of long texts"\
                                " because there is a length limit for indexable btree values!\n"\
                                "Use a search view instead if you simply want to make the field searchable."
                            _schema.warning(msg, self._table, f._type, k)
                    if res2 and not f.select:
                        cr.execute('DROP INDEX "%s_%s_index"' % (self._table, k))
                        msg = "Table '%s': dropping index for column '%s' of type '%s' as it is not required anymore"
                        _schema.debug(msg, self._table, k, f._type)

                    if isinstance(f, fields.many2one) or (isinstance(f, fields.function) and f._type == 'many2one' and f.store):
                        dest_model = self.pool[f._obj]
                        if dest_model._table != 'ir_actions':
                            self._m2o_fix_foreign_key(cr, self._table, k, dest_model, f.ondelete)

                # The field doesn't exist in database. Create it if necessary.
                    if not isinstance(f, fields.function) or f.store:
                        # add the missing field
                        cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, get_pg_type(f)[1]))
                        cr.execute("COMMENT ON COLUMN %s.\"%s\" IS %%s" % (self._table, k), (f.string,))
                        _schema.debug("Table '%s': added column '%s' with definition=%s",
                                      self._table, k, get_pg_type(f)[1])

                        # Initialize the new column on pre-existing rows.
                        if not create and k in self._defaults:
                            if callable(self._defaults[k]):
                                default = self._defaults[k](self, cr, SUPERUSER_ID, context)
                                default = self._defaults[k]
                                ss = self._columns[k]._symbol_set
                                query = 'UPDATE "%s" SET "%s"=%s' % (self._table, k, ss[0])
                                cr.execute(query, (ss[1](default),))
                                _logger.debug("Table '%s': setting default value of new column %s", self._table, k)

                        # remember the functions to call for the stored fields
                        if isinstance(f, fields.function):
                            if f.store is not True: # i.e. if f.store is a dict
                                order = f.store[f.store.keys()[0]][2]
                            todo_end.append((order, self._update_store, (f, k)))

                        # and add constraints if needed
                        if isinstance(f, fields.many2one) or (isinstance(f, fields.function) and f._type == 'many2one' and f.store):
                            if f._obj not in self.pool:
                                raise except_orm('Programming Error', 'There is no reference available for %s' % (f._obj,))
                            dest_model = self.pool[f._obj]
                            ref = dest_model._table
                            # ir_actions is inherited so foreign key doesn't work on it
                            if ref != 'ir_actions':
                                self._m2o_add_foreign_key_checked(k, dest_model, f.ondelete)
                            cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (self._table, k, self._table, k))
                                cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k), log_exceptions=False)
                                _schema.debug("Table '%s': column '%s': added a NOT NULL constraint",
                                msg = "WARNING: unable to set column %s of table %s not null !\n"\
                                    "Try to re-run: openerp-server --update=module\n"\
                                    "If it doesn't work, update records and execute manually:\n"\
                                    "ALTER TABLE %s ALTER COLUMN %s SET NOT NULL"
                                _logger.warning(msg, k, self._table, self._table, k)

        # _auto is False: only probe whether the table/view already exists.
        cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (self._table,))
        create = not bool(cr.fetchone())

    cr.commit() # start a new transaction

        self._add_sql_constraints(cr)

        self._execute_sql(cr)

        self._parent_store_compute(cr)
def _auto_end(self, cr, context=None):
    """ Create the foreign keys recorded by _auto_init. """
    # Each queued entry is (table, column, referenced table, ondelete rule),
    # as produced by _m2o_add_foreign_key_checked/_unchecked.
    for t, k, r, d in self._foreign_keys:
        cr.execute('ALTER TABLE "%s" ADD FOREIGN KEY ("%s") REFERENCES "%s" ON DELETE %s' % (t, k, r, d))
        # Record the FK so module uninstall can drop it later.
        self._save_constraint(cr, "%s_%s_fkey" % (t, k), 'f')
    del self._foreign_keys
def _table_exist(self, cr):
    # Probe the catalog for a table or view named after this model.
    cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (self._table,))
    # NOTE(review): the return statement (presumably based on the query
    # result / cr.rowcount) is not visible in this extract.
def _create_table(self, cr):
    """Create this model's table with a serial `id` primary key and attach
    the model description as a table comment.
    """
    table = self._table
    cr.execute('CREATE TABLE "%s" (id SERIAL NOT NULL, PRIMARY KEY(id))' % (table,))
    comment_query = "COMMENT ON TABLE \"%s\" IS %%s" % table
    cr.execute(comment_query, (self._description,))
    _schema.debug("Table '%s': created", table)
def _parent_columns_exist(self, cr):
    # Probe for the parent_left column on this model's table; _auto_init
    # uses the outcome to decide whether _create_parent_columns is needed.
    cr.execute("""SELECT c.relname
        FROM pg_class c, pg_attribute a
        WHERE c.relname=%s AND a.attname=%s AND c.oid=a.attrelid
        """, (self._table, 'parent_left'))
    # NOTE(review): the return statement is not visible in this extract.
def _create_parent_columns(self, cr):
    # Add the nested-set interval columns used by the parent-store
    # (parent_left/parent_right), then sanity-check the ORM definitions.
    # NOTE(review): the `self._name` argument lines of the _logger.error
    # calls are not visible in this extract.
    cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_left" INTEGER' % (self._table,))
    cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_right" INTEGER' % (self._table,))
    if 'parent_left' not in self._columns:
        _logger.error('create a column parent_left on object %s: fields.integer(\'Left Parent\', select=1)',
        _schema.debug("Table '%s': added column '%s' with definition=%s",
                      self._table, 'parent_left', 'INTEGER')
    elif not self._columns['parent_left'].select:
        _logger.error('parent_left column on object %s must be indexed! Add select=1 to the field definition)',
    if 'parent_right' not in self._columns:
        _logger.error('create a column parent_right on object %s: fields.integer(\'Right Parent\', select=1)',
        _schema.debug("Table '%s': added column '%s' with definition=%s",
                      self._table, 'parent_right', 'INTEGER')
    elif not self._columns['parent_right'].select:
        _logger.error('parent_right column on object %s must be indexed! Add select=1 to the field definition)',
    # The hierarchy link column must delete children along with parents.
    if self._columns[self._parent_name].ondelete not in ('cascade', 'restrict'):
        _logger.error("The column %s on object %s must be set as ondelete='cascade' or 'restrict'",
                      self._parent_name, self._name)
def _add_log_columns(self, cr):
    # For each audit column declared in LOG_ACCESS_COLUMNS, probe the
    # catalog and add the column when missing.
    # NOTE(review): the catalog-probe cr.execute() header and the rowcount
    # guard around the ALTER TABLE are not visible in this extract.
    for field, field_def in LOG_ACCESS_COLUMNS.iteritems():
            FROM pg_class c, pg_attribute a
            WHERE c.relname=%s AND a.attname=%s AND c.oid=a.attrelid
            """, (self._table, field))
        cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, field, field_def))
        _schema.debug("Table '%s': added column '%s' with definition=%s",
                      self._table, field, field_def)
3040 def _select_column_data(self, cr):
3041 # attlen is the number of bytes necessary to represent the type when
3042 # the type has a fixed size. If the type has a varying size attlen is
3043 # -1 and atttypmod is the size limit + 4, or -1 if there is no limit.
3044 cr.execute("SELECT c.relname,a.attname,a.attlen,a.atttypmod,a.attnotnull,a.atthasdef,t.typname,CASE WHEN a.attlen=-1 THEN (CASE WHEN a.atttypmod=-1 THEN 0 ELSE a.atttypmod-4 END) ELSE a.attlen END as size " \
3045 "FROM pg_class c,pg_attribute a,pg_type t " \
3046 "WHERE c.relname=%s " \
3047 "AND c.oid=a.attrelid " \
3048 "AND a.atttypid=t.oid", (self._table,))
3049 return dict(map(lambda x: (x['attname'], x),cr.dictfetchall()))
3052 def _o2m_raise_on_missing_reference(self, cr, f):
3053 # TODO this check should be a method on fields.one2many.
3054 if f._obj in self.pool:
3055 other = self.pool[f._obj]
3056 # TODO the condition could use fields_get_keys().
3057 if f._fields_id not in other._columns.keys():
3058 if f._fields_id not in other._inherit_fields.keys():
3059 raise except_orm('Programming Error', "There is no reference field '%s' found for '%s'" % (f._fields_id, f._obj,))
def _m2m_raise_or_create_relation(self, cr, f):
    # Create the many2many relation table for field `f` when it does not
    # exist yet, with FKs, a unique pair constraint, and per-column indexes.
    m2m_tbl, col1, col2 = f._sql_names(self)
    # do not create relations for custom fields as they do not belong to a module
    # they will be automatically removed when dropping the corresponding ir.model.field
    # table name for custom relation all starts with x_, see __init__
    if not m2m_tbl.startswith('x_'):
        self._save_relation_table(cr, m2m_tbl)
    cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (m2m_tbl,))
    if not cr.dictfetchall():
        if f._obj not in self.pool:
            raise except_orm('Programming Error', 'Many2Many destination model does not exist: `%s`' % (f._obj,))
        dest_model = self.pool[f._obj]
        ref = dest_model._table
        cr.execute('CREATE TABLE "%s" ("%s" INTEGER NOT NULL, "%s" INTEGER NOT NULL, UNIQUE("%s","%s"))' % (m2m_tbl, col1, col2, col1, col2))
        # create foreign key references with ondelete=cascade, unless the targets are SQL views
        cr.execute("SELECT relkind FROM pg_class WHERE relkind IN ('v') AND relname=%s", (ref,))
        if not cr.fetchall():
            self._m2o_add_foreign_key_unchecked(m2m_tbl, col2, dest_model, 'cascade')
        cr.execute("SELECT relkind FROM pg_class WHERE relkind IN ('v') AND relname=%s", (self._table,))
        if not cr.fetchall():
            self._m2o_add_foreign_key_unchecked(m2m_tbl, col1, self, 'cascade')
        cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (m2m_tbl, col1, m2m_tbl, col1))
        cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (m2m_tbl, col2, m2m_tbl, col2))
        cr.execute("COMMENT ON TABLE \"%s\" IS 'RELATION BETWEEN %s AND %s'" % (m2m_tbl, self._table, ref))
        _schema.debug("Create table '%s': m2m relation between '%s' and '%s'", m2m_tbl, self._table, ref)
def _add_sql_constraints(self, cr):
    """
    Modify this model's database table constraints so they match the one in
    _sql_constraints.
    """
    # NOTE(review): the sql_actions dict literal headers ('drop'/'add'
    # entries, 'execute' and 'order' keys) and the try/except around the
    # execution loop are not visible in this extract.
    def unify_cons_text(txt):
        # Normalize constraint definitions for comparison with the catalog.
        return txt.lower().replace(', ',',').replace(' (','(')

    for (key, con, _) in self._sql_constraints:
        conname = '%s_%s' % (self._table, key)
        # Record the constraint so module uninstall can drop it later.
        self._save_constraint(cr, conname, 'u')
        cr.execute("SELECT conname, pg_catalog.pg_get_constraintdef(oid, true) as condef FROM pg_constraint where conname=%s", (conname,))
        existing_constraints = cr.dictfetchall()
                'query': 'ALTER TABLE "%s" DROP CONSTRAINT "%s"' % (self._table, conname, ),
                'msg_ok': "Table '%s': dropped constraint '%s'. Reason: its definition changed from '%%s' to '%s'" % (
                    self._table, conname, con),
                'msg_err': "Table '%s': unable to drop \'%s\' constraint !" % (self._table, con),
                'query': 'ALTER TABLE "%s" ADD CONSTRAINT "%s" %s' % (self._table, conname, con,),
                'msg_ok': "Table '%s': added constraint '%s' with definition=%s" % (self._table, conname, con),
                'msg_err': "Table '%s': unable to add \'%s\' constraint !\n If you want to have it, you should update the records and execute manually:\n%%s" % (
        if not existing_constraints:
            # constraint does not exists:
            sql_actions['add']['execute'] = True
            sql_actions['add']['msg_err'] = sql_actions['add']['msg_err'] % (sql_actions['add']['query'], )
        elif unify_cons_text(con) not in [unify_cons_text(item['condef']) for item in existing_constraints]:
            # constraint exists but its definition has changed:
            sql_actions['drop']['execute'] = True
            sql_actions['drop']['msg_ok'] = sql_actions['drop']['msg_ok'] % (existing_constraints[0]['condef'].lower(), )
            sql_actions['add']['execute'] = True
            sql_actions['add']['msg_err'] = sql_actions['add']['msg_err'] % (sql_actions['add']['query'], )
        # we need to add the constraint:
        sql_actions = [item for item in sql_actions.values()]
        sql_actions.sort(key=lambda x: x['order'])
        for sql_action in [action for action in sql_actions if action['execute']]:
                cr.execute(sql_action['query'])
                _schema.debug(sql_action['msg_ok'])
                _schema.warning(sql_action['msg_err'])
def _execute_sql(self, cr):
    """ Execute the SQL code from the _sql attribute (if any)."""
    if hasattr(self, "_sql"):
        # Statements are separated by ';'; newlines are stripped from each.
        for line in self._sql.split(';'):
            line2 = line.replace('\n', '').strip()
            # NOTE(review): the statement executing line2 is not visible
            # in this extract.
3159 # Update objects that uses this one to update their _inherits fields
3162 def _inherits_reload_src(self):
3163 """ Recompute the _inherit_fields mapping on each _inherits'd child model."""
3164 for obj in self.pool.models.values():
3165 if self._name in obj._inherits:
3166 obj._inherits_reload()
def _inherits_reload(self):
    """ Recompute the _inherit_fields mapping.

    This will also call itself on each inherits'd child model.
    """
    # res maps field name -> (parent model, link column name, column object,
    # original parent model). NOTE(review): its initialisation line is not
    # visible in this extract.
    for table in self._inherits:
        other = self.pool[table]
        for col in other._columns.keys():
            res[col] = (table, self._inherits[table], other._columns[col], table)
        # Fields the parent itself inherits keep their original parent.
        for col in other._inherit_fields.keys():
            res[col] = (table, self._inherits[table], other._inherit_fields[col][2], other._inherit_fields[col][3])
    self._inherit_fields = res
    self._all_columns = self._get_column_infos()
    # Propagate the recomputation to models inheriting from this one.
    self._inherits_reload_src()
def _get_column_infos(self):
    """Returns a dict mapping all fields names (direct fields and
    inherited field via _inherits) to a ``column_info`` struct
    giving detailed columns """
    # Inherited fields first, then direct columns, so a direct column wins
    # over an inherited one with the same name.
    # NOTE(review): the initialisation and return of `result` are not
    # visible in this extract.
    for k, (parent, m2o, col, original_parent) in self._inherit_fields.iteritems():
        result[k] = fields.column_info(k, col, parent, m2o, original_parent)
    for k, col in self._columns.iteritems():
        result[k] = fields.column_info(k, col)
3199 def _inherits_check(self):
3200 for table, field_name in self._inherits.items():
3201 if field_name not in self._columns:
3202 _logger.info('Missing many2one field definition for _inherits reference "%s" in "%s", using default one.', field_name, self._name)
3203 self._columns[field_name] = fields.many2one(table, string="Automatically created field to link to parent %s" % table,
3204 required=True, ondelete="cascade")
3205 elif not self._columns[field_name].required or self._columns[field_name].ondelete.lower() not in ("cascade", "restrict"):
3206 _logger.warning('Field definition for _inherits reference "%s" in "%s" must be marked as "required" with ondelete="cascade" or "restrict", forcing it to required + cascade.', field_name, self._name)
3207 self._columns[field_name].required = True
3208 self._columns[field_name].ondelete = "cascade"
def fields_get(self, cr, user, allfields=None, context=None, write_access=True):
    """ Return the definition of each field.

    The returned value is a dictionary (indiced by field name) of
    dictionaries. The _inherits'd fields are included. The string, help,
    and selection (if present) attributes are translated.

    :param cr: database cursor
    :param user: current user id
    :param allfields: list of fields
    :param context: context arguments, like lang, time zone
    :return: dictionary of field dictionaries, each one describing a field of the business object
    :raise AccessError: * if user has no create/write rights on the requested object
    """
    # NOTE(review): the context/res initialisation, the `continue`, the
    # translation guards and the final return are not visible in this
    # extract.
    # Effective write access requires either write or create rights.
    write_access = self.check_access_rights(cr, user, 'write', raise_exception=False) \
        or self.check_access_rights(cr, user, 'create', raise_exception=False)
    translation_obj = self.pool.get('ir.translation')
    # Include the definitions of all _inherits'd parents first.
    for parent in self._inherits:
        res.update(self.pool[parent].fields_get(cr, user, allfields, context))
    for f, field in self._columns.iteritems():
        # Skip fields not requested or hidden from the user by groups.
        if (allfields and f not in allfields) or \
            (field.groups and not self.user_has_groups(cr, user, groups=field.groups, context=context)):
        res[f] = fields.field_to_dict(self, cr, user, field, context=context)
        if not write_access:
            res[f]['readonly'] = True
            res[f]['states'] = {}
        # Translate string/help attributes when a language is requested.
        if 'lang' in context:
            if 'string' in res[f]:
                res_trans = translation_obj._get_source(cr, user, self._name + ',' + f, 'field', context['lang'])
                res[f]['string'] = res_trans
            if 'help' in res[f]:
                help_trans = translation_obj._get_source(cr, user, self._name + ',' + f, 'help', context['lang'])
                res[f]['help'] = help_trans
def get_empty_list_help(self, cr, user, help, context=None):
    """ Generic method giving the help message displayed when having
    no result to display in a list or kanban view. By default it returns
    the help given in parameter that is generally the help message
    defined in the action.
    """
    # NOTE(review): the return statement (presumably `return help`) is not
    # visible in this extract.
def check_field_access_rights(self, cr, user, operation, fields, context=None):
    """
    Check the user access rights on the given fields. This raises Access
    Denied if the user does not have the rights. Otherwise it returns the
    fields (as is if the fields is not falsy, or the readable/writable
    fields if fields is falsy).
    """
    # NOTE(review): the predicate's `def p(field_name):` header, its
    # `return True` branches, the if/else dispatch on `fields`, the raise
    # header and the final return are not visible in this extract.
        """Predicate to test if the user has access to the given field name."""
        # Ignore requested field if it doesn't exist. This is ugly but
        # it seems to happen at least with 'name_alias' on res.partner.
        if field_name not in self._all_columns:
        field = self._all_columns[field_name].column
        # Only non-superusers are constrained by field-level groups.
        if user != SUPERUSER_ID and field.groups:
            return self.user_has_groups(cr, user, groups=field.groups, context=context)
        # No fields requested: return every field the user may access.
        fields = filter(p, self._all_columns.keys())
        # Fields requested: reject the call if any of them is inaccessible.
        filtered_fields = filter(lambda a: not p(a), fields)
            _logger.warning('Access Denied by ACLs for operation: %s, uid: %s, model: %s, fields: %s', operation, user, self._name, ', '.join(filtered_fields))
                _('The requested operation cannot be completed due to security restrictions. '
                  'Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
                (self._description, operation))
def read(self, cr, user, ids, fields=None, context=None, load='_classic_read'):
    """ Read records with given ids with the given fields

    :param cr: database cursor
    :param user: current user id
    :param ids: id or list of the ids of the records to read
    :param fields: optional list of field names to return (default: all fields would be returned)
    :type fields: list (example ['field_name_1', ...])
    :param context: optional context dictionary - it may contains keys for specifying certain options
                    like ``context_lang``, ``context_tz`` to alter the results of the call.
                    A special ``bin_size`` boolean flag may also be passed in the context to request the
                    value of all fields.binary columns to be returned as the size of the binary instead of its
                    contents. This can also be selectively overriden by passing a field-specific flag
                    in the form ``bin_size_XXX: True/False`` where ``XXX`` is the name of the field.
                    Note: The ``bin_size_XXX`` form is new in OpenERP v6.0.
    :return: list of dictionaries((dictionary per record asked)) with requested field values
    :rtype: [{‘name_of_the_field’: value, ...}, ...]
    :raise AccessError: * if user has no read rights on the requested object
                        * if user tries to bypass access rules for read on the requested object
    """
    # NOTE(review): the scalar-vs-list normalisation of `ids` into `select`
    # and the final return are not visible in this extract.
    self.check_access_rights(cr, user, 'read')
    fields = self.check_field_access_rights(cr, user, 'read', fields)
    if isinstance(ids, (int, long)):
    # Accept browse-like dicts by extracting their 'id'.
    select = map(lambda x: isinstance(x, dict) and x['id'] or x, select)
    result = self._read_flat(cr, user, select, fields, context, load)
    # A scalar id in gives a single record dict (or False) out.
    if isinstance(ids, (int, long)):
        return result and result[0] or False
def _read_flat(self, cr, user, ids, fields_to_read, context=None, load='_classic_read'):
    # Low-level read: fetch stored ("classic") columns via SQL, then layer
    # translations, _inherits'd parent values and function-field results on
    # top. NOTE(review): many original control-flow lines (context/ids
    # guards, if/else headers, res3 construction, the final return) are not
    # visible in this extract.
    if fields_to_read is None:
        fields_to_read = self._columns.keys()
        fields_to_read = list(set(fields_to_read))

    # all inherited fields + all non inherited fields for which the attribute whose name is in load is True
    fields_pre = [f for f in fields_to_read if
                     f == self.CONCURRENCY_CHECK_FIELD
                     or (f in self._columns and getattr(self._columns[f], '_classic_write'))
                 ] + self._inherits.values()

        def convert_field(f):
            # Build the SELECT expression for one stored column.
            f_qual = '%s."%s"' % (self._table, f) # need fully-qualified references in case len(tables) > 1
            if f in ('create_date', 'write_date'):
                return "date_trunc('second', %s) as %s" % (f_qual, f)
            if f == self.CONCURRENCY_CHECK_FIELD:
                if self._log_access:
                    return "COALESCE(%s.write_date, %s.create_date, (now() at time zone 'UTC'))::timestamp AS %s" % (self._table, self._table, f,)
                return "(now() at time zone 'UTC')::timestamp AS %s" % (f,)
            if isinstance(self._columns[f], fields.binary) and context.get('bin_size', False):
                return 'length(%s) as "%s"' % (f_qual, f)

        # FIXME: The query construction needs to be rewritten using the internal Query
        # object, as in search(), to avoid ambiguous column references when
        # reading/sorting on a table that is auto_joined to another table with
        # common columns (e.g. the magical columns)

        # Construct a clause for the security rules.
        # 'tables' hold the list of tables necessary for the SELECT including the ir.rule clauses,
        # or will at least contain self._table.
        rule_clause, rule_params, tables = self.pool.get('ir.rule').domain_get(cr, user, self._name, 'read', context=context)

        fields_pre2 = map(convert_field, fields_pre)
        order_by = self._parent_order or self._order
        select_fields = ','.join(fields_pre2 + ['%s.id' % self._table])
        query = 'SELECT %s FROM %s WHERE %s.id IN %%s' % (select_fields, ','.join(tables), self._table)
            query += " AND " + (' OR '.join(rule_clause))
        query += " ORDER BY " + order_by
        for sub_ids in cr.split_for_in_conditions(ids):
            cr.execute(query, [tuple(sub_ids)] + rule_params)
            results = cr.dictfetchall()
            result_ids = [x['id'] for x in results]
            # Rows filtered out by ir.rule mean an access-rule violation.
            self._check_record_rules_result_count(cr, user, sub_ids, result_ids, 'read', context=context)
        # No stored column requested: still enforce the read access rules.
        self.check_access_rule(cr, user, ids, 'read', context=context)
        res = map(lambda x: {'id': x}, ids)

    # Substitute translated values for translatable stored fields.
    if context.get('lang'):
        for f in fields_pre:
            if f == self.CONCURRENCY_CHECK_FIELD:
            if self._columns[f].translate:
                ids = [x['id'] for x in res]
                #TODO: optimize out of this loop
                res_trans = self.pool.get('ir.translation')._get_ids(cr, user, self._name+','+f, 'model', context['lang'], ids)
                    r[f] = res_trans.get(r['id'], False) or r[f]

    # Merge in the fields coming from the _inherits'd parent records.
    for table in self._inherits:
        col = self._inherits[table]
        cols = [x for x in intersect(self._inherit_fields.keys(), fields_to_read) if x not in self._columns.keys()]
        res2 = self.pool[table].read(cr, user, [x[col] for x in res], cols, context, load)

            if not record[col]: # if the record is deleted from _inherits table?
                record.update(res3[record[col]])
            if col not in fields_to_read:

    # all fields which need to be post-processed by a simple function (symbol_get)
    fields_post = filter(lambda x: x in self._columns and self._columns[x]._symbol_get, fields_to_read)
        for f in fields_post:
            r[f] = self._columns[f]._symbol_get(r[f])
    ids = [x['id'] for x in res]

    # all non inherited fields for which the attribute whose name is in load is False
    fields_post = filter(lambda x: x in self._columns and not getattr(self._columns[x], load), fields_to_read)

    # Compute POST fields
    for f in fields_post:
        # Group function fields by their _multi key so grouped ones can be
        # computed in a single get() call.
        todo.setdefault(self._columns[f]._multi, [])
        todo[self._columns[f]._multi].append(f)
    for key, val in todo.items():
            res2 = self._columns[val[0]].get(cr, self, ids, val, user, context=context, values=res)
            assert res2 is not None, \
                'The function field "%s" on the "%s" model returned None\n' \
                '(a dictionary was expected).' % (val[0], self._name)
                    if isinstance(res2[record['id']], str): res2[record['id']] = eval(res2[record['id']]) #TOCHECK : why got string instend of dict in python2.6
                    multi_fields = res2.get(record['id'],{})
                        record[pos] = multi_fields.get(pos,[])
            res2 = self._columns[f].get(cr, self, ids, f, user, context=context, values=res)
                    record[f] = res2[record['id']]

    # Warn about deprecated fields now that fields_pre and fields_post are computed
    # Explicitly use list() because we may receive tuples
    for f in list(fields_pre) + list(fields_post):
        field_column = self._all_columns.get(f) and self._all_columns.get(f).column
        if field_column and field_column.deprecated:
            _logger.warning('Field %s.%s is deprecated: %s', self._name, f, field_column.deprecated)
3468 for field in vals.copy():
3470 if field in self._columns:
3471 fobj = self._columns[field]
3477 for group in groups:
3478 module = group.split(".")[0]
3479 grp = group.split(".")[1]
3480 cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name=%s and module=%s and model=%s) and uid=%s", \
3481 (grp, module, 'res.groups', user))
3482 readonly = cr.fetchall()
3483 if readonly[0][0] >= 1:
3486 elif readonly[0][0] == 0:
3492 if type(vals[field]) == type([]):
3494 elif type(vals[field]) == type(0.0):
3496 elif type(vals[field]) == type(''):
3497 vals[field] = '=No Permission='
3501 if vals[field] is None:
3506 # TODO check READ access
def perm_read(self, cr, user, ids, context=None, details=True):
    """
    Returns some metadata about the given records.

    :param details: if True, \*_uid fields are replaced with the name of the user
    :return: list of ownership dictionaries for each requested record
    :rtype: list of dictionaries with the following keys:

                * create_uid: user who created the record
                * create_date: date when the record was created
                * write_uid: last user who changed the record
                * write_date: date of the last change to the record
                * xmlid: XML ID to use to refer to this record (if there is one), in format ``module.name``
    """
    # Remember whether a single id (int/long) was passed, so the caller can
    # be given a single result instead of a list.
    uniq = isinstance(ids, (int, long))
    if self._log_access:
        # The audit columns only exist on the table when _log_access is set.
        fields += ['create_uid', 'create_date', 'write_uid', 'write_date']
    quoted_table = '"%s"' % self._table
    fields_str = ",".join('%s.%s'%(quoted_table, field) for field in fields)
    # LEFT JOIN against ir_model_data to also fetch the record's external
    # (XML) id, when one exists.
    query = '''SELECT %s, __imd.module, __imd.name
               FROM %s LEFT JOIN ir_model_data __imd
                   ON (__imd.model = %%s and __imd.res_id = %s.id)
               WHERE %s.id IN %%s''' % (fields_str, quoted_table, quoted_table, quoted_table)
    cr.execute(query, (self._name, tuple(ids)))
    res = cr.dictfetchall()
    # Normalize SQL NULLs to False; optionally replace the numeric *_uid
    # values with (id, name) pairs via res.users.name_get().
    r[key] = r[key] or False
    if details and key in ('write_uid', 'create_uid') and r[key]:
        r[key] = self.pool.get('res.users').name_get(cr, user, [r[key]])[0]
    pass # Leave the numeric uid there
    # Build the dotted external id "module.name" from the joined columns,
    # then drop the raw join columns from the result dict.
    r['xmlid'] = ("%(module)s.%(name)s" % r) if r['name'] else False
    del r['name'], r['module']
def _check_concurrency(self, cr, ids, context):
    """Optimistic concurrency check: raise if any of ``ids`` was modified
    in the database after the timestamp the client last saw.

    The client-side timestamps travel in
    ``context[self.CONCURRENCY_CHECK_FIELD]``, keyed by "<model>,<id>".

    :raise except_orm: 'ConcurrencyException' naming the first stale record.
    """
    # Nothing to check without client timestamps or audit columns.
    if not (context.get(self.CONCURRENCY_CHECK_FIELD) and self._log_access):
    # One clause per record; COALESCE falls back to create_date (or "now")
    # for records that were never written to.
    check_clause = "(id = %s AND %s < COALESCE(write_date, create_date, (now() at time zone 'UTC'))::timestamp)"
    for sub_ids in cr.split_for_in_conditions(ids):
    id_ref = "%s,%s" % (self._name, id)
    # pop() so each client timestamp is consumed at most once per call.
    update_date = context[self.CONCURRENCY_CHECK_FIELD].pop(id_ref, None)
    ids_to_check.extend([id, update_date])
    if not ids_to_check:
    # Each clause consumes two parameters (id, timestamp), hence the
    # len/2 repetitions of check_clause.
    cr.execute("SELECT id FROM %s WHERE %s" % (self._table, " OR ".join([check_clause]*(len(ids_to_check)/2))), tuple(ids_to_check))
    # mention the first one only to keep the error message readable
    raise except_orm('ConcurrencyException', _('A document was modified since you last viewed it (%s:%d)') % (self._description, res[0]))
def _check_record_rules_result_count(self, cr, uid, ids, result_ids, operation, context=None):
    """Verify the returned rows after applying record rules matches
    the length of `ids`, and raise an appropriate exception if it does not.

    :param ids: ids that were requested
    :param result_ids: ids actually returned after the ir.rule filtering
    :param operation: one of 'read', 'write', 'unlink', 'create'
    :raise except_orm: access denied, or missing document(s)
    """
    ids, result_ids = set(ids), set(result_ids)
    missing_ids = ids - result_ids
    # Attempt to distinguish record rule restriction vs deleted records,
    # to provide a more specific error message - check if the missing
    # ids still exist in the table (then they were hidden by rules).
    cr.execute('SELECT id FROM ' + self._table + ' WHERE id IN %s', (tuple(missing_ids),))
    forbidden_ids = [x[0] for x in cr.fetchall()]
    # the missing ids are (at least partially) hidden by access rules
    if uid == SUPERUSER_ID:
        # superuser bypasses record rules
    _logger.warning('Access Denied by record rules for operation: %s on record ids: %r, uid: %s, model: %s', operation, forbidden_ids, uid, self._name)
    raise except_orm(_('Access Denied'),
                     _('The requested operation cannot be completed due to security restrictions. Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
                        (self._description, operation))
    # If we get here, the missing_ids are not in the database
    if operation in ('read','unlink'):
        # No need to warn about deleting an already deleted record.
        # And no error when reading a record that was deleted, to prevent spurious
        # errors for non-transactional search/read sequences coming from clients
    _logger.warning('Failed operation on deleted record(s): %s, uid: %s, model: %s', operation, uid, self._name)
    raise except_orm(_('Missing document(s)'),
                     _('One of the documents you are trying to access has been deleted, please try again after refreshing.'))
def check_access_rights(self, cr, uid, operation, raise_exception=True): # no context on purpose.
    """Check whether ``operation`` is permitted for user ``uid`` on this model.

    Delegates the decision to the ``ir.model.access`` model; when
    ``raise_exception`` is True a denial raises instead of returning False.
    """
    access_obj = self.pool.get('ir.model.access')
    return access_obj.check(cr, uid, self._name, operation, raise_exception)
def check_access_rule(self, cr, uid, ids, operation, context=None):
    """Verifies that the operation given by ``operation`` is allowed for the user
    according to ir.rules.

    :param operation: one of ``write``, ``unlink``
    :raise except_orm: * if current ir.rules do not permit this operation.
    :return: None if the operation is allowed
    """
    # Record rules never apply to the superuser.
    if uid == SUPERUSER_ID:
    if self.is_transient():
        # Only one single implicit access rule for transient models: owner only!
        # This is ok to hardcode because we assert that TransientModels always
        # have log_access enabled so that the create_uid column is always there.
        # And even with _inherits, these fields are always present in the local
        # table too, so no need for JOINs.
        cr.execute("""SELECT distinct create_uid
                      WHERE id IN %%s""" % self._table, (tuple(ids),))
        uids = [x[0] for x in cr.fetchall()]
        # All requested records must belong to the current user.
        if len(uids) != 1 or uids[0] != uid:
            raise except_orm(_('Access Denied'),
                             _('For this kind of document, you may only access records you created yourself.\n\n(Document type: %s)') % (self._description,))
        # Regular models: fetch the ir.rule domain as SQL fragments and
        # verify each batch of ids survives the extra WHERE clauses.
        where_clause, where_params, tables = self.pool.get('ir.rule').domain_get(cr, uid, self._name, operation, context=context)
        where_clause = ' and ' + ' and '.join(where_clause)
        for sub_ids in cr.split_for_in_conditions(ids):
            cr.execute('SELECT ' + self._table + '.id FROM ' + ','.join(tables) +
                       ' WHERE ' + self._table + '.id IN %s' + where_clause,
                       [sub_ids] + where_params)
            returned_ids = [x['id'] for x in cr.dictfetchall()]
            # Raises if fewer rows came back than ids were asked for.
            self._check_record_rules_result_count(cr, uid, sub_ids, returned_ids, operation, context=context)
def create_workflow(self, cr, uid, ids, context=None):
    """Create a workflow instance for each given record IDs."""
    # Imported locally — presumably to avoid a circular import at module
    # load time (orm is imported very early); TODO confirm.
    from openerp import workflow
        workflow.trg_create(uid, self._name, res_id, cr)
def delete_workflow(self, cr, uid, ids, context=None):
    """Delete the workflow instances bound to the given record IDs."""
    # Local import mirrors the other workflow helpers on this model.
    from openerp import workflow
        workflow.trg_delete(uid, self._name, res_id, cr)
def step_workflow(self, cr, uid, ids, context=None):
    """Reevaluate the workflow instances of the given record IDs."""
    # Local import mirrors the other workflow helpers on this model.
    from openerp import workflow
        workflow.trg_write(uid, self._name, res_id, cr)
def signal_workflow(self, cr, uid, ids, signal, context=None):
    """Send given workflow signal and return a dict mapping ids to workflow results"""
    # Local import mirrors the other workflow helpers on this model.
    from openerp import workflow
        # One trg_validate call per record; its return value is collected
        # per record id.
        result[res_id] = workflow.trg_validate(uid, self._name, res_id, signal, cr)
def redirect_workflow(self, cr, uid, old_new_ids, context=None):
    """ Rebind the workflow instance bound to the given 'old' record IDs to
        the given 'new' IDs. (``old_new_ids`` is a list of pairs ``(old, new)``.
    """
    # Local import mirrors the other workflow helpers on this model.
    from openerp import workflow
    for old_id, new_id in old_new_ids:
        workflow.trg_redirect(uid, self._name, old_id, new_id, cr)
def unlink(self, cr, uid, ids, context=None):
    """
    Delete records with given ids

    :param cr: database cursor
    :param uid: current user id
    :param ids: id or list of ids
    :param context: (optional) context arguments, like lang, time zone

    :raise AccessError: * if user has no unlink rights on the requested object
                        * if user tries to bypass access rules for unlink on the requested object
    :raise UserError: if the record is default property for other records
    """
    # Normalize a single id to a list.
    if isinstance(ids, (int, long)):
    # Snapshot which stored function fields will need recomputation,
    # BEFORE the rows disappear.
    result_store = self._store_get_values(cr, uid, ids, self._all_columns.keys(), context)
    self._check_concurrency(cr, ids, context)
    self.check_access_rights(cr, uid, 'unlink')
    ir_property = self.pool.get('ir.property')
    ir_attachment_obj = self.pool.get('ir.attachment')
    # Check if the records are used as default properties.
    domain = [('res_id', '=', False),
              ('value_reference', 'in', ['%s,%s' % (self._name, i) for i in ids]),
    if ir_property.search(cr, uid, domain, context=context):
        raise except_orm(_('Error'), _('Unable to delete this document because it is used as a default property'))
    # Delete the records' properties.
    property_ids = ir_property.search(cr, uid, [('res_id', 'in', ['%s,%s' % (self._name, i) for i in ids])], context=context)
    ir_property.unlink(cr, uid, property_ids, context=context)
    self.delete_workflow(cr, uid, ids, context=context)
    self.check_access_rule(cr, uid, ids, 'unlink', context=context)
    pool_model_data = self.pool.get('ir.model.data')
    ir_values_obj = self.pool.get('ir.values')
    for sub_ids in cr.split_for_in_conditions(ids):
        cr.execute('delete from ' + self._table + ' ' \
                   'where id IN %s', (sub_ids,))
        # Removing the ir_model_data reference if the record being deleted is a record created by xml/csv file,
        # as these are not connected with real database foreign keys, and would be dangling references.
        # Note: following steps performed as admin to avoid access rights restrictions, and with no context
        # to avoid possible side-effects during admin calls.
        # Step 1. Calling unlink of ir_model_data only for the affected IDS
        reference_ids = pool_model_data.search(cr, SUPERUSER_ID, [('res_id','in',list(sub_ids)),('model','=',self._name)])
        # Step 2. Marching towards the real deletion of referenced records
            pool_model_data.unlink(cr, SUPERUSER_ID, reference_ids)
        # For the same reason, removing the record relevant to ir_values
        ir_value_ids = ir_values_obj.search(cr, uid,
            ['|',('value','in',['%s,%s' % (self._name, sid) for sid in sub_ids]),'&',('res_id','in',list(sub_ids)),('model','=',self._name)],
            ir_values_obj.unlink(cr, uid, ir_value_ids, context=context)
        # For the same reason, removing the record relevant to ir_attachment
        # The search is performed with sql as the search method of ir_attachment is overridden to hide attachments of deleted records
        cr.execute('select id from ir_attachment where res_model = %s and res_id in %s', (self._name, sub_ids))
        ir_attachment_ids = [ir_attachment[0] for ir_attachment in cr.fetchall()]
        if ir_attachment_ids:
            ir_attachment_obj.unlink(cr, uid, ir_attachment_ids, context=context)
    # Recompute the stored function fields collected earlier, excluding
    # ids that were just deleted on this very model.
    for order, obj_name, store_ids, fields in result_store:
        if obj_name == self._name:
            effective_store_ids = list(set(store_ids) - set(ids))
            effective_store_ids = store_ids
        if effective_store_ids:
            obj = self.pool[obj_name]
            # Keep only ids that still exist in the target table.
            cr.execute('select id from '+obj._table+' where id IN %s', (tuple(effective_store_ids),))
            rids = map(lambda x: x[0], cr.fetchall())
                obj._store_set_values(cr, uid, rids, fields, context)
def write(self, cr, user, ids, vals, context=None):
    """
    Update records with given ids with the given field values

    :param cr: database cursor
    :param user: current user id
    :param ids: object id or list of object ids to update according to **vals**
    :param vals: field values to update, e.g {'field_name': new_field_value, ...}
    :type vals: dictionary
    :param context: (optional) context arguments, e.g. {'lang': 'en_us', 'tz': 'UTC', ...}
    :type context: dictionary

    :raise AccessError: * if user has no write rights on the requested object
                        * if user tries to bypass access rules for write on the requested object
    :raise ValidateError: if user tries to enter invalid value for a field that is not in selection
    :raise UserError: if a loop would be created in a hierarchy of objects a result of the operation (such as setting an object as its own parent)

    **Note**: The type of field values to pass in ``vals`` for relationship fields is specific:

        + For a many2many field, a list of tuples is expected.
          Here is the list of tuple that are accepted, with the corresponding semantics ::

             (0, 0,  { values })    link to a new record that needs to be created with the given values dictionary
             (1, ID, { values })    update the linked record with id = ID (write *values* on it)
             (2, ID)                remove and delete the linked record with id = ID (calls unlink on ID, that will delete the object completely, and the link to it as well)
             (3, ID)                cut the link to the linked record with id = ID (delete the relationship between the two objects but does not delete the target object itself)
             (4, ID)                link to existing record with id = ID (adds a relationship)
             (5)                    unlink all (like using (3,ID) for all linked records)
             (6, 0, [IDs])          replace the list of linked IDs (like using (5) then (4,ID) for each ID in the list of IDs)

             [(6, 0, [8, 5, 6, 4])] sets the many2many to ids [8, 5, 6, 4]

        + For a one2many field, a lits of tuples is expected.
          Here is the list of tuple that are accepted, with the corresponding semantics ::

             (0, 0,  { values })    link to a new record that needs to be created with the given values dictionary
             (1, ID, { values })    update the linked record with id = ID (write *values* on it)
             (2, ID)                remove and delete the linked record with id = ID (calls unlink on ID, that will delete the object completely, and the link to it as well)

             [(0, 0, {'field_name':field_value_record1, ...}), (0, 0, {'field_name':field_value_record2, ...})]

        + For a many2one field, simply use the ID of target record, which must already exist, or ``False`` to remove the link.
        + For a reference field, use a string with the model name, a comma, and the target object id (example: ``'product.product, 5'``)
    """
    self.check_field_access_rights(cr, user, 'write', vals.keys())
    # Track records removed through (2, ID) one2many/many2many commands so
    # that stored-field recomputation skips them at the end.
    deleted_related = defaultdict(list)
    for field in vals.copy():
        if field in self._columns:
            fobj = self._columns[field]
        elif field in self._inherit_fields:
            fobj = self._inherit_fields[field][2]
        if fobj._type in ['one2many', 'many2many'] and vals[field]:
            for wtuple in vals[field]:
                # command code 2 = "remove and delete the linked record"
                if isinstance(wtuple, (tuple, list)) and wtuple[0] == 2:
                    deleted_related[fobj._obj].append(wtuple[1])
            # Field-level group restriction: drop the value unless the user
            # belongs to at least one of the declared groups.
            for group in groups:
                module = group.split(".")[0]
                grp = group.split(".")[1]
                cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name=%s and module=%s and model=%s) and uid=%s", \
                           (grp, module, 'res.groups', user))
                readonly = cr.fetchall()
                if readonly[0][0] >= 1:
    # Normalize a single id to a list.
    if isinstance(ids, (int, long)):
    self._check_concurrency(cr, ids, context)
    self.check_access_rights(cr, user, 'write')
    # Stored function fields that depend on the written fields.
    result = self._store_get_values(cr, user, ids, vals.keys(), context) or []
    # No direct update of parent_left/right
    vals.pop('parent_left', None)
    vals.pop('parent_right', None)
    parents_changed = []
    parent_order = self._parent_order or self._order
    if self._parent_store and (self._parent_name in vals):
        # The parent_left/right computation may take up to
        # 5 seconds. No need to recompute the values if the
        # parent is the same.
        # Note: to respect parent_order, nodes must be processed in
        # order, so ``parents_changed`` must be ordered properly.
        parent_val = vals[self._parent_name]
            # Only the records whose parent actually changes.
            query = "SELECT id FROM %s WHERE id IN %%s AND (%s != %%s OR %s IS NULL) ORDER BY %s" % \
                            (self._table, self._parent_name, self._parent_name, parent_order)
            cr.execute(query, (tuple(ids), parent_val))
            # Parent is being cleared: every record with a parent changes.
            query = "SELECT id FROM %s WHERE id IN %%s AND (%s IS NOT NULL) ORDER BY %s" % \
                            (self._table, self._parent_name, parent_order)
            cr.execute(query, (tuple(ids),))
        parents_changed = map(operator.itemgetter(0), cr.fetchall())
    # Translatable values are written through ir.translation when a
    # non-default language is active.
    totranslate = context.get('lang', False) and (context['lang'] != 'en_US')
        field_column = self._all_columns.get(field) and self._all_columns.get(field).column
        if field_column and field_column.deprecated:
            _logger.warning('Field %s.%s is deprecated: %s', self._name, field, field_column.deprecated)
        if field in self._columns:
            # Classic (plain SQL) columns go in the single UPDATE; function
            # fields with an inverse go through their set() method instead.
            if self._columns[field]._classic_write and not (hasattr(self._columns[field], '_fnct_inv')):
                if (not totranslate) or not self._columns[field].translate:
                    upd0.append('"'+field+'"='+self._columns[field]._symbol_set[0])
                    upd1.append(self._columns[field]._symbol_set[1](vals[field]))
                direct.append(field)
                upd_todo.append(field)
            # Not a local column: handled via _inherits delegation below.
            updend.append(field)
        if field in self._columns \
                and hasattr(self._columns[field], 'selection') \
            self._check_selection_field_value(cr, user, field, vals[field], context=context)
    if self._log_access:
        # Audit columns are updated alongside the user-visible values.
        upd0.append('write_uid=%s')
        upd0.append("write_date=(now() at time zone 'UTC')")
        self.check_access_rule(cr, user, ids, 'write', context=context)
        for sub_ids in cr.split_for_in_conditions(ids):
            cr.execute('update ' + self._table + ' set ' + ','.join(upd0) + ' ' \
                       'where id IN %s', upd1 + [sub_ids])
            # Fewer rows updated than requested means some ids vanished.
            if cr.rowcount != len(sub_ids):
                raise except_orm(_('AccessError'),
                                 _('One of the records you are trying to modify has already been deleted (Document type: %s).') % self._description)
                if self._columns[f].translate:
                    src_trans = self.pool[self._name].read(cr, user, ids, [f])[0][f]
                        # Inserting value to DB
                        context_wo_lang = dict(context, lang=None)
                        self.write(cr, user, ids, {f: vals[f]}, context=context_wo_lang)
                    self.pool.get('ir.translation')._set_ids(cr, user, self._name+','+f, 'model', context['lang'], ids, vals[f], src_trans)
    # call the 'set' method of fields which are not classic_write
    upd_todo.sort(lambda x, y: self._columns[x].priority-self._columns[y].priority)
    # default element in context must be removed when call a one2many or many2many
    rel_context = context.copy()
    for c in context.items():
        if c[0].startswith('default_'):
            del rel_context[c[0]]
    for field in upd_todo:
            result += self._columns[field].set(cr, self, id, field, vals[field], user, context=rel_context) or []
    # Delegate the remaining (inherited) fields to the parent models.
    unknown_fields = updend[:]
    for table in self._inherits:
        col = self._inherits[table]
        # Collect the distinct parent-record ids referenced by `ids`.
        for sub_ids in cr.split_for_in_conditions(ids):
            cr.execute('select distinct "'+col+'" from "'+self._table+'" ' \
                       'where id IN %s', (sub_ids,))
            nids.extend([x[0] for x in cr.fetchall()])
            if self._inherit_fields[val][0] == table:
                unknown_fields.remove(val)
            self.pool[table].write(cr, user, nids, v, context)
            'No such field(s) in model %s: %s.',
            self._name, ', '.join(unknown_fields))
    self._validate(cr, user, ids, context)
    # TODO: use _order to set dest at the right position and not first node of parent
    # We can't defer parent_store computation because the stored function
    # fields that are computer may refer (directly or indirectly) to
    # parent_left/right (via a child_of domain)
            # During registry init the recomputation is deferred.
            self.pool._init_parent[self._name] = True
            order = self._parent_order or self._order
            parent_val = vals[self._parent_name]
                clause, params = '%s=%%s' % (self._parent_name,), (parent_val,)
                clause, params = '%s IS NULL' % (self._parent_name,), ()
            for id in parents_changed:
                cr.execute('SELECT parent_left, parent_right FROM %s WHERE id=%%s' % (self._table,), (id,))
                pleft, pright = cr.fetchone()
                # Width of the subtree being moved.
                distance = pright - pleft + 1

                # Positions of current siblings, to locate proper insertion point;
                # this can _not_ be fetched outside the loop, as it needs to be refreshed
                # after each update, in case several nodes are sequentially inserted one
                # next to the other (i.e computed incrementally)
                cr.execute('SELECT parent_right, id FROM %s WHERE %s ORDER BY %s' % (self._table, clause, parent_order), params)
                parents = cr.fetchall()

                # Find Position of the element
                for (parent_pright, parent_id) in parents:
                    position = parent_pright and parent_pright + 1 or 1

                # It's the first node of the parent
                        cr.execute('select parent_left from '+self._table+' where id=%s', (parent_val,))
                        position = cr.fetchone()[0] + 1

                # Moving a node inside its own subtree would corrupt the tree.
                if pleft < position <= pright:
                    raise except_orm(_('UserError'), _('Recursivity Detected.'))

                # Shift intervals right of the insertion point, then slide
                # the moved subtree into place.
                if pleft < position:
                    cr.execute('update '+self._table+' set parent_left=parent_left+%s where parent_left>=%s', (distance, position))
                    cr.execute('update '+self._table+' set parent_right=parent_right+%s where parent_right>=%s', (distance, position))
                    cr.execute('update '+self._table+' set parent_left=parent_left+%s, parent_right=parent_right+%s where parent_left>=%s and parent_left<%s', (position-pleft, position-pleft, pleft, pright))
                    cr.execute('update '+self._table+' set parent_left=parent_left+%s where parent_left>=%s', (distance, position))
                    cr.execute('update '+self._table+' set parent_right=parent_right+%s where parent_right>=%s', (distance, position))
                    cr.execute('update '+self._table+' set parent_left=parent_left-%s, parent_right=parent_right-%s where parent_left>=%s and parent_left<%s', (pleft-position+distance, pleft-position+distance, pleft+distance, pright+distance))

    # Second pass: stored function fields to recompute after the write.
    result += self._store_get_values(cr, user, ids, vals.keys(), context)
    for order, model_name, ids_to_update, fields_to_recompute in result:
        key = (model_name, tuple(fields_to_recompute))
        done.setdefault(key, {})
        # avoid to do several times the same computation
        for id in ids_to_update:
            if id not in done[key]:
                done[key][id] = True
                # skip records just deleted via (2, ID) commands
                if id not in deleted_related[model_name]:
        self.pool[model_name]._store_set_values(cr, user, todo, fields_to_recompute, context)

    self.step_workflow(cr, user, ids, context=context)
4051 # TODO: Should set perm to user.xxx
def create(self, cr, user, vals, context=None):
    """
    Create a new record for the model.

    The values for the new record are initialized using the ``vals``
    argument, and if necessary the result of ``default_get()``.

    :param cr: database cursor
    :param user: current user id
    :param vals: field values for new record, e.g {'field_name': field_value, ...}
    :type vals: dictionary
    :param context: optional context arguments, e.g. {'lang': 'en_us', 'tz': 'UTC', ...}
    :type context: dictionary
    :return: id of new record created
    :raise AccessError: * if user has no create rights on the requested object
                        * if user tries to bypass access rules for create on the requested object
    :raise ValidateError: if user tries to enter invalid value for a field that is not in selection
    :raise UserError: if a loop would be created in a hierarchy of objects a result of the operation (such as setting an object as its own parent)

    **Note**: The type of field values to pass in ``vals`` for relationship fields is specific.
    Please see the description of the :py:meth:`~osv.osv.osv.write` method for details about the possible values and how
    """
    if self.is_transient():
        # Garbage-collect old transient records before adding new ones.
        self._transient_vacuum(cr, user)
    self.check_access_rights(cr, user, 'create')
    vals = self._add_missing_default_values(cr, user, vals, context)
    if self._log_access:
        # The audit columns are maintained by the ORM; client-provided
        # values for them are discarded with a warning.
        for f in LOG_ACCESS_COLUMNS:
            if vals.pop(f, None) is not None:
                    'Field `%s` is not allowed when creating the model `%s`.',
    # Prepare one sub-dict of values per _inherits parent model.
    for v in self._inherits:
        if self._inherits[v] not in vals:
            # An existing parent record id was passed explicitly.
            tocreate[v] = {'id': vals[self._inherits[v]]}
    # columns will contain a list of field defined as a tuple
    # tuple(field_name, format_string, field_value)
    # the tuple will be used by the string formatting for the INSERT
        ('id', "nextval('%s')" % self._sequence),
    # Route each value either to a parent model (via _inherits) or flag
    # it as unknown.
    for v in vals.keys():
        if v in self._inherit_fields and v not in self._columns:
            (table, col, col_detail, original_parent) = self._inherit_fields[v]
            tocreate[table][v] = vals[v]
            if (v not in self._inherit_fields) and (v not in self._columns):
                unknown_fields.append(v)
            'No such field(s) in model %s: %s.',
            self._name, ', '.join(unknown_fields))
    if not self._sequence:
            # Models without an id sequence are read-only (reporting views).
            _('You cannot perform this operation. New Record Creation is not allowed for this object as this object is for reporting purpose.')
    # Create (or update) the _inherits parent records first, so their ids
    # can be stored in the local delegation columns.
    for table in tocreate:
        if self._inherits[table] in vals:
            del vals[self._inherits[table]]

        record_id = tocreate[table].pop('id', None)

        # When linking/creating parent records, force context without 'no_store_function' key that
        # defers stored functions computing, as these won't be computed in batch at the end of create().
        parent_context = dict(context)
        parent_context.pop('no_store_function', None)

        if record_id is None or not record_id:
            record_id = self.pool[table].create(cr, user, tocreate[table], context=parent_context)
            self.pool[table].write(cr, user, [record_id], tocreate[table], context=parent_context)

        columns.append((self._inherits[table], '%s', record_id))

    #Start : Set bool fields to be False if they are not touched(to make search more powerful)
    bool_fields = [x for x in self._columns.keys() if self._columns[x]._type=='boolean']

    for bool_field in bool_fields:
        if bool_field not in vals:
            vals[bool_field] = False
    for field in vals.copy():
        if field in self._columns:
            fobj = self._columns[field]
            fobj = self._inherit_fields[field][2]
            # Field-level group restriction, as in write().
            for group in groups:
                module = group.split(".")[0]
                grp = group.split(".")[1]
                # NOTE(review): this query interpolates values with `%`
                # instead of using query parameters, unlike the equivalent
                # check in write() — should be parameterized; verify.
                cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name='%s' and module='%s' and model='%s') and uid=%s" % \
                           (grp, module, 'res.groups', user))
                readonly = cr.fetchall()
                if readonly[0][0] >= 1:
                elif readonly[0][0] == 0:
        current_field = self._columns[field]
        if current_field._classic_write:
            columns.append((field, '%s', current_field._symbol_set[1](vals[field])))

            #for the function fields that receive a value, we set them directly in the database
            #(they may be required), but we also need to trigger the _fct_inv()
            if (hasattr(current_field, '_fnct_inv')) and not isinstance(current_field, fields.related):
                #TODO: this way to special case the related fields is really creepy but it shouldn't be changed at
                #one week of the release candidate. It seems the only good way to handle correctly this is to add an
                #attribute to make a field `really readonly´ and thus totally ignored by the create()... otherwise
                #if, for example, the related has a default value (for usability) then the fct_inv is called and it
                #may raise some access rights error. Changing this is a too big change for now, and is thus postponed
                #after the release but, definitively, the behavior shouldn't be different for related and function
                upd_todo.append(field)
            #TODO: this `if´ statement should be removed because there is no good reason to special case the fields
            #related. See the above TODO comment for further explanations.
            if not isinstance(current_field, fields.related):
                upd_todo.append(field)
        if field in self._columns \
                and hasattr(current_field, 'selection') \
            self._check_selection_field_value(cr, user, field, vals[field], context=context)
    if self._log_access:
        columns.append(('create_uid', '%s', user))
        columns.append(('write_uid', '%s', user))
        columns.append(('create_date', "(now() at time zone 'UTC')"))
        columns.append(('write_date', "(now() at time zone 'UTC')"))

    # the list of tuples used in this formatting corresponds to
    # tuple(field_name, format, value)
    # In some case, for example (id, create_date, write_date) we does not
    # need to read the third value of the tuple, because the real value is
    # encoded in the second value (the format).
        """INSERT INTO "%s" (%s) VALUES(%s) RETURNING id""" % (
            ', '.join('"%s"' % f[0] for f in columns),
            ', '.join(f[1] for f in columns)
        tuple([f[2] for f in columns if len(f) > 2])
    id_new, = cr.fetchone()
    upd_todo.sort(lambda x, y: self._columns[x].priority-self._columns[y].priority)

    if self._parent_store and not context.get('defer_parent_store_computation'):
            # During registry init the recomputation is deferred.
            self.pool._init_parent[self._name] = True
            parent = vals.get(self._parent_name, False)
                # Insert as last child: take the rightmost sibling's edge.
                cr.execute('select parent_right from '+self._table+' where '+self._parent_name+'=%s order by '+(self._parent_order or self._order), (parent,))
                result_p = cr.fetchall()
                for (pleft,) in result_p:
                    # No sibling yet: anchor on the parent's own left edge.
                    cr.execute('select parent_left from '+self._table+' where id=%s', (parent,))
                    pleft_old = cr.fetchone()[0]
                # No parent: append after the current rightmost interval.
                cr.execute('select max(parent_right) from '+self._table)
                pleft = cr.fetchone()[0] or 0
            # Open a 2-wide gap and place the new leaf inside it.
            cr.execute('update '+self._table+' set parent_left=parent_left+2 where parent_left>%s', (pleft,))
            cr.execute('update '+self._table+' set parent_right=parent_right+2 where parent_right>%s', (pleft,))
            cr.execute('update '+self._table+' set parent_left=%s,parent_right=%s where id=%s', (pleft+1, pleft+2, id_new))

    # default element in context must be remove when call a one2many or many2many
    rel_context = context.copy()
    for c in context.items():
        if c[0].startswith('default_'):
            del rel_context[c[0]]

    # Non-classic fields (function fields etc.) are written via set().
    for field in upd_todo:
        result += self._columns[field].set(cr, self, id_new, field, vals[field], user, rel_context) or []
    self._validate(cr, user, [id_new], context)

    if not context.get('no_store_function', False):
        result += self._store_get_values(cr, user, [id_new],
            list(set(vals.keys() + self._inherits.values())),
        # Recompute each (model, ids, fields) combination only once.
        for order, model_name, ids, fields2 in result:
            if not (model_name, ids, fields2) in done:
                self.pool[model_name]._store_set_values(cr, user, ids, fields2, context)
                done.append((model_name, ids, fields2))

    if self._log_create and not (context and context.get('no_store_function', False)):
        message = self._description + \
            self.name_get(cr, user, [id_new], context=context)[0][1] + \
            "' " + _("created.")
        self.log(cr, user, id_new, message, True, context=context)
    self.check_access_rule(cr, user, [id_new], 'create', context=context)
    self.create_workflow(cr, user, [id_new], context=context)
    def browse(self, cr, uid, select, context=None, list_class=None, fields_process=None):
        """Fetch records as objects allowing to use dot notation to browse fields and relations

        :param cr: database cursor
        :param uid: current user id
        :param select: id or list of ids.
        :param context: context arguments, like lang, time zone
        :rtype: object or list of objects requested
        """
        self._list_class = list_class or browse_record_list
        # NOTE(review): a record cache dict (``cache = {}``) shared by all the
        # browse_record instances below is expected to be initialized here --
        # the line appears elided in this excerpt; confirm upstream.
        # need to accepts ints and longs because ids coming from a method
        # launched by button in the interface have a type long...
        if isinstance(select, (int, long)):
            return browse_record(cr, uid, select, self, cache, context=context, list_class=self._list_class, fields_process=fields_process)
        elif isinstance(select, list):
            return self._list_class((browse_record(cr, uid, id, self, cache, context=context, list_class=self._list_class, fields_process=fields_process) for id in select), context=context)
        # neither a single id nor a list of ids: return an empty browse_null
        return browse_null()
    def _store_get_values(self, cr, uid, ids, fields, context):
        """Returns an ordered list of fields.function to call due to
        an update operation on ``fields`` of records with ``ids``,
        obtained by calling the 'store' triggers of these fields,
        as setup by their 'store' attribute.

        :return: [(priority, model_name, [record_ids,], [function_fields,])]
        """
        if fields is None: fields = []
        stored_functions = self.pool._store_function.get(self._name, [])

        # use indexed names for the details of the stored_functions:
        model_name_, func_field_to_compute_, target_ids_func_, trigger_fields_, priority_ = range(5)

        # only keep store triggers that should be triggered for the ``fields``
        triggers_to_compute = [f for f in stored_functions \
                if ((not f[trigger_fields_]) or set(fields).intersection(f[trigger_fields_]))]

        # NOTE(review): ``to_compute_map = {}`` initialization is elided in this
        # excerpt -- confirm upstream.
        target_id_results = {}
        for store_trigger in triggers_to_compute:
            # memoize each ids-function by identity so it is evaluated at most
            # once per call, even when shared by several triggers
            target_func_id_ = id(store_trigger[target_ids_func_])
            if not target_func_id_ in target_id_results:
                # use admin user for accessing objects having rules defined on store fields
                target_id_results[target_func_id_] = [i for i in store_trigger[target_ids_func_](self, cr, SUPERUSER_ID, ids, context) if i]
            target_ids = target_id_results[target_func_id_]

            # the compound key must consider the priority and model name
            key = (store_trigger[priority_], store_trigger[model_name_])
            for target_id in target_ids:
                to_compute_map.setdefault(key, {}).setdefault(target_id,set()).add(tuple(store_trigger))

        # Here to_compute_map looks like:
        # { (10, 'model_a') : { target_id1: [ (trigger_1_tuple, trigger_2_tuple) ], ... }
        #   (20, 'model_a') : { target_id2: [ (trigger_3_tuple, trigger_4_tuple) ], ... }
        #   (99, 'model_a') : { target_id1: [ (trigger_5_tuple, trigger_6_tuple) ], ... }

        # Now we need to generate the batch function calls list
        # call_map =
        #   { (10, 'model_a') : [(10, 'model_a', [record_ids,], [function_fields,])] }
        # NOTE(review): ``call_map = {}`` initialization is elided in this
        # excerpt -- confirm upstream.
        for ((priority,model), id_map) in to_compute_map.iteritems():
            trigger_ids_maps = {}
            # function_ids_maps =
            #   { (function_1_tuple, function_2_tuple) : [target_id1, target_id2, ..] }
            for target_id, triggers in id_map.iteritems():
                # group target ids by the exact set of triggers to run on them
                trigger_ids_maps.setdefault(tuple(triggers), []).append(target_id)
            for triggers, target_ids in trigger_ids_maps.iteritems():
                call_map.setdefault((priority,model),[]).append((priority, model, target_ids,
                                                                [t[func_field_to_compute_] for t in triggers]))
        ordered_keys = call_map.keys()
        # flatten the per-key call lists into a single ordered result
        # NOTE(review): the sort of ``ordered_keys`` and the final ``return``
        # appear elided in this excerpt -- confirm upstream.
        result = reduce(operator.add, (call_map[k] for k in ordered_keys))
    def _store_set_values(self, cr, uid, ids, fields, context):
        """Calls the fields.function's "implementation function" for all ``fields``, on records with ``ids`` (taking care of
        respecting ``multi`` attributes), and stores the resulting values in the database directly."""
        # NOTE(review): many lines of this method (guards, loop headers,
        # variable initializations) are elided in this excerpt; inline notes
        # flag the most important missing pieces.
        if self._log_access:
            cr.execute('select id,write_date from '+self._table+' where id IN %s', (tuple(ids),))
            # NOTE(review): the fetchall() and the ``for r in ...`` loop header
            # around the block below are elided.
            field_dict.setdefault(r[0], [])
            # parse the record's last write_date to decide whether a timed
            # store trigger (grace delay of i[5] hours) is still pending
            res_date = time.strptime((r[1])[:19], '%Y-%m-%d %H:%M:%S')
            write_date = datetime.datetime.fromtimestamp(time.mktime(res_date))
            for i in self.pool._store_function.get(self._name, []):
                up_write_date = write_date + datetime.timedelta(hours=i[5])
                if datetime.datetime.now() < up_write_date:
                    field_dict[r[0]].append(i[1])
        # group the fields to recompute by their ``_multi`` attribute so that
        # multi-fields are computed in one single get() call
        # NOTE(review): ``todo``/``keys`` initializations and the ``for f in
        # fields:`` loop header are elided.
        if self._columns[f]._multi not in keys:
            keys.append(self._columns[f]._multi)
        todo.setdefault(self._columns[f]._multi, [])
        todo[self._columns[f]._multi].append(f)
        # NOTE(review): the ``for key in keys:`` / ``val = todo[key]`` /
        # ``if key:`` structure around the multi branch below is elided.
        # use admin user for accessing objects having rules defined on store fields
        result = self._columns[val[0]].get(cr, self, ids, val, SUPERUSER_ID, context=context)
        for id, value in result.items():
            for f in value.keys():
                if f in field_dict[id]:
                    # NOTE(review): the ``value.pop(f)`` under this guard is elided.
        if self._columns[v]._type == 'many2one':
            # keep only the database id out of the (id, name) m2o pair
            value[v] = value[v][0]
        upd0.append('"'+v+'"='+self._columns[v]._symbol_set[0])
        upd1.append(self._columns[v]._symbol_set[1](value[v]))
        # single UPDATE covering all the columns computed for this record
        cr.execute('update "' + self._table + '" set ' + \
            ','.join(upd0) + ' where id = %s', upd1)
        # non-multi branch: compute and store each field separately
        # use admin user for accessing objects having rules defined on store fields
        result = self._columns[f].get(cr, self, ids, f, SUPERUSER_ID, context=context)
        for r in result.keys():
            if r in field_dict.keys():
                if f in field_dict[r]:
                    # NOTE(review): the ``result.pop(r)`` under this guard is elided.
        for id, value in result.items():
            if self._columns[f]._type == 'many2one':
                # NOTE(review): unwrap of the (id, name) m2o pair is elided here.
        cr.execute('update "' + self._table + '" set ' + \
            '"'+f+'"='+self._columns[f]._symbol_set[0] + ' where id = %s', (self._columns[f]._symbol_set[1](value), id))
4447 def perm_write(self, cr, user, ids, fields, context=None):
4448 raise NotImplementedError(_('This method does not exist anymore'))
    # TODO: improve NULL handling
    def _where_calc(self, cr, user, domain, active_test=True, context=None):
        """Computes the WHERE clause needed to implement an OpenERP domain.

        :param domain: the domain to compute
        :param active_test: whether the default filtering of records with ``active``
                            field set to ``False`` should be applied.
        :return: the query expressing the given domain as provided in domain
        :rtype: osv.query.Query
        """
        # NOTE(review): context defaulting and the if/else scaffolding of this
        # method are elided in this excerpt; inline notes flag the gaps.
        # if the object has a field named 'active', filter out all inactive
        # records unless they were explicitely asked for
        if 'active' in self._all_columns and (active_test and context.get('active_test', True)):
            # the item[0] trick below works for domain items and '&'/'|'/'!'
            # operators too
            if not any(item[0] == 'active' for item in domain):
                domain.insert(0, ('active', '=', 1))
            # NOTE(review): the next line presumably belongs to an elided
            # ``else:`` branch (empty incoming domain) -- confirm upstream.
            domain = [('active', '=', 1)]

        e = expression.expression(cr, user, domain, self, context)
        tables = e.get_tables()
        where_clause, where_params = e.to_sql()
        where_clause = where_clause and [where_clause] or []
        # NOTE(review): the next line presumably belongs to an elided ``else:``
        # branch (no domain at all) -- confirm upstream.
        where_clause, where_params, tables = [], [], ['"%s"' % self._table]

        return Query(tables, where_clause, where_params)
    def _check_qorder(self, word):
        """Validate an "order" specification: a comma-separated list of field
        names, each optionally followed by asc/desc; raises an except_orm
        labelled 'AccessError' when it does not match ``regex_order``."""
        if not regex_order.match(word):
            raise except_orm(_('AccessError'), _('Invalid "order" specified. A valid "order" specification is a comma-separated list of valid field names (optionally followed by asc/desc for the direction)'))
        # NOTE(review): a final ``return True`` appears elided in this excerpt.
    def _apply_ir_rules(self, cr, uid, query, mode='read', context=None):
        """Add what's missing in ``query`` to implement all appropriate ir.rules
          (using the ``model_name``'s rules or the current model's rules if ``model_name`` is None)

        :param query: the current query object
        """
        # superuser is exempt from ir.rule filtering
        if uid == SUPERUSER_ID:
            # NOTE(review): the early ``return`` under this guard is elided in
            # this excerpt.

        def apply_rule(added_clause, added_params, added_tables, parent_model=None, child_object=None):
            """ :param string parent_model: string of the parent model
                :param model child_object: model object, base of the rule application
            """
            if parent_model and child_object:
                # as inherited rules are being applied, we need to add the missing JOIN
                # to reach the parent table (if it was not JOINed yet in the query)
                parent_alias = child_object._inherits_join_add(child_object, parent_model, query)
                # inherited rules are applied on the external table -> need to get the alias and replace
                parent_table = self.pool[parent_model]._table
                added_clause = [clause.replace('"%s"' % parent_table, '"%s"' % parent_alias) for clause in added_clause]
                # change references to parent_table to parent_alias, because we now use the alias to refer to the table
                # NOTE(review): ``new_tables = []`` initialization is elided here.
                for table in added_tables:
                    # table is just a table name -> switch to the full alias
                    if table == '"%s"' % parent_table:
                        new_tables.append('"%s" as "%s"' % (parent_table, parent_alias))
                    # NOTE(review): the following append presumably sits in an
                    # elided ``else:`` branch -- confirm upstream.
                    # table is already a full statement -> replace reference to the table to its alias, is correct with the way aliases are generated
                    new_tables.append(table.replace('"%s"' % parent_table, '"%s"' % parent_alias))
                added_tables = new_tables
            query.where_clause += added_clause
            query.where_clause_params += added_params
            for table in added_tables:
                if table not in query.tables:
                    query.tables.append(table)

        # apply main rules on the object
        rule_obj = self.pool.get('ir.rule')
        rule_where_clause, rule_where_clause_params, rule_tables = rule_obj.domain_get(cr, uid, self._name, mode, context=context)
        apply_rule(rule_where_clause, rule_where_clause_params, rule_tables)

        # apply ir.rules from the parents (through _inherits)
        for inherited_model in self._inherits:
            rule_where_clause, rule_where_clause_params, rule_tables = rule_obj.domain_get(cr, uid, inherited_model, mode, context=context)
            apply_rule(rule_where_clause, rule_where_clause_params, rule_tables,
                       parent_model=inherited_model, child_object=self)
    def _generate_m2o_order_by(self, order_field, query):
        """
        Add possibly missing JOIN to ``query`` and generate the ORDER BY clause for m2o fields,
        either native m2o fields or function/related fields that are stored, including
        intermediate JOINs for inheritance if required.

        :return: the qualified field name to use in an ORDER BY clause to sort by ``order_field``
        """
        if order_field not in self._columns and order_field in self._inherit_fields:
            # also add missing joins for reaching the table containing the m2o field
            qualified_field = self._inherits_join_calc(order_field, query)
            order_field_column = self._inherit_fields[order_field][2]
            # NOTE(review): the two lines below presumably belong to an elided
            # ``else:`` branch (field defined directly on this model).
            qualified_field = '"%s"."%s"' % (self._table, order_field)
            order_field_column = self._columns[order_field]

        assert order_field_column._type == 'many2one', 'Invalid field passed to _generate_m2o_order_by()'
        if not order_field_column._classic_write and not getattr(order_field_column, 'store', False):
            _logger.debug("Many2one function/related fields must be stored " \
                "to be used as ordering fields! Ignoring sorting for %s.%s",
                self._name, order_field)
            # NOTE(review): an early ``return`` appears elided here.

        # figure out the applicable order_by for the m2o
        dest_model = self.pool[order_field_column._obj]
        m2o_order = dest_model._order
        if not regex_order.match(m2o_order):
            # _order is complex, can't use it here, so we default to _rec_name
            m2o_order = dest_model._rec_name
        # extract the field names, to be able to qualify them and add desc/asc
        # NOTE(review): the ``m2o_order_list = []`` initialization (and the
        # guard it sits in) is elided in this excerpt.
        for order_part in m2o_order.split(","):
            m2o_order_list.append(order_part.strip().split(" ", 1)[0].strip())
        m2o_order = m2o_order_list

        # Join the dest m2o table if it's not joined yet. We use [LEFT] OUTER join here
        # as we don't want to exclude results that have NULL values for the m2o
        src_table, src_field = qualified_field.replace('"', '').split('.', 1)
        dst_alias, dst_alias_statement = query.add_join((src_table, dest_model._table, src_field, 'id', src_field), implicit=False, outer=True)
        qualify = lambda field: '"%s"."%s"' % (dst_alias, field)
        return map(qualify, m2o_order) if isinstance(m2o_order, list) else qualify(m2o_order)
    def _generate_order_by(self, order_spec, query):
        """
        Attempt to construct an appropriate ORDER BY clause based on order_spec, which must be
        a comma-separated list of valid field names, optionally followed by an ASC or DESC direction.

        :raise: except_orm in case order_spec is malformed
        """
        order_by_clause = ''
        order_spec = order_spec or self._order
        # NOTE(review): an ``if order_spec:`` guard around this block and several
        # ``else:`` lines (flagged inline) are elided in this excerpt.
        order_by_elements = []
        self._check_qorder(order_spec)
        for order_part in order_spec.split(','):
            order_split = order_part.strip().split(' ')
            order_field = order_split[0].strip()
            order_direction = order_split[1].strip() if len(order_split) == 2 else ''
            if order_field == 'id' or (self._log_access and order_field in LOG_ACCESS_COLUMNS.keys()):
                order_by_elements.append('"%s"."%s" %s' % (self._table, order_field, order_direction))
            elif order_field in self._columns:
                order_column = self._columns[order_field]
                if order_column._classic_read:
                    inner_clause = '"%s"."%s"' % (self._table, order_field)
                elif order_column._type == 'many2one':
                    inner_clause = self._generate_m2o_order_by(order_field, query)
                # NOTE(review): an ``else:`` is elided before the next line.
                continue # ignore non-readable or "non-joinable" fields
            elif order_field in self._inherit_fields:
                parent_obj = self.pool[self._inherit_fields[order_field][3]]
                order_column = parent_obj._columns[order_field]
                if order_column._classic_read:
                    inner_clause = self._inherits_join_calc(order_field, query)
                elif order_column._type == 'many2one':
                    inner_clause = self._generate_m2o_order_by(order_field, query)
                # NOTE(review): an ``else:`` is elided before the next line.
                continue # ignore non-readable or "non-joinable" fields
            # NOTE(review): an ``else:`` is elided before the next line.
            raise ValueError( _("Sorting field %s not found on model %s") %( order_field, self._name))
            if isinstance(inner_clause, list):
                for clause in inner_clause:
                    order_by_elements.append("%s %s" % (clause, order_direction))
            # NOTE(review): an ``else:`` is elided before the next line.
            order_by_elements.append("%s %s" % (inner_clause, order_direction))
        if order_by_elements:
            order_by_clause = ",".join(order_by_elements)

        return order_by_clause and (' ORDER BY %s ' % order_by_clause) or ''
    def _search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False, access_rights_uid=None):
        """
        Private implementation of search() method, allowing specifying the uid to use for the access right check.
        This is useful for example when filling in the selection list for a drop-down and avoiding access rights errors,
        by specifying ``access_rights_uid=1`` to bypass access rights check, but not ir.rules!
        This is ok at the security level because this method is private and not callable through XML-RPC.

        :param access_rights_uid: optional user ID to use when checking access rights
                                  (not for ir.rules, this is only for ir.model.access)
        """
        self.check_access_rights(cr, access_rights_uid or user, 'read')

        # For transient models, restrict acces to the current user, except for the super-user
        if self.is_transient() and self._log_access and user != SUPERUSER_ID:
            args = expression.AND(([('create_uid', '=', user)], args or []))

        query = self._where_calc(cr, user, args, context=context)
        self._apply_ir_rules(cr, user, query, 'read', context=context)
        order_by = self._generate_order_by(order, query)
        from_clause, where_clause, where_clause_params = query.get_sql()

        where_str = where_clause and (" WHERE %s" % where_clause) or ''

        # NOTE(review): the ``if count:`` branch structure around the two query
        # paths below (and their fetch/return statements) is elided in this
        # excerpt.
        # Ignore order, limit and offset when just counting, they don't make sense and could
        query_str = 'SELECT count(1) FROM ' + from_clause + where_str
        cr.execute(query_str, where_clause_params)
        limit_str = limit and ' limit %d' % limit or ''
        offset_str = offset and ' offset %d' % offset or ''
        query_str = 'SELECT "%s".id FROM ' % self._table + from_clause + where_str + order_by + limit_str + offset_str
        cr.execute(query_str, where_clause_params)
        # NOTE(review): ``res = cr.fetchall()`` is elided in this excerpt.

        # TDE note: with auto_join, we could have several lines about the same result
        # i.e. a lead with several unread messages; we uniquify the result using
        # a fast way to do it while preserving order (http://www.peterbe.com/plog/uniqifiers-benchmark)
        def _uniquify_list(seq):
            # NOTE(review): the ``seen = set()`` initialization is elided here.
            return [x for x in seq if x not in seen and not seen.add(x)]

        return _uniquify_list([x[0] for x in res])
    # returns the different values ever entered for one field;
    # this is used, for example, in the client when the user hits enter on
    # a char field (to offer previously entered values)
    def distinct_field_get(self, cr, uid, field, value, args=None, offset=0, limit=None):
        """Return distinct previously-entered values for ``field``: delegates to
        the parent model when the field is inherited, otherwise to the column's
        own ``search()``."""
        # NOTE(review): the ``if not args: args = []`` defaulting lines appear
        # elided in this excerpt.
        if field in self._inherit_fields:
            return self.pool[self._inherit_fields[field][0]].distinct_field_get(cr, uid, field, value, args, offset, limit)
        # NOTE(review): an ``else:`` is elided here; behaviour is unchanged
        # since the branch above returns.
        return self._columns[field].search(cr, self, args, field, value, offset, limit, uid)
    def copy_data(self, cr, uid, id, default=None, context=None):
        """
        Copy given record's data with all its fields values

        :param cr: database cursor
        :param uid: current user id
        :param id: id of the record to copy
        :param default: field values to override in the original values of the copied record
        :type default: dictionary
        :param context: context arguments, like lang, time zone
        :type context: dictionary
        :return: dictionary containing all the field values
        """
        # NOTE(review): context/default defaulting lines are elided in this
        # excerpt.

        # avoid recursion through already copied records in case of circular relationship
        seen_map = context.setdefault('__copy_data_seen', {})
        if id in seen_map.setdefault(self._name, []):
            # NOTE(review): the early ``return`` under this guard is elided.
        seen_map[self._name].append(id)

        # force the copy to start in the model's default state rather than the
        # source record's current state
        if 'state' not in default:
            if 'state' in self._defaults:
                if callable(self._defaults['state']):
                    default['state'] = self._defaults['state'](self, cr, uid, context)
                # NOTE(review): the next line presumably belongs to an elided
                # ``else:`` branch (non-callable default) -- confirm upstream.
                default['state'] = self._defaults['state']

        # build a black list of fields that should not be copied
        blacklist = set(MAGIC_COLUMNS + ['parent_left', 'parent_right'])
        def blacklist_given_fields(obj):
            # blacklist the fields that are given by inheritance
            for other, field_to_other in obj._inherits.items():
                blacklist.add(field_to_other)
                if field_to_other in default:
                    # all the fields of 'other' are given by the record: default[field_to_other],
                    # except the ones redefined in self
                    blacklist.update(set(self.pool[other]._all_columns) - set(self._columns))
                # NOTE(review): the recursive call below presumably sits in an
                # elided ``else:`` branch -- confirm upstream.
                blacklist_given_fields(self.pool[other])
            # blacklist deprecated fields
            for name, field in obj._columns.items():
                if field.deprecated:
                    # NOTE(review): the ``blacklist.add(name)`` under this guard
                    # is elided.
        blacklist_given_fields(self)

        # keep only copyable fields: not overridden, not blacklisted, and not
        # (non-stored) function fields
        fields_to_copy = dict((f,fi) for f, fi in self._all_columns.iteritems()
                         if f not in blacklist
                         if not isinstance(fi.column, fields.function))

        data = self.read(cr, uid, [id], fields_to_copy.keys(), context=context)
        # NOTE(review): the ``if data: data = data[0] else:`` structure around
        # the raise below, and the ``res`` initialization, are elided.
        raise IndexError( _("Record #%d of %s not found, cannot copy!") %( id, self._name))

        for f, colinfo in fields_to_copy.iteritems():
            field = colinfo.column
            if field._type == 'many2one':
                # keep only the database id of the m2o (drop the display name)
                res[f] = data[f] and data[f][0]
            elif field._type == 'one2many':
                other = self.pool[field._obj]
                # duplicate following the order of the ids because we'll rely on
                # it later for copying translations in copy_translation()!
                lines = [other.copy_data(cr, uid, line_id, context=context) for line_id in sorted(data[f])]
                # the lines are duplicated using the wrong (old) parent, but then
                # are reassigned to the correct one thanks to the (0, 0, ...)
                res[f] = [(0, 0, line) for line in lines if line]
            elif field._type == 'many2many':
                res[f] = [(6, 0, data[f])]
        # NOTE(review): the plain-field ``else`` branch and the final
        # ``return res`` are elided in this excerpt.
    def copy_translations(self, cr, uid, old_id, new_id, context=None):
        """Duplicate the ir.translation entries of record ``old_id`` onto
        ``new_id``, recursing into one2many children (matched by sorted id
        order, as arranged by copy_data())."""
        # NOTE(review): context defaulting lines are elided in this excerpt.

        # avoid recursion through already copied records in case of circular relationship
        seen_map = context.setdefault('__copy_translations_seen',{})
        if old_id in seen_map.setdefault(self._name,[]):
            # NOTE(review): the early ``return`` under this guard is elided.
        seen_map[self._name].append(old_id)

        trans_obj = self.pool.get('ir.translation')
        # TODO it seems fields_get can be replaced by _all_columns (no need for translation)
        fields = self.fields_get(cr, uid, context=context)

        for field_name, field_def in fields.items():
            # removing the lang to compare untranslated values
            context_wo_lang = dict(context, lang=None)
            old_record, new_record = self.browse(cr, uid, [old_id, new_id], context=context_wo_lang)
            # we must recursively copy the translations for o2o and o2m
            if field_def['type'] == 'one2many':
                target_obj = self.pool[field_def['relation']]
                # here we rely on the order of the ids to match the translations
                # as foreseen in copy_data()
                old_children = sorted(r.id for r in old_record[field_name])
                new_children = sorted(r.id for r in new_record[field_name])
                for (old_child, new_child) in zip(old_children, new_children):
                    target_obj.copy_translations(cr, uid, old_child, new_child, context=context)
            # and for translatable fields we keep them for copy
            elif field_def.get('translate'):
                if field_name in self._columns:
                    trans_name = self._name + "," + field_name
                    # NOTE(review): the target_id/source_id assignments
                    # (new_id/old_id) for this branch are elided here.
                elif field_name in self._inherit_fields:
                    trans_name = self._inherit_fields[field_name][0] + "," + field_name
                    # get the id of the parent record to set the translation
                    inherit_field_name = self._inherit_fields[field_name][1]
                    target_id = new_record[inherit_field_name].id
                    source_id = old_record[inherit_field_name].id
                # NOTE(review): an ``else: continue`` fallback and the closing
                # ``])`` of the search() call below are elided in this excerpt.

                trans_ids = trans_obj.search(cr, uid, [
                        ('name', '=', trans_name),
                        ('res_id', '=', source_id)
                user_lang = context.get('lang')
                for record in trans_obj.read(cr, uid, trans_ids, context=context):
                    # remove source to avoid triggering _set_src
                    del record['source']
                    record.update({'res_id': target_id})
                    if user_lang and user_lang == record['lang']:
                        # 'source' to force the call to _set_src
                        # 'value' needed if value is changed in copy(), want to see the new_value
                        record['source'] = old_record[field_name]
                        record['value'] = new_record[field_name]
                    trans_obj.create(cr, uid, record, context=context)
    def copy(self, cr, uid, id, default=None, context=None):
        """
        Duplicate record with given id updating it with default values

        :param cr: database cursor
        :param uid: current user id
        :param id: id of the record to copy
        :param default: dictionary of field values to override in the original values of the copied record, e.g: ``{'field_name': overriden_value, ...}``
        :type default: dictionary
        :param context: context arguments, like lang, time zone
        :type context: dictionary
        :return: id of the newly created record
        """
        # NOTE(review): context defaulting (``if context is None: ...``) is
        # elided in this excerpt.
        # work on a private copy so '__copy_data_seen' bookkeeping does not
        # leak into the caller's context
        context = context.copy()
        data = self.copy_data(cr, uid, id, default, context)
        new_id = self.create(cr, uid, data, context)
        self.copy_translations(cr, uid, id, new_id, context)
        # NOTE(review): the final ``return new_id`` is elided in this excerpt.
4855 def exists(self, cr, uid, ids, context=None):
4856 """Checks whether the given id or ids exist in this model,
4857 and return the list of ids that do. This is simple to use for
4858 a truth test on a browse_record::
4863 :param ids: id or list of ids to check for existence
4864 :type ids: int or [int]
4865 :return: the list of ids that currently exist, out of
4868 if type(ids) in (int, long):
4872 query = 'SELECT id FROM "%s"' % self._table
4873 cr.execute(query + "WHERE ID IN %s", (tuple(ids),))
4874 return [x[0] for x in cr.fetchall()]
    def check_recursion(self, cr, uid, ids, context=None, parent=None):
        # Deprecated public wrapper: warns, validates the ``parent`` field name,
        # then delegates to _check_recursion().
        # NOTE(review): the continuation line of the warning call (presumably
        # ``self._name)``) is elided in this excerpt, leaving the statement
        # incomplete.
        _logger.warning("You are using deprecated %s.check_recursion(). Please use the '_check_recursion()' instead!" % \
        assert parent is None or parent in self._columns or parent in self._inherit_fields,\
            "The 'parent' parameter passed to check_recursion() must be None or a valid field name"
        return self._check_recursion(cr, uid, ids, context, parent)
    def _check_recursion(self, cr, uid, ids, context=None, parent=None):
        """
        Verifies that there is no loop in a hierarchical structure of records,
        by following the parent relationship using the **parent** field until a loop
        is detected or until a top-level record is found.

        :param cr: database cursor
        :param uid: current user id
        :param ids: list of ids of records to check
        :param parent: optional parent field name (default: ``self._parent_name = parent_id``)
        :return: **True** if the operation can proceed safely, or **False** if an infinite loop is detected.
        """
        # NOTE(review): this line presumably sits under an elided
        # ``if not parent:`` guard -- confirm upstream.
        parent = self._parent_name
        # must ignore 'active' flag, ir.rules, etc. => direct SQL query
        query = 'SELECT "%s" FROM "%s" WHERE id = %%s' % (parent, self._table)
        # NOTE(review): the ``for id in ids:`` loop header and the
        # ``current_id = id`` seed are elided in this excerpt.
        while current_id is not None:
            cr.execute(query, (current_id,))
            result = cr.fetchone()
            current_id = result[0] if result else None
            if current_id == id:
                # NOTE(review): loop detected -- the ``return False`` under this
                # guard, and the final ``return True``, are elided.
    def _check_m2m_recursion(self, cr, uid, ids, field_name):
        """
        Verifies that there is no loop in a hierarchical structure of records,
        by following the parent relationship using the **parent** field until a loop
        is detected or until a top-level record is found.

        :param cr: database cursor
        :param uid: current user id
        :param ids: list of ids of records to check
        :param field_name: field to check
        :return: **True** if the operation can proceed safely, or **False** if an infinite loop is detected.
        """
        field = self._all_columns.get(field_name)
        field = field.column if field else None
        if not field or field._type != 'many2many' or field._obj != self._name:
            # field must be a many2many on itself
            raise ValueError('invalid field_name: %r' % (field_name,))

        # walk the m2m relation table level by level, looking for a cycle
        query = 'SELECT distinct "%s" FROM "%s" WHERE "%s" IN %%s' % (field._id2, field._rel, field._id1)
        # NOTE(review): the seed (``ids_parent = ids[:]``), the
        # ``while ids_parent:`` loop header, ``ids_parent2 = []`` and the
        # ``j = i + cr.IN_MAX`` upper bound are elided in this excerpt.
        for i in range(0, len(ids_parent), cr.IN_MAX):
            sub_ids_parent = ids_parent[i:j]
            cr.execute(query, (tuple(sub_ids_parent),))
            ids_parent2.extend(filter(None, map(lambda x: x[0], cr.fetchall())))
        ids_parent = ids_parent2
        for i in ids_parent:
            # NOTE(review): the cycle test (``if i in ids: return False``) and
            # the final ``return True`` are elided in this excerpt.
    def _get_external_ids(self, cr, uid, ids, *args, **kwargs):
        """Retrieve the External ID(s) of any database record.

        **Synopsis**: ``_get_xml_ids(cr, uid, ids) -> { 'id': ['module.xml_id'] }``

        :return: map of ids to the list of their fully qualified External IDs
                 in the form ``module.key``, or an empty list when there's no External
                 ID for a record, e.g.::

                     { 'id': ['module.ext_id', 'module.ext_id_bis'],
                     }
        """
        ir_model_data = self.pool.get('ir.model.data')
        data_ids = ir_model_data.search(cr, uid, [('model', '=', self._name), ('res_id', 'in', ids)])
        data_results = ir_model_data.read(cr, uid, data_ids, ['module', 'name', 'res_id'])
        # can't use dict.fromkeys() as the list would be shared!
        # NOTE(review): the ``result`` initialization (one fresh list per id)
        # and the final ``return result`` are elided in this excerpt.
        for record in data_results:
            result[record['res_id']].append('%(module)s.%(name)s' % record)
    def get_external_id(self, cr, uid, ids, *args, **kwargs):
        """Retrieve the External ID of any database record, if there
        is one. This method works as a possible implementation
        for a function field, to be able to add it to any
        model object easily, referencing it as ``Model.get_external_id``.

        When multiple External IDs exist for a record, only one
        of them is returned (randomly).

        :return: map of ids to their fully qualified XML ID,
                 defaulting to an empty string when there's none
                 (to be usable as a function field), e.g.::

                     { 'id': 'module.ext_id',
                     }
        """
        results = self._get_xml_ids(cr, uid, ids)
        for k, v in results.iteritems():
            # NOTE(review): the loop body (collapse each id's list to a single
            # external id or '') and the final return are elided in this
            # excerpt.
    # backwards compatibility
    # aliases kept so pre-existing callers of the old *_xml_id names keep working
    get_xml_id = get_external_id
    _get_xml_ids = _get_external_ids
4996 def print_report(self, cr, uid, ids, name, data, context=None):
4998 Render the report `name` for the given IDs. The report must be defined
4999 for this model, not another.
5001 report = self.pool['ir.actions.report.xml']._lookup_report(cr, name)
5002 assert self._name == report.table
5003 return report.create(cr, uid, ids, data, context)
5006 def is_transient(self):
5007 """ Return whether the model is transient.
5009 See :class:`TransientModel`.
5012 return self._transient
5014 def _transient_clean_rows_older_than(self, cr, seconds):
5015 assert self._transient, "Model %s is not transient, it cannot be vacuumed!" % self._name
5016 # Never delete rows used in last 5 minutes
5017 seconds = max(seconds, 300)
5018 query = ("SELECT id FROM " + self._table + " WHERE"
5019 " COALESCE(write_date, create_date, (now() at time zone 'UTC'))::timestamp"
5020 " < ((now() at time zone 'UTC') - interval %s)")
5021 cr.execute(query, ("%s seconds" % seconds,))
5022 ids = [x[0] for x in cr.fetchall()]
5023 self.unlink(cr, SUPERUSER_ID, ids)
5025 def _transient_clean_old_rows(self, cr, max_count):
5026 # Check how many rows we have in the table
5027 cr.execute("SELECT count(*) AS row_count FROM " + self._table)
5029 if res[0][0] <= max_count:
5030 return # max not reached, nothing to do
5031 self._transient_clean_rows_older_than(cr, 300)
    def _transient_vacuum(self, cr, uid, force=False):
        """Clean the transient records.

        This unlinks old records from the transient model tables whenever the
        "_transient_max_count" or "_max_age" conditions (if any) are reached.
        Actual cleaning will happen only once every "_transient_check_time" calls.
        This means this method can be called frequently (e.g. whenever
        a new record is created).
        Example with both max_hours and max_count active:
        Suppose max_hours = 0.2 (e.g. 12 minutes), max_count = 20, there are 55 rows in the
        table, 10 created/changed in the last 5 minutes, an additional 12 created/changed between
        5 and 10 minutes ago, the rest created/changed more than 12 minutes ago.
        - age based vacuum will leave the 22 rows created/changed in the last 12 minutes
        - count based vacuum will wipe out another 12 rows. Not just 2, otherwise each addition
          would immediately cause the maximum to be reached again.
        - the 10 rows that have been created/changed the last 5 minutes will NOT be deleted
        """
        assert self._transient, "Model %s is not transient, it cannot be vacuumed!" % self._name
        _transient_check_time = 20          # arbitrary limit on vacuum executions
        self._transient_check_count += 1
        if not force and (self._transient_check_count < _transient_check_time):
            return True  # no vacuum cleaning this time
        self._transient_check_count = 0

        # Age-based expiration
        if self._transient_max_hours:
            self._transient_clean_rows_older_than(cr, self._transient_max_hours * 60 * 60)

        # Count-based expiration
        if self._transient_max_count:
            self._transient_clean_old_rows(cr, self._transient_max_count)
        # NOTE(review): a final ``return True`` appears elided in this excerpt.
def resolve_2many_commands(self, cr, uid, field_name, commands, fields=None, context=None):
    """ Serializes one2many and many2many commands into record dictionaries
    (as if all the records came from the database via a read()). This
    method is aimed at onchange methods on one2many and many2many fields.

    Because commands might be creation commands, not all record dicts
    will contain an ``id`` field. Commands matching an existing record
    will have an ``id``.

    :param field_name: name of the one2many or many2many field matching the commands
    :type field_name: str
    :param commands: one2many or many2many commands to execute on ``field_name``
    :type commands: list((int|False, int|False, dict|False))
    :param fields: list of fields to read from the database, when applicable
    :type fields: list(str)
    :returns: records in a shape similar to that returned by ``read()``
        (except records may be missing the ``id`` field if they don't exist in db)
    """
    result = []      # result (list of dict)
    record_ids = []  # ids of records to read
    updates = {}     # {id: dict} of updates on particular records

    for command in commands:
        if not isinstance(command, (list, tuple)):
            # bare id: just read the record
            record_ids.append(command)
        elif command[0] == 0:
            # CREATE: the values dict is already complete
            result.append(command[2])
        elif command[0] == 1:
            # UPDATE: read the record, then overlay the new values
            record_ids.append(command[1])
            updates.setdefault(command[1], {}).update(command[2])
        elif command[0] in (2, 3):
            # DELETE / FORGET: drop the id from the ones to read
            # (rid, not id, to avoid shadowing the builtin)
            record_ids = [rid for rid in record_ids if rid != command[1]]
        elif command[0] == 4:
            # LINK: read the existing record
            record_ids.append(command[1])
        elif command[0] == 5:
            # UNLINK ALL: reset everything accumulated so far
            result, record_ids = [], []
        elif command[0] == 6:
            # SET: replace with the provided id list
            result, record_ids = [], list(command[2])

    # read the records and apply the updates
    other_model = self.pool[self._all_columns[field_name].column._obj]
    for record in other_model.read(cr, uid, record_ids, fields=fields, context=context):
        record.update(updates.get(record['id'], {}))
        result.append(record)

    # BUGFIX: the accumulated records were built but never returned,
    # despite the documented ``:returns:`` contract
    return result
# backward-compatibility alias: older modules call the method under its
# previous name, so both must keep resolving to the same function
resolve_o2m_commands_to_record_dicts = resolve_2many_commands
def search_read(self, cr, uid, domain=None, fields=None, offset=0, limit=None, order=None, context=None):
    """
    Performs a ``search()`` followed by a ``read()``.

    :param cr: database cursor
    :param uid: current user id
    :param domain: Search domain, see ``args`` parameter in ``search()``. Defaults to an empty domain that will match all records.
    :param fields: List of fields to read, see ``fields`` parameter in ``read()``. Defaults to all fields.
    :param offset: Number of records to skip, see ``offset`` parameter in ``search()``. Defaults to 0.
    :param limit: Maximum number of records to return, see ``limit`` parameter in ``search()``. Defaults to no limit.
    :param order: Columns to sort result, see ``order`` parameter in ``search()``. Defaults to no sort.
    :param context: context arguments.
    :return: List of dictionaries containing the asked fields.
    :rtype: List of dictionaries.
    """
    record_ids = self.search(cr, uid, domain or [], offset=offset, limit=limit, order=order, context=context)
    if fields and fields == ['id']:
        # shortcut read if we only want the ids
        return [{'id': rid} for rid in record_ids]

    # read() ignores active_test, but it would forward it to any downstream search call
    # (e.g. for x2m or function fields), and this is not the desired behavior, the flag
    # was presumably only meant for the main search().
    # TODO: Move this to read() directly?
    read_ctx = dict(context or {})
    read_ctx.pop('active_test', None)

    result = self.read(cr, uid, record_ids, fields, context=read_ctx)
    if len(result) <= 1:
        # BUGFIX: short-circuit was missing its return -- nothing to reorder
        return result

    # restore the order requested by search(): read() may return the records
    # in any order
    index = dict((r['id'], r) for r in result)
    return [index[x] for x in record_ids if x in index]
def _register_hook(self, cr):
    """ Hook executed right after the registry is built; the default
    implementation does nothing. Override in subclasses to perform
    post-registry setup. """
5161 def __getattr__(self, name):
5162 if name.startswith('signal_'):
5163 signal_name = name[len('signal_'):]
5165 return (lambda *args, **kwargs:
5166 self.signal_workflow(*args, signal=signal_name, **kwargs))
5167 get = getattr(super(BaseModel, self), '__getattr__', None)
5168 if get is not None: return get(name)
5169 raise AttributeError(
5170 "'%s' object has no attribute '%s'" % (type(self).__name__, name))
# keep this import here: moving it to the top of the file would cause
# dependency-cycle errors at load time
class Model(BaseModel):
    """Main super-class for regular database-persisted OpenERP models.

    OpenERP models are created by inheriting from this class.

    The system will later instantiate the class once per database (on
    which the class' module is installed).
    """
    _register = False # not visible in ORM registry, meant to be python-inherited only
    _transient = False # True in a TransientModel
class TransientModel(BaseModel):
    """Model super-class for transient records, meant to be temporarily
    persisted, and regularly vacuum-cleaned.

    A TransientModel has a simplified access rights management,
    all users can create new records, and may only access the
    records they created. The super-user has unrestricted access
    to all TransientModel records.
    """
    _register = False # not visible in ORM registry, meant to be python-inherited only
class AbstractModel(BaseModel):
    """Abstract Model super-class for creating an abstract class meant to be
    inherited by regular models (Models or TransientModels) but not meant to
    be usable on its own, or persisted.

    Technical note: we don't want to make AbstractModel the super-class of
    Model or BaseModel because it would not make sense to put the main
    definition of persistence methods such as create() in it, and still we
    should be able to override them within an AbstractModel.
    """
    _auto = False # don't create any database backend for AbstractModels
    _register = False # not visible in ORM registry, meant to be python-inherited only
def itemgetter_tuple(items):
    """ Fixes itemgetter inconsistency (useful in some cases) of not returning
    a tuple if len(items) == 1: always returns an n-tuple where n = len(items).

    :param items: sequence of indexes/keys to extract
    :return: callable mapping a subscriptable object to a tuple of the
             extracted values (an empty tuple when ``items`` is empty)
    """
    # BUGFIX: dispatch on the number of items was missing, so the
    # single-item lambda was returned unconditionally (crashing for an
    # empty ``items`` and never reaching operator.itemgetter).
    if len(items) == 0:
        return lambda a: ()
    if len(items) == 1:
        # operator.itemgetter would return a bare value here, not a 1-tuple
        return lambda gettable: (gettable[items[0]],)
    return operator.itemgetter(*items)
class ImportWarning(Warning):
    """ Used to send warnings upwards the stack during the import process.

    NOTE: this deliberately shadows the Python builtin ImportWarning
    within this module.
    """
def convert_pgerror_23502(model, fields, info, e):
    """Convert a PostgreSQL not_null_violation (SQLSTATE 23502) into a
    user-friendly error dictionary.

    :param model: model on which the error occurred (unused here)
    :param fields: field descriptions of the model, keyed by field name
    :param info: extra import/context info (unused here)
    :param e: the original database exception
    :return: dict with a ``message`` key, plus a ``field`` key when the
             offending column could be identified
    """
    m = re.match(r'^null value in column "(?P<field>\w+)" violates '
                 r'not-null constraint\n',
                 str(e))
    # BUGFIX: check the match result BEFORE calling m.group(), otherwise a
    # non-matching message raises AttributeError on None
    if not m or m.group('field') not in fields:
        return {'message': unicode(e)}
    field_name = m.group('field')
    message = _(u"Missing required value for the field '%s'.") % field_name
    field = fields.get(field_name)
    if field:
        # prefer the human-readable field label when available
        message = _(u"Missing required value for the field '%s' (%s)") % (field['string'], field_name)
    return {
        'message': message,
        'field': field_name,
    }
def convert_pgerror_23505(model, fields, info, e):
    """Convert a PostgreSQL unique_violation (SQLSTATE 23505) into a
    user-friendly error dictionary.

    :param model: model on which the error occurred (unused here)
    :param fields: field descriptions of the model, keyed by field name
    :param info: extra import/context info (unused here)
    :param e: the original database exception
    :return: dict with a ``message`` key, plus a ``field`` key when the
             offending column could be identified
    """
    m = re.match(r'^duplicate key (?P<field>\w+) violates unique constraint',
                 str(e))
    # BUGFIX: check the match result BEFORE calling m.group(), otherwise a
    # non-matching message raises AttributeError on None
    if not m or m.group('field') not in fields:
        return {'message': unicode(e)}
    field_name = m.group('field')
    message = _(u"The value for the field '%s' already exists.") % field_name
    field = fields.get(field_name)
    if field:
        # the constraint name may match either a local field or an o2m one
        message = _(u"%s This might be '%s' in the current model, or a field "
                    u"of the same name in an o2m.") % (message, field['string'])
    return {
        'message': message,
        'field': field_name,
    }
# Mapping of PostgreSQL SQLSTATE error codes to converters that produce
# user-friendly error dictionaries; any unknown code falls back to passing
# the raw error message through unchanged.
PGERROR_TO_OE = collections.defaultdict(
    # shape of mapped converters: (model, fields, info, pgerror) -> dict
    lambda: (lambda model, fvg, info, pgerror: {'message': unicode(pgerror)}), {
    # not_null_violation
    '23502': convert_pgerror_23502,
    # unique_violation
    '23505': convert_pgerror_23505,
    })  # BUGFIX: the call/dict literal was left unterminated
5272 # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: