1 # -*- coding: utf-8 -*-
2 ##############################################################################
4 # OpenERP, Open Source Management Solution
5 # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
7 # This program is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU Affero General Public License as
9 # published by the Free Software Foundation, either version 3 of the
10 # License, or (at your option) any later version.
12 # This program is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU Affero General Public License for more details.
17 # You should have received a copy of the GNU Affero General Public License
18 # along with this program. If not, see <http://www.gnu.org/licenses/>.
20 ##############################################################################
24 Object relational mapping to database (postgresql) module
25 * Hierarchical structure
26 * Constraints consistency, validations
27 * Object meta Data depends on its status
28 * Optimised processing by complex query (multiple actions at once)
29 * Default fields value
30 * Permissions optimisation
 * Persistent object: DB postgresql
33 * Multi-level caching system
 * Two different inheritance mechanisms
36 - classicals (varchar, integer, boolean, ...)
37 - relations (one2many, many2one, many2many)
56 from collections import defaultdict
59 import dateutil.parser
61 from lxml import etree
65 import openerp.tools as tools
66 from openerp.tools.config import config
67 from openerp.tools.misc import CountingStream, DEFAULT_SERVER_DATETIME_FORMAT, DEFAULT_SERVER_DATE_FORMAT
68 from openerp.tools.safe_eval import safe_eval as eval
69 from openerp.tools.translate import _
70 from openerp import SUPERUSER_ID
71 from query import Query
73 _logger = logging.getLogger(__name__)
74 _schema = logging.getLogger(__name__ + '.schema')
76 # List of etree._Element subclasses that we choose to ignore when parsing XML.
77 from openerp.tools import SKIPPED_ELEMENT_TYPES
79 regex_order = re.compile('^( *([a-z0-9_]+|"[a-z0-9_]+")( *desc| *asc)?( *, *|))+$', re.I)
80 regex_object_name = re.compile(r'^[a-z0-9_.]+$')
82 AUTOINIT_RECALCULATE_STORED_FIELDS = 1000
def transfer_field_to_modifiers(field, modifiers):
    """Transfer a field descriptor's UI flags into a view ``modifiers`` dict.

    :param dict field: field descriptor (shape of a ``fields_get()`` entry),
                       read for its ``invisible``/``readonly``/``required``
                       flags and its per-state ``states`` overrides
    :param dict modifiers: modifiers dict, updated in place
    """
    default_values = {}
    state_exceptions = {}
    for attr in ('invisible', 'readonly', 'required'):
        state_exceptions[attr] = []
        default_values[attr] = bool(field.get(attr))
    # Collect the states in which an attribute differs from its default value.
    for state, modifs in (field.get("states", {})).items():
        for modif in modifs:
            if default_values[modif[0]] != modif[1]:
                state_exceptions[modif[0]].append(state)

    for attr, default_value in default_values.items():
        if state_exceptions[attr]:
            # Express the exceptions as a domain on the 'state' field: the
            # attribute keeps its default except in the listed states.
            modifiers[attr] = [("state", "not in" if default_value else "in", state_exceptions[attr])]
        else:
            modifiers[attr] = default_value
# Don't deal with groups, it is done by check_group().
# Need the context to evaluate the invisible attribute on tree views.
# For non-tree views, the context shouldn't be given.
def transfer_node_to_modifiers(node, modifiers, context=None, in_tree_view=False):
    """Transfer a view node's ``attrs``, ``states`` and simple UI attributes
    (``invisible``/``readonly``/``required``) into ``modifiers`` (in place).

    :param node: view architecture node (read through ``.get()``)
    :param dict modifiers: modifiers dict, updated in place
    :param dict context: evaluation context for the simple attributes
    :param bool in_tree_view: route ``invisible`` to ``tree_invisible`` (column
                              visibility) instead of cell visibility
    """
    if node.get('attrs'):
        modifiers.update(eval(node.get('attrs')))

    if node.get('states'):
        if 'invisible' in modifiers and isinstance(modifiers['invisible'], list):
            # TODO combine with AND or OR, use implicit AND for now.
            modifiers['invisible'].append(('state', 'not in', node.get('states').split(',')))
        else:
            modifiers['invisible'] = [('state', 'not in', node.get('states').split(','))]

    for a in ('invisible', 'readonly', 'required'):
        if node.get(a):
            v = bool(eval(node.get(a), {'context': context or {}}))
            if in_tree_view and a == 'invisible':
                # Invisible in a tree view has a specific meaning, make it a
                # new key in the modifiers attribute.
                modifiers['tree_invisible'] = v
            elif v or (a not in modifiers or not isinstance(modifiers[a], list)):
                # Don't set the attribute to False if a dynamic value was
                # provided (i.e. a domain from attrs or states).
                modifiers[a] = v
def simplify_modifiers(modifiers):
    """Drop falsy modifier entries: absent keys mean False on the client side,
    so serializing them would be redundant. Mutates ``modifiers`` in place."""
    for a in ('invisible', 'readonly', 'required'):
        if a in modifiers and not modifiers[a]:
            del modifiers[a]
def transfer_modifiers_to_node(modifiers, node):
    """Serialize ``modifiers`` as JSON into the node's ``modifiers`` attribute.

    Does nothing when ``modifiers`` is empty or ``node`` is not an actual
    element (e.g. a comment node).
    """
    if modifiers and isinstance(node, etree._Element):
        simplify_modifiers(modifiers)
        node.set('modifiers', simplejson.dumps(modifiers))
def setup_modifiers(node, field=None, context=None, in_tree_view=False):
    """ Processes node attributes and field descriptors to generate
    the ``modifiers`` node attribute and set it on the provided node.

    Alters its first argument in-place.

    :param node: ``field`` node from an OpenERP view
    :type node: lxml.etree._Element
    :param dict field: field descriptor corresponding to the provided node
    :param dict context: execution context used to evaluate node attributes
    :param bool in_tree_view: triggers the ``tree_invisible`` code
                              path (separate from ``invisible``): in
                              tree view there are two levels of
                              invisibility, cell content (a column is
                              present but the cell itself is not
                              displayed) with ``invisible`` and column
                              invisibility (the whole column is
                              hidden) with ``tree_invisible``.
    :returns: nothing
    """
    # Field flags first, then node attributes (node-level values may refine
    # or override the field-level defaults).
    modifiers = {}
    if field is not None:
        transfer_field_to_modifiers(field, modifiers)
    transfer_node_to_modifiers(
        node, modifiers, context=context, in_tree_view=in_tree_view)
    transfer_modifiers_to_node(modifiers, node)
def test_modifiers(what, expected):
    """Self-test helper: check that ``what`` (an XML arch string or a field
    descriptor dict) produces exactly the ``expected`` JSON modifiers string.

    :raises AssertionError: when the computed JSON differs from ``expected``
    """
    modifiers = {}
    if isinstance(what, basestring):
        node = etree.fromstring(what)
        transfer_node_to_modifiers(node, modifiers)
        simplify_modifiers(modifiers)
        json = simplejson.dumps(modifiers)
        assert json == expected, "%s != %s" % (json, expected)
    elif isinstance(what, dict):
        transfer_field_to_modifiers(what, modifiers)
        simplify_modifiers(modifiers)
        json = simplejson.dumps(modifiers)
        assert json == expected, "%s != %s" % (json, expected)
# openerp.osv.orm.modifiers_tests()
def modifiers_tests():
    """Basic sanity checks for the modifiers helpers above; meant to be run
    manually (see the comment above for the invocation path)."""
    test_modifiers('<field name="a"/>', '{}')
    test_modifiers('<field name="a" invisible="1"/>', '{"invisible": true}')
    test_modifiers('<field name="a" readonly="1"/>', '{"readonly": true}')
    test_modifiers('<field name="a" required="1"/>', '{"required": true}')
    test_modifiers('<field name="a" invisible="0"/>', '{}')
    test_modifiers('<field name="a" readonly="0"/>', '{}')
    test_modifiers('<field name="a" required="0"/>', '{}')
    test_modifiers('<field name="a" invisible="1" required="1"/>', '{"invisible": true, "required": true}') # TODO order is not guaranteed
    test_modifiers('<field name="a" invisible="1" required="0"/>', '{"invisible": true}')
    test_modifiers('<field name="a" invisible="0" required="1"/>', '{"required": true}')
    test_modifiers("""<field name="a" attrs="{'invisible': [('b', '=', 'c')]}"/>""", '{"invisible": [["b", "=", "c"]]}')

    # The dictionary is supposed to be the result of fields_get().
    test_modifiers({}, '{}')
    test_modifiers({"invisible": True}, '{"invisible": true}')
    test_modifiers({"invisible": False}, '{}')
def check_object_name(name):
    """ Check if the given name is a valid openerp object name.

    The _name attribute in osv and osv_memory object is subject to
    some restrictions. This function returns True or False whether
    the given name is allowed or not.

    TODO: this is an approximation. The goal in this approximation
    is to disallow uppercase characters (in some places, we quote
    table/column names and in other not, which leads to this kind
    of errors:

        psycopg2.ProgrammingError: relation "xxx" does not exist).

    The same restriction should apply to both osv and osv_memory
    objects for consistency.
    """
    if regex_object_name.match(name) is None:
        return False
    return True
def raise_on_invalid_object_name(name):
    """Raise an ``except_orm`` when ``name`` is not a valid model name.

    :raises except_orm: if ``check_object_name(name)`` is falsy
    """
    if not check_object_name(name):
        msg = "The _name attribute %s is not valid." % name
        # Log before raising so the problem shows up in the server log even
        # when the exception is swallowed upstream.
        _logger.error(msg)
        raise except_orm('ValueError', msg)
232 POSTGRES_CONFDELTYPES = {
def intersect(la, lb):
    """Return the elements of ``la`` that also appear in ``lb``, preserving
    the order (and duplicates) of ``la``.

    List comprehension instead of ``filter``+lambda: clearer, and always a
    list (``filter`` returns a lazy iterator on Python 3).
    """
    return [x for x in la if x in lb]
def fix_import_export_id_paths(fieldname):
    """
    Fixes the id fields in import and exports, and splits field paths
    on '/'.

    :param str fieldname: name of the field to import/export
    :return: split field name
    :rtype: list of str
    """
    # Normalize both id suffix notations into path components ('.id' for
    # database ids, ':id' for external ids), then split on the separator.
    path = re.sub(r'([^/])\.id', r'\1/.id', fieldname)
    path = re.sub(r'([^/]):id', r'\1/id', path)
    return path.split('/')
class except_orm(Exception):
    """Base ORM exception carrying a ``name`` (title/category, e.g.
    'ValueError') and a ``value`` (the actual message)."""
    def __init__(self, name, value):
        # Keep name/value as attributes: callers inspect them in addition
        # to the standard Exception.args tuple.
        self.name = name
        self.value = value
        self.args = (name, value)
262 class BrowseRecordError(Exception):
class browse_null(object):
    """ Readonly python database object browser: null object returned for
    empty relational fields. Any subscript or attribute access yields None,
    and the object is meant to behave as falsy.
    """

    def __init__(self):
        # NOTE(review): mirrors upstream OpenERP — a null record still exposes
        # an `id` attribute, set to False.
        self.id = False

    def __getitem__(self, name):
        return None

    def __getattr__(self, name):
        return None  # XXX: return self ?

    def __nonzero__(self):
        return False

    def __unicode__(self):
        return u''

    def __iter__(self):
        raise NotImplementedError("Iteration is not allowed on %s" % self)
# TODO: execute an object method on browse_record_list

class browse_record_list(list):
    """ Collection of browse objects

        Such an instance will be returned when doing a ``browse([ids..])``
        and will be iterable, yielding browse() objects
    """

    def __init__(self, lst, context=None):
        # Normalize a missing context to an empty dict so consumers can
        # always read self.context safely.
        if not context:
            context = {}
        super(browse_record_list, self).__init__(lst)
        self.context = context
311 class browse_record(object):
312 """ An object that behaves like a row of an object's table.
313 It has attributes after the columns of the corresponding object.
317 uobj = pool.get('res.users')
318 user_rec = uobj.browse(cr, uid, 104)
322 def __init__(self, cr, uid, id, table, cache, context=None,
323 list_class=browse_record_list, fields_process=None):
325 :param table: the browsed object (inherited from orm)
326 :param dict cache: a dictionary of model->field->data to be shared
327 across browse objects, thus reducing the SQL
328 read()s. It can speed up things a lot, but also be
329 disastrous if not discarded after write()/unlink()
331 :param dict context: dictionary with an optional context
333 if fields_process is None:
337 self._list_class = list_class
341 self._table = table # deprecated, use _model!
343 self._table_name = self._table._name
344 self.__logger = logging.getLogger('openerp.osv.orm.browse_record.' + self._table_name)
345 self._context = context
346 self._fields_process = fields_process
348 cache.setdefault(table._name, {})
349 self._data = cache[table._name]
351 # if not (id and isinstance(id, (int, long,))):
352 # raise BrowseRecordError(_('Wrong ID for the browse record, got %r, expected an integer.') % (id,))
353 # if not table.exists(cr, uid, id, context):
354 # raise BrowseRecordError(_('Object %s does not exists') % (self,))
356 if id not in self._data:
357 self._data[id] = {'id': id}
361 def __getitem__(self, name):
365 if name not in self._data[self._id]:
366 # build the list of fields we will fetch
368 # fetch the definition of the field which was asked for
369 if name in self._table._columns:
370 col = self._table._columns[name]
371 elif name in self._table._inherit_fields:
372 col = self._table._inherit_fields[name][2]
373 elif hasattr(self._table, str(name)):
374 attr = getattr(self._table, name)
375 if isinstance(attr, (types.MethodType, types.LambdaType, types.FunctionType)):
376 def function_proxy(*args, **kwargs):
377 if 'context' not in kwargs and self._context:
378 kwargs.update(context=self._context)
379 return attr(self._cr, self._uid, [self._id], *args, **kwargs)
380 return function_proxy
384 error_msg = "Field '%s' does not exist in object '%s'" % (name, self)
385 self.__logger.warning(error_msg)
386 if self.__logger.isEnabledFor(logging.DEBUG):
387 self.__logger.debug(''.join(traceback.format_stack()))
388 raise KeyError(error_msg)
390 prefetchable = lambda f: f._classic_write and f._prefetch and not f.groups and not f.deprecated
392 # if the field is a classic one or a many2one, we'll fetch all classic and many2one fields
393 if prefetchable(col):
394 # gen the list of "local" (ie not inherited) fields which are classic or many2one
395 field_filter = lambda x: prefetchable(x[1])
396 fields_to_fetch = filter(field_filter, self._table._columns.items())
397 # gen the list of inherited fields
398 inherits = map(lambda x: (x[0], x[1][2]), self._table._inherit_fields.items())
399 # complete the field list with the inherited fields which are classic or many2one
400 fields_to_fetch += filter(field_filter, inherits)
401 # otherwise we fetch only that field
403 fields_to_fetch = [(name, col)]
405 ids = filter(lambda id: name not in self._data[id], self._data.keys())
407 field_names = map(lambda x: x[0], fields_to_fetch)
409 field_values = self._table.read(self._cr, self._uid, ids, field_names, context=self._context, load="_classic_write")
410 except (openerp.exceptions.AccessError, except_orm):
413 # prefetching attempt failed, perhaps we're violating ACL restrictions involuntarily
414 _logger.info('Prefetching attempt for fields %s on %s failed for ids %s, re-trying just for id %s', field_names, self._model._name, ids, self._id)
416 field_values = self._table.read(self._cr, self._uid, ids, field_names, context=self._context, load="_classic_write")
418 # TODO: improve this, very slow for reports
419 if self._fields_process:
420 lang = self._context.get('lang', 'en_US') or 'en_US'
421 lang_obj_ids = self.pool.get('res.lang').search(self._cr, self._uid, [('code', '=', lang)])
423 raise Exception(_('Language with code "%s" is not defined in your system !\nDefine it through the Administration menu.') % (lang,))
424 lang_obj = self.pool.get('res.lang').browse(self._cr, self._uid, lang_obj_ids[0])
426 for field_name, field_column in fields_to_fetch:
427 if field_column._type in self._fields_process:
428 for result_line in field_values:
429 result_line[field_name] = self._fields_process[field_column._type](result_line[field_name])
430 if result_line[field_name]:
431 result_line[field_name].set_value(self._cr, self._uid, result_line[field_name], self, field_column, lang_obj)
434 # Where did those ids come from? Perhaps old entries in ir_model_dat?
435 _logger.warning("No field_values found for ids %s in %s", ids, self)
436 raise KeyError('Field %s not found in %s'%(name, self))
437 # create browse records for 'remote' objects
438 for result_line in field_values:
440 for field_name, field_column in fields_to_fetch:
441 if field_column._type == 'many2one':
442 if result_line[field_name]:
443 obj = self._table.pool[field_column._obj]
444 if isinstance(result_line[field_name], (list, tuple)):
445 value = result_line[field_name][0]
447 value = result_line[field_name]
449 # FIXME: this happen when a _inherits object
450 # overwrite a field of it parent. Need
451 # testing to be sure we got the right
452 # object and not the parent one.
453 if not isinstance(value, browse_record):
455 # In some cases the target model is not available yet, so we must ignore it,
456 # which is safe in most cases, this value will just be loaded later when needed.
457 # This situation can be caused by custom fields that connect objects with m2o without
458 # respecting module dependencies, causing relationships to be connected to soon when
459 # the target is not loaded yet.
461 new_data[field_name] = browse_record(self._cr,
462 self._uid, value, obj, self._cache,
463 context=self._context,
464 list_class=self._list_class,
465 fields_process=self._fields_process)
467 new_data[field_name] = value
469 new_data[field_name] = browse_null()
471 new_data[field_name] = browse_null()
472 elif field_column._type in ('one2many', 'many2many') and len(result_line[field_name]):
473 new_data[field_name] = self._list_class(
474 (browse_record(self._cr, self._uid, id, self._table.pool.get(field_column._obj),
475 self._cache, context=self._context, list_class=self._list_class,
476 fields_process=self._fields_process)
477 for id in result_line[field_name]),
478 context=self._context)
479 elif field_column._type == 'reference':
480 if result_line[field_name]:
481 if isinstance(result_line[field_name], browse_record):
482 new_data[field_name] = result_line[field_name]
484 ref_obj, ref_id = result_line[field_name].split(',')
485 ref_id = long(ref_id)
487 obj = self._table.pool[ref_obj]
488 new_data[field_name] = browse_record(self._cr, self._uid, ref_id, obj, self._cache, context=self._context, list_class=self._list_class, fields_process=self._fields_process)
490 new_data[field_name] = browse_null()
492 new_data[field_name] = browse_null()
494 new_data[field_name] = result_line[field_name]
495 self._data[result_line['id']].update(new_data)
497 if not name in self._data[self._id]:
498 # How did this happen? Could be a missing model due to custom fields used too soon, see above.
499 self.__logger.error("Fields to fetch: %s, Field values: %s", field_names, field_values)
500 self.__logger.error("Cached: %s, Table: %s", self._data[self._id], self._table)
501 raise KeyError(_('Unknown attribute %s in %s ') % (name, self))
502 return self._data[self._id][name]
504 def __getattr__(self, name):
509 exc_info = sys.exc_info()
510 raise AttributeError, "Got %r while trying to get attribute %s on a %s record." % (e, name, self._table._name), exc_info[2]
512 def __contains__(self, name):
513 return (name in self._table._columns) or (name in self._table._inherit_fields) or hasattr(self._table, name)
516 raise NotImplementedError("Iteration is not allowed on %s" % self)
518 def __hasattr__(self, name):
525 return "browse_record(%s, %s)" % (self._table_name, self._id)
527 def __eq__(self, other):
528 if not isinstance(other, browse_record):
530 return (self._table_name, self._id) == (other._table_name, other._id)
532 def __ne__(self, other):
533 if not isinstance(other, browse_record):
535 return (self._table_name, self._id) != (other._table_name, other._id)
537 # we need to define __unicode__ even though we've already defined __str__
538 # because we have overridden __getattr__
539 def __unicode__(self):
540 return unicode(str(self))
543 return hash((self._table_name, self._id))
548 """Force refreshing this browse_record's data and all the data of the
549 records that belong to the same cache, by emptying the cache completely,
550 preserving only the record identifiers (for prefetching optimizations).
552 for model, model_cache in self._cache.iteritems():
553 # only preserve the ids of the records that were in the cache
554 cached_ids = dict([(i, {'id': i}) for i in model_cache.keys()])
555 self._cache[model].clear()
556 self._cache[model].update(cached_ids)
def pg_varchar(size=0):
    """ Returns the VARCHAR declaration for the provided size:

    * If no size (or an empty or negative size is provided) return an
      'infinite' VARCHAR
    * Otherwise return a VARCHAR(n)

    :type int size: varchar size, optional
    :rtype: str
    """
    if size:
        if not isinstance(size, int):
            raise TypeError("VARCHAR parameter should be an int, got %s"
                            % type(size))
        if size > 0:
            return 'VARCHAR(%d)' % size
    # Zero, falsy or negative sizes all mean "unbounded".
    return 'varchar'
# Mapping of simple field classes to their PostgreSQL storage type,
# used by get_pg_type() below for direct (non-parameterized) types.
# NOTE(review): the text/html/date entries were reconstructed from upstream
# OpenERP 7 — confirm against the original file.
FIELDS_TO_PGTYPES = {
    fields.boolean: 'bool',
    fields.integer: 'int4',
    fields.text: 'text',
    fields.html: 'text',
    fields.date: 'date',
    fields.datetime: 'timestamp',
    fields.binary: 'bytea',
    fields.many2one: 'int4',
    fields.serialized: 'text',
}
def get_pg_type(f, type_override=None):
    """ Return the PostgreSQL type of a field, as a pair.

    :param fields._column f: field to get a Postgres type for
    :param type type_override: use the provided type for dispatching instead of the field's own type
    :returns: (postgres_identification_type, postgres_type_specification),
              or None when the field type is not supported
    """
    field_type = type_override or type(f)

    if field_type in FIELDS_TO_PGTYPES:
        pg_type = (FIELDS_TO_PGTYPES[field_type], FIELDS_TO_PGTYPES[field_type])
    elif issubclass(field_type, fields.float):
        # Floats with an explicit decimal precision are stored as NUMERIC.
        if f.digits:
            pg_type = ('numeric', 'NUMERIC')
        else:
            pg_type = ('float8', 'DOUBLE PRECISION')
    elif issubclass(field_type, (fields.char, fields.reference)):
        pg_type = ('varchar', pg_varchar(f.size))
    elif issubclass(field_type, fields.selection):
        if (isinstance(f.selection, list) and isinstance(f.selection[0][0], int))\
                or getattr(f, 'size', None) == -1:
            pg_type = ('int4', 'INTEGER')
        else:
            pg_type = ('varchar', pg_varchar(getattr(f, 'size', None)))
    elif issubclass(field_type, fields.function):
        if f._type == 'selection':
            pg_type = ('varchar', pg_varchar())
        else:
            # Delegate to the basic type the function field emulates.
            pg_type = get_pg_type(f, getattr(fields, f._type))
    else:
        _logger.warning('%s type not supported!', field_type)
        pg_type = None

    return pg_type
class MetaModel(type):
    """ Metaclass for the Model.

    This class is used as the metaclass for the Model class to discover
    the models defined in a module (i.e. without instantiating them).
    If the automatic discovery is not needed, it is possible to set the
    model's _register attribute to False.

    """

    # {module_name: [model_class, ...]}, filled in as model classes are defined.
    module_to_models = {}

    def __init__(self, name, bases, attrs):
        if not self._register:
            # Base/abstract classes are not registered themselves, but their
            # subclasses will be (hence flipping _register back to True).
            self._register = True
            super(MetaModel, self).__init__(name, bases, attrs)
            return

        # The (OpenERP) module name can be in the `openerp.addons` namespace
        # or not. For instance module `sale` can be imported as
        # `openerp.addons.sale` (the good way) or `sale` (for backward
        # compatibility).
        module_parts = self.__module__.split('.')
        if len(module_parts) > 2 and module_parts[0] == 'openerp' and \
                module_parts[1] == 'addons':
            module_name = self.__module__.split('.')[2]
        else:
            module_name = self.__module__.split('.')[0]
        if not hasattr(self, '_module'):
            self._module = module_name

        # Remember which models to instanciate for this module.
        self.module_to_models.setdefault(self._module, []).append(self)
# Definition of log access columns, automatically added to models if
# self._log_access is True
LOG_ACCESS_COLUMNS = {
    'create_uid': 'INTEGER REFERENCES res_users ON DELETE SET NULL',
    'create_date': 'TIMESTAMP',
    'write_uid': 'INTEGER REFERENCES res_users ON DELETE SET NULL',
    'write_date': 'TIMESTAMP'
}
# special columns automatically created by the ORM
# (list() keeps this working identically on Python 2 and Python 3,
# where dict.keys() is a view and cannot be added to a list)
MAGIC_COLUMNS = ['id'] + list(LOG_ACCESS_COLUMNS.keys())
671 class BaseModel(object):
672 """ Base class for OpenERP models.
674 OpenERP models are created by inheriting from this class' subclasses:
676 * Model: for regular database-persisted models
677 * TransientModel: for temporary data, stored in the database but automatically
678 vaccuumed every so often
679 * AbstractModel: for abstract super classes meant to be shared by multiple
680 _inheriting classes (usually Models or TransientModels)
682 The system will later instantiate the class once per database (on
683 which the class' module is installed).
685 To create a class that should not be instantiated, the _register class attribute
688 __metaclass__ = MetaModel
689 _auto = True # create database backend
690 _register = False # Set to false if the model shouldn't be automatically discovered.
697 _parent_name = 'parent_id'
698 _parent_store = False
699 _parent_order = False
706 # dict of {field:method}, with method returning the (name_get of records, {id: fold})
707 # to include in the _read_group, if grouped on this field
711 _transient = False # True in a TransientModel
714 # { 'parent_model': 'm2o_field', ... }
717 # Mapping from inherits'd field name to triple (m, r, f, n) where m is the
718 # model from which it is inherits'd, r is the (local) field towards m, f
719 # is the _column object itself, and n is the original (i.e. top-most)
722 # { 'field_name': ('parent_model', 'm2o_field_to_reach_parent',
723 # field_column_obj, origina_parent_model), ... }
726 # Mapping field name/column_info object
727 # This is similar to _inherit_fields but:
728 # 1. includes self fields,
729 # 2. uses column_info instead of a triple.
735 _sql_constraints = []
736 _protected = ['read', 'write', 'create', 'default_get', 'perm_read', 'unlink', 'fields_get', 'fields_view_get', 'search', 'name_get', 'distinct_field_get', 'name_search', 'copy', 'import_data', 'search_count', 'exists']
738 CONCURRENCY_CHECK_FIELD = '__last_update'
    # Deprecated no-op: only emits a warning (the res.log mechanism was
    # replaced by the OpenChatter notification system). Kept for backward
    # compatibility with callers.
    def log(self, cr, uid, id, message, secondary=False, context=None):
        return _logger.warning("log() is deprecated. Please use OpenChatter notification system instead of the res.log mechanism.")
    # Hook invoked when a view on this model is opened; the default
    # implementation does nothing (docstring-only body).
    def view_init(self, cr, uid, fields_list, context=None):
        """Override this method to do specific things when a view on the object is opened."""
747 def _field_create(self, cr, context=None):
748 """ Create entries in ir_model_fields for all the model's fields.
750 If necessary, also create an entry in ir_model, and if called from the
751 modules loading scheme (by receiving 'module' in the context), also
752 create entries in ir_model_data (for the model and the fields).
754 - create an entry in ir_model (if there is not already one),
755 - create an entry in ir_model_data (if there is not already one, and if
756 'module' is in the context),
757 - update ir_model_fields with the fields found in _columns
758 (TODO there is some redundancy as _columns is updated from
759 ir_model_fields in __init__).
764 cr.execute("SELECT id FROM ir_model WHERE model=%s", (self._name,))
766 cr.execute('SELECT nextval(%s)', ('ir_model_id_seq',))
767 model_id = cr.fetchone()[0]
768 cr.execute("INSERT INTO ir_model (id,model, name, info,state) VALUES (%s, %s, %s, %s, %s)", (model_id, self._name, self._description, self.__doc__, 'base'))
770 model_id = cr.fetchone()[0]
771 if 'module' in context:
772 name_id = 'model_'+self._name.replace('.', '_')
773 cr.execute('select * from ir_model_data where name=%s and module=%s', (name_id, context['module']))
775 cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, (now() at time zone 'UTC'), (now() at time zone 'UTC'), %s, %s, %s)", \
776 (name_id, context['module'], 'ir.model', model_id)
779 cr.execute("SELECT * FROM ir_model_fields WHERE model=%s", (self._name,))
781 for rec in cr.dictfetchall():
782 cols[rec['name']] = rec
784 ir_model_fields_obj = self.pool.get('ir.model.fields')
786 # sparse field should be created at the end, as it depends on its serialized field already existing
787 model_fields = sorted(self._columns.items(), key=lambda x: 1 if x[1]._type == 'sparse' else 0)
788 for (k, f) in model_fields:
790 'model_id': model_id,
793 'field_description': f.string,
795 'relation': f._obj or '',
796 'select_level': tools.ustr(f.select or 0),
797 'readonly': (f.readonly and 1) or 0,
798 'required': (f.required and 1) or 0,
799 'selectable': (f.selectable and 1) or 0,
800 'translate': (f.translate and 1) or 0,
801 'relation_field': f._fields_id if isinstance(f, fields.one2many) else '',
802 'serialization_field_id': None,
804 if getattr(f, 'serialization_field', None):
805 # resolve link to serialization_field if specified by name
806 serialization_field_id = ir_model_fields_obj.search(cr, SUPERUSER_ID, [('model','=',vals['model']), ('name', '=', f.serialization_field)])
807 if not serialization_field_id:
808 raise except_orm(_('Error'), _("Serialization field `%s` not found for sparse field `%s`!") % (f.serialization_field, k))
809 vals['serialization_field_id'] = serialization_field_id[0]
811 # When its a custom field,it does not contain f.select
812 if context.get('field_state', 'base') == 'manual':
813 if context.get('field_name', '') == k:
814 vals['select_level'] = context.get('select', '0')
815 #setting value to let the problem NOT occur next time
817 vals['select_level'] = cols[k]['select_level']
820 cr.execute('select nextval(%s)', ('ir_model_fields_id_seq',))
821 id = cr.fetchone()[0]
823 cr.execute("""INSERT INTO ir_model_fields (
824 id, model_id, model, name, field_description, ttype,
825 relation,state,select_level,relation_field, translate, serialization_field_id
827 %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s
829 id, vals['model_id'], vals['model'], vals['name'], vals['field_description'], vals['ttype'],
830 vals['relation'], 'base',
831 vals['select_level'], vals['relation_field'], bool(vals['translate']), vals['serialization_field_id']
833 if 'module' in context:
834 name1 = 'field_' + self._table + '_' + k
835 cr.execute("select name from ir_model_data where name=%s", (name1,))
837 name1 = name1 + "_" + str(id)
838 cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, (now() at time zone 'UTC'), (now() at time zone 'UTC'), %s, %s, %s)", \
839 (name1, context['module'], 'ir.model.fields', id)
842 for key, val in vals.items():
843 if cols[k][key] != vals[key]:
844 cr.execute('update ir_model_fields set field_description=%s where model=%s and name=%s', (vals['field_description'], vals['model'], vals['name']))
845 cr.execute("""UPDATE ir_model_fields SET
846 model_id=%s, field_description=%s, ttype=%s, relation=%s,
847 select_level=%s, readonly=%s ,required=%s, selectable=%s, relation_field=%s, translate=%s, serialization_field_id=%s
849 model=%s AND name=%s""", (
850 vals['model_id'], vals['field_description'], vals['ttype'],
852 vals['select_level'], bool(vals['readonly']), bool(vals['required']), bool(vals['selectable']), vals['relation_field'], bool(vals['translate']), vals['serialization_field_id'], vals['model'], vals['name']
857 # Goal: try to apply inheritance at the instanciation level and
858 # put objects in the pool var
861 def create_instance(cls, pool, cr):
862 """ Instanciate a given model.
864 This class method instanciates the class of some model (i.e. a class
865 deriving from osv or osv_memory). The class might be the class passed
866 in argument or, if it inherits from another class, a class constructed
867 by combining the two classes.
869 The ``attributes`` argument specifies which parent class attributes
872 TODO: the creation of the combined class is repeated at each call of
873 this method. This is probably unnecessary.
876 attributes = ['_columns', '_defaults', '_inherits', '_constraints',
879 parent_names = getattr(cls, '_inherit', None)
881 if isinstance(parent_names, (str, unicode)):
882 name = cls._name or parent_names
883 parent_names = [parent_names]
887 raise TypeError('_name is mandatory in case of multiple inheritance')
889 for parent_name in ((type(parent_names)==list) and parent_names or [parent_names]):
890 if parent_name not in pool:
891 raise TypeError('The model "%s" specifies an unexisting parent class "%s"\n'
892 'You may need to add a dependency on the parent class\' module.' % (name, parent_name))
893 parent_model = pool[parent_name]
894 if not getattr(cls, '_original_module', None) and name == parent_model._name:
895 cls._original_module = parent_model._original_module
896 parent_class = parent_model.__class__
899 new = copy.copy(getattr(parent_model, s, {}))
901 # Don't _inherit custom fields.
905 if hasattr(new, 'update'):
906 new.update(cls.__dict__.get(s, {}))
907 elif s=='_constraints':
908 for c in cls.__dict__.get(s, []):
910 for c2 in range(len(new)):
911 #For _constraints, we should check field and methods as well
912 if new[c2][2]==c[2] and (new[c2][0] == c[0] \
913 or getattr(new[c2][0],'__name__', True) == \
914 getattr(c[0],'__name__', False)):
915 # If new class defines a constraint with
916 # same function name, we let it override
925 new.extend(cls.__dict__.get(s, []))
928 # Keep links to non-inherited constraints, e.g. useful when exporting translations
929 nattr['_local_constraints'] = cls.__dict__.get('_constraints', [])
930 nattr['_local_sql_constraints'] = cls.__dict__.get('_sql_constraints', [])
932 cls = type(name, (cls, parent_class), dict(nattr, _register=False))
934 cls._local_constraints = getattr(cls, '_constraints', [])
935 cls._local_sql_constraints = getattr(cls, '_sql_constraints', [])
937 if not getattr(cls, '_original_module', None):
938 cls._original_module = cls._module
939 obj = object.__new__(cls)
941 if hasattr(obj, '_columns'):
942 # float fields are registry-dependent (digit attribute). Duplicate them to avoid issues.
943 for c, f in obj._columns.items():
944 if f._type == 'float':
945 obj._columns[c] = copy.copy(f)
947 obj.__init__(pool, cr)
951 """Register this model.
953 This doesn't create an instance but simply register the model
954 as being part of the module where it is defined.
959 # Set the module name (e.g. base, sale, accounting, ...) on the class.
960 module = cls.__module__.split('.')[0]
961 if not hasattr(cls, '_module'):
964 # Record this class in the list of models to instantiate for this module,
965 # managed by the metaclass.
966 module_model_list = MetaModel.module_to_models.setdefault(cls._module, [])
967 if cls not in module_model_list:
969 module_model_list.append(cls)
971 # Since we don't return an instance here, the __init__
972 # method won't be called.
# NOTE(review): many original source lines are elided from this chunk; the
# code below is kept byte-identical and only comments were added.
975 def __init__(self, pool, cr):
976 """ Initialize a model and make it part of the given registry.
978 - copy the stored fields' functions in the osv_pool,
979 - update the _columns with the fields found in ir_model_fields,
980 - ensure there is a many2one for each _inherits'd parent,
981 - update the children's _columns,
982 - give a chance to each field to initialize itself.
# Register this instance in the registry under its technical name.
985 pool.add(self._name, self)
# A model must declare _name unless it only extends another via _inherit.
988 if not self._name and not hasattr(self, '_inherit'):
989 name = type(self).__name__.split('.')[0]
990 msg = "The class %s has to have a _name attribute" % name
993 raise except_orm('ValueError', msg)
995 if not self._description:
996 self._description = self._name
# SQL table name is the model name with dots replaced by underscores.
998 self._table = self._name.replace('.', '_')
1000 if not hasattr(self, '_log_access'):
1001 # If _log_access is not specified, it is the same value as _auto.
1002 self._log_access = getattr(self, "_auto", True)
# Per-registry copy of the columns, then (re)build the _store_function
# triggers used to recompute stored function fields.
1004 self._columns = self._columns.copy()
1005 for store_field in self._columns:
1006 f = self._columns[store_field]
1007 if hasattr(f, 'digits_change'):
# Drop any previously-registered trigger for this exact (model, field)
# pair before re-adding it below.
1009 def not_this_field(stored_func):
1010 x, y, z, e, f, l = stored_func
1011 return x != self._name or y != store_field
1012 self.pool._store_function[self._name] = filter(not_this_field, self.pool._store_function.get(self._name, []))
1013 if not isinstance(f, fields.function):
# store=True shorthand: recompute for the same ids on the model itself.
1019 sm = {self._name: (lambda self, cr, uid, ids, c={}: ids, None, f.priority, None)}
1020 for object, aa in sm.items():
1022 (fnct, fields2, order, length) = aa
1024 (fnct, fields2, order) = aa
1027 raise except_orm('Error',
1028 ('Invalid function definition %s in object %s !\nYou must use the definition: store={object:(fnct, fields, priority, time length)}.' % (store_field, self._name)))
1029 self.pool._store_function.setdefault(object, [])
1030 t = (self._name, store_field, fnct, tuple(fields2) if fields2 else None, order, length)
1031 if not t in self.pool._store_function[object]:
1032 self.pool._store_function[object].append((self._name, store_field, fnct, tuple(fields2) if fields2 else None, order, length))
# Keep triggers sorted by priority (tuple slot 4).
1033 self.pool._store_function[object].sort(lambda x, y: cmp(x[4], y[4]))
# Map constraint names to their messages for friendlier SQL errors.
1035 for (key, _, msg) in self._sql_constraints:
1036 self.pool._sql_error[self._table+'_'+key] = msg
1038 # Load manual fields
1040 # Check if the query is already done for all modules or if we need to
# Custom ('manual') fields come either from the registry-level cache or
# straight from ir_model_fields.
1042 if self.pool.fields_by_model is not None:
1043 manual_fields = self.pool.fields_by_model.get(self._name, [])
1045 cr.execute('SELECT * FROM ir_model_fields WHERE model=%s AND state=%s', (self._name, 'manual'))
1046 manual_fields = cr.dictfetchall()
1047 for field in manual_fields:
1048 if field['name'] in self._columns:
# Build the column attributes from the ir_model_fields row (the `attrs = {`
# opener is elided from this chunk).
1051 'string': field['field_description'],
1052 'required': bool(field['required']),
1053 'readonly': bool(field['readonly']),
1054 'domain': eval(field['domain']) if field['domain'] else None,
1055 'size': field['size'] or None,
1056 'ondelete': field['on_delete'],
1057 'translate': (field['translate']),
1060 #'select': int(field['select_level'])
# Sparse fields are serialized inside another field; resolve its name.
1063 if field['serialization_field_id']:
1064 cr.execute('SELECT name FROM ir_model_fields WHERE id=%s', (field['serialization_field_id'],))
1065 attrs.update({'serialization_field': cr.fetchone()[0], 'type': field['ttype']})
1066 if field['ttype'] in ['many2one', 'one2many', 'many2many']:
1067 attrs.update({'relation': field['relation']})
1068 self._columns[field['name']] = fields.sparse(**attrs)
1069 elif field['ttype'] == 'selection':
1070 self._columns[field['name']] = fields.selection(eval(field['selection']), **attrs)
1071 elif field['ttype'] == 'reference':
1072 self._columns[field['name']] = fields.reference(selection=eval(field['selection']), **attrs)
1073 elif field['ttype'] == 'many2one':
1074 self._columns[field['name']] = fields.many2one(field['relation'], **attrs)
1075 elif field['ttype'] == 'one2many':
1076 self._columns[field['name']] = fields.one2many(field['relation'], field['relation_field'], **attrs)
1077 elif field['ttype'] == 'many2many':
# Synthesize a relation table name for custom m2m fields.
1078 _rel1 = field['relation'].replace('.', '_')
1079 _rel2 = field['model'].replace('.', '_')
1080 _rel_name = 'x_%s_%s_%s_rel' % (_rel1, _rel2, field['name'])
1081 self._columns[field['name']] = fields.many2many(field['relation'], _rel_name, 'id1', 'id2', **attrs)
1083 self._columns[field['name']] = getattr(fields, field['ttype'])(**attrs)
# Validate _inherits and rebuild the inherited-fields mapping; make sure
# every declared default targets an existing field.
1085 self._inherits_check()
1086 self._inherits_reload()
1087 if not self._sequence:
1088 self._sequence = self._table + '_id_seq'
1089 for k in self._defaults:
1090 assert (k in self._columns) or (k in self._inherit_fields), 'Default function defined in %s but field %s does not exist !' % (self._name, k,)
1091 for f in self._columns:
1092 self._columns[f].restart()
# TransientModel housekeeping: vacuum limits come from the server config.
1095 if self.is_transient():
1096 self._transient_check_count = 0
1097 self._transient_max_count = config.get('osv_memory_count_limit')
1098 self._transient_max_hours = config.get('osv_memory_age_limit')
1099 assert self._log_access, "TransientModels must have log_access turned on, "\
1100 "in order to implement their access rights policy"
# _rec_name, when set, must point at a real column (or 'id').
1103 if self._rec_name is not None:
1104 assert self._rec_name in self._all_columns.keys() + ['id'], "Invalid rec_name %s for model %s" % (self._rec_name, self._name)
1106 self._rec_name = 'name'
# NOTE(review): several original source lines are elided from this chunk; the
# code below is kept byte-identical and only comments were added.
1109 def __export_row(self, cr, uid, row, fields, context=None):
# check_type: map empty values to a type-appropriate placeholder (the branch
# bodies are elided here).
1113 def check_type(field_type):
1114 if field_type == 'float':
1116 elif field_type == 'integer':
1118 elif field_type == 'boolean':
# selection_field: resolve the column object by walking the _inherits chain.
# NOTE(review): the recursive call's result is not returned — looks like a
# latent bug upstream; confirm before relying on inherited selection export.
1122 def selection_field(in_field):
1123 col_obj = self.pool[in_field.keys()[0]]
1124 if f[i] in col_obj._columns.keys():
1125 return col_obj._columns[f[i]]
1126 elif f[i] in col_obj._inherits.keys():
1127 selection_field(col_obj._inherits)
# _get_xml_id: return an existing external ID for record r, or create one
# under the reserved '__export__' module.
1131 def _get_xml_id(self, cr, uid, r):
1132 model_data = self.pool.get('ir.model.data')
1133 data_ids = model_data.search(cr, uid, [('model', '=', r._model._name), ('res_id', '=', r['id'])])
1135 d = model_data.read(cr, uid, data_ids, ['name', 'module'])[0]
1137 r = '%s.%s' % (d['module'], d['name'])
1143 n = r._model._table+'_'+str(r['id']) + (postfix and ('_'+str(postfix)) or '' )
1144 if not model_data.search(cr, uid, [('name', '=', n)]):
1147 model_data.create(cr, SUPERUSER_ID, {
1149 'model': r._model._name,
1151 'module': '__export__',
# One output cell per requested field path.
1157 data = map(lambda x: '', range(len(fields)))
1159 for fpos in range(len(fields)):
1169 r = _get_xml_id(self, cr, uid, r)
1172 # To display external name of selection field when it's exported
1173 if f[i] in self._columns.keys():
1174 cols = self._columns[f[i]]
1175 elif f[i] in self._inherit_fields.keys():
1176 cols = selection_field(self._inherits)
1177 if cols and cols._type == 'selection':
1178 sel_list = cols.selection
1179 if r and type(sel_list) == type([]):
1180 r = [x[1] for x in sel_list if r==x[0]]
1181 r = r and r[0] or False
# Empty value: substitute the per-type placeholder.
1183 if f[i] in self._columns:
1184 r = check_type(self._columns[f[i]]._type)
1185 elif f[i] in self._inherit_fields:
1186 r = check_type(self._inherit_fields[f[i]][2]._type)
1187 data[fpos] = r or False
# x2many value: recurse into sub-rows using the remaining path segments.
1189 if isinstance(r, (browse_record_list, list)):
1191 fields2 = map(lambda x: (x[:i+1]==f[:i+1] and x[i+1:]) \
1194 if [x for x in fields2 if x]:
1196 done.append(fields2)
# m2m exported through '/id': emit a comma-separated list of external IDs.
1197 if cols and cols._type=='many2many' and len(fields[fpos])>(i+1) and (fields[fpos][i+1]=='id'):
1198 data[fpos] = ','.join([_get_xml_id(self, cr, uid, x) for x in r])
1202 lines2 = row2._model.__export_row(cr, uid, row2, fields2,
1205 for fpos2 in range(len(fields)):
1206 if lines2 and lines2[0][fpos2]:
1207 data[fpos2] = lines2[0][fpos2]
# No sub-fields requested: render related records via name_get, joined
# by commas (trailing comma stripped).
1211 name_relation = self.pool[rr._table_name]._rec_name
1212 if isinstance(rr[name_relation], browse_record):
1213 rr = rr[name_relation]
1214 rr_name = self.pool[rr._table_name].name_get(cr, uid, [rr.id], context=context)
1215 rr_name = rr_name and rr_name[0] and rr_name[0][1] or ''
1216 dt += tools.ustr(rr_name or '') + ','
1217 data[fpos] = dt[:-1]
# m2o value: export its display name.
1226 if isinstance(r, browse_record):
1227 r = self.pool[r._table_name].name_get(cr, uid, [r.id], context=context)
1228 r = r and r[0] and r[0][1] or ''
1229 data[fpos] = tools.ustr(r or '')
1230 return [data] + lines
# NOTE(review): some original source lines are elided from this chunk (in
# particular the `datas = []` initializer before the browse loop — confirm).
1232 def export_data(self, cr, uid, ids, fields_to_export, context=None):
1234 Export fields for selected objects
1236 :param cr: database cursor
1237 :param uid: current user id
1238 :param ids: list of ids
1239 :param fields_to_export: list of fields
1240 :param context: context arguments, like lang, time zone
1241 :rtype: dictionary with a *datas* matrix
1243 This method is used when exporting data via client menu
# Merge own and inherited columns, then normalize the requested field
# paths ('field/subfield' and 'id'/'.id' handling).
1248 cols = self._columns.copy()
1249 for f in self._inherit_fields:
1250 cols.update({f: self._inherit_fields[f][2]})
1251 fields_to_export = map(fix_import_export_id_paths, fields_to_export)
# One (or more, for x2many) exported rows per browsed record.
1253 for row in self.browse(cr, uid, ids, context):
1254 datas += self.__export_row(cr, uid, row, fields_to_export, context)
1255 return {'datas': datas}
# NOTE(review): several original source lines are elided from this chunk
# (the `.. deprecated::` marker, the `log` callback definition and the
# surrounding try block); code kept byte-identical, comments only.
1257 def import_data(self, cr, uid, fields, datas, mode='init', current_module='', noupdate=False, context=None, filename=None):
1260 Use :meth:`~load` instead
1262 Import given data in given module
1264 This method is used when importing data via client menu.
1266 Example of fields to import for a sale.order::
1269 partner_id, (=name_search)
1270 order_line/.id, (=database_id)
1272 order_line/product_id/id, (=xml id)
1273 order_line/price_unit,
1274 order_line/product_uom_qty,
1275 order_line/product_uom/id (=xml_id)
1277 This method returns a 4-tuple with the following structure::
1279 (return_code, errored_resource, error_message, unused)
1281 * The first item is a return code, it is ``-1`` in case of
1282 import error, or the last imported row number in case of success
1283 * The second item contains the record data dict that failed to import
1284 in case of error, otherwise it's 0
1285 * The third item contains an error message string in case of error,
1287 * The last item is currently unused, with no specific semantics
1289 :param fields: list of fields to import
1290 :param datas: data to import
1291 :param mode: 'init' or 'update' for record creation
1292 :param current_module: module name
1293 :param noupdate: flag for record creation
1294 :param filename: optional file to store partial import state for recovery
1295 :returns: 4-tuple in the form (return_code, errored_resource, error_message, unused)
1296 :rtype: (int, dict or 0, str or 0, str or 0)
1298 context = dict(context) if context is not None else {}
1299 context['_import_current_module'] = current_module
# Normalize field paths, then funnel extraction errors through `log`
# (the log callback definition is elided here) by raising on 'error'.
1301 fields = map(fix_import_export_id_paths, fields)
1302 ir_model_data_obj = self.pool.get('ir.model.data')
1305 if m['type'] == 'error':
1306 raise Exception(m['message'])
# Resume support: the import_partial pickle maps filename -> last
# completed row position.
1308 if config.get('import_partial') and filename:
1309 with open(config.get('import_partial'), 'rb') as partial_import_file:
1310 data = pickle.load(partial_import_file)
1311 position = data.get(filename, 0)
# Extract raw record dicts, convert them, and upsert each one through
# ir.model.data._update (which creates/updates and tracks xml_ids).
1315 for res_id, xml_id, res, info in self._convert_records(cr, uid,
1316 self._extract_records(cr, uid, fields, datas,
1317 context=context, log=log),
1318 context=context, log=log):
1319 ir_model_data_obj._update(cr, uid, self._name,
1320 current_module, res, mode=mode, xml_id=xml_id,
1321 noupdate=noupdate, res_id=res_id, context=context)
1322 position = info.get('rows', {}).get('to', 0) + 1
# Checkpoint progress to the partial-import pickle every 100 rows.
1323 if config.get('import_partial') and filename and (not (position%100)):
1324 with open(config.get('import_partial'), 'rb') as partial_import:
1325 data = pickle.load(partial_import)
1326 data[filename] = position
1327 with open(config.get('import_partial'), 'wb') as partial_import:
1328 pickle.dump(data, partial_import)
1329 if context.get('defer_parent_store_computation'):
1330 self._parent_store_compute(cr)
# Legacy error contract: -1 plus a human-readable line/message tuple.
1332 except Exception, e:
1334 return -1, {}, 'Line %d : %s' % (position + 1, tools.ustr(e)), ''
1336 if context.get('defer_parent_store_computation'):
1337 self._parent_store_compute(cr)
1338 return position, 0, 0, 0
# NOTE(review): several original source lines are elided from this chunk
# (messages/ids initialization, mode/current_module defaults, the inner try,
# the ids = False branch); code kept byte-identical, comments only.
1340 def load(self, cr, uid, fields, data, context=None):
1342 Attempts to load the data matrix, and returns a list of ids (or
1343 ``False`` if there was an error and no id could be generated) and a
1346 The ids are those of the records created and saved (in database), in
1347 the same order they were extracted from the file. They can be passed
1348 directly to :meth:`~read`
1350 :param fields: list of fields to import, at the same index as the corresponding data
1351 :type fields: list(str)
1352 :param data: row-major matrix of data to import
1353 :type data: list(list(str))
1354 :param dict context:
1355 :returns: {ids: list(int)|False, messages: [Message]}
# Outer savepoint: rolled back wholesale if any row produced an error.
1357 cr.execute('SAVEPOINT model_load')
1360 fields = map(fix_import_export_id_paths, fields)
# NOTE(review): clear_caches() presumably returns None, so binding its
# result to ModelData (used below for _update) looks like two upstream
# statements merged by the extraction — confirm against the original file.
1361 ModelData = self.pool['ir.model.data'].clear_caches()
1363 fg = self.fields_get(cr, uid, context=context)
# Per-record savepoint so one failed row doesn't poison the transaction.
1370 for id, xid, record, info in self._convert_records(cr, uid,
1371 self._extract_records(cr, uid, fields, data,
1372 context=context, log=messages.append),
1373 context=context, log=messages.append):
1375 cr.execute('SAVEPOINT model_load_save')
1376 except psycopg2.InternalError, e:
1377 # broken transaction, exit and hope the source error was
1379 if not any(message['type'] == 'error' for message in messages):
1380 messages.append(dict(info, type='error',message=
1381 u"Unknown database error: '%s'" % e))
1384 ids.append(ModelData._update(cr, uid, self._name,
1385 current_module, record, mode=mode, xml_id=xid,
1386 noupdate=noupdate, res_id=id, context=context))
1387 cr.execute('RELEASE SAVEPOINT model_load_save')
# psycopg2.Warning: record as warning and roll back the row savepoint.
1388 except psycopg2.Warning, e:
1389 messages.append(dict(info, type='warning', message=str(e)))
1390 cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
# Known database errors are translated into user messages via
# PGERROR_TO_OE, keyed by the PostgreSQL error code.
1391 except psycopg2.Error, e:
1392 messages.append(dict(
1394 **PGERROR_TO_OE[e.pgcode](self, fg, info, e)))
1395 # Failed to write, log to messages, rollback savepoint (to
1396 # avoid broken transaction) and keep going
1397 cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
1398 except Exception, e:
1399 message = (_('Unknown error during import:') +
1400 u' %s: %s' % (type(e), unicode(e)))
1401 moreinfo = _('Resolve other errors first')
1402 messages.append(dict(info, type='error',
1405 # Failed for some reason, perhaps due to invalid data supplied,
1406 # rollback savepoint and keep going
1407 cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
# Any error anywhere: undo the whole import (outer savepoint).
1408 if any(message['type'] == 'error' for message in messages):
1409 cr.execute('ROLLBACK TO SAVEPOINT model_load')
1411 return {'ids': ids, 'messages': messages}
# NOTE(review): a few original source lines are elided from this chunk
# (the closing of the docstring, the `index = 0` / loop header around the
# per-record section, and the 'from' key of the rows dict).
1412 def _extract_records(self, cr, uid, fields_, data,
1413 context=None, log=lambda a: None):
1414 """ Generates record dicts from the data sequence.
1416 The result is a generator of dicts mapping field names to raw
1417 (unconverted, unvalidated) values.
1419 For relational fields, if sub-fields were provided the value will be
1420 a list of sub-records
1422 The following sub-fields may be set on the record (by key):
1423 * None is the name_get for the record (to use with name_create/name_search)
1424 * "id" is the External ID for the record
1425 * ".id" is the Database ID for the record
1427 columns = dict((k, v.column) for k, v in self._all_columns.iteritems())
1428 # Fake columns to avoid special cases in extractor
1429 columns[None] = fields.char('rec_name')
1430 columns['id'] = fields.char('External ID')
1431 columns['.id'] = fields.integer('Database ID')
1433 # m2o fields can't be on multiple lines so exclude them from the
1434 # is_relational field rows filter, but special-case it later on to
1435 # be handled with relational fields (as it can have subfields)
1436 is_relational = lambda field: columns[field]._type in ('one2many', 'many2many', 'many2one')
# Projections over a row: cells belonging to o2m paths vs. everything else.
1437 get_o2m_values = itemgetter_tuple(
1438 [index for index, field in enumerate(fields_)
1439 if columns[field[0]]._type == 'one2many'])
1440 get_nono2m_values = itemgetter_tuple(
1441 [index for index, field in enumerate(fields_)
1442 if columns[field[0]]._type != 'one2many'])
1443 # Checks if the provided row has any non-empty non-relational field
1444 def only_o2m_values(row, f=get_nono2m_values, g=get_o2m_values):
1445 return any(g(row)) and not any(f(row))
1449 if index >= len(data): return
1452 # copy non-relational fields to record dict
1453 record = dict((field[0], value)
1454 for field, value in itertools.izip(fields_, row)
1455 if not is_relational(field[0]))
1457 # Get all following rows which have relational values attached to
1458 # the current record (no non-relational values)
1459 record_span = itertools.takewhile(
1460 only_o2m_values, itertools.islice(data, index + 1, None))
1461 # stitch record row back on for relational fields
1462 record_span = list(itertools.chain([row], record_span))
1463 for relfield in set(
1464 field[0] for field in fields_
1465 if is_relational(field[0])):
1466 column = columns[relfield]
1467 # FIXME: how to not use _obj without relying on fields_get?
1468 Model = self.pool[column._obj]
1470 # get only cells for this sub-field, should be strictly
1471 # non-empty, field path [None] is for name_get column
1472 indices, subfields = zip(*((index, field[1:] or [None])
1473 for index, field in enumerate(fields_)
1474 if field[0] == relfield))
1476 # return all rows which have at least one value for the
1477 # subfields of relfield
1478 relfield_data = filter(any, map(itemgetter_tuple(indices), record_span))
# Recurse into the comodel to extract the sub-records.
1479 record[relfield] = [subrecord
1480 for subrecord, _subinfo in Model._extract_records(
1481 cr, uid, subfields, relfield_data,
1482 context=context, log=log)]
# Yield the record plus the source-row span it was built from.
1484 yield record, {'rows': {
1486 'to': index + len(record_span) - 1
1488 index += len(record_span)
# NOTE(review): a few original source lines are elided from this chunk
# (closing of the docstring/field_names comprehension, the dbid/xid
# initialization and surrounding try/except); comments only added.
1489 def _convert_records(self, cr, uid, records,
1490 context=None, log=lambda a: None):
1491 """ Converts records from the source iterable (recursive dicts of
1492 strings) into forms which can be written to the database (via
1493 self.create or (ir.model.data)._update)
1495 :returns: a list of triplets of (id, xid, record)
1496 :rtype: list((int|None, str|None, dict))
1498 if context is None: context = {}
1499 Converter = self.pool['ir.fields.converter']
1500 columns = dict((k, v.column) for k, v in self._all_columns.iteritems())
1501 Translation = self.pool['ir.translation']
# Human-readable (translated) labels, used only for error messages.
1503 (f, (Translation._get_source(cr, uid, self._name + ',' + f, 'field',
1504 context.get('lang'))
1506 for f, column in columns.iteritems())
1508 convert = Converter.for_model(cr, uid, self, context=context)
# _log: adapt converter exceptions into message dicts for `log`.
1510 def _log(base, field, exception):
1511 type = 'warning' if isinstance(exception, Warning) else 'error'
1512 # logs the logical (not human-readable) field name for automated
1513 # processing of response, but injects human readable in message
1514 record = dict(base, type=type, field=field,
1515 message=unicode(exception.args[0]) % base)
1516 if len(exception.args) > 1 and exception.args[1]:
1517 record.update(exception.args[1])
# CountingStream tracks the current record index for error reports.
1520 stream = CountingStream(records)
1521 for record, extras in stream:
1524 # name_get/name_create
1525 if None in record: pass
# '.id' column: resolve to a database id; non-integer values are kept
# as-is (overridden id column case) and then existence-checked.
1532 dbid = int(record['.id'])
1534 # in case of overridden id column
1535 dbid = record['.id']
1536 if not self.search(cr, uid, [('id', '=', dbid)], context=context):
1539 record=stream.index,
1541 message=_(u"Unknown database identifier '%s'") % dbid))
1544 converted = convert(record, lambda field, err:\
1545 _log(dict(extras, record=stream.index, field=field_names[field]), field, err))
1547 yield dbid, xid, converted, dict(extras, record=stream.index)
def get_invalid_fields(self, cr, uid):
    """Return the names of the fields recorded as invalid by :meth:`_validate`.

    :param cr: database cursor (unused, kept for API uniformity)
    :param uid: current user id (unused, kept for API uniformity)
    :return: list of field names accumulated in ``self._invalids``
    """
    invalid_fields = self._invalids
    return [field_name for field_name in invalid_fields]
# NOTE(review): a few original source lines are elided from this chunk
# (error_msgs initialization, the try opener, the `if not valid:` guard and
# the error_msgs.append call); comments only added.
1552 def _validate(self, cr, uid, ids, context=None):
1553 context = context or {}
1554 lng = context.get('lang')
1555 trans = self.pool.get('ir.translation')
# Run every Python-level _constraints entry: (function, message, fields).
1557 for constraint in self._constraints:
1558 fun, msg, fields = constraint
1560 # We don't pass around the context here: validation code
1561 # must always yield the same results.
1562 valid = fun(self, cr, uid, ids)
# A constraint that raises is treated as failed; keep its text as detail.
1564 except Exception, e:
1565 _logger.debug('Exception while validating constraint', exc_info=True)
1567 extra_error = tools.ustr(e)
1569 # Check presence of __call__ directly instead of using
1570 # callable() because it will be deprecated as of Python 3.0
# The message may be a callable returning either a string or a
# (format_string, params) tuple; otherwise translate the static string.
1571 if hasattr(msg, '__call__'):
1572 tmp_msg = msg(self, cr, uid, ids, context=context)
1573 if isinstance(tmp_msg, tuple):
1574 tmp_msg, params = tmp_msg
1575 translated_msg = tmp_msg % params
1577 translated_msg = tmp_msg
1579 translated_msg = trans._get_source(cr, uid, self._name, 'constraint', lng, msg)
1581 translated_msg += "\n\n%s\n%s" % (_('Error details:'), extra_error)
1583 _("The field(s) `%s` failed against a constraint: %s") % (', '.join(fields), translated_msg)
# Remember which fields failed, for get_invalid_fields().
1585 self._invalids.update(fields)
1587 raise except_orm('ValidateError', '\n'.join(error_msgs))
# All constraints passed: reset the invalid-fields set.
1589 self._invalids.clear()
# NOTE(review): several original source lines are elided from this chunk
# (defaults initialization, a few else/continue branches, field_value2
# initializers, the final return); code kept byte-identical, comments only.
1591 def default_get(self, cr, uid, fields_list, context=None):
1593 Returns default values for the fields in fields_list.
1595 :param fields_list: list of fields to get the default values for (example ['field1', 'field2',])
1596 :type fields_list: list
1597 :param context: optional context dictionary - it may contain keys for specifying certain options
1598 like ``context_lang`` (language) or ``context_tz`` (timezone) to alter the results of the call.
1599 It may contain keys in the form ``default_XXX`` (where XXX is a field name), to set
1600 or override a default value for a field.
1601 A special ``bin_size`` boolean flag may also be passed in the context to request the
1602 value of all fields.binary columns to be returned as the size of the binary instead of its
1603 contents. This can also be selectively overridden by passing a field-specific flag
1604 in the form ``bin_size_XXX: True/False`` where ``XXX`` is the name of the field.
1605 Note: The ``bin_size_XXX`` form is new in OpenERP v6.0.
1606 :return: dictionary of the default values (set on the object model class, through user preferences, or in the context)
1608 # trigger view init hook
1609 self.view_init(cr, uid, fields_list, context)
1615 # get the default values for the inherited fields
1616 for t in self._inherits.keys():
1617 defaults.update(self.pool[t].default_get(cr, uid, fields_list, context))
1619 # get the default values defined in the object
1620 for f in fields_list:
1621 if f in self._defaults:
1622 if callable(self._defaults[f]):
1623 defaults[f] = self._defaults[f](self, cr, uid, context)
1625 defaults[f] = self._defaults[f]
# Property fields get their default from ir.property instead.
1627 fld_def = ((f in self._columns) and self._columns[f]) \
1628 or ((f in self._inherit_fields) and self._inherit_fields[f][2]) \
1631 if isinstance(fld_def, fields.property):
1632 property_obj = self.pool.get('ir.property')
1633 prop_value = property_obj.get(cr, uid, f, self._name, context=context)
1635 if isinstance(prop_value, (browse_record, browse_null)):
1636 defaults[f] = prop_value.id
1638 defaults[f] = prop_value
1640 if f not in defaults:
1643 # get the default values set by the user and override the default
1644 # values defined in the object
1645 ir_values_obj = self.pool.get('ir.values')
1646 res = ir_values_obj.get(cr, uid, 'default', False, [self._name])
1647 for id, field, field_value in res:
1648 if field in fields_list:
1649 fld_def = (field in self._columns) and self._columns[field] or self._inherit_fields[field][2]
# m2o user default: drop it if the referenced record no longer exists.
1650 if fld_def._type == 'many2one':
1651 obj = self.pool[fld_def._obj]
1652 if not obj.search(cr, uid, [('id', '=', field_value or False)]):
# m2m user default: keep only ids that still exist.
1654 if fld_def._type == 'many2many':
1655 obj = self.pool[fld_def._obj]
1657 for i in range(len(field_value or [])):
1658 if not obj.search(cr, uid, [('id', '=',
1661 field_value2.append(field_value[i])
1662 field_value = field_value2
# o2m user default: filter each sub-record's m2o values the same way
# (also through the comodel's inherited fields).
1663 if fld_def._type == 'one2many':
1664 obj = self.pool[fld_def._obj]
1666 for i in range(len(field_value or [])):
1667 field_value2.append({})
1668 for field2 in field_value[i]:
1669 if field2 in obj._columns.keys() and obj._columns[field2]._type == 'many2one':
1670 obj2 = self.pool[obj._columns[field2]._obj]
1671 if not obj2.search(cr, uid,
1672 [('id', '=', field_value[i][field2])]):
1674 elif field2 in obj._inherit_fields.keys() and obj._inherit_fields[field2][2]._type == 'many2one':
1675 obj2 = self.pool[obj._inherit_fields[field2][2]._obj]
1676 if not obj2.search(cr, uid,
1677 [('id', '=', field_value[i][field2])]):
1679 # TODO add test for many2many and one2many
1680 field_value2[i][field2] = field_value[i][field2]
1681 field_value = field_value2
1682 defaults[field] = field_value
1684 # get the default values from the context
1685 for key in context or {}:
1686 if key.startswith('default_') and (key[8:] in fields_list):
1687 defaults[key[8:]] = context[key]
# Return the names of all fields of this model, own columns plus the fields
# contributed by every _inherits parent (collected recursively).
# NOTE(review): the final `return res` line is elided from this chunk.
1690 def fields_get_keys(self, cr, user, context=None):
1691 res = self._columns.keys()
1692 # TODO I believe this loop can be replaced by
1693 # res.extend(self._inherit_fields.key())
1694 for parent in self._inherits:
1695 res.extend(self.pool[parent].fields_get_keys(cr, user, context))
# Pick a usable display-name field: _rec_name if it is a real column,
# otherwise the first declared column, otherwise 'id'.
# NOTE(review): the final `return rec_name` line is elided from this chunk.
1698 def _rec_name_fallback(self, cr, uid, context=None):
1699 rec_name = self._rec_name
1700 if rec_name not in self._columns:
1701 rec_name = self._columns.keys()[0] if len(self._columns.keys()) > 0 else "id"
1705 # Overload this method if you need a window title which depends on the context
# NOTE(review): the method body is elided from this chunk — hook meant to be
# overridden by models; presumably returns False by default, confirm upstream.
1707 def view_header_get(self, cr, user, view_id=None, view_type='form', context=None):
def user_has_groups(self, cr, uid, groups, context=None):
    """Check whether the user belongs to at least one of the given groups.

    Typically used to resolve the ``groups`` attribute in view and model
    definitions.

    :param str groups: comma-separated list of fully-qualified group
        external IDs, e.g. ``base.group_user,base.group_system``
    :return: True if the current user is a member of at least one of the
        listed groups, else False
    """
    Users = self.pool.get('res.users')
    for group_ext_id in groups.split(','):
        if Users.has_group(cr, uid, group_ext_id):
            return True
    return False
# NOTE(review): the `continue` for x2many fields and the final `return view`
# are elided from this chunk; code kept byte-identical, comments only.
1723 def _get_default_form_view(self, cr, user, context=None):
1724 """ Generates a default single-line form view using all fields
1725 of the current model except the m2m and o2m ones.
1727 :param cr: database cursor
1728 :param int user: user id
1729 :param dict context: connection context
1730 :returns: a form view as an lxml document
1731 :rtype: etree._Element
1733 view = etree.Element('form', string=self._description)
1734 # TODO it seems fields_get can be replaced by _all_columns (no need for translation)
# One <field> element per non-x2many field; text fields get their own line.
1735 for field, descriptor in self.fields_get(cr, user, context=context).iteritems():
1736 if descriptor['type'] in ('one2many', 'many2many'):
1738 etree.SubElement(view, 'field', name=field)
1739 if descriptor['type'] == 'text':
1740 etree.SubElement(view, 'newline')
# NOTE(review): the final `return view` line is elided from this chunk.
1743 def _get_default_search_view(self, cr, user, context=None):
1744 """ Generates a single-field search view, based on _rec_name.
1746 :param cr: database cursor
1747 :param int user: user id
1748 :param dict context: connection context
1749 :returns: a tree view as an lxml document
1750 :rtype: etree._Element
# Minimal <search> architecture exposing only the record-name field.
1752 view = etree.Element('search', string=self._description)
1753 etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))
# NOTE(review): the final `return view` line is elided from this chunk.
1756 def _get_default_tree_view(self, cr, user, context=None):
1757 """ Generates a single-field tree view, based on _rec_name.
1759 :param cr: database cursor
1760 :param int user: user id
1761 :param dict context: connection context
1762 :returns: a tree view as an lxml document
1763 :rtype: etree._Element
# Minimal <tree> architecture exposing only the record-name field.
1765 view = etree.Element('tree', string=self._description)
1766 etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))
# NOTE(review): several original source lines are elided from this chunk
# (the body of set_first_of, a break/else around the date-name scan, the
# raise opener and the final `return view`); comments only added.
1769 def _get_default_calendar_view(self, cr, user, context=None):
1770 """ Generates a default calendar view by trying to infer
1771 calendar fields from a number of pre-set attribute names
1773 :param cr: database cursor
1774 :param int user: user id
1775 :param dict context: connection context
1776 :returns: a calendar view
1777 :rtype: etree._Element
1779 def set_first_of(seq, in_, to):
1780 """Sets the first value of ``seq`` also found in ``in_`` to
1781 the ``to`` attribute of the view being closed over.
1783 Returns whether it's found a suitable value (and set it on
1784 the attribute) or not
1792 view = etree.Element('calendar', string=self._description)
1793 etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))
# A start-date field is mandatory: use _date_name or probe common names.
1795 if self._date_name not in self._columns:
1797 for dt in ['date', 'date_start', 'x_date', 'x_date_start']:
1798 if dt in self._columns:
1799 self._date_name = dt
1804 raise except_orm(_('Invalid Object Architecture!'), _("Insufficient fields for Calendar View!"))
1805 view.set('date_start', self._date_name)
# Optional color field; then either an end-date or a duration field.
1807 set_first_of(["user_id", "partner_id", "x_user_id", "x_partner_id"],
1808 self._columns, 'color')
1810 if not set_first_of(["date_stop", "date_end", "x_date_stop", "x_date_end"],
1811 self._columns, 'date_stop'):
1812 if not set_first_of(["date_delay", "planned_hours", "x_date_delay", "x_planned_hours"],
1813 self._columns, 'date_delay'):
1815 _('Invalid Object Architecture!'),
1816 _("Insufficient fields to generate a Calendar View for %s, missing a date_stop or a date_delay" % self._name))
1820 def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
1822 Get the detailed composition of the requested view like fields, model, view architecture
1824 :param view_id: id of the view or None
1825 :param view_type: type of the view to return if view_id is None ('form', tree', ...)
1826 :param toolbar: true to include contextual actions
1827 :param submenu: deprecated
1828 :return: dictionary describing the composition of the requested view (including inherited views and extensions)
1829 :raise AttributeError:
1830 * if the inherited view has unknown position to work with other than 'before', 'after', 'inside', 'replace'
1831 * if some tag other than 'position' is found in parent view
1832 :raise Invalid ArchitectureError: if there is view type other than form, tree, calendar, search etc defined on the structure
1836 View = self.pool['ir.ui.view']
1839 'model': self._name,
1840 'field_parent': False,
1843 # try to find a view_id if none provided
1845 # <view_type>_view_ref in context can be used to overrride the default view
1846 view_ref_key = view_type + '_view_ref'
1847 view_ref = context.get(view_ref_key)
1850 module, view_ref = view_ref.split('.', 1)
1851 cr.execute("SELECT res_id FROM ir_model_data WHERE model='ir.ui.view' AND module=%s AND name=%s", (module, view_ref))
1852 view_ref_res = cr.fetchone()
1854 view_id = view_ref_res[0]
1856 _logger.warning('%r requires a fully-qualified external id (got: %r for model %s). '
1857 'Please use the complete `module.view_id` form instead.', view_ref_key, view_ref,
1861 # otherwise try to find the lowest priority matching ir.ui.view
1862 view_id = View.default_view(cr, uid, self._name, view_type, context=context)
1864 # context for post-processing might be overriden
1867 # read the view with inherited views applied
1868 root_view = View.read_combined(cr, uid, view_id, fields=['id', 'name', 'field_parent', 'type', 'model', 'arch'], context=context)
1869 result['arch'] = root_view['arch']
1870 result['name'] = root_view['name']
1871 result['type'] = root_view['type']
1872 result['view_id'] = root_view['id']
1873 result['field_parent'] = root_view['field_parent']
1874 # override context fro postprocessing
1875 if root_view.get('model') != self._name:
1876 ctx = dict(context, base_model_name=root_view.get('model'))
1878 # fallback on default views methods if no ir.ui.view could be found
1880 get_func = getattr(self, '_get_default_%s_view' % view_type)
1881 arch_etree = get_func(cr, uid, context)
1882 result['arch'] = etree.tostring(arch_etree, encoding='utf-8')
1883 result['type'] = view_type
1884 result['name'] = 'default'
1885 except AttributeError:
1886 raise except_orm(_('Invalid Architecture!'), _("No default view of type '%s' could be found !") % view_type)
1888 # Apply post processing, groups and modifiers etc...
1889 xarch, xfields = View.postprocess_and_fields(cr, uid, self._name, etree.fromstring(result['arch']), view_id, context=ctx)
1890 result['arch'] = xarch
1891 result['fields'] = xfields
# Add related action information if asked
1895 toclean = ('report_sxw_content', 'report_rml_content', 'report_sxw', 'report_rml', 'report_sxw_content_data', 'report_rml_content_data')
1901 ir_values_obj = self.pool.get('ir.values')
1902 resprint = ir_values_obj.get(cr, uid, 'action', 'client_print_multi', [(self._name, False)], False, context)
1903 resaction = ir_values_obj.get(cr, uid, 'action', 'client_action_multi', [(self._name, False)], False, context)
1904 resrelate = ir_values_obj.get(cr, uid, 'action', 'client_action_relate', [(self._name, False)], False, context)
1905 resaction = [clean(action) for action in resaction if view_type == 'tree' or not action[2].get('multi')]
1906 resprint = [clean(print_) for print_ in resprint if view_type == 'tree' or not print_[2].get('multi')]
1907 #When multi="True" set it will display only in More of the list view
1908 resrelate = [clean(action) for action in resrelate
1909 if (action[2].get('multi') and view_type == 'tree') or (not action[2].get('multi') and view_type == 'form')]
1911 for x in itertools.chain(resprint, resaction, resrelate):
1912 x['string'] = x['name']
1914 result['toolbar'] = {
1916 'action': resaction,
1921 def _view_look_dom_arch(self, cr, uid, node, view_id, context=None):
1922 return self.pool['ir.ui.view'].postprocess_and_fields(
1923 cr, uid, self._name, node, view_id, context=context)
def search_count(self, cr, user, args, context=None):
    """Return the number of records matching the search domain ``args``.

    :param cr: database cursor
    :param user: current user id
    :param args: search domain (see :meth:`~.search`)
    :param context: optional context arguments, like lang, time zone
    :return: number of matching records
    """
    # This extract was truncated; the two return statements below restore
    # the standard implementation.
    res = self.search(cr, user, args, context=context, count=True)
    if isinstance(res, list):
        # Defensive: some search() overrides ignore ``count=True`` and
        # return the matching ids instead of a count.
        return len(res)
    return res
def search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False):
    """
    Search for records based on a search domain.

    :param cr: database cursor
    :param user: current user id
    :param args: list of tuples specifying the search domain [('field_name', 'operator', value), ...]. Pass an empty list to match all records.
    :param offset: optional number of results to skip in the returned values (default: 0)
    :param limit: optional max number of records to return (default: **None**)
    :param order: optional columns to sort by (default: self._order=id )
    :param context: optional context arguments, like lang, time zone
    :type context: dictionary
    :param count: optional (default: **False**), if **True**, returns only the number of records matching the criteria, not their ids
    :return: id or list of ids of records matching the criteria
    :rtype: integer or list of integers
    :raise AccessError: * if user tries to bypass access rules for read on the requested object.

    **Expressing a search domain (args)**

    Each tuple in the search domain needs to have 3 elements, in the form: **('field_name', 'operator', value)**, where:

       * **field_name** must be a valid name of field of the object model, possibly following many-to-one relationships using dot-notation, e.g 'street' or 'partner_id.country' are valid values.
       * **operator** must be a string with a valid comparison operator from this list: ``=, !=, >, >=, <, <=, like, ilike, in, not in, child_of, parent_left, parent_right``
         The semantics of most of these operators are obvious.
         The ``child_of`` operator will look for records who are children or grand-children of a given record,
         according to the semantics of this model (i.e following the relationship field named by
         ``self._parent_name``, by default ``parent_id``).
       * **value** must be a valid value to compare with the values of **field_name**, depending on its type.

    Domain criteria can be combined using 3 logical operators that can be added between tuples: '**&**' (logical AND, default), '**|**' (logical OR), '**!**' (logical NOT).
    These are **prefix** operators and the arity of the '**&**' and '**|**' operator is 2, while the arity of the '**!**' is just 1.
    Be very careful about this when you combine them the first time.

    Here is an example of searching for Partners named *ABC* from Belgium and Germany whose language is not english::

        [('name','=','ABC'),'!',('language.code','=','en_US'),'|',('country_id.code','=','be'),('country_id.code','=','de')]

    The '&' is omitted as it is the default, and of course we could have used '!=' for the language, but what this domain really represents is::

        (name is 'ABC' AND (language is NOT english) AND (country is Belgium OR Germany))
    """
    # Delegate to the private implementation, which applies access rules.
    return self._search(cr, user, args, offset=offset, limit=limit, order=order, context=context, count=count)
1975 def name_get(self, cr, user, ids, context=None):
1976 """Returns the preferred display value (text representation) for the records with the
1977 given ``ids``. By default this will be the value of the ``name`` column, unless
1978 the model implements a custom behavior.
1979 Can sometimes be seen as the inverse function of :meth:`~.name_search`, but it is not
1983 :return: list of pairs ``(id,text_repr)`` for all records with the given ``ids``.
1987 if isinstance(ids, (int, long)):
1990 if self._rec_name in self._all_columns:
1991 rec_name_column = self._all_columns[self._rec_name].column
1992 return [(r['id'], rec_name_column.as_display_name(cr, user, self, r[self._rec_name], context=context))
1993 for r in self.read(cr, user, ids, [self._rec_name],
1994 load='_classic_write', context=context)]
1995 return [(id, "%s,%s" % (self._name, id)) for id in ids]
def name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100):
    """Search for records that have a display name matching the given ``name`` pattern if compared
    with the given ``operator``, while also matching the optional search domain (``args``).
    This is used for example to provide suggestions based on a partial value for a relational
    field. Can sometimes be seen as the inverse function of :meth:`~.name_get`, but it is not
    guaranteed to be.

    This method is equivalent to calling :meth:`~.search` with a search domain based on ``name``
    and then :meth:`~.name_get` on the result of the search.

    :param str name: the name pattern to match against
    :param list args: optional search domain (see :meth:`~.search` for syntax),
                      specifying further restrictions
    :param str operator: domain operator for matching the ``name`` pattern, such as ``'like'``
                         or ``'='``
    :param int limit: optional max number of records to return
    :rtype: list
    :return: list of pairs ``(id,text_repr)`` for all matching records.
    """
    return self._name_search(cr, user, name, args, operator, context, limit)
def name_create(self, cr, uid, name, context=None):
    """Creates a new record by calling :meth:`~.create` with only one
    value provided: the name of the new record (``_rec_name`` field).
    The new record will also be initialized with any default values applicable
    to this model, or provided through the context. The usual behavior of
    :meth:`~.create` applies.
    Similarly, this method may raise an exception if the model has multiple
    required fields and some do not have default values.

    :param name: name of the record to create

    :rtype: tuple
    :return: the :meth:`~.name_get` pair value for the newly-created record.
    """
    rec_id = self.create(cr, uid, {self._rec_name: name}, context)
    return self.name_get(cr, uid, [rec_id], context)[0]
2035 # private implementation of name_search, allows passing a dedicated user for the name_get part to
2036 # solve some access rights issues
2037 def _name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100, name_get_uid=None):
2043 # optimize out the default criterion of ``ilike ''`` that matches everything
2044 if not (name == '' and operator == 'ilike'):
2045 args += [(self._rec_name, operator, name)]
2046 access_rights_uid = name_get_uid or user
2047 ids = self._search(cr, user, args, limit=limit, context=context, access_rights_uid=access_rights_uid)
2048 res = self.name_get(cr, access_rights_uid, ids, context)
def read_string(self, cr, uid, id, langs, fields=None, context=None):
    """Read the translated field labels of this model for the given ``langs``.

    :param langs: list of language codes to read
    :param fields: optional list of field names (defaults to all fields,
                   own and inherited)
    :return: dict mapping each lang to ``{'code': lang, field: label, ...}``
    """
    # This extract was truncated; loop headers, initializations and the
    # final return below restore the standard implementation.
    res = {}
    res2 = {}
    self.pool.get('ir.translation').check_access_rights(cr, uid, 'read')
    if not fields:
        fields = self._columns.keys() + self._inherit_fields.keys()
    #FIXME: collect all calls to _get_source into one SQL call.
    for lang in langs:
        res[lang] = {'code': lang}
        for f in fields:
            if f in self._columns:
                res_trans = self.pool.get('ir.translation')._get_source(cr, uid, self._name+','+f, 'field', lang)
                if res_trans:
                    res[lang][f] = res_trans
                else:
                    # no translation found: fall back to the source label
                    res[lang][f] = self._columns[f].string
    # Merge in the labels of fields inherited through _inherits parents.
    for table in self._inherits:
        cols = intersect(self._inherit_fields.keys(), fields)
        res2 = self.pool[table].read_string(cr, uid, id, langs, cols, context)
    for lang in res2:
        if lang in res:
            res[lang]['code'] = lang
        for f in res2[lang]:
            res[lang][f] = res2[lang][f]
    return res
def write_string(self, cr, uid, id, langs, vals, context=None):
    """Write translated field labels for this model in the given ``langs``.

    :param langs: list of language codes to update
    :param vals: dict mapping field names to their translated label
    :return: True
    """
    # This extract was truncated; loop headers, the ``cols`` guard and the
    # final return below restore the standard implementation.
    self.pool.get('ir.translation').check_access_rights(cr, uid, 'write')
    #FIXME: try to only call the translation in one SQL
    for lang in langs:
        for field in vals:
            if field in self._columns:
                src = self._columns[field].string
                self.pool.get('ir.translation')._set_ids(cr, uid, self._name+','+field, 'field', lang, [0], vals[field], src)
    # Propagate to _inherits parents for their own (inherited) fields.
    for table in self._inherits:
        cols = intersect(self._inherit_fields.keys(), vals)
        if cols:
            self.pool[table].write_string(cr, uid, id, langs, vals, context)
    return True
2091 def _add_missing_default_values(self, cr, uid, values, context=None):
2092 missing_defaults = []
2093 avoid_tables = [] # avoid overriding inherited values when parent is set
2094 for tables, parent_field in self._inherits.items():
2095 if parent_field in values:
2096 avoid_tables.append(tables)
2097 for field in self._columns.keys():
2098 if not field in values:
2099 missing_defaults.append(field)
2100 for field in self._inherit_fields.keys():
2101 if (field not in values) and (self._inherit_fields[field][0] not in avoid_tables):
2102 missing_defaults.append(field)
2104 if len(missing_defaults):
2105 # override defaults with the provided values, never allow the other way around
2106 defaults = self.default_get(cr, uid, missing_defaults, context)
2108 if ((dv in self._columns and self._columns[dv]._type == 'many2many') \
2109 or (dv in self._inherit_fields and self._inherit_fields[dv][2]._type == 'many2many')) \
2110 and defaults[dv] and isinstance(defaults[dv][0], (int, long)):
2111 defaults[dv] = [(6, 0, defaults[dv])]
2112 if (dv in self._columns and self._columns[dv]._type == 'one2many' \
2113 or (dv in self._inherit_fields and self._inherit_fields[dv][2]._type == 'one2many')) \
2114 and isinstance(defaults[dv], (list, tuple)) and defaults[dv] and isinstance(defaults[dv][0], dict):
2115 defaults[dv] = [(0, 0, x) for x in defaults[dv]]
2116 defaults.update(values)
def clear_caches(self):
    """ Clear the caches

    This clears the caches associated to methods decorated with
    ``tools.ormcache`` or ``tools.ormcache_multi``.
    """
    # This extract was truncated; the cache clear and the ``pass`` below
    # restore the standard implementation. Models without an _ormcache
    # are silently ignored.
    try:
        getattr(self, '_ormcache')
        self._ormcache.clear()
        self.pool._any_cache_cleared = True
    except AttributeError:
        pass
2134 def _read_group_fill_results(self, cr, uid, domain, groupby, groupby_list, aggregated_fields,
2135 read_group_result, read_group_order=None, context=None):
2136 """Helper method for filling in empty groups for all possible values of
2137 the field being grouped by"""
2139 # self._group_by_full should map groupable fields to a method that returns
2140 # a list of all aggregated values that we want to display for this field,
2141 # in the form of a m2o-like pair (key,label).
2142 # This is useful to implement kanban views for instance, where all columns
2143 # should be displayed even if they don't contain any record.
2145 # Grab the list of all groups that should be displayed, including all present groups
2146 present_group_ids = [x[groupby][0] for x in read_group_result if x[groupby]]
2147 all_groups,folded = self._group_by_full[groupby](self, cr, uid, present_group_ids, domain,
2148 read_group_order=read_group_order,
2149 access_rights_uid=openerp.SUPERUSER_ID,
2152 result_template = dict.fromkeys(aggregated_fields, False)
2153 result_template[groupby + '_count'] = 0
2154 if groupby_list and len(groupby_list) > 1:
2155 result_template['__context'] = {'group_by': groupby_list[1:]}
2157 # Merge the left_side (current results as dicts) with the right_side (all
2158 # possible values as m2o pairs). Both lists are supposed to be using the
2159 # same ordering, and can be merged in one pass.
2163 if len(groupby_list) < 2 and context.get('group_by_no_leaf'):
2166 count_attr = groupby
2167 count_attr += '_count'
2169 def append_left(left_side):
2170 grouped_value = left_side[groupby] and left_side[groupby][0]
2171 if not grouped_value in known_values:
2172 result.append(left_side)
2173 known_values[grouped_value] = left_side
2175 known_values[grouped_value].update({count_attr: left_side[count_attr]})
2176 def append_right(right_side):
2177 grouped_value = right_side[0]
2178 if not grouped_value in known_values:
2179 line = dict(result_template)
2180 line[groupby] = right_side
2181 line['__domain'] = [(groupby,'=',grouped_value)] + domain
2183 known_values[grouped_value] = line
2184 while read_group_result or all_groups:
2185 left_side = read_group_result[0] if read_group_result else None
2186 right_side = all_groups[0] if all_groups else None
2187 assert left_side is None or left_side[groupby] is False \
2188 or isinstance(left_side[groupby], (tuple,list)), \
2189 'M2O-like pair expected, got %r' % left_side[groupby]
2190 assert right_side is None or isinstance(right_side, (tuple,list)), \
2191 'M2O-like pair expected, got %r' % right_side
2192 if left_side is None:
2193 append_right(all_groups.pop(0))
2194 elif right_side is None:
2195 append_left(read_group_result.pop(0))
2196 elif left_side[groupby] == right_side:
2197 append_left(read_group_result.pop(0))
2198 all_groups.pop(0) # discard right_side
2199 elif not left_side[groupby] or not left_side[groupby][0]:
2200 # left side == "Undefined" entry, not present on right_side
2201 append_left(read_group_result.pop(0))
2203 append_right(all_groups.pop(0))
2207 r['__fold'] = folded.get(r[groupby] and r[groupby][0], False)
2210 def _read_group_prepare(self, orderby, aggregated_fields, groupby, qualified_groupby_field, query, groupby_type=None):
2212 Prepares the GROUP BY and ORDER BY terms for the read_group method. Adds the missing JOIN clause
2213 to the query if order should be computed against m2o field.
2214 :param orderby: the orderby definition in the form "%(field)s %(order)s"
2215 :param aggregated_fields: list of aggregated fields in the query
2216 :param groupby: the current groupby field name
2217 :param qualified_groupby_field: the fully qualified SQL name for the grouped field
2218 :param osv.Query query: the query under construction
2219 :param groupby_type: the type of the grouped field
2220 :return: (groupby_terms, orderby_terms)
2223 groupby_terms = [qualified_groupby_field] if groupby else []
2225 return groupby_terms, orderby_terms
2227 self._check_qorder(orderby)
2228 for order_part in orderby.split(','):
2229 order_split = order_part.split()
2230 order_field = order_split[0]
2231 if order_field == groupby:
2232 if groupby_type == 'many2one':
2233 order_clause = self._generate_order_by(order_part, query).replace('ORDER BY ', '')
2235 orderby_terms.append(order_clause)
2236 groupby_terms += [order_term.split()[0] for order_term in order_clause.split(',')]
2238 orderby_terms.append(order_part)
2239 elif order_field in aggregated_fields:
2240 orderby_terms.append(order_part)
2242 # Cannot order by a field that will not appear in the results (needs to be grouped or aggregated)
2243 _logger.warn('%s: read_group order by `%s` ignored, cannot sort on empty columns (not grouped/aggregated)',
2244 self._name, order_part)
2245 return groupby_terms, orderby_terms
def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False):
    """
    Get the list of records in list view grouped by the given ``groupby`` fields

    :param cr: database cursor
    :param uid: current user id
    :param domain: list specifying search criteria [['field_name', 'operator', 'value'], ...]
    :param list fields: list of fields present in the list view specified on the object
    :param list groupby: list of groupby descriptions by which the records will be grouped.
            A groupby description is either a field (then it will be grouped by that field)
            or a string 'field:groupby_function'. Right now, the only functions supported
            are 'day', 'week', 'month', 'quarter' or 'year', and they only make sense for
            date/datetime fields.
    :param int offset: optional number of records to skip
    :param int limit: optional max number of records to return
    :param dict context: context arguments, like lang, time zone.
    :param list orderby: optional ``order by`` specification, for
                         overriding the natural sort ordering of the
                         groups, see also :py:meth:`~osv.osv.osv.search`
                         (supported only for many2one fields currently)
    :return: list of dictionaries(one dictionary for each record) containing:

                * the values of fields grouped by the fields in ``groupby`` argument
                * __domain: list of tuples specifying the search criteria
                * __context: dictionary with argument like ``groupby``
    :rtype: [{'field_name_1': value, ...]
    :raise AccessError: * if user has no read rights on the requested object
                        * if user tries to bypass access rules for read on the requested object
    """
    # NOTE(review): this extract was heavily truncated; the guards,
    # else-branches, SQL template body and final return were restored and
    # should be double-checked against the reference implementation.
    context = context or {}
    self.check_access_rights(cr, uid, 'read')
    if not fields:
        fields = self._columns.keys()

    query = self._where_calc(cr, uid, domain, context=context)
    self._apply_ir_rules(cr, uid, query, 'read', context=context)

    # Take care of adding join(s) if groupby is an '_inherits'ed field
    groupby_list = groupby
    qualified_groupby_field = groupby
    if groupby:
        if isinstance(groupby, list):
            groupby = groupby[0]
        # 'field:function' descriptions carry the date/datetime granularity
        splitted_groupby = groupby.split(':')
        if len(splitted_groupby) == 2:
            groupby = splitted_groupby[0]
            groupby_function = splitted_groupby[1]
        else:
            groupby_function = False
        qualified_groupby_field = self._inherits_join_calc(groupby, query)

    if groupby:
        assert not groupby or groupby in fields, "Fields in 'groupby' must appear in the list of fields to read (perhaps it's missing in the list view?)"
        groupby_def = self._columns.get(groupby) or (self._inherit_fields.get(groupby) and self._inherit_fields.get(groupby)[2])
        assert groupby_def and groupby_def._classic_write, "Fields in 'groupby' must be regular database-persisted fields (no function or related fields), or function fields with store=True"

    # TODO it seems fields_get can be replaced by _all_columns (no need for translation)
    fget = self.fields_get(cr, uid, fields)
    group_by_params = {}
    select_terms = []
    groupby_type = None
    if groupby:
        if fget.get(groupby):
            groupby_type = fget[groupby]['type']
            if groupby_type in ('date', 'datetime'):
                # default to monthly grouping when no function is given
                if groupby_function:
                    interval = groupby_function
                else:
                    interval = 'month'

                if interval == 'day':
                    display_format = 'dd MMM YYYY'
                elif interval == 'week':
                    display_format = "'W'w YYYY"
                elif interval == 'month':
                    display_format = 'MMMM YYYY'
                elif interval == 'quarter':
                    display_format = 'QQQ YYYY'
                elif interval == 'year':
                    display_format = 'YYYY'

                if groupby_type == 'datetime' and context.get('tz') in pytz.all_timezones:
                    # Convert groupby result to user TZ to avoid confusion!
                    # PostgreSQL is compatible with all pytz timezone names, so we can use them
                    # directly for conversion, starting with timestamps stored in UTC.
                    timezone = context.get('tz', 'UTC')
                    qualified_groupby_field = "timezone('%s', timezone('UTC',%s))" % (timezone, qualified_groupby_field)
                qualified_groupby_field = "date_trunc('%s', %s)" % (interval, qualified_groupby_field)
            elif groupby_type == 'boolean':
                qualified_groupby_field = "coalesce(%s,false)" % qualified_groupby_field
            select_terms.append("%s as %s " % (qualified_groupby_field, groupby))
        else:
            # Don't allow arbitrary values, as this would be a SQL injection vector!
            raise except_orm(_('Invalid group_by'),
                             _('Invalid group_by specification: "%s".\nA group_by specification must be a list of valid fields.')%(groupby,))

    aggregated_fields = [
        f for f in fields
        if f not in ('id', 'sequence', groupby)
        if fget[f]['type'] in ('integer', 'float')
        if (f in self._all_columns and getattr(self._all_columns[f].column, '_classic_write'))]
    for f in aggregated_fields:
        group_operator = fget[f].get('group_operator', 'sum')
        qualified_field = self._inherits_join_calc(f, query)
        select_terms.append("%s(%s) AS %s" % (group_operator, qualified_field, f))

    order = orderby or groupby or ''
    groupby_terms, orderby_terms = self._read_group_prepare(order, aggregated_fields, groupby, qualified_groupby_field, query, groupby_type)

    from_clause, where_clause, where_clause_params = query.get_sql()
    if len(groupby_list) < 2 and context.get('group_by_no_leaf'):
        count_field = '_'
    else:
        count_field = groupby

    prefix_terms = lambda prefix, terms: (prefix + " " + ",".join(terms)) if terms else ''
    prefix_term = lambda prefix, term: ('%s %s' % (prefix, term)) if term else ''

    query = """
        SELECT min(%(table)s.id) AS id, count(%(table)s.id) AS %(count_field)s_count
               %(extra_fields)s
        FROM %(from)s
        %(where)s
        %(groupby)s
        %(orderby)s
        %(limit)s
        %(offset)s
    """ % {
        'table': self._table,
        'count_field': count_field,
        'extra_fields': prefix_terms(',', select_terms),
        'from': from_clause,
        'where': prefix_term('WHERE', where_clause),
        'groupby': prefix_terms('GROUP BY', groupby_terms),
        'orderby': prefix_terms('ORDER BY', orderby_terms),
        'limit': prefix_term('LIMIT', int(limit) if limit else None),
        'offset': prefix_term('OFFSET', int(offset) if limit else None),
    }
    cr.execute(query, where_clause_params)

    fetched_data = cr.dictfetchall()

    data_ids = []
    alldata = {}
    for r in fetched_data:
        for fld, val in r.items():
            if val is None: r[fld] = False
        alldata[r['id']] = r
        data_ids.append(r['id'])
        del r['id']

    if groupby:
        data = self.read(cr, uid, data_ids, [groupby], context=context)
        # restore order of the search as read() uses the default _order (this is only for groups, so the footprint of data should be small):
        data_dict = dict((d['id'], d[groupby] ) for d in data)
        result = [{'id': i, groupby: data_dict[i]} for i in data_ids]
    else:
        result = [{'id': i} for i in data_ids]

    for d in result:
        if groupby:
            d['__domain'] = [(groupby, '=', alldata[d['id']][groupby] or False)] + domain
            if not isinstance(groupby_list, (str, unicode)):
                if groupby or not context.get('group_by_no_leaf', False):
                    d['__context'] = {'group_by': groupby_list[1:]}
        if groupby and groupby in fget:
            groupby_type = fget[groupby]['type']
            if d[groupby] and groupby_type in ('date', 'datetime'):
                groupby_datetime = alldata[d['id']][groupby]
                if isinstance(groupby_datetime, basestring):
                    _default = datetime.datetime(1970, 1, 1) # force starts of month
                    groupby_datetime = dateutil.parser.parse(groupby_datetime, default=_default)
                tz_convert = groupby_type == 'datetime' and context.get('tz') in pytz.all_timezones
                if tz_convert:
                    groupby_datetime = pytz.timezone(context['tz']).localize(groupby_datetime)
                d[groupby] = babel.dates.format_date(
                    groupby_datetime, format=display_format, locale=context.get('lang', 'en_US'))
                domain_dt_begin = groupby_datetime
                if interval == 'quarter':
                    domain_dt_end = groupby_datetime + dateutil.relativedelta.relativedelta(months=3)
                elif interval == 'month':
                    domain_dt_end = groupby_datetime + dateutil.relativedelta.relativedelta(months=1)
                elif interval == 'week':
                    domain_dt_end = groupby_datetime + datetime.timedelta(days=7)
                elif interval == 'day':
                    domain_dt_end = groupby_datetime + datetime.timedelta(days=1)
                else:
                    domain_dt_end = groupby_datetime + dateutil.relativedelta.relativedelta(years=1)
                if tz_convert:
                    # the time boundaries were all computed in the apparent TZ of the user,
                    # so we need to convert them to UTC to have proper server-side values.
                    domain_dt_begin = domain_dt_begin.astimezone(pytz.utc)
                    domain_dt_end = domain_dt_end.astimezone(pytz.utc)
                dt_format = DEFAULT_SERVER_DATETIME_FORMAT if groupby_type == 'datetime' else DEFAULT_SERVER_DATE_FORMAT
                d['__domain'] = [(groupby, '>=', domain_dt_begin.strftime(dt_format)),
                                 (groupby, '<', domain_dt_end.strftime(dt_format))] + domain
            del alldata[d['id']][groupby]
        d.update(alldata[d['id']])
        del d['id']

    if groupby and groupby in self._group_by_full:
        result = self._read_group_fill_results(cr, uid, domain, groupby, groupby_list,
                                               aggregated_fields, result, read_group_order=order,
                                               context=context)

    return result
2454 def _inherits_join_add(self, current_model, parent_model_name, query):
2456 Add missing table SELECT and JOIN clause to ``query`` for reaching the parent table (no duplicates)
2457 :param current_model: current model object
2458 :param parent_model_name: name of the parent model for which the clauses should be added
2459 :param query: query object on which the JOIN should be added
2461 inherits_field = current_model._inherits[parent_model_name]
2462 parent_model = self.pool[parent_model_name]
2463 parent_alias, parent_alias_statement = query.add_join((current_model._table, parent_model._table, inherits_field, 'id', inherits_field), implicit=True)
def _inherits_join_calc(self, field, query):
    """
    Adds missing table select and join clause(s) to ``query`` for reaching
    the field coming from an '_inherits' parent table (no duplicates).

    :param field: name of inherited field to reach
    :param query: query object on which the JOIN should be added
    :return: qualified name of field, to be used in SELECT clause
    """
    # Walk up the _inherits chain until we reach the model that actually
    # stores the column, adding one JOIN per hop via _inherits_join_add().
    current_table = self
    parent_alias = '"%s"' % current_table._table
    while field in current_table._inherit_fields and not field in current_table._columns:
        parent_model_name = current_table._inherit_fields[field][0]
        parent_table = self.pool[parent_model_name]
        parent_alias = self._inherits_join_add(current_table, parent_model_name, query)
        current_table = parent_table
    return '%s."%s"' % (parent_alias, field)
2484 def _parent_store_compute(self, cr):
2485 if not self._parent_store:
2487 _logger.info('Computing parent left and right for table %s...', self._table)
2488 def browse_rec(root, pos=0):
2490 where = self._parent_name+'='+str(root)
2492 where = self._parent_name+' IS NULL'
2493 if self._parent_order:
2494 where += ' order by '+self._parent_order
2495 cr.execute('SELECT id FROM '+self._table+' WHERE '+where)
2497 for id in cr.fetchall():
2498 pos2 = browse_rec(id[0], pos2)
2499 cr.execute('update '+self._table+' set parent_left=%s, parent_right=%s where id=%s', (pos, pos2, root))
2501 query = 'SELECT id FROM '+self._table+' WHERE '+self._parent_name+' IS NULL'
2502 if self._parent_order:
2503 query += ' order by ' + self._parent_order
2506 for (root,) in cr.fetchall():
2507 pos = browse_rec(root, pos)
def _update_store(self, cr, f, k):
    """Recompute and store the values of stored function field ``k``
    (fields.function object ``f``) for every record of the table,
    processing ids in batches of AUTOINIT_RECALCULATE_STORED_FIELDS.
    """
    # This extract was truncated; the batching loop and the _multi
    # handling below restore the standard implementation.
    _logger.info("storing computed values of fields.function '%s'", k)
    ss = self._columns[k]._symbol_set
    update_query = 'UPDATE "%s" SET "%s"=%s WHERE id=%%s' % (self._table, k, ss[0])
    cr.execute('select id from '+self._table)
    ids_lst = map(lambda x: x[0], cr.fetchall())
    while ids_lst:
        iids = ids_lst[:AUTOINIT_RECALCULATE_STORED_FIELDS]
        ids_lst = ids_lst[AUTOINIT_RECALCULATE_STORED_FIELDS:]
        res = f.get(cr, self, iids, k, SUPERUSER_ID, {})
        for key, val in res.items():
            # multi-field functions return a dict of values per record
            if f._multi:
                val = val[k]
            # if val is a many2one, just write the ID
            if type(val) == tuple:
                val = val[0]
            if val is not False:
                cr.execute(update_query, (ss[1](val), key))
2529 def _check_selection_field_value(self, cr, uid, field, value, context=None):
2530 """Raise except_orm if value is not among the valid values for the selection field"""
2531 if self._columns[field]._type == 'reference':
2532 val_model, val_id_str = value.split(',', 1)
2535 val_id = long(val_id_str)
2539 raise except_orm(_('ValidateError'),
2540 _('Invalid value for reference field "%s.%s" (last part must be a non-zero integer): "%s"') % (self._table, field, value))
2544 if isinstance(self._columns[field].selection, (tuple, list)):
2545 if val in dict(self._columns[field].selection):
2547 elif val in dict(self._columns[field].selection(self, cr, uid, context=context)):
2549 raise except_orm(_('ValidateError'),
2550 _('The value "%s" for the field "%s.%s" is not in the selection') % (value, self._table, field))
def _check_removed_columns(self, cr, log=False):
    """Drop stale NOT NULL constraints from database columns that no longer
    have a corresponding field on the model.

    :param log: if True, also log every orphaned column found
    """
    # iterate on the database columns to drop the NOT NULL constraints
    # of fields which were required but have been removed (or will be added by another module)
    columns = [c for c in self._columns if not (isinstance(self._columns[c], fields.function) and not self._columns[c].store)]
    columns += MAGIC_COLUMNS
    # Fix: removed a stray trailing comma after this call (it turned the
    # statement into a pointless 1-tuple expression).
    cr.execute("SELECT a.attname, a.attnotnull"
               " FROM pg_class c, pg_attribute a"
               " WHERE c.relname=%s"
               " AND c.oid=a.attrelid"
               " AND a.attisdropped=%s"
               " AND pg_catalog.format_type(a.atttypid, a.atttypmod) NOT IN ('cid', 'tid', 'oid', 'xid')"
               " AND a.attname NOT IN %s", (self._table, False, tuple(columns)))

    for column in cr.dictfetchall():
        if log:
            _logger.debug("column %s is in the table %s but not in the corresponding object %s",
                          column['attname'], self._table, self._name)
        if column['attnotnull']:
            cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, column['attname']))
            _schema.debug("Table '%s': column '%s': dropped NOT NULL constraint",
                          self._table, column['attname'])
2574 def _save_constraint(self, cr, constraint_name, type):
2576 Record the creation of a constraint for this model, to make it possible
2577 to delete it later when the module is uninstalled. Type can be either
2578 'f' or 'u' depending on the constraint being a foreign key or not.
2580 if not self._module:
2581 # no need to save constraints for custom models as they're not part
2584 assert type in ('f', 'u')
2586 SELECT 1 FROM ir_model_constraint, ir_module_module
2587 WHERE ir_model_constraint.module=ir_module_module.id
2588 AND ir_model_constraint.name=%s
2589 AND ir_module_module.name=%s
2590 """, (constraint_name, self._module))
2593 INSERT INTO ir_model_constraint
2594 (name, date_init, date_update, module, model, type)
2595 VALUES (%s, now() AT TIME ZONE 'UTC', now() AT TIME ZONE 'UTC',
2596 (SELECT id FROM ir_module_module WHERE name=%s),
2597 (SELECT id FROM ir_model WHERE model=%s), %s)""",
2598 (constraint_name, self._module, self._name, type))
2600 def _save_relation_table(self, cr, relation_table):
2602 Record the creation of a many2many for this model, to make it possible
2603 to delete it later when the module is uninstalled.
2606 SELECT 1 FROM ir_model_relation, ir_module_module
2607 WHERE ir_model_relation.module=ir_module_module.id
2608 AND ir_model_relation.name=%s
2609 AND ir_module_module.name=%s
2610 """, (relation_table, self._module))
2612 cr.execute("""INSERT INTO ir_model_relation (name, date_init, date_update, module, model)
2613 VALUES (%s, now() AT TIME ZONE 'UTC', now() AT TIME ZONE 'UTC',
2614 (SELECT id FROM ir_module_module WHERE name=%s),
2615 (SELECT id FROM ir_model WHERE model=%s))""",
2616 (relation_table, self._module, self._name))
# checked version: for direct m2o starting from `self`
def _m2o_add_foreign_key_checked(self, source_field, dest_model, ondelete):
    """Queue a foreign key from ``self._table``.``source_field`` towards
    ``dest_model``'s table, enforcing the transient-model restriction."""
    assert self.is_transient() or not dest_model.is_transient(), \
        'Many2One relationships from non-transient Model to TransientModel are forbidden'
    if self.is_transient() and not dest_model.is_transient():
        # TransientModel relationships to regular Models are annoying
        # usually because they could block deletion due to the FKs.
        # So unless stated otherwise we default them to ondelete=cascade.
        if not ondelete:
            ondelete = 'cascade'
    fk = (self._table, source_field, dest_model._table, ondelete or 'set null')
    self._foreign_keys.add(fk)
    _schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s", *fk)
2631 # unchecked version: for custom cases, such as m2m relationships
2632 def _m2o_add_foreign_key_unchecked(self, source_table, source_field, dest_model, ondelete):
2633 fk_def = (source_table, source_field, dest_model._table, ondelete or 'set null')
2634 self._foreign_keys.add(fk_def)
2635 _schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s", *fk_def)
2637 def _drop_constraint(self, cr, source_table, constraint_name):
2638 cr.execute("ALTER TABLE %s DROP CONSTRAINT %s" % (source_table,constraint_name))
2640 def _m2o_fix_foreign_key(self, cr, source_table, source_field, dest_model, ondelete):
2641 # Find FK constraint(s) currently established for the m2o field,
2642 # and see whether they are stale or not
2643 cr.execute("""SELECT confdeltype as ondelete_rule, conname as constraint_name,
2644 cl2.relname as foreign_table
2645 FROM pg_constraint as con, pg_class as cl1, pg_class as cl2,
2646 pg_attribute as att1, pg_attribute as att2
2647 WHERE con.conrelid = cl1.oid
2648 AND cl1.relname = %s
2649 AND con.confrelid = cl2.oid
2650 AND array_lower(con.conkey, 1) = 1
2651 AND con.conkey[1] = att1.attnum
2652 AND att1.attrelid = cl1.oid
2653 AND att1.attname = %s
2654 AND array_lower(con.confkey, 1) = 1
2655 AND con.confkey[1] = att2.attnum
2656 AND att2.attrelid = cl2.oid
2657 AND att2.attname = %s
2658 AND con.contype = 'f'""", (source_table, source_field, 'id'))
2659 constraints = cr.dictfetchall()
2661 if len(constraints) == 1:
2662 # Is it the right constraint?
2664 if self.is_transient() and not dest_model.is_transient():
2665 # transient foreign keys are added as cascade by default
2666 ondelete = ondelete or 'cascade'
2667 if cons['ondelete_rule'] != POSTGRES_CONFDELTYPES.get((ondelete or 'set null').upper(), 'a')\
2668 or cons['foreign_table'] != dest_model._table:
2669 # Wrong FK: drop it and recreate
2670 _schema.debug("Table '%s': dropping obsolete FK constraint: '%s'",
2671 source_table, cons['constraint_name'])
2672 self._drop_constraint(cr, source_table, cons['constraint_name'])
2674 # it's all good, nothing to do!
2677 # Multiple FKs found for the same field, drop them all, and re-create
2678 for cons in constraints:
2679 _schema.debug("Table '%s': dropping duplicate FK constraints: '%s'",
2680 source_table, cons['constraint_name'])
2681 self._drop_constraint(cr, source_table, cons['constraint_name'])
2683 # (re-)create the FK
2684 self._m2o_add_foreign_key_checked(source_field, dest_model, ondelete)
2688 def _auto_init(self, cr, context=None):
# NOTE(review): mangled extract -- the leading numbers are residue of the
# original file's line numbering, and several intermediate source lines
# (guards, try/except, blanks, the final `return todo_end`) are missing.
# Kept byte-identical; only review comments added below.
2691 Call _field_create and, unless _auto is False:
2693 - create the corresponding table in database for the model,
2694 - possibly add the parent columns in database,
2695 - possibly add the columns 'create_uid', 'create_date', 'write_uid',
2696 'write_date' in database if _log_access is True (the default),
2697 - report on database columns no more existing in _columns,
2698 - remove no more existing not null constraints,
2699 - alter existing database columns to match _columns,
2700 - create database tables to match _columns,
2701 - add database indices to match _columns,
2702 - save in self._foreign_keys a list a foreign keys to create (see
2706 self._foreign_keys = set()
2707 raise_on_invalid_object_name(self._name)
2710 store_compute = False
2712 update_custom_fields = context.get('update_custom_fields', False)
2713 self._field_create(cr, context=context)
2714 create = not self._table_exist(cr)
2718 self._create_table(cr)
# Hierarchy support: ensure parent_left/parent_right exist for _parent_store.
2721 if self._parent_store:
2722 if not self._parent_columns_exist(cr):
2723 self._create_parent_columns(cr)
2724 store_compute = True
2726 # Create the create_uid, create_date, write_uid, write_date, columns if desired.
2727 if self._log_access:
2728 self._add_log_columns(cr)
2730 self._check_removed_columns(cr, log=False)
2732 # iterate on the "object columns"
2733 column_data = self._select_column_data(cr)
# Walk every declared column and reconcile it with the database schema.
2735 for k, f in self._columns.iteritems():
2736 if k in MAGIC_COLUMNS:
2738 # Don't update custom (also called manual) fields
2739 if f.manual and not update_custom_fields:
2742 if isinstance(f, fields.one2many):
2743 self._o2m_raise_on_missing_reference(cr, f)
2745 elif isinstance(f, fields.many2many):
2746 self._m2m_raise_or_create_relation(cr, f)
2749 res = column_data.get(k)
2751 # The field is not found as-is in database, try if it
2752 # exists with an old name.
2753 if not res and hasattr(f, 'oldname'):
2754 res = column_data.get(f.oldname)
2756 cr.execute('ALTER TABLE "%s" RENAME "%s" TO "%s"' % (self._table, f.oldname, k))
2758 column_data[k] = res
2759 _schema.debug("Table '%s': renamed column '%s' to '%s'",
2760 self._table, f.oldname, k)
2762 # The field already exists in database. Possibly
2763 # change its type, rename it, drop it or change its
2766 f_pg_type = res['typname']
2767 f_pg_size = res['size']
2768 f_pg_notnull = res['attnotnull']
# Non-stored function fields keep no DB column: drop a leftover one.
2769 if isinstance(f, fields.function) and not f.store and\
2770 not getattr(f, 'nodrop', False):
2771 _logger.info('column %s (%s) in table %s removed: converted to a function !\n',
2772 k, f.string, self._table)
2773 cr.execute('ALTER TABLE "%s" DROP COLUMN "%s" CASCADE' % (self._table, k))
2775 _schema.debug("Table '%s': dropped column '%s' with cascade",
2779 f_obj_type = get_pg_type(f) and get_pg_type(f)[0]
# Supported in-place type migrations: (pg type, orm type, new pg type, cast).
2784 ('text', 'char', pg_varchar(f.size), '::%s' % pg_varchar(f.size)),
2785 ('varchar', 'text', 'TEXT', ''),
2786 ('int4', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
2787 ('date', 'datetime', 'TIMESTAMP', '::TIMESTAMP'),
2788 ('timestamp', 'date', 'date', '::date'),
2789 ('numeric', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
2790 ('float8', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
# varchar size grew: widen in place; fall back to copy-through-temp-column
# when a dependent view blocks the ALTER (view is recreated later).
2792 if f_pg_type == 'varchar' and f._type == 'char' and f_pg_size and (f.size is None or f_pg_size < f.size):
2794 with cr.savepoint():
2795 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" TYPE %s' % (self._table, k, pg_varchar(f.size)))
2796 except psycopg2.NotSupportedError:
2797 # In place alter table cannot be done because a view is depending of this field.
2798 # Do a manual copy. This will drop the view (that will be recreated later)
2799 cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO temp_change_size' % (self._table, k))
2800 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, pg_varchar(f.size)))
2801 cr.execute('UPDATE "%s" SET "%s"=temp_change_size::%s' % (self._table, k, pg_varchar(f.size)))
2802 cr.execute('ALTER TABLE "%s" DROP COLUMN temp_change_size CASCADE' % (self._table,))
2804 _schema.debug("Table '%s': column '%s' (type varchar) changed size from %s to %s",
2805 self._table, k, f_pg_size or 'unlimited', f.size or 'unlimited')
2807 if (f_pg_type==c[0]) and (f._type==c[1]):
2808 if f_pg_type != f_obj_type:
2810 cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO __temp_type_cast' % (self._table, k))
2811 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, c[2]))
2812 cr.execute(('UPDATE "%s" SET "%s"= __temp_type_cast'+c[3]) % (self._table, k))
2813 cr.execute('ALTER TABLE "%s" DROP COLUMN __temp_type_cast CASCADE' % (self._table,))
2815 _schema.debug("Table '%s': column '%s' changed type from %s to %s",
2816 self._table, k, c[0], c[1])
# Unmappable type change: move old data aside into k_moved<i> and recreate.
2819 if f_pg_type != f_obj_type:
2823 newname = k + '_moved' + str(i)
2824 cr.execute("SELECT count(1) FROM pg_class c,pg_attribute a " \
2825 "WHERE c.relname=%s " \
2826 "AND a.attname=%s " \
2827 "AND c.oid=a.attrelid ", (self._table, newname))
2828 if not cr.fetchone()[0]:
2832 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, k))
2833 cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO "%s"' % (self._table, k, newname))
2834 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, get_pg_type(f)[1]))
2835 cr.execute("COMMENT ON COLUMN %s.\"%s\" IS %%s" % (self._table, k), (f.string,))
2836 _schema.debug("Table '%s': column '%s' has changed type (DB=%s, def=%s), data moved to column %s !",
2837 self._table, k, f_pg_type, f._type, newname)
2839 # if the field is required and hasn't got a NOT NULL constraint
2840 if f.required and f_pg_notnull == 0:
2841 # set the field to the default value if any
2842 if k in self._defaults:
2843 if callable(self._defaults[k]):
2844 default = self._defaults[k](self, cr, SUPERUSER_ID, context)
2846 default = self._defaults[k]
2848 if default is not None:
2849 ss = self._columns[k]._symbol_set
2850 query = 'UPDATE "%s" SET "%s"=%s WHERE "%s" is NULL' % (self._table, k, ss[0], k)
2851 cr.execute(query, (ss[1](default),))
2852 # add the NOT NULL constraint
2855 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k), log_exceptions=False)
2857 _schema.debug("Table '%s': column '%s': added NOT NULL constraint",
2860 msg = "Table '%s': unable to set a NOT NULL constraint on column '%s' !\n"\
2861 "If you want to have it, you should update the records and execute manually:\n"\
2862 "ALTER TABLE %s ALTER COLUMN %s SET NOT NULL"
2863 _schema.warning(msg, self._table, k, self._table, k)
2865 elif not f.required and f_pg_notnull == 1:
2866 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, k))
2868 _schema.debug("Table '%s': column '%s': dropped NOT NULL constraint",
# Reconcile the column's b-tree index with the field's `select` flag.
2871 indexname = '%s_%s_index' % (self._table, k)
2872 cr.execute("SELECT indexname FROM pg_indexes WHERE indexname = %s and tablename = %s", (indexname, self._table))
2873 res2 = cr.dictfetchall()
2874 if not res2 and f.select:
2875 cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (self._table, k, self._table, k))
2877 if f._type == 'text':
2878 # FIXME: for fields.text columns we should try creating GIN indexes instead (seems most suitable for an ERP context)
2879 msg = "Table '%s': Adding (b-tree) index for %s column '%s'."\
2880 "This is probably useless (does not work for fulltext search) and prevents INSERTs of long texts"\
2881 " because there is a length limit for indexable btree values!\n"\
2882 "Use a search view instead if you simply want to make the field searchable."
2883 _schema.warning(msg, self._table, f._type, k)
2884 if res2 and not f.select:
2885 cr.execute('DROP INDEX "%s_%s_index"' % (self._table, k))
2887 msg = "Table '%s': dropping index for column '%s' of type '%s' as it is not required anymore"
2888 _schema.debug(msg, self._table, k, f._type)
# Keep the FK behind this many2one in sync with its declared ondelete rule.
2890 if isinstance(f, fields.many2one) or (isinstance(f, fields.function) and f._type == 'many2one' and f.store):
2891 dest_model = self.pool[f._obj]
2892 if dest_model._table != 'ir_actions':
2893 self._m2o_fix_foreign_key(cr, self._table, k, dest_model, f.ondelete)
2895 # The field doesn't exist in database. Create it if necessary.
2897 if not isinstance(f, fields.function) or f.store:
2898 # add the missing field
2899 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, get_pg_type(f)[1]))
2900 cr.execute("COMMENT ON COLUMN %s.\"%s\" IS %%s" % (self._table, k), (f.string,))
2901 _schema.debug("Table '%s': added column '%s' with definition=%s",
2902 self._table, k, get_pg_type(f)[1])
2905 if not create and k in self._defaults:
2906 if callable(self._defaults[k]):
2907 default = self._defaults[k](self, cr, SUPERUSER_ID, context)
2909 default = self._defaults[k]
2911 ss = self._columns[k]._symbol_set
2912 query = 'UPDATE "%s" SET "%s"=%s' % (self._table, k, ss[0])
2913 cr.execute(query, (ss[1](default),))
2915 _logger.debug("Table '%s': setting default value of new column %s", self._table, k)
2917 # remember the functions to call for the stored fields
2918 if isinstance(f, fields.function):
2920 if f.store is not True: # i.e. if f.store is a dict
2921 order = f.store[f.store.keys()[0]][2]
2922 todo_end.append((order, self._update_store, (f, k)))
2924 # and add constraints if needed
2925 if isinstance(f, fields.many2one) or (isinstance(f, fields.function) and f._type == 'many2one' and f.store):
2926 if f._obj not in self.pool:
2927 raise except_orm('Programming Error', 'There is no reference available for %s' % (f._obj,))
2928 dest_model = self.pool[f._obj]
2929 ref = dest_model._table
2930 # ir_actions is inherited so foreign key doesn't work on it
2931 if ref != 'ir_actions':
2932 self._m2o_add_foreign_key_checked(k, dest_model, f.ondelete)
2934 cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (self._table, k, self._table, k))
2938 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k), log_exceptions=False)
2939 _schema.debug("Table '%s': column '%s': added a NOT NULL constraint",
2942 msg = "WARNING: unable to set column %s of table %s not null !\n"\
2943 "Try to re-run: openerp-server --update=module\n"\
2944 "If it doesn't work, update records and execute manually:\n"\
2945 "ALTER TABLE %s ALTER COLUMN %s SET NOT NULL"
2946 _logger.warning(msg, k, self._table, self._table, k)
# Re-evaluate table existence after DDL, then apply SQL constraints and _sql.
2950 cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (self._table,))
2951 create = not bool(cr.fetchone())
2953 cr.commit() # start a new transaction
2956 self._add_sql_constraints(cr)
2959 self._execute_sql(cr)
2962 self._parent_store_compute(cr)
2967 def _auto_end(self, cr, context=None):
2968 """ Create the foreign keys recorded by _auto_init. """
2969 for t, k, r, d in self._foreign_keys:
2970 cr.execute('ALTER TABLE "%s" ADD FOREIGN KEY ("%s") REFERENCES "%s" ON DELETE %s' % (t, k, r, d))
2971 self._save_constraint(cr, "%s_%s_fkey" % (t, k), 'f')
2973 del self._foreign_keys
2976 def _table_exist(self, cr):
2977 cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (self._table,))
2981 def _create_table(self, cr):
2982 cr.execute('CREATE TABLE "%s" (id SERIAL NOT NULL, PRIMARY KEY(id))' % (self._table,))
2983 cr.execute(("COMMENT ON TABLE \"%s\" IS %%s" % self._table), (self._description,))
2984 _schema.debug("Table '%s': created", self._table)
2987 def _parent_columns_exist(self, cr):
2988 cr.execute("""SELECT c.relname
2989 FROM pg_class c, pg_attribute a
2990 WHERE c.relname=%s AND a.attname=%s AND c.oid=a.attrelid
2991 """, (self._table, 'parent_left'))
2995 def _create_parent_columns(self, cr):
2996 cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_left" INTEGER' % (self._table,))
2997 cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_right" INTEGER' % (self._table,))
2998 if 'parent_left' not in self._columns:
2999 _logger.error('create a column parent_left on object %s: fields.integer(\'Left Parent\', select=1)',
3001 _schema.debug("Table '%s': added column '%s' with definition=%s",
3002 self._table, 'parent_left', 'INTEGER')
3003 elif not self._columns['parent_left'].select:
3004 _logger.error('parent_left column on object %s must be indexed! Add select=1 to the field definition)',
3006 if 'parent_right' not in self._columns:
3007 _logger.error('create a column parent_right on object %s: fields.integer(\'Right Parent\', select=1)',
3009 _schema.debug("Table '%s': added column '%s' with definition=%s",
3010 self._table, 'parent_right', 'INTEGER')
3011 elif not self._columns['parent_right'].select:
3012 _logger.error('parent_right column on object %s must be indexed! Add select=1 to the field definition)',
3014 if self._columns[self._parent_name].ondelete not in ('cascade', 'restrict'):
3015 _logger.error("The column %s on object %s must be set as ondelete='cascade' or 'restrict'",
3016 self._parent_name, self._name)
3021 def _add_log_columns(self, cr):
3022 for field, field_def in LOG_ACCESS_COLUMNS.iteritems():
3025 FROM pg_class c, pg_attribute a
3026 WHERE c.relname=%s AND a.attname=%s AND c.oid=a.attrelid
3027 """, (self._table, field))
3029 cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, field, field_def))
3031 _schema.debug("Table '%s': added column '%s' with definition=%s",
3032 self._table, field, field_def)
3035 def _select_column_data(self, cr):
3036 # attlen is the number of bytes necessary to represent the type when
3037 # the type has a fixed size. If the type has a varying size attlen is
3038 # -1 and atttypmod is the size limit + 4, or -1 if there is no limit.
3039 cr.execute("SELECT c.relname,a.attname,a.attlen,a.atttypmod,a.attnotnull,a.atthasdef,t.typname,CASE WHEN a.attlen=-1 THEN (CASE WHEN a.atttypmod=-1 THEN 0 ELSE a.atttypmod-4 END) ELSE a.attlen END as size " \
3040 "FROM pg_class c,pg_attribute a,pg_type t " \
3041 "WHERE c.relname=%s " \
3042 "AND c.oid=a.attrelid " \
3043 "AND a.atttypid=t.oid", (self._table,))
3044 return dict(map(lambda x: (x['attname'], x),cr.dictfetchall()))
3047 def _o2m_raise_on_missing_reference(self, cr, f):
3048 # TODO this check should be a method on fields.one2many.
3049 if f._obj in self.pool:
3050 other = self.pool[f._obj]
3051 # TODO the condition could use fields_get_keys().
3052 if f._fields_id not in other._columns.keys():
3053 if f._fields_id not in other._inherit_fields.keys():
3054 raise except_orm('Programming Error', "There is no reference field '%s' found for '%s'" % (f._fields_id, f._obj,))
3056 def _m2m_raise_or_create_relation(self, cr, f):
3057 m2m_tbl, col1, col2 = f._sql_names(self)
3058 # do not create relations for custom fields as they do not belong to a module
3059 # they will be automatically removed when dropping the corresponding ir.model.field
3060 # table name for custom relation all starts with x_, see __init__
3061 if not m2m_tbl.startswith('x_'):
3062 self._save_relation_table(cr, m2m_tbl)
3063 cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (m2m_tbl,))
3064 if not cr.dictfetchall():
3065 if f._obj not in self.pool:
3066 raise except_orm('Programming Error', 'Many2Many destination model does not exist: `%s`' % (f._obj,))
3067 dest_model = self.pool[f._obj]
3068 ref = dest_model._table
3069 cr.execute('CREATE TABLE "%s" ("%s" INTEGER NOT NULL, "%s" INTEGER NOT NULL, UNIQUE("%s","%s"))' % (m2m_tbl, col1, col2, col1, col2))
3070 # create foreign key references with ondelete=cascade, unless the targets are SQL views
3071 cr.execute("SELECT relkind FROM pg_class WHERE relkind IN ('v') AND relname=%s", (ref,))
3072 if not cr.fetchall():
3073 self._m2o_add_foreign_key_unchecked(m2m_tbl, col2, dest_model, 'cascade')
3074 cr.execute("SELECT relkind FROM pg_class WHERE relkind IN ('v') AND relname=%s", (self._table,))
3075 if not cr.fetchall():
3076 self._m2o_add_foreign_key_unchecked(m2m_tbl, col1, self, 'cascade')
3078 cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (m2m_tbl, col1, m2m_tbl, col1))
3079 cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (m2m_tbl, col2, m2m_tbl, col2))
3080 cr.execute("COMMENT ON TABLE \"%s\" IS 'RELATION BETWEEN %s AND %s'" % (m2m_tbl, self._table, ref))
3082 _schema.debug("Create table '%s': m2m relation between '%s' and '%s'", m2m_tbl, self._table, ref)
3085 def _add_sql_constraints(self, cr):
3088 Modify this model's database table constraints so they match the one in
3092 def unify_cons_text(txt):
3093 return txt.lower().replace(', ',',').replace(' (','(')
3095 for (key, con, _) in self._sql_constraints:
3096 conname = '%s_%s' % (self._table, key)
3098 self._save_constraint(cr, conname, 'u')
3099 cr.execute("SELECT conname, pg_catalog.pg_get_constraintdef(oid, true) as condef FROM pg_constraint where conname=%s", (conname,))
3100 existing_constraints = cr.dictfetchall()
3104 'query': 'ALTER TABLE "%s" DROP CONSTRAINT "%s"' % (self._table, conname, ),
3105 'msg_ok': "Table '%s': dropped constraint '%s'. Reason: its definition changed from '%%s' to '%s'" % (
3106 self._table, conname, con),
3107 'msg_err': "Table '%s': unable to drop \'%s\' constraint !" % (self._table, con),
3112 'query': 'ALTER TABLE "%s" ADD CONSTRAINT "%s" %s' % (self._table, conname, con,),
3113 'msg_ok': "Table '%s': added constraint '%s' with definition=%s" % (self._table, conname, con),
3114 'msg_err': "Table '%s': unable to add \'%s\' constraint !\n If you want to have it, you should update the records and execute manually:\n%%s" % (
3120 if not existing_constraints:
3121 # constraint does not exists:
3122 sql_actions['add']['execute'] = True
3123 sql_actions['add']['msg_err'] = sql_actions['add']['msg_err'] % (sql_actions['add']['query'], )
3124 elif unify_cons_text(con) not in [unify_cons_text(item['condef']) for item in existing_constraints]:
3125 # constraint exists but its definition has changed:
3126 sql_actions['drop']['execute'] = True
3127 sql_actions['drop']['msg_ok'] = sql_actions['drop']['msg_ok'] % (existing_constraints[0]['condef'].lower(), )
3128 sql_actions['add']['execute'] = True
3129 sql_actions['add']['msg_err'] = sql_actions['add']['msg_err'] % (sql_actions['add']['query'], )
3131 # we need to add the constraint:
3132 sql_actions = [item for item in sql_actions.values()]
3133 sql_actions.sort(key=lambda x: x['order'])
3134 for sql_action in [action for action in sql_actions if action['execute']]:
3136 cr.execute(sql_action['query'])
3138 _schema.debug(sql_action['msg_ok'])
3140 _schema.warning(sql_action['msg_err'])
3144 def _execute_sql(self, cr):
3145 """ Execute the SQL code from the _sql attribute (if any)."""
3146 if hasattr(self, "_sql"):
3147 for line in self._sql.split(';'):
3148 line2 = line.replace('\n', '').strip()
3154 # Update objects that uses this one to update their _inherits fields
3157 def _inherits_reload_src(self):
3158 """ Recompute the _inherit_fields mapping on each _inherits'd child model."""
3159 for obj in self.pool.models.values():
3160 if self._name in obj._inherits:
3161 obj._inherits_reload()
3164 def _inherits_reload(self):
3165 """ Recompute the _inherit_fields mapping.
3167 This will also call itself on each inherits'd child model.
3171 for table in self._inherits:
3172 other = self.pool[table]
3173 for col in other._columns.keys():
3174 res[col] = (table, self._inherits[table], other._columns[col], table)
3175 for col in other._inherit_fields.keys():
3176 res[col] = (table, self._inherits[table], other._inherit_fields[col][2], other._inherit_fields[col][3])
3177 self._inherit_fields = res
3178 self._all_columns = self._get_column_infos()
3179 self._inherits_reload_src()
3182 def _get_column_infos(self):
3183 """Returns a dict mapping all fields names (direct fields and
3184 inherited field via _inherits) to a ``column_info`` struct
3185 giving detailed columns """
3187 for k, (parent, m2o, col, original_parent) in self._inherit_fields.iteritems():
3188 result[k] = fields.column_info(k, col, parent, m2o, original_parent)
3189 for k, col in self._columns.iteritems():
3190 result[k] = fields.column_info(k, col)
3194 def _inherits_check(self):
3195 for table, field_name in self._inherits.items():
3196 if field_name not in self._columns:
3197 _logger.info('Missing many2one field definition for _inherits reference "%s" in "%s", using default one.', field_name, self._name)
3198 self._columns[field_name] = fields.many2one(table, string="Automatically created field to link to parent %s" % table,
3199 required=True, ondelete="cascade")
3200 elif not self._columns[field_name].required or self._columns[field_name].ondelete.lower() not in ("cascade", "restrict"):
3201 _logger.warning('Field definition for _inherits reference "%s" in "%s" must be marked as "required" with ondelete="cascade" or "restrict", forcing it to required + cascade.', field_name, self._name)
3202 self._columns[field_name].required = True
3203 self._columns[field_name].ondelete = "cascade"
def fields_get(self, cr, user, allfields=None, context=None, write_access=True):
    """ Return the definition of each field.

    The returned value is a dictionary (indiced by field name) of
    dictionaries. The _inherits'd fields are included. The string, help,
    and selection (if present) attributes are translated.

    :param cr: database cursor
    :param user: current user id
    :param allfields: list of fields
    :param context: context arguments, like lang, time zone
    :return: dictionary of field dictionaries, each one describing a field of the business object
    :raise AccessError: * if user has no create/write rights on the requested object
    """
    if context is None:
        context = {}

    # NOTE: the write_access parameter is recomputed from the user's actual
    # rights; the passed-in value is ignored (preserved original behavior).
    write_access = self.check_access_rights(cr, user, 'write', raise_exception=False) \
        or self.check_access_rights(cr, user, 'create', raise_exception=False)

    res = {}

    translation_obj = self.pool.get('ir.translation')
    for parent in self._inherits:
        res.update(self.pool[parent].fields_get(cr, user, allfields, context))

    for f, field in self._columns.iteritems():
        # skip fields not requested or hidden from this user by field groups
        if (allfields and f not in allfields) or \
                (field.groups and not self.user_has_groups(cr, user, groups=field.groups, context=context)):
            continue

        res[f] = fields.field_to_dict(self, cr, user, field, context=context)

        if not write_access:
            res[f]['readonly'] = True
            res[f]['states'] = {}

        if 'lang' in context:
            if 'string' in res[f]:
                res_trans = translation_obj._get_source(cr, user, self._name + ',' + f, 'field', context['lang'])
                if res_trans:
                    res[f]['string'] = res_trans
            if 'help' in res[f]:
                help_trans = translation_obj._get_source(cr, user, self._name + ',' + f, 'help', context['lang'])
                if help_trans:
                    res[f]['help'] = help_trans

    return res
def get_empty_list_help(self, cr, user, help, context=None):
    """ Generic method giving the help message displayed when having
    no result to display in a list or kanban view. By default it returns
    the help given in parameter that is generally the help message
    defined in the action.

    The extract lost the trailing ``return help``; restored, since without
    it the method silently returned None.
    """
    return help
def check_field_access_rights(self, cr, user, operation, fields, context=None):
    """
    Check the user access rights on the given fields. This raises Access
    Denied if the user does not have the rights. Otherwise it returns the
    fields (as is if the fields is not falsy, or the readable/writable
    fields if fields is falsy).
    """
    def p(field_name):
        """Predicate to test if the user has access to the given field name."""
        # Ignore requested field if it doesn't exist. This is ugly but
        # it seems to happen at least with 'name_alias' on res.partner.
        if field_name not in self._all_columns:
            return True
        field = self._all_columns[field_name].column
        if user != SUPERUSER_ID and field.groups:
            return self.user_has_groups(cr, user, groups=field.groups, context=context)
        return True

    if not fields:
        # no explicit field list: return every field this user may access
        fields = filter(p, self._all_columns.keys())
    else:
        filtered_fields = filter(lambda a: not p(a), fields)
        if filtered_fields:
            _logger.warning('Access Denied by ACLs for operation: %s, uid: %s, model: %s, fields: %s', operation, user, self._name, ', '.join(filtered_fields))
            # NOTE(review): the raise line itself was lost in this extract;
            # the user-facing message below matches the original source.
            raise except_orm(
                _('The requested operation cannot be completed due to security restrictions. '
                  'Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
                (self._description, operation))
    return fields
def read(self, cr, user, ids, fields=None, context=None, load='_classic_read'):
    """ Read records with given ids with the given fields

    :param cr: database cursor
    :param user: current user id
    :param ids: id or list of the ids of the records to read
    :param fields: optional list of field names to return (default: all fields would be returned)
    :type fields: list (example ['field_name_1', ...])
    :param context: optional context dictionary - it may contains keys for specifying certain options
                    like ``context_lang``, ``context_tz`` to alter the results of the call.
                    A special ``bin_size`` boolean flag may also be passed in the context to request the
                    value of all fields.binary columns to be returned as the size of the binary instead of its
                    contents. This can also be selectively overriden by passing a field-specific flag
                    in the form ``bin_size_XXX: True/False`` where ``XXX`` is the name of the field.
                    Note: The ``bin_size_XXX`` form is new in OpenERP v6.0.
    :return: list of dictionaries((dictionary per record asked)) with requested field values
    :rtype: [{'name_of_the_field': value, ...}, ...]
    :raise AccessError: * if user has no read rights on the requested object
                        * if user tries to bypass access rules for read on the requested object
    """
    self.check_access_rights(cr, user, 'read')
    fields = self.check_field_access_rights(cr, user, 'read', fields)
    # accept a single id as well as a list of ids
    if isinstance(ids, (int, long)):
        select = [ids]
    else:
        select = ids
    # entries may come through as dicts carrying their id
    select = map(lambda x: isinstance(x, dict) and x['id'] or x, select)
    result = self._read_flat(cr, user, select, fields, context, load)

    if isinstance(ids, (int, long)):
        # single id in -> single record dict out (False when not found)
        return result and result[0] or False
    return result
3330 def _read_flat(self, cr, user, ids, fields_to_read, context=None, load='_classic_read'):
# NOTE(review): mangled extract -- the leading numbers are residue of the
# original file's line numbering and several intermediate source lines
# (guards, loop headers, blanks, the final return) are missing.
# Kept byte-identical; only review comments added below.
3335 if fields_to_read is None:
3336 fields_to_read = self._columns.keys()
3338 fields_to_read = list(set(fields_to_read))
3340 # all inherited fields + all non inherited fields for which the attribute whose name is in load is True
3341 fields_pre = [f for f in fields_to_read if
3342 f == self.CONCURRENCY_CHECK_FIELD
3343 or (f in self._columns and getattr(self._columns[f], '_classic_write'))
3344 ] + self._inherits.values()
# Builds the SELECT expression for one directly-fetched column.
3348 def convert_field(f):
3349 f_qual = '%s."%s"' % (self._table, f) # need fully-qualified references in case len(tables) > 1
3350 if f in ('create_date', 'write_date'):
3351 return "date_trunc('second', %s) as %s" % (f_qual, f)
3352 if f == self.CONCURRENCY_CHECK_FIELD:
3353 if self._log_access:
3354 return "COALESCE(%s.write_date, %s.create_date, (now() at time zone 'UTC'))::timestamp AS %s" % (self._table, self._table, f,)
3355 return "(now() at time zone 'UTC')::timestamp AS %s" % (f,)
3356 if isinstance(self._columns[f], fields.binary) and context.get('bin_size', False):
3357 return 'length(%s) as "%s"' % (f_qual, f)
3360 # FIXME: The query construction needs to be rewritten using the internal Query
3361 # object, as in search(), to avoid ambiguous column references when
3362 # reading/sorting on a table that is auto_joined to another table with
3363 # common columns (e.g. the magical columns)
3365 # Construct a clause for the security rules.
3366 # 'tables' hold the list of tables necessary for the SELECT including the ir.rule clauses,
3367 # or will at least contain self._table.
3368 rule_clause, rule_params, tables = self.pool.get('ir.rule').domain_get(cr, user, self._name, 'read', context=context)
3370 fields_pre2 = map(convert_field, fields_pre)
3371 order_by = self._parent_order or self._order
3372 select_fields = ','.join(fields_pre2 + ['%s.id' % self._table])
3373 query = 'SELECT %s FROM %s WHERE %s.id IN %%s' % (select_fields, ','.join(tables), self._table)
3375 query += " AND " + (' OR '.join(rule_clause))
3376 query += " ORDER BY " + order_by
# Fetch in id-batches; each batch is checked against the record rules.
3377 for sub_ids in cr.split_for_in_conditions(ids):
3378 cr.execute(query, [tuple(sub_ids)] + rule_params)
3379 results = cr.dictfetchall()
3380 result_ids = [x['id'] for x in results]
3381 self._check_record_rules_result_count(cr, user, sub_ids, result_ids, 'read', context=context)
3384 self.check_access_rule(cr, user, ids, 'read', context=context)
3385 res = map(lambda x: {'id': x}, ids)
# Apply stored translations for translatable columns when a lang is set.
3387 if context.get('lang'):
3388 for f in fields_pre:
3389 if f == self.CONCURRENCY_CHECK_FIELD:
3391 if self._columns[f].translate:
3392 ids = [x['id'] for x in res]
3393 #TODO: optimize out of this loop
3394 res_trans = self.pool.get('ir.translation')._get_ids(cr, user, self._name+','+f, 'model', context['lang'], ids)
3396 r[f] = res_trans.get(r['id'], False) or r[f]
# Merge values coming from _inherits parent records into each result row.
3398 for table in self._inherits:
3399 col = self._inherits[table]
3400 cols = [x for x in intersect(self._inherit_fields.keys(), fields_to_read) if x not in self._columns.keys()]
3403 res2 = self.pool[table].read(cr, user, [x[col] for x in res], cols, context, load)
3411 if not record[col]: # if the record is deleted from _inherits table?
3413 record.update(res3[record[col]])
3414 if col not in fields_to_read:
3417 # all fields which need to be post-processed by a simple function (symbol_get)
3418 fields_post = filter(lambda x: x in self._columns and self._columns[x]._symbol_get, fields_to_read)
3421 for f in fields_post:
3422 r[f] = self._columns[f]._symbol_get(r[f])
3423 ids = [x['id'] for x in res]
3425 # all non inherited fields for which the attribute whose name is in load is False
3426 fields_post = filter(lambda x: x in self._columns and not getattr(self._columns[x], load), fields_to_read)
3428 # Compute POST fields
# Function fields are grouped by their _multi key and computed per group.
3430 for f in fields_post:
3431 todo.setdefault(self._columns[f]._multi, [])
3432 todo[self._columns[f]._multi].append(f)
3433 for key, val in todo.items():
3435 res2 = self._columns[val[0]].get(cr, self, ids, val, user, context=context, values=res)
3436 assert res2 is not None, \
3437 'The function field "%s" on the "%s" model returned None\n' \
3438 '(a dictionary was expected).' % (val[0], self._name)
3441 if isinstance(res2[record['id']], str): res2[record['id']] = eval(res2[record['id']]) #TOCHECK : why got string instend of dict in python2.6
3442 multi_fields = res2.get(record['id'],{})
3444 record[pos] = multi_fields.get(pos,[])
3447 res2 = self._columns[f].get(cr, self, ids, f, user, context=context, values=res)
3450 record[f] = res2[record['id']]
3454 # Warn about deprecated fields now that fields_pre and fields_post are computed
3455 # Explicitly use list() because we may receive tuples
3456 for f in list(fields_pre) + list(fields_post):
3457 field_column = self._all_columns.get(f) and self._all_columns.get(f).column
3458 if field_column and field_column.deprecated:
3459 _logger.warning('Field %s.%s is deprecated: %s', self._name, f, field_column.deprecated)
3463 for field in vals.copy():
3465 if field in self._columns:
3466 fobj = self._columns[field]
3472 for group in groups:
3473 module = group.split(".")[0]
3474 grp = group.split(".")[1]
3475 cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name=%s and module=%s and model=%s) and uid=%s", \
3476 (grp, module, 'res.groups', user))
3477 readonly = cr.fetchall()
3478 if readonly[0][0] >= 1:
3481 elif readonly[0][0] == 0:
3487 if type(vals[field]) == type([]):
3489 elif type(vals[field]) == type(0.0):
3491 elif type(vals[field]) == type(''):
3492 vals[field] = '=No Permission='
3496 if vals[field] is None:
3501 # TODO check READ access
    def perm_read(self, cr, user, ids, context=None, details=True):
        """
        Returns some metadata about the given records.

        :param details: if True, \*_uid fields are replaced with the name of the user
        :return: list of ownership dictionaries for each requested record
        :rtype: list of dictionaries with the following keys:

            * create_uid: user who created the record
            * create_date: date when the record was created
            * write_uid: last user who changed the record
            * write_date: date of the last change to the record
            * xmlid: XML ID to use to refer to this record (if there is one), in format ``module.name``
        """
        # NOTE(review): this excerpt appears truncated — the `fields` initializer
        # and the loops binding `r`/`key` over the result rows are missing.
        # Remember whether a single id was passed (presumably to unwrap the result).
        uniq = isinstance(ids, (int, long))
        # The audit columns only exist on the table when _log_access is enabled.
        if self._log_access:
            fields += ['create_uid', 'create_date', 'write_uid', 'write_date']
        quoted_table = '"%s"' % self._table
        fields_str = ",".join('%s.%s'%(quoted_table, field) for field in fields)
        # LEFT JOIN on ir_model_data fetches the record's external (XML) id, if any.
        query = '''SELECT %s, __imd.module, __imd.name
                   FROM %s LEFT JOIN ir_model_data __imd
                   ON (__imd.model = %%s and __imd.res_id = %s.id)
                   WHERE %s.id IN %%s''' % (fields_str, quoted_table, quoted_table, quoted_table)
        cr.execute(query, (self._name, tuple(ids)))
        res = cr.dictfetchall()
                # Normalize SQL NULLs to False, the ORM convention.
                r[key] = r[key] or False
                if details and key in ('write_uid', 'create_uid') and r[key]:
                        # Replace the raw uid with (id, display name).
                        r[key] = self.pool.get('res.users').name_get(cr, user, [r[key]])[0]
                        pass # Leave the numeric uid there
            # Assemble the external id from the joined ir_model_data columns.
            r['xmlid'] = ("%(module)s.%(name)s" % r) if r['name'] else False
            del r['name'], r['module']
    def _check_concurrency(self, cr, ids, context):
        """Optimistic-locking check: raise if any of ``ids`` was written (or
        created) in the database after the client-side timestamps carried in
        ``context[self.CONCURRENCY_CHECK_FIELD]``.

        Only runs when the context carries the concurrency timestamps and the
        model keeps the write/create audit columns (``_log_access``).

        NOTE(review): this excerpt appears truncated — the ``ids_to_check``
        initializer, the inner per-id loop header and the result fetch/guard
        are missing.
        """
        if not (context.get(self.CONCURRENCY_CHECK_FIELD) and self._log_access):
        # One (id, timestamp) pair per record: a row matches when it was
        # modified after the timestamp the client last saw.
        check_clause = "(id = %s AND %s < COALESCE(write_date, create_date, (now() at time zone 'UTC'))::timestamp)"
        for sub_ids in cr.split_for_in_conditions(ids):
                # Timestamps are keyed by "model.name,id" in the context.
                id_ref = "%s,%s" % (self._name, id)
                update_date = context[self.CONCURRENCY_CHECK_FIELD].pop(id_ref, None)
                    ids_to_check.extend([id, update_date])
            if not ids_to_check:
            # Python 2 integer division: ids_to_check holds id/date pairs.
            cr.execute("SELECT id FROM %s WHERE %s" % (self._table, " OR ".join([check_clause]*(len(ids_to_check)/2))), tuple(ids_to_check))
                # mention the first one only to keep the error message readable
                raise except_orm('ConcurrencyException', _('A document was modified since you last viewed it (%s:%d)') % (self._description, res[0]))
    def _check_record_rules_result_count(self, cr, uid, ids, result_ids, operation, context=None):
        """Verify the returned rows after applying record rules matches
        the length of `ids`, and raise an appropriate exception if it does not.

        NOTE(review): this excerpt appears truncated — the early-return when
        nothing is missing and the branch guards around the two error paths
        are not visible.
        """
        ids, result_ids = set(ids), set(result_ids)
        missing_ids = ids - result_ids
            # Attempt to distinguish record rule restriction vs deleted records,
            # to provide a more specific error message - check whether the
            # missing ids still exist in the table.
            cr.execute('SELECT id FROM ' + self._table + ' WHERE id IN %s', (tuple(missing_ids),))
            forbidden_ids = [x[0] for x in cr.fetchall()]
                # the missing ids are (at least partially) hidden by access rules
                if uid == SUPERUSER_ID:
                # Log the details but keep the user-facing message generic.
                _logger.warning('Access Denied by record rules for operation: %s on record ids: %r, uid: %s, model: %s', operation, forbidden_ids, uid, self._name)
                raise except_orm(_('Access Denied'),
                                 _('The requested operation cannot be completed due to security restrictions. Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
                                 (self._description, operation))
            # If we get here, the missing_ids are not in the database
            if operation in ('read','unlink'):
                # No need to warn about deleting an already deleted record.
                # And no error when reading a record that was deleted, to prevent spurious
                # errors for non-transactional search/read sequences coming from clients
            _logger.warning('Failed operation on deleted record(s): %s, uid: %s, model: %s', operation, uid, self._name)
            raise except_orm(_('Missing document(s)'),
                             _('One of the documents you are trying to access has been deleted, please try again after refreshing.'))
3602 def check_access_rights(self, cr, uid, operation, raise_exception=True): # no context on purpose.
3603 """Verifies that the operation given by ``operation`` is allowed for the user
3604 according to the access rights."""
3605 return self.pool.get('ir.model.access').check(cr, uid, self._name, operation, raise_exception)
    def check_access_rule(self, cr, uid, ids, operation, context=None):
        """Verifies that the operation given by ``operation`` is allowed for the user
        according to ir.rules.

        :param operation: one of ``write``, ``unlink``
        :raise except_orm: * if current ir.rules do not permit this operation.
        :return: None if the operation is allowed

        NOTE(review): this excerpt appears truncated — the superuser early
        ``return``, the FROM clause of the transient-model query and the
        ``where_clause`` guard are not visible.
        """
        # The superuser bypasses record rules.
        if uid == SUPERUSER_ID:
        if self.is_transient():
            # Only one single implicit access rule for transient models: owner only!
            # This is ok to hardcode because we assert that TransientModels always
            # have log_access enabled so that the create_uid column is always there.
            # And even with _inherits, these fields are always present in the local
            # table too, so no need for JOINs.
            cr.execute("""SELECT distinct create_uid
                          WHERE id IN %%s""" % self._table, (tuple(ids),))
            uids = [x[0] for x in cr.fetchall()]
            if len(uids) != 1 or uids[0] != uid:
                raise except_orm(_('Access Denied'),
                                 _('For this kind of document, you may only access records you created yourself.\n\n(Document type: %s)') % (self._description,))
        # Fetch the ir.rule SQL fragments for this model/operation and verify
        # that every requested id survives the rule filter.
        where_clause, where_params, tables = self.pool.get('ir.rule').domain_get(cr, uid, self._name, operation, context=context)
            where_clause = ' and ' + ' and '.join(where_clause)
        for sub_ids in cr.split_for_in_conditions(ids):
            cr.execute('SELECT ' + self._table + '.id FROM ' + ','.join(tables) +
                       ' WHERE ' + self._table + '.id IN %s' + where_clause,
                       [sub_ids] + where_params)
            returned_ids = [x['id'] for x in cr.dictfetchall()]
            self._check_record_rules_result_count(cr, uid, sub_ids, returned_ids, operation, context=context)
    def create_workflow(self, cr, uid, ids, context=None):
        """Create a workflow instance for each given record IDs."""
        # Imported lazily to avoid a circular import at module load time.
        from openerp import workflow
        # NOTE(review): the `for res_id in ids:` header appears to be missing
        # from this excerpt; `res_id` is otherwise unbound.
            workflow.trg_create(uid, self._name, res_id, cr)
    def delete_workflow(self, cr, uid, ids, context=None):
        """Delete the workflow instances bound to the given record IDs."""
        # Imported lazily to avoid a circular import at module load time.
        from openerp import workflow
        # NOTE(review): the `for res_id in ids:` header appears to be missing
        # from this excerpt; `res_id` is otherwise unbound.
            workflow.trg_delete(uid, self._name, res_id, cr)
    def step_workflow(self, cr, uid, ids, context=None):
        """Reevaluate the workflow instances of the given record IDs."""
        # Imported lazily to avoid a circular import at module load time.
        from openerp import workflow
        # NOTE(review): the `for res_id in ids:` header appears to be missing
        # from this excerpt; `res_id` is otherwise unbound.
            workflow.trg_write(uid, self._name, res_id, cr)
    def signal_workflow(self, cr, uid, ids, signal, context=None):
        """Send given workflow signal and return a dict mapping ids to workflow results"""
        # Imported lazily to avoid a circular import at module load time.
        from openerp import workflow
        # NOTE(review): the `result = {}` initializer and the `for res_id in
        # ids:` header appear to be missing from this excerpt.
            result[res_id] = workflow.trg_validate(uid, self._name, res_id, signal, cr)
    def redirect_workflow(self, cr, uid, old_new_ids, context=None):
        """ Rebind the workflow instance bound to the given 'old' record IDs to
        the given 'new' IDs. (``old_new_ids`` is a list of pairs ``(old, new)``.
        """
        # Imported lazily to avoid a circular import at module load time.
        from openerp import workflow
        for old_id, new_id in old_new_ids:
            workflow.trg_redirect(uid, self._name, old_id, new_id, cr)
    def unlink(self, cr, uid, ids, context=None):
        """
        Delete records with given ids

        :param cr: database cursor
        :param uid: current user id
        :param ids: id or list of ids
        :param context: (optional) context arguments, like lang, time zone
        :raise AccessError: * if user has no unlink rights on the requested object
                            * if user tries to bypass access rules for unlink on the requested object
        :raise UserError: if the record is default property for other records

        NOTE(review): this excerpt appears truncated — the single-id
        normalization body, the tail of the default-property domain and a few
        guards (e.g. around ``reference_ids``/``ir_value_ids``) are missing.
        """
        # Normalize a single id into a list.
        if isinstance(ids, (int, long)):
        # Collect the stored-function recomputations before the rows disappear.
        result_store = self._store_get_values(cr, uid, ids, self._all_columns.keys(), context)
        self._check_concurrency(cr, ids, context)
        self.check_access_rights(cr, uid, 'unlink')
        ir_property = self.pool.get('ir.property')
        ir_attachment_obj = self.pool.get('ir.attachment')
        # Check if the records are used as default properties.
        domain = [('res_id', '=', False),
                  ('value_reference', 'in', ['%s,%s' % (self._name, i) for i in ids]),
        if ir_property.search(cr, uid, domain, context=context):
            raise except_orm(_('Error'), _('Unable to delete this document because it is used as a default property'))
        # Delete the records' properties.
        property_ids = ir_property.search(cr, uid, [('res_id', 'in', ['%s,%s' % (self._name, i) for i in ids])], context=context)
        ir_property.unlink(cr, uid, property_ids, context=context)
        self.delete_workflow(cr, uid, ids, context=context)
        self.check_access_rule(cr, uid, ids, 'unlink', context=context)
        pool_model_data = self.pool.get('ir.model.data')
        ir_values_obj = self.pool.get('ir.values')
        for sub_ids in cr.split_for_in_conditions(ids):
            cr.execute('delete from ' + self._table + ' ' \
                       'where id IN %s', (sub_ids,))
            # Removing the ir_model_data reference if the record being deleted is a record created by xml/csv file,
            # as these are not connected with real database foreign keys, and would be dangling references.
            # Note: following steps performed as admin to avoid access rights restrictions, and with no context
            # to avoid possible side-effects during admin calls.
            # Step 1. Calling unlink of ir_model_data only for the affected IDS
            reference_ids = pool_model_data.search(cr, SUPERUSER_ID, [('res_id','in',list(sub_ids)),('model','=',self._name)])
            # Step 2. Marching towards the real deletion of referenced records
                pool_model_data.unlink(cr, SUPERUSER_ID, reference_ids)
            # For the same reason, removing the record relevant to ir_values
            ir_value_ids = ir_values_obj.search(cr, uid,
                    ['|',('value','in',['%s,%s' % (self._name, sid) for sid in sub_ids]),'&',('res_id','in',list(sub_ids)),('model','=',self._name)],
                ir_values_obj.unlink(cr, uid, ir_value_ids, context=context)
            # For the same reason, removing the record relevant to ir_attachment
            # The search is performed with sql as the search method of ir_attachment is overridden to hide attachments of deleted records
            cr.execute('select id from ir_attachment where res_model = %s and res_id in %s', (self._name, sub_ids))
            ir_attachment_ids = [ir_attachment[0] for ir_attachment in cr.fetchall()]
            if ir_attachment_ids:
                ir_attachment_obj.unlink(cr, uid, ir_attachment_ids, context=context)
        # Recompute stored function fields on records that referenced the
        # deleted ones, excluding the deleted ids themselves.
        for order, obj_name, store_ids, fields in result_store:
            if obj_name == self._name:
                effective_store_ids = list(set(store_ids) - set(ids))
                effective_store_ids = store_ids
            if effective_store_ids:
                obj = self.pool[obj_name]
                # Keep only ids that still exist in the database.
                cr.execute('select id from '+obj._table+' where id IN %s', (tuple(effective_store_ids),))
                rids = map(lambda x: x[0], cr.fetchall())
                    obj._store_set_values(cr, uid, rids, fields, context)
    def write(self, cr, user, ids, vals, context=None):
        """
        Update records with given ids with the given field values

        :param cr: database cursor
        :param user: current user id
        :param ids: object id or list of object ids to update according to **vals**
        :param vals: field values to update, e.g {'field_name': new_field_value, ...}
        :type vals: dictionary
        :param context: (optional) context arguments, e.g. {'lang': 'en_us', 'tz': 'UTC', ...}
        :type context: dictionary
        :raise AccessError: * if user has no write rights on the requested object
                            * if user tries to bypass access rules for write on the requested object
        :raise ValidateError: if user tries to enter invalid value for a field that is not in selection
        :raise UserError: if a loop would be created in a hierarchy of objects a result of the operation (such as setting an object as its own parent)

        **Note**: The type of field values to pass in ``vals`` for relationship fields is specific:

            + For a many2many field, a list of tuples is expected.
              Here is the list of tuple that are accepted, with the corresponding semantics ::

                 (0, 0,  { values })    link to a new record that needs to be created with the given values dictionary
                 (1, ID, { values })    update the linked record with id = ID (write *values* on it)
                 (2, ID)                remove and delete the linked record with id = ID (calls unlink on ID, that will delete the object completely, and the link to it as well)
                 (3, ID)                cut the link to the linked record with id = ID (delete the relationship between the two objects but does not delete the target object itself)
                 (4, ID)                link to existing record with id = ID (adds a relationship)
                 (5)                    unlink all (like using (3,ID) for all linked records)
                 (6, 0, [IDs])          replace the list of linked IDs (like using (5) then (4,ID) for each ID in the list of IDs)

                 Example:
                    [(6, 0, [8, 5, 6, 4])] sets the many2many to ids [8, 5, 6, 4]

            + For a one2many field, a lits of tuples is expected.
              Here is the list of tuple that are accepted, with the corresponding semantics ::

                 (0, 0,  { values })    link to a new record that needs to be created with the given values dictionary
                 (1, ID, { values })    update the linked record with id = ID (write *values* on it)
                 (2, ID)                remove and delete the linked record with id = ID (calls unlink on ID, that will delete the object completely, and the link to it as well)

                 Example:
                    [(0, 0, {'field_name':field_value_record1, ...}), (0, 0, {'field_name':field_value_record2, ...})]

            + For a many2one field, simply use the ID of target record, which must already exist, or ``False`` to remove the link.
            + For a reference field, use a string with the model name, a comma, and the target object id (example: ``'product.product, 5'``)

        NOTE(review): this excerpt appears truncated — initializers for
        ``upd0``/``upd1``/``direct``/``updend``/``nids``/``done`` and several
        loop headers and guard bodies are not visible below.
        """
        self.check_field_access_rights(cr, user, 'write', vals.keys())
        # Track records removed via (2, ID) commands so their stored-function
        # recomputation can be skipped at the end.
        deleted_related = defaultdict(list)
        for field in vals.copy():
            if field in self._columns:
                fobj = self._columns[field]
            elif field in self._inherit_fields:
                fobj = self._inherit_fields[field][2]
            if fobj._type in ['one2many', 'many2many'] and vals[field]:
                for wtuple in vals[field]:
                    if isinstance(wtuple, (tuple, list)) and wtuple[0] == 2:
                        deleted_related[fobj._obj].append(wtuple[1])
            # Group-based readonly enforcement: count the user's memberships in
            # the field's allowed groups (parameterized query).
            for group in groups:
                module = group.split(".")[0]
                grp = group.split(".")[1]
                cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name=%s and module=%s and model=%s) and uid=%s", \
                           (grp, module, 'res.groups', user))
                readonly = cr.fetchall()
                if readonly[0][0] >= 1:
        # Normalize a single id into a list.
        if isinstance(ids, (int, long)):
        self._check_concurrency(cr, ids, context)
        self.check_access_rights(cr, user, 'write')
        # Stored-function fields impacted by this write, computed up front.
        result = self._store_get_values(cr, user, ids, vals.keys(), context) or []
        # No direct update of parent_left/right
        vals.pop('parent_left', None)
        vals.pop('parent_right', None)
        parents_changed = []
        parent_order = self._parent_order or self._order
        if self._parent_store and (self._parent_name in vals):
            # The parent_left/right computation may take up to
            # 5 seconds. No need to recompute the values if the
            # parent is the same.
            # Note: to respect parent_order, nodes must be processed in
            # order, so ``parents_changed`` must be ordered properly.
            parent_val = vals[self._parent_name]
            query = "SELECT id FROM %s WHERE id IN %%s AND (%s != %%s OR %s IS NULL) ORDER BY %s" % \
                    (self._table, self._parent_name, self._parent_name, parent_order)
            cr.execute(query, (tuple(ids), parent_val))
            query = "SELECT id FROM %s WHERE id IN %%s AND (%s IS NOT NULL) ORDER BY %s" % \
                    (self._table, self._parent_name, parent_order)
            cr.execute(query, (tuple(ids),))
            parents_changed = map(operator.itemgetter(0), cr.fetchall())
        # Only translate when a non-English language is active in the context.
        totranslate = context.get('lang', False) and (context['lang'] != 'en_US')
            field_column = self._all_columns.get(field) and self._all_columns.get(field).column
            if field_column and field_column.deprecated:
                _logger.warning('Field %s.%s is deprecated: %s', self._name, field, field_column.deprecated)
            if field in self._columns:
                if self._columns[field]._classic_write and not (hasattr(self._columns[field], '_fnct_inv')):
                    if (not totranslate) or not self._columns[field].translate:
                        # Directly updatable column: contributes to the SQL SET clause.
                        upd0.append('"'+field+'"='+self._columns[field]._symbol_set[0])
                        upd1.append(self._columns[field]._symbol_set[1](vals[field]))
                    direct.append(field)
                    upd_todo.append(field)
                updend.append(field)
            if field in self._columns \
                    and hasattr(self._columns[field], 'selection') \
                self._check_selection_field_value(cr, user, field, vals[field], context=context)
        if self._log_access:
            upd0.append('write_uid=%s')
            upd0.append("write_date=(now() at time zone 'UTC')")
        self.check_access_rule(cr, user, ids, 'write', context=context)
        for sub_ids in cr.split_for_in_conditions(ids):
            cr.execute('update ' + self._table + ' set ' + ','.join(upd0) + ' ' \
                       'where id IN %s', upd1 + [sub_ids])
            # A row-count mismatch means some requested records no longer exist.
            if cr.rowcount != len(sub_ids):
                raise except_orm(_('AccessError'),
                                 _('One of the records you are trying to modify has already been deleted (Document type: %s).') % self._description)
            if self._columns[f].translate:
                # Keep the source (untranslated) value before overwriting it.
                src_trans = self.pool[self._name].read(cr, user, ids, [f])[0][f]
                # Inserting value to DB
                context_wo_lang = dict(context, lang=None)
                self.write(cr, user, ids, {f: vals[f]}, context=context_wo_lang)
                self.pool.get('ir.translation')._set_ids(cr, user, self._name+','+f, 'model', context['lang'], ids, vals[f], src_trans)
        # call the 'set' method of fields which are not classic_write
        upd_todo.sort(lambda x, y: self._columns[x].priority-self._columns[y].priority)
        # default element in context must be removed when call a one2many or many2many
        rel_context = context.copy()
        for c in context.items():
            if c[0].startswith('default_'):
                del rel_context[c[0]]
        for field in upd_todo:
            result += self._columns[field].set(cr, self, id, field, vals[field], user, context=rel_context) or []
        # Delegate values belonging to _inherits parents to the parent models.
        unknown_fields = updend[:]
        for table in self._inherits:
            col = self._inherits[table]
            for sub_ids in cr.split_for_in_conditions(ids):
                cr.execute('select distinct "'+col+'" from "'+self._table+'" ' \
                           'where id IN %s', (sub_ids,))
                nids.extend([x[0] for x in cr.fetchall()])
                if self._inherit_fields[val][0] == table:
                    unknown_fields.remove(val)
            self.pool[table].write(cr, user, nids, v, context)
                'No such field(s) in model %s: %s.',
                self._name, ', '.join(unknown_fields))
        self._validate(cr, user, ids, context)
        # TODO: use _order to set dest at the right position and not first node of parent
        # We can't defer parent_store computation because the stored function
        # fields that are computer may refer (directly or indirectly) to
        # parent_left/right (via a child_of domain)
            self.pool._init_parent[self._name] = True
            order = self._parent_order or self._order
            parent_val = vals[self._parent_name]
                clause, params = '%s=%%s' % (self._parent_name,), (parent_val,)
                clause, params = '%s IS NULL' % (self._parent_name,), ()
            for id in parents_changed:
                cr.execute('SELECT parent_left, parent_right FROM %s WHERE id=%%s' % (self._table,), (id,))
                pleft, pright = cr.fetchone()
                # Width of the subtree being moved.
                distance = pright - pleft + 1
                # Positions of current siblings, to locate proper insertion point;
                # this can _not_ be fetched outside the loop, as it needs to be refreshed
                # after each update, in case several nodes are sequentially inserted one
                # next to the other (i.e computed incrementally)
                cr.execute('SELECT parent_right, id FROM %s WHERE %s ORDER BY %s' % (self._table, clause, parent_order), params)
                parents = cr.fetchall()
                # Find Position of the element
                for (parent_pright, parent_id) in parents:
                position = parent_pright and parent_pright + 1 or 1
                # It's the first node of the parent
                    cr.execute('select parent_left from '+self._table+' where id=%s', (parent_val,))
                    position = cr.fetchone()[0] + 1
                # Moving a node under one of its own descendants would corrupt the tree.
                if pleft < position <= pright:
                    raise except_orm(_('UserError'), _('Recursivity Detected.'))
                if pleft < position:
                    cr.execute('update '+self._table+' set parent_left=parent_left+%s where parent_left>=%s', (distance, position))
                    cr.execute('update '+self._table+' set parent_right=parent_right+%s where parent_right>=%s', (distance, position))
                    cr.execute('update '+self._table+' set parent_left=parent_left+%s, parent_right=parent_right+%s where parent_left>=%s and parent_left<%s', (position-pleft, position-pleft, pleft, pright))
                    cr.execute('update '+self._table+' set parent_left=parent_left+%s where parent_left>=%s', (distance, position))
                    cr.execute('update '+self._table+' set parent_right=parent_right+%s where parent_right>=%s', (distance, position))
                    cr.execute('update '+self._table+' set parent_left=parent_left-%s, parent_right=parent_right-%s where parent_left>=%s and parent_left<%s', (pleft-position+distance, pleft-position+distance, pleft+distance, pright+distance))
        # Trigger the stored-function recomputations collected above.
        result += self._store_get_values(cr, user, ids, vals.keys(), context)
        for order, model_name, ids_to_update, fields_to_recompute in result:
            key = (model_name, tuple(fields_to_recompute))
            done.setdefault(key, {})
            # avoid to do several times the same computation
            for id in ids_to_update:
                if id not in done[key]:
                    done[key][id] = True
                    if id not in deleted_related[object]:
            self.pool[model_name]._store_set_values(cr, user, todo, fields_to_recompute, context)
        self.step_workflow(cr, user, ids, context=context)
4046 # TODO: Should set perm to user.xxx
    def create(self, cr, user, vals, context=None):
        """
        Create a new record for the model.

        The values for the new record are initialized using the ``vals``
        argument, and if necessary the result of ``default_get()``.

        :param cr: database cursor
        :param user: current user id
        :param vals: field values for new record, e.g {'field_name': field_value, ...}
        :type vals: dictionary
        :param context: optional context arguments, e.g. {'lang': 'en_us', 'tz': 'UTC', ...}
        :type context: dictionary
        :return: id of new record created
        :raise AccessError: * if user has no create rights on the requested object
                            * if user tries to bypass access rules for create on the requested object
        :raise ValidateError: if user tries to enter invalid value for a field that is not in selection
        :raise UserError: if a loop would be created in a hierarchy of objects a result of the operation (such as setting an object as its own parent)

        **Note**: The type of field values to pass in ``vals`` for relationship fields is specific.
        Please see the description of the :py:meth:`~osv.osv.osv.write` method for details about the possible values and how
        to specify them.

        NOTE(review): this excerpt appears truncated — initializers for
        ``tocreate``/``columns``/``unknown_fields``/``upd_todo``/``done`` and
        several guard bodies and loop headers are not visible below.
        """
        # Transient models get garbage-collected opportunistically on create.
        if self.is_transient():
            self._transient_vacuum(cr, user)
        self.check_access_rights(cr, user, 'create')
        vals = self._add_missing_default_values(cr, user, vals, context)
        # The audit columns are maintained by the ORM and may not be forced.
        if self._log_access:
            for f in LOG_ACCESS_COLUMNS:
                if vals.pop(f, None) is not None:
                    'Field `%s` is not allowed when creating the model `%s`.',
        # Prepare the values destined to each _inherits parent table.
        for v in self._inherits:
            if self._inherits[v] not in vals:
                tocreate[v] = {'id': vals[self._inherits[v]]}
        # columns will contain a list of field defined as a tuple
        # tuple(field_name, format_string, field_value)
        # the tuple will be used by the string formatting for the INSERT
            ('id', "nextval('%s')" % self._sequence),
        for v in vals.keys():
            if v in self._inherit_fields and v not in self._columns:
                (table, col, col_detail, original_parent) = self._inherit_fields[v]
                tocreate[table][v] = vals[v]
                if (v not in self._inherit_fields) and (v not in self._columns):
                    unknown_fields.append(v)
                'No such field(s) in model %s: %s.',
                self._name, ', '.join(unknown_fields))
        # Models without a sequence are read-only (reporting views).
        if not self._sequence:
                _('You cannot perform this operation. New Record Creation is not allowed for this object as this object is for reporting purpose.')
        for table in tocreate:
            if self._inherits[table] in vals:
                del vals[self._inherits[table]]
            record_id = tocreate[table].pop('id', None)
            # When linking/creating parent records, force context without 'no_store_function' key that
            # defers stored functions computing, as these won't be computed in batch at the end of create().
            parent_context = dict(context)
            parent_context.pop('no_store_function', None)
            if record_id is None or not record_id:
                record_id = self.pool[table].create(cr, user, tocreate[table], context=parent_context)
                self.pool[table].write(cr, user, [record_id], tocreate[table], context=parent_context)
            columns.append((self._inherits[table], '%s', record_id))
        #Start : Set bool fields to be False if they are not touched(to make search more powerful)
        bool_fields = [x for x in self._columns.keys() if self._columns[x]._type=='boolean']
        for bool_field in bool_fields:
            if bool_field not in vals:
                vals[bool_field] = False
        for field in vals.copy():
            if field in self._columns:
                fobj = self._columns[field]
                fobj = self._inherit_fields[field][2]
            # Group-based readonly enforcement for the field's allowed groups.
            for group in groups:
                module = group.split(".")[0]
                grp = group.split(".")[1]
                # NOTE(review): this query interpolates values with % and manual
                # quotes instead of passing parameters (cf. the parameterized
                # variant of the same query in write()); flagged for
                # SQL-injection hygiene.
                cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name='%s' and module='%s' and model='%s') and uid=%s" % \
                           (grp, module, 'res.groups', user))
                readonly = cr.fetchall()
                if readonly[0][0] >= 1:
                elif readonly[0][0] == 0:
            current_field = self._columns[field]
            if current_field._classic_write:
                columns.append((field, '%s', current_field._symbol_set[1](vals[field])))
                #for the function fields that receive a value, we set them directly in the database
                #(they may be required), but we also need to trigger the _fct_inv()
                if (hasattr(current_field, '_fnct_inv')) and not isinstance(current_field, fields.related):
                    #TODO: this way to special case the related fields is really creepy but it shouldn't be changed at
                    #one week of the release candidate. It seems the only good way to handle correctly this is to add an
                    #attribute to make a field `really readonly´ and thus totally ignored by the create()... otherwise
                    #if, for example, the related has a default value (for usability) then the fct_inv is called and it
                    #may raise some access rights error. Changing this is a too big change for now, and is thus postponed
                    #after the release but, definitively, the behavior shouldn't be different for related and function
                    upd_todo.append(field)
                #TODO: this `if´ statement should be removed because there is no good reason to special case the fields
                #related. See the above TODO comment for further explanations.
                if not isinstance(current_field, fields.related):
                    upd_todo.append(field)
            if field in self._columns \
                    and hasattr(current_field, 'selection') \
                self._check_selection_field_value(cr, user, field, vals[field], context=context)
        if self._log_access:
            columns.append(('create_uid', '%s', user))
            columns.append(('write_uid', '%s', user))
            columns.append(('create_date', "(now() at time zone 'UTC')"))
            columns.append(('write_date', "(now() at time zone 'UTC')"))
        # the list of tuples used in this formatting corresponds to
        # tuple(field_name, format, value)
        # In some case, for example (id, create_date, write_date) we does not
        # need to read the third value of the tuple, because the real value is
        # encoded in the second value (the format).
            """INSERT INTO "%s" (%s) VALUES(%s) RETURNING id""" % (
                ', '.join('"%s"' % f[0] for f in columns),
                ', '.join(f[1] for f in columns)
            tuple([f[2] for f in columns if len(f) > 2])
        id_new, = cr.fetchone()
        upd_todo.sort(lambda x, y: self._columns[x].priority-self._columns[y].priority)
        # Maintain the parent_left/parent_right nested-set columns, unless deferred.
        if self._parent_store and not context.get('defer_parent_store_computation'):
                self.pool._init_parent[self._name] = True
            parent = vals.get(self._parent_name, False)
                cr.execute('select parent_right from '+self._table+' where '+self._parent_name+'=%s order by '+(self._parent_order or self._order), (parent,))
                result_p = cr.fetchall()
                for (pleft,) in result_p:
                    cr.execute('select parent_left from '+self._table+' where id=%s', (parent,))
                    pleft_old = cr.fetchone()[0]
                # No parent: append at the far right of the whole tree.
                cr.execute('select max(parent_right) from '+self._table)
                pleft = cr.fetchone()[0] or 0
            cr.execute('update '+self._table+' set parent_left=parent_left+2 where parent_left>%s', (pleft,))
            cr.execute('update '+self._table+' set parent_right=parent_right+2 where parent_right>%s', (pleft,))
            cr.execute('update '+self._table+' set parent_left=%s,parent_right=%s where id=%s', (pleft+1, pleft+2, id_new))
        # default element in context must be remove when call a one2many or many2many
        rel_context = context.copy()
        for c in context.items():
            if c[0].startswith('default_'):
                del rel_context[c[0]]
        # Apply the non-classic-write fields through their set() handlers.
        for field in upd_todo:
            result += self._columns[field].set(cr, self, id_new, field, vals[field], user, rel_context) or []
        self._validate(cr, user, [id_new], context)
        if not context.get('no_store_function', False):
            result += self._store_get_values(cr, user, [id_new],
                list(set(vals.keys() + self._inherits.values())),
            for order, model_name, ids, fields2 in result:
                if not (model_name, ids, fields2) in done:
                    self.pool[model_name]._store_set_values(cr, user, ids, fields2, context)
                    done.append((model_name, ids, fields2))
        if self._log_create and not (context and context.get('no_store_function', False)):
            message = self._description + \
                self.name_get(cr, user, [id_new], context=context)[0][1] + \
                "' " + _("created.")
            self.log(cr, user, id_new, message, True, context=context)
        self.check_access_rule(cr, user, [id_new], 'create', context=context)
        self.create_workflow(cr, user, [id_new], context=context)
def browse(self, cr, uid, select, context=None, list_class=None, fields_process=None):
    """Fetch records as objects allowing to use dot notation to browse fields and relations

    :param cr: database cursor
    :param uid: current user id
    :param select: id or list of ids.
    :param context: context arguments, like lang, time zone
    :rtype: object or list of objects requested
    """
    if not context:
        context = {}
    self._list_class = list_class or browse_record_list
    # one shared cache for every record browsed in this call
    record_cache = {}
    # ids coming from a method launched by a button in the interface
    # may be longs rather than ints, so accept both
    if isinstance(select, (int, long)):
        return browse_record(cr, uid, select, self, record_cache,
                             context=context, list_class=self._list_class,
                             fields_process=fields_process)
    if isinstance(select, list):
        records = (browse_record(cr, uid, rec_id, self, record_cache,
                                 context=context, list_class=self._list_class,
                                 fields_process=fields_process)
                   for rec_id in select)
        return self._list_class(records, context=context)
    return browse_null()
def _store_get_values(self, cr, uid, ids, fields, context):
    """Returns an ordered list of fields.function to call due to
    an update operation on ``fields`` of records with ``ids``,
    obtained by calling the 'store' triggers of these fields,
    as setup by their 'store' attribute.

    :return: [(priority, model_name, [record_ids,], [function_fields,])]
    """
    if fields is None: fields = []
    stored_functions = self.pool._store_function.get(self._name, [])

    # use indexed names for the details of the stored_functions:
    model_name_, func_field_to_compute_, target_ids_func_, trigger_fields_, priority_ = range(5)

    # only keep store triggers that should be triggered for the ``fields``
    # being written to
    triggers_to_compute = [
        trigger for trigger in stored_functions
        if not trigger[trigger_fields_]
        or set(fields).intersection(trigger[trigger_fields_])
    ]

    to_compute_map = {}
    target_id_results = {}
    for store_trigger in triggers_to_compute:
        # memoize each distinct target-ids closure so it is evaluated only once
        target_func_id_ = id(store_trigger[target_ids_func_])
        if target_func_id_ not in target_id_results:
            # use admin user for accessing objects having rules defined on store fields
            target_id_results[target_func_id_] = [
                i for i in store_trigger[target_ids_func_](self, cr, SUPERUSER_ID, ids, context) if i]
        target_ids = target_id_results[target_func_id_]

        # the compound key must consider the priority and model name
        key = (store_trigger[priority_], store_trigger[model_name_])
        for target_id in target_ids:
            to_compute_map.setdefault(key, {}).setdefault(target_id, set()).add(tuple(store_trigger))

    # Here to_compute_map looks like:
    # { (10, 'model_a') : { target_id1: [ (trigger_1_tuple, trigger_2_tuple) ], ... }
    #   (20, 'model_a') : { target_id2: [ (trigger_3_tuple, trigger_4_tuple) ], ... }
    #   (99, 'model_a') : { target_id1: [ (trigger_5_tuple, trigger_6_tuple) ], ... }
    # }
    # Now generate the batch function-call list: group target ids that share
    # the exact same trigger set so each batch is computed in a single call:
    # { (10, 'model_a') : [(10, 'model_a', [record_ids,], [function_fields,])] }
    call_map = {}
    for (priority, model), id_map in to_compute_map.iteritems():
        trigger_ids_maps = {}
        # { (function_1_tuple, function_2_tuple) : [target_id1, target_id2, ..] }
        for target_id, triggers in id_map.iteritems():
            trigger_ids_maps.setdefault(tuple(triggers), []).append(target_id)
        for triggers, target_ids in trigger_ids_maps.iteritems():
            call_map.setdefault((priority, model), []).append(
                (priority, model, target_ids,
                 [t[func_field_to_compute_] for t in triggers]))

    # flatten, ordered by (priority, model)
    ordered_keys = call_map.keys()
    ordered_keys.sort()
    result = []
    if ordered_keys:
        result = reduce(operator.add, (call_map[k] for k in ordered_keys))
    return result
4363 def _store_set_values(self, cr, uid, ids, fields, context):
4364 """Calls the fields.function's "implementation function" for all ``fields``, on records with ``ids`` (taking care of
4365 respecting ``multi`` attributes), and stores the resulting values in the database directly."""
# NOTE(review): this excerpt has gaps (elided original lines, see the jumps in
# the numbering); code lines are kept byte-identical, comments only are added.
# Freshness pre-pass: when the model tracks write_date, collect per record the
# function fields whose stored value was recomputed too recently to redo now.
4370 if self._log_access:
4371 cr.execute('select id,write_date from '+self._table+' where id IN %s', (tuple(ids),))
# field_dict maps record id -> list of function-field names considered fresh.
4375 field_dict.setdefault(r[0], [])
# write_date is parsed from its first 19 characters, i.e. '%Y-%m-%d %H:%M:%S'.
4376 res_date = time.strptime((r[1])[:19], '%Y-%m-%d %H:%M:%S')
4377 write_date = datetime.datetime.fromtimestamp(time.mktime(res_date))
4378 for i in self.pool._store_function.get(self._name, []):
# i[5] appears to be the trigger's freshness window in hours — TODO confirm.
4380 up_write_date = write_date + datetime.timedelta(hours=i[5])
4381 if datetime.datetime.now() < up_write_date:
4383 field_dict[r[0]].append(i[1])
# Group requested fields by their _multi key so that multi-value function
# fields are computed in one call per group.
4389 if self._columns[f]._multi not in keys:
4390 keys.append(self._columns[f]._multi)
4391 todo.setdefault(self._columns[f]._multi, [])
4392 todo[self._columns[f]._multi].append(f)
# Multi case: one .get() call computes all fields of the group at once.
4396 # use admin user for accessing objects having rules defined on store fields
4397 result = self._columns[val[0]].get(cr, self, ids, val, SUPERUSER_ID, context=context)
4398 for id, value in result.items():
# Drop values whose stored copy is still fresh (see field_dict above).
4400 for f in value.keys():
4401 if f in field_dict[id]:
# many2one function results come back as (id, name) pairs; keep only the id.
4408 if self._columns[v]._type == 'many2one':
4410 value[v] = value[v][0]
# _symbol_set is (sql_placeholder, python->sql converter) for the column.
4413 upd0.append('"'+v+'"='+self._columns[v]._symbol_set[0])
4414 upd1.append(self._columns[v]._symbol_set[1](value[v]))
4417 cr.execute('update "' + self._table + '" set ' + \
4418 ','.join(upd0) + ' where id = %s', upd1)
# Non-multi case: compute and store each function field individually.
4422 # use admin user for accessing objects having rules defined on store fields
4423 result = self._columns[f].get(cr, self, ids, f, SUPERUSER_ID, context=context)
4424 for r in result.keys():
4426 if r in field_dict.keys():
4427 if f in field_dict[r]:
4429 for id, value in result.items():
4430 if self._columns[f]._type == 'many2one':
4435 cr.execute('update "' + self._table + '" set ' + \
4436 '"'+f+'"='+self._columns[f]._symbol_set[0] + ' where id = %s', (self._columns[f]._symbol_set[1](value), id))
def perm_write(self, cr, user, ids, fields, context=None):
    """Removed API; kept only so legacy callers fail loudly."""
    raise NotImplementedError(_('This method does not exist anymore'))
# TODO: improve handling of NULL values
def _where_calc(self, cr, user, domain, active_test=True, context=None):
    """Computes the WHERE clause needed to implement an OpenERP domain.

    :param domain: the domain to compute
    :param active_test: whether the default filtering of records with ``active``
                        field set to ``False`` should be applied.
    :return: the query expressing the given domain as provided in domain
    :rtype: osv.query.Query
    """
    if not context:
        context = {}
    # work on a copy: the 'active' clause below mutates the domain
    domain = domain[:]
    # if the object has a field named 'active', filter out all inactive
    # records unless they were explicitely asked for
    if 'active' in self._all_columns and (active_test and context.get('active_test', True)):
        if domain:
            # the item[0] trick below works for domain items and '&'/'|'/'!'
            # operators too
            if not any(item[0] == 'active' for item in domain):
                domain.insert(0, ('active', '=', 1))
        else:
            domain = [('active', '=', 1)]

    if domain:
        expr = expression.expression(cr, user, domain, self, context)
        tables = expr.get_tables()
        where_clause, where_params = expr.to_sql()
        where_clause = where_clause and [where_clause] or []
    else:
        # empty domain: select everything from the model's own table
        where_clause, where_params, tables = [], [], ['"%s"' % self._table]

    return Query(tables, where_clause, where_params)
def _check_qorder(self, word):
    """Validate an ORDER BY specification string; raise when malformed."""
    if regex_order.match(word):
        return True
    raise except_orm(_('AccessError'), _('Invalid "order" specified. A valid "order" specification is a comma-separated list of valid field names (optionally followed by asc/desc for the direction)'))
def _apply_ir_rules(self, cr, uid, query, mode='read', context=None):
    """Add what's missing in ``query`` to implement all appropriate ir.rules
    (using the ``model_name``'s rules or the current model's rules if ``model_name`` is None)

    :param query: the current query object
    """
    if uid == SUPERUSER_ID:
        # the superuser bypasses record rules entirely
        return

    def apply_rule(added_clause, added_params, added_tables, parent_model=None, child_object=None):
        """ :param string parent_model: string of the parent model
            :param model child_object: model object, base of the rule application
        """
        if not added_clause:
            return False
        if parent_model and child_object:
            # as inherited rules are being applied, we need to add the missing JOIN
            # to reach the parent table (if it was not JOINed yet in the query)
            parent_alias = child_object._inherits_join_add(child_object, parent_model, query)
            # inherited rules are applied on the external table -> need to get the alias and replace
            parent_table = self.pool[parent_model]._table
            added_clause = [clause.replace('"%s"' % parent_table, '"%s"' % parent_alias) for clause in added_clause]
            # change references to parent_table to parent_alias, because we now use the alias to refer to the table
            aliased_tables = []
            for table in added_tables:
                if table == '"%s"' % parent_table:
                    # table is just a table name -> switch to the full alias
                    aliased_tables.append('"%s" as "%s"' % (parent_table, parent_alias))
                else:
                    # table is already a full statement -> replace reference to the table by its alias,
                    # which is correct with the way aliases are generated
                    aliased_tables.append(table.replace('"%s"' % parent_table, '"%s"' % parent_alias))
            added_tables = aliased_tables
        query.where_clause += added_clause
        query.where_clause_params += added_params
        for table in added_tables:
            if table not in query.tables:
                query.tables.append(table)
        return True

    # apply main rules on the object
    rule_obj = self.pool.get('ir.rule')
    rule_where_clause, rule_where_clause_params, rule_tables = rule_obj.domain_get(cr, uid, self._name, mode, context=context)
    apply_rule(rule_where_clause, rule_where_clause_params, rule_tables)

    # apply ir.rules from the parents (through _inherits)
    for inherited_model in self._inherits:
        rule_where_clause, rule_where_clause_params, rule_tables = rule_obj.domain_get(cr, uid, inherited_model, mode, context=context)
        apply_rule(rule_where_clause, rule_where_clause_params, rule_tables,
                   parent_model=inherited_model, child_object=self)
def _generate_m2o_order_by(self, order_field, query):
    """
    Add possibly missing JOIN to ``query`` and generate the ORDER BY clause for m2o fields,
    either native m2o fields or function/related fields that are stored, including
    intermediate JOINs for inheritance if required.

    :return: the qualified field name to use in an ORDER BY clause to sort by ``order_field``
    """
    if order_field not in self._columns and order_field in self._inherit_fields:
        # also add missing joins for reaching the table containing the m2o field
        qualified_field = self._inherits_join_calc(order_field, query)
        order_field_column = self._inherit_fields[order_field][2]
    else:
        qualified_field = '"%s"."%s"' % (self._table, order_field)
        order_field_column = self._columns[order_field]

    assert order_field_column._type == 'many2one', 'Invalid field passed to _generate_m2o_order_by()'
    if not order_field_column._classic_write and not getattr(order_field_column, 'store', False):
        # a non-stored function/related m2o has no SQL column to sort on
        _logger.debug("Many2one function/related fields must be stored " \
            "to be used as ordering fields! Ignoring sorting for %s.%s",
            self._name, order_field)
        return

    # figure out the applicable order_by for the m2o
    dest_model = self.pool[order_field_column._obj]
    m2o_order = dest_model._order
    if not regex_order.match(m2o_order):
        # _order is complex, can't use it here, so we default to _rec_name
        m2o_order = dest_model._rec_name
    else:
        # extract the field names, to be able to qualify them and add desc/asc
        m2o_order = [order_part.strip().split(" ", 1)[0].strip()
                     for order_part in m2o_order.split(",")]

    # Join the dest m2o table if it's not joined yet. We use [LEFT] OUTER join here
    # as we don't want to exclude results that have NULL values for the m2o
    src_table, src_field = qualified_field.replace('"', '').split('.', 1)
    dst_alias, dst_alias_statement = query.add_join((src_table, dest_model._table, src_field, 'id', src_field), implicit=False, outer=True)
    qualify = lambda field: '"%s"."%s"' % (dst_alias, field)
    return map(qualify, m2o_order) if isinstance(m2o_order, list) else qualify(m2o_order)
def _generate_order_by(self, order_spec, query):
    """
    Attempt to construct an appropriate ORDER BY clause based on order_spec, which must be
    a comma-separated list of valid field names, optionally followed by an ASC or DESC direction.

    :raise except_orm: in case order_spec is malformed
    """
    order_by_clause = ''
    order_spec = order_spec or self._order
    if order_spec:
        order_by_elements = []
        self._check_qorder(order_spec)
        for order_part in order_spec.split(','):
            order_split = order_part.strip().split(' ')
            order_field = order_split[0].strip()
            order_direction = order_split[1].strip() if len(order_split) == 2 else ''
            if order_field == 'id' or (self._log_access and order_field in LOG_ACCESS_COLUMNS.keys()):
                # plain column on the model's own table
                order_by_elements.append('"%s"."%s" %s' % (self._table, order_field, order_direction))
                continue
            if order_field in self._columns:
                order_column = self._columns[order_field]
                if order_column._classic_read:
                    inner_clause = '"%s"."%s"' % (self._table, order_field)
                elif order_column._type == 'many2one':
                    inner_clause = self._generate_m2o_order_by(order_field, query)
                else:
                    continue  # ignore non-readable or "non-joinable" fields
            elif order_field in self._inherit_fields:
                parent_obj = self.pool[self._inherit_fields[order_field][3]]
                order_column = parent_obj._columns[order_field]
                if order_column._classic_read:
                    inner_clause = self._inherits_join_calc(order_field, query)
                elif order_column._type == 'many2one':
                    inner_clause = self._generate_m2o_order_by(order_field, query)
                else:
                    continue  # ignore non-readable or "non-joinable" fields
            else:
                raise ValueError( _("Sorting field %s not found on model %s") %( order_field, self._name))
            # a m2o ordering may expand into several qualified columns
            if isinstance(inner_clause, list):
                for clause in inner_clause:
                    order_by_elements.append("%s %s" % (clause, order_direction))
            else:
                order_by_elements.append("%s %s" % (inner_clause, order_direction))
        if order_by_elements:
            order_by_clause = ",".join(order_by_elements)

    return order_by_clause and (' ORDER BY %s ' % order_by_clause) or ''
def _search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False, access_rights_uid=None):
    """
    Private implementation of search() method, allowing specifying the uid to use for the access right check.
    This is useful for example when filling in the selection list for a drop-down and avoiding access rights errors,
    by specifying ``access_rights_uid=1`` to bypass access rights check, but not ir.rules!
    This is ok at the security level because this method is private and not callable through XML-RPC.

    :param access_rights_uid: optional user ID to use when checking access rights
                              (not for ir.rules, this is only for ir.model.access)
    """
    if context is None:
        context = {}
    self.check_access_rights(cr, access_rights_uid or user, 'read')

    # For transient models, restrict acces to the current user, except for the super-user
    if self.is_transient() and self._log_access and user != SUPERUSER_ID:
        args = expression.AND(([('create_uid', '=', user)], args or []))

    query = self._where_calc(cr, user, args, context=context)
    self._apply_ir_rules(cr, user, query, 'read', context=context)
    order_by = self._generate_order_by(order, query)
    from_clause, where_clause, where_clause_params = query.get_sql()

    where_str = where_clause and (" WHERE %s" % where_clause) or ''

    if count:
        # Ignore order, limit and offset when just counting, they don't make sense and could
        # hurt performance
        query_str = 'SELECT count(1) FROM ' + from_clause + where_str
        cr.execute(query_str, where_clause_params)
        res = cr.fetchone()
        return res[0]

    limit_str = limit and ' limit %d' % limit or ''
    offset_str = offset and ' offset %d' % offset or ''
    query_str = 'SELECT "%s".id FROM ' % self._table + from_clause + where_str + order_by + limit_str + offset_str
    cr.execute(query_str, where_clause_params)
    res = cr.fetchall()

    # TDE note: with auto_join, we could have several lines about the same result
    # i.e. a lead with several unread messages; we uniquify the result using
    # a fast way to do it while preserving order (http://www.peterbe.com/plog/uniqifiers-benchmark)
    def _uniquify_list(seq):
        seen = set()
        return [x for x in seq if x not in seen and not seen.add(x)]

    return _uniquify_list([x[0] for x in res])
# returns the different values ever entered for one field
# this is used, for example, in the client when the user hits enter on
# a char field
def distinct_field_get(self, cr, uid, field, value, args=None, offset=0, limit=None):
    """Delegate to the parent model when the field is inherited, otherwise
    search directly on the column."""
    if not args:
        args = []
    if field in self._inherit_fields:
        return self.pool[self._inherit_fields[field][0]].distinct_field_get(cr, uid, field, value, args, offset, limit)
    return self._columns[field].search(cr, self, args, field, value, offset, limit, uid)
4685 def copy_data(self, cr, uid, id, default=None, context=None):
# Purpose: build the values dict needed to duplicate record ``id`` (the
# workhorse behind copy()). NOTE(review): this excerpt has gaps (elided
# original lines); code lines are kept byte-identical, comments only added.
4687 Copy given record's data with all its fields values
4689 :param cr: database cursor
4690 :param uid: current user id
4691 :param id: id of the record to copy
4692 :param default: field values to override in the original values of the copied record
4693 :type default: dictionary
4694 :param context: context arguments, like lang, time zone
4695 :type context: dictionary
4696 :return: dictionary containing all the field values
4702 # avoid recursion through already copied records in case of circular relationship
4703 seen_map = context.setdefault('__copy_data_seen', {})
4704 if id in seen_map.setdefault(self._name, []):
4706 seen_map[self._name].append(id)
# 'state' is reset to its model default rather than copied from the source.
4710 if 'state' not in default:
4711 if 'state' in self._defaults:
4712 if callable(self._defaults['state']):
4713 default['state'] = self._defaults['state'](self, cr, uid, context)
4715 default['state'] = self._defaults['state']
4717 # build a black list of fields that should not be copied
4718 blacklist = set(MAGIC_COLUMNS + ['parent_left', 'parent_right'])
4719 def blacklist_given_fields(obj):
4720 # blacklist the fields that are given by inheritance
4721 for other, field_to_other in obj._inherits.items():
4722 blacklist.add(field_to_other)
4723 if field_to_other in default:
4724 # all the fields of 'other' are given by the record: default[field_to_other],
4725 # except the ones redefined in self
4726 blacklist.update(set(self.pool[other]._all_columns) - set(self._columns))
# recurse into the parent model's own _inherits chain
4728 blacklist_given_fields(self.pool[other])
4729 # blacklist deprecated fields
4730 for name, field in obj._columns.items():
4731 if field.deprecated:
4734 blacklist_given_fields(self)
# fields.function columns are excluded: their values are recomputed, not copied.
4737 fields_to_copy = dict((f,fi) for f, fi in self._all_columns.iteritems()
4739 if f not in blacklist
4740 if not isinstance(fi.column, fields.function))
4742 data = self.read(cr, uid, [id], fields_to_copy.keys(), context=context)
4746 raise IndexError( _("Record #%d of %s not found, cannot copy!") %( id, self._name))
# Convert read() results into create()-compatible values per relation type.
4749 for f, colinfo in fields_to_copy.iteritems():
4750 field = colinfo.column
4751 if field._type == 'many2one':
# read() returns (id, name) for many2one; keep only the id
4752 res[f] = data[f] and data[f][0]
4753 elif field._type == 'one2many':
4754 other = self.pool[field._obj]
4755 # duplicate following the order of the ids because we'll rely on
4756 # it later for copying translations in copy_translation()!
4757 lines = [other.copy_data(cr, uid, line_id, context=context) for line_id in sorted(data[f])]
4758 # the lines are duplicated using the wrong (old) parent, but then
4759 # are reassigned to the correct one thanks to the (0, 0, ...)
4760 res[f] = [(0, 0, line) for line in lines if line]
4761 elif field._type == 'many2many':
4762 res[f] = [(6, 0, data[f])]
4768 def copy_translations(self, cr, uid, old_id, new_id, context=None):
# Purpose: after a copy(), duplicate the ir.translation rows of the source
# record onto the new record, recursing into one2many children.
# NOTE(review): this excerpt has gaps (elided original lines); code lines are
# kept byte-identical, comments only are added.
4772 # avoid recursion through already copied records in case of circular relationship
4773 seen_map = context.setdefault('__copy_translations_seen',{})
4774 if old_id in seen_map.setdefault(self._name,[]):
4776 seen_map[self._name].append(old_id)
4778 trans_obj = self.pool.get('ir.translation')
4779 # TODO it seems fields_get can be replaced by _all_columns (no need for translation)
4780 fields = self.fields_get(cr, uid, context=context)
4782 for field_name, field_def in fields.items():
4783 # removing the lang to compare untranslated values
4784 context_wo_lang = dict(context, lang=None)
4785 old_record, new_record = self.browse(cr, uid, [old_id, new_id], context=context_wo_lang)
4786 # we must recursively copy the translations for o2o and o2m
4787 if field_def['type'] == 'one2many':
4788 target_obj = self.pool[field_def['relation']]
4789 # here we rely on the order of the ids to match the translations
4790 # as foreseen in copy_data()
4791 old_children = sorted(r.id for r in old_record[field_name])
4792 new_children = sorted(r.id for r in new_record[field_name])
4793 for (old_child, new_child) in zip(old_children, new_children):
4794 target_obj.copy_translations(cr, uid, old_child, new_child, context=context)
4795 # and for translatable fields we keep them for copy
4796 elif field_def.get('translate'):
# own column: translations are attached directly to this record's ids
4797 if field_name in self._columns:
4798 trans_name = self._name + "," + field_name
# inherited column: translations live on the parent record instead
4801 elif field_name in self._inherit_fields:
4802 trans_name = self._inherit_fields[field_name][0] + "," + field_name
4803 # get the id of the parent record to set the translation
4804 inherit_field_name = self._inherit_fields[field_name][1]
4805 target_id = new_record[inherit_field_name].id
4806 source_id = old_record[inherit_field_name].id
4810 trans_ids = trans_obj.search(cr, uid, [
4811 ('name', '=', trans_name),
4812 ('res_id', '=', source_id)
4814 user_lang = context.get('lang')
# re-create each translation row, retargeted at the new record
4815 for record in trans_obj.read(cr, uid, trans_ids, context=context):
4817 # remove source to avoid triggering _set_src
4818 del record['source']
4819 record.update({'res_id': target_id})
4820 if user_lang and user_lang == record['lang']:
4821 # 'source' to force the call to _set_src
4822 # 'value' needed if value is changed in copy(), want to see the new_value
4823 record['source'] = old_record[field_name]
4824 record['value'] = new_record[field_name]
4825 trans_obj.create(cr, uid, record, context=context)
def copy(self, cr, uid, id, default=None, context=None):
    """
    Duplicate record with given id updating it with default values

    :param cr: database cursor
    :param uid: current user id
    :param id: id of the record to copy
    :param default: dictionary of field values to override in the original values of the copied record, e.g: ``{'field_name': overriden_value, ...}``
    :type default: dictionary
    :param context: context arguments, like lang, time zone
    :type context: dictionary
    :return: id of the newly created record
    """
    if context is None:
        context = {}
    # work on a private copy: copy_data()/copy_translations() record the
    # already-visited ids inside the context
    context = context.copy()
    data = self.copy_data(cr, uid, id, default, context)
    new_id = self.create(cr, uid, data, context)
    self.copy_translations(cr, uid, id, new_id, context)
    return new_id
def exists(self, cr, uid, ids, context=None):
    """Checks whether the given id or ids exist in this model,
    and return the list of ids that do. This is simple to use for
    a truth test on a browse_record::

        if record.exists():
            pass

    :param ids: id or list of ids to check for existence
    :type ids: int or [int]
    :return: the list of ids that currently exist, out of
             the given ``ids``
    """
    if type(ids) in (int, long):
        ids = [ids]
    # guard against empty input: "IN ()" is invalid SQL and psycopg2
    # cannot adapt an empty tuple as a parameter
    if not ids:
        return []
    query = 'SELECT id FROM "%s"' % self._table
    # BUGFIX: a separating space was missing before WHERE in the
    # concatenated statement
    cr.execute(query + " WHERE ID IN %s", (tuple(ids),))
    return [x[0] for x in cr.fetchall()]
def check_recursion(self, cr, uid, ids, context=None, parent=None):
    """Deprecated public alias for :meth:`_check_recursion`."""
    _logger.warning("You are using deprecated %s.check_recursion(). Please use the '_check_recursion()' instead!" % \
                    self._name)
    assert parent is None or parent in self._columns or parent in self._inherit_fields,\
        "The 'parent' parameter passed to check_recursion() must be None or a valid field name"
    return self._check_recursion(cr, uid, ids, context, parent)
4878 def _check_recursion(self, cr, uid, ids, context=None, parent=None):
4880 Verifies that there is no loop in a hierarchical structure of records,
4881 by following the parent relationship using the **parent** field until a loop
4882 is detected or until a top-level record is found.
4884 :param cr: database cursor
4885 :param uid: current user id
4886 :param ids: list of ids of records to check
4887 :param parent: optional parent field name (default: ``self._parent_name = parent_id``)
4888 :return: **True** if the operation can proceed safely, or **False** if an infinite loop is detected.
4891 parent = self._parent_name
4893 # must ignore 'active' flag, ir.rules, etc. => direct SQL query
4894 query = 'SELECT "%s" FROM "%s" WHERE id = %%s' % (parent, self._table)
4897 while current_id is not None:
4898 cr.execute(query, (current_id,))
4899 result = cr.fetchone()
4900 current_id = result[0] if result else None
4901 if current_id == id:
4905 def _check_m2m_recursion(self, cr, uid, ids, field_name):
4907 Verifies that there is no loop in a hierarchical structure of records,
4908 by following the parent relationship using the **parent** field until a loop
4909 is detected or until a top-level record is found.
4911 :param cr: database cursor
4912 :param uid: current user id
4913 :param ids: list of ids of records to check
4914 :param field_name: field to check
4915 :return: **True** if the operation can proceed safely, or **False** if an infinite loop is detected.
4918 field = self._all_columns.get(field_name)
4919 field = field.column if field else None
4920 if not field or field._type != 'many2many' or field._obj != self._name:
4921 # field must be a many2many on itself
4922 raise ValueError('invalid field_name: %r' % (field_name,))
4924 query = 'SELECT distinct "%s" FROM "%s" WHERE "%s" IN %%s' % (field._id2, field._rel, field._id1)
4928 for i in range(0, len(ids_parent), cr.IN_MAX):
4930 sub_ids_parent = ids_parent[i:j]
4931 cr.execute(query, (tuple(sub_ids_parent),))
4932 ids_parent2.extend(filter(None, map(lambda x: x[0], cr.fetchall())))
4933 ids_parent = ids_parent2
4934 for i in ids_parent:
4939 def _get_external_ids(self, cr, uid, ids, *args, **kwargs):
4940 """Retrieve the External ID(s) of any database record.
4942 **Synopsis**: ``_get_xml_ids(cr, uid, ids) -> { 'id': ['module.xml_id'] }``
4944 :return: map of ids to the list of their fully qualified External IDs
4945 in the form ``module.key``, or an empty list when there's no External
4946 ID for a record, e.g.::
4948 { 'id': ['module.ext_id', 'module.ext_id_bis'],
4951 ir_model_data = self.pool.get('ir.model.data')
4952 data_ids = ir_model_data.search(cr, uid, [('model', '=', self._name), ('res_id', 'in', ids)])
4953 data_results = ir_model_data.read(cr, uid, data_ids, ['module', 'name', 'res_id'])
4956 # can't use dict.fromkeys() as the list would be shared!
4958 for record in data_results:
4959 result[record['res_id']].append('%(module)s.%(name)s' % record)
def get_external_id(self, cr, uid, ids, *args, **kwargs):
    """Retrieve the External ID of any database record, if there
    is one. This method works as a possible implementation
    for a function field, to be able to add it to any
    model object easily, referencing it as ``Model.get_external_id``.

    When multiple External IDs exist for a record, only one
    of them is returned (randomly).

    :return: map of ids to their fully qualified XML ID,
             defaulting to an empty string when there's none
             (to be usable as a function field), e.g.::

                 { 'id': 'module.ext_id',
                   'id2': '' }
    """
    results = self._get_xml_ids(cr, uid, ids)
    for k, v in results.iteritems():
        # keep only the first external id, or '' when there is none
        results[k] = v[0] if v else ''
    return results

# backwards compatibility
get_xml_id = get_external_id
_get_xml_ids = _get_external_ids
def print_report(self, cr, uid, ids, name, data, context=None):
    """
    Render the report `name` for the given IDs. The report must be defined
    for this model, not another.
    """
    report = self.pool['ir.actions.report.xml']._lookup_report(cr, name)
    # the looked-up report must target this very model
    assert self._name == report.table
    return report.create(cr, uid, ids, data, context)
def is_transient(self):
    """Return whether the model is transient.

    See :class:`TransientModel`.
    """
    return self._transient
def _transient_clean_rows_older_than(self, cr, seconds):
    """Delete transient records left untouched for more than ``seconds``."""
    assert self._transient, "Model %s is not transient, it cannot be vacuumed!" % self._name
    # Never delete rows used in last 5 minutes
    seconds = max(seconds, 300)
    query = ("SELECT id FROM " + self._table + " WHERE"
             " COALESCE(write_date, create_date, (now() at time zone 'UTC'))::timestamp"
             " < ((now() at time zone 'UTC') - interval %s)")
    cr.execute(query, ("%s seconds" % seconds,))
    stale_ids = [row[0] for row in cr.fetchall()]
    self.unlink(cr, SUPERUSER_ID, stale_ids)
5020 def _transient_clean_old_rows(self, cr, max_count):
5021 # Check how many rows we have in the table
5022 cr.execute("SELECT count(*) AS row_count FROM " + self._table)
5024 if res[0][0] <= max_count:
5025 return # max not reached, nothing to do
5026 self._transient_clean_rows_older_than(cr, 300)
5028 def _transient_vacuum(self, cr, uid, force=False):
5029 """Clean the transient records.
5031 This unlinks old records from the transient model tables whenever the
5032 "_transient_max_count" or "_max_age" conditions (if any) are reached.
5033 Actual cleaning will happen only once every "_transient_check_time" calls.
5034 This means this method can be called frequently called (e.g. whenever
5035 a new record is created).
5036 Example with both max_hours and max_count active:
5037 Suppose max_hours = 0.2 (e.g. 12 minutes), max_count = 20, there are 55 rows in the
5038 table, 10 created/changed in the last 5 minutes, an additional 12 created/changed between
5039 5 and 10 minutes ago, the rest created/changed more then 12 minutes ago.
5040 - age based vacuum will leave the 22 rows created/changed in the last 12 minutes
5041 - count based vacuum will wipe out another 12 rows. Not just 2, otherwise each addition
5042 would immediately cause the maximum to be reached again.
5043 - the 10 rows that have been created/changed the last 5 minutes will NOT be deleted
5045 assert self._transient, "Model %s is not transient, it cannot be vacuumed!" % self._name
5046 _transient_check_time = 20 # arbitrary limit on vacuum executions
5047 self._transient_check_count += 1
5048 if not force and (self._transient_check_count < _transient_check_time):
5049 return True # no vacuum cleaning this time
5050 self._transient_check_count = 0
5052 # Age-based expiration
5053 if self._transient_max_hours:
5054 self._transient_clean_rows_older_than(cr, self._transient_max_hours * 60 * 60)
5056 # Count-based expiration
5057 if self._transient_max_count:
5058 self._transient_clean_old_rows(cr, self._transient_max_count)
def resolve_2many_commands(self, cr, uid, field_name, commands, fields=None, context=None):
    """ Serializes one2many and many2many commands into record dictionaries
    (as if all the records came from the database via a read()). This
    method is aimed at onchange methods on one2many and many2many fields.

    Because commands might be creation commands, not all record dicts
    will contain an ``id`` field. Commands matching an existing record
    will have an ``id``.

    :param field_name: name of the one2many or many2many field matching the commands
    :type field_name: str
    :param commands: one2many or many2many commands to execute on ``field_name``
    :type commands: list((int|False, int|False, dict|False))
    :param fields: list of fields to read from the database, when applicable
    :type fields: list(str)
    :returns: records in a shape similar to that returned by ``read()``
              (except records may be missing the ``id`` field if they don't exist in db)
    """
    result = []      # result (list of dict)
    record_ids = []  # ids of records to read
    updates = {}     # {id: dict} of updates on particular records

    for command in commands:
        if not isinstance(command, (list, tuple)):
            # bare id -> simple link
            record_ids.append(command)
            continue
        code = command[0]
        if code == 0:          # create
            result.append(command[2])
        elif code == 1:        # update existing record
            record_ids.append(command[1])
            updates.setdefault(command[1], {}).update(command[2])
        elif code in (2, 3):   # delete / unlink
            record_ids = [rid for rid in record_ids if rid != command[1]]
        elif code == 4:        # link
            record_ids.append(command[1])
        elif code == 5:        # unlink all
            result, record_ids = [], []
        elif code == 6:        # replace with explicit id list
            result, record_ids = [], list(command[2])

    # read the records and apply the updates
    other_model = self.pool[self._all_columns[field_name].column._obj]
    for record in other_model.read(cr, uid, record_ids, fields=fields, context=context):
        record.update(updates.get(record['id'], {}))
        result.append(record)

    return result

# for backward compatibility
resolve_o2m_commands_to_record_dicts = resolve_2many_commands
5113 def search_read(self, cr, uid, domain=None, fields=None, offset=0, limit=None, order=None, context=None):
5115 Performs a ``search()`` followed by a ``read()``.
5117 :param cr: database cursor
5118 :param user: current user id
5119 :param domain: Search domain, see ``args`` parameter in ``search()``. Defaults to an empty domain that will match all records.
5120 :param fields: List of fields to read, see ``fields`` parameter in ``read()``. Defaults to all fields.
5121 :param offset: Number of records to skip, see ``offset`` parameter in ``search()``. Defaults to 0.
5122 :param limit: Maximum number of records to return, see ``limit`` parameter in ``search()``. Defaults to no limit.
5123 :param order: Columns to sort result, see ``order`` parameter in ``search()``. Defaults to no sort.
5124 :param context: context arguments.
5125 :return: List of dictionaries containing the asked fields.
5126 :rtype: List of dictionaries.
5129 record_ids = self.search(cr, uid, domain or [], offset=offset, limit=limit, order=order, context=context)
5133 if fields and fields == ['id']:
5134 # shortcut read if we only want the ids
5135 return [{'id': id} for id in record_ids]
5137 # read() ignores active_test, but it would forward it to any downstream search call
5138 # (e.g. for x2m or function fields), and this is not the desired behavior, the flag
5139 # was presumably only meant for the main search().
5140 # TODO: Move this to read() directly?
5141 read_ctx = dict(context or {})
5142 read_ctx.pop('active_test', None)
5144 result = self.read(cr, uid, record_ids, fields, context=read_ctx)
5145 if len(result) <= 1:
5149 index = dict((r['id'], r) for r in result)
5150 return [index[x] for x in record_ids if x in index]
    def _register_hook(self, cr):
        """ stuff to do right after the registry is built

            No-op in the base class; meant to be overridden by models that
            need to run setup code once the registry for the current database
            has been fully built.

            :param cr: database cursor
        """
5156 def __getattr__(self, name):
5157 if name.startswith('signal_'):
5158 signal_name = name[len('signal_'):]
5160 return (lambda *args, **kwargs:
5161 self.signal_workflow(*args, signal=signal_name, **kwargs))
5162 get = getattr(super(BaseModel, self), '__getattr__', None)
5163 if get is not None: return get(name)
5164 raise AttributeError(
5165 "'%s' object has no attribute '%s'" % (type(self).__name__, name))
5167 # keep this import here, at top it will cause dependency cycle errors
class Model(BaseModel):
    """Main super-class for regular database-persisted OpenERP models.

    OpenERP models are created by inheriting from this class::

        class user(Model):
            ...

    The system will later instantiate the class once per database (on
    which the class' module is installed).
    """
    _register = False # not visible in ORM registry, meant to be python-inherited only
    _transient = False # True in a TransientModel
class TransientModel(BaseModel):
    """Model super-class for transient records, meant to be temporarily
    persisted, and regularly vacuum-cleaned.

    A TransientModel has a simplified access rights management,
    all users can create new records, and may only access the
    records they created. The super-user has unrestricted access
    to all TransientModel records.
    """
    _register = False # not visible in ORM registry, meant to be python-inherited only
    # BUG FIX: subclasses must be flagged transient, otherwise the vacuum
    # assertion in _transient_vacuum ("Model %s is not transient") fails and
    # no automatic cleaning ever happens (Model sets _transient = False).
    _transient = True
class AbstractModel(BaseModel):
    """Abstract Model super-class for creating an abstract class meant to be
    inherited by regular models (Models or TransientModels) but not meant to
    be usable on its own, or persisted.

    Technical note: we don't want to make AbstractModel the super-class of
    Model or BaseModel because it would not make sense to put the main
    definition of persistence methods such as create() in it, and still we
    should be able to override them within an AbstractModel.
    """
    _auto = False # don't create any database backend for AbstractModels
    _register = False # not visible in ORM registry, meant to be python-inherited only
def itemgetter_tuple(items):
    """ Fixes itemgetter inconsistency (useful in some cases) of not returning
    a tuple if len(items) == 1: always returns an n-tuple where n = len(items)

    :param items: sequence of indices/keys to extract
    :returns: callable mapping a gettable (indexable) to the n-tuple of its
              extracted values
    """
    if len(items) == 0:
        # operator.itemgetter() rejects zero arguments; return a constant ()
        return lambda a: ()
    if len(items) == 1:
        # operator.itemgetter(i) returns a bare value, not a 1-tuple; wrap it
        return lambda gettable: (gettable[items[0]],)
    return operator.itemgetter(*items)
class ImportWarning(Warning):
    """ Used to send warnings upwards the stack during the import process
    """
    # note: intentionally shadows the builtin ImportWarning within this module
    pass
def convert_pgerror_23502(model, fields, info, e):
    """Convert a PostgreSQL not_null_violation (SQLSTATE 23502) into a
    user-presentable error dict ``{'message': ..., 'field': ...}``.

    Falls back to the raw error text when the message does not match the
    expected PostgreSQL wording or the column is not a known field.
    """
    m = re.match(r'^null value in column "(?P<field>\w+)" violates '
                 r'not-null constraint\n',
                 str(e))
    # BUG FIX: test the match object *before* dereferencing it; re.match
    # returns None on non-matching input and m.group() would then raise
    # AttributeError instead of producing the fallback message.
    if not m or m.group('field') not in fields:
        return {'message': unicode(e)}
    field_name = m.group('field')
    message = _(u"Missing required value for the field '%s'.") % field_name
    field = fields.get(field_name)
    if field:
        # prefer the field's label when its description is available
        message = _(u"Missing required value for the field '%s' (%s)") % (field['string'], field_name)
    return {
        'message': message,
        'field': field_name,
    }
def convert_pgerror_23505(model, fields, info, e):
    """Convert a PostgreSQL unique_violation (SQLSTATE 23505) into a
    user-presentable error dict ``{'message': ..., 'field': ...}``.

    Falls back to the raw error text when the message does not match the
    expected PostgreSQL wording or the key is not a known field.
    """
    m = re.match(r'^duplicate key (?P<field>\w+) violates unique constraint',
                 str(e))
    # BUG FIX: test the match object *before* dereferencing it; re.match
    # returns None on non-matching input and m.group() would then raise
    # AttributeError instead of producing the fallback message.
    if not m or m.group('field') not in fields:
        return {'message': unicode(e)}
    field_name = m.group('field')
    message = _(u"The value for the field '%s' already exists.") % field_name
    field = fields.get(field_name)
    if field:
        # the constraint name may refer to a same-named o2m column; say so
        message = _(u"%s This might be '%s' in the current model, or a field "
                    u"of the same name in an o2m.") % (message, field['string'])
    return {
        'message': message,
        'field': field_name,
    }
# Map of SQLSTATE code -> converter turning a psycopg2 error into a
# user-presentable dict; unknown codes fall back to the raw error text.
PGERROR_TO_OE = collections.defaultdict(
    # shape of mapped converters
    lambda: (lambda model, fvg, info, pgerror: {'message': unicode(pgerror)}), {
    # not_null_violation
    '23502': convert_pgerror_23502,
    # unique constraint error
    '23505': convert_pgerror_23505,
})
5267 # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: