1 # -*- coding: utf-8 -*-
2 ##############################################################################
4 # OpenERP, Open Source Management Solution
5 # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
7 # This program is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU Affero General Public License as
9 # published by the Free Software Foundation, either version 3 of the
10 # License, or (at your option) any later version.
12 # This program is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU Affero General Public License for more details.
17 # You should have received a copy of the GNU Affero General Public License
18 # along with this program. If not, see <http://www.gnu.org/licenses/>.
20 ##############################################################################
24 Object relational mapping to database (postgresql) module
25 * Hierarchical structure
26 * Constraints consistency, validations
27 * Object meta Data depends on its status
28 * Optimised processing by complex query (multiple actions at once)
29 * Default fields value
30 * Permissions optimisation
# 31 * Persistent object: DB postgresql
33 * Multi-level caching system
# 34 * 2 different inheritance mechanisms
36 - classicals (varchar, integer, boolean, ...)
37 - relations (one2many, many2one, many2many)
58 import dateutil.relativedelta
60 from lxml import etree
64 import openerp.tools as tools
65 from openerp.tools.config import config
66 from openerp.tools.misc import CountingStream, DEFAULT_SERVER_DATETIME_FORMAT, DEFAULT_SERVER_DATE_FORMAT
67 from openerp.tools.safe_eval import safe_eval as eval
68 from openerp.tools.translate import _
69 from openerp import SUPERUSER_ID
70 from query import Query
72 _logger = logging.getLogger(__name__)
73 _schema = logging.getLogger(__name__ + '.schema')
75 # List of etree._Element subclasses that we choose to ignore when parsing XML.
76 from openerp.tools import SKIPPED_ELEMENT_TYPES
78 regex_order = re.compile('^( *([a-z0-9:_]+|"[a-z0-9:_]+")( *desc| *asc)?( *, *|))+$', re.I)
79 regex_object_name = re.compile(r'^[a-z0-9_.]+$')
81 AUTOINIT_RECALCULATE_STORED_FIELDS = 1000
def transfer_field_to_modifiers(field, modifiers):
    """Mirror a field descriptor's UI attributes into *modifiers*.

    :param dict field: field descriptor (shape as returned by
                       ``fields_get()`` -- TODO confirm against callers)
    :param dict modifiers: mapping mutated in place with the computed
                           ``invisible``/``readonly``/``required`` values
    """
    # NOTE(review): the initialisations ``default_values = {}`` and
    # ``state_exceptions = {}`` appear elided from this chunk -- confirm
    # against the full file.
    for attr in ('invisible', 'readonly', 'required'):
        state_exceptions[attr] = []
        default_values[attr] = bool(field.get(attr))
    # Record, per attribute, the states whose value differs from the default.
    for state, modifs in (field.get("states",{})).items():
        # NOTE(review): an inner ``for modif in modifs:`` loop appears elided
        # here -- ``modif`` is otherwise unbound.
        if default_values[modif[0]] != modif[1]:
            state_exceptions[modif[0]].append(state)
    for attr, default_value in default_values.items():
        if state_exceptions[attr]:
            # Express the per-state exceptions as a domain on the
            # ``state`` field.
            modifiers[attr] = [("state", "not in" if default_value else "in", state_exceptions[attr])]
        # NOTE(review): an ``else:`` appears elided before the next line.
        modifiers[attr] = default_value
# Don't deal with groups, it is done by check_group().
# Need the context to evaluate the invisible attribute on tree views.
# For non-tree views, the context shouldn't be given.
def transfer_node_to_modifiers(node, modifiers, context=None, in_tree_view=False):
    """Translate a view node's ``attrs``, ``states`` and literal
    ``invisible``/``readonly``/``required`` attributes into *modifiers*.

    Mutates *modifiers* in place.

    :param node: lxml element from a view architecture
    :param dict modifiers: mapping mutated in place
    :param dict context: evaluation context for dynamic attributes
    :param bool in_tree_view: store column invisibility under the separate
                              ``tree_invisible`` key
    """
    # NOTE(review): several structural lines (``else:`` branches and an
    # ``if node.get(a):`` guard) appear elided from this chunk.
    if node.get('attrs'):
        # 'attrs' holds a python dict literal; evaluated via safe_eval
        # (``eval`` is aliased to safe_eval at the top of this module).
        modifiers.update(eval(node.get('attrs')))

    if node.get('states'):
        if 'invisible' in modifiers and isinstance(modifiers['invisible'], list):
            # TODO combine with AND or OR, use implicit AND for now.
            modifiers['invisible'].append(('state', 'not in', node.get('states').split(',')))
            modifiers['invisible'] = [('state', 'not in', node.get('states').split(','))]

    for a in ('invisible', 'readonly', 'required'):
            v = bool(eval(node.get(a), {'context': context or {}}))
            if in_tree_view and a == 'invisible':
                # Invisible in a tree view has a specific meaning, make it a
                # new key in the modifiers attribute.
                modifiers['tree_invisible'] = v
            elif v or (a not in modifiers or not isinstance(modifiers[a], list)):
                # Don't set the attribute to False if a dynamic value was
                # provided (i.e. a domain from attrs or states).
                # NOTE(review): the ``modifiers[a] = v`` assignment appears
                # elided from this chunk.
def simplify_modifiers(modifiers):
    """Remove falsy invisible/readonly/required entries so only meaningful
    modifiers are sent to the client. Mutates *modifiers* in place."""
    for a in ('invisible', 'readonly', 'required'):
        if a in modifiers and not modifiers[a]:
            # NOTE(review): the ``del modifiers[a]`` statement appears elided
            # from this chunk.
def transfer_modifiers_to_node(modifiers, node):
    """Simplify *modifiers* then serialize them as JSON into the node's
    ``modifiers`` attribute (read by the web client).

    NOTE(review): an ``if modifiers:`` guard appears elided from this chunk.
    """
    simplify_modifiers(modifiers)
    node.set('modifiers', simplejson.dumps(modifiers))
def setup_modifiers(node, field=None, context=None, in_tree_view=False):
    """ Processes node attributes and field descriptors to generate
    the ``modifiers`` node attribute and set it on the provided node.

    Alters its first argument in-place.

    :param node: ``field`` node from an OpenERP view
    :type node: lxml.etree._Element
    :param dict field: field descriptor corresponding to the provided node
    :param dict context: execution context used to evaluate node attributes
    :param bool in_tree_view: triggers the ``tree_invisible`` code
                              path (separate from ``invisible``): in
                              tree view there are two levels of
                              invisibility, cell content (a column is
                              present but the cell itself is not
                              displayed) with ``invisible`` and column
                              invisibility (the whole column is
                              hidden) with ``tree_invisible``.
    """
    # NOTE(review): the ``modifiers = {}`` initialisation appears elided from
    # this chunk -- the name is used below.
    if field is not None:
        transfer_field_to_modifiers(field, modifiers)
    transfer_node_to_modifiers(
        node, modifiers, context=context, in_tree_view=in_tree_view)
    transfer_modifiers_to_node(modifiers, node)
def test_modifiers(what, expected):
    """Assert that *what* (an XML arch string or a field-descriptor dict)
    produces exactly the *expected* JSON-encoded modifiers string.

    Raises AssertionError on mismatch; silently does nothing for other
    input types.
    """
    # NOTE(review): the ``modifiers = {}`` initialisation appears elided from
    # this chunk.
    if isinstance(what, basestring):
        node = etree.fromstring(what)
        transfer_node_to_modifiers(node, modifiers)
        simplify_modifiers(modifiers)
        json = simplejson.dumps(modifiers)
        assert json == expected, "%s != %s" % (json, expected)
    elif isinstance(what, dict):
        transfer_field_to_modifiers(what, modifiers)
        simplify_modifiers(modifiers)
        json = simplejson.dumps(modifiers)
        assert json == expected, "%s != %s" % (json, expected)
# openerp.osv.orm.modifiers_tests()
def modifiers_tests():
    """Smoke-test the modifiers machinery: feed each (input, expected) pair
    to test_modifiers(), which raises AssertionError on any mismatch."""
    arch_cases = (
        ('<field name="a"/>', '{}'),
        ('<field name="a" invisible="1"/>', '{"invisible": true}'),
        ('<field name="a" readonly="1"/>', '{"readonly": true}'),
        ('<field name="a" required="1"/>', '{"required": true}'),
        ('<field name="a" invisible="0"/>', '{}'),
        ('<field name="a" readonly="0"/>', '{}'),
        ('<field name="a" required="0"/>', '{}'),
        # TODO order is not guaranteed
        ('<field name="a" invisible="1" required="1"/>', '{"invisible": true, "required": true}'),
        ('<field name="a" invisible="1" required="0"/>', '{"invisible": true}'),
        ('<field name="a" invisible="0" required="1"/>', '{"required": true}'),
        ("""<field name="a" attrs="{'invisible': [('b', '=', 'c')]}"/>""", '{"invisible": [["b", "=", "c"]]}'),
    )
    for arch, expected in arch_cases:
        test_modifiers(arch, expected)

    # The dictionary is supposed to be the result of fields_get().
    field_cases = (
        ({}, '{}'),
        ({"invisible": True}, '{"invisible": true}'),
        ({"invisible": False}, '{}'),
    )
    for descriptor, expected in field_cases:
        test_modifiers(descriptor, expected)
def check_object_name(name):
    """ Check if the given name is a valid openerp object name.

        The _name attribute in osv and osv_memory object is subject to
        some restrictions. This function returns True or False whether
        the given name is allowed or not.

        TODO: this is an approximation. The goal in this approximation
        is to disallow uppercase characters (in some places, we quote
        table/column names and in other not, which leads to this kind
        of errors:

            psycopg2.ProgrammingError: relation "xxx" does not exist).

        The same restriction should apply to both osv and osv_memory
        objects for consistency.
    """
    if regex_object_name.match(name) is None:
        # NOTE(review): the ``return False`` / ``return True`` statements
        # appear elided from this chunk.
def raise_on_invalid_object_name(name):
    """Validate *name* with check_object_name() and raise except_orm when it
    is not an acceptable openerp object name."""
    if check_object_name(name):
        return
    raise except_orm('ValueError', "The _name attribute %s is not valid." % name)
231 POSTGRES_CONFDELTYPES = {
def intersect(la, lb):
    """Return the elements of *la* that also appear in *lb*.

    Preserves *la*'s order (and duplicates).

    :param la: iterable whose elements drive the result order
    :param lb: container tested for membership
    :return: list of common elements
    """
    # A list comprehension replaces filter(lambda ...): same semantics but
    # idiomatic, and it always yields a list (py2's filter did; py3's does
    # not). Membership stays a linear scan of ``lb`` so unhashable elements
    # keep working.
    return [x for x in la if x in lb]
def fix_import_export_id_paths(fieldname):
    """
    Fixes the id fields in import and exports, and splits field paths
    on '/'.

    :param str fieldname: name of the field to import/export
    :return: split field name
    :rtype: list of str
    """
    # ``f.id`` -> ``f/.id`` (database id) and ``f:id`` -> ``f/id``
    # (external id), unless the dot/colon is already preceded by '/'.
    fixed_db_id = re.sub(r'([^/])\.id', r'\1/.id', fieldname)
    fixed_external_id = re.sub(r'([^/]):id', r'\1/id', fixed_db_id)
    return fixed_external_id.split('/')
class except_orm(Exception):
    """Base exception for ORM-level errors, carrying a (name, value) pair."""
    def __init__(self, name, value):
        # NOTE(review): ``self.name = name`` / ``self.value = value``
        # assignments appear elided from this chunk -- confirm against the
        # full file.
        self.args = (name, value)
261 class BrowseRecordError(Exception):
class browse_null(object):
    """ Readonly python database object browser

    Null object returned for empty relational fields; any attribute or item
    access yields None so chained navigation does not blow up.

    NOTE(review): several method bodies (and, apparently, ``__init__`` and
    ``__iter__`` definitions) are elided from this chunk; code tokens are
    kept exactly as found.
    """

    def __getitem__(self, name):

    def __getattr__(self, name):
        return None # XXX: return self ?

    def __nonzero__(self):

    def __unicode__(self):
        # NOTE(review): this raise presumably belongs to an elided
        # ``__iter__`` definition rather than to ``__unicode__``.
        raise NotImplementedError("Iteration is not allowed on %s" % self)
294 # TODO: execute an object method on browse_record_list
class browse_record_list(list):
    """ Collection of browse objects

        Such an instance will be returned when doing a ``browse([ids..])``
        and will be iterable, yielding browse() objects
    """

    def __init__(self, lst, context=None):
        # NOTE(review): a default-context initialisation (lines elided in
        # this chunk) may be missing here.
        super(browse_record_list, self).__init__(lst)
        self.context = context
class browse_record(object):
    """ An object that behaves like a row of an object's table.
        It has attributes after the columns of the corresponding object.

        Examples::

            uobj = pool.get('res.users')
            user_rec = uobj.browse(cr, uid, 104)

    NOTE(review): this chunk has many elided lines (``else:``/``try:``
    headers, some ``def`` lines and assignments); code tokens are kept
    exactly as found, only comments/docstrings were edited.
    """

    def __init__(self, cr, uid, id, table, cache, context=None,
                 list_class=browse_record_list, fields_process=None):
        """
        :param table: the browsed object (inherited from orm)
        :param dict cache: a dictionary of model->field->data to be shared
                           across browse objects, thus reducing the SQL
                           read()s. It can speed up things a lot, but also be
                           disastrous if not discarded after write()/unlink()
        :param dict context: dictionary with an optional context
        """
        if fields_process is None:
        # NOTE(review): ``fields_process = {}`` and the ``self._cr`` /
        # ``self._uid`` / ``self._id`` / ``self._cache`` assignments appear
        # elided from this chunk.
        self._list_class = list_class
        self._table = table # deprecated, use _model!
        self._table_name = self._table._name
        self.__logger = logging.getLogger('openerp.osv.orm.browse_record.' + self._table_name)
        self._context = context
        self._fields_process = fields_process

        cache.setdefault(table._name, {})
        self._data = cache[table._name]

#        if not (id and isinstance(id, (int, long,))):
#            raise BrowseRecordError(_('Wrong ID for the browse record, got %r, expected an integer.') % (id,))
#        if not table.exists(cr, uid, id, context):
#            raise BrowseRecordError(_('Object %s does not exists') % (self,))

        if id not in self._data:
            self._data[id] = {'id': id}

    def __getitem__(self, name):
        """Fetch (and cache) field *name* of this record; prefetches sibling
        cached records and other prefetchable fields along the way."""
        if name not in self._data[self._id]:
            # build the list of fields we will fetch

            # fetch the definition of the field which was asked for
            if name in self._table._columns:
                col = self._table._columns[name]
            elif name in self._table._inherit_fields:
                col = self._table._inherit_fields[name][2]
            elif hasattr(self._table, str(name)):
                attr = getattr(self._table, name)
                if isinstance(attr, (types.MethodType, types.LambdaType, types.FunctionType)):
                    # Bind this record's cursor/uid/id into a model method.
                    def function_proxy(*args, **kwargs):
                        if 'context' not in kwargs and self._context:
                            kwargs.update(context=self._context)
                        return attr(self._cr, self._uid, [self._id], *args, **kwargs)
                    return function_proxy
                # NOTE(review): a final ``else:`` branch (unknown field ->
                # error) appears to begin here; its header seems elided.
                error_msg = "Field '%s' does not exist in object '%s'" % (name, self)
                self.__logger.warning(error_msg)
                if self.__logger.isEnabledFor(logging.DEBUG):
                    self.__logger.debug(''.join(traceback.format_stack()))
                raise KeyError(error_msg)

            # a column is prefetchable when it is stored, marked for
            # prefetch, unrestricted by groups and not deprecated
            prefetchable = lambda f: f._classic_write and f._prefetch and not f.groups and not f.deprecated

            # if the field is a classic one or a many2one, we'll fetch all classic and many2one fields
            if prefetchable(col):
                # gen the list of "local" (ie not inherited) fields which are classic or many2one
                field_filter = lambda x: prefetchable(x[1])
                fields_to_fetch = filter(field_filter, self._table._columns.items())
                # gen the list of inherited fields
                inherits = map(lambda x: (x[0], x[1][2]), self._table._inherit_fields.items())
                # complete the field list with the inherited fields which are classic or many2one
                fields_to_fetch += filter(field_filter, inherits)
            # otherwise we fetch only that field
                fields_to_fetch = [(name, col)]

            # also fill this field for every cached record still missing it
            ids = filter(lambda id: name not in self._data[id], self._data.keys())

            field_names = map(lambda x: x[0], fields_to_fetch)
            # NOTE(review): a ``try:`` header appears elided before the read.
                field_values = self._table.read(self._cr, self._uid, ids, field_names, context=self._context, load="_classic_write")
            except (openerp.exceptions.AccessError, except_orm):
                # prefetching attempt failed, perhaps we're violating ACL restrictions involuntarily
                _logger.info('Prefetching attempt for fields %s on %s failed for ids %s, re-trying just for id %s', field_names, self._model._name, ids, self._id)
                field_values = self._table.read(self._cr, self._uid, ids, field_names, context=self._context, load="_classic_write")

            # TODO: improve this, very slow for reports
            if self._fields_process:
                lang = self._context.get('lang', 'en_US') or 'en_US'
                lang_obj_ids = self.pool.get('res.lang').search(self._cr, self._uid, [('code', '=', lang)])
                    raise Exception(_('Language with code "%s" is not defined in your system !\nDefine it through the Administration menu.') % (lang,))
                lang_obj = self.pool.get('res.lang').browse(self._cr, self._uid, lang_obj_ids[0])

                for field_name, field_column in fields_to_fetch:
                    if field_column._type in self._fields_process:
                        for result_line in field_values:
                            result_line[field_name] = self._fields_process[field_column._type](result_line[field_name])
                            if result_line[field_name]:
                                result_line[field_name].set_value(self._cr, self._uid, result_line[field_name], self, field_column, lang_obj)

                # Where did those ids come from? Perhaps old entries in ir_model_dat?
                _logger.warning("No field_values found for ids %s in %s", ids, self)
                raise KeyError('Field %s not found in %s'%(name, self))
            # create browse records for 'remote' objects
            for result_line in field_values:
                # NOTE(review): a ``new_data = {}`` initialisation appears
                # elided here.
                for field_name, field_column in fields_to_fetch:
                    if field_column._type == 'many2one':
                        if result_line[field_name]:
                            obj = self._table.pool[field_column._obj]
                            if isinstance(result_line[field_name], (list, tuple)):
                                value = result_line[field_name][0]
                                value = result_line[field_name]
                            # FIXME: this happens when an _inherits object
                            #        overwrites a field of its parent. Need
                            #        testing to be sure we got the right
                            #        object and not the parent one.
                            if not isinstance(value, browse_record):
                                    # In some cases the target model is not available yet, so we must ignore it,
                                    # which is safe in most cases, this value will just be loaded later when needed.
                                    # This situation can be caused by custom fields that connect objects with m2o without
                                    # respecting module dependencies, causing relationships to be connected too soon when
                                    # the target is not loaded yet.
                                    new_data[field_name] = browse_record(self._cr,
                                        self._uid, value, obj, self._cache,
                                        context=self._context,
                                        list_class=self._list_class,
                                        fields_process=self._fields_process)
                                new_data[field_name] = value
                                new_data[field_name] = browse_null()
                            new_data[field_name] = browse_null()
                    elif field_column._type in ('one2many', 'many2many') and len(result_line[field_name]):
                        new_data[field_name] = self._list_class(
                            (browse_record(self._cr, self._uid, id, self._table.pool.get(field_column._obj),
                                           self._cache, context=self._context, list_class=self._list_class,
                                           fields_process=self._fields_process)
                             for id in result_line[field_name]),
                            context=self._context)
                    elif field_column._type == 'reference':
                        if result_line[field_name]:
                            if isinstance(result_line[field_name], browse_record):
                                new_data[field_name] = result_line[field_name]
                                # reference values are stored as 'model,id'
                                ref_obj, ref_id = result_line[field_name].split(',')
                                ref_id = long(ref_id)
                                obj = self._table.pool[ref_obj]
                                new_data[field_name] = browse_record(self._cr, self._uid, ref_id, obj, self._cache, context=self._context, list_class=self._list_class, fields_process=self._fields_process)
                            new_data[field_name] = browse_null()
                        new_data[field_name] = browse_null()
                        new_data[field_name] = result_line[field_name]
                self._data[result_line['id']].update(new_data)

        if not name in self._data[self._id]:
            # How did this happen? Could be a missing model due to custom fields used too soon, see above.
            self.__logger.error("Fields to fetch: %s, Field values: %s", field_names, field_values)
            self.__logger.error("Cached: %s, Table: %s", self._data[self._id], self._table)
            raise KeyError(_('Unknown attribute %s in %s ') % (name, self))
        return self._data[self._id][name]

    def __getattr__(self, name):
        # Delegates to __getitem__; a KeyError is re-raised as an
        # AttributeError so the normal attribute protocol keeps working.
            exc_info = sys.exc_info()
            raise AttributeError, "Got %r while trying to get attribute %s on a %s record." % (e, name, self._table._name), exc_info[2]

    def __contains__(self, name):
        return (name in self._table._columns) or (name in self._table._inherit_fields) or hasattr(self._table, name)

        # NOTE(review): a ``def __iter__(self):`` header appears elided here.
        raise NotImplementedError("Iteration is not allowed on %s" % self)

    def __hasattr__(self, name):

        # NOTE(review): a ``def __repr__(self):`` (or ``__str__``) header
        # appears elided before this return.
        return "browse_record(%s, %s)" % (self._table_name, self._id)

    def __eq__(self, other):
        if not isinstance(other, browse_record):
        return (self._table_name, self._id) == (other._table_name, other._id)

    def __ne__(self, other):
        if not isinstance(other, browse_record):
        return (self._table_name, self._id) != (other._table_name, other._id)

    # we need to define __unicode__ even though we've already defined __str__
    # because we have overridden __getattr__
    def __unicode__(self):
        return unicode(str(self))

        # NOTE(review): a ``def __hash__(self):`` header appears elided here.
        return hash((self._table_name, self._id))

        # NOTE(review): a ``def refresh(self):`` header appears elided here.
        """Force refreshing this browse_record's data and all the data of the
        records that belong to the same cache, by emptying the cache completely,
        preserving only the record identifiers (for prefetching optimizations).
        """
        for model, model_cache in self._cache.iteritems():
            # only preserve the ids of the records that were in the cache
            cached_ids = dict([(i, {'id': i}) for i in model_cache.keys()])
            self._cache[model].clear()
            self._cache[model].update(cached_ids)
def pg_varchar(size=0):
    """ Returns the VARCHAR declaration for the provided size:

    * If no size (or an empty or negative size is provided) return an
      'infinite' VARCHAR
    * Otherwise return a VARCHAR(n)

    :type int size: varchar size, optional
    """
    # NOTE(review): the small/empty-size branch and the continuation of the
    # raise below (``% type(size)``) appear elided from this chunk.
    if not isinstance(size, int):
        raise TypeError("VARCHAR parameter should be an int, got %s"
    return 'VARCHAR(%d)' % size
# Map from simple field classes to their PostgreSQL column type (used by
# get_pg_type() below, which uses the same string for identification and
# specification).
# NOTE(review): some entries and the closing brace appear elided from this
# chunk.
FIELDS_TO_PGTYPES = {
    fields.boolean: 'bool',
    fields.integer: 'int4',
    fields.datetime: 'timestamp',
    fields.binary: 'bytea',
    fields.many2one: 'int4',
    fields.serialized: 'text',
def get_pg_type(f, type_override=None):
    """
    :param fields._column f: field to get a Postgres type for
    :param type type_override: use the provided type for dispatching instead of the field's own type
    :returns: (postgres_identification_type, postgres_type_specification)
    """
    # NOTE(review): several ``if``/``else:`` lines and the final return
    # appear elided from this chunk; code tokens are kept exactly as found.
    field_type = type_override or type(f)

    if field_type in FIELDS_TO_PGTYPES:
        pg_type = (FIELDS_TO_PGTYPES[field_type], FIELDS_TO_PGTYPES[field_type])
    elif issubclass(field_type, fields.float):
        # numeric when fixed digits are requested, float8 otherwise
        pg_type = ('numeric', 'NUMERIC')
        pg_type = ('float8', 'DOUBLE PRECISION')
    elif issubclass(field_type, (fields.char, fields.reference)):
        pg_type = ('varchar', pg_varchar(f.size))
    elif issubclass(field_type, fields.selection):
        if (isinstance(f.selection, list) and isinstance(f.selection[0][0], int))\
                or getattr(f, 'size', None) == -1:
            pg_type = ('int4', 'INTEGER')
            pg_type = ('varchar', pg_varchar(getattr(f, 'size', None)))
    elif issubclass(field_type, fields.function):
        if f._type == 'selection':
            pg_type = ('varchar', pg_varchar())
            # dispatch on the function field's underlying type
            pg_type = get_pg_type(f, getattr(fields, f._type))
        # unsupported type: warn (the original presumably returned None here)
        _logger.warning('%s type not supported!', field_type)
class MetaModel(type):
    """ Metaclass for the Model.

    This class is used as the metaclass for the Model class to discover
    the models defined in a module (i.e. without instantiating them).
    If the automatic discovery is not needed, it is possible to set the
    model's _register attribute to False.

    """

    # {module_name: [model_class, ...]}, filled in as model classes are
    # defined; consumed by the module loading machinery.
    module_to_models = {}

    def __init__(self, name, bases, attrs):
        if not self._register:
            self._register = True
            super(MetaModel, self).__init__(name, bases, attrs)
            # NOTE(review): an early ``return`` appears elided here.

        # The (OpenERP) module name can be in the `openerp.addons` namespace
        # or not. For instance module `sale` can be imported as
        # `openerp.addons.sale` (the good way) or `sale` (for backward
        # compatibility).
        module_parts = self.__module__.split('.')
        if len(module_parts) > 2 and module_parts[0] == 'openerp' and \
                module_parts[1] == 'addons':
            module_name = self.__module__.split('.')[2]
            # NOTE(review): an ``else:`` appears elided before the next line.
            module_name = self.__module__.split('.')[0]
        if not hasattr(self, '_module'):
            self._module = module_name

        # Remember which models to instantiate for this module.
        self.module_to_models.setdefault(self._module, []).append(self)
659 # Definition of log access columns, automatically added to models if
660 # self._log_access is True
661 LOG_ACCESS_COLUMNS = {
662 'create_uid': 'INTEGER REFERENCES res_users ON DELETE SET NULL',
663 'create_date': 'TIMESTAMP',
664 'write_uid': 'INTEGER REFERENCES res_users ON DELETE SET NULL',
665 'write_date': 'TIMESTAMP'
667 # special columns automatically created by the ORM
668 MAGIC_COLUMNS = ['id'] + LOG_ACCESS_COLUMNS.keys()
670 class BaseModel(object):
671 """ Base class for OpenERP models.
673 OpenERP models are created by inheriting from this class' subclasses:
675 * Model: for regular database-persisted models
676 * TransientModel: for temporary data, stored in the database but automatically
677 vacuumed every so often
678 * AbstractModel: for abstract super classes meant to be shared by multiple
679 _inheriting classes (usually Models or TransientModels)
681 The system will later instantiate the class once per database (on
682 which the class' module is installed).
684 To create a class that should not be instantiated, the _register class attribute
687 __metaclass__ = MetaModel
688 _auto = True # create database backend
689 _register = False # Set to false if the model shouldn't be automatically discovered.
696 _parent_name = 'parent_id'
697 _parent_store = False
698 _parent_order = False
705 # dict of {field:method}, with method returning the (name_get of records, {id: fold})
706 # to include in the _read_group, if grouped on this field
710 _transient = False # True in a TransientModel
713 # { 'parent_model': 'm2o_field', ... }
716 # Mapping from inherits'd field name to triple (m, r, f, n) where m is the
717 # model from which it is inherits'd, r is the (local) field towards m, f
718 # is the _column object itself, and n is the original (i.e. top-most)
721 # { 'field_name': ('parent_model', 'm2o_field_to_reach_parent',
722 # field_column_obj, original_parent_model), ... }
725 # Mapping field name/column_info object
726 # This is similar to _inherit_fields but:
727 # 1. includes self fields,
728 # 2. uses column_info instead of a triple.
734 _sql_constraints = []
735 _protected = ['read', 'write', 'create', 'default_get', 'perm_read', 'unlink', 'fields_get', 'fields_view_get', 'search', 'name_get', 'distinct_field_get', 'name_search', 'copy', 'import_data', 'search_count', 'exists']
737 CONCURRENCY_CHECK_FIELD = '__last_update'
739 def log(self, cr, uid, id, message, secondary=False, context=None):
740 return _logger.warning("log() is deprecated. Please use OpenChatter notification system instead of the res.log mechanism.")
742 def view_init(self, cr, uid, fields_list, context=None):
743 """Override this method to do specific things when a view on the object is opened."""
    def _field_create(self, cr, context=None):
        """ Create entries in ir_model_fields for all the model's fields.

        If necessary, also create an entry in ir_model, and if called from the
        modules loading scheme (by receiving 'module' in the context), also
        create entries in ir_model_data (for the model and the fields).

        - create an entry in ir_model (if there is not already one),
        - create an entry in ir_model_data (if there is not already one, and if
          'module' is in the context),
        - update ir_model_fields with the fields found in _columns
          (TODO there is some redundancy as _columns is updated from
          ir_model_fields in __init__).

        NOTE(review): many structural lines (guards, ``else:`` branches,
        dict/parenthesis delimiters and parts of the SQL literals) are elided
        from this chunk; code tokens are kept exactly as found.
        """
        cr.execute("SELECT id FROM ir_model WHERE model=%s", (self._name,))
        # create an entry in ir_model when none exists yet
        cr.execute('SELECT nextval(%s)', ('ir_model_id_seq',))
        model_id = cr.fetchone()[0]
        cr.execute("INSERT INTO ir_model (id,model, name, info,state) VALUES (%s, %s, %s, %s, %s)", (model_id, self._name, self._description, self.__doc__, 'base'))
        model_id = cr.fetchone()[0]
        if 'module' in context:
            name_id = 'model_'+self._name.replace('.', '_')
            cr.execute('select * from ir_model_data where name=%s and module=%s', (name_id, context['module']))
            cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, (now() at time zone 'UTC'), (now() at time zone 'UTC'), %s, %s, %s)", \
                (name_id, context['module'], 'ir.model', model_id)

        # snapshot of the fields currently known in ir_model_fields
        cr.execute("SELECT * FROM ir_model_fields WHERE model=%s", (self._name,))
        for rec in cr.dictfetchall():
            cols[rec['name']] = rec

        ir_model_fields_obj = self.pool.get('ir.model.fields')

        # sparse field should be created at the end, as it depends on its serialized field already existing
        model_fields = sorted(self._columns.items(), key=lambda x: 1 if x[1]._type == 'sparse' else 0)
        for (k, f) in model_fields:
                'model_id': model_id,
                'field_description': f.string,
                'relation': f._obj or '',
                'select_level': tools.ustr(f.select or 0),
                'readonly': (f.readonly and 1) or 0,
                'required': (f.required and 1) or 0,
                'selectable': (f.selectable and 1) or 0,
                'translate': (f.translate and 1) or 0,
                'relation_field': f._fields_id if isinstance(f, fields.one2many) else '',
                'serialization_field_id': None,
            if getattr(f, 'serialization_field', None):
                # resolve link to serialization_field if specified by name
                serialization_field_id = ir_model_fields_obj.search(cr, SUPERUSER_ID, [('model','=',vals['model']), ('name', '=', f.serialization_field)])
                if not serialization_field_id:
                    raise except_orm(_('Error'), _("Serialization field `%s` not found for sparse field `%s`!") % (f.serialization_field, k))
                vals['serialization_field_id'] = serialization_field_id[0]

            # When its a custom field,it does not contain f.select
            if context.get('field_state', 'base') == 'manual':
                if context.get('field_name', '') == k:
                    vals['select_level'] = context.get('select', '0')
                #setting value to let the problem NOT occur next time
                    vals['select_level'] = cols[k]['select_level']

                cr.execute('select nextval(%s)', ('ir_model_fields_id_seq',))
                id = cr.fetchone()[0]
                cr.execute("""INSERT INTO ir_model_fields (
                    id, model_id, model, name, field_description, ttype,
                    relation,state,select_level,relation_field, translate, serialization_field_id
                    %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s
                    id, vals['model_id'], vals['model'], vals['name'], vals['field_description'], vals['ttype'],
                    vals['relation'], 'base',
                    vals['select_level'], vals['relation_field'], bool(vals['translate']), vals['serialization_field_id']
                if 'module' in context:
                    name1 = 'field_' + self._table + '_' + k
                    cr.execute("select name from ir_model_data where name=%s", (name1,))
                    name1 = name1 + "_" + str(id)
                    cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, (now() at time zone 'UTC'), (now() at time zone 'UTC'), %s, %s, %s)", \
                        (name1, context['module'], 'ir.model.fields', id)
                for key, val in vals.items():
                    if cols[k][key] != vals[key]:
                        cr.execute('update ir_model_fields set field_description=%s where model=%s and name=%s', (vals['field_description'], vals['model'], vals['name']))
                        cr.execute("""UPDATE ir_model_fields SET
                            model_id=%s, field_description=%s, ttype=%s, relation=%s,
                            select_level=%s, readonly=%s ,required=%s, selectable=%s, relation_field=%s, translate=%s, serialization_field_id=%s
                            model=%s AND name=%s""", (
                            vals['model_id'], vals['field_description'], vals['ttype'],
                            vals['select_level'], bool(vals['readonly']), bool(vals['required']), bool(vals['selectable']), vals['relation_field'], bool(vals['translate']), vals['serialization_field_id'], vals['model'], vals['name']
    # Goal: try to apply inheritance at the instantiation level and
    # put objects in the pool var
    def create_instance(cls, pool, cr):
        """ Instantiate a given model.

        This class method instantiates the class of some model (i.e. a class
        deriving from osv or osv_memory). The class might be the class passed
        in argument or, if it inherits from another class, a class constructed
        by combining the two classes.

        The ``attributes`` argument specifies which parent class attributes
        are combined.

        TODO: the creation of the combined class is repeated at each call of
        this method. This is probably unnecessary.

        NOTE(review): several lines (including a probable ``@classmethod``
        decorator, guards, ``else:`` branches and the final ``return obj``)
        are elided from this chunk; code tokens are kept exactly as found.
        """
        attributes = ['_columns', '_defaults', '_inherits', '_constraints',

        parent_names = getattr(cls, '_inherit', None)
        if isinstance(parent_names, (str, unicode)):
            name = cls._name or parent_names
            parent_names = [parent_names]
            raise TypeError('_name is mandatory in case of multiple inheritance')

        for parent_name in ((type(parent_names)==list) and parent_names or [parent_names]):
            if parent_name not in pool:
                raise TypeError('The model "%s" specifies an unexisting parent class "%s"\n'
                    'You may need to add a dependency on the parent class\' module.' % (name, parent_name))
            parent_model = pool[parent_name]
            if not getattr(cls, '_original_module', None) and name == parent_model._name:
                cls._original_module = parent_model._original_module
            parent_class = parent_model.__class__
            # merge each inheritable attribute of the parent into the child
            new = copy.copy(getattr(parent_model, s, {}))
            # Don't _inherit custom fields.
            if hasattr(new, 'update'):
                new.update(cls.__dict__.get(s, {}))
            elif s=='_constraints':
                for c in cls.__dict__.get(s, []):
                    for c2 in range(len(new)):
                        #For _constraints, we should check field and methods as well
                        if new[c2][2]==c[2] and (new[c2][0] == c[0] \
                                or getattr(new[c2][0],'__name__', True) == \
                                getattr(c[0],'__name__', False)):
                            # If new class defines a constraint with
                            # same function name, we let it override
                            # the inherited one
                    new.extend(cls.__dict__.get(s, []))

            # Keep links to non-inherited constraints, e.g. useful when exporting translations
            nattr['_local_constraints'] = cls.__dict__.get('_constraints', [])
            nattr['_local_sql_constraints'] = cls.__dict__.get('_sql_constraints', [])
            cls = type(name, (cls, parent_class), dict(nattr, _register=False))

        cls._local_constraints = getattr(cls, '_constraints', [])
        cls._local_sql_constraints = getattr(cls, '_sql_constraints', [])

        if not getattr(cls, '_original_module', None):
            cls._original_module = cls._module
        obj = object.__new__(cls)

        if hasattr(obj, '_columns'):
            # float fields are registry-dependent (digit attribute). Duplicate them to avoid issues.
            for c, f in obj._columns.items():
                if f._type == 'float':
                    obj._columns[c] = copy.copy(f)

        obj.__init__(pool, cr)
        """Register this model.
        This doesn't create an instance but simply register the model
        as being part of the module where it is defined.
        # Set the module name (e.g. base, sale, accounting, ...) on the class.
        # The owning addon is the first component of the dotted module path.
        module = cls.__module__.split('.')[0]
        if not hasattr(cls, '_module'):
            # NOTE(review): the branch body is elided from this view;
            # presumably it assigns cls._module = module -- confirm upstream.
        # Record this class in the list of models to instantiate for this module,
        # managed by the metaclass.
        module_model_list = MetaModel.module_to_models.setdefault(cls._module, [])
        if cls not in module_model_list:
            # Guard against registering the same class twice.
            module_model_list.append(cls)
        # Since we don't return an instance here, the __init__
        # method won't be called.
    def __init__(self, pool, cr):
        """ Initialize a model and make it part of the given registry.
        - copy the stored fields' functions in the osv_pool,
        - update the _columns with the fields found in ir_model_fields,
        - ensure there is a many2one for each _inherits'd parent,
        - update the children's _columns,
        - give a chance to each field to initialize itself.
        # Make this instance reachable from the registry under its _name.
        pool.add(self._name, self)
        if not self._name and not hasattr(self, '_inherit'):
            name = type(self).__name__.split('.')[0]
            msg = "The class %s has to have a _name attribute" % name
            raise except_orm('ValueError', msg)
        if not self._description:
            # Fall back on the technical name when no description was given.
            self._description = self._name
        # SQL table name: dots in the model name become underscores.
        self._table = self._name.replace('.', '_')
        if not hasattr(self, '_log_access'):
            # If _log_access is not specified, it is the same value as _auto.
            self._log_access = getattr(self, "_auto", True)
        # Work on a private copy so registry-level tweaks below do not leak
        # into the class-level _columns shared with other registries.
        self._columns = self._columns.copy()
        for store_field in self._columns:
            f = self._columns[store_field]
            if hasattr(f, 'digits_change'):
            # Drop any previously-registered store trigger for this exact
            # (model, field) pair before re-registering it below.
            def not_this_field(stored_func):
                x, y, z, e, f, l = stored_func
                return x != self._name or y != store_field
            self.pool._store_function[self._name] = filter(not_this_field, self.pool._store_function.get(self._name, []))
            if not isinstance(f, fields.function):
                # store=True shorthand: recompute for the written ids themselves.
                sm = {self._name: (lambda self, cr, uid, ids, c={}: ids, None, f.priority, None)}
            for object, aa in sm.items():
                # Store spec tuples come in a 4-element (with time length) or
                # a 3-element form; anything else is a definition error.
                    (fnct, fields2, order, length) = aa
                    (fnct, fields2, order) = aa
                    raise except_orm('Error',
                        ('Invalid function definition %s in object %s !\nYou must use the definition: store={object:(fnct, fields, priority, time length)}.' % (store_field, self._name)))
                self.pool._store_function.setdefault(object, [])
                t = (self._name, store_field, fnct, tuple(fields2) if fields2 else None, order, length)
                if not t in self.pool._store_function[object]:
                    self.pool._store_function[object].append((self._name, store_field, fnct, tuple(fields2) if fields2 else None, order, length))
                    # Keep triggers ordered by priority (index 4 of the tuple).
                    self.pool._store_function[object].sort(lambda x, y: cmp(x[4], y[4]))
        # Map SQL constraint names to their messages for nicer error reports.
        for (key, _, msg) in self._sql_constraints:
            self.pool._sql_error[self._table+'_'+key] = msg
        # Load manual fields
        # Check the query is already done for all modules or if we need to
        if self.pool.fields_by_model is not None:
            # Preloaded by the registry: plain dictionary lookup.
            manual_fields = self.pool.fields_by_model.get(self._name, [])
            cr.execute('SELECT * FROM ir_model_fields WHERE model=%s AND state=%s', (self._name, 'manual'))
            manual_fields = cr.dictfetchall()
        for field in manual_fields:
            # Skip manual fields that already exist as regular columns.
            if field['name'] in self._columns:
                # Keyword arguments shared by every manual field built below
                # (the dict-literal opener is elided from this view).
                'string': field['field_description'],
                'required': bool(field['required']),
                'readonly': bool(field['readonly']),
                # NOTE(review): eval of database-stored expressions -- manual
                # fields are admin-defined, but this is still code execution.
                'domain': eval(field['domain']) if field['domain'] else None,
                'size': field['size'] or None,
                'ondelete': field['on_delete'],
                'translate': (field['translate']),
                #'select': int(field['select_level'])
            if field['serialization_field_id']:
                # Sparse field: stored inside another (serialization) field.
                cr.execute('SELECT name FROM ir_model_fields WHERE id=%s', (field['serialization_field_id'],))
                attrs.update({'serialization_field': cr.fetchone()[0], 'type': field['ttype']})
                if field['ttype'] in ['many2one', 'one2many', 'many2many']:
                    attrs.update({'relation': field['relation']})
                self._columns[field['name']] = fields.sparse(**attrs)
            elif field['ttype'] == 'selection':
                self._columns[field['name']] = fields.selection(eval(field['selection']), **attrs)
            elif field['ttype'] == 'reference':
                self._columns[field['name']] = fields.reference(selection=eval(field['selection']), **attrs)
            elif field['ttype'] == 'many2one':
                self._columns[field['name']] = fields.many2one(field['relation'], **attrs)
            elif field['ttype'] == 'one2many':
                self._columns[field['name']] = fields.one2many(field['relation'], field['relation_field'], **attrs)
            elif field['ttype'] == 'many2many':
                # Derive a deterministic relation table name for the m2m.
                _rel1 = field['relation'].replace('.', '_')
                _rel2 = field['model'].replace('.', '_')
                _rel_name = 'x_%s_%s_%s_rel' % (_rel1, _rel2, field['name'])
                self._columns[field['name']] = fields.many2many(field['relation'], _rel_name, 'id1', 'id2', **attrs)
                # Simple scalar types are resolved by name on the fields module.
                self._columns[field['name']] = getattr(fields, field['ttype'])(**attrs)
        self._inherits_check()
        self._inherits_reload()
        if not self._sequence:
            self._sequence = self._table + '_id_seq'
        for k in self._defaults:
            assert (k in self._columns) or (k in self._inherit_fields), 'Default function defined in %s but field %s does not exist !' % (self._name, k,)
        # Let each column reset any per-registry cached state.
        for f in self._columns:
            self._columns[f].restart()
        # Transient (wizard) models: vacuum limits and access-log requirement.
        if self.is_transient():
            self._transient_check_count = 0
            self._transient_max_count = config.get('osv_memory_count_limit')
            self._transient_max_hours = config.get('osv_memory_age_limit')
            assert self._log_access, "TransientModels must have log_access turned on, "\
                "in order to implement their access rights policy"
        # Validate rec_name: must be an existing column (or 'id') when set.
        if self._rec_name is not None:
            assert self._rec_name in self._all_columns.keys() + ['id'], "Invalid rec_name %s for model %s" % (self._rec_name, self._name)
            self._rec_name = 'name'
    def __export_row(self, cr, uid, row, fields, raw_data=False, context=None):
        # Export one browse_record as one or more flat rows (extra rows come
        # from one2many/many2many sub-records).

        def check_type(field_type):
            # Type-appropriate "empty" placeholder for falsy values.
            if field_type == 'float':
            elif field_type == 'integer':
            elif field_type == 'boolean':

        def selection_field(in_field):
            # Resolve the column object for f[i] through the _inherits chain.
            col_obj = self.pool[in_field.keys()[0]]
            if f[i] in col_obj._columns.keys():
                return col_obj._columns[f[i]]
            elif f[i] in col_obj._inherits.keys():
                # NOTE(review): recursive result is not returned here -- looks
                # like a latent upstream bug; left untouched in this pass.
                selection_field(col_obj._inherits)

        def _get_xml_id(self, cr, uid, r):
            # Return an external id for record r, creating a stub entry in
            # ir.model.data (module '__export__') when none exists yet.
            model_data = self.pool.get('ir.model.data')
            data_ids = model_data.search(cr, uid, [('model', '=', r._model._name), ('res_id', '=', r['id'])])
                d = model_data.read(cr, uid, data_ids, ['name', 'module'])[0]
                    r = '%s.%s' % (d['module'], d['name'])
                    # Probe table_id[_postfix] names until a free one is found.
                    n = r._model._table+'_'+str(r['id']) + (postfix and ('_'+str(postfix)) or '' )
                    if not model_data.search(cr, uid, [('name', '=', n)]):
                # Created as SUPERUSER: the export stub must not depend on the
                # current user's write rights on ir.model.data.
                model_data.create(cr, SUPERUSER_ID, {
                    'model': r._model._name,
                    'module': '__export__',

        # One output cell per requested field path, prefilled with ''.
        data = map(lambda x: '', range(len(fields)))
        for fpos in range(len(fields)):
                        r = _get_xml_id(self, cr, uid, r)
                        # To display external name of selection field when its exported
                        if f[i] in self._columns.keys():
                            cols = self._columns[f[i]]
                        elif f[i] in self._inherit_fields.keys():
                            cols = selection_field(self._inherits)
                        if cols and cols._type == 'selection':
                            sel_list = cols.selection
                            if r and type(sel_list) == type([]):
                                r = [x[1] for x in sel_list if r==x[0]]
                                r = r and r[0] or False
                        if f[i] in self._columns:
                            r = check_type(self._columns[f[i]]._type)
                        elif f[i] in self._inherit_fields:
                            r = check_type(self._inherit_fields[f[i]][2]._type)
                        data[fpos] = r or False
                    if isinstance(r, (browse_record_list, list)):
                        # Sub-field paths of this prefix, for the recursion below.
                        fields2 = map(lambda x: (x[:i+1]==f[:i+1] and x[i+1:]) \
                            if [x for x in fields2 if x]:
                        done.append(fields2)
                        # m2m with an explicit 'id' subfield: export as a
                        # comma-separated list of external ids.
                        if cols and cols._type=='many2many' and len(fields[fpos])>(i+1) and (fields[fpos][i+1]=='id'):
                            data[fpos] = ','.join([_get_xml_id(self, cr, uid, x) for x in r])
                            lines2 = row2._model.__export_row(cr, uid, row2, fields2, context=context)
                                for fpos2 in range(len(fields)):
                                    if lines2 and lines2[0][fpos2]:
                                        data[fpos2] = lines2[0][fpos2]
                                        name_relation = self.pool[rr._table_name]._rec_name
                                        if isinstance(rr[name_relation], browse_record):
                                            rr = rr[name_relation]
                                        rr_name = self.pool[rr._table_name].name_get(cr, uid, [rr.id], context=context)
                                        rr_name = rr_name and rr_name[0] and rr_name[0][1] or ''
                                        dt += tools.ustr(rr_name or '') + ','
                                    # Joined display names; drop trailing comma.
                                    data[fpos] = dt[:-1]
                    if isinstance(r, browse_record):
                        # Plain record cell: use its display name.
                        r = self.pool[r._table_name].name_get(cr, uid, [r.id], context=context)
                        r = r and r[0] and r[0][1] or ''
                    # raw_data=True keeps Python-typed values instead of strings.
                    if raw_data and cols and cols._type in ('integer', 'boolean', 'float'):
                    elif raw_data and cols and cols._type == 'date':
                        data[fpos] = datetime.datetime.strptime(r, tools.DEFAULT_SERVER_DATE_FORMAT).date()
                    elif raw_data and cols and cols._type == 'datetime':
                        data[fpos] = datetime.datetime.strptime(r, tools.DEFAULT_SERVER_DATETIME_FORMAT)
                        data[fpos] = tools.ustr(r or '')
        return [data] + lines
    def export_data(self, cr, uid, ids, fields_to_export, raw_data=False, context=None):
        """
        Export fields for selected objects
        :param cr: database cursor
        :param uid: current user id
        :param ids: list of ids
        :param fields_to_export: list of fields
        :param raw_data: True to return value in fields type, False for string values
        :param context: context arguments, like lang, time zone
        :rtype: dictionary with a *datas* matrix
        This method is used when exporting data via client menu
        # Aggregate own columns plus inherited ones so any exported field
        # path can be resolved on this model.
        cols = self._columns.copy()
        for f in self._inherit_fields:
            cols.update({f: self._inherit_fields[f][2]})
        # Normalize field paths ('id', '.id', 'child/field') to list form.
        fields_to_export = map(fix_import_export_id_paths, fields_to_export)
        for row in self.browse(cr, uid, ids, context):
            # __export_row may emit several flat rows per record (o2m/m2m).
            datas += self.__export_row(cr, uid, row, fields_to_export, raw_data=raw_data, context=context)
        return {'datas': datas}
    def import_data(self, cr, uid, fields, datas, mode='init', current_module='', noupdate=False, context=None, filename=None):
        """
            Use :meth:`~load` instead
        Import given data in given module
        This method is used when importing data via client menu.
        Example of fields to import for a sale.order::
            partner_id, (=name_search)
            order_line/.id, (=database_id)
            order_line/product_id/id, (=xml id)
            order_line/price_unit,
            order_line/product_uom_qty,
            order_line/product_uom/id (=xml_id)
        This method returns a 4-tuple with the following structure::
            (return_code, errored_resource, error_message, unused)
        * The first item is a return code, it is ``-1`` in case of
          import error, or the last imported row number in case of success
        * The second item contains the record data dict that failed to import
          in case of error, otherwise it's 0
        * The third item contains an error message string in case of error,
        * The last item is currently unused, with no specific semantics
        :param fields: list of fields to import
        :param datas: data to import
        :param mode: 'init' or 'update' for record creation
        :param current_module: module name
        :param noupdate: flag for record creation
        :param filename: optional file to store partial import state for recovery
        :returns: 4-tuple in the form (return_code, errored_resource, error_message, unused)
        :rtype: (int, dict or 0, str or 0, str or 0)
        # Work on a copy so callers don't see the import-specific key below.
        context = dict(context) if context is not None else {}
        context['_import_current_module'] = current_module
        # Normalize field paths ('id', '.id', 'child/field') to list form.
        fields = map(fix_import_export_id_paths, fields)
        ir_model_data_obj = self.pool.get('ir.model.data')
        # log() aborts the whole import on the first error-level message
        # (its def line is elided from this view).
            if m['type'] == 'error':
                raise Exception(m['message'])
        # Resume support: restore the last saved position for this file.
        if config.get('import_partial') and filename:
            with open(config.get('import_partial'), 'rb') as partial_import_file:
                data = pickle.load(partial_import_file)
                position = data.get(filename, 0)
            for res_id, xml_id, res, info in self._convert_records(cr, uid,
                            self._extract_records(cr, uid, fields, datas,
                                                  context=context, log=log),
                            context=context, log=log):
                ir_model_data_obj._update(cr, uid, self._name,
                     current_module, res, mode=mode, xml_id=xml_id,
                     noupdate=noupdate, res_id=res_id, context=context)
                position = info.get('rows', {}).get('to', 0) + 1
                # Checkpoint every 100 rows so a crashed import can resume
                # from `position` instead of row 0.
                if config.get('import_partial') and filename and (not (position%100)):
                    with open(config.get('import_partial'), 'rb') as partial_import:
                        data = pickle.load(partial_import)
                    data[filename] = position
                    with open(config.get('import_partial'), 'wb') as partial_import:
                        pickle.dump(data, partial_import)
                    if context.get('defer_parent_store_computation'):
                        self._parent_store_compute(cr)
        except Exception, e:
            # Report the 1-based source line of the failure to the client.
            return -1, {}, 'Line %d : %s' % (position + 1, tools.ustr(e)), ''
        if context.get('defer_parent_store_computation'):
            self._parent_store_compute(cr)
        return position, 0, 0, 0
    def load(self, cr, uid, fields, data, context=None):
        """
        Attempts to load the data matrix, and returns a list of ids (or
        ``False`` if there was an error and no id could be generated) and a
        The ids are those of the records created and saved (in database), in
        the same order they were extracted from the file. They can be passed
        directly to :meth:`~read`
        :param fields: list of fields to import, at the same index as the corresponding data
        :type fields: list(str)
        :param data: row-major matrix of data to import
        :type data: list(list(str))
        :param dict context:
        :returns: {ids: list(int)|False, messages: [Message]}
        # Outer savepoint: everything is undone if any row errored.
        cr.execute('SAVEPOINT model_load')
        # Normalize field paths ('id', '.id', 'child/field') to list form.
        fields = map(fix_import_export_id_paths, fields)
        # NOTE(review): clear_caches() likely returns None, which would leave
        # ModelData unusable in the _update() call below -- verify against
        # upstream (two statements may have been collapsed here).
        ModelData = self.pool['ir.model.data'].clear_caches()
        fg = self.fields_get(cr, uid, context=context)
        for id, xid, record, info in self._convert_records(cr, uid,
                self._extract_records(cr, uid, fields, data,
                                      context=context, log=messages.append),
                context=context, log=messages.append):
            try:
                # Per-row savepoint so one bad row doesn't poison the rest.
                cr.execute('SAVEPOINT model_load_save')
            except psycopg2.InternalError, e:
                # broken transaction, exit and hope the source error was
                # also logged in the messages
                if not any(message['type'] == 'error' for message in messages):
                    messages.append(dict(info, type='error',message=
                        u"Unknown database error: '%s'" % e))
                ids.append(ModelData._update(cr, uid, self._name,
                     current_module, record, mode=mode, xml_id=xid,
                     noupdate=noupdate, res_id=id, context=context))
                cr.execute('RELEASE SAVEPOINT model_load_save')
            except psycopg2.Warning, e:
                # Warnings don't abort the row set, but undo this row.
                messages.append(dict(info, type='warning', message=str(e)))
                cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
            except psycopg2.Error, e:
                # Translate known postgres error codes to user messages.
                messages.append(dict(
                    **PGERROR_TO_OE[e.pgcode](self, fg, info, e)))
                # Failed to write, log to messages, rollback savepoint (to
                # avoid broken transaction) and keep going
                cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
            except Exception, e:
                message = (_('Unknown error during import:') +
                           u' %s: %s' % (type(e), unicode(e)))
                moreinfo = _('Resolve other errors first')
                messages.append(dict(info, type='error',
                # Failed for some reason, perhaps due to invalid data supplied,
                # rollback savepoint and keep going
                cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
        # Any error at all: undo the whole batch.
        if any(message['type'] == 'error' for message in messages):
            cr.execute('ROLLBACK TO SAVEPOINT model_load')
        return {'ids': ids, 'messages': messages}
    def _extract_records(self, cr, uid, fields_, data,
                         context=None, log=lambda a: None):
        """ Generates record dicts from the data sequence.
        The result is a generator of dicts mapping field names to raw
        (unconverted, unvalidated) values.
        For relational fields, if sub-fields were provided the value will be
        a list of sub-records
        The following sub-fields may be set on the record (by key):
        * None is the name_get for the record (to use with name_create/name_search)
        * "id" is the External ID for the record
        * ".id" is the Database ID for the record
        """
        columns = dict((k, v.column) for k, v in self._all_columns.iteritems())
        # Fake columns to avoid special cases in extractor
        columns[None] = fields.char('rec_name')
        columns['id'] = fields.char('External ID')
        columns['.id'] = fields.integer('Database ID')
        # m2o fields can't be on multiple lines so exclude them from the
        # is_relational field rows filter, but special-case it later on to
        # be handled with relational fields (as it can have subfields)
        is_relational = lambda field: columns[field]._type in ('one2many', 'many2many', 'many2one')
        get_o2m_values = itemgetter_tuple(
            [index for index, field in enumerate(fields_)
                  if columns[field[0]]._type == 'one2many'])
        get_nono2m_values = itemgetter_tuple(
            [index for index, field in enumerate(fields_)
                  if columns[field[0]]._type != 'one2many'])
        # Checks if the provided row has any non-empty non-relational field
        def only_o2m_values(row, f=get_nono2m_values, g=get_o2m_values):
            return any(g(row)) and not any(f(row))
            if index >= len(data): return
            # copy non-relational fields to record dict
            record = dict((field[0], value)
                          for field, value in itertools.izip(fields_, row)
                          if not is_relational(field[0]))
            # Get all following rows which have relational values attached to
            # the current record (no non-relational values)
            record_span = itertools.takewhile(
                only_o2m_values, itertools.islice(data, index + 1, None))
            # stitch record row back on for relational fields
            record_span = list(itertools.chain([row], record_span))
            for relfield in set(
                    field[0] for field in fields_
                             if is_relational(field[0])):
                column = columns[relfield]
                # FIXME: how to not use _obj without relying on fields_get?
                Model = self.pool[column._obj]
                # get only cells for this sub-field, should be strictly
                # non-empty, field path [None] is for name_get column
                indices, subfields = zip(*((index, field[1:] or [None])
                                           for index, field in enumerate(fields_)
                                           if field[0] == relfield))
                # return all rows which have at least one value for the
                # subfields of relfield
                relfield_data = filter(any, map(itemgetter_tuple(indices), record_span))
                # Recursively extract sub-records for this relational field.
                record[relfield] = [subrecord
                    for subrecord, _subinfo in Model._extract_records(
                        cr, uid, subfields, relfield_data,
                        context=context, log=log)]
            # Report the span of source rows this record was built from.
            yield record, {'rows': {
                'to': index + len(record_span) - 1
            index += len(record_span)
    def _convert_records(self, cr, uid, records,
                         context=None, log=lambda a: None):
        """ Converts records from the source iterable (recursive dicts of
        strings) into forms which can be written to the database (via
        self.create or (ir.model.data)._update)
        :returns: a list of triplets of (id, xid, record)
        :rtype: list((int|None, str|None, dict))
        """
        if context is None: context = {}
        Converter = self.pool['ir.fields.converter']
        columns = dict((k, v.column) for k, v in self._all_columns.iteritems())
        Translation = self.pool['ir.translation']
        # Human-readable (possibly translated) label per field, used in
        # conversion error messages.
            (f, (Translation._get_source(cr, uid, self._name + ',' + f, 'field',
                                         context.get('lang'))
            for f, column in columns.iteritems())
        convert = Converter.for_model(cr, uid, self, context=context)
        def _log(base, field, exception):
            type = 'warning' if isinstance(exception, Warning) else 'error'
            # logs the logical (not human-readable) field name for automated
            # processing of response, but injects human readable in message
            record = dict(base, type=type, field=field,
                          message=unicode(exception.args[0]) % base)
            if len(exception.args) > 1 and exception.args[1]:
                record.update(exception.args[1])
        # CountingStream tracks the current row index for error reporting.
        stream = CountingStream(records)
        for record, extras in stream:
            # name_get/name_create
            if None in record: pass
                    # '.id' carries the database id, normally an integer.
                    dbid = int(record['.id'])
                    # in case of overridden id column
                    dbid = record['.id']
                if not self.search(cr, uid, [('id', '=', dbid)], context=context):
                    # Reject database ids that don't exist (or aren't visible).
                        record=stream.index,
                        message=_(u"Unknown database identifier '%s'") % dbid))
            converted = convert(record, lambda field, err:\
                _log(dict(extras, record=stream.index, field=field_names[field]), field, err))
            yield dbid, xid, converted, dict(extras, record=stream.index)
1556 def get_invalid_fields(self, cr, uid):
1557 return list(self._invalids)
    def _validate(self, cr, uid, ids, context=None):
        # Run every _constraints entry against `ids`; collect all failures
        # and raise a single ValidateError at the end.
        context = context or {}
        lng = context.get('lang')
        trans = self.pool.get('ir.translation')
        for constraint in self._constraints:
            fun, msg, fields = constraint
            try:
                # We don't pass around the context here: validation code
                # must always yield the same results.
                valid = fun(self, cr, uid, ids)
            except Exception, e:
                _logger.debug('Exception while validating constraint', exc_info=True)
                # Keep the original error text to append to the message below.
                extra_error = tools.ustr(e)
                # Check presence of __call__ directly instead of using
                # callable() because it will be deprecated as of Python 3.0
                if hasattr(msg, '__call__'):
                    tmp_msg = msg(self, cr, uid, ids, context=context)
                    if isinstance(tmp_msg, tuple):
                        # (message, params) pair: interpolate the params.
                        tmp_msg, params = tmp_msg
                        translated_msg = tmp_msg % params
                        translated_msg = tmp_msg
                    # Plain string: translate it as a 'constraint' term.
                    translated_msg = trans._get_source(cr, uid, self._name, 'constraint', lng, msg)
                    translated_msg += "\n\n%s\n%s" % (_('Error details:'), extra_error)
                        _("The field(s) `%s` failed against a constraint: %s") % (', '.join(fields), translated_msg)
                # Remember which fields failed, for get_invalid_fields().
                self._invalids.update(fields)
            raise except_orm('ValidateError', '\n'.join(error_msgs))
            self._invalids.clear()
    def default_get(self, cr, uid, fields_list, context=None):
        """
        Returns default values for the fields in fields_list.
        :param fields_list: list of fields to get the default values for (example ['field1', 'field2',])
        :type fields_list: list
        :param context: optional context dictionary - it may contain keys for specifying certain options
        like ``context_lang`` (language) or ``context_tz`` (timezone) to alter the results of the call.
        It may contain keys in the form ``default_XXX`` (where XXX is a field name), to set
        or override a default value for a field.
        A special ``bin_size`` boolean flag may also be passed in the context to request the
        value of all fields.binary columns to be returned as the size of the binary instead of its
        contents. This can also be selectively overridden by passing a field-specific flag
        in the form ``bin_size_XXX: True/False`` where ``XXX`` is the name of the field.
        Note: The ``bin_size_XXX`` form is new in OpenERP v6.0.
        :return: dictionary of the default values (set on the object model class, through user preferences, or in the context)
        # trigger view init hook
        self.view_init(cr, uid, fields_list, context)
        # get the default values for the inherited fields
        for t in self._inherits.keys():
            defaults.update(self.pool[t].default_get(cr, uid, fields_list, context))
        # get the default values defined in the object
        for f in fields_list:
            if f in self._defaults:
                if callable(self._defaults[f]):
                    defaults[f] = self._defaults[f](self, cr, uid, context)
                    defaults[f] = self._defaults[f]
            # Resolve the column object: own column first, then inherited.
            fld_def = ((f in self._columns) and self._columns[f]) \
                    or ((f in self._inherit_fields) and self._inherit_fields[f][2]) \
            # Property fields take their default from ir.property.
            if isinstance(fld_def, fields.property):
                property_obj = self.pool.get('ir.property')
                prop_value = property_obj.get(cr, uid, f, self._name, context=context)
                    if isinstance(prop_value, (browse_record, browse_null)):
                        defaults[f] = prop_value.id
                        defaults[f] = prop_value
                    if f not in defaults:
        # get the default values set by the user and override the default
        # values defined in the object
        ir_values_obj = self.pool.get('ir.values')
        res = ir_values_obj.get(cr, uid, 'default', False, [self._name])
        for id, field, field_value in res:
            if field in fields_list:
                fld_def = (field in self._columns) and self._columns[field] or self._inherit_fields[field][2]
                # Drop dangling many2one defaults pointing at missing records.
                if fld_def._type == 'many2one':
                    obj = self.pool[fld_def._obj]
                    if not obj.search(cr, uid, [('id', '=', field_value or False)]):
                # Filter many2many defaults down to still-existing records.
                if fld_def._type == 'many2many':
                    obj = self.pool[fld_def._obj]
                    for i in range(len(field_value or [])):
                        if not obj.search(cr, uid, [('id', '=',
                        field_value2.append(field_value[i])
                    field_value = field_value2
                # Same cleanup for one2many defaults, sub-field by sub-field.
                if fld_def._type == 'one2many':
                    obj = self.pool[fld_def._obj]
                    for i in range(len(field_value or [])):
                        field_value2.append({})
                        for field2 in field_value[i]:
                            if field2 in obj._columns.keys() and obj._columns[field2]._type == 'many2one':
                                obj2 = self.pool[obj._columns[field2]._obj]
                                if not obj2.search(cr, uid,
                                        [('id', '=', field_value[i][field2])]):
                            elif field2 in obj._inherit_fields.keys() and obj._inherit_fields[field2][2]._type == 'many2one':
                                obj2 = self.pool[obj._inherit_fields[field2][2]._obj]
                                if not obj2.search(cr, uid,
                                        [('id', '=', field_value[i][field2])]):
                            # TODO add test for many2many and one2many
                            field_value2[i][field2] = field_value[i][field2]
                    field_value = field_value2
                defaults[field] = field_value
        # get the default values from the context
        for key in context or {}:
            if key.startswith('default_') and (key[8:] in fields_list):
                defaults[key[8:]] = context[key]
    def fields_get_keys(self, cr, user, context=None):
        """Return the names of all fields of this model, including fields
        inherited through the _inherits parents."""
        res = self._columns.keys()
        # TODO I believe this loop can be replaced by
        # res.extend(self._inherit_fields.keys())
        for parent in self._inherits:
            res.extend(self.pool[parent].fields_get_keys(cr, user, context))
    def _rec_name_fallback(self, cr, uid, context=None):
        """Return a field name usable as the record's display name:
        ``_rec_name`` when it is a real column, otherwise the first column
        (or ``"id"`` when the model has no columns at all)."""
        rec_name = self._rec_name
        if rec_name not in self._columns:
            rec_name = self._columns.keys()[0] if len(self._columns.keys()) > 0 else "id"
    # Overload this method if you need a window title which depends on the context
    def view_header_get(self, cr, user, view_id=None, view_type='form', context=None):
        """Hook: override to compute a context-dependent window title for
        the requested view (default body elided from this view)."""
    def user_has_groups(self, cr, uid, groups, context=None):
        """Return true if the user is at least member of one of the groups
        in groups_str. Typically used to resolve ``groups`` attribute
        in view and model definitions.
        :param str groups: comma-separated list of fully-qualified group
            external IDs, e.g.: ``base.group_user,base.group_system``
        :return: True if the current user is a member of one of the
        # Membership in any single listed group is sufficient.
        return any([self.pool.get('res.users').has_group(cr, uid, group_ext_id)
                    for group_ext_id in groups.split(',')])
    def _get_default_form_view(self, cr, user, context=None):
        """ Generates a default single-line form view using all fields
        of the current model except the m2m and o2m ones.
        :param cr: database cursor
        :param int user: user id
        :param dict context: connection context
        :returns: a form view as an lxml document
        :rtype: etree._Element
        view = etree.Element('form', string=self._description)
        # TODO it seems fields_get can be replaced by _all_columns (no need for translation)
        for field, descriptor in self.fields_get(cr, user, context=context).iteritems():
            # Skip x2many fields: they don't fit a single-line default form.
            if descriptor['type'] in ('one2many', 'many2many'):
            etree.SubElement(view, 'field', name=field)
            if descriptor['type'] == 'text':
                # Text fields get a full line to themselves.
                etree.SubElement(view, 'newline')
    def _get_default_search_view(self, cr, user, context=None):
        """ Generates a single-field search view, based on _rec_name.
        :param cr: database cursor
        :param int user: user id
        :param dict context: connection context
        :returns: a tree view as an lxml document
        :rtype: etree._Element
        view = etree.Element('search', string=self._description)
        # Search on the record's display-name field only.
        etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))
    def _get_default_tree_view(self, cr, user, context=None):
        """ Generates a single-field tree view, based on _rec_name.
        :param cr: database cursor
        :param int user: user id
        :param dict context: connection context
        :returns: a tree view as an lxml document
        :rtype: etree._Element
        view = etree.Element('tree', string=self._description)
        # Show only the record's display-name field in the default list.
        etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))
    def _get_default_calendar_view(self, cr, user, context=None):
        """ Generates a default calendar view by trying to infer
        calendar fields from a number of pre-set attribute names
        :param cr: database cursor
        :param int user: user id
        :param dict context: connection context
        :returns: a calendar view
        :rtype: etree._Element
        def set_first_of(seq, in_, to):
            """Sets the first value of ``seq`` also found in ``in_`` to
            the ``to`` attribute of the view being closed over.
            Returns whether it's found a suitable value (and set it on
            the attribute) or not
        view = etree.Element('calendar', string=self._description)
        etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))
        # Infer a start-date field when _date_name is not a real column.
        if self._date_name not in self._columns:
            for dt in ['date', 'date_start', 'x_date', 'x_date_start']:
                if dt in self._columns:
                    self._date_name = dt
                # No usable date field at all: a calendar view is impossible.
                raise except_orm(_('Invalid Object Architecture!'), _("Insufficient fields for Calendar View!"))
        view.set('date_start', self._date_name)
        # Optional color-grouping field, first match wins.
        set_first_of(["user_id", "partner_id", "x_user_id", "x_partner_id"],
                     self._columns, 'color')
        # An end is required: either an explicit stop date or a delay/duration.
        if not set_first_of(["date_stop", "date_end", "x_date_stop", "x_date_end"],
                            self._columns, 'date_stop'):
            if not set_first_of(["date_delay", "planned_hours", "x_date_delay", "x_planned_hours"],
                                self._columns, 'date_delay'):
                raise except_orm(
                    _('Invalid Object Architecture!'),
                    _("Insufficient fields to generate a Calendar View for %s, missing a date_stop or a date_delay" % self._name))
1827 def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
1829 Get the detailed composition of the requested view like fields, model, view architecture
1831 :param view_id: id of the view or None
1832 :param view_type: type of the view to return if view_id is None ('form', tree', ...)
1833 :param toolbar: true to include contextual actions
1834 :param submenu: deprecated
1835 :return: dictionary describing the composition of the requested view (including inherited views and extensions)
1836 :raise AttributeError:
1837 * if the inherited view has unknown position to work with other than 'before', 'after', 'inside', 'replace'
1838 * if some tag other than 'position' is found in parent view
1839 :raise Invalid ArchitectureError: if there is view type other than form, tree, calendar, search etc defined on the structure
1843 View = self.pool['ir.ui.view']
1846 'model': self._name,
1847 'field_parent': False,
1850 # try to find a view_id if none provided
        # <view_type>_view_ref in context can be used to override the default view
1853 view_ref_key = view_type + '_view_ref'
1854 view_ref = context.get(view_ref_key)
1857 module, view_ref = view_ref.split('.', 1)
1858 cr.execute("SELECT res_id FROM ir_model_data WHERE model='ir.ui.view' AND module=%s AND name=%s", (module, view_ref))
1859 view_ref_res = cr.fetchone()
1861 view_id = view_ref_res[0]
1863 _logger.warning('%r requires a fully-qualified external id (got: %r for model %s). '
1864 'Please use the complete `module.view_id` form instead.', view_ref_key, view_ref,
1868 # otherwise try to find the lowest priority matching ir.ui.view
1869 view_id = View.default_view(cr, uid, self._name, view_type, context=context)
        # context for post-processing might be overridden
1874 # read the view with inherited views applied
1875 root_view = View.read_combined(cr, uid, view_id, fields=['id', 'name', 'field_parent', 'type', 'model', 'arch'], context=context)
1876 result['arch'] = root_view['arch']
1877 result['name'] = root_view['name']
1878 result['type'] = root_view['type']
1879 result['view_id'] = root_view['id']
1880 result['field_parent'] = root_view['field_parent']
1881 # override context fro postprocessing
1882 if root_view.get('model') != self._name:
1883 ctx = dict(context, base_model_name=root_view.get('model'))
1885 # fallback on default views methods if no ir.ui.view could be found
1887 get_func = getattr(self, '_get_default_%s_view' % view_type)
1888 arch_etree = get_func(cr, uid, context)
1889 result['arch'] = etree.tostring(arch_etree, encoding='utf-8')
1890 result['type'] = view_type
1891 result['name'] = 'default'
1892 except AttributeError:
1893 raise except_orm(_('Invalid Architecture!'), _("No default view of type '%s' could be found !") % view_type)
1895 # Apply post processing, groups and modifiers etc...
1896 xarch, xfields = View.postprocess_and_fields(cr, uid, self._name, etree.fromstring(result['arch']), view_id, context=ctx)
1897 result['arch'] = xarch
1898 result['fields'] = xfields
1900 # Add related action information if aksed
1902 toclean = ('report_sxw_content', 'report_rml_content', 'report_sxw', 'report_rml', 'report_sxw_content_data', 'report_rml_content_data')
1908 ir_values_obj = self.pool.get('ir.values')
1909 resprint = ir_values_obj.get(cr, uid, 'action', 'client_print_multi', [(self._name, False)], False, context)
1910 resaction = ir_values_obj.get(cr, uid, 'action', 'client_action_multi', [(self._name, False)], False, context)
1911 resrelate = ir_values_obj.get(cr, uid, 'action', 'client_action_relate', [(self._name, False)], False, context)
1912 resaction = [clean(action) for action in resaction if view_type == 'tree' or not action[2].get('multi')]
1913 resprint = [clean(print_) for print_ in resprint if view_type == 'tree' or not print_[2].get('multi')]
1914 #When multi="True" set it will display only in More of the list view
1915 resrelate = [clean(action) for action in resrelate
1916 if (action[2].get('multi') and view_type == 'tree') or (not action[2].get('multi') and view_type == 'form')]
1918 for x in itertools.chain(resprint, resaction, resrelate):
1919 x['string'] = x['name']
1921 result['toolbar'] = {
1923 'action': resaction,
def get_formview_id(self, cr, uid, id, context=None):
    """ Return a view id to open the document with. This method is meant to be
    overridden in addons that want to give specific view ids for example.

    :param int id: id of the document to open
    :return: view id to use, or ``False`` to let the client fall back to
             the default form view of the model
    """
    # Base implementation expresses no preference; addons override this hook.
    return False
def get_formview_action(self, cr, uid, id, context=None):
    """ Return an action to open the document. This method is meant to be
    overridden in addons that want to give specific view ids for example.

    :param int id: id of the document to open
    :return: ``ir.actions.act_window`` action dictionary opening the
             document in a single form view
    """
    # Delegate the choice of the form view to the overridable hook above.
    view_id = self.get_formview_id(cr, uid, id, context=context)
    return {
        'type': 'ir.actions.act_window',
        'res_model': self._name,
        'view_type': 'form',
        'view_mode': 'form',
        'views': [(view_id, 'form')],
        'target': 'current',
        'res_id': id,
    }
1953 def _view_look_dom_arch(self, cr, uid, node, view_id, context=None):
1954 return self.pool['ir.ui.view'].postprocess_and_fields(
1955 cr, uid, self._name, node, view_id, context=context)
def search_count(self, cr, user, args, context=None):
    """Return the number of records matching the search domain ``args``.

    Convenience wrapper around :meth:`search` with ``count=True``.

    :param args: search domain (list of tuples); ``[]`` matches all records
    :return: number of matching records
    """
    res = self.search(cr, user, args, context=context, count=True)
    # search(count=True) normally returns an integer, but be defensive in
    # case an override returns the list of matching ids instead.
    if isinstance(res, list):
        return len(res)
    return res
def search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False):
    """
    Search for records based on a search domain.

    :param cr: database cursor
    :param user: current user id
    :param args: list of tuples specifying the search domain
        ``[('field_name', 'operator', value), ...]``. Pass an empty list
        to match all records.
    :param offset: optional number of results to skip in the returned values (default: 0)
    :param limit: optional max number of records to return (default: **None**)
    :param order: optional columns to sort by (default: ``self._order``)
    :param context: optional context arguments, like lang, time zone
    :type context: dictionary
    :param count: optional (default: **False**), if **True**, returns only the
        number of records matching the criteria, not their ids
    :return: id or list of ids of records matching the criteria
    :rtype: integer or list of integers
    :raise AccessError: if user tries to bypass access rules for read on the
        requested object.

    **Expressing a search domain (args)**

    Each tuple in the search domain needs 3 elements, in the form
    ``('field_name', 'operator', value)``:

    * **field_name** must be a valid field of the model, possibly following
      many-to-one relationships using dot-notation, e.g. ``'street'`` or
      ``'partner_id.country'``.
    * **operator** is one of ``=, !=, >, >=, <, <=, like, ilike, in, not in,
      child_of, parent_left, parent_right``. ``child_of`` matches children and
      grand-children of a record, following ``self._parent_name``
      (``parent_id`` by default).
    * **value** must be a valid value to compare with, depending on the field type.

    Domain criteria can be combined with the **prefix** operators ``'&'``
    (AND, implicit default, arity 2), ``'|'`` (OR, arity 2) and ``'!'``
    (NOT, arity 1). For example, partners named *ABC* from Belgium or
    Germany whose language is not english::

        [('name','=','ABC'),'!',('language.code','=','en_US'),'|',('country_id.code','=','be'),('country_id.code','=','de')]
    """
    # The heavy lifting (domain parsing, access rules, SQL generation)
    # lives in the private _search() implementation.
    return self._search(cr, user, args, offset=offset, limit=limit,
                        order=order, context=context, count=count)
def name_get(self, cr, user, ids, context=None):
    """Returns the preferred display value (text representation) for the records with the
    given ``ids``. By default this will be the value of the ``name`` column, unless
    the model implements a custom behavior.
    Can sometimes be seen as the inverse function of :meth:`~.name_search`, but it is not
    guaranteed to be.

    :return: list of pairs ``(id, text_repr)`` for all records with the given ``ids``.
    """
    if not ids:
        return []
    if isinstance(ids, (int, long)):
        ids = [ids]
    if self._rec_name in self._all_columns:
        rec_name_column = self._all_columns[self._rec_name].column
        return [(r['id'], rec_name_column.as_display_name(cr, user, self, r[self._rec_name], context=context))
                for r in self.read(cr, user, ids, [self._rec_name],
                                   load='_classic_write', context=context)]
    # Fallback when the model has no usable _rec_name column: synthesize a
    # "model,id" technical representation for each record.
    return [(id, "%s,%s" % (self._name, id)) for id in ids]
def name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100):
    """Search for records that have a display name matching the given ``name`` pattern if compared
    with the given ``operator``, while also matching the optional search domain (``args``).
    This is used for example to provide suggestions based on a partial value for a relational
    field. Can sometimes be seen as the inverse function of :meth:`~.name_get`, but it is not
    guaranteed to be.

    This method is equivalent to calling :meth:`~.search` with a search domain based on ``name``
    and then :meth:`~.name_get` on the result of the search.

    :param list args: optional search domain (see :meth:`~.search` for syntax),
        specifying further restrictions
    :param str operator: domain operator for matching the ``name`` pattern, such as ``'like'``
    :param int limit: optional max number of records to return
    :return: list of pairs ``(id, text_repr)`` for all matching records.
    """
    return self._name_search(cr, user, name, args, operator, context, limit)
def name_create(self, cr, uid, name, context=None):
    """Creates a new record by calling :meth:`~.create` with only one
    value provided: the name of the new record (``_rec_name`` field).
    The new record will also be initialized with any default values applicable
    to this model, or provided through the context. The usual behavior of
    :meth:`~.create` applies.
    Similarly, this method may raise an exception if the model has multiple
    required fields and some do not have default values.

    :param name: name of the record to create
    :return: the :meth:`~.name_get` pair value for the newly-created record.
    """
    rec_id = self.create(cr, uid, {self._rec_name: name}, context)
    return self.name_get(cr, uid, [rec_id], context)[0]
2067 # private implementation of name_search, allows passing a dedicated user for the name_get part to
2068 # solve some access rights issues
2069 def _name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100, name_get_uid=None):
2075 # optimize out the default criterion of ``ilike ''`` that matches everything
2076 if not (name == '' and operator == 'ilike'):
2077 args += [(self._rec_name, operator, name)]
2078 access_rights_uid = name_get_uid or user
2079 ids = self._search(cr, user, args, limit=limit, context=context, access_rights_uid=access_rights_uid)
2080 res = self.name_get(cr, access_rights_uid, ids, context)
# Read the translated labels ("string") of this model's fields, per
# requested language, via ir.translation; recurses into _inherits parents.
# NOTE(review): the embedded original-line numbers are discontinuous —
# result-dict initialization, the language/field loops, the return and the
# docstring delimiters are missing from this listing; verify against the
# complete source before modifying.
2083 def read_string(self, cr, uid, id, langs, fields=None, context=None):
2086 self.pool.get('ir.translation').check_access_rights(cr, uid, 'read')
2088 fields = self._columns.keys() + self._inherit_fields.keys()
2089 #FIXME: collect all calls to _get_source into one SQL call.
2091 res[lang] = {'code': lang}
2093 if f in self._columns:
2094 res_trans = self.pool.get('ir.translation')._get_source(cr, uid, self._name+','+f, 'field', lang)
2096 res[lang][f] = res_trans
2098 res[lang][f] = self._columns[f].string
2099 for table in self._inherits:
2100 cols = intersect(self._inherit_fields.keys(), fields)
2101 res2 = self.pool[table].read_string(cr, uid, id, langs, cols, context)
2104 res[lang]['code'] = lang
2105 for f in res2[lang]:
2106 res[lang][f] = res2[lang][f]
# Write translated labels for this model's fields (translation records with
# res_id 0), then propagate to _inherits parents.
# NOTE(review): lines elided in this listing (the langs/vals loops, return,
# docstring) — verify against the complete source before modifying.
2109 def write_string(self, cr, uid, id, langs, vals, context=None):
2110 self.pool.get('ir.translation').check_access_rights(cr, uid, 'write')
2111 #FIXME: try to only call the translation in one SQL
2114 if field in self._columns:
2115 src = self._columns[field].string
2116 self.pool.get('ir.translation')._set_ids(cr, uid, self._name+','+field, 'field', lang, [0], vals[field], src)
2117 for table in self._inherits:
2118 cols = intersect(self._inherit_fields.keys(), vals)
2120 self.pool[table].write_string(cr, uid, id, langs, vals, context)
# Complete a ``values`` dict with default values for every column (own or
# inherited) not explicitly provided, normalizing m2m defaults to (6,0,ids)
# and o2m dict-defaults to (0,0,vals) command triplets. Provided values
# always override defaults (defaults.update(values)).
# NOTE(review): this listing elides the ``for dv in defaults:`` loop header
# and the method's tail/return — verify against the complete source.
2123 def _add_missing_default_values(self, cr, uid, values, context=None):
2124 missing_defaults = []
2125 avoid_tables = [] # avoid overriding inherited values when parent is set
2126 for tables, parent_field in self._inherits.items():
2127 if parent_field in values:
2128 avoid_tables.append(tables)
2129 for field in self._columns.keys():
2130 if not field in values:
2131 missing_defaults.append(field)
2132 for field in self._inherit_fields.keys():
2133 if (field not in values) and (self._inherit_fields[field][0] not in avoid_tables):
2134 missing_defaults.append(field)
2136 if len(missing_defaults):
2137 # override defaults with the provided values, never allow the other way around
2138 defaults = self.default_get(cr, uid, missing_defaults, context)
2140 if ((dv in self._columns and self._columns[dv]._type == 'many2many') \
2141 or (dv in self._inherit_fields and self._inherit_fields[dv][2]._type == 'many2many')) \
2142 and defaults[dv] and isinstance(defaults[dv][0], (int, long)):
2143 defaults[dv] = [(6, 0, defaults[dv])]
2144 if (dv in self._columns and self._columns[dv]._type == 'one2many' \
2145 or (dv in self._inherit_fields and self._inherit_fields[dv][2]._type == 'one2many')) \
2146 and isinstance(defaults[dv], (list, tuple)) and defaults[dv] and isinstance(defaults[dv][0], dict):
2147 defaults[dv] = [(0, 0, x) for x in defaults[dv]]
2148 defaults.update(values)
# Clear the ormcache-backed method caches of this model and flag the
# registry so other workers invalidate too; silently ignores models that
# have no _ormcache (AttributeError swallowed).
# NOTE(review): the try:/clear()/pass lines are elided in this listing.
2152 def clear_caches(self):
2153 """ Clear the caches
2155 This clears the caches associated to methods decorated with
2156 ``tools.ormcache`` or ``tools.ormcache_multi``.
2159 getattr(self, '_ormcache')
2161 self.pool._any_cache_cleared = True
2162 except AttributeError:
# Merge read_group results (left side) with the exhaustive list of groups
# returned by self._group_by_full[groupby] (right side, m2o-like pairs),
# so empty groups still appear (e.g. kanban columns). Both inputs are
# assumed sorted identically and merged in a single pass; also annotates
# each result with '__fold' from the folded mapping.
# NOTE(review): several lines (result/known_values init, __context
# assignment, return) are elided in this listing — verify against the
# complete source before modifying this order-sensitive merge.
2166 def _read_group_fill_results(self, cr, uid, domain, groupby, remaining_groupbys, aggregated_fields,
2167 read_group_result, read_group_order=None, context=None):
2168 """Helper method for filling in empty groups for all possible values of
2169 the field being grouped by"""
2171 # self._group_by_full should map groupable fields to a method that returns
2172 # a list of all aggregated values that we want to display for this field,
2173 # in the form of a m2o-like pair (key,label).
2174 # This is useful to implement kanban views for instance, where all columns
2175 # should be displayed even if they don't contain any record.
2177 # Grab the list of all groups that should be displayed, including all present groups
2178 present_group_ids = [x[groupby][0] for x in read_group_result if x[groupby]]
2179 all_groups,folded = self._group_by_full[groupby](self, cr, uid, present_group_ids, domain,
2180 read_group_order=read_group_order,
2181 access_rights_uid=openerp.SUPERUSER_ID,
2184 result_template = dict.fromkeys(aggregated_fields, False)
2185 result_template[groupby + '_count'] = 0
2186 if remaining_groupbys:
2187 result_template['__context'] = {'group_by': remaining_groupbys}
2189 # Merge the left_side (current results as dicts) with the right_side (all
2190 # possible values as m2o pairs). Both lists are supposed to be using the
2191 # same ordering, and can be merged in one pass.
2194 def append_left(left_side):
2195 grouped_value = left_side[groupby] and left_side[groupby][0]
2196 if not grouped_value in known_values:
2197 result.append(left_side)
2198 known_values[grouped_value] = left_side
2200 count_attr = groupby + '_count'
2201 known_values[grouped_value].update({count_attr: left_side[count_attr]})
2202 def append_right(right_side):
2203 grouped_value = right_side[0]
2204 if not grouped_value in known_values:
2205 line = dict(result_template)
2206 line[groupby] = right_side
2207 line['__domain'] = [(groupby,'=',grouped_value)] + domain
2209 known_values[grouped_value] = line
2210 while read_group_result or all_groups:
2211 left_side = read_group_result[0] if read_group_result else None
2212 right_side = all_groups[0] if all_groups else None
2213 assert left_side is None or left_side[groupby] is False \
2214 or isinstance(left_side[groupby], (tuple,list)), \
2215 'M2O-like pair expected, got %r' % left_side[groupby]
2216 assert right_side is None or isinstance(right_side, (tuple,list)), \
2217 'M2O-like pair expected, got %r' % right_side
2218 if left_side is None:
2219 append_right(all_groups.pop(0))
2220 elif right_side is None:
2221 append_left(read_group_result.pop(0))
2222 elif left_side[groupby] == right_side:
2223 append_left(read_group_result.pop(0))
2224 all_groups.pop(0) # discard right_side
2225 elif not left_side[groupby] or not left_side[groupby][0]:
2226 # left side == "Undefined" entry, not present on right_side
2227 append_left(read_group_result.pop(0))
2229 append_right(all_groups.pop(0))
2233 r['__fold'] = folded.get(r[groupby] and r[groupby][0], False)
# Build the GROUP BY and ORDER BY term lists for read_group; ordering on a
# many2one groupby adds the needed JOIN (via _generate_order_by) and folds
# its columns into the GROUP BY terms. Unknown order fields are ignored
# with a warning rather than raised.
# NOTE(review): some branch lines (``if not orderby:``, inner if/else
# headers) are elided in this listing — verify against the complete source.
2236 def _read_group_prepare(self, orderby, aggregated_fields, annotated_groupbys, query):
2238 Prepares the GROUP BY and ORDER BY terms for the read_group method. Adds the missing JOIN clause
2239 to the query if order should be computed against m2o field.
2240 :param orderby: the orderby definition in the form "%(field)s %(order)s"
2241 :param aggregated_fields: list of aggregated fields in the query
2242 :param annotated_groupbys: list of dictionaries returned by _read_group_process_groupby
2243 These dictionaries contains the qualified name of each groupby
2244 (fully qualified SQL name for the corresponding field),
2245 and the (non raw) field name.
2246 :param osv.Query query: the query under construction
2247 :return: (groupby_terms, orderby_terms)
2250 groupby_terms = [gb['qualified_field'] for gb in annotated_groupbys]
2251 groupby_fields = [gb['groupby'] for gb in annotated_groupbys]
2253 return groupby_terms, orderby_terms
2255 self._check_qorder(orderby)
2256 for order_part in orderby.split(','):
2257 order_split = order_part.split()
2258 order_field = order_split[0]
2259 if order_field in groupby_fields:
2261 if self._all_columns[order_field.split(':')[0]].column._type == 'many2one':
2262 order_clause = self._generate_order_by(order_part, query).replace('ORDER BY ', '')
2264 orderby_terms.append(order_clause)
2265 groupby_terms += [order_term.split()[0] for order_term in order_clause.split(',')]
2267 order = '"%s" %s' % (order_field, '' if len(order_split) == 1 else order_split[1])
2268 orderby_terms.append(order)
2269 elif order_field in aggregated_fields:
2270 orderby_terms.append(order_part)
2272 # Cannot order by a field that will not appear in the results (needs to be grouped or aggregated)
2273 _logger.warn('%s: read_group order by `%s` ignored, cannot sort on empty columns (not grouped/aggregated)',
2274 self._name, order_part)
2275 return groupby_terms, orderby_terms
# Annotate one groupby spec ("field" or "field:function") with its column
# type, temporal display format / interval, timezone handling and the
# SQL-qualified expression (date_trunc + timezone conversion for temporal
# fields, coalesce(...,false) for booleans).
# NOTE(review): the dict-literal openers and the final ``return {...}`` are
# elided in this listing — verify against the complete source.
2277 def _read_group_process_groupby(self, gb, query, context):
2279 Helper method to collect important information about groupbys: raw
2280 field name, type, time informations, qualified name, ...
2282 split = gb.split(':')
2283 field_type = self._all_columns[split[0]].column._type
2284 gb_function = split[1] if len(split) == 2 else None
2285 temporal = field_type in ('date', 'datetime')
2286 tz_convert = field_type == 'datetime' and context.get('tz') in pytz.all_timezones
2287 qualified_field = self._inherits_join_calc(split[0], query)
2290 'day': 'dd MMM YYYY',
2291 'week': "'W'w YYYY",
2292 'month': 'MMMM YYYY',
2293 'quarter': 'QQQ YYYY',
2297 'day': dateutil.relativedelta.relativedelta(days=1),
2298 'week': datetime.timedelta(days=7),
2299 'month': dateutil.relativedelta.relativedelta(months=1),
2300 'quarter': dateutil.relativedelta.relativedelta(months=3),
2301 'year': dateutil.relativedelta.relativedelta(years=1)
2304 qualified_field = "timezone('%s', timezone('UTC',%s))" % (context.get('tz', 'UTC'), qualified_field)
2305 qualified_field = "date_trunc('%s', %s)" % (gb_function or 'month', qualified_field)
2306 if field_type == 'boolean':
2307 qualified_field = "coalesce(%s,false)" % qualified_field
2312 'display_format': display_formats[gb_function or 'month'] if temporal else None,
2313 'interval': time_intervals[gb_function or 'month'] if temporal else None,
2314 'tz_convert': tz_convert,
2315 'qualified_field': qualified_field
# Sanitize one value returned by the read_group SQL: None becomes False,
# and temporal values are parsed from their server string format and
# localized to the context timezone when requested.
# NOTE(review): the trailing ``return value`` is elided in this listing.
2318 def _read_group_prepare_data(self, key, value, groupby_dict, context):
2320 Helper method to sanitize the data received by read_group. The None
2321 values are converted to False, and the date/datetime are formatted,
2322 and corrected according to the timezones.
2324 value = False if value is None else value
2325 gb = groupby_dict.get(key)
2326 if gb and gb['type'] in ('date', 'datetime') and value:
2327 if isinstance(value, basestring):
2328 dt_format = DEFAULT_SERVER_DATETIME_FORMAT if gb['type'] == 'datetime' else DEFAULT_SERVER_DATE_FORMAT
2329 value = datetime.datetime.strptime(value, dt_format)
2330 if gb['tz_convert']:
2331 value = pytz.timezone(context['tz']).localize(value)
# Translate one (groupby, value) pair back into a search domain: a
# [start, start+interval) range for temporal fields (converted back to
# UTC when tz_convert), otherwise a plain equality.
# NOTE(review): the m2o branch is elided here (line 2349, presumably
# extracting the id from the (id, label) pair) — confirm against source.
2334 def _read_group_get_domain(self, groupby, value):
2336 Helper method to construct the domain corresponding to a groupby and
2337 a given value. This is mostly relevant for date/datetime.
2339 if groupby['type'] in ('date', 'datetime') and value:
2340 dt_format = DEFAULT_SERVER_DATETIME_FORMAT if groupby['type'] == 'datetime' else DEFAULT_SERVER_DATE_FORMAT
2341 domain_dt_begin = value
2342 domain_dt_end = value + groupby['interval']
2343 if groupby['tz_convert']:
2344 domain_dt_begin = domain_dt_begin.astimezone(pytz.utc)
2345 domain_dt_end = domain_dt_end.astimezone(pytz.utc)
2346 return [(groupby['field'], '>=', domain_dt_begin.strftime(dt_format)),
2347 (groupby['field'], '<', domain_dt_end.strftime(dt_format))]
2348 if groupby['type'] == 'many2one' and value:
2350 return [(groupby['field'], '=', value)]
# Final formatting of one read_group line: compute its '__domain' from the
# annotated groupbys, pretty-print temporal values with babel, and stash
# the remaining groupbys into '__context' for lazy expansion.
# NOTE(review): the tail of the method (after __context) is elided in this
# listing — verify against the complete source.
2352 def _read_group_format_result(self, data, annotated_groupbys, groupby, groupby_dict, domain, context):
2354 Helper method to format the data contained in the dictianary data by
2355 adding the domain corresponding to its values, the groupbys in the
2356 context and by properly formatting the date/datetime values.
2358 domain_group = [dom for gb in annotated_groupbys for dom in self._read_group_get_domain(gb, data[gb['groupby']])]
2359 for k,v in data.iteritems():
2360 gb = groupby_dict.get(k)
2361 if gb and gb['type'] in ('date', 'datetime') and v:
2362 data[k] = babel.dates.format_date(v, format=gb['display_format'], locale=context.get('lang', 'en_US'))
2364 data['__domain'] = domain_group + domain
2365 if len(groupby) - len(annotated_groupbys) >= 1:
2366 data['__context'] = { 'group_by': groupby[len(annotated_groupbys):]}
# Grouped read: validates the groupby fields (must be stored columns, and
# present in ``fields`` — also a SQL-injection guard), builds an aggregate
# SELECT with GROUP BY/ORDER BY via the helper methods above, executes it,
# re-reads many2one groupby columns to get (id, name) pairs, formats each
# line (__domain/__context) and optionally fills empty groups via
# _group_by_full in lazy mode.
# NOTE(review): many lines are elided in this listing (context default,
# SQL template body, fetch/format branches, final return) — verify against
# the complete source before modifying.
2370 def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False, lazy=True):
2372 Get the list of records in list view grouped by the given ``groupby`` fields
2374 :param cr: database cursor
2375 :param uid: current user id
2376 :param domain: list specifying search criteria [['field_name', 'operator', 'value'], ...]
2377 :param list fields: list of fields present in the list view specified on the object
2378 :param list groupby: list of groupby descriptions by which the records will be grouped.
2379 A groupby description is either a field (then it will be grouped by that field)
2380 or a string 'field:groupby_function'. Right now, the only functions supported
2381 are 'day', 'week', 'month', 'quarter' or 'year', and they only make sense for
2382 date/datetime fields.
2383 :param int offset: optional number of records to skip
2384 :param int limit: optional max number of records to return
2385 :param dict context: context arguments, like lang, time zone.
2386 :param list orderby: optional ``order by`` specification, for
2387 overriding the natural sort ordering of the
2388 groups, see also :py:meth:`~osv.osv.osv.search`
2389 (supported only for many2one fields currently)
2390 :param bool lazy: if true, the results are only grouped by the first groupby and the
2391 remaining groupbys are put in the __context key. If false, all the groupbys are
2393 :return: list of dictionaries(one dictionary for each record) containing:
2395 * the values of fields grouped by the fields in ``groupby`` argument
2396 * __domain: list of tuples specifying the search criteria
2397 * __context: dictionary with argument like ``groupby``
2398 :rtype: [{'field_name_1': value, ...]
2399 :raise AccessError: * if user has no read rights on the requested object
2400 * if user tries to bypass access rules for read on the requested object
2404 self.check_access_rights(cr, uid, 'read')
2405 query = self._where_calc(cr, uid, domain, context=context)
2406 fields = fields or self._columns.keys()
2408 groupby = [groupby] if isinstance(groupby, basestring) else groupby
2409 groupby_list = groupby[:1] if lazy else groupby
2410 annotated_groupbys = [self._read_group_process_groupby(gb, query, context)
2411 for gb in groupby_list]
2412 groupby_fields = [g['field'] for g in annotated_groupbys]
2413 order = orderby or ','.join([g for g in groupby_list])
2414 groupby_dict = {gb['groupby']: gb for gb in annotated_groupbys}
2416 self._apply_ir_rules(cr, uid, query, 'read', context=context)
2417 for gb in groupby_fields:
2418 assert gb in fields, "Fields in 'groupby' must appear in the list of fields to read (perhaps it's missing in the list view?)"
2419 groupby_def = self._columns.get(gb) or (self._inherit_fields.get(gb) and self._inherit_fields.get(gb)[2])
2420 assert groupby_def and groupby_def._classic_write, "Fields in 'groupby' must be regular database-persisted fields (no function or related fields), or function fields with store=True"
2421 if not (gb in self._all_columns):
2422 # Don't allow arbitrary values, as this would be a SQL injection vector!
2423 raise except_orm(_('Invalid group_by'),
2424 _('Invalid group_by specification: "%s".\nA group_by specification must be a list of valid fields.')%(gb,))
2426 aggregated_fields = [
2428 if f not in ('id', 'sequence')
2429 if f not in groupby_fields
2430 if self._all_columns[f].column._type in ('integer', 'float')
2431 if getattr(self._all_columns[f].column, '_classic_write')]
2433 field_formatter = lambda f: (self._all_columns[f].column.group_operator or 'sum', self._inherits_join_calc(f, query), f)
2434 select_terms = ["%s(%s) AS %s" % field_formatter(f) for f in aggregated_fields]
2436 for gb in annotated_groupbys:
2437 select_terms.append('%s as "%s" ' % (gb['qualified_field'], gb['groupby']))
2439 groupby_terms, orderby_terms = self._read_group_prepare(order, aggregated_fields, annotated_groupbys, query)
2440 from_clause, where_clause, where_clause_params = query.get_sql()
2441 if lazy and (len(groupby_fields) >= 2 or not context.get('group_by_no_leaf')):
2442 count_field = groupby_fields[0] if len(groupby_fields) >= 1 else '_'
2446 prefix_terms = lambda prefix, terms: (prefix + " " + ",".join(terms)) if terms else ''
2447 prefix_term = lambda prefix, term: ('%s %s' % (prefix, term)) if term else ''
2450 SELECT min(%(table)s.id) AS id, count(%(table)s.id) AS %(count_field)s_count %(extra_fields)s
2458 'table': self._table,
2459 'count_field': count_field,
2460 'extra_fields': prefix_terms(',', select_terms),
2461 'from': from_clause,
2462 'where': prefix_term('WHERE', where_clause),
2463 'groupby': prefix_terms('GROUP BY', groupby_terms),
2464 'orderby': prefix_terms('ORDER BY', orderby_terms),
2465 'limit': prefix_term('LIMIT', int(limit) if limit else None),
2466 'offset': prefix_term('OFFSET', int(offset) if limit else None),
2468 cr.execute(query, where_clause_params)
2469 fetched_data = cr.dictfetchall()
2471 if not groupby_fields:
2474 many2onefields = [gb['field'] for gb in annotated_groupbys if gb['type'] == 'many2one']
2476 data_ids = [r['id'] for r in fetched_data]
2477 many2onefields = list(set(many2onefields))
2478 data_dict = {d['id']: d for d in self.read(cr, uid, data_ids, many2onefields, context=context)}
2479 for d in fetched_data:
2480 d.update(data_dict[d['id']])
2482 data = map(lambda r: {k: self._read_group_prepare_data(k,v, groupby_dict, context) for k,v in r.iteritems()}, fetched_data)
2483 result = [self._read_group_format_result(d, annotated_groupbys, groupby, groupby_dict, domain, context) for d in data]
2484 if lazy and groupby_fields[0] in self._group_by_full:
2485 # Right now, read_group only fill results in lazy mode (by default).
2486 # If you need to have the empty groups in 'eager' mode, then the
2487 # method _read_group_fill_results need to be completely reimplemented
2489 result = self._read_group_fill_results(cr, uid, domain, groupby_fields[0], groupby[len(annotated_groupbys):],
2490 aggregated_fields, result, read_group_order=order,
2494 def _inherits_join_add(self, current_model, parent_model_name, query):
2496 Add missing table SELECT and JOIN clause to ``query`` for reaching the parent table (no duplicates)
2497 :param current_model: current model object
2498 :param parent_model_name: name of the parent model for which the clauses should be added
2499 :param query: query object on which the JOIN should be added
2501 inherits_field = current_model._inherits[parent_model_name]
2502 parent_model = self.pool[parent_model_name]
2503 parent_alias, parent_alias_statement = query.add_join((current_model._table, parent_model._table, inherits_field, 'id', inherits_field), implicit=True)
2506 def _inherits_join_calc(self, field, query):
2508 Adds missing table select and join clause(s) to ``query`` for reaching
2509 the field coming from an '_inherits' parent table (no duplicates).
2511 :param field: name of inherited field to reach
2512 :param query: query object on which the JOIN should be added
2513 :return: qualified name of field, to be used in SELECT clause
2515 current_table = self
2516 parent_alias = '"%s"' % current_table._table
2517 while field in current_table._inherit_fields and not field in current_table._columns:
2518 parent_model_name = current_table._inherit_fields[field][0]
2519 parent_table = self.pool[parent_model_name]
2520 parent_alias = self._inherits_join_add(current_table, parent_model_name, query)
2521 current_table = parent_table
2522 return '%s."%s"' % (parent_alias, field)
# Recompute the parent_left/parent_right nested-set columns for the whole
# table by a recursive depth-first walk from the root records (those whose
# _parent_name column is NULL), ordered by _parent_order when set.
# NOTE(review): several lines are elided in this listing (early return,
# pos2 init, final execute/return) — verify against the complete source.
# NOTE(review): table/column names are interpolated into SQL here; they
# come from model attributes, not user input.
2524 def _parent_store_compute(self, cr):
2525 if not self._parent_store:
2527 _logger.info('Computing parent left and right for table %s...', self._table)
2528 def browse_rec(root, pos=0):
2530 where = self._parent_name+'='+str(root)
2532 where = self._parent_name+' IS NULL'
2533 if self._parent_order:
2534 where += ' order by '+self._parent_order
2535 cr.execute('SELECT id FROM '+self._table+' WHERE '+where)
2537 for id in cr.fetchall():
2538 pos2 = browse_rec(id[0], pos2)
2539 cr.execute('update '+self._table+' set parent_left=%s, parent_right=%s where id=%s', (pos, pos2, root))
2541 query = 'SELECT id FROM '+self._table+' WHERE '+self._parent_name+' IS NULL'
2542 if self._parent_order:
2543 query += ' order by ' + self._parent_order
2546 for (root,) in cr.fetchall():
2547 pos = browse_rec(root, pos)
# Recompute and persist the stored values of a fields.function column ``k``
# for every record, in batches of AUTOINIT_RECALCULATE_STORED_FIELDS ids,
# converting values through the column's _symbol_set before the UPDATE.
# NOTE(review): the batching ``while`` header and the _multi/tuple handling
# lines are elided in this listing — verify against the complete source.
2550 def _update_store(self, cr, f, k):
2551 _logger.info("storing computed values of fields.function '%s'", k)
2552 ss = self._columns[k]._symbol_set
2553 update_query = 'UPDATE "%s" SET "%s"=%s WHERE id=%%s' % (self._table, k, ss[0])
2554 cr.execute('select id from '+self._table)
2555 ids_lst = map(lambda x: x[0], cr.fetchall())
2557 iids = ids_lst[:AUTOINIT_RECALCULATE_STORED_FIELDS]
2558 ids_lst = ids_lst[AUTOINIT_RECALCULATE_STORED_FIELDS:]
2559 res = f.get(cr, self, iids, k, SUPERUSER_ID, {})
2560 for key, val in res.items():
2563 # if val is a many2one, just write the ID
2564 if type(val) == tuple:
2566 if val is not False:
2567 cr.execute(update_query, (ss[1](val), key))
# Validate that ``value`` is an allowed value for a selection (or
# reference) field, raising except_orm otherwise. Handles both static
# selections (tuple/list) and callable selections evaluated at check time.
# NOTE(review): several branches are elided in this listing (reference
# model validation, early returns, ``val`` assignment) — verify against
# the complete source before modifying.
2569 def _check_selection_field_value(self, cr, uid, field, value, context=None):
2570 """Raise except_orm if value is not among the valid values for the selection field"""
2571 if self._columns[field]._type == 'reference':
2572 val_model, val_id_str = value.split(',', 1)
2575 val_id = long(val_id_str)
2579 raise except_orm(_('ValidateError'),
2580 _('Invalid value for reference field "%s.%s" (last part must be a non-zero integer): "%s"') % (self._table, field, value))
2584 if isinstance(self._columns[field].selection, (tuple, list)):
2585 if val in dict(self._columns[field].selection):
2587 elif val in dict(self._columns[field].selection(self, cr, uid, context=context)):
2589 raise except_orm(_('ValidateError'),
2590 _('The value "%s" for the field "%s.%s" is not in the selection') % (value, self._table, field))
# Drop the NOT NULL constraint of database columns that no longer have a
# matching (stored) column on the model — fields that were removed or are
# about to be added by another module. Non-stored function fields and
# MAGIC_COLUMNS are excluded from the "still exists" set.
# NOTE(review): the trailing comma after the cr.execute(...) call below
# turns the statement into a one-element tuple expression — harmless at
# runtime but looks like a paste artifact; confirm against the source.
2592 def _check_removed_columns(self, cr, log=False):
2593 # iterate on the database columns to drop the NOT NULL constraints
2594 # of fields which were required but have been removed (or will be added by another module)
2595 columns = [c for c in self._columns if not (isinstance(self._columns[c], fields.function) and not self._columns[c].store)]
2596 columns += MAGIC_COLUMNS
2597 cr.execute("SELECT a.attname, a.attnotnull"
2598 " FROM pg_class c, pg_attribute a"
2599 " WHERE c.relname=%s"
2600 " AND c.oid=a.attrelid"
2601 " AND a.attisdropped=%s"
2602 " AND pg_catalog.format_type(a.atttypid, a.atttypmod) NOT IN ('cid', 'tid', 'oid', 'xid')"
2603 " AND a.attname NOT IN %s", (self._table, False, tuple(columns))),
2605 for column in cr.dictfetchall():
2607 _logger.debug("column %s is in the table %s but not in the corresponding object %s",
2608 column['attname'], self._table, self._name)
2609 if column['attnotnull']:
2610 cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, column['attname']))
2611 _schema.debug("Table '%s': column '%s': dropped NOT NULL constraint",
2612 self._table, column['attname'])
def _save_constraint(self, cr, constraint_name, type):
    """
    Record the creation of a constraint for this model, to make it possible
    to delete it later when the module is uninstalled. Type can be either
    'f' or 'u' depending on the constraint being a foreign key or not.
    """
    # NOTE(review): a few lines are elided in this excerpt (the early
    # `return`, the `cr.execute` openers and the `if not cr.rowcount:`
    # guard before the INSERT).
    if not self._module:
        # no need to save constraints for custom models as they're not part
        # of any module
    assert type in ('f', 'u')
    # Is this constraint already recorded for the model's module?
        SELECT 1 FROM ir_model_constraint, ir_module_module
        WHERE ir_model_constraint.module=ir_module_module.id
        AND ir_model_constraint.name=%s
        AND ir_module_module.name=%s
        """, (constraint_name, self._module))
    # Not recorded yet: link the constraint to its module and model so the
    # uninstall machinery can drop it later.
        INSERT INTO ir_model_constraint
        (name, date_init, date_update, module, model, type)
        VALUES (%s, now() AT TIME ZONE 'UTC', now() AT TIME ZONE 'UTC',
        (SELECT id FROM ir_module_module WHERE name=%s),
        (SELECT id FROM ir_model WHERE model=%s), %s)""",
        (constraint_name, self._module, self._name, type))
def _save_relation_table(self, cr, relation_table):
    """
    Record the creation of a many2many for this model, to make it possible
    to delete it later when the module is uninstalled.
    """
    # NOTE(review): the `cr.execute` opener of the SELECT and the
    # `if not cr.rowcount:` guard are elided in this excerpt.
    # Is the relation table already recorded for the model's module?
        SELECT 1 FROM ir_model_relation, ir_module_module
        WHERE ir_model_relation.module=ir_module_module.id
        AND ir_model_relation.name=%s
        AND ir_module_module.name=%s
        """, (relation_table, self._module))
    # Not recorded yet: register it in ir_model_relation.
    cr.execute("""INSERT INTO ir_model_relation (name, date_init, date_update, module, model)
        VALUES (%s, now() AT TIME ZONE 'UTC', now() AT TIME ZONE 'UTC',
        (SELECT id FROM ir_module_module WHERE name=%s),
        (SELECT id FROM ir_model WHERE model=%s))""",
        (relation_table, self._module, self._name))
# checked version: for direct m2o starting from `self`
def _m2o_add_foreign_key_checked(self, source_field, dest_model, ondelete):
    """Queue a foreign key for `source_field` of this model pointing at
    `dest_model`, after validating the transient/persistent combination.

    The FK definition is only recorded in ``self._foreign_keys``; the
    actual DDL is emitted later, in ``_auto_end``.
    """
    self_is_transient = self.is_transient()
    dest_is_transient = dest_model.is_transient()
    assert self_is_transient or not dest_is_transient, \
        'Many2One relationships from non-transient Model to TransientModel are forbidden'
    if self_is_transient and not dest_is_transient:
        # TransientModel relationships to regular Models are annoying
        # usually because they could block deletion due to the FKs.
        # So unless stated otherwise we default them to ondelete=cascade.
        ondelete = ondelete or 'cascade'
    on_delete_rule = ondelete or 'set null'
    fk_def = (self._table, source_field, dest_model._table, on_delete_rule)
    self._foreign_keys.add(fk_def)
    _schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s", *fk_def)
# unchecked version: for custom cases, such as m2m relationships
def _m2o_add_foreign_key_unchecked(self, source_table, source_field, dest_model, ondelete):
    """Queue a foreign key from an arbitrary table (e.g. an m2m relation
    table) to `dest_model`, without any transient-model validation."""
    rule = 'set null' if not ondelete else ondelete
    fk_def = (source_table, source_field, dest_model._table, rule)
    self._foreign_keys.add(fk_def)
    _schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s", *fk_def)
2677 def _drop_constraint(self, cr, source_table, constraint_name):
2678 cr.execute("ALTER TABLE %s DROP CONSTRAINT %s" % (source_table,constraint_name))
def _m2o_fix_foreign_key(self, cr, source_table, source_field, dest_model, ondelete):
    # NOTE(review): a few lines are elided in this excerpt (notably the
    # `cons = constraints[0]` assignment, the early `return` of the
    # "all good" branch and the branch keyword introducing the multi-FK
    # case); comments describe only the visible code.
    #
    # Find FK constraint(s) currently established for the m2o field,
    # and see whether they are stale or not
    cr.execute("""SELECT confdeltype as ondelete_rule, conname as constraint_name,
                         cl2.relname as foreign_table
                  FROM pg_constraint as con, pg_class as cl1, pg_class as cl2,
                       pg_attribute as att1, pg_attribute as att2
                  WHERE con.conrelid = cl1.oid
                  AND cl1.relname = %s
                  AND con.confrelid = cl2.oid
                  AND array_lower(con.conkey, 1) = 1
                  AND con.conkey[1] = att1.attnum
                  AND att1.attrelid = cl1.oid
                  AND att1.attname = %s
                  AND array_lower(con.confkey, 1) = 1
                  AND con.confkey[1] = att2.attnum
                  AND att2.attrelid = cl2.oid
                  AND att2.attname = %s
                  AND con.contype = 'f'""", (source_table, source_field, 'id'))
    constraints = cr.dictfetchall()
    if len(constraints) == 1:
        # Is it the right constraint?
        # Compare both the ON DELETE rule (via the pg_constraint confdeltype
        # letter codes) and the referenced table.
        if cons['ondelete_rule'] != POSTGRES_CONFDELTYPES.get((ondelete or 'set null').upper(), 'a')\
                or cons['foreign_table'] != dest_model._table:
            # Wrong FK: drop it and recreate
            _schema.debug("Table '%s': dropping obsolete FK constraint: '%s'",
                          source_table, cons['constraint_name'])
            self._drop_constraint(cr, source_table, cons['constraint_name'])
            # it's all good, nothing to do!
        # Multiple FKs found for the same field, drop them all, and re-create
        for cons in constraints:
            _schema.debug("Table '%s': dropping duplicate FK constraints: '%s'",
                          source_table, cons['constraint_name'])
            self._drop_constraint(cr, source_table, cons['constraint_name'])

    # (re-)create the FK
    self._m2o_add_foreign_key_checked(source_field, dest_model, ondelete)
def _auto_init(self, cr, context=None):
    """

    Call _field_create and, unless _auto is False:

    - create the corresponding table in database for the model,
    - possibly add the parent columns in database,
    - possibly add the columns 'create_uid', 'create_date', 'write_uid',
      'write_date' in database if _log_access is True (the default),
    - report on database columns no more existing in _columns,
    - remove no more existing not null constraints,
    - alter existing database columns to match _columns,
    - create database tables to match _columns,
    - add database indices to match _columns,
    - save in self._foreign_keys a list a foreign keys to create (see
      _auto_end).
    """
    # NOTE(review): many lines of this long method are elided in this
    # excerpt (guards such as `if create:`/`if res:`/`else:`, `continue`
    # statements, `try:` openers, the `todo_end` initialization and the
    # final return); comments below describe only the visible code.
    self._foreign_keys = set()
    raise_on_invalid_object_name(self._name)
    store_compute = False
    update_custom_fields = context.get('update_custom_fields', False)
    # Make sure every column has a matching ir.model.fields row.
    self._field_create(cr, context=context)
    create = not self._table_exist(cr)
    self._create_table(cr)

    if self._parent_store:
        if not self._parent_columns_exist(cr):
            self._create_parent_columns(cr)
            # parent_left/parent_right were just added: schedule a full
            # recomputation of the nested-set values at the end.
            store_compute = True

    # Create the create_uid, create_date, write_uid, write_date, columns if desired.
    if self._log_access:
        self._add_log_columns(cr)

    self._check_removed_columns(cr, log=False)

    # iterate on the "object columns"
    column_data = self._select_column_data(cr)

    for k, f in self._columns.iteritems():
        if k in MAGIC_COLUMNS:
        # Don't update custom (also called manual) fields
        if f.manual and not update_custom_fields:

        # o2m/m2m have no column on this table: only sanity-check (and for
        # m2m, possibly create) their backing structures.
        if isinstance(f, fields.one2many):
            self._o2m_raise_on_missing_reference(cr, f)

        elif isinstance(f, fields.many2many):
            self._m2m_raise_or_create_relation(cr, f)

            res = column_data.get(k)

            # The field is not found as-is in database, try if it
            # exists with an old name.
            if not res and hasattr(f, 'oldname'):
                res = column_data.get(f.oldname)
                    cr.execute('ALTER TABLE "%s" RENAME "%s" TO "%s"' % (self._table, f.oldname, k))
                    column_data[k] = res
                    _schema.debug("Table '%s': renamed column '%s' to '%s'",
                                  self._table, f.oldname, k)

            # The field already exists in database. Possibly
            # change its type, rename it, drop it or change its
            # constraints.
                f_pg_type = res['typname']
                f_pg_size = res['size']
                f_pg_notnull = res['attnotnull']
                if isinstance(f, fields.function) and not f.store and\
                        not getattr(f, 'nodrop', False):
                    # Previously stored column is now computed on the fly:
                    # the physical column is obsolete.
                    _logger.info('column %s (%s) converted to a function, removed from table %s',
                                 k, f.string, self._table)
                    cr.execute('ALTER TABLE "%s" DROP COLUMN "%s" CASCADE' % (self._table, k))
                    _schema.debug("Table '%s': dropped column '%s' with cascade",
                f_obj_type = get_pg_type(f) and get_pg_type(f)[0]

                # Conversion table entries:
                # (current db type, orm type, new db type, cast suffix).
                    ('text', 'char', pg_varchar(f.size), '::%s' % pg_varchar(f.size)),
                    ('varchar', 'text', 'TEXT', ''),
                    ('int4', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
                    ('date', 'datetime', 'TIMESTAMP', '::TIMESTAMP'),
                    ('timestamp', 'date', 'date', '::date'),
                    ('numeric', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
                    ('float8', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
                if f_pg_type == 'varchar' and f._type == 'char' and f_pg_size and (f.size is None or f_pg_size < f.size):
                    # varchar column must grow (or become unlimited): try an
                    # in-place ALTER first, inside a savepoint.
                    with cr.savepoint():
                        cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" TYPE %s' % (self._table, k, pg_varchar(f.size)))
                    except psycopg2.NotSupportedError:
                        # In place alter table cannot be done because a view is depending of this field.
                        # Do a manual copy. This will drop the view (that will be recreated later)
                        cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO temp_change_size' % (self._table, k))
                        cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, pg_varchar(f.size)))
                        cr.execute('UPDATE "%s" SET "%s"=temp_change_size::%s' % (self._table, k, pg_varchar(f.size)))
                        cr.execute('ALTER TABLE "%s" DROP COLUMN temp_change_size CASCADE' % (self._table,))
                    _schema.debug("Table '%s': column '%s' (type varchar) changed size from %s to %s",
                                  self._table, k, f_pg_size or 'unlimited', f.size or 'unlimited')
                    if (f_pg_type==c[0]) and (f._type==c[1]):
                        if f_pg_type != f_obj_type:
                            # Known conversion: migrate the data through a
                            # temporary column with an explicit cast.
                            cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO __temp_type_cast' % (self._table, k))
                            cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, c[2]))
                            cr.execute(('UPDATE "%s" SET "%s"= __temp_type_cast'+c[3]) % (self._table, k))
                            cr.execute('ALTER TABLE "%s" DROP COLUMN __temp_type_cast CASCADE' % (self._table,))
                            _schema.debug("Table '%s': column '%s' changed type from %s to %s",
                                          self._table, k, c[0], c[1])
                if f_pg_type != f_obj_type:
                    # No known conversion: park the existing data in a column
                    # renamed "<name>_moved<i>" (first free suffix) and
                    # recreate the column empty with the new type.
                        newname = k + '_moved' + str(i)
                        cr.execute("SELECT count(1) FROM pg_class c,pg_attribute a " \
                                   "WHERE c.relname=%s " \
                                   "AND a.attname=%s " \
                                   "AND c.oid=a.attrelid ", (self._table, newname))
                        if not cr.fetchone()[0]:
                    cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, k))
                    cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO "%s"' % (self._table, k, newname))
                    cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, get_pg_type(f)[1]))
                    cr.execute("COMMENT ON COLUMN %s.\"%s\" IS %%s" % (self._table, k), (f.string,))
                    _schema.debug("Table '%s': column '%s' has changed type (DB=%s, def=%s), data moved to column %s !",
                                  self._table, k, f_pg_type, f._type, newname)

                # if the field is required and hasn't got a NOT NULL constraint
                if f.required and f_pg_notnull == 0:
                    # set the field to the default value if any
                    if k in self._defaults:
                        if callable(self._defaults[k]):
                            default = self._defaults[k](self, cr, SUPERUSER_ID, context)
                            default = self._defaults[k]

                        if default is not None:
                            # Backfill NULL rows so the constraint can be set.
                            ss = self._columns[k]._symbol_set
                            query = 'UPDATE "%s" SET "%s"=%s WHERE "%s" is NULL' % (self._table, k, ss[0], k)
                            cr.execute(query, (ss[1](default),))
                    # add the NOT NULL constraint
                        cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k), log_exceptions=False)
                        _schema.debug("Table '%s': column '%s': added NOT NULL constraint",
                        msg = "Table '%s': unable to set a NOT NULL constraint on column '%s' !\n"\
                              "If you want to have it, you should update the records and execute manually:\n"\
                              "ALTER TABLE %s ALTER COLUMN %s SET NOT NULL"
                        _schema.warning(msg, self._table, k, self._table, k)

                elif not f.required and f_pg_notnull == 1:
                    cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, k))
                    _schema.debug("Table '%s': column '%s': dropped NOT NULL constraint",

                # Keep the single-column btree index in sync with f.select.
                indexname = '%s_%s_index' % (self._table, k)
                cr.execute("SELECT indexname FROM pg_indexes WHERE indexname = %s and tablename = %s", (indexname, self._table))
                res2 = cr.dictfetchall()
                if not res2 and f.select:
                    cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (self._table, k, self._table, k))
                    if f._type == 'text':
                        # FIXME: for fields.text columns we should try creating GIN indexes instead (seems most suitable for an ERP context)
                        msg = "Table '%s': Adding (b-tree) index for %s column '%s'."\
                              "This is probably useless (does not work for fulltext search) and prevents INSERTs of long texts"\
                              " because there is a length limit for indexable btree values!\n"\
                              "Use a search view instead if you simply want to make the field searchable."
                        _schema.warning(msg, self._table, f._type, k)
                if res2 and not f.select:
                    cr.execute('DROP INDEX "%s_%s_index"' % (self._table, k))
                    msg = "Table '%s': dropping index for column '%s' of type '%s' as it is not required anymore"
                    _schema.debug(msg, self._table, k, f._type)

                # Re-validate the FK for stored many2one columns.
                if isinstance(f, fields.many2one) or (isinstance(f, fields.function) and f._type == 'many2one' and f.store):
                    dest_model = self.pool[f._obj]
                    if dest_model._table != 'ir_actions':
                        self._m2o_fix_foreign_key(cr, self._table, k, dest_model, f.ondelete)

            # The field doesn't exist in database. Create it if necessary.
                if not isinstance(f, fields.function) or f.store:
                    # add the missing field
                    cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, get_pg_type(f)[1]))
                    cr.execute("COMMENT ON COLUMN %s.\"%s\" IS %%s" % (self._table, k), (f.string,))
                    _schema.debug("Table '%s': added column '%s' with definition=%s",
                                  self._table, k, get_pg_type(f)[1])

                    # Initialize the new column on pre-existing rows.
                    if not create and k in self._defaults:
                        if callable(self._defaults[k]):
                            default = self._defaults[k](self, cr, SUPERUSER_ID, context)
                            default = self._defaults[k]

                        ss = self._columns[k]._symbol_set
                        query = 'UPDATE "%s" SET "%s"=%s' % (self._table, k, ss[0])
                        cr.execute(query, (ss[1](default),))
                        _logger.debug("Table '%s': setting default value of new column %s", self._table, k)

                    # remember the functions to call for the stored fields
                    if isinstance(f, fields.function):
                        if f.store is not True: # i.e. if f.store is a dict
                            order = f.store[f.store.keys()[0]][2]
                        todo_end.append((order, self._update_store, (f, k)))

                    # and add constraints if needed
                    if isinstance(f, fields.many2one) or (isinstance(f, fields.function) and f._type == 'many2one' and f.store):
                        if f._obj not in self.pool:
                            raise except_orm('Programming Error', 'There is no reference available for %s' % (f._obj,))
                        dest_model = self.pool[f._obj]
                        ref = dest_model._table
                        # ir_actions is inherited so foreign key doesn't work on it
                        if ref != 'ir_actions':
                            self._m2o_add_foreign_key_checked(k, dest_model, f.ondelete)
                        cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (self._table, k, self._table, k))
                            cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k), log_exceptions=False)
                            _schema.debug("Table '%s': column '%s': added a NOT NULL constraint",
                            msg = "WARNING: unable to set column %s of table %s not null !\n"\
                                  "Try to re-run: openerp-server --update=module\n"\
                                  "If it doesn't work, update records and execute manually:\n"\
                                  "ALTER TABLE %s ALTER COLUMN %s SET NOT NULL"
                            _logger.warning(msg, k, self._table, self._table, k)

    # Recompute `create` for models whose table is not managed here
    # (per the docstring, table management only happens unless _auto is
    # False — the branch keyword is elided in this excerpt).
    cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (self._table,))
    create = not bool(cr.fetchone())

    cr.commit() # start a new transaction

    self._add_sql_constraints(cr)

    self._execute_sql(cr)

    self._parent_store_compute(cr)
def _auto_end(self, cr, context=None):
    """ Create the foreign keys recorded by _auto_init. """
    for t, k, r, d in self._foreign_keys:
        # Each entry is (table, column, referenced table, on-delete rule).
        cr.execute('ALTER TABLE "%s" ADD FOREIGN KEY ("%s") REFERENCES "%s" ON DELETE %s' % (t, k, r, d))
        # Register the FK so module uninstallation can drop it later.
        self._save_constraint(cr, "%s_%s_fkey" % (t, k), 'f')
    # The set only lives between _auto_init and _auto_end.
    del self._foreign_keys
def _table_exist(self, cr):
    # Check whether the model's backing table (or view) already exists.
    # (the line returning the cursor result is elided in this excerpt)
    cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (self._table,))
def _create_table(self, cr):
    """Create this model's backing table with a serial primary key, and
    attach the model description as the table comment."""
    table = self._table
    cr.execute('CREATE TABLE "%s" (id SERIAL NOT NULL, PRIMARY KEY(id))' % (table,))
    comment_query = "COMMENT ON TABLE \"%s\" IS %%s" % table
    cr.execute(comment_query, (self._description,))
    _schema.debug("Table '%s': created", table)
def _parent_columns_exist(self, cr):
    # Check for the nested-set column 'parent_left' on the table.
    # (the line returning the cursor result is elided in this excerpt)
    cr.execute("""SELECT c.relname
        FROM pg_class c, pg_attribute a
        WHERE c.relname=%s AND a.attname=%s AND c.oid=a.attrelid
        """, (self._table, 'parent_left'))
def _create_parent_columns(self, cr):
    # Add the nested-set interval columns used by _parent_store, and warn
    # when the model does not declare matching (indexed) fields.
    # NOTE(review): the closing `self._name)` argument of several
    # _logger.error calls is elided in this excerpt.
    cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_left" INTEGER' % (self._table,))
    cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_right" INTEGER' % (self._table,))
    if 'parent_left' not in self._columns:
        _logger.error('create a column parent_left on object %s: fields.integer(\'Left Parent\', select=1)',
        _schema.debug("Table '%s': added column '%s' with definition=%s",
                      self._table, 'parent_left', 'INTEGER')
    elif not self._columns['parent_left'].select:
        _logger.error('parent_left column on object %s must be indexed! Add select=1 to the field definition)',
    if 'parent_right' not in self._columns:
        _logger.error('create a column parent_right on object %s: fields.integer(\'Right Parent\', select=1)',
        _schema.debug("Table '%s': added column '%s' with definition=%s",
                      self._table, 'parent_right', 'INTEGER')
    elif not self._columns['parent_right'].select:
        _logger.error('parent_right column on object %s must be indexed! Add select=1 to the field definition)',
    # The hierarchy link must not leave orphans behind.
    if self._columns[self._parent_name].ondelete not in ('cascade', 'restrict'):
        _logger.error("The column %s on object %s must be set as ondelete='cascade' or 'restrict'",
                      self._parent_name, self._name)
def _add_log_columns(self, cr):
    # Add each audit column (create_uid/create_date/write_uid/write_date)
    # to the table when it is missing.
    # NOTE(review): the cr.execute opener of the existence check and the
    # rowcount guard are elided in this excerpt.
    for field, field_def in LOG_ACCESS_COLUMNS.iteritems():
        FROM pg_class c, pg_attribute a
        WHERE c.relname=%s AND a.attname=%s AND c.oid=a.attrelid
        """, (self._table, field))
        cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, field, field_def))
        _schema.debug("Table '%s': added column '%s' with definition=%s",
                      self._table, field, field_def)
3072 def _select_column_data(self, cr):
3073 # attlen is the number of bytes necessary to represent the type when
3074 # the type has a fixed size. If the type has a varying size attlen is
3075 # -1 and atttypmod is the size limit + 4, or -1 if there is no limit.
3076 cr.execute("SELECT c.relname,a.attname,a.attlen,a.atttypmod,a.attnotnull,a.atthasdef,t.typname,CASE WHEN a.attlen=-1 THEN (CASE WHEN a.atttypmod=-1 THEN 0 ELSE a.atttypmod-4 END) ELSE a.attlen END as size " \
3077 "FROM pg_class c,pg_attribute a,pg_type t " \
3078 "WHERE c.relname=%s " \
3079 "AND c.oid=a.attrelid " \
3080 "AND a.atttypid=t.oid", (self._table,))
3081 return dict(map(lambda x: (x['attname'], x),cr.dictfetchall()))
3084 def _o2m_raise_on_missing_reference(self, cr, f):
3085 # TODO this check should be a method on fields.one2many.
3086 if f._obj in self.pool:
3087 other = self.pool[f._obj]
3088 # TODO the condition could use fields_get_keys().
3089 if f._fields_id not in other._columns.keys():
3090 if f._fields_id not in other._inherit_fields.keys():
3091 raise except_orm('Programming Error', "There is no reference field '%s' found for '%s'" % (f._fields_id, f._obj,))
def _m2m_raise_or_create_relation(self, cr, f):
    """Create, if missing, the relation table backing the many2many `f`,
    with its two FK columns, unique pair constraint and indexes."""
    # NOTE(review): a couple of lines near the end of this method are
    # elided in this excerpt.
    m2m_tbl, col1, col2 = f._sql_names(self)
    # do not create relations for custom fields as they do not belong to a module
    # they will be automatically removed when dropping the corresponding ir.model.field
    # table name for custom relation all starts with x_, see __init__
    if not m2m_tbl.startswith('x_'):
        self._save_relation_table(cr, m2m_tbl)
    cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (m2m_tbl,))
    if not cr.dictfetchall():
        if f._obj not in self.pool:
            raise except_orm('Programming Error', 'Many2Many destination model does not exist: `%s`' % (f._obj,))
        dest_model = self.pool[f._obj]
        ref = dest_model._table
        cr.execute('CREATE TABLE "%s" ("%s" INTEGER NOT NULL, "%s" INTEGER NOT NULL, UNIQUE("%s","%s"))' % (m2m_tbl, col1, col2, col1, col2))
        # create foreign key references with ondelete=cascade, unless the targets are SQL views
        cr.execute("SELECT relkind FROM pg_class WHERE relkind IN ('v') AND relname=%s", (ref,))
        if not cr.fetchall():
            self._m2o_add_foreign_key_unchecked(m2m_tbl, col2, dest_model, 'cascade')
        cr.execute("SELECT relkind FROM pg_class WHERE relkind IN ('v') AND relname=%s", (self._table,))
        if not cr.fetchall():
            self._m2o_add_foreign_key_unchecked(m2m_tbl, col1, self, 'cascade')
        # One index per FK column to speed up joins in both directions.
        cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (m2m_tbl, col1, m2m_tbl, col1))
        cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (m2m_tbl, col2, m2m_tbl, col2))
        cr.execute("COMMENT ON TABLE \"%s\" IS 'RELATION BETWEEN %s AND %s'" % (m2m_tbl, self._table, ref))
        _schema.debug("Create table '%s': m2m relation between '%s' and '%s'", m2m_tbl, self._table, ref)
def _add_sql_constraints(self, cr):
    """
    Modify this model's database table constraints so they match the one in
    _sql_constraints.
    """
    # NOTE(review): parts of this method are elided in this excerpt — the
    # `sql_actions = {...}` dict literal structure ('order'/'execute' keys,
    # braces) and the try/except around the final execute.
    def unify_cons_text(txt):
        # Normalize case and spacing so the textual comparison of
        # constraint definitions ignores cosmetic differences.
        return txt.lower().replace(', ',',').replace(' (','(')

    for (key, con, _) in self._sql_constraints:
        conname = '%s_%s' % (self._table, key)

        # Register the constraint for later uninstall ('u' = not an FK).
        self._save_constraint(cr, conname, 'u')
        cr.execute("SELECT conname, pg_catalog.pg_get_constraintdef(oid, true) as condef FROM pg_constraint where conname=%s", (conname,))
        existing_constraints = cr.dictfetchall()
        # 'drop' action: remove the constraint whose definition changed.
            'query': 'ALTER TABLE "%s" DROP CONSTRAINT "%s"' % (self._table, conname, ),
            'msg_ok': "Table '%s': dropped constraint '%s'. Reason: its definition changed from '%%s' to '%s'" % (
                self._table, conname, con),
            'msg_err': "Table '%s': unable to drop \'%s\' constraint !" % (self._table, con),
        # 'add' action: (re-)create the constraint with the current definition.
            'query': 'ALTER TABLE "%s" ADD CONSTRAINT "%s" %s' % (self._table, conname, con,),
            'msg_ok': "Table '%s': added constraint '%s' with definition=%s" % (self._table, conname, con),
            'msg_err': "Table '%s': unable to add \'%s\' constraint !\n If you want to have it, you should update the records and execute manually:\n%%s" % (

        if not existing_constraints:
            # constraint does not exists:
            sql_actions['add']['execute'] = True
            sql_actions['add']['msg_err'] = sql_actions['add']['msg_err'] % (sql_actions['add']['query'], )
        elif unify_cons_text(con) not in [unify_cons_text(item['condef']) for item in existing_constraints]:
            # constraint exists but its definition has changed:
            sql_actions['drop']['execute'] = True
            sql_actions['drop']['msg_ok'] = sql_actions['drop']['msg_ok'] % (existing_constraints[0]['condef'].lower(), )
            sql_actions['add']['execute'] = True
            sql_actions['add']['msg_err'] = sql_actions['add']['msg_err'] % (sql_actions['add']['query'], )

        # we need to add the constraint:
        # Run the selected actions ordered so drops happen before adds.
        sql_actions = [item for item in sql_actions.values()]
        sql_actions.sort(key=lambda x: x['order'])
        for sql_action in [action for action in sql_actions if action['execute']]:
            cr.execute(sql_action['query'])
            _schema.debug(sql_action['msg_ok'])
            _schema.warning(sql_action['msg_err'])
def _execute_sql(self, cr):
    """ Execute the SQL code from the _sql attribute (if any)."""
    if hasattr(self, "_sql"):
        # Naive split on ';' — statements must not contain literal
        # semicolons inside strings.
        for line in self._sql.split(';'):
            line2 = line.replace('\n', '').strip()
            # (execution of each non-empty statement is elided in this excerpt)
3191 # Update objects that uses this one to update their _inherits fields
3194 def _inherits_reload_src(self):
3195 """ Recompute the _inherit_fields mapping on each _inherits'd child model."""
3196 for obj in self.pool.models.values():
3197 if self._name in obj._inherits:
3198 obj._inherits_reload()
def _inherits_reload(self):
    """ Recompute the _inherit_fields mapping.

    This will also call itself on each inherits'd child model.
    """
    # (the initialization of the accumulator `res` is elided in this excerpt)
    for table in self._inherits:
        other = self.pool[table]
        # Direct columns of the parent; each value is
        # (parent model, link field, column object, original parent model).
        for col in other._columns.keys():
            res[col] = (table, self._inherits[table], other._columns[col], table)
        # Columns the parent itself inherits: keep track of the original
        # parent model that really owns the column.
        for col in other._inherit_fields.keys():
            res[col] = (table, self._inherits[table], other._inherit_fields[col][2], other._inherit_fields[col][3])
    self._inherit_fields = res
    self._all_columns = self._get_column_infos()
    # Propagate the recomputation to the models inheriting from this one.
    self._inherits_reload_src()
def _get_column_infos(self):
    """Returns a dict mapping all fields names (direct fields and
    inherited field via _inherits) to a ``column_info`` struct
    giving detailed columns """
    # (the accumulator initialization and the final `return` are elided
    # in this excerpt)
    for k, (parent, m2o, col, original_parent) in self._inherit_fields.iteritems():
        result[k] = fields.column_info(k, col, parent, m2o, original_parent)
    # Direct columns are written last so they override inherited ones
    # sharing the same name.
    for k, col in self._columns.iteritems():
        result[k] = fields.column_info(k, col)
3231 def _inherits_check(self):
3232 for table, field_name in self._inherits.items():
3233 if field_name not in self._columns:
3234 _logger.info('Missing many2one field definition for _inherits reference "%s" in "%s", using default one.', field_name, self._name)
3235 self._columns[field_name] = fields.many2one(table, string="Automatically created field to link to parent %s" % table,
3236 required=True, ondelete="cascade")
3237 elif not self._columns[field_name].required or self._columns[field_name].ondelete.lower() not in ("cascade", "restrict"):
3238 _logger.warning('Field definition for _inherits reference "%s" in "%s" must be marked as "required" with ondelete="cascade" or "restrict", forcing it to required + cascade.', field_name, self._name)
3239 self._columns[field_name].required = True
3240 self._columns[field_name].ondelete = "cascade"
def fields_get(self, cr, user, allfields=None, context=None, write_access=True):
    """ Return the definition of each field.

    The returned value is a dictionary (indiced by field name) of
    dictionaries. The _inherits'd fields are included. The string, help,
    and selection (if present) attributes are translated.

    :param cr: database cursor
    :param user: current user id
    :param allfields: list of fields
    :param context: context arguments, like lang, time zone
    :return: dictionary of field dictionaries, each one describing a field of the business object
    :raise AccessError: * if user has no create/write rights on the requested object
    """
    # NOTE(review): several lines are elided in this excerpt (the context
    # default, the result-dict initialization, `continue` statements, the
    # guards on the translation results and the final return).
    # Fields are editable in the UI only when the user may write or create.
    write_access = self.check_access_rights(cr, user, 'write', raise_exception=False) \
        or self.check_access_rights(cr, user, 'create', raise_exception=False)

    translation_obj = self.pool.get('ir.translation')
    # Start from the parents' definitions so local fields can override them.
    for parent in self._inherits:
        res.update(self.pool[parent].fields_get(cr, user, allfields, context))

    for f, field in self._columns.iteritems():
        # Skip unrequested fields and fields restricted to groups the user
        # does not belong to.
        if (allfields and f not in allfields) or \
                (field.groups and not self.user_has_groups(cr, user, groups=field.groups, context=context)):

        res[f] = fields.field_to_dict(self, cr, user, field, context=context)

        if not write_access:
            res[f]['readonly'] = True
            res[f]['states'] = {}

        # Translate the user-visible attributes when a language is requested.
        if 'lang' in context:
            if 'string' in res[f]:
                res_trans = translation_obj._get_source(cr, user, self._name + ',' + f, 'field', context['lang'])
                res[f]['string'] = res_trans
            if 'help' in res[f]:
                help_trans = translation_obj._get_source(cr, user, self._name + ',' + f, 'help', context['lang'])
                res[f]['help'] = help_trans
def get_empty_list_help(self, cr, user, help, context=None):
    """ Generic method giving the help message displayed when having
    no result to display in a list or kanban view. By default it returns
    the help given in parameter that is generally the help message
    defined in the action.
    """
    # (the line returning `help` is elided in this excerpt)
def check_field_access_rights(self, cr, user, operation, fields, context=None):
    """
    Check the user access rights on the given fields. This raises Access
    Denied if the user does not have the rights. Otherwise it returns the
    fields (as is if the fields is not falsy, or the readable/writable
    fields if fields is falsy).
    """
    # NOTE(review): several lines are elided in this excerpt (the inner
    # predicate's `def` line, several `return` statements, the branch
    # keywords and the `raise` opener of the access error).
        """Predicate to test if the user has access to the given field name."""
        # Ignore requested field if it doesn't exist. This is ugly but
        # it seems to happen at least with 'name_alias' on res.partner.
        if field_name not in self._all_columns:
        field = self._all_columns[field_name].column
        # Group-restricted fields: the superuser always passes; others are
        # checked against the field's groups.
        if user != SUPERUSER_ID and field.groups:
            return self.user_has_groups(cr, user, groups=field.groups, context=context)

        # No explicit field list requested: return every accessible field.
        fields = filter(p, self._all_columns.keys())
        # Explicit list: any field rejected by `p` is an access violation.
        filtered_fields = filter(lambda a: not p(a), fields)
            _logger.warning('Access Denied by ACLs for operation: %s, uid: %s, model: %s, fields: %s', operation, user, self._name, ', '.join(filtered_fields))
            _('The requested operation cannot be completed due to security restrictions. '
              'Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
            (self._description, operation))
def read(self, cr, user, ids, fields=None, context=None, load='_classic_read'):
    """ Read records with given ids with the given fields

    :param cr: database cursor
    :param user: current user id
    :param ids: id or list of the ids of the records to read
    :param fields: optional list of field names to return (default: all fields would be returned)
    :type fields: list (example ['field_name_1', ...])
    :param context: optional context dictionary - it may contains keys for specifying certain options
                    like ``context_lang``, ``context_tz`` to alter the results of the call.
                    A special ``bin_size`` boolean flag may also be passed in the context to request the
                    value of all fields.binary columns to be returned as the size of the binary instead of its
                    contents. This can also be selectively overriden by passing a field-specific flag
                    in the form ``bin_size_XXX: True/False`` where ``XXX`` is the name of the field.
                    Note: The ``bin_size_XXX`` form is new in OpenERP v6.0.
    :return: list of dictionaries((dictionary per record asked)) with requested field values
    :rtype: [{‘name_of_the_field’: value, ...}, ...]
    :raise AccessError: * if user has no read rights on the requested object
                        * if user tries to bypass access rules for read on the requested object
    """
    # NOTE(review): the normalization of `ids` into `select` and the
    # final `return result` are elided in this excerpt.
    self.check_access_rights(cr, user, 'read')
    fields = self.check_field_access_rights(cr, user, 'read', fields)
    # Accept a single scalar id as well as a list of ids.
    if isinstance(ids, (int, long)):
    # Browse-record style entries ({'id': x, ...}) are reduced to their id.
    select = map(lambda x: isinstance(x, dict) and x['id'] or x, select)
    result = self._read_flat(cr, user, select, fields, context, load)
    # Scalar id in => single record dict out (or False when not found).
    if isinstance(ids, (int, long)):
        return result and result[0] or False
# Low-level fetch behind read(): one SELECT per id-chunk with ir.rule
# filtering, then translations, _inherits delegation and function-field
# post-processing.
# NOTE(review): this listing is elided (the embedded source line numbers
# jump), so several statements the code below relies on (loop headers,
# else-branches, variable initializations) are not visible here.
3367 def _read_flat(self, cr, user, ids, fields_to_read, context=None, load='_classic_read'):
3372 if fields_to_read is None:
3373 fields_to_read = self._columns.keys()
# de-duplicate the requested field names
3375 fields_to_read = list(set(fields_to_read))
3377 # all inherited fields + all non inherited fields for which the attribute whose name is in load is True
3378 fields_pre = [f for f in fields_to_read if
3379 f == self.CONCURRENCY_CHECK_FIELD
3380 or (f in self._columns and getattr(self._columns[f], '_classic_write'))
3381 ] + self._inherits.values()
# Render one SQL select-list expression for field f (fully qualified).
3385 def convert_field(f):
3386 f_qual = '%s."%s"' % (self._table, f) # need fully-qualified references in case len(tables) > 1
3387 if f in ('create_date', 'write_date'):
3388 return "date_trunc('second', %s) as %s" % (f_qual, f)
3389 if f == self.CONCURRENCY_CHECK_FIELD:
3390 if self._log_access:
3391 return "COALESCE(%s.write_date, %s.create_date, (now() at time zone 'UTC'))::timestamp AS %s" % (self._table, self._table, f,)
3392 return "(now() at time zone 'UTC')::timestamp AS %s" % (f,)
# with bin_size in context, binary fields report their size, not content
3393 if isinstance(self._columns[f], fields.binary) and context.get('bin_size', False):
3394 return 'length(%s) as "%s"' % (f_qual, f)
3397 # Construct a clause for the security rules.
3398 # 'tables' hold the list of tables necessary for the SELECT including the ir.rule clauses,
3399 # or will at least contain self._table.
3400 rule_clause, rule_params, tables = self.pool.get('ir.rule').domain_get(cr, user, self._name, 'read', context=context)
3402 fields_pre2 = map(convert_field, fields_pre)
3403 order_by = self._parent_order or self._order
3404 select_fields = ','.join(fields_pre2 + ['%s.id' % self._table])
3405 query = 'SELECT %s FROM %s WHERE %s.id IN %%s' % (select_fields, ','.join(tables), self._table)
3407 query += " AND " + (' OR '.join(rule_clause))
3408 query += " ORDER BY " + order_by
# chunked execution keeps the IN (...) lists within sane bounds
3409 for sub_ids in cr.split_for_in_conditions(ids):
3410 cr.execute(query, [tuple(sub_ids)] + rule_params)
3411 results = cr.dictfetchall()
3412 result_ids = [x['id'] for x in results]
3413 self._check_record_rules_result_count(cr, user, sub_ids, result_ids, 'read', context=context)
3416 self.check_access_rule(cr, user, ids, 'read', context=context)
3417 res = map(lambda x: {'id': x}, ids)
# Overlay translated values for translatable stored fields.
3419 if context.get('lang'):
3420 for f in fields_pre:
3421 if f == self.CONCURRENCY_CHECK_FIELD:
3423 if self._columns[f].translate:
3424 ids = [x['id'] for x in res]
3425 #TODO: optimize out of this loop
3426 res_trans = self.pool.get('ir.translation')._get_ids(cr, user, self._name+','+f, 'model', context['lang'], ids)
3428 r[f] = res_trans.get(r['id'], False) or r[f]
# Fetch fields delegated to _inherits parent tables via the parent records.
3430 for table in self._inherits:
3431 col = self._inherits[table]
3432 cols = [x for x in intersect(self._inherit_fields.keys(), fields_to_read) if x not in self._columns.keys()]
3435 res2 = self.pool[table].read(cr, user, [x[col] for x in res], cols, context, load)
3443 if not record[col]: # if the record is deleted from _inherits table?
3445 record.update(res3[record[col]])
3446 if col not in fields_to_read:
3449 # all fields which need to be post-processed by a simple function (symbol_get)
3450 fields_post = filter(lambda x: x in self._columns and self._columns[x]._symbol_get, fields_to_read)
3453 for f in fields_post:
3454 r[f] = self._columns[f]._symbol_get(r[f])
3455 ids = [x['id'] for x in res]
3457 # all non inherited fields for which the attribute whose name is in load is False
3458 fields_post = filter(lambda x: x in self._columns and not getattr(self._columns[x], load), fields_to_read)
3460 # Compute POST fields
# group function fields by their _multi key so they compute in one call
3462 for f in fields_post:
3463 todo.setdefault(self._columns[f]._multi, [])
3464 todo[self._columns[f]._multi].append(f)
3465 for key, val in todo.items():
3467 res2 = self._columns[val[0]].get(cr, self, ids, val, user, context=context, values=res)
3468 assert res2 is not None, \
3469 'The function field "%s" on the "%s" model returned None\n' \
3470 '(a dictionary was expected).' % (val[0], self._name)
3473 if isinstance(res2[record['id']], str): res2[record['id']] = eval(res2[record['id']]) #TOCHECK : why got string instend of dict in python2.6
3474 multi_fields = res2.get(record['id'],{})
3476 record[pos] = multi_fields.get(pos,[])
3479 res2 = self._columns[f].get(cr, self, ids, f, user, context=context, values=res)
3482 record[f] = res2[record['id']]
3486 # Warn about deprecated fields now that fields_pre and fields_post are computed
3487 # Explicitly use list() because we may receive tuples
3488 for f in list(fields_pre) + list(fields_post):
3489 field_column = self._all_columns.get(f) and self._all_columns.get(f).column
3490 if field_column and field_column.deprecated:
3491 _logger.warning('Field %s.%s is deprecated: %s', self._name, f, field_column.deprecated)
# NOTE(review): the loop below appears to mask values of fields restricted
# by a field-level `groups` attribute when the user belongs to none of the
# listed groups — TODO confirm against the elided lines.
3495 for field in vals.copy():
3497 if field in self._columns:
3498 fobj = self._columns[field]
3504 for group in groups:
3505 module = group.split(".")[0]
3506 grp = group.split(".")[1]
# parameterized query — the group name/module are passed as SQL params
3507 cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name=%s and module=%s and model=%s) and uid=%s", \
3508 (grp, module, 'res.groups', user))
3509 readonly = cr.fetchall()
3510 if readonly[0][0] >= 1:
3513 elif readonly[0][0] == 0:
3519 if type(vals[field]) == type([]):
3521 elif type(vals[field]) == type(0.0):
3523 elif type(vals[field]) == type(''):
3524 vals[field] = '=No Permission='
3528 if vals[field] is None:
3533 # TODO check READ access
# Return creation/modification metadata (and xmlid) for the given records.
# NOTE(review): elided listing — the `fields` initialization, the result
# loop header (`for r in res:` / `for key in fields:`) and the try/except
# around name_get are among the lines not visible here.
3534 def perm_read(self, cr, user, ids, context=None, details=True):
3536 Returns some metadata about the given records.
3538 :param details: if True, \*_uid fields are replaced with the name of the user
3539 :return: list of ownership dictionaries for each requested record
3540 :rtype: list of dictionaries with the following keys:
3543 * create_uid: user who created the record
3544 * create_date: date when the record was created
3545 * write_uid: last user who changed the record
3546 * write_date: date of the last change to the record
3547 * xmlid: XML ID to use to refer to this record (if there is one), in format ``module.name``
# remember whether a single id was passed, presumably to unwrap the result
3554 uniq = isinstance(ids, (int, long))
3558 if self._log_access:
3559 fields += ['create_uid', 'create_date', 'write_uid', 'write_date']
3560 quoted_table = '"%s"' % self._table
3561 fields_str = ",".join('%s.%s'%(quoted_table, field) for field in fields)
# LEFT JOIN against ir_model_data to recover the record's external XML ID
3562 query = '''SELECT %s, __imd.module, __imd.name
3563 FROM %s LEFT JOIN ir_model_data __imd
3564 ON (__imd.model = %%s and __imd.res_id = %s.id)
3565 WHERE %s.id IN %%s''' % (fields_str, quoted_table, quoted_table, quoted_table)
3566 cr.execute(query, (self._name, tuple(ids)))
3567 res = cr.dictfetchall()
3570 r[key] = r[key] or False
3571 if details and key in ('write_uid', 'create_uid') and r[key]:
3573 r[key] = self.pool.get('res.users').name_get(cr, user, [r[key]])[0]
3575 pass # Leave the numeric uid there
3576 r['xmlid'] = ("%(module)s.%(name)s" % r) if r['name'] else False
3577 del r['name'], r['module']
# Raise a ConcurrencyException if any record was modified in the DB after
# the timestamp the client stored in the context under
# CONCURRENCY_CHECK_FIELD (optimistic concurrency check).
# NOTE(review): elided listing — `ids_to_check` initialization, the inner
# loop over sub_ids and the fetch of `res` are not visible here.
3582 def _check_concurrency(self, cr, ids, context):
# skip entirely when the client sent no check data or the model keeps no log
3585 if not (context.get(self.CONCURRENCY_CHECK_FIELD) and self._log_access):
3587 check_clause = "(id = %s AND %s < COALESCE(write_date, create_date, (now() at time zone 'UTC'))::timestamp)"
3588 for sub_ids in cr.split_for_in_conditions(ids):
3591 id_ref = "%s,%s" % (self._name, id)
3592 update_date = context[self.CONCURRENCY_CHECK_FIELD].pop(id_ref, None)
3594 ids_to_check.extend([id, update_date])
3595 if not ids_to_check:
# NOTE(review): `len(ids_to_check)/2` relies on Python 2 integer division
3597 cr.execute("SELECT id FROM %s WHERE %s" % (self._table, " OR ".join([check_clause]*(len(ids_to_check)/2))), tuple(ids_to_check))
3600 # mention the first one only to keep the error message readable
3601 raise except_orm('ConcurrencyException', _('A document was modified since you last viewed it (%s:%d)') % (self._description, res[0]))
# Distinguish "hidden by record rules" from "record no longer exists" after
# an ir.rule-filtered query, and raise the matching exception.
3603 def _check_record_rules_result_count(self, cr, uid, ids, result_ids, operation, context=None):
3604 """Verify the returned rows after applying record rules matches
3605 the length of `ids`, and raise an appropriate exception if it does not.
3607 ids, result_ids = set(ids), set(result_ids)
3608 missing_ids = ids - result_ids
3610 # Attempt to distinguish record rule restriction vs deleted records,
3611 # to provide a more specific error message - check if the missing
3612 cr.execute('SELECT id FROM ' + self._table + ' WHERE id IN %s', (tuple(missing_ids),))
3613 forbidden_ids = [x[0] for x in cr.fetchall()]
3615 # the missing ids are (at least partially) hidden by access rules
# the superuser bypasses rules, so reaching here with SUPERUSER_ID is odd
3616 if uid == SUPERUSER_ID:
3618 _logger.warning('Access Denied by record rules for operation: %s on record ids: %r, uid: %s, model: %s', operation, forbidden_ids, uid, self._name)
3619 raise except_orm(_('Access Denied'),
3620 _('The requested operation cannot be completed due to security restrictions. Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
3621 (self._description, operation))
3623 # If we get here, the missing_ids are not in the database
3624 if operation in ('read','unlink'):
3625 # No need to warn about deleting an already deleted record.
3626 # And no error when reading a record that was deleted, to prevent spurious
3627 # errors for non-transactional search/read sequences coming from clients
3629 _logger.warning('Failed operation on deleted record(s): %s, uid: %s, model: %s', operation, uid, self._name)
3630 raise except_orm(_('Missing document(s)'),
3631 _('One of the documents you are trying to access has been deleted, please try again after refreshing.'))
def check_access_rights(self, cr, uid, operation, raise_exception=True): # no context on purpose.
    """Check that ``operation`` is permitted for user ``uid`` on this model.

    Delegates the decision to the ``ir.model.access`` ACL table.  When
    ``raise_exception`` is True an exception is raised on refusal;
    otherwise the result of the ACL check is returned as-is.
    """
    access_model = self.pool.get('ir.model.access')
    return access_model.check(cr, uid, self._name, operation, raise_exception)
# Enforce ir.rule record-level security for `operation` on `ids`; transient
# models instead get a hardcoded owner-only rule.
# NOTE(review): elided listing — the early `return` for the superuser, the
# FROM line of the transient-model query, and the `if where_clause:` guard
# are among the lines not visible here.
3639 def check_access_rule(self, cr, uid, ids, operation, context=None):
3640 """Verifies that the operation given by ``operation`` is allowed for the user
3641 according to ir.rules.
3643 :param operation: one of ``write``, ``unlink``
3644 :raise except_orm: * if current ir.rules do not permit this operation.
3645 :return: None if the operation is allowed
# superuser bypasses record rules entirely
3647 if uid == SUPERUSER_ID:
3650 if self.is_transient():
3651 # Only one single implicit access rule for transient models: owner only!
3652 # This is ok to hardcode because we assert that TransientModels always
3653 # have log_access enabled so that the create_uid column is always there.
3654 # And even with _inherits, these fields are always present in the local
3655 # table too, so no need for JOINs.
3656 cr.execute("""SELECT distinct create_uid
3658 WHERE id IN %%s""" % self._table, (tuple(ids),))
3659 uids = [x[0] for x in cr.fetchall()]
3660 if len(uids) != 1 or uids[0] != uid:
3661 raise except_orm(_('Access Denied'),
3662 _('For this kind of document, you may only access records you created yourself.\n\n(Document type: %s)') % (self._description,))
3664 where_clause, where_params, tables = self.pool.get('ir.rule').domain_get(cr, uid, self._name, operation, context=context)
3666 where_clause = ' and ' + ' and '.join(where_clause)
# check rule compliance chunk by chunk; any shortfall raises below
3667 for sub_ids in cr.split_for_in_conditions(ids):
3668 cr.execute('SELECT ' + self._table + '.id FROM ' + ','.join(tables) +
3669 ' WHERE ' + self._table + '.id IN %s' + where_clause,
3670 [sub_ids] + where_params)
3671 returned_ids = [x['id'] for x in cr.dictfetchall()]
3672 self._check_record_rules_result_count(cr, uid, sub_ids, returned_ids, operation, context=context)
3674 def create_workflow(self, cr, uid, ids, context=None):
3675 """Create a workflow instance for each given record IDs."""
3676 from openerp import workflow
# NOTE(review): the loop header binding `res_id` over `ids` (original
# line 3677) is elided from this listing.
3678 workflow.trg_create(uid, self._name, res_id, cr)
3681 def delete_workflow(self, cr, uid, ids, context=None):
3682 """Delete the workflow instances bound to the given record IDs."""
3683 from openerp import workflow
# NOTE(review): the loop header binding `res_id` over `ids` (original
# line 3684) is elided from this listing.
3685 workflow.trg_delete(uid, self._name, res_id, cr)
3688 def step_workflow(self, cr, uid, ids, context=None):
3689 """Reevaluate the workflow instances of the given record IDs."""
3690 from openerp import workflow
# NOTE(review): the loop header binding `res_id` over `ids` (original
# line 3691) is elided from this listing.
3692 workflow.trg_write(uid, self._name, res_id, cr)
3695 def signal_workflow(self, cr, uid, ids, signal, context=None):
3696 """Send given workflow signal and return a dict mapping ids to workflow results"""
3697 from openerp import workflow
# NOTE(review): the `result = {}` initialization, the loop header over
# `ids` and the final `return result` are elided from this listing.
3700 result[res_id] = workflow.trg_validate(uid, self._name, res_id, signal, cr)
def redirect_workflow(self, cr, uid, old_new_ids, context=None):
    """Rebind existing workflow instances from old record ids to new ones.

    ``old_new_ids`` is an iterable of ``(old_id, new_id)`` pairs; each
    pair is forwarded to ``workflow.trg_redirect``.
    """
    from openerp import workflow
    for previous_id, replacement_id in old_new_ids:
        workflow.trg_redirect(uid, self._name, previous_id, replacement_id, cr)
# Delete records: concurrency + ACL + rule checks, property cleanup,
# workflow deletion, row deletion, dangling ir_model_data/ir_values
# cleanup, then recomputation of dependent stored function fields.
# NOTE(review): elided listing — e.g. the `ids = [ids]` normalization, the
# closing of the `domain` list, and several guards are not visible here.
3712 def unlink(self, cr, uid, ids, context=None):
3714 Delete records with given ids
3716 :param cr: database cursor
3717 :param uid: current user id
3718 :param ids: id or list of ids
3719 :param context: (optional) context arguments, like lang, time zone
3721 :raise AccessError: * if user has no unlink rights on the requested object
3722 * if user tries to bypass access rules for unlink on the requested object
3723 :raise UserError: if the record is default property for other records
3728 if isinstance(ids, (int, long)):
# capture stored-field dependencies BEFORE rows disappear
3731 result_store = self._store_get_values(cr, uid, ids, self._all_columns.keys(), context)
3733 self._check_concurrency(cr, ids, context)
3735 self.check_access_rights(cr, uid, 'unlink')
3737 ir_property = self.pool.get('ir.property')
3739 # Check if the records are used as default properties.
3740 domain = [('res_id', '=', False),
3741 ('value_reference', 'in', ['%s,%s' % (self._name, i) for i in ids]),
3743 if ir_property.search(cr, uid, domain, context=context):
3744 raise except_orm(_('Error'), _('Unable to delete this document because it is used as a default property'))
3746 # Delete the records' properties.
3747 property_ids = ir_property.search(cr, uid, [('res_id', 'in', ['%s,%s' % (self._name, i) for i in ids])], context=context)
3748 ir_property.unlink(cr, uid, property_ids, context=context)
3750 self.delete_workflow(cr, uid, ids, context=context)
3752 self.check_access_rule(cr, uid, ids, 'unlink', context=context)
3753 pool_model_data = self.pool.get('ir.model.data')
3754 ir_values_obj = self.pool.get('ir.values')
3755 for sub_ids in cr.split_for_in_conditions(ids):
3756 cr.execute('delete from ' + self._table + ' ' \
3757 'where id IN %s', (sub_ids,))
3759 # Removing the ir_model_data reference if the record being deleted is a record created by xml/csv file,
3760 # as these are not connected with real database foreign keys, and would be dangling references.
3761 # Note: following steps performed as admin to avoid access rights restrictions, and with no context
3762 # to avoid possible side-effects during admin calls.
3763 # Step 1. Calling unlink of ir_model_data only for the affected IDS
3764 reference_ids = pool_model_data.search(cr, SUPERUSER_ID, [('res_id','in',list(sub_ids)),('model','=',self._name)])
3765 # Step 2. Marching towards the real deletion of referenced records
3767 pool_model_data.unlink(cr, SUPERUSER_ID, reference_ids)
3769 # For the same reason, removing the record relevant to ir_values
3770 ir_value_ids = ir_values_obj.search(cr, uid,
3771 ['|',('value','in',['%s,%s' % (self._name, sid) for sid in sub_ids]),'&',('res_id','in',list(sub_ids)),('model','=',self._name)],
3774 ir_values_obj.unlink(cr, uid, ir_value_ids, context=context)
# Recompute stored function fields that depended on the deleted rows,
# excluding the deleted ids themselves when the target model is self.
3776 for order, obj_name, store_ids, fields in result_store:
3777 if obj_name == self._name:
3778 effective_store_ids = list(set(store_ids) - set(ids))
3780 effective_store_ids = store_ids
3781 if effective_store_ids:
3782 obj = self.pool[obj_name]
# keep only ids that still exist before recomputation
3783 cr.execute('select id from '+obj._table+' where id IN %s', (tuple(effective_store_ids),))
3784 rids = map(lambda x: x[0], cr.fetchall())
3786 obj._store_set_values(cr, uid, rids, fields, context)
# Update records: field-level group checks, direct SQL UPDATE for classic
# columns, translation handling, field `set()` callbacks, _inherits parent
# writes, parent_left/right maintenance and stored-field recomputation.
# NOTE(review): elided listing — many loop headers, else-branches and
# variable initializations (upd0/upd1/direct/updend/todo, `for r in res`,
# etc.) are not visible here.
3793 def write(self, cr, user, ids, vals, context=None):
3795 Update records with given ids with the given field values
3797 :param cr: database cursor
3798 :param user: current user id
3800 :param ids: object id or list of object ids to update according to **vals**
3801 :param vals: field values to update, e.g {'field_name': new_field_value, ...}
3802 :type vals: dictionary
3803 :param context: (optional) context arguments, e.g. {'lang': 'en_us', 'tz': 'UTC', ...}
3804 :type context: dictionary
3806 :raise AccessError: * if user has no write rights on the requested object
3807 * if user tries to bypass access rules for write on the requested object
3808 :raise ValidateError: if user tries to enter invalid value for a field that is not in selection
3809 :raise UserError: if a loop would be created in a hierarchy of objects a result of the operation (such as setting an object as its own parent)
3811 **Note**: The type of field values to pass in ``vals`` for relationship fields is specific:
3813 + For a many2many field, a list of tuples is expected.
3814 Here is the list of tuple that are accepted, with the corresponding semantics ::
3816 (0, 0, { values }) link to a new record that needs to be created with the given values dictionary
3817 (1, ID, { values }) update the linked record with id = ID (write *values* on it)
3818 (2, ID) remove and delete the linked record with id = ID (calls unlink on ID, that will delete the object completely, and the link to it as well)
3819 (3, ID) cut the link to the linked record with id = ID (delete the relationship between the two objects but does not delete the target object itself)
3820 (4, ID) link to existing record with id = ID (adds a relationship)
3821 (5) unlink all (like using (3,ID) for all linked records)
3822 (6, 0, [IDs]) replace the list of linked IDs (like using (5) then (4,ID) for each ID in the list of IDs)
3825 [(6, 0, [8, 5, 6, 4])] sets the many2many to ids [8, 5, 6, 4]
3827 + For a one2many field, a list of tuples is expected.
3828 Here is the list of tuple that are accepted, with the corresponding semantics ::
3830 (0, 0, { values }) link to a new record that needs to be created with the given values dictionary
3831 (1, ID, { values }) update the linked record with id = ID (write *values* on it)
3832 (2, ID) remove and delete the linked record with id = ID (calls unlink on ID, that will delete the object completely, and the link to it as well)
3835 [(0, 0, {'field_name':field_value_record1, ...}), (0, 0, {'field_name':field_value_record2, ...})]
3837 + For a many2one field, simply use the ID of target record, which must already exist, or ``False`` to remove the link.
3838 + For a reference field, use a string with the model name, a comma, and the target object id (example: ``'product.product, 5'``)
3842 self.check_field_access_rights(cr, user, 'write', vals.keys())
# NOTE(review): this loop appears to strip fields restricted by a
# field-level `groups` attribute when the user is in none of the groups —
# TODO confirm against the elided lines.
3843 for field in vals.copy():
3845 if field in self._columns:
3846 fobj = self._columns[field]
3847 elif field in self._inherit_fields:
3848 fobj = self._inherit_fields[field][2]
3855 for group in groups:
3856 module = group.split(".")[0]
3857 grp = group.split(".")[1]
# parameterized query — group/module/user passed as SQL params
3858 cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name=%s and module=%s and model=%s) and uid=%s", \
3859 (grp, module, 'res.groups', user))
3860 readonly = cr.fetchall()
3861 if readonly[0][0] >= 1:
3872 if isinstance(ids, (int, long)):
3875 self._check_concurrency(cr, ids, context)
3876 self.check_access_rights(cr, user, 'write')
# collect stored-field recomputation triggers for the written fields
3878 result = self._store_get_values(cr, user, ids, vals.keys(), context) or []
3880 # No direct update of parent_left/right
3881 vals.pop('parent_left', None)
3882 vals.pop('parent_right', None)
3884 parents_changed = []
3885 parent_order = self._parent_order or self._order
3886 if self._parent_store and (self._parent_name in vals):
3887 # The parent_left/right computation may take up to
3888 # 5 seconds. No need to recompute the values if the
3889 # parent is the same.
3890 # Note: to respect parent_order, nodes must be processed in
3891 # order, so ``parents_changed`` must be ordered properly.
3892 parent_val = vals[self._parent_name]
3894 query = "SELECT id FROM %s WHERE id IN %%s AND (%s != %%s OR %s IS NULL) ORDER BY %s" % \
3895 (self._table, self._parent_name, self._parent_name, parent_order)
3896 cr.execute(query, (tuple(ids), parent_val))
3898 query = "SELECT id FROM %s WHERE id IN %%s AND (%s IS NOT NULL) ORDER BY %s" % \
3899 (self._table, self._parent_name, parent_order)
3900 cr.execute(query, (tuple(ids),))
3901 parents_changed = map(operator.itemgetter(0), cr.fetchall())
3908 totranslate = context.get('lang', False) and (context['lang'] != 'en_US')
3910 field_column = self._all_columns.get(field) and self._all_columns.get(field).column
3911 if field_column and field_column.deprecated:
3912 _logger.warning('Field %s.%s is deprecated: %s', self._name, field, field_column.deprecated)
3913 if field in self._columns:
# classic stored columns go through one direct SQL UPDATE
3914 if self._columns[field]._classic_write and not (hasattr(self._columns[field], '_fnct_inv')):
3915 if (not totranslate) or not self._columns[field].translate:
3916 upd0.append('"'+field+'"='+self._columns[field]._symbol_set[0])
3917 upd1.append(self._columns[field]._symbol_set[1](vals[field]))
3918 direct.append(field)
3920 upd_todo.append(field)
3922 updend.append(field)
3923 if field in self._columns \
3924 and hasattr(self._columns[field], 'selection') \
3926 self._check_selection_field_value(cr, user, field, vals[field], context=context)
3928 if self._log_access:
3929 upd0.append('write_uid=%s')
3930 upd0.append("write_date=(now() at time zone 'UTC')")
3934 self.check_access_rule(cr, user, ids, 'write', context=context)
3935 for sub_ids in cr.split_for_in_conditions(ids):
3936 cr.execute('update ' + self._table + ' set ' + ','.join(upd0) + ' ' \
3937 'where id IN %s', upd1 + [sub_ids])
# a shortfall means some rows vanished between the rule check and here
3938 if cr.rowcount != len(sub_ids):
3939 raise except_orm(_('AccessError'),
3940 _('One of the records you are trying to modify has already been deleted (Document type: %s).') % self._description)
3945 if self._columns[f].translate:
3946 src_trans = self.pool[self._name].read(cr, user, ids, [f])[0][f]
3949 # Inserting value to DB
# write the raw (source-language) value without lang to store it as source
3950 context_wo_lang = dict(context, lang=None)
3951 self.write(cr, user, ids, {f: vals[f]}, context=context_wo_lang)
3952 self.pool.get('ir.translation')._set_ids(cr, user, self._name+','+f, 'model', context['lang'], ids, vals[f], src_trans)
3955 # call the 'set' method of fields which are not classic_write
3956 upd_todo.sort(lambda x, y: self._columns[x].priority-self._columns[y].priority)
3958 # default element in context must be removed when calling a one2many or many2many
3959 rel_context = context.copy()
3960 for c in context.items():
3961 if c[0].startswith('default_'):
3962 del rel_context[c[0]]
3964 for field in upd_todo:
3966 result += self._columns[field].set(cr, self, id, field, vals[field], user, context=rel_context) or []
# dispatch remaining fields to the _inherits parent models
3968 unknown_fields = updend[:]
3969 for table in self._inherits:
3970 col = self._inherits[table]
3972 for sub_ids in cr.split_for_in_conditions(ids):
3973 cr.execute('select distinct "'+col+'" from "'+self._table+'" ' \
3974 'where id IN %s', (sub_ids,))
3975 nids.extend([x[0] for x in cr.fetchall()])
3979 if self._inherit_fields[val][0] == table:
3981 unknown_fields.remove(val)
3983 self.pool[table].write(cr, user, nids, v, context)
3987 'No such field(s) in model %s: %s.',
3988 self._name, ', '.join(unknown_fields))
3989 self._validate(cr, user, ids, context)
3991 # TODO: use _order to set dest at the right position and not first node of parent
3992 # We can't defer parent_store computation because the stored function
3993 # fields that are computed may refer (directly or indirectly) to
3994 # parent_left/right (via a child_of domain)
3997 self.pool._init_parent[self._name] = True
3999 order = self._parent_order or self._order
4000 parent_val = vals[self._parent_name]
4002 clause, params = '%s=%%s' % (self._parent_name,), (parent_val,)
4004 clause, params = '%s IS NULL' % (self._parent_name,), ()
# Rewrite the nested-set (parent_left/parent_right) intervals for each
# node whose parent actually changed.
4006 for id in parents_changed:
4007 cr.execute('SELECT parent_left, parent_right FROM %s WHERE id=%%s' % (self._table,), (id,))
4008 pleft, pright = cr.fetchone()
4009 distance = pright - pleft + 1
4011 # Positions of current siblings, to locate proper insertion point;
4012 # this can _not_ be fetched outside the loop, as it needs to be refreshed
4013 # after each update, in case several nodes are sequentially inserted one
4014 # next to the other (i.e computed incrementally)
4015 cr.execute('SELECT parent_right, id FROM %s WHERE %s ORDER BY %s' % (self._table, clause, parent_order), params)
4016 parents = cr.fetchall()
4018 # Find Position of the element
4020 for (parent_pright, parent_id) in parents:
4023 position = parent_pright and parent_pright + 1 or 1
4025 # It's the first node of the parent
4030 cr.execute('select parent_left from '+self._table+' where id=%s', (parent_val,))
4031 position = cr.fetchone()[0] + 1
# moving a node inside its own subtree would corrupt the tree
4033 if pleft < position <= pright:
4034 raise except_orm(_('UserError'), _('Recursivity Detected.'))
4036 if pleft < position:
4037 cr.execute('update '+self._table+' set parent_left=parent_left+%s where parent_left>=%s', (distance, position))
4038 cr.execute('update '+self._table+' set parent_right=parent_right+%s where parent_right>=%s', (distance, position))
4039 cr.execute('update '+self._table+' set parent_left=parent_left+%s, parent_right=parent_right+%s where parent_left>=%s and parent_left<%s', (position-pleft, position-pleft, pleft, pright))
4041 cr.execute('update '+self._table+' set parent_left=parent_left+%s where parent_left>=%s', (distance, position))
4042 cr.execute('update '+self._table+' set parent_right=parent_right+%s where parent_right>=%s', (distance, position))
4043 cr.execute('update '+self._table+' set parent_left=parent_left-%s, parent_right=parent_right-%s where parent_left>=%s and parent_left<%s', (pleft-position+distance, pleft-position+distance, pleft+distance, pright+distance))
4045 result += self._store_get_values(cr, user, ids, vals.keys(), context)
# Recompute stored function fields, deduplicating (model, fields, id)
# combinations already processed.
4049 for order, model_name, ids_to_update, fields_to_recompute in result:
4050 key = (model_name, tuple(fields_to_recompute))
4051 done.setdefault(key, {})
4052 # avoid to do several times the same computation
4054 for id in ids_to_update:
4055 if id not in done[key]:
4056 done[key][id] = True
4058 self.pool[model_name]._store_set_values(cr, user, todo, fields_to_recompute, context)
4060 self.step_workflow(cr, user, ids, context=context)
4064 # TODO: Should set perm to user.xxx
# Create one record: defaults, _inherits parent creation, one INSERT for
# classic columns, parent_left/right insertion, field `set()` callbacks,
# validation, stored-field computation, logging and workflow creation.
# NOTE(review): elided listing — `tocreate`/`columns`/`unknown_fields`
# initializations, several else-branches and the final `return id_new`
# are among the lines not visible here.
4066 def create(self, cr, user, vals, context=None):
4068 Create a new record for the model.
4070 The values for the new record are initialized using the ``vals``
4071 argument, and if necessary the result of ``default_get()``.
4073 :param cr: database cursor
4074 :param user: current user id
4076 :param vals: field values for new record, e.g {'field_name': field_value, ...}
4077 :type vals: dictionary
4078 :param context: optional context arguments, e.g. {'lang': 'en_us', 'tz': 'UTC', ...}
4079 :type context: dictionary
4080 :return: id of new record created
4081 :raise AccessError: * if user has no create rights on the requested object
4082 * if user tries to bypass access rules for create on the requested object
4083 :raise ValidateError: if user tries to enter invalid value for a field that is not in selection
4084 :raise UserError: if a loop would be created in a hierarchy of objects a result of the operation (such as setting an object as its own parent)
4086 **Note**: The type of field values to pass in ``vals`` for relationship fields is specific.
4087 Please see the description of the :py:meth:`~osv.osv.osv.write` method for details about the possible values and how
# transient models piggy-back their garbage collection on create()
4094 if self.is_transient():
4095 self._transient_vacuum(cr, user)
4097 self.check_access_rights(cr, user, 'create')
4099 vals = self._add_missing_default_values(cr, user, vals, context)
# log-access columns are system-managed; silently drop client values
4101 if self._log_access:
4102 for f in LOG_ACCESS_COLUMNS:
4103 if vals.pop(f, None) is not None:
4105 'Field `%s` is not allowed when creating the model `%s`.',
4109 for v in self._inherits:
4110 if self._inherits[v] not in vals:
4113 tocreate[v] = {'id': vals[self._inherits[v]]}
4116 # columns will contain a list of field defined as a tuple
4117 # tuple(field_name, format_string, field_value)
4118 # the tuple will be used by the string formatting for the INSERT
4120 ('id', "nextval('%s')" % self._sequence),
# route inherited-only fields to their parent model's create/write
4125 for v in vals.keys():
4126 if v in self._inherit_fields and v not in self._columns:
4127 (table, col, col_detail, original_parent) = self._inherit_fields[v]
4128 tocreate[table][v] = vals[v]
4131 if (v not in self._inherit_fields) and (v not in self._columns):
4133 unknown_fields.append(v)
4136 'No such field(s) in model %s: %s.',
4137 self._name, ', '.join(unknown_fields))
4139 if not self._sequence:
4142 _('You cannot perform this operation. New Record Creation is not allowed for this object as this object is for reporting purpose.')
4145 for table in tocreate:
4146 if self._inherits[table] in vals:
4147 del vals[self._inherits[table]]
4149 record_id = tocreate[table].pop('id', None)
4151 # When linking/creating parent records, force context without 'no_store_function' key that
4152 # defers stored functions computing, as these won't be computed in batch at the end of create().
4153 parent_context = dict(context)
4154 parent_context.pop('no_store_function', None)
4156 if record_id is None or not record_id:
4157 record_id = self.pool[table].create(cr, user, tocreate[table], context=parent_context)
4159 self.pool[table].write(cr, user, [record_id], tocreate[table], context=parent_context)
4161 columns.append((self._inherits[table], '%s', record_id))
4163 #Start : Set bool fields to be False if they are not touched(to make search more powerful)
4164 bool_fields = [x for x in self._columns.keys() if self._columns[x]._type=='boolean']
4166 for bool_field in bool_fields:
4167 if bool_field not in vals:
4168 vals[bool_field] = False
# NOTE(review): same group-based field filtering as in write() above —
# TODO confirm the elided branches.
4170 for field in vals.copy():
4172 if field in self._columns:
4173 fobj = self._columns[field]
4175 fobj = self._inherit_fields[field][2]
4181 for group in groups:
4182 module = group.split(".")[0]
4183 grp = group.split(".")[1]
# SECURITY NOTE(review): this query is built with Python % string
# interpolation, unlike the parameterized equivalents of the SAME query
# in read()/write() (which pass (grp, module, ...) as SQL parameters).
# grp/module come from field `groups` definitions (developer-controlled),
# but this should still use parameterized SQL for consistency and safety.
4184 cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name='%s' and module='%s' and model='%s') and uid=%s" % \
4185 (grp, module, 'res.groups', user))
4186 readonly = cr.fetchall()
4187 if readonly[0][0] >= 1:
4190 elif readonly[0][0] == 0:
4198 current_field = self._columns[field]
4199 if current_field._classic_write:
4200 columns.append((field, '%s', current_field._symbol_set[1](vals[field])))
4202 #for the function fields that receive a value, we set them directly in the database
4203 #(they may be required), but we also need to trigger the _fct_inv()
4204 if (hasattr(current_field, '_fnct_inv')) and not isinstance(current_field, fields.related):
4205 #TODO: this way to special case the related fields is really creepy but it shouldn't be changed at
4206 #one week of the release candidate. It seems the only good way to handle correctly this is to add an
4207 #attribute to make a field `really readonly´ and thus totally ignored by the create()... otherwise
4208 #if, for example, the related has a default value (for usability) then the fct_inv is called and it
4209 #may raise some access rights error. Changing this is a too big change for now, and is thus postponed
4210 #after the release but, definitively, the behavior shouldn't be different for related and function
4212 upd_todo.append(field)
4214 #TODO: this `if´ statement should be removed because there is no good reason to special case the fields
4215 #related. See the above TODO comment for further explanations.
4216 if not isinstance(current_field, fields.related):
4217 upd_todo.append(field)
4218 if field in self._columns \
4219 and hasattr(current_field, 'selection') \
4221 self._check_selection_field_value(cr, user, field, vals[field], context=context)
4222 if self._log_access:
4223 columns.append(('create_uid', '%s', user))
4224 columns.append(('write_uid', '%s', user))
4225 columns.append(('create_date', "(now() at time zone 'UTC')"))
4226 columns.append(('write_date', "(now() at time zone 'UTC')"))
4228 # the list of tuples used in this formatting corresponds to
4229 # tuple(field_name, format, value)
4230 # In some case, for example (id, create_date, write_date) we do not
4231 # need to read the third value of the tuple, because the real value is
4232 # encoded in the second value (the format).
4234 """INSERT INTO "%s" (%s) VALUES(%s) RETURNING id""" % (
4236 ', '.join('"%s"' % f[0] for f in columns),
4237 ', '.join(f[1] for f in columns)
4239 tuple([f[2] for f in columns if len(f) > 2])
4242 id_new, = cr.fetchone()
4243 upd_todo.sort(lambda x, y: self._columns[x].priority-self._columns[y].priority)
# Insert the new node into the nested-set (parent_left/right) structure.
4245 if self._parent_store and not context.get('defer_parent_store_computation'):
4247 self.pool._init_parent[self._name] = True
4249 parent = vals.get(self._parent_name, False)
4251 cr.execute('select parent_right from '+self._table+' where '+self._parent_name+'=%s order by '+(self._parent_order or self._order), (parent,))
4253 result_p = cr.fetchall()
4254 for (pleft,) in result_p:
4259 cr.execute('select parent_left from '+self._table+' where id=%s', (parent,))
4260 pleft_old = cr.fetchone()[0]
4263 cr.execute('select max(parent_right) from '+self._table)
4264 pleft = cr.fetchone()[0] or 0
# shift everything right of the insertion point by 2, then claim the gap
4265 cr.execute('update '+self._table+' set parent_left=parent_left+2 where parent_left>%s', (pleft,))
4266 cr.execute('update '+self._table+' set parent_right=parent_right+2 where parent_right>%s', (pleft,))
4267 cr.execute('update '+self._table+' set parent_left=%s,parent_right=%s where id=%s', (pleft+1, pleft+2, id_new))
4269 # default elements in context must be removed when calling a one2many or many2many
4270 rel_context = context.copy()
4271 for c in context.items():
4272 if c[0].startswith('default_'):
4273 del rel_context[c[0]]
4276 for field in upd_todo:
4277 result += self._columns[field].set(cr, self, id_new, field, vals[field], user, rel_context) or []
4278 self._validate(cr, user, [id_new], context)
4280 if not context.get('no_store_function', False):
4281 result += self._store_get_values(cr, user, [id_new],
4282 list(set(vals.keys() + self._inherits.values())),
4286 for order, model_name, ids, fields2 in result:
4287 if not (model_name, ids, fields2) in done:
4288 self.pool[model_name]._store_set_values(cr, user, ids, fields2, context)
4289 done.append((model_name, ids, fields2))
4291 if self._log_create and not (context and context.get('no_store_function', False)):
4292 message = self._description + \
4294 self.name_get(cr, user, [id_new], context=context)[0][1] + \
4295 "' " + _("created.")
4296 self.log(cr, user, id_new, message, True, context=context)
4297 self.check_access_rule(cr, user, [id_new], 'create', context=context)
4298 self.create_workflow(cr, user, [id_new], context=context)
    def browse(self, cr, uid, select, context=None, list_class=None, fields_process=None):
        """Fetch records as objects allowing to use dot notation to browse fields and relations

        :param cr: database cursor
        :param uid: current user id
        :param select: id or list of ids
        :param context: context arguments, like lang, time zone
        :param list_class: optional class used to wrap the returned list of records
        :param fields_process: optional mapping of field post-processing callbacks
        :rtype: object or list of objects requested
        """
        self._list_class = list_class or browse_record_list
        # need to accept ints and longs because ids coming from a method
        # launched by button in the interface have a type long...
        if isinstance(select, (int, long)):
            # single id -> single browse_record; 'cache' is the shared prefetch
            # cache dict handed to every record created here
            return browse_record(cr, uid, select, self, cache, context=context, list_class=self._list_class, fields_process=fields_process)
        elif isinstance(select, list):
            # list of ids -> record list sharing the same cache
            return self._list_class((browse_record(cr, uid, id, self, cache, context=context, list_class=self._list_class, fields_process=fields_process) for id in select), context=context)
        # anything else (e.g. False/None) -> null placeholder record
        return browse_null()
    def _store_get_values(self, cr, uid, ids, fields, context):
        """Returns an ordered list of fields.function to call due to
        an update operation on ``fields`` of records with ``ids``,
        obtained by calling the 'store' triggers of these fields,
        as setup by their 'store' attribute.

        :return: [(priority, model_name, [record_ids,], [function_fields,])]
        """
        if fields is None: fields = []
        stored_functions = self.pool._store_function.get(self._name, [])
        # use indexed names for the details of the stored_functions:
        model_name_, func_field_to_compute_, target_ids_func_, trigger_fields_, priority_ = range(5)
        # only keep store triggers that should be triggered for the ``fields``
        # being written (no trigger_fields means "trigger on any field")
        triggers_to_compute = [f for f in stored_functions \
            if ((not f[trigger_fields_]) or set(fields).intersection(f[trigger_fields_]))]
        target_id_results = {}
        for store_trigger in triggers_to_compute:
            # memoize the target-ids callable results; several triggers often
            # share the same callable, so key the cache on its identity
            target_func_id_ = id(store_trigger[target_ids_func_])
            if not target_func_id_ in target_id_results:
                # use admin user for accessing objects having rules defined on store fields
                target_id_results[target_func_id_] = [i for i in store_trigger[target_ids_func_](self, cr, SUPERUSER_ID, ids, context) if i]
            target_ids = target_id_results[target_func_id_]

            # the compound key must consider the priority and model name
            key = (store_trigger[priority_], store_trigger[model_name_])
            for target_id in target_ids:
                to_compute_map.setdefault(key, {}).setdefault(target_id,set()).add(tuple(store_trigger))

        # Here to_compute_map looks like:
        # { (10, 'model_a') : { target_id1: [ (trigger_1_tuple, trigger_2_tuple) ], ... }
        #   (20, 'model_a') : { target_id2: [ (trigger_3_tuple, trigger_4_tuple) ], ... }
        #   (99, 'model_a') : { target_id1: [ (trigger_5_tuple, trigger_6_tuple) ], ... }

        # Now we need to generate the batch function calls list
        # { (10, 'model_a') : [(10, 'model_a', [record_ids,], [function_fields,])] }
        for ((priority,model), id_map) in to_compute_map.iteritems():
            trigger_ids_maps = {}
            # trigger_ids_maps =
            #   { (function_1_tuple, function_2_tuple) : [target_id1, target_id2, ..] }
            for target_id, triggers in id_map.iteritems():
                trigger_ids_maps.setdefault(tuple(triggers), []).append(target_id)
            for triggers, target_ids in trigger_ids_maps.iteritems():
                call_map.setdefault((priority,model),[]).append((priority, model, target_ids,
                                                                [t[func_field_to_compute_] for t in triggers]))
        ordered_keys = call_map.keys()
        # flatten the per-key call lists, preserving the key ordering
        result = reduce(operator.add, (call_map[k] for k in ordered_keys))
    def _store_set_values(self, cr, uid, ids, fields, context):
        """Calls the fields.function's "implementation function" for all ``fields``, on records with ``ids`` (taking care of
           respecting ``multi`` attributes), and stores the resulting values in the database directly."""
        if self._log_access:
            # NOTE(review): fields collected in field_dict appear to be
            # excluded from recomputation later on — confirm against the
            # store-function trigger definitions (i[5] is a delay in hours)
            cr.execute('select id,write_date from '+self._table+' where id IN %s', (tuple(ids),))
            field_dict.setdefault(r[0], [])
            res_date = time.strptime((r[1])[:19], '%Y-%m-%d %H:%M:%S')
            write_date = datetime.datetime.fromtimestamp(time.mktime(res_date))
            for i in self.pool._store_function.get(self._name, []):
                up_write_date = write_date + datetime.timedelta(hours=i[5])
                if datetime.datetime.now() < up_write_date:
                    field_dict[r[0]].append(i[1])
        # group fields by their '_multi' attribute so multi-fields can be
        # computed with a single function call per group
        if self._columns[f]._multi not in keys:
            keys.append(self._columns[f]._multi)
        todo.setdefault(self._columns[f]._multi, [])
        todo[self._columns[f]._multi].append(f)
        # use admin user for accessing objects having rules defined on store fields
        result = self._columns[val[0]].get(cr, self, ids, val, SUPERUSER_ID, context=context)
        for id, value in result.items():
            for f in value.keys():
                if f in field_dict[id]:
        # many2one values come back as (id, name) pairs; keep only the id
        if self._columns[v]._type == 'many2one':
            value[v] = value[v][0]
        # build SET clause fragments with each column's symbol_set converter
        upd0.append('"'+v+'"='+self._columns[v]._symbol_set[0])
        upd1.append(self._columns[v]._symbol_set[1](value[v]))
        cr.execute('update "' + self._table + '" set ' + \
            ','.join(upd0) + ' where id = %s', upd1)
        # use admin user for accessing objects having rules defined on store fields
        result = self._columns[f].get(cr, self, ids, f, SUPERUSER_ID, context=context)
        for r in result.keys():
            if r in field_dict.keys():
                if f in field_dict[r]:
        for id, value in result.items():
            if self._columns[f]._type == 'many2one':
        # single-field update, converted through the column's symbol_set
        cr.execute('update "' + self._table + '" set ' + \
            '"'+f+'"='+self._columns[f]._symbol_set[0] + ' where id = %s', (self._columns[f]._symbol_set[1](value), id))
    def perm_write(self, cr, user, ids, fields, context=None):
        """Removed API: kept only so old callers fail loudly; always raises."""
        raise NotImplementedError(_('This method does not exist anymore'))
    # TODO: improve the handling of NULL values here
    def _where_calc(self, cr, user, domain, active_test=True, context=None):
        """Computes the WHERE clause needed to implement an OpenERP domain.

        :param domain: the domain to compute
        :param active_test: whether the default filtering of records with ``active``
                            field set to ``False`` should be applied.
        :return: the query expressing the given domain as provided in domain
        :rtype: osv.query.Query
        """
        # if the object has a field named 'active', filter out all inactive
        # records unless they were explicitely asked for
        if 'active' in self._all_columns and (active_test and context.get('active_test', True)):
            # the item[0] trick below works for domain items and '&'/'|'/'!'
            # operators too
            if not any(item[0] == 'active' for item in domain):
                domain.insert(0, ('active', '=', 1))
            # NOTE(review): presumably the fallback when no domain was given —
            # filter on 'active' only; confirm the branch structure
            domain = [('active', '=', 1)]

        e = expression.expression(cr, user, domain, self, context)
        tables = e.get_tables()
        where_clause, where_params = e.to_sql()
        where_clause = where_clause and [where_clause] or []
        # NOTE(review): presumably the no-domain fallback: plain unfiltered
        # access to the model's own table
        where_clause, where_params, tables = [], [], ['"%s"' % self._table]

        return Query(tables, where_clause, where_params)
    def _check_qorder(self, word):
        """Validate an ORDER BY specification against ``regex_order``.

        Raises an except_orm ('AccessError') when ``word`` is not a
        comma-separated list of field names, each optionally followed
        by asc/desc.
        """
        if not regex_order.match(word):
            raise except_orm(_('AccessError'), _('Invalid "order" specified. A valid "order" specification is a comma-separated list of valid field names (optionally followed by asc/desc for the direction)'))
    def _apply_ir_rules(self, cr, uid, query, mode='read', context=None):
        """Add what's missing in ``query`` to implement all appropriate ir.rules
        (using the ``model_name``'s rules or the current model's rules if ``model_name`` is None)

        :param query: the current query object
        """
        # the superuser bypasses record rules entirely
        if uid == SUPERUSER_ID:
        def apply_rule(added_clause, added_params, added_tables, parent_model=None, child_object=None):
            """Merge one rule's (clause, params, tables) triple into ``query``.

            :param string parent_model: string of the parent model
            :param model child_object: model object, base of the rule application
            """
            if parent_model and child_object:
                # as inherited rules are being applied, we need to add the missing JOIN
                # to reach the parent table (if it was not JOINed yet in the query)
                parent_alias = child_object._inherits_join_add(child_object, parent_model, query)
                # inherited rules are applied on the external table -> need to get the alias and replace
                parent_table = self.pool[parent_model]._table
                added_clause = [clause.replace('"%s"' % parent_table, '"%s"' % parent_alias) for clause in added_clause]
                # change references to parent_table to parent_alias, because we now use the alias to refer to the table
                for table in added_tables:
                    # table is just a table name -> switch to the full alias
                    if table == '"%s"' % parent_table:
                        new_tables.append('"%s" as "%s"' % (parent_table, parent_alias))
                    # table is already a full statement -> replace reference to the table to its alias, is correct with the way aliases are generated
                        new_tables.append(table.replace('"%s"' % parent_table, '"%s"' % parent_alias))
                added_tables = new_tables
            query.where_clause += added_clause
            query.where_clause_params += added_params
            for table in added_tables:
                # avoid duplicating a table already present in the FROM list
                if table not in query.tables:
                    query.tables.append(table)

        # apply main rules on the object
        rule_obj = self.pool.get('ir.rule')
        rule_where_clause, rule_where_clause_params, rule_tables = rule_obj.domain_get(cr, uid, self._name, mode, context=context)
        apply_rule(rule_where_clause, rule_where_clause_params, rule_tables)

        # apply ir.rules from the parents (through _inherits)
        for inherited_model in self._inherits:
            rule_where_clause, rule_where_clause_params, rule_tables = rule_obj.domain_get(cr, uid, inherited_model, mode, context=context)
            apply_rule(rule_where_clause, rule_where_clause_params, rule_tables,
                       parent_model=inherited_model, child_object=self)
    def _generate_m2o_order_by(self, order_field, query):
        """
        Add possibly missing JOIN to ``query`` and generate the ORDER BY clause for m2o fields,
        either native m2o fields or function/related fields that are stored, including
        intermediate JOINs for inheritance if required.

        :return: the qualified field name to use in an ORDER BY clause to sort by ``order_field``
        """
        if order_field not in self._columns and order_field in self._inherit_fields:
            # also add missing joins for reaching the table containing the m2o field
            qualified_field = self._inherits_join_calc(order_field, query)
            order_field_column = self._inherit_fields[order_field][2]
            # local column: qualify against the model's own table
            qualified_field = '"%s"."%s"' % (self._table, order_field)
            order_field_column = self._columns[order_field]

        assert order_field_column._type == 'many2one', 'Invalid field passed to _generate_m2o_order_by()'
        if not order_field_column._classic_write and not getattr(order_field_column, 'store', False):
            _logger.debug("Many2one function/related fields must be stored " \
                "to be used as ordering fields! Ignoring sorting for %s.%s",
                self._name, order_field)

        # figure out the applicable order_by for the m2o
        dest_model = self.pool[order_field_column._obj]
        m2o_order = dest_model._order
        if not regex_order.match(m2o_order):
            # _order is complex, can't use it here, so we default to _rec_name
            m2o_order = dest_model._rec_name

        # extract the field names, to be able to qualify them and add desc/asc
        for order_part in m2o_order.split(","):
            m2o_order_list.append(order_part.strip().split(" ", 1)[0].strip())
        m2o_order = m2o_order_list

        # Join the dest m2o table if it's not joined yet. We use [LEFT] OUTER join here
        # as we don't want to exclude results that have NULL values for the m2o
        src_table, src_field = qualified_field.replace('"', '').split('.', 1)
        dst_alias, dst_alias_statement = query.add_join((src_table, dest_model._table, src_field, 'id', src_field), implicit=False, outer=True)
        qualify = lambda field: '"%s"."%s"' % (dst_alias, field)
        return map(qualify, m2o_order) if isinstance(m2o_order, list) else qualify(m2o_order)
    def _generate_order_by(self, order_spec, query):
        """
        Attempt to construct an appropriate ORDER BY clause based on order_spec, which must be
        a comma-separated list of valid field names, optionally followed by an ASC or DESC direction.

        :raise: except_orm (via _check_qorder) when order_spec is malformed,
                ValueError when a field cannot be found on the model
        """
        order_by_clause = ''
        order_spec = order_spec or self._order
        order_by_elements = []
        self._check_qorder(order_spec)
        for order_part in order_spec.split(','):
            order_split = order_part.strip().split(' ')
            order_field = order_split[0].strip()
            order_direction = order_split[1].strip() if len(order_split) == 2 else ''
            # 'id' and the log-access columns live on the model's own table
            if order_field == 'id' or (self._log_access and order_field in LOG_ACCESS_COLUMNS.keys()):
                order_by_elements.append('"%s"."%s" %s' % (self._table, order_field, order_direction))
            elif order_field in self._columns:
                order_column = self._columns[order_field]
                if order_column._classic_read:
                    inner_clause = '"%s"."%s"' % (self._table, order_field)
                elif order_column._type == 'many2one':
                    inner_clause = self._generate_m2o_order_by(order_field, query)
                    continue # ignore non-readable or "non-joinable" fields
            elif order_field in self._inherit_fields:
                # field comes from a parent model through _inherits
                parent_obj = self.pool[self._inherit_fields[order_field][3]]
                order_column = parent_obj._columns[order_field]
                if order_column._classic_read:
                    inner_clause = self._inherits_join_calc(order_field, query)
                elif order_column._type == 'many2one':
                    inner_clause = self._generate_m2o_order_by(order_field, query)
                    continue # ignore non-readable or "non-joinable" fields
                raise ValueError( _("Sorting field %s not found on model %s") %( order_field, self._name))
            # m2o ordering may yield several qualified columns at once
            if isinstance(inner_clause, list):
                for clause in inner_clause:
                    order_by_elements.append("%s %s" % (clause, order_direction))
                order_by_elements.append("%s %s" % (inner_clause, order_direction))
        if order_by_elements:
            order_by_clause = ",".join(order_by_elements)

        return order_by_clause and (' ORDER BY %s ' % order_by_clause) or ''
    def _search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False, access_rights_uid=None):
        """
        Private implementation of search() method, allowing specifying the uid to use for the access right check.
        This is useful for example when filling in the selection list for a drop-down and avoiding access rights errors,
        by specifying ``access_rights_uid=1`` to bypass access rights check, but not ir.rules!
        This is ok at the security level because this method is private and not callable through XML-RPC.

        :param access_rights_uid: optional user ID to use when checking access rights
                                  (not for ir.rules, this is only for ir.model.access)
        """
        self.check_access_rights(cr, access_rights_uid or user, 'read')

        # For transient models, restrict access to the current user, except for the super-user
        if self.is_transient() and self._log_access and user != SUPERUSER_ID:
            args = expression.AND(([('create_uid', '=', user)], args or []))

        query = self._where_calc(cr, user, args, context=context)
        self._apply_ir_rules(cr, user, query, 'read', context=context)
        order_by = self._generate_order_by(order, query)
        from_clause, where_clause, where_clause_params = query.get_sql()

        where_str = where_clause and (" WHERE %s" % where_clause) or ''

        # Ignore order, limit and offset when just counting, they don't make sense and could
        # hurt performance
        query_str = 'SELECT count(1) FROM ' + from_clause + where_str
        cr.execute(query_str, where_clause_params)

        limit_str = limit and ' limit %d' % limit or ''
        offset_str = offset and ' offset %d' % offset or ''
        query_str = 'SELECT "%s".id FROM ' % self._table + from_clause + where_str + order_by + limit_str + offset_str
        cr.execute(query_str, where_clause_params)

        # TDE note: with auto_join, we could have several lines about the same result
        # i.e. a lead with several unread messages; we uniquify the result using
        # a fast way to do it while preserving order (http://www.peterbe.com/plog/uniqifiers-benchmark)
        def _uniquify_list(seq):
            # relies on set.add() returning None so the filter keeps first occurrences
            return [x for x in seq if x not in seen and not seen.add(x)]
        return _uniquify_list([x[0] for x in res])
    # returns the different values ever entered for one field;
    # this is used, for example, in the client when the user hits enter on
    # a search field, to suggest previously entered values
    def distinct_field_get(self, cr, uid, field, value, args=None, offset=0, limit=None):
        """Return the distinct values previously entered for ``field`` (completion helper)."""
        if field in self._inherit_fields:
            # field inherited through _inherits: delegate to the owning parent model
            return self.pool[self._inherit_fields[field][0]].distinct_field_get(cr, uid, field, value, args, offset, limit)
        return self._columns[field].search(cr, self, args, field, value, offset, limit, uid)
    def copy_data(self, cr, uid, id, default=None, context=None):
        """
        Copy given record's data with all its fields values

        :param cr: database cursor
        :param uid: current user id
        :param id: id of the record to copy
        :param default: field values to override in the original values of the copied record
        :type default: dictionary
        :param context: context arguments, like lang, time zone
        :type context: dictionary
        :return: dictionary containing all the field values
        """
        # avoid recursion through already copied records in case of circular relationship
        seen_map = context.setdefault('__copy_data_seen', {})
        if id in seen_map.setdefault(self._name, []):
        seen_map[self._name].append(id)

        # force a default 'state' when the model declares one in _defaults
        if 'state' not in default:
            if 'state' in self._defaults:
                if callable(self._defaults['state']):
                    default['state'] = self._defaults['state'](self, cr, uid, context)
                default['state'] = self._defaults['state']

        # build a black list of fields that should not be copied
        blacklist = set(MAGIC_COLUMNS + ['parent_left', 'parent_right'])
        def blacklist_given_fields(obj):
            # blacklist the fields that are given by inheritance
            for other, field_to_other in obj._inherits.items():
                blacklist.add(field_to_other)
                if field_to_other in default:
                    # all the fields of 'other' are given by the record: default[field_to_other],
                    # except the ones redefined in self
                    blacklist.update(set(self.pool[other]._all_columns) - set(self._columns))
                    blacklist_given_fields(self.pool[other])
            # blacklist deprecated fields
            for name, field in obj._columns.items():
                if field.deprecated:
        blacklist_given_fields(self)

        # keep only plain, non-blacklisted, non-function columns
        fields_to_copy = dict((f,fi) for f, fi in self._all_columns.iteritems()
                              if f not in blacklist
                              if not isinstance(fi.column, fields.function))

        data = self.read(cr, uid, [id], fields_to_copy.keys(), context=context)
            raise IndexError( _("Record #%d of %s not found, cannot copy!") %( id, self._name))

        for f, colinfo in fields_to_copy.iteritems():
            field = colinfo.column
            if field._type == 'many2one':
                # m2o reads as (id, name); keep only the id
                res[f] = data[f] and data[f][0]
            elif field._type == 'one2many':
                other = self.pool[field._obj]
                # duplicate following the order of the ids because we'll rely on
                # it later for copying translations in copy_translation()!
                lines = [other.copy_data(cr, uid, line_id, context=context) for line_id in sorted(data[f])]
                # the lines are duplicated using the wrong (old) parent, but then
                # are reassigned to the correct one thanks to the (0, 0, ...)
                res[f] = [(0, 0, line) for line in lines if line]
            elif field._type == 'many2many':
                # (6, 0, ids) command: replace the whole relation set
                res[f] = [(6, 0, data[f])]
    def copy_translations(self, cr, uid, old_id, new_id, context=None):
        """Duplicate the ir.translation entries of record ``old_id`` onto
        ``new_id``, recursing into one2many children (relying on the id
        ordering produced by copy_data())."""
        # avoid recursion through already copied records in case of circular relationship
        seen_map = context.setdefault('__copy_translations_seen',{})
        if old_id in seen_map.setdefault(self._name,[]):
        seen_map[self._name].append(old_id)

        trans_obj = self.pool.get('ir.translation')
        # TODO it seems fields_get can be replaced by _all_columns (no need for translation)
        fields = self.fields_get(cr, uid, context=context)

        for field_name, field_def in fields.items():
            # removing the lang to compare untranslated values
            context_wo_lang = dict(context, lang=None)
            old_record, new_record = self.browse(cr, uid, [old_id, new_id], context=context_wo_lang)
            # we must recursively copy the translations for o2o and o2m
            if field_def['type'] == 'one2many':
                target_obj = self.pool[field_def['relation']]
                # here we rely on the order of the ids to match the translations
                # as foreseen in copy_data()
                old_children = sorted(r.id for r in old_record[field_name])
                new_children = sorted(r.id for r in new_record[field_name])
                for (old_child, new_child) in zip(old_children, new_children):
                    target_obj.copy_translations(cr, uid, old_child, new_child, context=context)
            # and for translatable fields we keep them for copy
            elif field_def.get('translate'):
                if field_name in self._columns:
                    trans_name = self._name + "," + field_name
                elif field_name in self._inherit_fields:
                    trans_name = self._inherit_fields[field_name][0] + "," + field_name
                    # get the id of the parent record to set the translation
                    inherit_field_name = self._inherit_fields[field_name][1]
                    target_id = new_record[inherit_field_name].id
                    source_id = old_record[inherit_field_name].id

                trans_ids = trans_obj.search(cr, uid, [
                        ('name', '=', trans_name),
                        ('res_id', '=', source_id)
                ])
                user_lang = context.get('lang')
                for record in trans_obj.read(cr, uid, trans_ids, context=context):
                    # remove source to avoid triggering _set_src
                    del record['source']
                    record.update({'res_id': target_id})
                    if user_lang and user_lang == record['lang']:
                        # 'source' to force the call to _set_src
                        # 'value' needed if value is changed in copy(), want to see the new_value
                        record['source'] = old_record[field_name]
                        record['value'] = new_record[field_name]
                    trans_obj.create(cr, uid, record, context=context)
    def copy(self, cr, uid, id, default=None, context=None):
        """
        Duplicate record with given id updating it with default values

        :param cr: database cursor
        :param uid: current user id
        :param id: id of the record to copy
        :param default: dictionary of field values to override in the original values of the copied record, e.g: ``{'field_name': overriden_value, ...}``
        :type default: dictionary
        :param context: context arguments, like lang, time zone
        :type context: dictionary
        :return: id of the newly created record
        """
        # work on a private copy of the context: copy_data()/copy_translations()
        # store bookkeeping keys ('__copy_data_seen', ...) into it
        context = context.copy()
        data = self.copy_data(cr, uid, id, default, context)
        new_id = self.create(cr, uid, data, context)
        self.copy_translations(cr, uid, id, new_id, context)
4868 def exists(self, cr, uid, ids, context=None):
4869 """Checks whether the given id or ids exist in this model,
4870 and return the list of ids that do. This is simple to use for
4871 a truth test on a browse_record::
4876 :param ids: id or list of ids to check for existence
4877 :type ids: int or [int]
4878 :return: the list of ids that currently exist, out of
4881 if type(ids) in (int, long):
4885 query = 'SELECT id FROM "%s"' % self._table
4886 cr.execute(query + "WHERE ID IN %s", (tuple(ids),))
4887 return [x[0] for x in cr.fetchall()]
    def check_recursion(self, cr, uid, ids, context=None, parent=None):
        # Deprecated public wrapper kept for backward compatibility:
        # logs a warning, validates 'parent', then delegates to _check_recursion().
        _logger.warning("You are using deprecated %s.check_recursion(). Please use the '_check_recursion()' instead!" % \
        assert parent is None or parent in self._columns or parent in self._inherit_fields,\
            "The 'parent' parameter passed to check_recursion() must be None or a valid field name"
        return self._check_recursion(cr, uid, ids, context, parent)
    def _check_recursion(self, cr, uid, ids, context=None, parent=None):
        """
        Verifies that there is no loop in a hierarchical structure of records,
        by following the parent relationship using the **parent** field until a loop
        is detected or until a top-level record is found.

        :param cr: database cursor
        :param uid: current user id
        :param ids: list of ids of records to check
        :param parent: optional parent field name (default: ``self._parent_name = parent_id``)
        :return: **True** if the operation can proceed safely, or **False** if an infinite loop is detected.
        """
        parent = self._parent_name

        # must ignore 'active' flag, ir.rules, etc. => direct SQL query
        query = 'SELECT "%s" FROM "%s" WHERE id = %%s' % (parent, self._table)
        # walk up the parent chain; stop on a NULL parent or when the
        # starting id reappears (a cycle)
        while current_id is not None:
            cr.execute(query, (current_id,))
            result = cr.fetchone()
            current_id = result[0] if result else None
            if current_id == id:
    def _check_m2m_recursion(self, cr, uid, ids, field_name):
        """
        Verifies that there is no loop in a hierarchical structure of records,
        by following the parent relationship through the **field_name** m2m field
        until a loop is detected or until a top-level record is found.

        :param cr: database cursor
        :param uid: current user id
        :param ids: list of ids of records to check
        :param field_name: field to check
        :return: **True** if the operation can proceed safely, or **False** if an infinite loop is detected.
        """
        field = self._all_columns.get(field_name)
        field = field.column if field else None
        if not field or field._type != 'many2many' or field._obj != self._name:
            # field must be a many2many on itself
            raise ValueError('invalid field_name: %r' % (field_name,))

        query = 'SELECT distinct "%s" FROM "%s" WHERE "%s" IN %%s' % (field._id2, field._rel, field._id1)
        # breadth-first walk of the relation table, chunked to respect cr.IN_MAX
        for i in range(0, len(ids_parent), cr.IN_MAX):
            sub_ids_parent = ids_parent[i:j]
            cr.execute(query, (tuple(sub_ids_parent),))
            # drop NULL parents before the next level
            ids_parent2.extend(filter(None, map(lambda x: x[0], cr.fetchall())))
        ids_parent = ids_parent2
        for i in ids_parent:
    def _get_external_ids(self, cr, uid, ids, *args, **kwargs):
        """Retrieve the External ID(s) of any database record.

        **Synopsis**: ``_get_xml_ids(cr, uid, ids) -> { 'id': ['module.xml_id'] }``

        :return: map of ids to the list of their fully qualified External IDs
                 in the form ``module.key``, or an empty list when there's no External
                 ID for a record, e.g.::

                     { 'id': ['module.ext_id', 'module.ext_id_bis'],
                       'id2': [] }
        """
        ir_model_data = self.pool.get('ir.model.data')
        data_ids = ir_model_data.search(cr, uid, [('model', '=', self._name), ('res_id', 'in', ids)])
        data_results = ir_model_data.read(cr, uid, data_ids, ['module', 'name', 'res_id'])
        # can't use dict.fromkeys() as the list would be shared!
        for record in data_results:
            result[record['res_id']].append('%(module)s.%(name)s' % record)
    def get_external_id(self, cr, uid, ids, *args, **kwargs):
        """Retrieve the External ID of any database record, if there
        is one. This method works as a possible implementation
        for a function field, to be able to add it to any
        model object easily, referencing it as ``Model.get_external_id``.

        When multiple External IDs exist for a record, only one
        of them is returned (randomly).

        :return: map of ids to their fully qualified XML ID,
                 defaulting to an empty string when there's none
                 (to be usable as a function field), e.g.::

                     { 'id': 'module.ext_id' }
        """
        results = self._get_xml_ids(cr, uid, ids)
        # keep a single External ID per record (or '' when there is none)
        for k, v in results.iteritems():
    # backwards compatibility: keep the older *_xml_id* names as aliases
    get_xml_id = get_external_id
    _get_xml_ids = _get_external_ids
    def print_report(self, cr, uid, ids, name, data, context=None):
        """
        Render the report ``name`` for the given IDs. The report must be defined
        for this model, not another.
        """
        report = self.pool['ir.actions.report.xml']._lookup_report(cr, name)
        # guard against rendering a report registered for a different model
        assert self._name == report.table
        return report.create(cr, uid, ids, data, context)
    def is_transient(self):
        """ Return whether the model is transient.

        See :class:`TransientModel`.
        """
        return self._transient
5027 def _transient_clean_rows_older_than(self, cr, seconds):
5028 assert self._transient, "Model %s is not transient, it cannot be vacuumed!" % self._name
5029 # Never delete rows used in last 5 minutes
5030 seconds = max(seconds, 300)
5031 query = ("SELECT id FROM " + self._table + " WHERE"
5032 " COALESCE(write_date, create_date, (now() at time zone 'UTC'))::timestamp"
5033 " < ((now() at time zone 'UTC') - interval %s)")
5034 cr.execute(query, ("%s seconds" % seconds,))
5035 ids = [x[0] for x in cr.fetchall()]
5036 self.unlink(cr, SUPERUSER_ID, ids)
    def _transient_clean_old_rows(self, cr, max_count):
        """Vacuum the transient table when it holds more than ``max_count`` rows."""
        # Check how many rows we have in the table
        cr.execute("SELECT count(*) AS row_count FROM " + self._table)
        if res[0][0] <= max_count:
            return # max not reached, nothing to do
        # over the limit: delegate to the age-based cleaner (5-minute floor)
        self._transient_clean_rows_older_than(cr, 300)
    def _transient_vacuum(self, cr, uid, force=False):
        """Clean the transient records.

        This unlinks old records from the transient model tables whenever the
        "_transient_max_count" or "_max_age" conditions (if any) are reached.
        Actual cleaning will happen only once every "_transient_check_time" calls.
        This means this method can be called frequently (e.g. whenever
        a new record is created).
        Example with both max_hours and max_count active:
        Suppose max_hours = 0.2 (e.g. 12 minutes), max_count = 20, there are 55 rows in the
        table, 10 created/changed in the last 5 minutes, an additional 12 created/changed between
        5 and 10 minutes ago, the rest created/changed more than 12 minutes ago.
        - age based vacuum will leave the 22 rows created/changed in the last 12 minutes
        - count based vacuum will wipe out another 12 rows. Not just 2, otherwise each addition
          would immediately cause the maximum to be reached again.
        - the 10 rows that have been created/changed the last 5 minutes will NOT be deleted
        """
        assert self._transient, "Model %s is not transient, it cannot be vacuumed!" % self._name
        _transient_check_time = 20          # arbitrary limit on vacuum executions
        self._transient_check_count += 1
        if not force and (self._transient_check_count < _transient_check_time):
            return True # no vacuum cleaning this time
        self._transient_check_count = 0

        # Age-based expiration
        if self._transient_max_hours:
            self._transient_clean_rows_older_than(cr, self._transient_max_hours * 60 * 60)

        # Count-based expiration
        if self._transient_max_count:
            self._transient_clean_old_rows(cr, self._transient_max_count)
5080 def resolve_2many_commands(self, cr, uid, field_name, commands, fields=None, context=None):
5081 """ Serializes one2many and many2many commands into record dictionaries
5082 (as if all the records came from the database via a read()). This
5083 method is aimed at onchange methods on one2many and many2many fields.
5085 Because commands might be creation commands, not all record dicts
5086 will contain an ``id`` field. Commands matching an existing record
5087 will have an ``id``.
5089 :param field_name: name of the one2many or many2many field matching the commands
5090 :type field_name: str
5091 :param commands: one2many or many2many commands to execute on ``field_name``
5092 :type commands: list((int|False, int|False, dict|False))
5093 :param fields: list of fields to read from the database, when applicable
5094 :type fields: list(str)
5095 :returns: records in a shape similar to that returned by ``read()``
5096 (except records may be missing the ``id`` field if they don't exist in db)
5099 result = [] # result (list of dict)
5100 record_ids = [] # ids of records to read
5101 updates = {} # {id: dict} of updates on particular records
5103 for command in commands:
5104 if not isinstance(command, (list, tuple)):
5105 record_ids.append(command)
5106 elif command[0] == 0:
5107 result.append(command[2])
5108 elif command[0] == 1:
5109 record_ids.append(command[1])
5110 updates.setdefault(command[1], {}).update(command[2])
5111 elif command[0] in (2, 3):
5112 record_ids = [id for id in record_ids if id != command[1]]
5113 elif command[0] == 4:
5114 record_ids.append(command[1])
5115 elif command[0] == 5:
5116 result, record_ids = [], []
5117 elif command[0] == 6:
5118 result, record_ids = [], list(command[2])
5120 # read the records and apply the updates
5121 other_model = self.pool[self._all_columns[field_name].column._obj]
5122 for record in other_model.read(cr, uid, record_ids, fields=fields, context=context):
5123 record.update(updates.get(record['id'], {}))
5124 result.append(record)
    # for backward compatibility: legacy name of resolve_2many_commands()
    resolve_o2m_commands_to_record_dicts = resolve_2many_commands
5131 def search_read(self, cr, uid, domain=None, fields=None, offset=0, limit=None, order=None, context=None):
5133 Performs a ``search()`` followed by a ``read()``.
5135 :param cr: database cursor
5136 :param user: current user id
5137 :param domain: Search domain, see ``args`` parameter in ``search()``. Defaults to an empty domain that will match all records.
5138 :param fields: List of fields to read, see ``fields`` parameter in ``read()``. Defaults to all fields.
5139 :param offset: Number of records to skip, see ``offset`` parameter in ``search()``. Defaults to 0.
5140 :param limit: Maximum number of records to return, see ``limit`` parameter in ``search()``. Defaults to no limit.
5141 :param order: Columns to sort result, see ``order`` parameter in ``search()``. Defaults to no sort.
5142 :param context: context arguments.
5143 :return: List of dictionaries containing the asked fields.
5144 :rtype: List of dictionaries.
5147 record_ids = self.search(cr, uid, domain or [], offset=offset, limit=limit, order=order, context=context)
5151 if fields and fields == ['id']:
5152 # shortcut read if we only want the ids
5153 return [{'id': id} for id in record_ids]
5155 # read() ignores active_test, but it would forward it to any downstream search call
5156 # (e.g. for x2m or function fields), and this is not the desired behavior, the flag
5157 # was presumably only meant for the main search().
5158 # TODO: Move this to read() directly?
5159 read_ctx = dict(context or {})
5160 read_ctx.pop('active_test', None)
5162 result = self.read(cr, uid, record_ids, fields, context=read_ctx)
5163 if len(result) <= 1:
5167 index = dict((r['id'], r) for r in result)
5168 return [index[x] for x in record_ids if x in index]
    def _register_hook(self, cr):
        """Hook executed right after the registry is built; no-op by default.

        :param cr: database cursor
        """
5174 def __getattr__(self, name):
5175 if name.startswith('signal_'):
5176 signal_name = name[len('signal_'):]
5178 return (lambda *args, **kwargs:
5179 self.signal_workflow(*args, signal=signal_name, **kwargs))
5180 get = getattr(super(BaseModel, self), '__getattr__', None)
5181 if get is not None: return get(name)
5182 raise AttributeError(
5183 "'%s' object has no attribute '%s'" % (type(self).__name__, name))
# keep this import here; importing it at the top of the file would cause
# dependency cycle errors
class Model(BaseModel):
    """Main super-class for regular database-persisted OpenERP models.

    OpenERP models are created by inheriting from this class::

        class user(Model):
            ...

    The system will later instantiate the class once per database (on
    which the class' module is installed).
    """
    _register = False # not visible in ORM registry, meant to be python-inherited only
    _transient = False # True in a TransientModel
class TransientModel(BaseModel):
    """Model super-class for transient records, meant to be temporarily
    persisted, and regularly vacuum-cleaned.

    A TransientModel has a simplified access rights management,
    all users can create new records, and may only access the
    records they created. The super-user has unrestricted access
    to all TransientModel records.
    """
    _register = False # not visible in ORM registry, meant to be python-inherited only
    # fix: the transient flag was missing, which would leave this class
    # indistinguishable from a regular Model and defeat the vacuum logic
    _transient = True
class AbstractModel(BaseModel):
    """Abstract Model super-class for creating an abstract class meant to be
    inherited by regular models (Models or TransientModels) but not meant to
    be usable on its own, or persisted.

    Technical note: we don't want to make AbstractModel the super-class of
    Model or BaseModel because it would not make sense to put the main
    definition of persistence methods such as create() in it, and still we
    should be able to override them within an AbstractModel.
    """
    _auto = False # don't create any database backend for AbstractModels
    _register = False # not visible in ORM registry, meant to be python-inherited only
def itemgetter_tuple(items):
    """ Fixes itemgetter inconsistency (useful in some cases) of not returning
    a tuple if len(items) == 1: always returns an n-tuple where n = len(items)

    :param items: sequence of indexes/keys to extract
    :return: callable taking a gettable and returning the tuple of
             extracted values (possibly empty)
    """
    if len(items) == 0:
        # operator.itemgetter() with no argument is an error; return an
        # empty tuple for any input instead
        return lambda a: ()
    if len(items) == 1:
        # operator.itemgetter with a single key returns a bare value, not a
        # 1-tuple; wrap it to keep the n-tuple contract
        return lambda gettable: (gettable[items[0]],)
    return operator.itemgetter(*items)
# NOTE: intentionally shadows the ``ImportWarning`` builtin within this module
class ImportWarning(Warning):
    """ Used to send warnings upwards the stack during the import process
    """
    pass
def convert_pgerror_23502(model, fields, info, e):
    """Convert a PostgreSQL not-null violation (SQLSTATE 23502) into a
    user-friendly error dictionary.

    :param model: model on which the error occurred (unused)
    :param fields: field descriptions of the model, keyed by field name
    :param info: import state information (unused)
    :param e: the original database exception
    :return: dict with a ``message`` key, plus ``field`` when the offending
             column could be extracted from the error text
    """
    m = re.match(r'^null value in column "(?P<field>\w+)" violates '
                 r'not-null constraint\n',
                 str(e))
    # fix: test the match object BEFORE dereferencing it; m is None whenever
    # the error message does not have the expected shape
    if not m or m.group('field') not in fields:
        return {'message': unicode(e)}
    field_name = m.group('field')
    message = _(u"Missing required value for the field '%s'.") % field_name
    field = fields.get(field_name)
    if field:
        # prefer the human-readable label when the field description is known
        message = _(u"Missing required value for the field '%s' (%s)") % (field['string'], field_name)
    return {
        'message': message,
        'field': field_name,
    }
def convert_pgerror_23505(model, fields, info, e):
    """Convert a PostgreSQL unique-constraint violation (SQLSTATE 23505)
    into a user-friendly error dictionary.

    :param model: model on which the error occurred (unused)
    :param fields: field descriptions of the model, keyed by field name
    :param info: import state information (unused)
    :param e: the original database exception
    :return: dict with a ``message`` key, plus ``field`` when the offending
             column could be extracted from the error text
    """
    m = re.match(r'^duplicate key (?P<field>\w+) violates unique constraint',
                 str(e))
    # fix: test the match object BEFORE dereferencing it; m is None whenever
    # the error message does not have the expected shape
    if not m or m.group('field') not in fields:
        return {'message': unicode(e)}
    field_name = m.group('field')
    message = _(u"The value for the field '%s' already exists.") % field_name
    field = fields.get(field_name)
    if field:
        message = _(u"%s This might be '%s' in the current model, or a field "
                    u"of the same name in an o2m.") % (message, field['string'])
    return {
        'message': message,
        'field': field_name,
    }
# Mapping of PostgreSQL SQLSTATE error codes to converters producing
# user-friendly error dicts; unknown codes fall back to the raw message.
PGERROR_TO_OE = collections.defaultdict(
    # shape of mapped converters
    lambda: (lambda model, fvg, info, pgerror: {'message': unicode(pgerror)}), {
    # not_null_violation
    '23502': convert_pgerror_23502,
    # unique constraint error
    '23505': convert_pgerror_23505,
})
5285 # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: